Fix setup of nova credentials.
author    Thomas Goirand <zigo@debian.org>
Mon, 1 Feb 2016 12:39:00 +0000 (20:39 +0800)
committer Thomas Goirand <zigo@debian.org>
Mon, 1 Feb 2016 12:39:00 +0000 (20:39 +0800)
Rewritten-From: 86c8299192b940bdbfde51c53956af3c5c51e4b5

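For context, the "nova credentials" in the subject are the service credentials neutron uses to notify nova of port status changes (handled by neutron/notifiers/nova.py in the file list below), which the Debian packaging scripts write into neutron.conf. A minimal sketch of the [nova] section such scripts populate, assuming the Mitaka-era keystoneauth layout; every value is a placeholder, not taken from this commit:

    # Hypothetical [nova] section of /etc/neutron/neutron.conf (assumed
    # Mitaka-era option names); all values below are placeholders.
    [nova]
    auth_url = http://controller:35357     # keystone endpoint (assumed)
    auth_type = password
    project_domain_name = default
    user_domain_name = default
    region_name = RegionOne
    project_name = service
    username = nova
    password = NOVA_PASS                   # replace with the real service password
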
1218 files changed:
.coveragerc [deleted file]
.gitignore [deleted file]
.gitreview [deleted file]
.mailmap [deleted file]
.pylintrc [deleted file]
.testr.conf [deleted file]
CONTRIBUTING.rst [deleted file]
HACKING.rst [deleted file]
LICENSE [deleted file]
MANIFEST.in [deleted file]
README.rst [deleted file]
TESTING.rst [deleted file]
babel.cfg [deleted file]
bin/neutron-rootwrap-xen-dom0 [deleted file]
devstack/lib/flavors [deleted file]
devstack/lib/l2_agent [deleted file]
devstack/lib/l2_agent_sriovnicswitch [deleted file]
devstack/lib/ml2 [deleted file]
devstack/lib/ml2_drivers/sriovnicswitch [deleted file]
devstack/lib/qos [deleted file]
devstack/plugin.sh [deleted file]
devstack/settings [deleted file]
doc/Makefile [deleted file]
doc/pom.xml [deleted file]
doc/source/conf.py [deleted file]
doc/source/dashboards/check.dashboard.rst [deleted file]
doc/source/dashboards/gate.dashboard.rst [deleted file]
doc/source/dashboards/index.rst [deleted file]
doc/source/devref/address_scopes.rst [deleted file]
doc/source/devref/advanced_services.rst [deleted file]
doc/source/devref/alembic_migrations.rst [deleted file]
doc/source/devref/api_extensions.rst [deleted file]
doc/source/devref/api_layer.rst [deleted file]
doc/source/devref/callbacks.rst [deleted file]
doc/source/devref/client_command_extensions.rst [deleted file]
doc/source/devref/contribute.rst [deleted file]
doc/source/devref/db_layer.rst [deleted file]
doc/source/devref/development.environment.rst [deleted file]
doc/source/devref/dns_order.rst [deleted file]
doc/source/devref/effective_neutron.rst [deleted file]
doc/source/devref/fullstack_testing.rst [deleted file]
doc/source/devref/i18n.rst [deleted file]
doc/source/devref/images/fullstack_multinode_simulation.png [deleted file]
doc/source/devref/images/under-the-hood-scenario-1-ovs-compute.png [deleted file]
doc/source/devref/images/under-the-hood-scenario-1-ovs-netns.png [deleted file]
doc/source/devref/images/under-the-hood-scenario-1-ovs-network.png [deleted file]
doc/source/devref/index.rst [deleted file]
doc/source/devref/instrumentation.rst [deleted file]
doc/source/devref/l2_agents.rst [deleted file]
doc/source/devref/layer3.rst [deleted file]
doc/source/devref/linuxbridge_agent.rst [deleted file]
doc/source/devref/ml2_ext_manager.rst [deleted file]
doc/source/devref/neutron_api.rst [deleted file]
doc/source/devref/openvswitch_agent.rst [deleted file]
doc/source/devref/oslo-incubator.rst [deleted file]
doc/source/devref/ovs_vhostuser.rst [deleted file]
doc/source/devref/plugin-api.rst [deleted file]
doc/source/devref/policy.rst [deleted file]
doc/source/devref/quality_of_service.rst [deleted file]
doc/source/devref/quota.rst [deleted file]
doc/source/devref/rpc_api.rst [deleted file]
doc/source/devref/rpc_callbacks.rst [deleted file]
doc/source/devref/security_group_api.rst [deleted file]
doc/source/devref/services_and_agents.rst [deleted file]
doc/source/devref/sriov_nic_agent.rst [deleted file]
doc/source/devref/template_model_sync_test.rst [deleted file]
doc/source/devref/testing_coverage.rst [deleted file]
doc/source/devref/upgrade.rst [deleted file]
doc/source/index.rst [deleted file]
doc/source/policies/blueprints.rst [deleted file]
doc/source/policies/bugs.rst [deleted file]
doc/source/policies/code-reviews.rst [deleted file]
doc/source/policies/contributor-onboarding.rst [deleted file]
doc/source/policies/gate-failure-triage.rst [deleted file]
doc/source/policies/index.rst [deleted file]
doc/source/policies/neutron-teams.rst [deleted file]
doc/source/policies/office-hours.rst [deleted file]
doc/source/policies/thirdparty-ci.rst [deleted file]
doc/source/stadium/index.rst [deleted file]
doc/source/stadium/sub_project_guidelines.rst [deleted file]
doc/source/stadium/sub_projects.rst [deleted file]
etc/README.txt [deleted file]
etc/api-paste.ini [deleted file]
etc/neutron/plugins/cisco/cisco_vpn_agent.ini [deleted file]
etc/neutron/plugins/ml2/.placeholder [deleted file]
etc/neutron/rootwrap.d/debug.filters [deleted file]
etc/neutron/rootwrap.d/dhcp.filters [deleted file]
etc/neutron/rootwrap.d/dibbler.filters [deleted file]
etc/neutron/rootwrap.d/ebtables.filters [deleted file]
etc/neutron/rootwrap.d/ipset-firewall.filters [deleted file]
etc/neutron/rootwrap.d/iptables-firewall.filters [deleted file]
etc/neutron/rootwrap.d/l3.filters [deleted file]
etc/neutron/rootwrap.d/linuxbridge-plugin.filters [deleted file]
etc/neutron/rootwrap.d/openvswitch-plugin.filters [deleted file]
etc/oslo-config-generator/dhcp_agent.ini [deleted file]
etc/oslo-config-generator/l3_agent.ini [deleted file]
etc/oslo-config-generator/linuxbridge_agent.ini [deleted file]
etc/oslo-config-generator/metadata_agent.ini [deleted file]
etc/oslo-config-generator/metering_agent.ini [deleted file]
etc/oslo-config-generator/ml2_conf.ini [deleted file]
etc/oslo-config-generator/ml2_conf_sriov.ini [deleted file]
etc/oslo-config-generator/neutron.conf [deleted file]
etc/oslo-config-generator/openvswitch_agent.ini [deleted file]
etc/oslo-config-generator/sriov_agent.ini [deleted file]
etc/policy.json [deleted file]
etc/rootwrap.conf [deleted file]
neutron/__init__.py [deleted file]
neutron/_i18n.py [deleted file]
neutron/agent/__init__.py [deleted file]
neutron/agent/common/__init__.py [deleted file]
neutron/agent/common/base_polling.py [deleted file]
neutron/agent/common/config.py [deleted file]
neutron/agent/common/ovs_lib.py [deleted file]
neutron/agent/common/polling.py [deleted file]
neutron/agent/common/utils.py [deleted file]
neutron/agent/dhcp/__init__.py [deleted file]
neutron/agent/dhcp/agent.py [deleted file]
neutron/agent/dhcp/config.py [deleted file]
neutron/agent/dhcp_agent.py [deleted file]
neutron/agent/firewall.py [deleted file]
neutron/agent/l2/__init__.py [deleted file]
neutron/agent/l2/agent_extension.py [deleted file]
neutron/agent/l2/extensions/__init__.py [deleted file]
neutron/agent/l2/extensions/manager.py [deleted file]
neutron/agent/l2/extensions/qos.py [deleted file]
neutron/agent/l3/__init__.py [deleted file]
neutron/agent/l3/agent.py [deleted file]
neutron/agent/l3/config.py [deleted file]
neutron/agent/l3/dvr.py [deleted file]
neutron/agent/l3/dvr_edge_ha_router.py [deleted file]
neutron/agent/l3/dvr_edge_router.py [deleted file]
neutron/agent/l3/dvr_fip_ns.py [deleted file]
neutron/agent/l3/dvr_local_router.py [deleted file]
neutron/agent/l3/dvr_router_base.py [deleted file]
neutron/agent/l3/dvr_snat_ns.py [deleted file]
neutron/agent/l3/fip_rule_priority_allocator.py [deleted file]
neutron/agent/l3/ha.py [deleted file]
neutron/agent/l3/ha_router.py [deleted file]
neutron/agent/l3/item_allocator.py [deleted file]
neutron/agent/l3/keepalived_state_change.py [deleted file]
neutron/agent/l3/legacy_router.py [deleted file]
neutron/agent/l3/link_local_allocator.py [deleted file]
neutron/agent/l3/namespace_manager.py [deleted file]
neutron/agent/l3/namespaces.py [deleted file]
neutron/agent/l3/router_info.py [deleted file]
neutron/agent/l3/router_processing_queue.py [deleted file]
neutron/agent/l3/rt_tables.py [deleted file]
neutron/agent/l3_agent.py [deleted file]
neutron/agent/linux/__init__.py [deleted file]
neutron/agent/linux/async_process.py [deleted file]
neutron/agent/linux/bridge_lib.py [deleted file]
neutron/agent/linux/daemon.py [deleted file]
neutron/agent/linux/dhcp.py [deleted file]
neutron/agent/linux/dibbler.py [deleted file]
neutron/agent/linux/external_process.py [deleted file]
neutron/agent/linux/interface.py [deleted file]
neutron/agent/linux/ip_conntrack.py [deleted file]
neutron/agent/linux/ip_lib.py [deleted file]
neutron/agent/linux/ip_link_support.py [deleted file]
neutron/agent/linux/ip_monitor.py [deleted file]
neutron/agent/linux/ipset_manager.py [deleted file]
neutron/agent/linux/iptables_comments.py [deleted file]
neutron/agent/linux/iptables_firewall.py [deleted file]
neutron/agent/linux/iptables_manager.py [deleted file]
neutron/agent/linux/keepalived.py [deleted file]
neutron/agent/linux/ovsdb_monitor.py [deleted file]
neutron/agent/linux/pd.py [deleted file]
neutron/agent/linux/pd_driver.py [deleted file]
neutron/agent/linux/polling.py [deleted file]
neutron/agent/linux/ra.py [deleted file]
neutron/agent/linux/utils.py [deleted file]
neutron/agent/metadata/__init__.py [deleted file]
neutron/agent/metadata/agent.py [deleted file]
neutron/agent/metadata/config.py [deleted file]
neutron/agent/metadata/driver.py [deleted file]
neutron/agent/metadata/namespace_proxy.py [deleted file]
neutron/agent/metadata_agent.py [deleted file]
neutron/agent/ovsdb/__init__.py [deleted file]
neutron/agent/ovsdb/api.py [deleted file]
neutron/agent/ovsdb/impl_idl.py [deleted file]
neutron/agent/ovsdb/impl_vsctl.py [deleted file]
neutron/agent/ovsdb/native/__init__.py [deleted file]
neutron/agent/ovsdb/native/commands.py [deleted file]
neutron/agent/ovsdb/native/connection.py [deleted file]
neutron/agent/ovsdb/native/helpers.py [deleted file]
neutron/agent/ovsdb/native/idlutils.py [deleted file]
neutron/agent/rpc.py [deleted file]
neutron/agent/securitygroups_rpc.py [deleted file]
neutron/agent/windows/__init__.py [deleted file]
neutron/agent/windows/polling.py [deleted file]
neutron/agent/windows/utils.py [deleted file]
neutron/api/__init__.py [deleted file]
neutron/api/api_common.py [deleted file]
neutron/api/extensions.py [deleted file]
neutron/api/rpc/__init__.py [deleted file]
neutron/api/rpc/agentnotifiers/__init__.py [deleted file]
neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py [deleted file]
neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py [deleted file]
neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py [deleted file]
neutron/api/rpc/callbacks/__init__.py [deleted file]
neutron/api/rpc/callbacks/consumer/__init__.py [deleted file]
neutron/api/rpc/callbacks/consumer/registry.py [deleted file]
neutron/api/rpc/callbacks/events.py [deleted file]
neutron/api/rpc/callbacks/exceptions.py [deleted file]
neutron/api/rpc/callbacks/producer/__init__.py [deleted file]
neutron/api/rpc/callbacks/producer/registry.py [deleted file]
neutron/api/rpc/callbacks/resource_manager.py [deleted file]
neutron/api/rpc/callbacks/resources.py [deleted file]
neutron/api/rpc/handlers/__init__.py [deleted file]
neutron/api/rpc/handlers/dhcp_rpc.py [deleted file]
neutron/api/rpc/handlers/dvr_rpc.py [deleted file]
neutron/api/rpc/handlers/l3_rpc.py [deleted file]
neutron/api/rpc/handlers/metadata_rpc.py [deleted file]
neutron/api/rpc/handlers/resources_rpc.py [deleted file]
neutron/api/rpc/handlers/securitygroups_rpc.py [deleted file]
neutron/api/v2/__init__.py [deleted file]
neutron/api/v2/attributes.py [deleted file]
neutron/api/v2/base.py [deleted file]
neutron/api/v2/resource.py [deleted file]
neutron/api/v2/resource_helper.py [deleted file]
neutron/api/v2/router.py [deleted file]
neutron/api/versions.py [deleted file]
neutron/api/views/__init__.py [deleted file]
neutron/api/views/versions.py [deleted file]
neutron/auth.py [deleted file]
neutron/callbacks/__init__.py [deleted file]
neutron/callbacks/events.py [deleted file]
neutron/callbacks/exceptions.py [deleted file]
neutron/callbacks/manager.py [deleted file]
neutron/callbacks/registry.py [deleted file]
neutron/callbacks/resources.py [deleted file]
neutron/cmd/__init__.py [deleted file]
neutron/cmd/eventlet/__init__.py [deleted file]
neutron/cmd/eventlet/agents/__init__.py [deleted file]
neutron/cmd/eventlet/agents/dhcp.py [deleted file]
neutron/cmd/eventlet/agents/l3.py [deleted file]
neutron/cmd/eventlet/agents/metadata.py [deleted file]
neutron/cmd/eventlet/agents/metadata_proxy.py [deleted file]
neutron/cmd/eventlet/plugins/__init__.py [deleted file]
neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py [deleted file]
neutron/cmd/eventlet/plugins/ovs_neutron_agent.py [deleted file]
neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py [deleted file]
neutron/cmd/eventlet/server/__init__.py [deleted file]
neutron/cmd/eventlet/services/__init__.py [deleted file]
neutron/cmd/eventlet/services/metering_agent.py [deleted file]
neutron/cmd/eventlet/usage_audit.py [deleted file]
neutron/cmd/ipset_cleanup.py [deleted file]
neutron/cmd/keepalived_state_change.py [deleted file]
neutron/cmd/linuxbridge_cleanup.py [deleted file]
neutron/cmd/netns_cleanup.py [deleted file]
neutron/cmd/ovs_cleanup.py [deleted file]
neutron/cmd/pd_notify.py [deleted file]
neutron/cmd/sanity/__init__.py [deleted file]
neutron/cmd/sanity/checks.py [deleted file]
neutron/cmd/sanity_check.py [deleted file]
neutron/common/__init__.py [deleted file]
neutron/common/config.py [deleted file]
neutron/common/constants.py [deleted file]
neutron/common/eventlet_utils.py [deleted file]
neutron/common/exceptions.py [deleted file]
neutron/common/ipv6_utils.py [deleted file]
neutron/common/rpc.py [deleted file]
neutron/common/test_lib.py [deleted file]
neutron/common/topics.py [deleted file]
neutron/common/utils.py [deleted file]
neutron/context.py [deleted file]
neutron/core_extensions/__init__.py [deleted file]
neutron/core_extensions/base.py [deleted file]
neutron/core_extensions/qos.py [deleted file]
neutron/db/__init__.py [deleted file]
neutron/db/address_scope_db.py [deleted file]
neutron/db/agents_db.py [deleted file]
neutron/db/agentschedulers_db.py [deleted file]
neutron/db/allowedaddresspairs_db.py [deleted file]
neutron/db/api.py [deleted file]
neutron/db/availability_zone/__init__.py [deleted file]
neutron/db/availability_zone/network.py [deleted file]
neutron/db/availability_zone/router.py [deleted file]
neutron/db/common_db_mixin.py [deleted file]
neutron/db/db_base_plugin_common.py [deleted file]
neutron/db/db_base_plugin_v2.py [deleted file]
neutron/db/dvr_mac_db.py [deleted file]
neutron/db/external_net_db.py [deleted file]
neutron/db/extradhcpopt_db.py [deleted file]
neutron/db/extraroute_db.py [deleted file]
neutron/db/flavors_db.py [deleted file]
neutron/db/ipam_backend_mixin.py [deleted file]
neutron/db/ipam_non_pluggable_backend.py [deleted file]
neutron/db/ipam_pluggable_backend.py [deleted file]
neutron/db/l3_agentschedulers_db.py [deleted file]
neutron/db/l3_attrs_db.py [deleted file]
neutron/db/l3_db.py [deleted file]
neutron/db/l3_dvr_db.py [deleted file]
neutron/db/l3_dvrscheduler_db.py [deleted file]
neutron/db/l3_gwmode_db.py [deleted file]
neutron/db/l3_hamode_db.py [deleted file]
neutron/db/l3_hascheduler_db.py [deleted file]
neutron/db/metering/__init__.py [deleted file]
neutron/db/metering/metering_db.py [deleted file]
neutron/db/metering/metering_rpc.py [deleted file]
neutron/db/migration/README [deleted file]
neutron/db/migration/__init__.py [deleted file]
neutron/db/migration/alembic.ini [deleted file]
neutron/db/migration/alembic_migrations/__init__.py [deleted file]
neutron/db/migration/alembic_migrations/agent_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/brocade_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/cisco_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/core_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/dvr_init_opts.py [deleted file]
neutron/db/migration/alembic_migrations/env.py [deleted file]
neutron/db/migration/alembic_migrations/external.py [deleted file]
neutron/db/migration/alembic_migrations/firewall_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/l3_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/lb_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/metering_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/ml2_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/nec_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/nsxv_initial_opts.py [deleted file]
neutron/db/migration/alembic_migrations/nuage_init_opts.py [deleted file]
neutron/db/migration/alembic_migrations/other_extensions_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/other_plugins_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/ovs_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/portsec_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/script.py.mako [deleted file]
neutron/db/migration/alembic_migrations/secgroup_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD [deleted file]
neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD [deleted file]
neutron/db/migration/alembic_migrations/versions/README [deleted file]
neutron/db/migration/alembic_migrations/versions/kilo_initial.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplugin_removal.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_cisco_monolithic_tables.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_network.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_legacy_ovs_and_lb.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_scope_support_in_subnetpool.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpool_hash.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/31337ec0ffee_flavors.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usage.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_ipam.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_network.py [deleted file]
neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py [deleted file]
neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b294093239c_remove_embrane_plugin.py [deleted file]
neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae39_migrate_neutron_resources_table.py [deleted file]
neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_default_to_subnetpool.py [deleted file]
neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.py [deleted file]
neutron/db/migration/alembic_migrations/versions/mitaka/expand/59cb5b6cf4d_availability_zone.py [deleted file]
neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_version_to_address_scope.py [deleted file]
neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.py [deleted file]
neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az.py [deleted file]
neutron/db/migration/alembic_migrations/vmware_init_ops.py [deleted file]
neutron/db/migration/alembic_migrations/vpn_init_ops.py [deleted file]
neutron/db/migration/autogen.py [deleted file]
neutron/db/migration/cli.py [deleted file]
neutron/db/migration/models/__init__.py [deleted file]
neutron/db/migration/models/head.py [deleted file]
neutron/db/model_base.py [deleted file]
neutron/db/models_v2.py [deleted file]
neutron/db/netmtu_db.py [deleted file]
neutron/db/portbindings_base.py [deleted file]
neutron/db/portbindings_db.py [deleted file]
neutron/db/portsecurity_db.py [deleted file]
neutron/db/portsecurity_db_common.py [deleted file]
neutron/db/qos/__init__.py [deleted file]
neutron/db/qos/api.py [deleted file]
neutron/db/qos/models.py [deleted file]
neutron/db/quota/__init__.py [deleted file]
neutron/db/quota/api.py [deleted file]
neutron/db/quota/driver.py [deleted file]
neutron/db/quota/models.py [deleted file]
neutron/db/quota_db.py [deleted file]
neutron/db/rbac_db_mixin.py [deleted file]
neutron/db/rbac_db_models.py [deleted file]
neutron/db/securitygroups_db.py [deleted file]
neutron/db/securitygroups_rpc_base.py [deleted file]
neutron/db/servicetype_db.py [deleted file]
neutron/db/sqlalchemyutils.py [deleted file]
neutron/db/vlantransparent_db.py [deleted file]
neutron/debug/README [deleted file]
neutron/debug/__init__.py [deleted file]
neutron/debug/commands.py [deleted file]
neutron/debug/debug_agent.py [deleted file]
neutron/debug/shell.py [deleted file]
neutron/extensions/__init__.py [deleted file]
neutron/extensions/address_scope.py [deleted file]
neutron/extensions/agent.py [deleted file]
neutron/extensions/allowedaddresspairs.py [deleted file]
neutron/extensions/availability_zone.py [deleted file]
neutron/extensions/dhcpagentscheduler.py [deleted file]
neutron/extensions/dns.py [deleted file]
neutron/extensions/dvr.py [deleted file]
neutron/extensions/external_net.py [deleted file]
neutron/extensions/extra_dhcp_opt.py [deleted file]
neutron/extensions/extraroute.py [deleted file]
neutron/extensions/flavors.py [deleted file]
neutron/extensions/l3.py [deleted file]
neutron/extensions/l3_ext_gw_mode.py [deleted file]
neutron/extensions/l3_ext_ha_mode.py [deleted file]
neutron/extensions/l3agentscheduler.py [deleted file]
neutron/extensions/metering.py [deleted file]
neutron/extensions/multiprovidernet.py [deleted file]
neutron/extensions/netmtu.py [deleted file]
neutron/extensions/network_availability_zone.py [deleted file]
neutron/extensions/portbindings.py [deleted file]
neutron/extensions/portsecurity.py [deleted file]
neutron/extensions/providernet.py [deleted file]
neutron/extensions/qos.py [deleted file]
neutron/extensions/quotasv2.py [deleted file]
neutron/extensions/rbac.py [deleted file]
neutron/extensions/router_availability_zone.py [deleted file]
neutron/extensions/routerservicetype.py [deleted file]
neutron/extensions/securitygroup.py [deleted file]
neutron/extensions/servicetype.py [deleted file]
neutron/extensions/subnetallocation.py [deleted file]
neutron/extensions/vlantransparent.py [deleted file]
neutron/hacking/__init__.py [deleted file]
neutron/hacking/checks.py [deleted file]
neutron/i18n.py [deleted file]
neutron/ipam/__init__.py [deleted file]
neutron/ipam/driver.py [deleted file]
neutron/ipam/drivers/__init__.py [deleted file]
neutron/ipam/drivers/neutrondb_ipam/__init__.py [deleted file]
neutron/ipam/drivers/neutrondb_ipam/db_api.py [deleted file]
neutron/ipam/drivers/neutrondb_ipam/db_models.py [deleted file]
neutron/ipam/drivers/neutrondb_ipam/driver.py [deleted file]
neutron/ipam/exceptions.py [deleted file]
neutron/ipam/requests.py [deleted file]
neutron/ipam/subnet_alloc.py [deleted file]
neutron/ipam/utils.py [deleted file]
neutron/locale/de/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/es/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/fr/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/it/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/ja/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/ko_KR/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/neutron-log-error.pot [deleted file]
neutron/locale/neutron-log-info.pot [deleted file]
neutron/locale/neutron-log-warning.pot [deleted file]
neutron/locale/neutron.pot [deleted file]
neutron/locale/pt_BR/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/ru/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po [deleted file]
neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po [deleted file]
neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po [deleted file]
neutron/locale/tr_TR/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/zh_CN/LC_MESSAGES/neutron.po [deleted file]
neutron/locale/zh_TW/LC_MESSAGES/neutron.po [deleted file]
neutron/manager.py [deleted file]
neutron/neutron_plugin_base_v2.py [deleted file]
neutron/notifiers/__init__.py [deleted file]
neutron/notifiers/batch_notifier.py [deleted file]
neutron/notifiers/nova.py [deleted file]
neutron/objects/__init__.py [deleted file]
neutron/objects/base.py [deleted file]
neutron/objects/qos/__init__.py [deleted file]
neutron/objects/qos/policy.py [deleted file]
neutron/objects/qos/rule.py [deleted file]
neutron/objects/qos/rule_type.py [deleted file]
neutron/openstack/__init__.py [deleted file]
neutron/openstack/common/__init__.py [deleted file]
neutron/openstack/common/cache/__init__.py [deleted file]
neutron/openstack/common/cache/_backends/__init__.py [deleted file]
neutron/openstack/common/cache/_backends/memory.py [deleted file]
neutron/openstack/common/cache/backends.py [deleted file]
neutron/openstack/common/cache/cache.py [deleted file]
neutron/opts.py [deleted file]
neutron/pecan_wsgi/__init__.py [deleted file]
neutron/pecan_wsgi/app.py [deleted file]
neutron/pecan_wsgi/controllers/__init__.py [deleted file]
neutron/pecan_wsgi/controllers/quota.py [deleted file]
neutron/pecan_wsgi/controllers/root.py [deleted file]
neutron/pecan_wsgi/controllers/utils.py [deleted file]
neutron/pecan_wsgi/hooks/__init__.py [deleted file]
neutron/pecan_wsgi/hooks/body_validation.py [deleted file]
neutron/pecan_wsgi/hooks/context.py [deleted file]
neutron/pecan_wsgi/hooks/member_action.py [deleted file]
neutron/pecan_wsgi/hooks/notifier.py [deleted file]
neutron/pecan_wsgi/hooks/ownership_validation.py [deleted file]
neutron/pecan_wsgi/hooks/policy_enforcement.py [deleted file]
neutron/pecan_wsgi/hooks/quota_enforcement.py [deleted file]
neutron/pecan_wsgi/hooks/translation.py [deleted file]
neutron/pecan_wsgi/startup.py [deleted file]
neutron/plugins/__init__.py [deleted file]
neutron/plugins/common/__init__.py [deleted file]
neutron/plugins/common/constants.py [deleted file]
neutron/plugins/common/utils.py [deleted file]
neutron/plugins/hyperv/__init__.py [deleted file]
neutron/plugins/hyperv/agent/__init__.py [deleted file]
neutron/plugins/hyperv/agent/security_groups_driver.py [deleted file]
neutron/plugins/ml2/README [deleted file]
neutron/plugins/ml2/__init__.py [deleted file]
neutron/plugins/ml2/common/__init__.py [deleted file]
neutron/plugins/ml2/common/exceptions.py [deleted file]
neutron/plugins/ml2/config.py [deleted file]
neutron/plugins/ml2/db.py [deleted file]
neutron/plugins/ml2/driver_api.py [deleted file]
neutron/plugins/ml2/driver_context.py [deleted file]
neutron/plugins/ml2/drivers/__init__.py [deleted file]
neutron/plugins/ml2/drivers/helpers.py [deleted file]
neutron/plugins/ml2/drivers/l2pop/README [deleted file]
neutron/plugins/ml2/drivers/l2pop/__init__.py [deleted file]
neutron/plugins/ml2/drivers/l2pop/config.py [deleted file]
neutron/plugins/ml2/drivers/l2pop/db.py [deleted file]
neutron/plugins/ml2/drivers/l2pop/mech_driver.py [deleted file]
neutron/plugins/ml2/drivers/l2pop/rpc.py [deleted file]
neutron/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py [deleted file]
neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py [deleted file]
neutron/plugins/ml2/drivers/linuxbridge/__init__.py [deleted file]
neutron/plugins/ml2/drivers/linuxbridge/agent/__init__.py [deleted file]
neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py [deleted file]
neutron/plugins/ml2/drivers/linuxbridge/agent/common/__init__.py [deleted file]
neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py [deleted file]
neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py [deleted file]
neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py [deleted file]
neutron/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py [deleted file]
neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py [deleted file]
neutron/plugins/ml2/drivers/mech_agent.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/__init__.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/agent/__init__.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py [deleted file]
neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/__init__.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/main.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_phys.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/main.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_phys.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/main.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/README [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/build-rpm.sh [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec [deleted file]
neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap [deleted file]
neutron/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py [deleted file]
neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py [deleted file]
neutron/plugins/ml2/drivers/type_flat.py [deleted file]
neutron/plugins/ml2/drivers/type_geneve.py [deleted file]
neutron/plugins/ml2/drivers/type_gre.py [deleted file]
neutron/plugins/ml2/drivers/type_local.py [deleted file]
neutron/plugins/ml2/drivers/type_tunnel.py [deleted file]
neutron/plugins/ml2/drivers/type_vlan.py [deleted file]
neutron/plugins/ml2/drivers/type_vxlan.py [deleted file]
neutron/plugins/ml2/extensions/__init__.py [deleted file]
neutron/plugins/ml2/extensions/port_security.py [deleted file]
neutron/plugins/ml2/extensions/qos.py [deleted file]
neutron/plugins/ml2/managers.py [deleted file]
neutron/plugins/ml2/models.py [deleted file]
neutron/plugins/ml2/plugin.py [deleted file]
neutron/plugins/ml2/rpc.py [deleted file]
neutron/policy.py [deleted file]
neutron/quota/__init__.py [deleted file]
neutron/quota/resource.py [deleted file]
neutron/quota/resource_registry.py [deleted file]
neutron/scheduler/__init__.py [deleted file]
neutron/scheduler/base_resource_filter.py [deleted file]
neutron/scheduler/base_scheduler.py [deleted file]
neutron/scheduler/dhcp_agent_scheduler.py [deleted file]
neutron/scheduler/l3_agent_scheduler.py [deleted file]
neutron/server/__init__.py [deleted file]
neutron/server/rpc_eventlet.py [deleted file]
neutron/server/wsgi_eventlet.py [deleted file]
neutron/server/wsgi_pecan.py [deleted file]
neutron/service.py [deleted file]
neutron/services/__init__.py [deleted file]
neutron/services/firewall/__init__.py [deleted file]
neutron/services/firewall/agents/__init__.py [deleted file]
neutron/services/firewall/agents/firewall_agent_api.py [deleted file]
neutron/services/firewall/agents/l3reference/__init__.py [deleted file]
neutron/services/firewall/agents/l3reference/firewall_l3_agent.py [deleted file]
neutron/services/flavors/__init__.py [deleted file]
neutron/services/flavors/flavors_plugin.py [deleted file]
neutron/services/l3_router/README [deleted file]
neutron/services/l3_router/__init__.py [deleted file]
neutron/services/l3_router/l3_router_plugin.py [deleted file]
neutron/services/loadbalancer/__init__.py [deleted file]
neutron/services/metering/__init__.py [deleted file]
neutron/services/metering/agents/__init__.py [deleted file]
neutron/services/metering/agents/metering_agent.py [deleted file]
neutron/services/metering/drivers/__init__.py [deleted file]
neutron/services/metering/drivers/abstract_driver.py [deleted file]
neutron/services/metering/drivers/iptables/__init__.py [deleted file]
neutron/services/metering/drivers/iptables/iptables_driver.py [deleted file]
neutron/services/metering/drivers/noop/__init__.py [deleted file]
neutron/services/metering/drivers/noop/noop_driver.py [deleted file]
neutron/services/metering/metering_plugin.py [deleted file]
neutron/services/provider_configuration.py [deleted file]
neutron/services/qos/__init__.py [deleted file]
neutron/services/qos/notification_drivers/__init__.py [deleted file]
neutron/services/qos/notification_drivers/manager.py [deleted file]
neutron/services/qos/notification_drivers/message_queue.py [deleted file]
neutron/services/qos/notification_drivers/qos_base.py [deleted file]
neutron/services/qos/qos_consts.py [deleted file]
neutron/services/qos/qos_plugin.py [deleted file]
neutron/services/rbac/__init__.py [deleted file]
neutron/services/service_base.py [deleted file]
neutron/services/vpn/__init__.py [deleted file]
neutron/tests/__init__.py [deleted file]
neutron/tests/api/__init__.py [deleted file]
neutron/tests/api/admin/__init__.py [deleted file]
neutron/tests/api/admin/test_agent_management.py [deleted file]
neutron/tests/api/admin/test_dhcp_agent_scheduler.py [deleted file]
neutron/tests/api/admin/test_extension_driver_port_security_admin.py [deleted file]
neutron/tests/api/admin/test_external_network_extension.py [deleted file]
neutron/tests/api/admin/test_external_networks_negative.py [deleted file]
neutron/tests/api/admin/test_floating_ips_admin_actions.py [deleted file]
neutron/tests/api/admin/test_l3_agent_scheduler.py [deleted file]
neutron/tests/api/admin/test_quotas.py [deleted file]
neutron/tests/api/admin/test_routers_dvr.py [deleted file]
neutron/tests/api/admin/test_shared_network_extension.py [deleted file]
neutron/tests/api/base.py [deleted file]
neutron/tests/api/base_routers.py [deleted file]
neutron/tests/api/base_security_groups.py [deleted file]
neutron/tests/api/clients.py [deleted file]
neutron/tests/api/test_address_scopes.py [deleted file]
neutron/tests/api/test_address_scopes_negative.py [deleted file]
neutron/tests/api/test_allowed_address_pair.py [deleted file]
neutron/tests/api/test_dhcp_ipv6.py [deleted file]
neutron/tests/api/test_extension_driver_port_security.py [deleted file]
neutron/tests/api/test_extensions.py [deleted file]
neutron/tests/api/test_extra_dhcp_options.py [deleted file]
neutron/tests/api/test_flavors_extensions.py [deleted file]
neutron/tests/api/test_floating_ips.py [deleted file]
neutron/tests/api/test_floating_ips_negative.py [deleted file]
neutron/tests/api/test_fwaas_extensions.py [deleted file]
neutron/tests/api/test_metering_extensions.py [deleted file]
neutron/tests/api/test_networks.py [deleted file]
neutron/tests/api/test_networks_negative.py [deleted file]
neutron/tests/api/test_ports.py [deleted file]
neutron/tests/api/test_qos.py [deleted file]
neutron/tests/api/test_routers.py [deleted file]
neutron/tests/api/test_routers_negative.py [deleted file]
neutron/tests/api/test_security_groups.py [deleted file]
neutron/tests/api/test_security_groups_negative.py [deleted file]
neutron/tests/api/test_service_type_management.py [deleted file]
neutron/tests/api/test_subnetpools.py [deleted file]
neutron/tests/api/test_subnetpools_negative.py [deleted file]
neutron/tests/api/test_vpnaas_extensions.py [deleted file]
neutron/tests/base.py [deleted file]
neutron/tests/common/__init__.py [deleted file]
neutron/tests/common/agents/__init__.py [deleted file]
neutron/tests/common/agents/l2_extensions.py [deleted file]
neutron/tests/common/agents/l3_agent.py [deleted file]
neutron/tests/common/base.py [deleted file]
neutron/tests/common/config_fixtures.py [deleted file]
neutron/tests/common/conn_testers.py [deleted file]
neutron/tests/common/helpers.py [deleted file]
neutron/tests/common/l3_test_common.py [deleted file]
neutron/tests/common/machine_fixtures.py [deleted file]
neutron/tests/common/net_helpers.py [deleted file]
neutron/tests/contrib/README [deleted file]
neutron/tests/contrib/functional-testing.filters [deleted file]
neutron/tests/contrib/gate_hook.sh [deleted file]
neutron/tests/contrib/post_test_hook.sh [deleted file]
neutron/tests/etc/api-paste.ini.test [deleted file]
neutron/tests/etc/neutron.conf [deleted file]
neutron/tests/etc/neutron_test.conf [deleted file]
neutron/tests/etc/policy.json [deleted file]
neutron/tests/fake_notifier.py [deleted file]
neutron/tests/fullstack/README [deleted file]
neutron/tests/fullstack/__init__.py [deleted file]
neutron/tests/fullstack/base.py [deleted file]
neutron/tests/fullstack/resources/__init__.py [deleted file]
neutron/tests/fullstack/resources/client.py [deleted file]
neutron/tests/fullstack/resources/config.py [deleted file]
neutron/tests/fullstack/resources/environment.py [deleted file]
neutron/tests/fullstack/resources/machine.py [deleted file]
neutron/tests/fullstack/resources/process.py [deleted file]
neutron/tests/fullstack/test_connectivity.py [deleted file]
neutron/tests/fullstack/test_l3_agent.py [deleted file]
neutron/tests/fullstack/test_qos.py [deleted file]
neutron/tests/functional/__init__.py [deleted file]
neutron/tests/functional/agent/__init__.py [deleted file]
neutron/tests/functional/agent/l2/__init__.py [deleted file]
neutron/tests/functional/agent/l2/base.py [deleted file]
neutron/tests/functional/agent/l2/extensions/__init__.py [deleted file]
neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py [deleted file]
neutron/tests/functional/agent/l3/__init__.py [deleted file]
neutron/tests/functional/agent/l3/framework.py [deleted file]
neutron/tests/functional/agent/l3/test_dvr_router.py [deleted file]
neutron/tests/functional/agent/l3/test_ha_router.py [deleted file]
neutron/tests/functional/agent/l3/test_keepalived_state_change.py [deleted file]
neutron/tests/functional/agent/l3/test_legacy_router.py [deleted file]
neutron/tests/functional/agent/l3/test_metadata_proxy.py [deleted file]
neutron/tests/functional/agent/l3/test_namespace_manager.py [deleted file]
neutron/tests/functional/agent/linux/__init__.py [deleted file]
neutron/tests/functional/agent/linux/base.py [deleted file]
neutron/tests/functional/agent/linux/bin/__init__.py [deleted file]
neutron/tests/functional/agent/linux/bin/ipt_binname.py [deleted file]
neutron/tests/functional/agent/linux/helpers.py [deleted file]
neutron/tests/functional/agent/linux/simple_daemon.py [deleted file]
neutron/tests/functional/agent/linux/test_async_process.py [deleted file]
neutron/tests/functional/agent/linux/test_bridge_lib.py [deleted file]
neutron/tests/functional/agent/linux/test_dhcp.py [deleted file]
neutron/tests/functional/agent/linux/test_interface.py [deleted file]
neutron/tests/functional/agent/linux/test_ip_lib.py [deleted file]
neutron/tests/functional/agent/linux/test_ip_monitor.py [deleted file]
neutron/tests/functional/agent/linux/test_ipset.py [deleted file]
neutron/tests/functional/agent/linux/test_iptables.py [deleted file]
neutron/tests/functional/agent/linux/test_keepalived.py [deleted file]
neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py [deleted file]
neutron/tests/functional/agent/linux/test_ovsdb_monitor.py [deleted file]
neutron/tests/functional/agent/linux/test_process_monitor.py [deleted file]
neutron/tests/functional/agent/linux/test_utils.py [deleted file]
neutron/tests/functional/agent/test_dhcp_agent.py [deleted file]
neutron/tests/functional/agent/test_firewall.py [deleted file]
neutron/tests/functional/agent/test_l2_lb_agent.py [deleted file]
neutron/tests/functional/agent/test_l2_ovs_agent.py [deleted file]
neutron/tests/functional/agent/test_ovs_flows.py [deleted file]
neutron/tests/functional/agent/test_ovs_lib.py [deleted file]
neutron/tests/functional/api/__init__.py [deleted file]
neutron/tests/functional/api/test_policies.py [deleted file]
neutron/tests/functional/base.py [deleted file]
neutron/tests/functional/cmd/__init__.py [deleted file]
neutron/tests/functional/cmd/test_linuxbridge_cleanup.py [deleted file]
neutron/tests/functional/cmd/test_netns_cleanup.py [deleted file]
neutron/tests/functional/common/__init__.py [deleted file]
neutron/tests/functional/common/test_utils.py [deleted file]
neutron/tests/functional/db/__init__.py [deleted file]
neutron/tests/functional/db/test_ipam.py [deleted file]
neutron/tests/functional/db/test_migrations.py [deleted file]
neutron/tests/functional/db/test_models.py [deleted file]
neutron/tests/functional/pecan_wsgi/__init__.py [deleted file]
neutron/tests/functional/pecan_wsgi/config.py [deleted file]
neutron/tests/functional/pecan_wsgi/test_functional.py [deleted file]
neutron/tests/functional/requirements.txt [deleted file]
neutron/tests/functional/sanity/__init__.py [deleted file]
neutron/tests/functional/sanity/test_sanity.py [deleted file]
neutron/tests/functional/scheduler/__init__.py [deleted file]
neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py [deleted file]
neutron/tests/functional/scheduler/test_l3_agent_scheduler.py [deleted file]
neutron/tests/functional/services/__init__.py [deleted file]
neutron/tests/functional/services/l3_router/__init__.py [deleted file]
neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py [deleted file]
neutron/tests/functional/test_server.py [deleted file]
neutron/tests/functional/test_service.py [deleted file]
neutron/tests/post_mortem_debug.py [deleted file]
neutron/tests/retargetable/__init__.py [deleted file]
neutron/tests/retargetable/base.py [deleted file]
neutron/tests/retargetable/client_fixtures.py [deleted file]
neutron/tests/retargetable/rest_fixture.py [deleted file]
neutron/tests/retargetable/test_example.py [deleted file]
neutron/tests/tempest/README.rst [deleted file]
neutron/tests/tempest/__init__.py [deleted file]
neutron/tests/tempest/auth.py [deleted file]
neutron/tests/tempest/common/__init__.py [deleted file]
neutron/tests/tempest/common/accounts.py [deleted file]
neutron/tests/tempest/common/commands.py [deleted file]
neutron/tests/tempest/common/cred_provider.py [deleted file]
neutron/tests/tempest/common/credentials.py [deleted file]
neutron/tests/tempest/common/custom_matchers.py [deleted file]
neutron/tests/tempest/common/generator/__init__.py [deleted file]
neutron/tests/tempest/common/generator/base_generator.py [deleted file]
neutron/tests/tempest/common/generator/negative_generator.py [deleted file]
neutron/tests/tempest/common/generator/valid_generator.py [deleted file]
neutron/tests/tempest/common/glance_http.py [deleted file]
neutron/tests/tempest/common/isolated_creds.py [deleted file]
neutron/tests/tempest/common/negative_rest_client.py [deleted file]
neutron/tests/tempest/common/service_client.py [deleted file]
neutron/tests/tempest/common/ssh.py [deleted file]
neutron/tests/tempest/common/tempest_fixtures.py [deleted file]
neutron/tests/tempest/common/utils/__init__.py [deleted file]
neutron/tests/tempest/common/utils/data_utils.py [deleted file]
neutron/tests/tempest/common/utils/file_utils.py [deleted file]
neutron/tests/tempest/common/utils/misc.py [deleted file]
neutron/tests/tempest/common/waiters.py [deleted file]
neutron/tests/tempest/config.py [deleted file]
neutron/tests/tempest/exceptions.py [deleted file]
neutron/tests/tempest/manager.py [deleted file]
neutron/tests/tempest/services/__init__.py [deleted file]
neutron/tests/tempest/services/botoclients.py [deleted file]
neutron/tests/tempest/services/identity/__init__.py [deleted file]
neutron/tests/tempest/services/identity/v2/__init__.py [deleted file]
neutron/tests/tempest/services/identity/v2/json/__init__.py [deleted file]
neutron/tests/tempest/services/identity/v2/json/identity_client.py [deleted file]
neutron/tests/tempest/services/identity/v3/__init__.py [deleted file]
neutron/tests/tempest/services/identity/v3/json/__init__.py [deleted file]
neutron/tests/tempest/services/identity/v3/json/credentials_client.py [deleted file]
neutron/tests/tempest/services/identity/v3/json/endpoints_client.py [deleted file]
neutron/tests/tempest/services/identity/v3/json/identity_client.py [deleted file]
neutron/tests/tempest/services/identity/v3/json/policy_client.py [deleted file]
neutron/tests/tempest/services/identity/v3/json/region_client.py [deleted file]
neutron/tests/tempest/services/identity/v3/json/service_client.py [deleted file]
neutron/tests/tempest/services/network/__init__.py [deleted file]
neutron/tests/tempest/services/network/json/__init__.py [deleted file]
neutron/tests/tempest/services/network/json/network_client.py [deleted file]
neutron/tests/tempest/services/network/resources.py [deleted file]
neutron/tests/tempest/test.py [deleted file]
neutron/tests/tools.py [deleted file]
neutron/tests/unit/__init__.py [deleted file]
neutron/tests/unit/_test_extension_portbindings.py [deleted file]
neutron/tests/unit/agent/__init__.py [deleted file]
neutron/tests/unit/agent/common/__init__.py [deleted file]
neutron/tests/unit/agent/common/test_config.py [deleted file]
neutron/tests/unit/agent/common/test_ovs_lib.py [deleted file]
neutron/tests/unit/agent/common/test_polling.py [deleted file]
neutron/tests/unit/agent/common/test_utils.py [deleted file]
neutron/tests/unit/agent/dhcp/__init__.py [deleted file]
neutron/tests/unit/agent/dhcp/test_agent.py [deleted file]
neutron/tests/unit/agent/l2/__init__.py [deleted file]
neutron/tests/unit/agent/l2/extensions/__init__.py [deleted file]
neutron/tests/unit/agent/l2/extensions/test_manager.py [deleted file]
neutron/tests/unit/agent/l2/extensions/test_qos.py [deleted file]
neutron/tests/unit/agent/l3/__init__.py [deleted file]
neutron/tests/unit/agent/l3/test_agent.py [deleted file]
neutron/tests/unit/agent/l3/test_dvr_fip_ns.py [deleted file]
neutron/tests/unit/agent/l3/test_dvr_local_router.py [deleted file]
neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py [deleted file]
neutron/tests/unit/agent/l3/test_ha_router.py [deleted file]
neutron/tests/unit/agent/l3/test_item_allocator.py [deleted file]
neutron/tests/unit/agent/l3/test_legacy_router.py [deleted file]
neutron/tests/unit/agent/l3/test_link_local_allocator.py [deleted file]
neutron/tests/unit/agent/l3/test_namespace_manager.py [deleted file]
neutron/tests/unit/agent/l3/test_router_info.py [deleted file]
neutron/tests/unit/agent/l3/test_router_processing_queue.py [deleted file]
neutron/tests/unit/agent/l3/test_rt_tables.py [deleted file]
neutron/tests/unit/agent/linux/__init__.py [deleted file]
neutron/tests/unit/agent/linux/failing_process.py [deleted file]
neutron/tests/unit/agent/linux/test_async_process.py [deleted file]
neutron/tests/unit/agent/linux/test_bridge_lib.py [deleted file]
neutron/tests/unit/agent/linux/test_daemon.py [deleted file]
neutron/tests/unit/agent/linux/test_dhcp.py [deleted file]
neutron/tests/unit/agent/linux/test_external_process.py [deleted file]
neutron/tests/unit/agent/linux/test_interface.py [deleted file]
neutron/tests/unit/agent/linux/test_ip_lib.py [deleted file]
neutron/tests/unit/agent/linux/test_ip_link_support.py [deleted file]
neutron/tests/unit/agent/linux/test_ip_monitor.py [deleted file]
neutron/tests/unit/agent/linux/test_ipset_manager.py [deleted file]
neutron/tests/unit/agent/linux/test_iptables_firewall.py [deleted file]
neutron/tests/unit/agent/linux/test_iptables_manager.py [deleted file]
neutron/tests/unit/agent/linux/test_keepalived.py [deleted file]
neutron/tests/unit/agent/linux/test_ovsdb_monitor.py [deleted file]
neutron/tests/unit/agent/linux/test_pd.py [deleted file]
neutron/tests/unit/agent/linux/test_polling.py [deleted file]
neutron/tests/unit/agent/linux/test_utils.py [deleted file]
neutron/tests/unit/agent/metadata/__init__.py [deleted file]
neutron/tests/unit/agent/metadata/test_agent.py [deleted file]
neutron/tests/unit/agent/metadata/test_driver.py [deleted file]
neutron/tests/unit/agent/metadata/test_namespace_proxy.py [deleted file]
neutron/tests/unit/agent/ovsdb/__init__.py [deleted file]
neutron/tests/unit/agent/ovsdb/native/__init__.py [deleted file]
neutron/tests/unit/agent/ovsdb/native/test_helpers.py [deleted file]
neutron/tests/unit/agent/test_rpc.py [deleted file]
neutron/tests/unit/agent/test_securitygroups_rpc.py [deleted file]
neutron/tests/unit/api/__init__.py [deleted file]
neutron/tests/unit/api/rpc/__init__.py [deleted file]
neutron/tests/unit/api/rpc/agentnotifiers/__init__.py [deleted file]
neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py [deleted file]
neutron/tests/unit/api/rpc/callbacks/__init__.py [deleted file]
neutron/tests/unit/api/rpc/callbacks/consumer/__init__.py [deleted file]
neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py [deleted file]
neutron/tests/unit/api/rpc/callbacks/producer/__init__.py [deleted file]
neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py [deleted file]
neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py [deleted file]
neutron/tests/unit/api/rpc/callbacks/test_resources.py [deleted file]
neutron/tests/unit/api/rpc/handlers/__init__.py [deleted file]
neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py [deleted file]
neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py [deleted file]
neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py [deleted file]
neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py [deleted file]
neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py [deleted file]
neutron/tests/unit/api/test_api_common.py [deleted file]
neutron/tests/unit/api/test_extensions.py [deleted file]
neutron/tests/unit/api/v2/__init__.py [deleted file]
neutron/tests/unit/api/v2/test_attributes.py [deleted file]
neutron/tests/unit/api/v2/test_base.py [deleted file]
neutron/tests/unit/api/v2/test_resource.py [deleted file]
neutron/tests/unit/callbacks/__init__.py [deleted file]
neutron/tests/unit/callbacks/test_manager.py [deleted file]
neutron/tests/unit/cmd/__init__.py [deleted file]
neutron/tests/unit/cmd/server/__init__.py [deleted file]
neutron/tests/unit/cmd/test_netns_cleanup.py [deleted file]
neutron/tests/unit/cmd/test_ovs_cleanup.py [deleted file]
neutron/tests/unit/cmd/test_sanity_check.py [deleted file]
neutron/tests/unit/common/__init__.py [deleted file]
neutron/tests/unit/common/test_ipv6_utils.py [deleted file]
neutron/tests/unit/common/test_rpc.py [deleted file]
neutron/tests/unit/common/test_utils.py [deleted file]
neutron/tests/unit/core_extensions/__init__.py [deleted file]
neutron/tests/unit/core_extensions/test_qos.py [deleted file]
neutron/tests/unit/db/__init__.py [deleted file]
neutron/tests/unit/db/metering/__init__.py [deleted file]
neutron/tests/unit/db/metering/test_metering_db.py [deleted file]
neutron/tests/unit/db/quota/__init__.py [deleted file]
neutron/tests/unit/db/quota/test_api.py [deleted file]
neutron/tests/unit/db/quota/test_driver.py [deleted file]
neutron/tests/unit/db/test_agents_db.py [deleted file]
neutron/tests/unit/db/test_agentschedulers_db.py [deleted file]
neutron/tests/unit/db/test_allowedaddresspairs_db.py [deleted file]
neutron/tests/unit/db/test_api.py [deleted file]
neutron/tests/unit/db/test_db_base_plugin_common.py [deleted file]
neutron/tests/unit/db/test_db_base_plugin_v2.py [deleted file]
neutron/tests/unit/db/test_dvr_mac_db.py [deleted file]
neutron/tests/unit/db/test_ipam_backend_mixin.py [deleted file]
neutron/tests/unit/db/test_ipam_non_pluggable_backend.py [deleted file]
neutron/tests/unit/db/test_ipam_pluggable_backend.py [deleted file]
neutron/tests/unit/db/test_l3_db.py [deleted file]
neutron/tests/unit/db/test_l3_dvr_db.py [deleted file]
neutron/tests/unit/db/test_l3_hamode_db.py [deleted file]
neutron/tests/unit/db/test_migration.py [deleted file]
neutron/tests/unit/db/test_securitygroups_db.py [deleted file]
neutron/tests/unit/debug/__init__.py [deleted file]
neutron/tests/unit/debug/test_commands.py [deleted file]
neutron/tests/unit/dummy_plugin.py [deleted file]
neutron/tests/unit/extension_stubs.py [deleted file]
neutron/tests/unit/extensions/__init__.py [deleted file]
neutron/tests/unit/extensions/base.py [deleted file]
neutron/tests/unit/extensions/extendedattribute.py [deleted file]
neutron/tests/unit/extensions/extensionattribute.py [deleted file]
neutron/tests/unit/extensions/foxinsocks.py [deleted file]
neutron/tests/unit/extensions/test_address_scope.py [deleted file]
neutron/tests/unit/extensions/test_agent.py [deleted file]
neutron/tests/unit/extensions/test_availability_zone.py [deleted file]
neutron/tests/unit/extensions/test_dns.py [deleted file]
neutron/tests/unit/extensions/test_external_net.py [deleted file]
neutron/tests/unit/extensions/test_extra_dhcp_opt.py [deleted file]
neutron/tests/unit/extensions/test_extraroute.py [deleted file]
neutron/tests/unit/extensions/test_flavors.py [deleted file]
neutron/tests/unit/extensions/test_l3.py [deleted file]
neutron/tests/unit/extensions/test_l3_ext_gw_mode.py [deleted file]
neutron/tests/unit/extensions/test_netmtu.py [deleted file]
neutron/tests/unit/extensions/test_portsecurity.py [deleted file]
neutron/tests/unit/extensions/test_providernet.py [deleted file]
neutron/tests/unit/extensions/test_quotasv2.py [deleted file]
neutron/tests/unit/extensions/test_router_availability_zone.py [deleted file]
neutron/tests/unit/extensions/test_securitygroup.py [deleted file]
neutron/tests/unit/extensions/test_servicetype.py [deleted file]
neutron/tests/unit/extensions/test_vlantransparent.py [deleted file]
neutron/tests/unit/extensions/v2attributes.py [deleted file]
neutron/tests/unit/hacking/__init__.py [deleted file]
neutron/tests/unit/hacking/test_checks.py [deleted file]
neutron/tests/unit/ipam/__init__.py [deleted file]
neutron/tests/unit/ipam/drivers/__init__.py [deleted file]
neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py [deleted file]
neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py [deleted file]
neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py [deleted file]
neutron/tests/unit/ipam/fake_driver.py [deleted file]
neutron/tests/unit/ipam/test_requests.py [deleted file]
neutron/tests/unit/ipam/test_subnet_alloc.py [deleted file]
neutron/tests/unit/ipam/test_utils.py [deleted file]
neutron/tests/unit/notifiers/__init__.py [deleted file]
neutron/tests/unit/notifiers/test_batch_notifier.py [deleted file]
neutron/tests/unit/notifiers/test_nova.py [deleted file]
neutron/tests/unit/objects/__init__.py [deleted file]
neutron/tests/unit/objects/qos/__init__.py [deleted file]
neutron/tests/unit/objects/qos/test_policy.py [deleted file]
neutron/tests/unit/objects/qos/test_rule.py [deleted file]
neutron/tests/unit/objects/qos/test_rule_type.py [deleted file]
neutron/tests/unit/objects/test_base.py [deleted file]
neutron/tests/unit/objects/test_objects.py [deleted file]
neutron/tests/unit/plugins/__init__.py [deleted file]
neutron/tests/unit/plugins/common/__init__.py [deleted file]
neutron/tests/unit/plugins/common/test_utils.py [deleted file]
neutron/tests/unit/plugins/ml2/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/_test_mech_agent.py [deleted file]
neutron/tests/unit/plugins/ml2/base.py [deleted file]
neutron/tests/unit/plugins/ml2/db/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/ext_test.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/l2pop/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/linuxbridge/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_fake_agent.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/fake_oflib.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_int.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/test_helpers.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/test_type_local.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py [deleted file]
neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py [deleted file]
neutron/tests/unit/plugins/ml2/extensions/__init__.py [deleted file]
neutron/tests/unit/plugins/ml2/extensions/fake_extension.py [deleted file]
neutron/tests/unit/plugins/ml2/extensions/test_port_security.py [deleted file]
neutron/tests/unit/plugins/ml2/test_agent_scheduler.py [deleted file]
neutron/tests/unit/plugins/ml2/test_db.py [deleted file]
neutron/tests/unit/plugins/ml2/test_driver_context.py [deleted file]
neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py [deleted file]
neutron/tests/unit/plugins/ml2/test_extension_driver_api.py [deleted file]
neutron/tests/unit/plugins/ml2/test_plugin.py [deleted file]
neutron/tests/unit/plugins/ml2/test_port_binding.py [deleted file]
neutron/tests/unit/plugins/ml2/test_rpc.py [deleted file]
neutron/tests/unit/plugins/ml2/test_security_group.py [deleted file]
neutron/tests/unit/plugins/ml2/test_tracked_resources.py [deleted file]
neutron/tests/unit/quota/__init__.py [deleted file]
neutron/tests/unit/quota/test_resource.py [deleted file]
neutron/tests/unit/quota/test_resource_registry.py [deleted file]
neutron/tests/unit/scheduler/__init__.py [deleted file]
neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py [deleted file]
neutron/tests/unit/scheduler/test_l3_agent_scheduler.py [deleted file]
neutron/tests/unit/services/__init__.py [deleted file]
neutron/tests/unit/services/l3_router/__init__.py [deleted file]
neutron/tests/unit/services/metering/__init__.py [deleted file]
neutron/tests/unit/services/metering/agents/__init__.py [deleted file]
neutron/tests/unit/services/metering/agents/test_metering_agent.py [deleted file]
neutron/tests/unit/services/metering/drivers/__init__.py [deleted file]
neutron/tests/unit/services/metering/drivers/test_iptables.py [deleted file]
neutron/tests/unit/services/metering/test_metering_plugin.py [deleted file]
neutron/tests/unit/services/qos/__init__.py [deleted file]
neutron/tests/unit/services/qos/base.py [deleted file]
neutron/tests/unit/services/qos/notification_drivers/__init__.py [deleted file]
neutron/tests/unit/services/qos/notification_drivers/dummy.py [deleted file]
neutron/tests/unit/services/qos/notification_drivers/test_manager.py [deleted file]
neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py [deleted file]
neutron/tests/unit/services/qos/test_qos_plugin.py [deleted file]
neutron/tests/unit/services/test_provider_configuration.py [deleted file]
neutron/tests/unit/test_auth.py [deleted file]
neutron/tests/unit/test_context.py [deleted file]
neutron/tests/unit/test_manager.py [deleted file]
neutron/tests/unit/test_policy.py [deleted file]
neutron/tests/unit/test_service.py [deleted file]
neutron/tests/unit/test_wsgi.py [deleted file]
neutron/tests/unit/testlib_api.py [deleted file]
neutron/tests/unit/tests/__init__.py [deleted file]
neutron/tests/unit/tests/test_base.py [deleted file]
neutron/tests/unit/tests/test_post_mortem_debug.py [deleted file]
neutron/tests/var/ca.crt [deleted file]
neutron/tests/var/certandkey.pem [deleted file]
neutron/tests/var/certificate.crt [deleted file]
neutron/tests/var/privatekey.key [deleted file]
neutron/version.py [deleted file]
neutron/worker.py [deleted file]
neutron/wsgi.py [deleted file]
openstack-common.conf [deleted file]
rally-jobs/README.rst [deleted file]
rally-jobs/extra/README.rst [deleted file]
rally-jobs/neutron-neutron.yaml [deleted file]
rally-jobs/plugins/README.rst [deleted file]
rally-jobs/plugins/__init__.py [deleted file]
releasenotes/notes/.placeholder [deleted file]
releasenotes/notes/add-availability-zone-4440cf00be7c54ba.yaml [deleted file]
releasenotes/notes/config-file-generation-2eafc6602d57178e.yaml [deleted file]
releasenotes/notes/default-local-dns-a1c3fa1451f228fa.yaml [deleted file]
releasenotes/notes/deprecate-router_id-34aca9ea5ee9e789.yaml [deleted file]
releasenotes/notes/deprecated-driver-e368e0befc9bee4c.yaml [deleted file]
releasenotes/notes/direct-physical-vnic-878d15bdb758b70e.yaml [deleted file]
releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yaml [deleted file]
releasenotes/notes/linuxbridge-agent-extensions-66bdf9feee25ef99.yaml [deleted file]
releasenotes/notes/macvtap_assigned_vf_check-f4d07660ffd82a24.yaml [deleted file]
releasenotes/notes/oslo-reports-166a169037bf64f2.yaml [deleted file]
releasenotes/notes/rm-notify-entry-points-aa442134a780469a.yaml [deleted file]
releasenotes/notes/sriov_show_l2_agent_extensions-ca852e155a529e99.yaml [deleted file]
releasenotes/notes/use-keystoneauth-24f309566001a16b.yaml [deleted file]
releasenotes/source/README.rst [deleted file]
releasenotes/source/_static/.placeholder [deleted file]
releasenotes/source/_templates/.placeholder [deleted file]
releasenotes/source/conf.py [deleted file]
releasenotes/source/index.rst [deleted file]
releasenotes/source/liberty.rst [deleted file]
releasenotes/source/unreleased.rst [deleted file]
requirements.txt [deleted file]
run_tests.sh [deleted file]
setup.cfg [deleted file]
setup.py [deleted file]
test-requirements.txt [deleted file]
tools/abandon_old_reviews.sh [deleted file]
tools/check_unit_test_structure.sh [deleted file]
tools/clean.sh [deleted file]
tools/coding-checks.sh [deleted file]
tools/configure_for_func_testing.sh [deleted file]
tools/copy_api_tests_from_tempest.sh [deleted file]
tools/deploy_rootwrap.sh [deleted file]
tools/generate_config_file_samples.sh [deleted file]
tools/install_venv.py [deleted file]
tools/install_venv_common.py [deleted file]
tools/milestone-review-dash.py [deleted file]
tools/misc-sanity-checks.sh [deleted file]
tools/ostestr_compat_shim.sh [deleted file]
tools/pecan_server.sh [deleted file]
tools/split.sh [deleted file]
tools/with_venv.sh [deleted file]
tox.ini [deleted file]
trusty/debian/changelog [moved from debian/changelog with 100% similarity]
trusty/debian/compat [moved from debian/compat with 100% similarity]
trusty/debian/control [moved from debian/control with 100% similarity]
trusty/debian/copyright [moved from debian/copyright with 100% similarity]
trusty/debian/cron.d/neutron-dhcp-agent-netns-cleanup [moved from debian/cron.d/neutron-dhcp-agent-netns-cleanup with 100% similarity]
trusty/debian/cron.d/neutron-l3-agent-netns-cleanup [moved from debian/cron.d/neutron-l3-agent-netns-cleanup with 100% similarity]
trusty/debian/cron.d/neutron-lbaas-agent-netns-cleanup [moved from debian/cron.d/neutron-lbaas-agent-netns-cleanup with 100% similarity]
trusty/debian/dnsmasq-neutron.conf [moved from debian/dnsmasq-neutron.conf with 100% similarity]
trusty/debian/gbp.conf [moved from debian/gbp.conf with 100% similarity]
trusty/debian/mans/neutron-linuxbridge-agent.8 [moved from debian/mans/neutron-linuxbridge-agent.8 with 100% similarity]
trusty/debian/mans/neutron-nec-agent.8 [moved from debian/mans/neutron-nec-agent.8 with 100% similarity]
trusty/debian/mans/neutron-openvswitch-agent.8 [moved from debian/mans/neutron-openvswitch-agent.8 with 100% similarity]
trusty/debian/mans/neutron-rootwrap.8 [moved from debian/mans/neutron-rootwrap.8 with 100% similarity]
trusty/debian/mans/neutron-ryu-agent.8 [moved from debian/mans/neutron-ryu-agent.8 with 100% similarity]
trusty/debian/mans/neutron-server.8 [moved from debian/mans/neutron-server.8 with 100% similarity]
trusty/debian/neutron-common.README.Debian [moved from debian/neutron-common.README.Debian with 100% similarity]
trusty/debian/neutron-common.config.in [moved from debian/neutron-common.config.in with 88% similarity]
trusty/debian/neutron-common.dirs [moved from debian/neutron-common.dirs with 100% similarity]
trusty/debian/neutron-common.install [moved from debian/neutron-common.install with 100% similarity]
trusty/debian/neutron-common.logrotate [moved from debian/neutron-common.logrotate with 100% similarity]
trusty/debian/neutron-common.postinst.in [moved from debian/neutron-common.postinst.in with 91% similarity]
trusty/debian/neutron-common.postrm [moved from debian/neutron-common.postrm with 100% similarity]
trusty/debian/neutron-common.prerm [moved from debian/neutron-common.prerm with 100% similarity]
trusty/debian/neutron-common.templates [moved from debian/neutron-common.templates with 97% similarity]
trusty/debian/neutron-common.tmpfile [moved from debian/neutron-common.tmpfile with 100% similarity]
trusty/debian/neutron-dhcp-agent.init.in [moved from debian/neutron-dhcp-agent.init.in with 100% similarity]
trusty/debian/neutron-dhcp-agent.install [moved from debian/neutron-dhcp-agent.install with 100% similarity]
trusty/debian/neutron-dhcp-agent.postinst.in [moved from debian/neutron-dhcp-agent.postinst.in with 100% similarity]
trusty/debian/neutron-l3-agent.init.in [moved from debian/neutron-l3-agent.init.in with 100% similarity]
trusty/debian/neutron-l3-agent.install [moved from debian/neutron-l3-agent.install with 100% similarity]
trusty/debian/neutron-linuxbridge-agent.init.in [moved from debian/neutron-linuxbridge-agent.init.in with 100% similarity]
trusty/debian/neutron-linuxbridge-agent.manpages [moved from debian/neutron-linuxbridge-agent.manpages with 100% similarity]
trusty/debian/neutron-metadata-agent.config.in [moved from debian/neutron-metadata-agent.config.in with 100% similarity]
trusty/debian/neutron-metadata-agent.init.in [moved from debian/neutron-metadata-agent.init.in with 100% similarity]
trusty/debian/neutron-metadata-agent.postinst.in [moved from debian/neutron-metadata-agent.postinst.in with 100% similarity]
trusty/debian/neutron-metadata-agent.postrm [moved from debian/neutron-metadata-agent.postrm with 100% similarity]
trusty/debian/neutron-metadata-agent.templates [moved from debian/neutron-metadata-agent.templates with 100% similarity]
trusty/debian/neutron-metering-agent.init.in [moved from debian/neutron-metering-agent.init.in with 100% similarity]
trusty/debian/neutron-openvswitch-agent.init.in [moved from debian/neutron-openvswitch-agent.init.in with 100% similarity]
trusty/debian/neutron-openvswitch-agent.manpages [moved from debian/neutron-openvswitch-agent.manpages with 100% similarity]
trusty/debian/neutron-openvswitch-agent.upstart.in [moved from debian/neutron-openvswitch-agent.upstart.in with 100% similarity]
trusty/debian/neutron-plugin-nec-agent.manpages [moved from debian/neutron-plugin-nec-agent.manpages with 100% similarity]
trusty/debian/neutron-server.config.in [moved from debian/neutron-server.config.in with 100% similarity]
trusty/debian/neutron-server.init.in [moved from debian/neutron-server.init.in with 100% similarity]
trusty/debian/neutron-server.manpages [moved from debian/neutron-server.manpages with 100% similarity]
trusty/debian/neutron-server.postinst.in [moved from debian/neutron-server.postinst.in with 100% similarity]
trusty/debian/neutron-server.prerm [moved from debian/neutron-server.prerm with 100% similarity]
trusty/debian/neutron-server.service.in [moved from debian/neutron-server.service.in with 100% similarity]
trusty/debian/neutron-server.templates [moved from debian/neutron-server.templates with 100% similarity]
trusty/debian/neutron-server.upstart.in [moved from debian/neutron-server.upstart.in with 100% similarity]
trusty/debian/neutron_sudoers [moved from debian/neutron_sudoers with 100% similarity]
trusty/debian/plugin_guess_func [moved from debian/plugin_guess_func with 100% similarity]
trusty/debian/po/POTFILES.in [moved from debian/po/POTFILES.in with 100% similarity]
trusty/debian/po/da.po [moved from debian/po/da.po with 100% similarity]
trusty/debian/po/de.po [moved from debian/po/de.po with 100% similarity]
trusty/debian/po/es.po [moved from debian/po/es.po with 100% similarity]
trusty/debian/po/fr.po [moved from debian/po/fr.po with 100% similarity]
trusty/debian/po/it.po [moved from debian/po/it.po with 100% similarity]
trusty/debian/po/nl.po [moved from debian/po/nl.po with 100% similarity]
trusty/debian/po/pt.po [moved from debian/po/pt.po with 100% similarity]
trusty/debian/po/ru.po [moved from debian/po/ru.po with 100% similarity]
trusty/debian/po/sv.po [moved from debian/po/sv.po with 100% similarity]
trusty/debian/po/templates.pot [moved from debian/po/templates.pot with 100% similarity]
trusty/debian/pydist-overrides [moved from debian/pydist-overrides with 100% similarity]
trusty/debian/python-neutron.install [moved from debian/python-neutron.install with 100% similarity]
trusty/debian/rules [moved from debian/rules with 100% similarity]
trusty/debian/source/format [moved from debian/source/format with 100% similarity]
trusty/debian/source/options [moved from debian/source/options with 100% similarity]
trusty/debian/watch [moved from debian/watch with 100% similarity]

diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644 (file)
index 97df1db..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-[run]
-branch = True
-source = neutron
-omit = neutron/tests/*,neutron/openstack/*
-
-[report]
-ignore_errors = True
diff --git a/.gitignore b/.gitignore
deleted file mode 100644 (file)
index 57d3563..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-AUTHORS
-build/*
-build-stamp
-ChangeLog
-cover/
-covhtml/
-dist/
-doc/build
-etc/*.sample
-etc/neutron/plugins/ml2/*.sample
-*.DS_Store
-*.pyc
-neutron.egg-info/
-neutron/vcsversion.py
-neutron/versioninfo
-pbr*.egg/
-setuptools*.egg/
-*.log
-*.mo
-*.sw?
-*~
-/.*
-!/.coveragerc
-!/.gitignore
-!/.gitreview
-!/.mailmap
-!/.pylintrc
-!/.testr.conf
-
-# Files created by releasenotes build
-releasenotes/build
diff --git a/.gitreview b/.gitreview
deleted file mode 100644 (file)
index 184583f..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-[gerrit]
-host=review.openstack.org
-port=29418
-project=openstack/neutron.git
diff --git a/.mailmap b/.mailmap
deleted file mode 100644 (file)
index f3e7e5e..0000000
--- a/.mailmap
+++ /dev/null
@@ -1,11 +0,0 @@
-# Format is:
-# <preferred e-mail> <other e-mail 1>
-# <preferred e-mail> <other e-mail 2>
-lawrancejing <lawrancejing@gmail.com> <liuqing@windawn.com>
-Jiajun Liu <jiajun@unitedstack.com> <iamljj@gmail.com>
-Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>
-Kun Huang <gareth@unitedstack.com> <academicgareth@gmail.com>
-Zhenguo Niu <zhenguo@unitedstack.com> <Niu.ZGlinux@gmail.com>
-Isaku Yamahata <isaku.yamahata@intel.com> <isaku.yamahata@gmail.com>
-Isaku Yamahata <isaku.yamahata@intel.com> <yamahata@private.email.ne.jp>
-Morgan Fainberg <morgan.fainberg@gmail.com> <m@metacloud.com>
diff --git a/.pylintrc b/.pylintrc
deleted file mode 100644 (file)
index 5037da9..0000000
--- a/.pylintrc
+++ /dev/null
@@ -1,116 +0,0 @@
-# The format of this file isn't really documented; just use --generate-rcfile
-[MASTER]
-# Add <file or directory> to the black list. It should be a base name, not a
-# path. You may set this option multiple times.
-#
-# Note the 'openstack' below is intended to match only
-# neutron.openstack.common.  If we ever have another 'openstack'
-# dirname, then we'll need to expand the ignore features in pylint :/
-ignore=.git,tests,openstack
-
-[MESSAGES CONTROL]
-# NOTE(gus): This is a long list.  A number of these are important and
-# should be re-enabled once the offending code is fixed (or marked
-# with a local disable)
-disable=
-# "F" Fatal errors that prevent further processing
- import-error,
-# "I" Informational noise
- locally-disabled,
-# "E" Error for important programming issues (likely bugs)
- access-member-before-definition,
- no-member,
- no-method-argument,
- no-self-argument,
-# "W" Warnings for stylistic problems or minor programming issues
- abstract-method,
- arguments-differ,
- attribute-defined-outside-init,
- bad-builtin,
- bad-indentation,
- broad-except,
- dangerous-default-value,
- deprecated-lambda,
- expression-not-assigned,
- fixme,
- global-statement,
- no-init,
- non-parent-init-called,
- not-callable,
- protected-access,
- redefined-builtin,
- redefined-outer-name,
- signature-differs,
- star-args,
- super-init-not-called,
- super-on-old-class,
- unpacking-non-sequence,
- unused-argument,
- unused-import,
- unused-variable,
-# "C" Coding convention violations
- bad-continuation,
- invalid-name,
- missing-docstring,
- superfluous-parens,
-# "R" Refactor recommendations
- abstract-class-little-used,
- abstract-class-not-used,
- duplicate-code,
- interface-not-implemented,
- no-self-use,
- too-few-public-methods,
- too-many-ancestors,
- too-many-arguments,
- too-many-branches,
- too-many-instance-attributes,
- too-many-lines,
- too-many-locals,
- too-many-public-methods,
- too-many-return-statements,
- too-many-statements
-
-[BASIC]
-# Variable names can be 1 to 31 characters long, with lowercase and underscores
-variable-rgx=[a-z_][a-z0-9_]{0,30}$
-
-# Argument names can be 2 to 31 characters long, with lowercase and underscores
-argument-rgx=[a-z_][a-z0-9_]{1,30}$
-
-# Method names should be at least 3 characters long
-# and be lowercased with underscores
-method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$
-
-# Module names matching neutron-* are ok (files in bin/)
-module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$
-
-# Don't require docstrings on tests.
-no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
-
-[FORMAT]
-# Maximum number of characters on a single line.
-max-line-length=79
-
-[VARIABLES]
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid defining new builtins when possible.
-# _ is used by our localization
-additional-builtins=_
-
-[CLASSES]
-# List of interface methods to ignore, separated by a comma.
-ignore-iface-methods=
-
-[IMPORTS]
-# Deprecated modules which should not be used, separated by a comma
-deprecated-modules=
-# should use openstack.common.jsonutils
- json
-
-[TYPECHECK]
-# List of module names for which member attributes should not be checked
-ignored-modules=six.moves,_MovedItems
-
-[REPORTS]
-# Tells whether to display a full report or only the messages
-reports=no
diff --git a/.testr.conf b/.testr.conf
deleted file mode 100644 (file)
index 23c9c58..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION | cat
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
deleted file mode 100644 (file)
index 7f62f3f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-If you would like to contribute to the development of OpenStack,
-you must follow the steps documented at:
-
-   http://docs.openstack.org/infra/manual/developers.html#development-workflow
-
-Once those steps have been completed, changes to OpenStack
-should be submitted for review via the Gerrit tool, following
-the workflow documented at:
-
-   http://docs.openstack.org/infra/manual/developers.html#development-workflow
-
-Pull requests submitted through GitHub will be ignored.
-
-Bugs should be filed on Launchpad, not GitHub:
-
-   https://bugs.launchpad.net/neutron
diff --git a/HACKING.rst b/HACKING.rst
deleted file mode 100644 (file)
index b1ecb52..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-Neutron Style Commandments
-==========================
-
-- Step 1: Read the OpenStack Style Commandments
-  http://docs.openstack.org/developer/hacking/
-- Step 2: Read on
-
-Neutron Specific Commandments
------------------------------
-
-- [N319] Validate that debug level logs are not translated
-- [N320] Validate that LOG messages, except debug ones, have translations
-- [N321] Validate that jsonutils module is used instead of json
-- [N322] Detect common errors with assert_called_once_with
-- [N323] Enforce namespace-less imports for oslo libraries
-- [N324] Prevent use of deprecated contextlib.nested.
-- [N325] Python 3: Do not use xrange.
-- [N326] Python 3: do not use basestring.
-- [N327] Python 3: do not use dict.iteritems.
-- [N328] Detect wrong usage with assertEqual
-- [N329] Method's default argument shouldn't be mutable
-- [N330] Use assertEqual(*empty*, observed) instead of
-         assertEqual(observed, *empty*)
-- [N331] Detect wrong usage with assertTrue(isinstance()).
-
-Creating Unit Tests
--------------------
-For every new feature, unit tests should be created that both test and
-(implicitly) document the usage of said feature. If submitting a patch for a
-bug that had no unit test, a new passing unit test should be added. If a
-submitted bug fix does have a unit test, be sure to add a new one that fails
-without the patch and passes with the patch.
-
-All unittest classes must ultimately inherit from testtools.TestCase. In the
-Neutron test suite, this should be done by inheriting from
-neutron.tests.base.BaseTestCase.
-
-All setUp and tearDown methods must upcall using the super() method.
-tearDown methods should be avoided and addCleanup calls should be preferred.
-Never manually create tempfiles. Always use the tempfile fixtures from
-the fixture library to ensure that they are cleaned up.
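-
-A minimal test class following these commandments might look like the
-sketch below (for illustration only; the resource names are made up):
-
-.. code-block:: python
-
-    import fixtures
-
-    from neutron.tests import base
-
-
-    class TestExample(base.BaseTestCase):
-
-        def setUp(self):
-            # Always upcall first.
-            super(TestExample, self).setUp()
-            # Prefer addCleanup over tearDown; it runs on success and
-            # failure alike.
-            self.addCleanup(self._release_resources)
-            # Use the fixtures library rather than creating tempfiles
-            # by hand; the fixture removes the directory automatically.
-            self.temp_dir = self.useFixture(fixtures.TempDir()).path
-
-        def _release_resources(self):
-            pass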
diff --git a/LICENSE b/LICENSE
deleted file mode 100644 (file)
index 68c771a..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644 (file)
index 8ac6e4e..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-include AUTHORS
-include README.rst
-include ChangeLog
-include LICENSE
-include neutron/db/migration/README
-include neutron/db/migration/alembic.ini
-include neutron/db/migration/alembic_migrations/script.py.mako
-recursive-include neutron/db/migration/alembic_migrations/versions *
-recursive-include neutron/locale *
-
-exclude .gitignore
-exclude .gitreview
-
-global-exclude *.pyc
diff --git a/README.rst b/README.rst
deleted file mode 100644 (file)
index 671d954..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-Welcome!
-========
-
-You have come across a cloud computing network fabric controller. It has
-identified itself as "Neutron." It aims to tame your (cloud) networking!
-
-External Resources:
-===================
-
-The homepage for Neutron is: http://launchpad.net/neutron.  Use this
-site for asking for help and filing bugs. Code is available on
-git.openstack.org at <http://git.openstack.org/cgit/openstack/neutron>.
-
-The latest and most in-depth documentation on how to use Neutron is
-available at: <http://docs.openstack.org>. This includes:
-
-Neutron Administrator Guide
-   http://docs.openstack.org/admin-guide-cloud/networking.html
-
-Networking Guide
-   http://docs.openstack.org/networking-guide/
-
-Neutron API Reference:
-   http://docs.openstack.org/api/openstack-network/2.0/content/
-
-Current Neutron developer documentation is available at:
-   http://wiki.openstack.org/NeutronDevelopment
-
-For help on usage and hacking of Neutron, please send mail to
-<mailto:openstack-dev@lists.openstack.org>.
-
-For information on how to contribute to Neutron, please see the
-contents of the CONTRIBUTING.rst file.
diff --git a/TESTING.rst b/TESTING.rst
deleted file mode 100644 (file)
index a790cca..0000000
+++ /dev/null
@@ -1,587 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Testing Neutron
-===============
-
-Why Should You Care
--------------------
-There are two ways to approach testing:
-
-1) Write unit tests because they're required to get your patch merged.
-   This typically involves mock heavy tests that assert that your code is as
-   written.
-2) Put as much thought into your testing strategy as you do into the rest
-   of your code. Use different layers of testing as appropriate to provide
-   high *quality* coverage. Are you touching an agent? Test it against an
-   actual system! Are you adding a new API? Test it for race conditions
-   against a real database! Are you adding a new cross-cutting feature?
-   Test that it does what it's supposed to do when run on a real cloud!
-
-Do you feel the need to verify your change manually? If so, the next few
-sections attempt to guide you through Neutron's different test infrastructures
-to help you make intelligent decisions and best exploit Neutron's test
-offerings.
-
-Definitions
------------
-We will talk about three classes of tests: unit, functional and integration.
-Each respective category typically targets a larger scope of code. Other than
-that broad categorization, here are a few more characteristics:
-
-  * Unit tests - Should be able to run on your laptop, directly following a
-    'git clone' of the project. The underlying system must not be mutated;
-    mocks can be used to achieve this. A unit test typically targets a function
-    or class.
-  * Functional tests - Run against a pre-configured environment
-    (tools/configure_for_func_testing.sh). Typically test a component
-    such as an agent using no mocks.
-  * Integration tests - Run against a running cloud, often targeting the API level,
-    but also 'scenarios' or 'user stories'. You may find such tests under
-    tests/api, tests/fullstack and in the Tempest and Rally projects.
-
-Tests in the Neutron tree are typically organized by the testing infrastructure
-used, and not by the scope of the test. For example, many tests under the
-'unit' directory invoke an API call and assert that the expected output was
-received. The scope of such a test is the entire Neutron server stack,
-and clearly not a specific function such as in a typical unit test.
-
-Testing Frameworks
-------------------
-
-The different frameworks are listed below. The intent is to list the
-capabilities of each testing framework so as to help the reader understand
-when each tool should be used. Remember that when adding code that touches
-many areas of Neutron, each area should be tested with the appropriate
-framework.
-Overlap between different test layers is often desirable and encouraged.
-
-Unit Tests
-~~~~~~~~~~
-
-Unit tests (neutron/tests/unit/) are meant to cover as much code as
-possible. They are designed to test the various pieces of the Neutron tree to
-make sure any new changes don't break existing functionality. Unit tests have
-no system requirements and make no changes to the system they run on. They use
-an in-memory sqlite database to test DB interaction.
-
-At the start of each test run:
-
-* RPC listeners are mocked away.
-* The fake Oslo messaging driver is used.
-
-At the end of each test run:
-
-* Mocks are automatically reverted.
-* The in-memory database is cleared of content, but its schema is maintained.
-* The global Oslo configuration object is reset.
-
-The unit testing framework can be used to effectively test database interaction.
-For example, distributed routers allocate a MAC address for every host running
-an OVS agent. One of DVR's DB mixins implements a method that lists all host
-MAC addresses. Its test looks like this:
-
-.. code-block:: python
-
-    def test_get_dvr_mac_address_list(self):
-        self._create_dvr_mac_entry('host_1', 'mac_1')
-        self._create_dvr_mac_entry('host_2', 'mac_2')
-        mac_list = self.mixin.get_dvr_mac_address_list(self.ctx)
-        self.assertEqual(2, len(mac_list))
-
-It inserts two new host MAC addresses, invokes the method under test and asserts
-its output. The test has many things going for it:
-
-* It targets the method under test correctly, not taking on a larger scope
-  than is necessary.
-* It does not use mocks to assert that methods were called; it simply
-  invokes the method and asserts its output (in this case, that the list
-  method returns two records).
-
-This is allowed by the fact that the method was built to be testable -
-the method has clear input and output with no side effects.
-
-Functional Tests
-~~~~~~~~~~~~~~~~
-
-Functional tests (neutron/tests/functional/) are intended to
-validate actual system interaction. Mocks should be used sparingly,
-if at all. Care should be taken to ensure that existing system
-resources are not modified and that resources created in tests are
-properly cleaned up both on test success and failure.
-
-Let's examine the benefits of the functional testing framework.
-Neutron offers a library called 'ip_lib' that wraps around the 'ip' binary.
-One of its methods is called 'device_exists' which accepts a device name
-and a namespace and returns True if the device exists in the given namespace.
-It's easy to build a test that targets the method directly, and such a test
-would be considered a 'unit' test. However, what framework should such a test
-use? A test using the unit test framework could not mutate state on the system,
-and so could not actually create a device and assert that it now exists. Such
-a test would look roughly like this:
-
-* It would mock 'execute', a method that executes shell commands against the
-  system, so that it reports an IP device named 'foo'.
-* It would then assert that when 'device_exists' is called with 'foo', it
-  returns True, but when called with a different device name it returns False.
-* It would most likely assert that 'execute' was called using something like:
-  'ip link show foo'.
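-
-Concretely, such a mock-heavy test might look like the sketch below. It
-is self-contained for illustration - 'execute' and 'device_exists' are
-stand-ins for the real helpers in neutron.agent.linux, not the actual
-code:
-
-.. code-block:: python
-
-    import mock  # on Python 3, unittest.mock behaves the same way
-    import testtools
-
-
-    def execute(cmd):
-        """Stand-in for the helper that shells out to the system."""
-        raise NotImplementedError
-
-
-    def device_exists(name):
-        """Return True if 'ip link show <name>' succeeds."""
-        try:
-            execute(['ip', 'link', 'show', name])
-        except RuntimeError:
-            return False
-        return True
-
-
-    class TestDeviceExists(testtools.TestCase):
-
-        def test_device_exists(self):
-            # Pretend the system has a device named 'foo'.
-            with mock.patch(__name__ + '.execute') as cmd:
-                self.assertTrue(device_exists('foo'))
-                cmd.assert_called_once_with(
-                    ['ip', 'link', 'show', 'foo'])
-
-        def test_device_does_not_exist(self):
-            # Pretend the 'ip' command failed for a missing device.
-            with mock.patch(__name__ + '.execute',
-                            side_effect=RuntimeError):
-                self.assertFalse(device_exists('bar'))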
-
-The value of such a test is arguable. Remember that new tests are not free;
-they need to be maintained. Code is often refactored, reimplemented and
-optimized.
-
-* There are other ways to find out if a device exists (Such as
-  by looking at '/sys/class/net'), and in such a case the test would have
-  to be updated.
-* Methods are mocked using their name. When methods are renamed, moved or
-  removed, their mocks must be updated. This slows down development for
-  avoidable reasons.
-* Most importantly, the test does not assert the behavior of the method. It
-  merely asserts that the code is as written.
-
-When adding a functional test for 'device_exists', several framework level
-methods were added. These methods may now be used by other tests as well.
-One such method creates a virtual device in a namespace,
-and ensures that both the namespace and the device are cleaned up at the
-end of the test run regardless of success or failure using the 'addCleanup'
-method. The test generates details for a temporary device, asserts that
-a device by that name does not exist, creates that device, asserts that
-it now exists, deletes it, and asserts that it no longer exists.
-Such a test avoids all three of the issues mentioned above that would
-arise if it were written using the unit testing framework.
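-
-A functional version of the same test exercises the real system instead.
-The sketch below assumes root privileges and uses raw 'ip' commands where
-the real test relies on framework helpers:
-
-.. code-block:: python
-
-    import subprocess
-    import uuid
-
-    import testtools
-
-    from neutron.agent.linux import ip_lib
-
-
-    class TestDeviceExistsFunctional(testtools.TestCase):
-
-        def _create_device(self, name):
-            subprocess.check_call(
-                ['ip', 'link', 'add', name, 'type', 'dummy'])
-            # addCleanup runs whether the test passes or fails.
-            self.addCleanup(
-                subprocess.call, ['ip', 'link', 'delete', name])
-
-        def test_device_exists(self):
-            # Generate a name unlikely to collide with a real device.
-            name = 'test' + uuid.uuid4().hex[:9]
-            self.assertFalse(ip_lib.device_exists(name))
-            self._create_device(name)
-            self.assertTrue(ip_lib.device_exists(name))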
-
-Functional tests are also used to target a larger scope, such as agents.
-Many good examples exist: See the OVS, L3 and DHCP agents functional tests.
-Such tests target a top level agent method and assert that the system
-interaction that was supposed to be performed was indeed performed.
-For example, to test the DHCP agent's top level method that accepts network
-attributes and configures dnsmasq for that network, the test:
-
-* Instantiates an instance of the DHCP agent class (But does not start its
-  process).
-* Calls its top level function with prepared data.
-* Creates a temporary namespace and device, and calls 'dhclient' from that
-  namespace.
-* Asserts that the device successfully obtained the expected IP address.
-
-Fullstack Tests
-~~~~~~~~~~~~~~~
-
-Why?
-++++
-
-The idea behind "fullstack" testing is to fill a gap between unit + functional
-tests and Tempest. Tempest tests are expensive to run, and target black box API
-tests exclusively. Tempest requires an OpenStack deployment to be run against,
-which can be difficult to configure and setup. Full stack testing addresses
-these issues by taking care of the deployment itself, according to the topology
-that the test requires. Developers further benefit from full stack testing as
-it can sufficiently simulate a real environment and provide a rapidly
-reproducible way to verify code while you're still writing it.
-
-How?
-++++
-
-Full stack tests set up their own Neutron processes (Server & agents). They
-assume a working Rabbit and MySQL server before the run starts. Instructions
-on how to run fullstack tests on a VM are available below.
-
-Each test defines its own topology (What and how many servers and agents should
-be running).
-
-Since the test runs on the machine itself, full stack testing enables
-"white box" testing. This means that you can, for example, create a router
-through the API and then assert that a namespace was created for it.
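-
-For instance (a sketch assuming a python-neutronclient Client instance
-and the standard 'qrouter-<router id>' namespace naming; client setup is
-elided):
-
-.. code-block:: python
-
-    import subprocess
-
-
-    def assert_router_namespace(client, name='demo-router'):
-        # Create the router through the Neutron API...
-        router = client.create_router(
-            {'router': {'name': name}})['router']
-        # ...then inspect the host directly, white-box style.
-        namespaces = subprocess.check_output(['ip', 'netns', 'list'])
-        assert 'qrouter-' + router['id'] in namespaces.decode()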
-
-Full stack tests run in the Neutron tree with Neutron resources alone. You
-may use the Neutron API (The Neutron server is set to NOAUTH so that Keystone
-is out of the picture). VMs may be simulated with a container-like class:
-neutron.tests.fullstack.resources.machine.FakeFullstackMachine.
-An example of its usage may be found at:
-neutron/tests/fullstack/test_connectivity.py.
-
-Full stack testing can simulate multi node testing by starting an agent
-multiple times. Specifically, each node would have its own copy of the
-OVS/DHCP/L3 agents, all configured with the same "host" value. Each OVS agent
-is connected to its own pair of br-int/br-ex, and those bridges are then
-interconnected.
-
-.. image:: images/fullstack_multinode_simulation.png
-
-Segmentation at the database layer is guaranteed by creating a database
-per test. The messaging layer achieves segmentation by utilizing a RabbitMQ
-feature called 'vhosts'. In short, just as a MySQL server serves multiple
-databases, so can a RabbitMQ server serve multiple messaging domains.
-Exchanges and queues in one 'vhost' are segmented from those in another
-'vhost'.
-
-When?
-+++++
-
-1) You'd like to test the interaction between Neutron components (Server
-   and agents) and have already tested each component in isolation via unit or
-   functional tests. You should have many unit tests, fewer tests to test
-   a component and even fewer to test their interaction. Edge cases should
-   not be tested with full stack testing.
-2) You'd like to increase coverage by testing features that require multi node
-   testing such as l2pop, L3 HA and DVR.
-3) You'd like to test agent restarts. We've found bugs in the OVS, DHCP and
-   L3 agents and haven't found an effective way to test these scenarios. Full
-   stack testing can help here as the full stack infrastructure can restart an
-   agent during the test.
-
-Example
-+++++++
-
-Neutron offers a Quality of Service API, initially offering bandwidth
-capping at the port level. In the reference implementation, it does this by
-utilizing an OVS feature.
-neutron.tests.fullstack.test_qos.TestQoSWithOvsAgent.test_qos_policy_rule_lifecycle
-is a positive example of how the fullstack testing infrastructure should be used.
-It creates a network, subnet, QoS policy & rule and a port utilizing that policy.
-It then asserts that the expected bandwidth limitation is present on the OVS
-bridge connected to that port. The test is a true integration test, in the
-sense that it invokes the API and then asserts that Neutron interacted with
-the hypervisor appropriately.
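-
-At the API level, the lifecycle the test exercises looks roughly like the
-sketch below (a python-neutronclient outline; the QoS method names shown
-are indicative of its Liberty-era QoS support, and the OVS-side assertion
-the real test performs through fullstack helpers is omitted):
-
-.. code-block:: python
-
-    def qos_lifecycle(client):
-        # A QoS policy carrying a bandwidth limit rule...
-        policy = client.create_qos_policy(
-            {'policy': {'name': 'bw-limit'}})['policy']
-        client.create_bandwidth_limit_rule(
-            policy['id'],
-            {'bandwidth_limit_rule': {'max_kbps': 1000,
-                                      'max_burst_kbps': 100}})
-        # ...a network and subnet to host the port...
-        net = client.create_network(
-            {'network': {'name': 'net'}})['network']
-        client.create_subnet(
-            {'subnet': {'network_id': net['id'],
-                        'ip_version': 4,
-                        'cidr': '10.0.0.0/24'}})
-        # ...and a port bound to the policy.
-        return client.create_port(
-            {'port': {'network_id': net['id'],
-                      'qos_policy_id': policy['id']}})['port']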
-
-API Tests
-~~~~~~~~~
-
-API tests (neutron/tests/api/) are intended to ensure the function
-and stability of the Neutron API. As much as possible, changes to
-this path should not be made at the same time as changes to the code
-to limit the potential for introducing backwards-incompatible changes,
-although the same patch that introduces a new API should include an API
-test.
-
-Since API tests target a deployed Neutron daemon that is not test-managed,
-they should not depend on controlling the runtime configuration
-of the target daemon. API tests should be black-box - no assumptions should
-be made about implementation. Only the contract defined by Neutron's REST API
-should be validated, and all interaction with the daemon should be via
-a REST client.
-
-neutron/tests/api was copied from the Tempest project. The Tempest networking
-API directory was frozen and any new tests belong to the Neutron repository.
-
-Development Process
--------------------
-
-It is expected that any new changes that are proposed for merge
-come with tests for that feature or code area. Any bug
-fixes that are submitted must also have tests to prove that they stay
-fixed! In addition, before proposing for merge, all of the
-current tests should be passing.
-
-Structure of the Unit Test Tree
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The structure of the unit test tree should match the structure of the
-code tree, e.g. ::
-
- - target module: neutron.agent.utils
-
- - test module: neutron.tests.unit.agent.test_utils
-
-Unit test modules should have the same path under neutron/tests/unit/
-as the module they target has under neutron/, and their name should be
-the name of the target module prefixed by `test_`. This requirement
-is intended to make it easier for developers to find the unit tests
-for a given module.
-
-Similarly, when a test module targets a package, that module's name
-should be the name of the package prefixed by `test_` with the same
-path as when a test targets a module, e.g. ::
-
- - target package: neutron.ipam
-
- - test module: neutron.tests.unit.test_ipam
-
-The following command can be used to validate whether the unit test
-tree is structured according to the above requirements: ::
-
-    ./tools/check_unit_test_structure.sh
-
-Where appropriate, exceptions can be added to the above script. If
-code is not part of the Neutron namespace, for example, it's probably
-reasonable to exclude its unit tests from the check.
-
-Running Tests
--------------
-
-There are three mechanisms for running tests: run_tests.sh, tox,
-and nose2. Before submitting a patch for review you should always
-ensure all tests pass; a tox run is triggered by the Jenkins gate
-executed on Gerrit for each patch pushed for review.
-
-With these mechanisms you can either run the tests in the standard
-environment or create a virtual environment to run them in.
-
-By default, after running all of the tests, any pep8 errors
-found in the tree will be reported.
-
-
-With `run_tests.sh`
-~~~~~~~~~~~~~~~~~~~
-
-You can use the `run_tests.sh` script in the root source directory to execute
-tests in a virtualenv::
-
-    ./run_tests.sh -V
-
-
-With `nose2`
-~~~~~~~~~~~~
-
-You can use `nose2`_ to run individual tests, as well as to debug
-portions of your code::
-
-    source .venv/bin/activate
-    pip install nose2
-    nose2
-
-There are disadvantages to running nose2 - the tests run sequentially, so
-race condition bugs will not be triggered, and the full test suite will
-take significantly longer than with tox & testr. The upside is that testr has
-some rough edges when it comes to diagnosing errors and failures, and that
-while using testr there is no easy way to set a breakpoint in the Neutron
-code and enter an interactive debugging session.
-
-Note that nose2's predecessor, `nose`_, does not understand the
-`load_tests protocol`_ introduced in Python 2.7. This limitation will result in
-errors being reported for modules that depend on load_tests
-(usually due to use of `testscenarios`_). nose, therefore, is not supported,
-while nose2 is.
-
-.. _nose2: http://nose2.readthedocs.org/en/latest/index.html
-.. _nose: https://nose.readthedocs.org/en/latest/index.html
-.. _load_tests protocol: https://docs.python.org/2/library/unittest.html#load-tests-protocol
-.. _testscenarios: https://pypi.python.org/pypi/testscenarios/
-
-With `tox`
-~~~~~~~~~~
-
-Neutron, like other OpenStack projects, uses `tox`_ for managing the virtual
-environments for running test cases. It uses `Testr`_ for managing the running
-of the test cases.
-
-Tox handles the creation of a series of `virtualenvs`_ that target specific
-versions of Python.
-
-Testr handles the parallel execution of a series of test cases, as
-well as the tracking of long-running tests.
-
-For more information on the standard Tox-based test infrastructure used by
-OpenStack and how to do some common test/debugging procedures with Testr,
-see this wiki page:
-
-  https://wiki.openstack.org/wiki/Testr
-
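-For example, two common testr invocations for working with failures
-(a sketch; see the wiki page above for the full workflow)::
-
-    testr failing --list
-    testr run --failing
-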
-.. _Testr: https://wiki.openstack.org/wiki/Testr
-.. _tox: http://tox.readthedocs.org/en/latest/
-.. _virtualenvs: https://pypi.python.org/pypi/virtualenv
-
-PEP8 and Unit Tests
-+++++++++++++++++++
-
-Running pep8 and unit tests is as easy as executing this in the root
-directory of the Neutron source code::
-
-    tox
-
-To run only pep8::
-
-    tox -e pep8
-
-Since pep8 includes running pylint on all files, it can take quite some time to run.
-To restrict the pylint check to only the files altered by the latest patch::
-
-    tox -e pep8 HEAD~1
-
-To run only the unit tests::
-
-    tox -e py27
-
-Functional Tests
-++++++++++++++++
-
-To run functional tests that do not require sudo privileges or
-system-specific dependencies::
-
-    tox -e functional
-
-To run all the functional tests, including those requiring sudo
-privileges and system-specific dependencies, the procedure defined by
-tools/configure_for_func_testing.sh should be followed.
-
-IMPORTANT: configure_for_func_testing.sh relies on DevStack to perform
-extensive modifications to the underlying host. Execution of the
-script requires sudo privileges, and it is recommended that the
-following commands be invoked only on a clean and disposable VM.
-A VM that has had DevStack previously installed on it is also fine. ::
-
-    git clone https://git.openstack.org/openstack-dev/devstack ../devstack
-    ./tools/configure_for_func_testing.sh ../devstack -i
-    tox -e dsvm-functional
-
-The '-i' option is optional and instructs the script to use DevStack
-to install and configure all of Neutron's package dependencies. It is
-not necessary to provide this option if DevStack has already been used
-to deploy Neutron to the target host.
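-
-As with the unit tests, a subset of the functional tests can be
-selected by passing a dot-separated path as an argument; for example
-(the package name here is purely illustrative)::
-
-    tox -e dsvm-functional neutron.tests.functional.agent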
-
-Fullstack Tests
-+++++++++++++++
-
-To run all the full-stack tests, you may use: ::
-
-    tox -e dsvm-fullstack
-
-Since full-stack tests often require the same resources and
-dependencies as the functional tests, using the configuration script
-tools/configure_for_func_testing.sh is advised (as described above).
-When running full-stack tests on a clean VM for the first time, we
-advise running ./stack.sh successfully first to make sure all of
-Neutron's dependencies are met. Full-stack Neutron daemons write their
-logs to sub-folders of /tmp/dsvm-fullstack-logs (for example, a test named
-"test_example" will produce logs in /tmp/dsvm-fullstack-logs/test_example/),
-so that is a good place to look if your test is failing.
-The fullstack test suite assumes that the 240.0.0.0/4 (Class E) range in
-the root namespace of the test machine is available for its use.
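-
-An individual full-stack test can be selected in the same way as for
-the other suites; for example (the test module name is purely
-illustrative)::
-
-    tox -e dsvm-fullstack neutron.tests.fullstack.test_l3_agent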
-
-API Tests
-+++++++++
-
-To run the API tests, deploy Tempest and Neutron with DevStack and
-then run the following command: ::
-
-    tox -e api
-
-If tempest.conf cannot be found at the default location used by
-DevStack (/opt/stack/tempest/etc) it may be necessary to set
-TEMPEST_CONFIG_DIR before invoking tox: ::
-
-    export TEMPEST_CONFIG_DIR=[path to dir containing tempest.conf]
-    tox -e api
-
-
-Running Individual Tests
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-To run an individual test module, test case, or test, just pass its
-dot-separated path as an argument.
-
-For example, the following would run only a single test or test case::
-
-      $ ./run_tests.sh neutron.tests.unit.test_manager
-      $ ./run_tests.sh neutron.tests.unit.test_manager.NeutronManagerTestCase
-      $ ./run_tests.sh neutron.tests.unit.test_manager.NeutronManagerTestCase.test_service_plugin_is_loaded
-
-or::
-
-      $ tox -e py27 neutron.tests.unit.test_manager
-      $ tox -e py27 neutron.tests.unit.test_manager.NeutronManagerTestCase
-      $ tox -e py27 neutron.tests.unit.test_manager.NeutronManagerTestCase.test_service_plugin_is_loaded
-
-If you want to pass other arguments to ostestr, you can do the
-following::
-
-      $ tox -e py27 -- --regex neutron.tests.unit.test_manager --serial
-
-
-Coverage
---------
-
-Neutron has a fast-growing code base and there are plenty of areas that
-need better coverage.
-
-To get a grasp of the areas where tests are needed, you can check
-current unit tests coverage by running::
-
-    $ ./run_tests.sh -c
-
-or by running::
-
-    $ tox -ecover
-
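-The coverage run writes an HTML report that can then be inspected
-locally (assuming the default output directory of the cover
-environment, cover/ in the source tree)::
-
-    $ firefox cover/index.html
-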
-Since the coverage command can only show unit test coverage, a coverage
-document is maintained that shows test coverage per area of code in:
-doc/source/devref/testing_coverage.rst. You can also rely on Zuul
-logs, which are generated post-merge (not every project builds coverage
-results). To access them, do the following:
-
-  * Check out the latest `merge commit <https://review.openstack.org/gitweb?p=openstack/neutron.git;a=search;s=Jenkins;st=author>`_
-  * Go to: http://logs.openstack.org/<first-2-digits-of-sha1>/<sha1>/post/neutron-coverage/.
-  * `Spec <https://review.openstack.org/#/c/221494/>`_ is a work in progress to
-    provide a better landing page.
-
-Debugging
----------
-
-By default, calls to pdb.set_trace() are ignored when tests
-are run. For pdb statements to work, invoke run_tests.sh as follows::
-
-    $ ./run_tests.sh -d [test module path]
-
-It's possible to debug tests in a tox environment::
-
-    $ tox -e venv -- python -m testtools.run [test module path]
-
-Tox-created virtual environments (venv's) can also be activated
-after a tox run and reused for debugging::
-
-    $ tox -e venv
-    $ . .tox/venv/bin/activate
-    $ python -m testtools.run [test module path]
-
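-For example, to debug a single unit test module in an activated venv
-(the module name is used purely as an illustration)::
-
-    $ python -m testtools.run neutron.tests.unit.test_manager
-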
-Tox packages and installs the Neutron source tree in a given venv
-on every invocation, but if modifications need to be made between
-invocations (e.g. adding more pdb statements), it is recommended
-that the source tree be installed in the venv in editable mode::
-
-    # run this only after activating the venv
-    $ pip install --editable .
-
-Editable mode ensures that changes made to the source tree are
-automatically reflected in the venv, and that such changes are not
-overwritten during the next tox run.
-
-Post-mortem Debugging
-~~~~~~~~~~~~~~~~~~~~~
-
-Setting OS_POST_MORTEM_DEBUGGER in the shell environment ensures
-that the debugger's .post_mortem() method is invoked on test failure::
-
-    $ OS_POST_MORTEM_DEBUGGER=pdb ./run_tests.sh -d [test module path]
-
-Supported debuggers are pdb and pudb [#pudb]_. Pudb is a full-screen,
-console-based visual debugger for Python which lets you inspect
-variables, the stack, and breakpoints in a very visual way, while
-keeping a high degree of compatibility with pdb::
-
-    $ ./.venv/bin/pip install pudb
-
-    $ OS_POST_MORTEM_DEBUGGER=pudb ./run_tests.sh -d [test module path]
-
-References
-~~~~~~~~~~
-
-.. [#pudb] PUDB debugger:
-   https://pypi.python.org/pypi/pudb
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644 (file)
index 15cd6cb..0000000
--- a/babel.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[python: **.py]
-
diff --git a/bin/neutron-rootwrap-xen-dom0 b/bin/neutron-rootwrap-xen-dom0
deleted file mode 100755 (executable)
index b4e2e31..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Neutron root wrapper for dom0.
-
-Executes networking commands in dom0.  The XenAPI plugin is
-responsible for determining whether a command is safe to execute.
-
-"""
-from __future__ import print_function
-
-from six.moves import configparser as ConfigParser
-from oslo_serialization import jsonutils as json
-
-import os
-import select
-import sys
-import traceback
-
-import XenAPI
-
-
-RC_UNAUTHORIZED = 99
-RC_NOCOMMAND = 98
-RC_BADCONFIG = 97
-RC_XENAPI_ERROR = 96
-
-
-def parse_args():
-    # Split arguments, require at least a command
-    exec_name = sys.argv.pop(0)
-    # The first remaining argument is the path to the config file;
-    # at least one command argument must follow it.
-    if len(sys.argv) < 2:
-        print("%s: No command specified" % exec_name)
-        sys.exit(RC_NOCOMMAND)
-
-    config_file = sys.argv.pop(0)
-    user_args = sys.argv[:]
-
-    return exec_name, config_file, user_args
-
-
-def _xenapi_section_name(config):
-    sections = [sect for sect in config.sections() if sect.lower() == "xenapi"]
-    if len(sections) == 1:
-        return sections[0]
-
-    print("Multiple [xenapi] sections or no [xenapi] section found!")
-    sys.exit(RC_BADCONFIG)
-
-
-def load_configuration(exec_name, config_file):
-    config = ConfigParser.RawConfigParser()
-    config.read(config_file)
-    try:
-        exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
-        filters_path = config.get("DEFAULT", "filters_path").split(",")
-        section = _xenapi_section_name(config)
-        url = config.get(section, "xenapi_connection_url")
-        username = config.get(section, "xenapi_connection_username")
-        password = config.get(section, "xenapi_connection_password")
-    except ConfigParser.Error:
-        print("%s: Incorrect configuration file: %s" % (exec_name, config_file))
-        sys.exit(RC_BADCONFIG)
-    if not url or not password:
-        msg = ("%s: Must specify xenapi_connection_url, "
-               "xenapi_connection_username (optionally), and "
-               "xenapi_connection_password in %s") % (exec_name, config_file)
-        print(msg)
-        sys.exit(RC_BADCONFIG)
-    return dict(
-        filters_path=filters_path,
-        url=url,
-        username=username,
-        password=password,
-        exec_dirs=exec_dirs,
-    )
-
-
-def filter_command(exec_name, filters_path, user_args, exec_dirs):
-    # Add ../ to sys.path to allow running from branch
-    possible_topdir = os.path.normpath(os.path.join(os.path.abspath(exec_name),
-                                                    os.pardir, os.pardir))
-    if os.path.exists(os.path.join(possible_topdir, "neutron", "__init__.py")):
-        sys.path.insert(0, possible_topdir)
-
-    from oslo_rootwrap import wrapper
-
-    # Execute command if it matches any of the loaded filters
-    filters = wrapper.load_filters(filters_path)
-    filter_match = wrapper.match_filter(
-        filters, user_args, exec_dirs=exec_dirs)
-    if not filter_match:
-        print("Unauthorized command: %s" % ' '.join(user_args))
-        sys.exit(RC_UNAUTHORIZED)
-
-
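-# The command is not executed locally: it is forwarded to the XenAPI
-# 'netwrap' plugin, which runs it in dom0 and returns a JSON-encoded
-# result that is decoded below.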
-def run_command(url, username, password, user_args, cmd_input):
-    try:
-        session = XenAPI.Session(url)
-        session.login_with_password(username, password)
-        host = session.xenapi.session.get_this_host(session.handle)
-        result = session.xenapi.host.call_plugin(
-            host, 'netwrap', 'run_command',
-            {'cmd': json.dumps(user_args), 'cmd_input': json.dumps(cmd_input)})
-        return json.loads(result)
-    except Exception:
-        traceback.print_exc()
-        sys.exit(RC_XENAPI_ERROR)
-
-
-def main():
-    exec_name, config_file, user_args = parse_args()
-    config = load_configuration(exec_name, config_file)
-    filter_command(exec_name, config['filters_path'], user_args, config['exec_dirs'])
-
-    # If data is available on the standard input, we need to pass it to the
-    # command executed in dom0
-    cmd_input = None
-    if select.select([sys.stdin,],[],[],0.0)[0]:
-        cmd_input = "".join(sys.stdin)
-
-    return run_command(config['url'], config['username'], config['password'],
-                       user_args, cmd_input)
-
-
-if __name__ == '__main__':
-    print(main())
diff --git a/devstack/lib/flavors b/devstack/lib/flavors
deleted file mode 100644 (file)
index b2ddb42..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-# Neutron flavors plugin
-# ----------------------
-
-FLAVORS_PLUGIN=flavors
-
-function configure_flavors {
-    _neutron_service_plugin_class_add $FLAVORS_PLUGIN
-}
diff --git a/devstack/lib/l2_agent b/devstack/lib/l2_agent
deleted file mode 100644 (file)
index b70efb1..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
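-# Add an L2 agent extension to L2_AGENT_EXTENSIONS, avoiding duplicates.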
-function plugin_agent_add_l2_agent_extension {
-    local l2_agent_extension=$1
-    if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
-        L2_AGENT_EXTENSIONS=$l2_agent_extension
-    elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
-        L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
-    fi
-}
-
-
-function configure_l2_agent {
-    iniset /$Q_PLUGIN_CONF_FILE agent extensions "$L2_AGENT_EXTENSIONS"
-}
diff --git a/devstack/lib/l2_agent_sriovnicswitch b/devstack/lib/l2_agent_sriovnicswitch
deleted file mode 100755 (executable)
index f422773..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-SRIOV_AGENT_CONF="${Q_PLUGIN_CONF_PATH}/sriov_agent.ini"
-SRIOV_AGENT_BINARY="${NEUTRON_BIN_DIR}/neutron-sriov-nic-agent"
-
-function configure_l2_agent_sriovnicswitch {
-    if [[ -n "$PHYSICAL_NETWORK" ]] && [[ -n "$PHYSICAL_INTERFACE" ]]; then
-        PHYSICAL_DEVICE_MAPPINGS=$PHYSICAL_NETWORK:$PHYSICAL_INTERFACE
-    fi
-    if [[ -n "$PHYSICAL_DEVICE_MAPPINGS" ]]; then
-        iniset /$SRIOV_AGENT_CONF sriov_nic physical_device_mappings $PHYSICAL_DEVICE_MAPPINGS
-    fi
-
-    iniset /$SRIOV_AGENT_CONF securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
-
-    iniset /$SRIOV_AGENT_CONF agent extensions "$L2_AGENT_EXTENSIONS"
-}
-
-function start_l2_agent_sriov {
-    run_process q-sriov-agt "$SRIOV_AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$SRIOV_AGENT_CONF"
-}
-
-function stop_l2_agent_sriov {
-    stop_process q-sriov-agt
-}
diff --git a/devstack/lib/ml2 b/devstack/lib/ml2
deleted file mode 100644 (file)
index 057d445..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-source $LIBDIR/ml2_drivers/sriovnicswitch
-
-
-function enable_ml2_extension_driver {
-    local extension_driver=$1
-    if [[ -z "$Q_ML2_PLUGIN_EXT_DRIVERS" ]]; then
-        Q_ML2_PLUGIN_EXT_DRIVERS=$extension_driver
-    elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension_driver}, ]]; then
-        Q_ML2_PLUGIN_EXT_DRIVERS+=",$extension_driver"
-    fi
-}
-
-
-function configure_qos_ml2 {
-    enable_ml2_extension_driver "qos"
-}
-
-
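-# Split Q_ML2_PLUGIN_MECHANISM_DRIVERS on commas and call the
-# per-driver configure_ml2_<driver> hook for each driver that defines one.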
-function configure_ml2 {
-    OIFS=$IFS;
-    IFS=",";
-    mechanism_drivers_array=($Q_ML2_PLUGIN_MECHANISM_DRIVERS);
-    IFS=$OIFS;
-    for mechanism_driver in "${mechanism_drivers_array[@]}"; do
-        if [ "$(type -t configure_ml2_$mechanism_driver)" = function ]; then
-            configure_ml2_$mechanism_driver
-        fi
-    done
-}
\ No newline at end of file
diff --git a/devstack/lib/ml2_drivers/sriovnicswitch b/devstack/lib/ml2_drivers/sriovnicswitch
deleted file mode 100755 (executable)
index 3a12c06..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-function configure_ml2_sriovnicswitch {
-    :
-}
diff --git a/devstack/lib/qos b/devstack/lib/qos
deleted file mode 100644 (file)
index e9270c0..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-function configure_qos_service_plugin {
-    _neutron_service_plugin_class_add "qos"
-}
-
-
-function configure_qos_core_plugin {
-    configure_qos_$Q_PLUGIN
-}
-
-
-function configure_qos_l2_agent {
-    plugin_agent_add_l2_agent_extension "qos"
-}
-
-
-function configure_qos {
-    configure_qos_service_plugin
-    configure_qos_core_plugin
-    configure_qos_l2_agent
-}
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
deleted file mode 100644 (file)
index 6038e7e..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-LIBDIR=$DEST/neutron/devstack/lib
-
-source $LIBDIR/flavors
-source $LIBDIR/l2_agent
-source $LIBDIR/l2_agent_sriovnicswitch
-source $LIBDIR/ml2
-source $LIBDIR/qos
-
-if [[ "$1" == "stack" ]]; then
-    case "$2" in
-        install)
-            if is_service_enabled q-flavors; then
-                configure_flavors
-            fi
-            if is_service_enabled q-qos; then
-                configure_qos
-            fi
-            ;;
-        post-config)
-            if is_service_enabled q-agt; then
-                configure_l2_agent
-            fi
-            # Note: the SR-IOV agent should run alongside the OVS or Linux
-            # bridge agent, because those are the mechanisms that bind the
-            # DHCP and router ports. Currently DevStack lacks the option to
-            # run two agents on the same node, therefore we create a new
-            # service, q-sriov-agt, and q-agt should be OVS or Linux bridge.
-            if is_service_enabled q-sriov-agt; then
-                configure_$Q_PLUGIN
-                configure_l2_agent
-                configure_l2_agent_sriovnicswitch
-            fi
-            ;;
-        extra)
-            if is_service_enabled q-sriov-agt; then
-                start_l2_agent_sriov
-            fi
-            ;;
-    esac
-elif [[ "$1" == "unstack" ]]; then
-    if is_service_enabled q-sriov-agt; then
-        stop_l2_agent_sriov
-    fi
-fi
diff --git a/devstack/settings b/devstack/settings
deleted file mode 100644 (file)
index b452f88..0000000
+++ /dev/null
@@ -1 +0,0 @@
-L2_AGENT_EXTENSIONS=${L2_AGENT_EXTENSIONS:-}
diff --git a/doc/Makefile b/doc/Makefile
deleted file mode 100644 (file)
index b63e300..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-SPHINXSOURCE   = source
-PAPER         =
-BUILDDIR      = build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE)
-
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
-
-.DEFAULT_GOAL  = html
-
-help:
-       @echo "Please use \`make <target>' where <target> is one of"
-       @echo "  html      to make standalone HTML files"
-       @echo "  dirhtml   to make HTML files named index.html in directories"
-       @echo "  pickle    to make pickle files"
-       @echo "  json      to make JSON files"
-       @echo "  htmlhelp  to make HTML files and a HTML help project"
-       @echo "  qthelp    to make HTML files and a qthelp project"
-       @echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-       @echo "  changes   to make an overview of all changed/added/deprecated items"
-       @echo "  linkcheck to check all external links for integrity"
-       @echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-       -rm -rf $(BUILDDIR)/*
-       if [ -f .autogenerated ] ; then \
-        cat .autogenerated | xargs rm ; \
-        rm .autogenerated ; \
-    fi
-
-html:
-       $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-       @echo
-       @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-       $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-       @echo
-       @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-pickle:
-       $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-       @echo
-       @echo "Build finished; now you can process the pickle files."
-
-json:
-       $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-       @echo
-       @echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-       $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-       @echo
-       @echo "Build finished; now you can run HTML Help Workshop with the" \
-             ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-       $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-       @echo
-       @echo "Build finished; now you can run "qcollectiongenerator" with the" \
-             ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-       @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp"
-       @echo "To view the help file:"
-       @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc"
-
-latex:
-       $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-       @echo
-       @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-       @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
-             "run these through (pdf)latex."
-
-changes:
-       $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-       @echo
-       @echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-       $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-       @echo
-       @echo "Link check complete; look for any errors in the above output " \
-             "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-       $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-       @echo "Testing of doctests in the sources finished, look at the " \
-             "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/pom.xml b/doc/pom.xml
deleted file mode 100644 (file)
index 6fc579f..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-
-    <groupId>org.openstack.docs</groupId>
-    <artifactId>openstack-guide</artifactId>
-    <version>1.0.0-SNAPSHOT</version>
-    <packaging>jar</packaging>
-    <name>OpenStack Guides</name>
-    <!-- ################################################ -->
-    <!-- USE "mvn clean generate-sources" to run this POM -->
-    <!-- ################################################ -->
-    <profiles>
-        <profile>
-            <id>Rackspace Research Repositories</id>
-            <activation>
-                <activeByDefault>true</activeByDefault>
-            </activation>
-            <repositories>
-                <repository>
-                    <id>rackspace-research</id>
-                    <name>Rackspace Research Repository</name>
-                    <url>http://maven.research.rackspacecloud.com/content/groups/public/</url>
-                </repository>
-            </repositories>
-            <pluginRepositories>
-                <pluginRepository>
-                    <id>rackspace-research</id>
-                    <name>Rackspace Research Repository</name>
-                    <url>http://maven.research.rackspacecloud.com/content/groups/public/</url>
-                </pluginRepository>
-            </pluginRepositories>
-        </profile>
-    </profiles>
-
-    <build>
-        <resources>
-            <resource>
-                <directory>target/docbkx/pdf</directory>
-                <excludes>
-                    <exclude>**/*.fo</exclude>
-                </excludes>
-            </resource>
-        </resources>
-        <plugins>
-
-            <plugin>
-                <groupId>com.rackspace.cloud.api</groupId>
-                <artifactId>clouddocs-maven-plugin</artifactId>
-                <version>1.0.5-SNAPSHOT</version>
-                <executions>
-                    <execution>
-                        <id>goal1</id>
-                        <goals>
-                            <goal>generate-pdf</goal>
-                        </goals>
-                        <phase>generate-sources</phase>
-                        <configuration>
-                            <highlightSource>false</highlightSource>
-                        </configuration>
-                    </execution>
-                    <execution>
-                        <id>goal2</id>
-                        <goals>
-                            <goal>generate-webhelp</goal>
-                        </goals>
-                        <phase>generate-sources</phase>
-                        <configuration>
-                            <!-- These parameters only apply to webhelp -->
-                            <enableDisqus>0</enableDisqus>
-                            <disqusShortname>openstackdocs</disqusShortname>
-                            <enableGoogleAnalytics>1</enableGoogleAnalytics>
-                            <googleAnalyticsId>UA-17511903-6</googleAnalyticsId>
-                            <generateToc>
-                                appendix  toc,title
-                                article/appendix  nop
-                                article   toc,title
-                                book      title,figure,table,example,equation
-                                chapter   toc,title
-                                part      toc,title
-                                preface   toc,title
-                                qandadiv  toc
-                                qandaset  toc
-                                reference toc,title
-                                set       toc,title
-                            </generateToc>
-                            <!-- The following elements set the autonumbering of sections in output for chapter numbers but no numbered sections -->
-                            <sectionAutolabel>0</sectionAutolabel>
-                            <sectionLabelIncludesComponentLabel>0</sectionLabelIncludesComponentLabel>
-                            <postProcess>
-                                <!-- Copies the figures to the correct location for webhelp -->
-                                <copy todir="${basedir}/target/docbkx/webhelp/neutron-api-1.0/figures">
-                                    <fileset dir="${basedir}/source/docbkx/neutron-api-1.0/figures">
-                                        <include name="**/*.png" />
-                                    </fileset>
-                                </copy>
-
-                                <!-- New stuff -->
-                                <copy
-                                    todir="${basedir}/target/docbkx/webhelp/trunk/developer/neutron-api-1.0">
-                                    <fileset
-                                        dir="${basedir}/target/docbkx/webhelp/neutron-api-1.0/neutron-api-guide/">
-                                        <include name="**/*" />
-                                    </fileset>
-                                </copy>
-                                <!-- Moves PDFs to the needed placement -->
-                                <move failonerror="false"
-                                    file="${basedir}/target/docbkx/pdf/neutron-api-1.0/neutron-api-guide.pdf"
-                                    tofile="${basedir}/target/docbkx/webhelp/trunk/developer/neutron-api-1.0/neutron-api-guide-trunk.pdf"/>
-
-                                <!-- Deletes leftover unneeded directories -->
-                                <delete
-                                    dir="${basedir}/target/docbkx/webhelp/neutron-api-1.0"/>
-                            </postProcess>
-                        </configuration>
-                    </execution>
-                </executions>
-                <configuration>
-                    <!-- These parameters apply to pdf and webhelp -->
-                    <xincludeSupported>true</xincludeSupported>
-                    <sourceDirectory>source/docbkx</sourceDirectory>
-                    <includes>
-                        neutron-api-1.0/neutron-api-guide.xml
-                    </includes>
-                    <profileSecurity>reviewer</profileSecurity>
-                    <branding>openstack</branding>
-                </configuration>
-            </plugin>
-
-        </plugins>
-    </build>
-</project>
diff --git a/doc/source/conf.py b/doc/source/conf.py
deleted file mode 100644 (file)
index 4f0b7de..0000000
+++ /dev/null
@@ -1,233 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2010 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Keystone documentation build configuration file, created by
-# sphinx-quickstart on Tue May 18 13:50:15 2010.
-#
-# This file is execfile()'d with the current directory set to its containing
-# dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import os
-import subprocess
-import sys
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-NEUTRON_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
-sys.path.insert(0, NEUTRON_DIR)
-
-# -- General configuration ---------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc',
-              'sphinx.ext.coverage',
-              'sphinx.ext.ifconfig',
-              'sphinx.ext.pngmath',
-              'sphinx.ext.graphviz',
-              'sphinx.ext.todo',
-              'oslosphinx']
-
-todo_include_todos = True
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = []
-if os.getenv('HUDSON_PUBLISH_DOCS'):
-    templates_path = ['_ga', '_templates']
-else:
-    templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Neutron'
-copyright = u'2011-present, OpenStack Foundation.'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# Version info
-from neutron.version import version_info as neutron_version
-release = neutron_version.release_string()
-# The short X.Y version.
-version = neutron_version.version_string()
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-# unused_docs = []
-
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = []
-
-# The reST default role (for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-show_authors = True
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-modindex_common_prefix = ['neutron.']
-
-# -- Options for HTML output -------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-# html_theme_path = ["."]
-# html_theme = '_theme'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = ['_theme']
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-# html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
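-# Derive the 'Last updated' timestamp from the date of the most recent
-# commit rather than from the build time.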
-git_cmd = ["git", "log", "--pretty=format:%ad, commit %h", "--date=local",
-           "-n1"]
-html_last_updated_fmt = subprocess.Popen(git_cmd,
-                                         stdout=subprocess.PIPE).\
-                                         communicate()[0]
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_use_modindex = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'neutrondoc'
-
-
-# -- Options for LaTeX output ------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author,
-# documentclass [howto/manual]).
-latex_documents = [
-    ('index', 'Neutron.tex', u'Neutron Documentation',
-     u'Neutron development team', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_use_modindex = True
diff --git a/doc/source/dashboards/check.dashboard.rst b/doc/source/dashboards/check.dashboard.rst
deleted file mode 100644 (file)
index d2353ec..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-
-Neutron Check Pipeline Thumbnails
-=================================
-
-Click to see full size figure.
-
-.. raw:: html
-
-   <table border="1">
-   <tr>
-   <td align="center" width=50%>
-   Failure Percentage - Last 10 Days - DVR and Full Jobs<br>
-   <a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - DVR and Full Jobs&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr-multinode-full.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr-multinode-full.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-dvr-multinode-full%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-dvr%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-multinode-full.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-multinode-full.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-multinode-full%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-full.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-full.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-full%27%29,%27red%27%29&drawNullAsZero=true">
-   <img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr-multinode-full.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr-multinode-full.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-dvr-multinode-full%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-dvr.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-dvr%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-multinode-full.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-multinode-full.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-multinode-full%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-full.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-full.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-full%27%29,%27red%27%29&drawNullAsZero=true" width="400">
-   </a>
-   </td>
-   <td align="center">
-   Failure Percentage - Last 10 Days - Grenade, DSVM API/Functional/Fullstack<br>
-   <a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Grenade, DSVM API/Functional/Fullstack&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-grenade-dsvm-neutron.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-grenade-dsvm-neutron.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-grenade-dsvm-neutron%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-api.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-api.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-api%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-functional.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-functional.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-functional%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-fullstack.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-fullstack.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-fullstack%27%29,%27red%27%29&drawNullAsZero=true">
-   <img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-grenade-dsvm-neutron.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-grenade-dsvm-neutron.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-grenade-dsvm-neutron%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-api.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-api.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-api%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-functional.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-functional.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-functional%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-fullstack.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-dsvm-fullstack.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-fullstack%27%29,%27red%27%29&drawNullAsZero=true" width="400">
-   </a>
-   </td>
-   </tr>
-   <tr>
-   <td align="center">
-   Failure Percentage - Last 10 Days - Rally, LinuxBridge, LBaaS v1/v2<br>
-   <a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Rally, LinuxBridge, LBaaS v1/v2&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-rally-dsvm-neutron-neutron.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-rally-dsvm-neutron-neutron.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-rally-dsvm-neutron-neutron%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-linuxbridge.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-linuxbridge.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-linuxbridge%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv1-dsvm-api.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv1-dsvm-api.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv1-dsvm-api%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-minimal.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-minimal.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv2-dsvm-minimal%27%29,%27red%27%29&drawNullAsZero=true">
-   <img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-rally-dsvm-neutron-neutron.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-rally-dsvm-neutron-neutron.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-rally-dsvm-neutron-neutron%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-linuxbridge.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-linuxbridge.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-linuxbridge%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv1-dsvm-api.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv1-dsvm-api.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv1-dsvm-api%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-minimal.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-neutron-lbaasv2-dsvm-minimal.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv2-dsvm-minimal%27%29,%27red%27%29&drawNullAsZero=true" width="400">
-   </a>
-   </td>
-   <td align="center">
-   Failure Percentage - Last 10 Days - Large Ops<br>
-   <a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Large Ops&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29&drawNullAsZero=true">
-   <img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.check.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27orange%27%29&drawNullAsZero=true" width="400">
-   </a>
-   </td>
-   </tr>
-   </table>
diff --git a/doc/source/dashboards/gate.dashboard.rst b/doc/source/dashboards/gate.dashboard.rst
deleted file mode 100644 (file)
index 1408c41..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-
-Neutron Gate Pipeline Thumbnails
-================================
-
-Click to see full size figure.
-
-.. raw:: html
-
-   <table border="1">
-   <tr>
-   <td align="center" width=50%>
-   Failure Percentage - Last 10 Days - Gate Jobs Set 1 (constrained docs, pep8, py27 unit, py34 unit)<br>
-   <a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Gate Jobs Set 1 (constrained docs, pep8, py27 unit, py34 unit)&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-docs.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-docs.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-docs%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-pep8-constraints.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-pep8-constraints.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-pep8-constraints%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-python27-constraints.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-python27-constraints.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-python27-constraints%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-python34-constraints.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-python34-constraints.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-python34-constraints%27%29,%27red%27%29&drawNullAsZero=true">
-   <img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-docs.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-docs.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-docs%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-pep8-constraints.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-pep8-constraints.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-pep8-constraints%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-python27-constraints.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-python27-constraints.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-python27-constraints%27%29,%27green%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-python34-constraints.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-python34-constraints.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-python34-constraints%27%29,%27red%27%29&drawNullAsZero=true" width="400">
-   </a>
-   </td>
-   <td align="center">
   Failure Percentage - Last 10 Days - Gate Jobs Set 2 (DSVM Jobs)<br>
-   <a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Gate Jobs Set 2 (DSVM Jobs)&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-dsvm-api.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-dsvm-api.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-api%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-full.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-full.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-full%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-linuxbridge.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-linuxbridge.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-linuxbridge%27%29,%27red%27%29&drawNullAsZero=true&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-grenade-dsvm-neutron.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-grenade-dsvm-neutron.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-grenade-dsvm-neutron%27%29,%27green%27%29&drawNullAsZero=true">
-   <img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-dsvm-api.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-dsvm-api.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-dsvm-api%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-full.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-full.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-full%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-linuxbridge.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-linuxbridge.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-linuxbridge%27%29,%27red%27%29&drawNullAsZero=true&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-grenade-dsvm-neutron.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-grenade-dsvm-neutron.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-grenade-dsvm-neutron%27%29,%27green%27%29&drawNullAsZero=true" width="400">
-   </a>
-   </td>
-   </tr>
-   <tr>
-   <td align="center" width=50%>
-   Failure Percentage - Last 10 Days - Gate Jobs Set 3 (LBaaS and Large Ops)<br>
-   <a href="http://graphite.openstack.org/render/?title=Failure Percentage - Last 10 Days - Gate Jobs Set 1 (LBaaS and Large Ops)&from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-large-ops.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-lbaasv1-dsvm-api.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-lbaasv1-dsvm-api.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv1-dsvm-api%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-lbaasv2-dsvm-minimal.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-lbaasv2-dsvm-minimal.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv2-dsvm-minimal%27%29,%27green%27%29&drawNullAsZero=true">
-   <img src="http://graphite.openstack.org/render/?from=-10days&height=500&until=now&width=1200&bgcolor=ffffff&fgcolor=000000&yMax=100&yMin=0&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-lbaasv1-dsvm-api.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-lbaasv1-dsvm-api.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv1-dsvm-api%27%29,%27orange%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-large-ops.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-tempest-dsvm-neutron-large-ops.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-tempest-dsvm-neutron-large-ops%27%29,%27blue%27%29&target=color%28alias%28movingAverage%28asPercent%28transformNull%28stats_counts.zuul.pipeline.gate.job.gate-neutron-lbaasv2-dsvm-minimal.FAILURE%29,transformNull%28sum%28stats_counts.zuul.pipeline.gate.job.gate-neutron-lbaasv2-dsvm-minimal.{SUCCESS,FAILURE}%29%29%29,%2736hours%27%29,%20%27gate-neutron-lbaasv2-dsvm-minimal%27%29,%27green%27%29&drawNullAsZero=true" width="400">
-   </a>
-   </td>
-   </tr>
-   </table>
diff --git a/doc/source/dashboards/index.rst b/doc/source/dashboards/index.rst
deleted file mode 100644 (file)
index 5676816..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-Gerrit Dashboards
-=================
-
-- `Neutron master branch reviews <https://review.openstack.org/#/dashboard/?foreach=%28project%3Aopenstack%2Fneutron+OR+project%3Aopenstack%2Fpython%252Dneutronclient%29+status%3Aopen+NOT+owner%3Aself+NOT+label%3AWorkflow%3C%3D%252D1+label%3AVerified%3E%3D1%252cjenkins+NOT+label%3ACode%252DReview%3E%3D%252D2%252cself+branch%3Amaster&title=Neutron+Review+Inbox+%28master+branch+only%29&Needs+Feedback+%28Changes+older+than+5+days+that+have+not+been+reviewed+by+anyone%29=NOT+label%3ACode%252DReview%3C%3D2+age%3A5d&You+are+a+reviewer%252c+but+haven%27t+voted+in+the+current+revision=reviewer%3Aself&Needs+final+%2B2=label%3ACode%252DReview%3E%3D2+NOT%28reviewerin%3Aneutron%252Dcore+label%3ACode%252DReview%3C%3D%252D1%29+limit%3A50&Passed+Jenkins%252c+No+Negative+Core+Feedback=NOT+label%3ACode%252DReview%3E%3D2+NOT%28reviewerin%3Aneutron%252Dcore+label%3ACode%252DReview%3C%3D%252D1%29+limit%3A50&Wayward+Changes+%28Changes+with+no+code+review+in+the+last+2days%29=NOT+label%3ACode%252DReview%3C%3D2+age%3A2d>`_
-- `Neutron subproject reviews (master branch) <https://review.openstack.org/#/dashboard/?foreach=%28project%3Aopenstack%2Fdragonflow+OR+project%3Aopenstack%2Fnetworking%252Darista+OR+project%3Aopenstack%2Fnetworking%252Dbagpipe%252Dl2+OR+project%3Aopenstack%2Fnetworking%252Dbgpvpn+OR+project%3Aopenstack%2Fnetworking%252Dcisco+OR+project%3Aopenstack%2Fnetworking%252Dl2gw+OR+project%3Aopenstack%2Fnetworking%252Dlenovo+OR+project%3Aopenstack%2Fnetworking%252Dmidonet+OR+project%3Aopenstack%2Fnetworking%252Dodl+OR+project%3Aopenstack%2Fnetworking%252Dofagent+OR+project%3Aopenstack%2Fnetworking%252Donos+OR+project%3Aopenstack%2Fnetworking%252Dovn+OR+project%3Aopenstack%2Fnetworking%252Dsfc+OR+project%3Aopenstack%2Foctavia+OR+project%3Aopenstack%2Fnetworking%252Dplumgrid+OR+project%3Aopenstack%2Fvmware%252Dnsx+OR+project%3Aopenstack%2Fnetworking%252Dvsphere%29+status%3Aopen+NOT+owner%3Aself+NOT+label%3AWorkflow%3C%3D%252D1+label%3AVerified%3E%3D1%252cjenkins+NOT+label%3ACode%252DReview%3E%3D%252D2%252cself+branch%3Amaster&title=Neutron+Sub+Projects+Review+Inbox&Needs+Feedback+%28Changes+older+than+5+days+that+have+not+been+reviewed+by+anyone%29=NOT+label%3ACode%252DReview%3C%3D2+age%3A5d&You+are+a+reviewer%252c+but+haven%27t+voted+in+the+current+revision=reviewer%3Aself&Needs+final+%2B2=label%3ACode%252DReview%3E%3D2+NOT%28reviewerin%3Aneutron%252Dcore+label%3ACode%252DReview%3C%3D%252D1%29+limit%3A50&Passed+Jenkins%252c+No+Negative+Core+Feedback=NOT+label%3ACode%252DReview%3E%3D2+NOT%28reviewerin%3Aneutron%252Dcore+label%3ACode%252DReview%3C%3D%252D1%29+limit%3A50&Wayward+Changes+%28Changes+with+no+code+review+in+the+last+2days%29=NOT+label%3ACode%252DReview%3C%3D2+age%3A2d>`_
-- `Neutron stable branch reviews <https://review.openstack.org/#/dashboard/?foreach=%28project%3Aopenstack%2Fneutron+OR+project%3Aopenstack%2Fpython%252Dneutronclient+OR+project%3Aopenstack%2Fneutron%252Dfwaas+OR+project%3Aopenstack%2Fneutron%252Dlbaas+OR+project%3Aopenstack%2Fneutron%252Dvpnaas+OR+project%3Aopenstack%2Fdragonflow+OR+project%3Aopenstack%2Fnetworking%252Darista+OR+project%3Aopenstack%2Fnetworking%252Dbagpipe%252Dl2+OR+project%3Aopenstack%2Fnetworking%252Dbgpvpn+OR+project%3Aopenstack%2Fnetworking%252Dcisco+OR+project%3Aopenstack%2Fnetworking%252Dl2gw+OR+project%3Aopenstack%2Fnetworking%252Dlenovo+OR+project%3Aopenstack%2Fnetworking%252Dmidonet+OR+project%3Aopenstack%2Fnetworking%252Dodl+OR+project%3Aopenstack%2Fnetworking%252Dofagent+OR+project%3Aopenstack%2Fnetworking%252Donos+OR+project%3Aopenstack%2Fnetworking%252Dovn+OR+project%3Aopenstack%2Fnetworking%252Dsfc+OR+project%3Aopenstack%2Foctavia+OR+project%3Aopenstack%2Fnetworking%252Dplumgrid+OR+project%3Aopenstack%2Fvmware%252Dnsx+OR+project%3Aopenstack%2Fnetworking%252Dvsphere%29+status%3Aopen+NOT+owner%3Aself+NOT+label%3AWorkflow%3C%3D%252D1+label%3AVerified%3E%3D1%252cjenkins+NOT+label%3ACode%252DReview%3E%3D%252D2%252cself+branch%3A%5Estable%2F.%2A&title=Neutron+Stable+Related+Projects+Review+Inbox&Needs+Feedback+%28Changes+older+than+5+days+that+have+not+been+reviewed+by+anyone%29=NOT+label%3ACode%252DReview%3C%3D2+age%3A5d&You+are+a+reviewer%252c+but+haven%27t+voted+in+the+current+revision=reviewer%3Aself&Needs+final+%2B2=label%3ACode%252DReview%3E%3D2+NOT%28reviewerin%3Aneutron%252Dstable%252Dmaint+label%3ACode%252DReview%3C%3D%252D1%29+limit%3A50&Passed+Jenkins%252c+No+Negative+Core+Feedback=NOT+label%3ACode%252DReview%3E%3D2+NOT%28reviewerin%3Aneutron%252Dstable%252Dmaint+label%3ACode%252DReview%3C%3D%252D1%29+limit%3A50&Wayward+Changes+%28Changes+with+no+code+review+in+the+last+2days%29=NOT+label%3ACode%252DReview%3C%3D2+age%3A2d>`_
-
-These dashboard links can be generated by `Gerrit Dashboard Creator`_.
-Useful dashboard definitions are found in the ``dashboards`` directory.
-
-.. _Gerrit Dashboard Creator: https://github.com/openstack/gerrit-dash-creator
-
-Neutron Graphite Pages
-======================
-
-.. toctree::
-   :maxdepth: 1
-
-   gate.dashboard
-   check.dashboard
diff --git a/doc/source/devref/address_scopes.rst b/doc/source/devref/address_scopes.rst
deleted file mode 100644 (file)
index c3c39cc..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-Subnet Pools and Address Scopes
-===============================
-
-This page discusses subnet pools and address scopes.
-
-Subnet Pools
-------------
-
-Learn about subnet pools by watching the summit talk given in Vancouver [#]_.
-
-.. [#] http://www.youtube.com/watch?v=QqP8yBUUXBM&t=6m12s
-
-Subnet pools were added in Kilo.  They are relatively simple.  A SubnetPool
-has any number of SubnetPoolPrefix objects associated with it.  These
-prefixes are in CIDR format.  Each CIDR is a piece of the address space that
-is available for allocation.
-
-Subnet Pools support IPv6 just as well as IPv4.
-
-The Subnet model object now has a subnetpool_id attribute whose default is null
-for backward compatibility.  The subnetpool_id attribute stores the UUID of the
-subnet pool that acted as the source for the address range of a particular
-subnet.
-
-When creating a subnet, the subnetpool_id can optionally be specified.  If it
-is, the 'cidr' field is not required.  If 'cidr' is specified, it will be
-allocated from the pool, assuming the pool includes it and hasn't already
-allocated any part of it.  If 'cidr' is left out, then the prefixlen attribute
-can be specified.  If it is not, the default prefix length is taken from the
-subnet pool.  Think of it this way: the allocation logic always needs to know
-the size of the desired subnet.  It can take it from a specific CIDR, a
-prefixlen, or the default.  A specific CIDR is optional, and the allocation
-will try to honor it if provided; the request will fail if it cannot.
-
-Subnet pools do not allow overlap of subnets.
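-
-As an illustrative sketch using the Kilo-era neutron CLI (command and option
-names may differ in your client version, and the pool and network names here
-are made up), creating a pool and then allocating a subnet from it might look
-like::
-
-    neutron subnetpool-create --pool-prefix 10.10.0.0/16 \
-        --default-prefixlen 26 demo-pool
-    neutron subnet-create --subnetpool demo-pool demo-net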
-
-Subnet Pool Quotas
-~~~~~~~~~~~~~~~~~~
-
-A quota mechanism was provided for subnet pools.  It is different from other
-quota mechanisms in Neutron because it doesn't count instances of first-class
-objects.  Instead, it counts how much of the address space is used.
-
-For IPv4, it made reasonable sense to count quota in terms of individual
-addresses.  So, if you're allowed exactly one /24, your quota should be set to
-256.  Three /26s would be 192.  This mechanism encourages more efficient use of
-the IPv4 space which will be increasingly important when working with globally
-routable addresses.
-
-For IPv6, the smallest viable subnet in Neutron is a /64.  There is no reason
-to allocate a subnet of any other size for use on a Neutron network.  It would
-look pretty funny to set a quota of 18446744073709551616 (2^64) to allow one
-/64 subnet.  To avoid this, we count IPv6 quota in terms of /64s.  So, a quota
-of 3 allows three /64 subnets.  When we need to allocate something smaller in
-the future, we will need to ensure that the code can handle non-integer quota
-consumption.
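-
-A quick sanity check of the arithmetic above (Python, illustrative only)::
-
-    >>> 2 ** (32 - 24)        # addresses in one IPv4 /24
-    256
-    >>> 3 * 2 ** (32 - 26)    # three IPv4 /26s
-    192
-    >>> 2 ** 64               # addresses in one IPv6 /64
-    18446744073709551616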
-
-Allocation
-~~~~~~~~~~
-
-Allocation is done in a way that aims to minimize fragmentation of the pool.
-The relevant code is here [#]_.  First, the available prefixes are computed
-using a set difference:  pool - allocations.  The result is compacted [#]_ and
-then sorted by size.  The subnet is then allocated from the smallest available
-prefix that is large enough to accommodate the request.
-
-.. [#] neutron/ipam/subnet_alloc.py (_allocate_any_subnet)
-.. [#] http://pythonhosted.org/netaddr/api.html#netaddr.IPSet.compact
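-
-A minimal sketch of the same strategy using netaddr directly (the variable
-names are illustrative, not the ones used in ``subnet_alloc.py``)::
-
-    from netaddr import IPSet
-
-    pool = IPSet(['10.10.0.0/16'])
-    allocations = IPSet(['10.10.0.0/24'])
-
-    available = pool - allocations   # set difference: pool - allocations
-    available.compact()              # merge adjacent prefixes
-    # sort candidates from smallest prefix (largest prefixlen) to largest
-    cidrs = sorted(available.iter_cidrs(),
-                   key=lambda c: c.prefixlen, reverse=True)
-    # pick the smallest prefix large enough to hold a /26 request
-    candidate = next(c for c in cidrs if c.prefixlen <= 26)
-    subnet = next(candidate.subnet(26))  # first /26 carved from it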
-
-Address Scopes
---------------
-
-Before subnet pools or address scopes, it was impossible to tell if a network
-address was routable in a certain context because the address was given
-explicitly on subnet create and wasn't validated against any other addresses.
-Address scopes are meant to solve this by putting control over the address
-space in the hands of an authority:  the address scope owner.  The mechanism
-makes use of the already existing SubnetPool concept for allocation.
-
-Address scopes are "the thing within which address overlap is not allowed" and
-thus provide more flexible control as well as decoupling of address overlap
-from tenancy.
-
-Prior to the Mitaka release, there was implicitly only a single 'shared'
-address scope.  Arbitrary address overlap was allowed, making it pretty much
-a "free for all".  To make things seem somewhat sane, normal tenants were not
-able to use routers to cross-plug networks from different tenants, and NAT
-was used between internal networks and external networks.  It was almost as
-if each tenant had a private address scope.
-
-The problem is that this model cannot support use cases where NAT is not
-desired or supported (e.g. IPv6) or we want to allow different tenants to
-cross-plug their networks.
-
-An AddressScope covers only one address family, but address scopes work
-equally well for IPv4 and IPv6.
-
-Routing
-~~~~~~~
-
-The reference implementation honors address scopes.  Within an address scope,
-addresses route freely (barring any FW rules or other external restrictions).
-Between scopes, routing is prevented unless address translation is used.  For
-now, floating IPs are the only place where traffic crosses scope boundaries.
-The 1-1 NAT allows this to happen.
-
-.. TODO (Carl) Implement NAT for floating ips crossing scopes
-.. TODO (Carl) Implement SNAT for crossing scopes
-
-RPC
-~~~
-
-The L3 agent in the reference implementation needs to know the address scope
-for each port on each router in order to map ingress traffic correctly.
-
-Each subnet from the same address family on a network is required to be from
-the same subnet pool.  Therefore, the address scope will also be the same.  If
-this were not the case, it would be more difficult to match ingress traffic on
-a port with the appropriate scope.  It may be counter-intuitive but L3 address
-scopes need to be anchored to some sort of non-L3 thing (e.g. an L2 interface)
-in the topology in order to determine the scope of ingress traffic.  For now,
-we use ports/networks.  In the future, we may be able to distinguish scopes
-by something else, such as the remote MAC address.
-
-The address scope id is set on each port in a dict under the 'address_scopes'
-attribute.  The scope is distinct per address family.  If the attribute does
-not appear, it is assumed to be null for both families.  A value of null means
-that the addresses are in the "implicit" address scope which holds all
-addresses that don't have an explicit one.  All subnets that existed in
-Neutron before address scopes were introduced fall into this implicit scope.
-
-Here is an example of how the JSON will look in the context of a router port::
-
-    "address_scopes": {
-        "4": "d010a0ea-660e-4df4-86ca-ae2ed96da5c1",
-        "6": null
-    },
-
-To implement floating IPs crossing scope boundaries, the L3 agent needs to
-know the target scope of the floating IP.  The fixed address is not enough to
-disambiguate because, theoretically, there could be overlapping addresses from
-different scopes.  The scope is computed [#]_ from the floating IP fixed port
-and attached to the floating IP dict under the 'fixed_ip_address_scope'
-attribute.  Here's what the JSON looks like (trimmed)::
-
-    {
-         ...
-         "floating_ip_address": "172.24.4.4",
-         "fixed_ip_address": "172.16.0.3",
-         "fixed_ip_address_scope": "d010a0ea-660e-4df4-86ca-ae2ed96da5c1",
-         ...
-    }
-
-.. [#] neutron/db/l3_db.py (_get_sync_floating_ips)
-
-Model
-~~~~~
-
-The model for subnet pools and address scopes can be found in
-neutron/db/models_v2.py and neutron/db/address_scope_db.py.  This document
-won't go over all of the details.  It is worth noting how they relate to
-existing Neutron objects.  The existing Neutron subnet now optionally
-references a single subnet pool::
-
-    +----------------+        +------------------+        +--------------+
-    | Subnet         |        | SubnetPool       |        | AddressScope |
-    +----------------+        +------------------+        +--------------+
-    | subnet_pool_id +------> | address_scope_id +------> |              |
-    |                |        |                  |        |              |
-    |                |        |                  |        |              |
-    |                |        |                  |        |              |
-    +----------------+        +------------------+        +--------------+
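-
-In SQLAlchemy terms, the chain above is roughly as follows (columns abridged
-and paraphrased; see the model files named above for the authoritative
-definitions)::
-
-    class Subnet(model_base.BASEV2, HasId, HasTenant):
-        # null for subnets created before subnet pools existed
-        subnetpool_id = sa.Column(sa.String(36))
-
-    class SubnetPool(model_base.BASEV2, HasId, HasTenant):
-        # null when the pool does not belong to an address scope
-        address_scope_id = sa.Column(sa.String(36))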
diff --git a/doc/source/devref/advanced_services.rst b/doc/source/devref/advanced_services.rst
deleted file mode 100644 (file)
index 1499869..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Advanced Services
-=================
-
-Historically, Neutron supported the following advanced services:
-
-#. **FWaaS** (*Firewall-as-a-Service*): runs as part of the L3 agent.
-#. **LBaaS** (*Load-Balancer-as-a-Service*): implemented purely inside
-   neutron-server, does not interact directly with agents.
-#. **VPNaaS** (*VPN-as-a-Service*): derives from L3 agent to add
-   VPNaaS functionality.
-
-Starting with the Kilo release, these services are split into separate
-repositories managed by extended reviewer teams.
-
-#. http://git.openstack.org/cgit/openstack/neutron-fwaas/
-#. http://git.openstack.org/cgit/openstack/neutron-lbaas/
-#. http://git.openstack.org/cgit/openstack/neutron-vpnaas/
diff --git a/doc/source/devref/alembic_migrations.rst b/doc/source/devref/alembic_migrations.rst
deleted file mode 100644 (file)
index c7ab570..0000000
+++ /dev/null
@@ -1,472 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-.. _alembic_migrations:
-
-Alembic Migrations
-==================
-
-Introduction
-------------
-
-The migrations in the ``alembic/versions`` directory contain the changes
-needed to migrate from older Neutron releases to newer ones. A migration
-occurs by executing a script that details the changes needed to upgrade the
-database. The migration scripts are ordered so that multiple scripts can run
-sequentially to update the database.
-
-
-The Migration Wrapper
----------------------
-
-The scripts are executed by Neutron's migration wrapper ``neutron-db-manage``
-which uses the Alembic library to manage the migration. Pass the ``--help``
-option to the wrapper for usage information.
-
-The wrapper takes some options followed by some commands::
-
- neutron-db-manage <options> <commands>
-
-The wrapper needs to be provided with the database connection string, which is
-usually provided in the ``neutron.conf`` configuration file in an installation.
-The wrapper automatically reads from ``/etc/neutron/neutron.conf`` if it is
-present. If the configuration is in a different location::
-
- neutron-db-manage --config-file /path/to/neutron.conf <commands>
-
-Multiple ``--config-file`` options can be passed if needed.
-
-Instead of reading the DB connection from the configuration file(s) the
-``--database-connection`` option can be used::
-
- neutron-db-manage --database-connection mysql+pymysql://root:secret@127.0.0.1/neutron?charset=utf8 <commands>
-
-The ``branches``, ``current``, and ``history`` commands all accept a
-``--verbose`` option, which, when passed, will instruct ``neutron-db-manage``
-to display more verbose output for the specified command::
-
- neutron-db-manage current --verbose
-
-For some commands the wrapper needs to know the entrypoint of the core plugin
-for the installation. This can be read from the configuration file(s) or
-specified using the ``--core_plugin`` option::
-
- neutron-db-manage --core_plugin neutron.plugins.ml2.plugin.Ml2Plugin <commands>
-
-In the examples of using the wrapper below, the options are not shown. It is
-assumed you will use the options that your environment needs.
-
-For new deployments you will start with an empty database. You then upgrade
-to the latest database version via::
-
- neutron-db-manage upgrade heads
-
-For existing deployments the database will already be at some version. To
-check the current database version::
-
- neutron-db-manage current
-
-After installing a new version of Neutron server, upgrading the database uses
-the same command::
-
- neutron-db-manage upgrade heads
-
-To create a script to run the migration offline::
-
- neutron-db-manage upgrade heads --sql
-
-To run the offline migration between specific migration versions::
-
- neutron-db-manage upgrade <start version>:<end version> --sql
-
-Upgrade the database incrementally::
-
- neutron-db-manage upgrade --delta <# of revs>
-
-**NOTE:** Database downgrade is not supported.
-
-
-Migration Branches
-------------------
-
-Neutron makes use of alembic branches for two purposes.
-
-1. Independent Sub-Project Tables
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Various `sub-projects <sub_projects.html>`_ can be installed with Neutron. Each
-sub-project registers its own alembic branch which is responsible for migrating
-the schemas of the tables owned by the sub-project.
-
-The neutron-db-manage script detects which sub-projects have been installed by
-enumerating the ``neutron.db.alembic_migrations`` entrypoints. For more details
-see the `Entry Points section of Contributing extensions to Neutron
-<contribute.html#entry-points>`_.
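-
-For illustration, a sub-project advertises its migration branch through an
-entry point in its ``setup.cfg``; a hypothetical sub-project might register
-something along these lines (names made up)::
-
-    [entry_points]
-    neutron.db.alembic_migrations =
-        networking-foo = networking_foo.db.migration:alembic_migrations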
-
-The neutron-db-manage script runs the given alembic command against all
-installed sub-projects. (An exception is the ``revision`` command, which is
-discussed in the `Developers`_ section below.)
-
-2. Offline/Online Migrations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Since Liberty, Neutron maintains two parallel alembic migration branches.
-
-The first one, called 'expand', is used to store expansion-only migration
-rules. Those rules are strictly additive and can be applied while
-neutron-server is running. Examples of additive database schema changes are:
-creating a new table, adding a new table column, adding a new index, etc.
-
-The second branch, called 'contract', is used to store those migration rules
-that are not safe to apply while neutron-server is running. Those include:
-column or table removal, moving data from one part of the database into
-another (renaming a column, transforming a single table into multiple tables,
-etc.), and introducing or modifying constraints.
-
-The intent of the split is to allow invoking the safe migrations from the
-'expand' branch while neutron-server is running, reducing the downtime needed
-to upgrade the service.
-
-For more details, see the `Expand and Contract Scripts`_ section below.
-
-
-Developers
-----------
-
-A database migration script is required when you submit a change to Neutron or
-a sub-project that alters the database model definition. The migration script
-is a special Python file that includes code to upgrade the database to match
-the changes in the model definition. Alembic will execute these scripts in
-order to provide a linear migration path between revisions. The
-neutron-db-manage command can be used to generate migration scripts for you to
-complete. The operations in the template are those supported by the Alembic
-migration library.
-
-
-.. _neutron-db-manage-without-devstack:
-
-Running neutron-db-manage without devstack
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When, as a developer, you want to work with the Neutron DB schema and alembic
-migrations only, it can be rather tedious to rely on devstack just to get an
-up-to-date neutron-db-manage installed. This section describes how to work on
-the schema and migration scripts with just the unit test virtualenv and
-mysql. You can also operate on a separate test database so you don't mess up
-the installed Neutron database.
-
-Setting up the environment
-++++++++++++++++++++++++++
-
-Install mysql service
-'''''''''''''''''''''
-
-This only needs to be done once since it is a system install. If you have run
-devstack on your system before, then the mysql service is already installed and
-you can skip this step.
-
-MySQL must be configured the way devstack installs it, and the following
-script accomplishes this without actually running devstack::
-
- INSTALL_MYSQL_ONLY=True ./tools/configure_for_func_testing.sh ../devstack
-
-Run this from the root of the neutron repo. It assumes an up-to-date clone of
-the devstack repo is in ``../devstack``.
-
-Note that you must know the mysql root password. It is derived from (in order
-of precedence):
-
-- ``$MYSQL_PASSWORD`` in your environment
-- ``$MYSQL_PASSWORD`` in ``../devstack/local.conf``
-- ``$MYSQL_PASSWORD`` in ``../devstack/localrc``
-- default of 'secretmysql' from ``tools/configure_for_func_testing.sh``
-
-Work on a test database
-'''''''''''''''''''''''
-
-Rather than using the neutron database when working on schema and alembic
-migration script changes, we can work on a test database. In the examples
-below, we use a database named ``testdb``.
-
-To create the database::
-
- mysql -e "create database testdb;"
-
-You will often need to clear it to re-run operations from a blank database::
-
- mysql -e "drop database testdb; create database testdb;"
-
-To work on the test database instead of the neutron database, point to it with
-the ``--database-connection`` option::
-
- neutron-db-manage --database-connection mysql+pymysql://root:secretmysql@127.0.0.1/testdb?charset=utf8 <commands>
-
-You may find it convenient to set up an alias (in your .bashrc) for this::
-
- alias test-db-manage='neutron-db-manage --database-connection mysql+pymysql://root:secretmysql@127.0.0.1/testdb?charset=utf8'
-
-Create and activate the virtualenv
-''''''''''''''''''''''''''''''''''
-
-From the root of the neutron (or sub-project) repo directory, run::
-
- tox --notest -r -e py27
- source .tox/py27/bin/activate
-
-Now you can use the ``test-db-manage`` alias in place of ``neutron-db-manage``
-in the script auto-generation instructions below.
-
-When you are done, exit the virtualenv::
-
- deactivate
-
-
-Script Auto-generation
-~~~~~~~~~~~~~~~~~~~~~~
-
-This section describes how to auto-generate an alembic migration script for a
-model change. You may either use the system installed devstack environment, or
-a virtualenv + testdb environment as described in
-:ref:`neutron-db-manage-without-devstack`.
-
-Stop the neutron service. Work from the base directory of the neutron (or
-sub-project) repo. Check out the master branch and do ``git pull`` to ensure
-it is fully up to date. Check out your development branch and rebase it to
-master.
-
-**NOTE:** Make sure you have not updated the ``CONTRACT_HEAD`` or
-``EXPAND_HEAD`` yet at this point.
-
-Start with an empty database and upgrade to heads::
-
- mysql -e "drop database neutron; create database neutron;"
- neutron-db-manage upgrade heads
-
-The database schema is now created without your model changes. The alembic
-``revision --autogenerate`` command will look for differences between the
-schema generated by the upgrade command and the schema defined by the models,
-including your model updates::
-
- neutron-db-manage revision -m "description of revision" --autogenerate
-
-This generates a prepopulated template with the changes needed to match the
-database state with the models.  You should inspect the autogenerated template
-to ensure that the proper models have been altered.
-When running the above command you will probably get the following error
-message::
-
-  Multiple heads are present; please specify the head revision on which the
-  new revision should be based, or perform a merge.
-
-This is alembic telling you that it does not know which branch (contract or
-expand) to generate the revision for. You must decide, based on whether you
-are doing contracting or expanding changes to the schema, and provide either
-the ``--contract`` or ``--expand`` option. If you have both types of changes,
-you must run the command twice, once with each option, and then manually edit
-the generated revision scripts to separate the migration operations.
-
-In rare circumstances, you may want to start with an empty migration template
-and manually author the changes necessary for an upgrade.  You can create a
-blank file for a branch via::
-
- neutron-db-manage revision -m "description of revision" --expand
- neutron-db-manage revision -m "description of revision" --contract
-
-**NOTE:** If you use the above commands, check that the migration is created
-in the directory named for the current release. If not, please raise the
-issue with the development team (IRC, mailing list, launchpad bug).
-
-**NOTE:** The "description of revision" text should be a simple English
-sentence. The first 30 characters of the description will be used in the file
-name for the script, with underscores substituted for spaces. If the truncation
-occurs at an awkward point in the description, you can modify the script file
-name manually before committing.
-
-The timeline on each alembic branch should remain linear and not interleave
-with other branches, so that there is a clear path when upgrading. To verify
-that alembic branches maintain linear timelines, you can run this command::
-
- neutron-db-manage check_migration
-
-If this command reports an error, you can troubleshoot by showing the migration
-timelines using the ``history`` command::
-
- neutron-db-manage history
-
-
-Expand and Contract Scripts
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The obsolete "branchless" design of a migration script included that it
-indicates a specific "version" of the schema, and includes directives that
-apply all necessary changes to the database at once.  If we look for example at
-the script ``2d2a8a565438_hierarchical_binding.py``, we will see::
-
-    # .../alembic_migrations/versions/2d2a8a565438_hierarchical_binding.py
-
-    def upgrade():
-
-        # .. inspection code ...
-
-        op.create_table(
-            'ml2_port_binding_levels',
-            sa.Column('port_id', sa.String(length=36), nullable=False),
-            sa.Column('host', sa.String(length=255), nullable=False),
-            # ... more columns ...
-        )
-
-        for table in port_binding_tables:
-            op.execute((
-                "INSERT INTO ml2_port_binding_levels "
-                "SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
-                "FROM %s "
-                "WHERE host <> '' "
-                "AND driver <> '';"
-            ) % table)
-
-        op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
-        op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
-        op.drop_column('ml2_dvr_port_bindings', 'segment')
-        op.drop_column('ml2_dvr_port_bindings', 'driver')
-
-        # ... more DROP instructions ...
-
-The above script contains directives that fall under both the "expand" and
-"contract" categories, as well as some data migrations.  The
-``op.create_table`` directive is an "expand"; it may be run safely while the
-old version of the application still runs, as the old code simply doesn't look
-for this table.  The ``op.drop_constraint`` and ``op.drop_column`` directives
-are "contract" directives (the drop column more so than the drop constraint);
-running at least the ``op.drop_column`` directives means that the old version
-of the application will fail, as it will attempt to access columns which no
-longer exist.
-
-The data migrations in this script add new rows to the newly created
-``ml2_port_binding_levels`` table.
-
-Under the new migration script directory structure, the above script would be
-stated as two scripts: an "expand" and a "contract" script::
-
-    # expansion operations
-    # .../alembic_migrations/versions/liberty/expand/2bde560fc638_hierarchical_binding.py
-
-    def upgrade():
-
-        op.create_table(
-            'ml2_port_binding_levels',
-            sa.Column('port_id', sa.String(length=36), nullable=False),
-            sa.Column('host', sa.String(length=255), nullable=False),
-            # ... more columns ...
-        )
-
-
-    # contraction operations
-    # .../alembic_migrations/versions/liberty/contract/4405aedc050e_hierarchical_binding.py
-
-    def upgrade():
-
-        for table in port_binding_tables:
-            op.execute((
-                "INSERT INTO ml2_port_binding_levels "
-                "SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
-                "FROM %s "
-                "WHERE host <> '' "
-                "AND driver <> '';"
-            ) % table)
-
-        op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
-        op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
-        op.drop_column('ml2_dvr_port_bindings', 'segment')
-        op.drop_column('ml2_dvr_port_bindings', 'driver')
-
-        # ... more DROP instructions ...
-
-The two scripts would be present in different subdirectories and also part of
-entirely separate versioning streams.  The "expand" operations are in the
-"expand" script, and the "contract" operations are in the "contract" script.
-
-For the time being, data migration rules also belong to the contract branch.
-The expectation is that live data migrations will eventually move into
-middleware that is aware of the different database schema elements to
-converge on, but Neutron is not there yet.
-
-Scripts that contain only expansion or contraction rules do not require a split
-into two parts.
-
-If a contraction script depends on a script from expansion stream, the
-following directive should be added in the contraction script::
-
-    depends_on = ('<expansion-revision>',)
-
-
-HEAD files for conflict management
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In the directory ``neutron/db/migration/alembic_migrations/versions`` there
-are two files, ``CONTRACT_HEAD`` and ``EXPAND_HEAD``. These files contain the
-ID of the head revision in each branch. The purpose of these files is to
-validate the revision timelines and prevent non-linear changes from entering
-the merge queue.
-
-When you create a new migration script with neutron-db-manage, these files
-are updated automatically. But if another migration script is merged while
-your change is under review, you will need to resolve the conflict manually
-by changing the ``down_revision`` in your migration script.
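-
-As a sketch with hypothetical revision IDs: suppose your expand script was
-based on head ``111aaa``, but revision ``222bbb`` merged first. You would
-update your script and the head file like so::
-
-    # your expand migration script
-    revision = '333ccc'
-    down_revision = '222bbb'   # was '111aaa' before the conflict
-
-    # EXPAND_HEAD now contains
-    333ccc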
-
-Applying database migration rules
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To apply just expansion rules, execute::
-
- neutron-db-manage upgrade --expand
-
-After the first step is done, you can stop neutron-server and apply the
-remaining non-expansive migration rules, if any::
-
- neutron-db-manage upgrade --contract
-
-and finally, start your neutron-server again.
-
-If you are not interested in applying safe migration rules while the service
-is running, you can still upgrade the database the old way, by stopping the
-service and then applying all available rules::
-
- neutron-db-manage upgrade head[s]
-
-This will apply all the rules from both the expand and the contract branches,
-in the proper order.
-
-
-Tagging milestone revisions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When a named release (liberty, mitaka, etc.) is done for neutron or a
-sub-project, the alembic revision scripts at the head of each branch for that
-release must be tagged. This is referred to as a milestone revision tag.
-
-For example, `here <https://review.openstack.org/228272>`_ is a patch that tags
-the liberty milestone revisions for the neutron-fwaas sub-project. Note that
-each branch (expand and contract) is tagged.
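-
-As a sketch of what such a patch adds (the attribute below follows the
-pattern used by the Liberty-era migration code; consult the linked patch for
-the authoritative form), the tag is a small marker placed in the head
-revision script of each branch::
-
-    # .../versions/liberty/expand/<head revision>.py
-    from neutron.db import migration
-
-    # marks this revision as the liberty milestone head
-    neutron_milestone = [migration.LIBERTY]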
-
-Tagging milestones allows neutron-db-manage to upgrade the schema to a
-milestone release, e.g.::
-
- neutron-db-manage upgrade liberty
diff --git a/doc/source/devref/api_extensions.rst b/doc/source/devref/api_extensions.rst
deleted file mode 100644 (file)
index 7af6d0f..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-API Extensions
-==============
-
-API extensions are the standard way of introducing new functionality
-to the Neutron project; they allow plugins to
-determine whether they wish to support the functionality or not.
-
-Examples
---------
-
-The easiest way to demonstrate how an API extension is written is to study an
-existing API extension and explain the different layers.
-
-.. toctree::
-   :maxdepth: 1
-
-   security_group_api
diff --git a/doc/source/devref/api_layer.rst b/doc/source/devref/api_layer.rst
deleted file mode 100644 (file)
index dd23fb9..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Neutron WSGI/HTTP API layer
-===========================
-
-This section will cover the internals of Neutron's HTTP API, and the classes
-in Neutron that can be used to create Extensions to the Neutron API.
-
-Python web applications interface with web servers through the Python Web
-Server Gateway Interface (WSGI) - defined in `PEP 333 <http://legacy.python.org/dev/peps/pep-0333/>`_.
-
-Startup
--------
-
-Neutron's WSGI server is started from the `server module <http://git.openstack.org/cgit/openstack/neutron/tree/neutron/server/__init__.py>`_
-and the entry point `serve_wsgi` is called to build an instance of the
-`NeutronApiService`_, which is then returned to the server module. The server
-module then spawns an `Eventlet`_ `GreenPool`_ that runs the WSGI
-application and responds to requests from clients.
-
-
-.. _NeutronApiService: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/service.py
-
-.. _Eventlet: http://eventlet.net/
-
-.. _GreenPool: http://eventlet.net/doc/modules/greenpool.html
-
-WSGI Application
-----------------
-
-During the building of the NeutronApiService, the `_run_wsgi` function
-creates the WSGI application using the `load_paste_app` function inside
-`config.py`_, which parses `api-paste.ini`_ and builds the application with
-`Paste`_'s `deploy`_.
-
-The api-paste.ini file defines the WSGI applications and routes - using the
-`Paste INI file format`_.
-
-The INI file directs paste to instantiate the `APIRouter`_ class of
-Neutron, which contains several methods that map Neutron resources (such as
-Ports, Networks, Subnets) to URLs, and the controller for each resource.
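-
-For illustration, an abridged sketch of the kind of entries found in
-``api-paste.ini`` (the exact contents vary by release)::
-
-    [composite:neutron]
-    use = egg:Paste#urlmap
-    /: neutronversions
-    /v2.0: neutronapi_v2_0
-
-    [app:neutronapiapp_v2_0]
-    paste.app_factory = neutron.api.v2.router:APIRouter.factory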
-
-
-.. _config.py: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/common/config.py
-
-.. _api-paste.ini: http://git.openstack.org/cgit/openstack/neutron/tree/etc/api-paste.ini
-
-.. _APIRouter: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/router.py
-
-.. _Paste: http://pythonpaste.org/
-
-.. _Deploy: http://pythonpaste.org/deploy/
-
-.. _Paste INI file format: http://pythonpaste.org/deploy/#applications
-
-Further reading
----------------
-
-`Yong Sheng Gong: Deep Dive into Neutron <http://www.slideshare.net/gongys2004/inside-neutron-2>`_
diff --git a/doc/source/devref/callbacks.rst b/doc/source/devref/callbacks.rst
deleted file mode 100644 (file)
index 42ebf52..0000000
+++ /dev/null
@@ -1,420 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Neutron Callback System
-=======================
-
-In Neutron, core and service components may need to cooperate during the
-execution of certain operations, or they may need to react upon the occurrence
-of certain events. For instance, when a Neutron resource is associated to
-multiple services, the components in charge of these services may need to play
-an active role in determining what the right state of the resource needs to be.
-
-The cooperation may be achieved by making each object aware of each other, but
-this leads to tight coupling, or alternatively it can be achieved by using a
-callback-based system, where the same objects are allowed to cooperate in a
-loose manner.
-
-This is particularly important since the spin-off of the advanced services like
-VPN, Firewall, and Load Balancer, where each service's codebase lives independently
-from the core and from one another. This means that tight coupling is no longer
-a practical solution for object cooperation. In addition, as more services
-are developed independently, there is no viable way to integrate them with the
-Neutron core. A callback system, and its registry, tries to address these issues.
-
-In object-oriented software systems, method invocation is also known as message
-passing: an object passes a message to another object, and it may or may not expect
-a message back. This point-to-point interaction can take place between the parties
-directly involved in the communication, or it can happen via an intermediary. The
-intermediary is then in charge of keeping track of who is interested in the messages
-and in delivering the messages back and forth, when required. As mentioned earlier,
-the use of an intermediary has the benefit of decoupling the parties involved
-in the communications, as now they only need to know about the intermediary; the
-other benefit is that the use of an intermediary opens up the possibility of
-multiple party communication: more than one object can express interest in
-receiving the same message, and the same message can be delivered to more than
-one object. To this aim, the intermediary is the entity that exists throughout
-the system lifecycle, as it needs to be able to track whose interest is associated
-to what message.
-
-In a design for a system that enables callback-based communication, the following
-aspects need to be taken into account:
-
-* how to become consumer of messages (i.e. how to be on the receiving end of the message);
-* how to become producer of messages (i.e. how to be on the sending end of the message);
-* how to consume/produce messages selectively;
-
-Translated and narrowed down to Neutron's needs, this means designing a callback
-system where messages are about lifecycle events (e.g. before creation, before
-deletion, etc.) of Neutron resources (e.g. networks, routers, ports, etc.), where
-the various parties can express interest in knowing when these events take place
-for specific resources.
-
-Rather than keeping the conversation abstract, let us delve into some examples
-that help illustrate some of the principles behind the provided mechanism.
-
-
-Subscribing to events
----------------------
-
-Imagine that you have entities A, B, and C that have some common business over router
-creation. A wants to tell B and C that the router has been created and that they need
-to get on and do whatever they are supposed to do. In a callback-less world this would
-work like so:
-
-::
-
-  # A is done creating the resource
-  # A gets hold of the references of B and C
-  # A calls B
-  # A calls C
-  B->my_random_method_for_knowing_about_router_created()
-  C->my_random_very_difficult_to_remember_method_about_router_created()
-
-If B and/or C change, things go sour. In a callback-based world, things become a lot
-more uniform and straightforward:
-
-::
-
-  # B and C ask I to be notified when A is done creating the resource
-
-  # ...
-  # A is done creating the resource
-  # A gets hold of the reference to the intermediary I
-  # A calls I
-  I->notify()
-
-Since B and C will have expressed interest in knowing about A's business, 'I' will
-deliver the messages to B and C. If B and C change, A and 'I' do not need to change.
-
-In practical terms, this scenario translates into the code below:
-
-::
-
-  from neutron.callbacks import events
-  from neutron.callbacks import resources
-  from neutron.callbacks import registry
-
-
-  def callback1(resource, event, trigger, **kwargs):
-      print('Callback1 called by trigger: ', trigger)
-      print('kwargs: ', kwargs)
-
-  def callback2(resource, event, trigger, **kwargs):
-      print('Callback2 called by trigger: ', trigger)
-      print('kwargs: ', kwargs)
-
-
-  # B and C express interest with I
-  registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
-  registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
-  print('Subscribed')
-
-
-  # A notifies
-  def do_notify():
-      kwargs = {'foo': 'bar'}
-      registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
-
-
-  print('Notifying...')
-  do_notify()
-
-
-The output is:
-
-::
-
-  > Subscribed
-  > Notifying...
-  > Callback2 called by trigger:  <function do_notify at 0x7f2a5d663410>
-  > kwargs:  {'foo': 'bar'}
-  > Callback1 called by trigger:  <function do_notify at 0x7f2a5d663410>
-  > kwargs:  {'foo': 'bar'}
-
-Thanks to the intermediary existence throughout the life of the system, A, B, and C
-are flexible to evolve their internals, dynamics, and lifecycles.
-
-
-Subscribing and aborting events
--------------------------------
-
-Interestingly in Neutron, certain events may need to be forbidden from happening due to
-the nature of the resources involved. To this aim, the callback-based mechanism has been
-designed to support a use case where a callback subscribed to a specific event can
-propagate a message back to the sender, so that the sender is alerted and can stop the
-execution of the activity that led to the message dispatch in the first place.
-
-The typical example is where a resource, like a router, is used by one or more high-level
-service(s), like a VPN or a Firewall, and actions like interface removal or router destruction
-cannot take place, because the resource is shared.
-
-To address this scenario, special events are introduced, 'BEFORE_*' events, to which callbacks
-can subscribe and have the opportunity to 'abort', by raising an exception when notified.
-
-Since multiple callbacks may express an interest in the same event for a particular resource,
-and since callbacks are executed independently from one another, this may lead to situations
-where notifications that occurred before the exception must be aborted. To this aim, when an
-exception occurs during the notification process, an abort_* event is propagated immediately
-after. It is up to the callback developer to determine whether subscribing to an abort
-notification is required in order to revert the actions performed during the initial execution
-of the callback (when the BEFORE_* event was fired). Exceptions caused by callbacks registered
-to abort events are ignored. The snippet below shows this in action:
-
-::
-
-  from neutron.callbacks import events
-  from neutron.callbacks import exceptions
-  from neutron.callbacks import resources
-  from neutron.callbacks import registry
-
-
-  def callback1(resource, event, trigger, **kwargs):
-      raise Exception('I am failing!')
-
-  def callback2(resource, event, trigger, **kwargs):
-      print('Callback2 called by %s on event  %s' % (trigger, event))
-
-
-  registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
-  registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
-  registry.subscribe(callback2, resources.ROUTER, events.ABORT_CREATE)
-  print('Subscribed')
-
-
-  def do_notify():
-      kwargs = {'foo': 'bar'}
-      registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
-
-
-  print('Notifying...')
-  try:
-      do_notify()
-  except exceptions.CallbackFailure as e:
-      print('Error: ', e)
-
-The output is:
-
-::
-
-  > Subscribed
-  > Notifying...
-  > Callback2 called by <function do_notify at 0x7f3194c7f410> on event  before_create
-  > Callback2 called by <function do_notify at 0x7f3194c7f410> on event  abort_create
-  > Error:  Callback __main__.callback1 failed with "I am failing!"
-
-In this case, upon the notification of the BEFORE_CREATE event, Callback1 triggers an exception
-that can be used to stop the action from taking place in do_notify(). On the other end, Callback2
-will be executing twice, once for dealing with the BEFORE_CREATE event, and once to undo the
-actions during the ABORT_CREATE event. It is worth noting that it is not mandatory to have
-the same callback register to both BEFORE_* and the respective ABORT_* event; as a matter of
-fact, it is best to make use of different callbacks to keep the two logic separate.
-
-
-Unsubscribing from events
--------------------------
-
-There are a few options to unsubscribe registered callbacks:
-
-* clear(): unsubscribes all subscribed callbacks. This can be useful especially
-  when winding down the system, when notifications shall no longer be triggered.
-* unsubscribe(): selectively unsubscribes a callback for a specific resource's
-  event. Say callback C has subscribed to event A for resource R; after the
-  unsubscribe() invocation, any notification of event A for resource R will no
-  longer be handed over to C.
-* unsubscribe_by_resource(): say that callback C has subscribed to events A, B,
-  and C for resource R; after the unsubscribe_by_resource() invocation, any
-  notification of events related to resource R will no longer be handed over to C.
-* unsubscribe_all(): say that callback C has subscribed to events A and B for
-  resource R1, and events C and D for resource R2; after the unsubscribe_all()
-  invocation, any notification of events pertaining to resources R1 and R2 will
-  no longer be handed over to C.
-
-The snippet below shows these concepts in action:
-
-::
-
-  from neutron.callbacks import events
-  from neutron.callbacks import exceptions
-  from neutron.callbacks import resources
-  from neutron.callbacks import registry
-
-
-  def callback1(resource, event, trigger, **kwargs):
-      print('Callback1 called by %s on event %s for resource %s' % (trigger, event, resource))
-
-
-  def callback2(resource, event, trigger, **kwargs):
-      print('Callback2 called by %s on event %s for resource %s' % (trigger, event, resource))
-
-
-  registry.subscribe(callback1, resources.ROUTER, events.BEFORE_READ)
-  registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
-  registry.subscribe(callback1, resources.ROUTER, events.AFTER_DELETE)
-  registry.subscribe(callback1, resources.PORT, events.BEFORE_UPDATE)
-  registry.subscribe(callback2, resources.ROUTER_GATEWAY, events.BEFORE_UPDATE)
-  print('Subscribed')
-
-
-  def do_notify():
-      print('Notifying...')
-      kwargs = {'foo': 'bar'}
-      registry.notify(resources.ROUTER, events.BEFORE_READ, do_notify, **kwargs)
-      registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
-      registry.notify(resources.ROUTER, events.AFTER_DELETE, do_notify, **kwargs)
-      registry.notify(resources.PORT, events.BEFORE_UPDATE, do_notify, **kwargs)
-      registry.notify(resources.ROUTER_GATEWAY, events.BEFORE_UPDATE, do_notify, **kwargs)
-
-
-  do_notify()
-  registry.unsubscribe(callback1, resources.ROUTER, events.BEFORE_READ)
-  do_notify()
-  registry.unsubscribe_by_resource(callback1, resources.PORT)
-  do_notify()
-  registry.unsubscribe_all(callback1)
-  do_notify()
-  registry.clear()
-  do_notify()
-
-The output is:
-
-::
-
-  Subscribed
-  Notifying...
-  Callback1 called by <function do_notify at 0x7f062c8f67d0> on event before_read for resource router
-  Callback1 called by <function do_notify at 0x7f062c8f67d0> on event before_create for resource router
-  Callback1 called by <function do_notify at 0x7f062c8f67d0> on event after_delete for resource router
-  Callback1 called by <function do_notify at 0x7f062c8f67d0> on event before_update for resource port
-  Callback2 called by <function do_notify at 0x7f062c8f67d0> on event before_update for resource router_gateway
-  Notifying...
-  Callback1 called by <function do_notify at 0x7f062c8f67d0> on event before_create for resource router
-  Callback1 called by <function do_notify at 0x7f062c8f67d0> on event after_delete for resource router
-  Callback1 called by <function do_notify at 0x7f062c8f67d0> on event before_update for resource port
-  Callback2 called by <function do_notify at 0x7f062c8f67d0> on event before_update for resource router_gateway
-  Notifying...
-  Callback1 called by <function do_notify at 0x7f062c8f67d0> on event before_create for resource router
-  Callback1 called by <function do_notify at 0x7f062c8f67d0> on event after_delete for resource router
-  Callback2 called by <function do_notify at 0x7f062c8f67d0> on event before_update for resource router_gateway
-  Notifying...
-  Callback2 called by <function do_notify at 0x7f062c8f67d0> on event before_update for resource router_gateway
-  Notifying...
-
-
-FAQ
----
-
-Can I use the callbacks registry to subscribe and notify non-core resources and events?
-
-   Short answer is yes. The callbacks module defines literals for what are considered core Neutron
-   resources and events. However, the ability to subscribe/notify is not limited to these as you
-   can use your own defined resources and/or events. Just make sure you use string literals, as
-   typos are common, and the registry does not provide any runtime validation. Therefore, make
-   sure you test your code!
-
-What is the relationship between Callbacks and Taskflow?
-
-   There is no overlap or mutual exclusion between Callbacks and Taskflow; as a
-   matter of fact, they can be combined: you could have a callback that goes on
-   to trigger a taskflow. It is a nice way of separating implementation from
-   abstraction, because you can keep the callback in place and swap Taskflow for
-   something else.
-
-Is there any ordering guarantee during notifications?
-
-  No, the ordering in which callbacks are notified is completely arbitrary by
-  design: callbacks should know nothing about each other, and ordering should not
-  matter; a callback will always be notified and its outcome should always be the
-  same regardless of the order in which it is notified. Priorities could be a
-  future extension, if a use case arises that requires enforced ordering.
-
-How is the notifying object expected to interact with the subscribing objects?
-
-  The ``notify`` method implements a one-way communication paradigm: the notifier
-  sends a message without expecting a response back (in other words, it fires and
-  forgets). However, due to the nature of Python, the payload can be mutated by
-  the subscribing objects, and this can lead to unexpected behavior in your code
-  if you are not prepared for it. Bear in mind that passing by value using
-  deepcopy was deliberately not chosen, for efficiency reasons. Having said that,
-  if you intend for the notifier object to expect a response, then the notifier
-  itself would need to act as a subscriber.
-
-Is the registry thread-safe?
-
-  Short answer is no: it is not safe to make mutations while callbacks are being
-  called (more details as to why can be found `here <https://hg.python.org/releasing/2.7.9/file/753a8f457ddc/Objects/dictobject.c#l937>`_).
-  A mutation could happen if a 'subscribe'/'unsubscribe' operation interleaves
-  with the execution of the notify loop. Although there is a possibility that
-  things may end up in a bad state, the registry works correctly under the
-  assumption that subscriptions happen at the very beginning of the life of the
-  process and that the unsubscriptions (if any) take place at the very end. In
-  this case, the chances that things go badly are pretty slim. Making the
-  registry thread-safe will be considered as a future improvement.
-
-What kind of function can be a callback?
-
-  Anything you fancy: lambdas, 'closures', and class, object, or module methods. For instance:
-
-::
-
-  from neutron.callbacks import events
-  from neutron.callbacks import resources
-  from neutron.callbacks import registry
-
-
-  def callback1(resource, event, trigger, **kwargs):
-      print('module callback')
-
-
-  class MyCallback(object):
-
-      def callback2(self, resource, event, trigger, **kwargs):
-          print('object callback')
-
-      @classmethod
-      def callback3(cls, resource, event, trigger, **kwargs):
-          print('class callback')
-
-
-  c = MyCallback()
-  registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
-  registry.subscribe(c.callback2, resources.ROUTER, events.BEFORE_CREATE)
-  registry.subscribe(MyCallback.callback3, resources.ROUTER, events.BEFORE_CREATE)
-
-  def do_notify():
-      def nested_subscribe(resource, event, trigger, **kwargs):
-          print('nested callback')
-
-      registry.subscribe(nested_subscribe, resources.ROUTER, events.BEFORE_CREATE)
-
-      kwargs = {'foo': 'bar'}
-      registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
-
-
-  print('Notifying...')
-  do_notify()
-
-And the output is going to be:
-
-::
-
-  Notifying...
-  module callback
-  object callback
-  class callback
-  nested callback
diff --git a/doc/source/devref/client_command_extensions.rst b/doc/source/devref/client_command_extensions.rst
deleted file mode 100644 (file)
index da9467d..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Client command extension support
-================================
-
-Client command extensions add support for extending the neutron client while
-keeping ease of creation in mind.
-
-The full document can be found in the python-neutronclient repository:
-http://docs.openstack.org/developer/python-neutronclient/devref/client_command_extensions.html
diff --git a/doc/source/devref/contribute.rst b/doc/source/devref/contribute.rst
deleted file mode 100644 (file)
index 3a05112..0000000
+++ /dev/null
@@ -1,632 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Contributing new extensions to Neutron
-======================================
-
-Introduction
-------------
-
-Neutron has a pluggable architecture, with a number of extension points.
-This documentation covers aspects relevant to contributing new Neutron
-v2 core (aka monolithic) plugins, ML2 mechanism drivers, and L3 service
-plugins. This document will initially cover a number of process-oriented
-aspects of the contribution process, and proceed to provide a how-to guide
-that shows how to go from zero lines of code to successfully contributing new
-extensions to Neutron. In the remainder of this guide, we will try to
-use practical examples as much as we can so that people have working
-solutions they can start from.
-
-This guide is for a developer who wants to have a degree of visibility
-within the OpenStack Networking project. If you are a developer who
-wants to provide a Neutron-based solution without interacting with the
-Neutron community, you are free to do so, but you can stop reading now,
-as this guide is not for you.
-
-Plugins and drivers for non-reference implementations are known as
-"third-party" code. This includes code for supporting vendor products, as well
-as code for supporting open-source networking implementations.
-
-Before the Kilo release these plugins and drivers were included in the Neutron
-tree. During the Kilo cycle the third-party plugins and drivers underwent the
-first phase of a process called decomposition. During this phase, each plugin
-and driver moved the bulk of its logic to a separate git repository, while
-leaving a thin "shim" in the neutron tree together with the DB models and
-migrations (and perhaps some config examples).
-
-During the Liberty cycle the decomposition concept was taken to its conclusion
-by allowing third-party code to exist entirely out of tree. Further extension
-mechanisms have been provided to better support external plugins and drivers
-that alter the API and/or the data model.
-
-In the Mitaka cycle we will **require** all third-party code to be moved out of
-the neutron tree completely.
-
-'Outside the tree' can be anything that is publicly available: it may be a repo
-on git.openstack.org for instance, a tarball, a pypi package, etc. A
-plugin/driver maintainer team self-governs in order to promote sharing, reuse,
-innovation, and release of the 'out-of-tree' deliverable. It should not be
-required for any member of the core team to be involved with this process,
-although core members of the Neutron team can participate in whichever capacity
-is deemed necessary to facilitate out-of-tree development.
-
-This guide is aimed at you as the maintainer of code that integrates with
-Neutron but resides in a separate repository.
-
-
-Contribution Process
---------------------
-
-If you want to extend OpenStack Networking with your technology, and you want
-to do it within the visibility of the OpenStack project, follow the guidelines
-and examples below. We'll describe best practices for:
-
-* Design and Development;
-* Testing and Continuous Integration;
-* Defect Management;
-* Backport Management for plugin specific code;
-* DevStack Integration;
-* Documentation;
-
-Once you have everything in place you may want to add your project to the list
-of Neutron sub-projects. Submit a patch via a gerrit review to neutron to add
-your project to ``doc/source/devref/sub_projects.rst``.
-
-
-Design and Development
-----------------------
-
-Assuming you have a working repository, any development to your own repo does
-not need any blueprint, specification or bugs against Neutron. However, if your
-project is a part of the Neutron Stadium effort, you are expected to
-participate in the principles of the Four Opens, meaning your design should be
-done in the open. Thus, you are encouraged to file documentation for changes in
-your own repository.
-
-If your code is hosted on git.openstack.org then the gerrit review system is
-automatically provided. Contributors should follow the review guidelines
-similar to those of Neutron. However, you as the maintainer have the
-flexibility to choose who can approve/merge changes in your own repo.
-
-It is recommended (but not required, see `policies
-<http://docs.openstack.org/developer/neutron/policies/thirdparty-ci.html>`_)
-that you set up a third-party CI system. This will provide a vehicle for
-checking the third-party code against Neutron changes. See `Testing and
-Continuous Integration`_ below for more detailed recommendations.
-
-Design documents can still be supplied in form of Restructured Text (RST)
-documents, within the same third-party library repo. If changes to the common
-Neutron code are required, an `RFE
-<http://docs.openstack.org/developer/neutron/policies/blueprints.html#neutron-request-for-feature-enhancements>`_
-may need to be filed. However every case is different and you are invited to
-seek guidance from Neutron core reviewers about what steps to follow.
-
-
-Testing and Continuous Integration
-----------------------------------
-
-The following strategies are recommendations only, since third-party CI testing
-is not an enforced requirement. However, these strategies are employed by the
-majority of the plugin/driver contributors that actively participate in the
-Neutron development community, since they have learned from experience how
-quickly their code can fall out of sync with the rapidly changing Neutron core
-code base.
-
-* You should run unit tests in your own external library (e.g. on
-  git.openstack.org, where the Jenkins setup comes for free).
-
-* Your third-party CI should validate third-party integration with Neutron via
-  functional testing. The third-party CI is a communication mechanism. The
-  objective of this mechanism is as follows:
-
-  * it communicates to you when someone has contributed a change that
-    potentially breaks your code. It is then up to you maintaining the affected
-    plugin/driver to determine whether the failure is transient or real, and
-    resolve the problem if it is.
-  * it communicates to a patch author that they may be breaking a plugin/driver.
-    If they have the time/energy/relationship with the maintainer of the
-    plugin/driver in question, then they can (at their discretion) work to
-    resolve the breakage.
-  * it communicates to the community at large whether a given plugin/driver
-    is being actively maintained.
-  * A maintainer that is perceived to be responsive to failures in their
-    third-party CI jobs is likely to generate community goodwill.
-
-  It is worth noting that if the plugin/driver repository is hosted on
-  git.openstack.org, due to current openstack-infra limitations, it is not
-  possible to have third-party CI systems participating in the gate pipeline
-  for the repo. This means that the only validation provided during the merge
-  process to the repo is through unit tests. Post-merge hooks can still be
-  exploited to provide third-party CI feedback, and alert you of potential
-  issues. As mentioned above, third-party CI systems will continue to validate
-  Neutron core commits. This will allow them to detect when incompatible
-  changes occur, whether they are in Neutron or in the third-party repo.
-
-
-Defect Management
------------------
-
-Bugs affecting third-party code should *not* be filed in the Neutron project on
-launchpad. Bug tracking can be done in any system you choose, but by creating a
-third-party project in launchpad, bugs that affect both Neutron and your code
-can be more easily tracked using launchpad's "also affects project" feature.
-
-Security Issues
-~~~~~~~~~~~~~~~
-
-Here are some answers to how to handle security issues in your repo, taken
-from `this openstack-dev mailing list message
-<http://lists.openstack.org/pipermail/openstack-dev/2015-July/068617.html>`_:
-
-- How should your security issues be managed?
-
-The OpenStack Vulnerability Management Team (VMT) follows a `documented process
-<https://security.openstack.org/vmt-process.html>`_ which can basically be
-reused by any project-team when needed.
-
-- Should the OpenStack security team be involved?
-
-The OpenStack VMT directly oversees vulnerability reporting and disclosure for
-a `subset of OpenStack source code repositories
-<https://wiki.openstack.org/wiki/Security_supported_projects>`_.  However they
-are still quite happy to answer any questions you might have about
-vulnerability management for your own projects even if they're not part of that
-set. Feel free to reach out to the VMT in public or in private.
-
-Also, the VMT is an autonomous subgroup of the much larger `OpenStack Security
-project-team
-<http://governance.openstack.org/reference/projects/security.html>`_. They're a
-knowledgeable bunch and quite responsive if you want to get their opinions or
-help with security-related issues (vulnerabilities or otherwise).
-
-- Does a CVE need to be filed?
-
-It can vary widely. If a commercial distribution such as Red Hat is
-redistributing a vulnerable version of your software then they may assign one
-anyway even if you don't request one yourself. Or the reporter may request one;
-the reporter may even be affiliated with an organization who has already
-assigned/obtained a CVE before they initiate contact with you.
-
-- Do the maintainers need to publish OSSN or equivalent documents?
-
-OpenStack Security Advisories (OSSA) are official publications of the OpenStack
-VMT and only cover VMT-supported software. OpenStack Security Notes (OSSN) are
-published by editors within the OpenStack Security project-team on more general
-security topics and may even cover issues in non-OpenStack software commonly
-used in conjunction with OpenStack, so it's at their discretion as to whether
-they would be able to accommodate a particular issue with an OSSN.
-
-However, these are all fairly arbitrary labels, and what really matters in the
-grand scheme of things is that vulnerabilities are handled seriously, fixed
-with due urgency and care, and announced widely -- not just on relevant
-OpenStack mailing lists but also preferably somewhere with broader distribution
-like the `Open Source Security mailing list
-<http://oss-security.openwall.org/wiki/mailing-lists/oss-security>`_. The goal
-is to get information on your vulnerabilities, mitigating measures and fixes
-into the hands of the people using your software in a timely manner.
-
-- Anything else to consider here?
-
-The OpenStack VMT is in the process of trying to reinvent itself so that it can
-better scale within the context of the "Big Tent." This includes making sure
-the policy/process documentation is more consumable and reusable even by
-project-teams working on software outside the scope of our charter. It's a work
-in progress, and any input is welcome on how we can make this function well for
-everyone.
-
-
-Backport Management Strategies
-------------------------------
-
-This section applies only to third-party maintainers who had code in the
-Neutron tree during the Kilo and earlier releases. It will be obsolete once the
-Kilo release is no longer supported.
-
-If a change made to out-of-tree third-party code needs to be back-ported to
-in-tree code in a stable branch, you may submit a review without a
-corresponding master branch change. The change will be evaluated by core
-reviewers for stable branches to ensure that the backport is justified and that
-it does not affect Neutron core code stability.
-
-
-DevStack Integration Strategies
--------------------------------
-
-When developing and testing a new or existing plugin or driver, the aid provided
-by DevStack is incredibly valuable: DevStack can help get all the software bits
-installed and configured correctly and, more importantly, in a predictable way.
-For DevStack integration there are a few options available, and they may or may not
-make sense depending on whether you are contributing a new or existing plugin or
-driver.
-
-If you are contributing a new plugin, the approach to choose should be based on
-`Extras.d Hooks' externally hosted plugins
-<http://docs.openstack.org/developer/devstack/plugins.html#extras-d-hooks>`_.
-With the extras.d hooks, the DevStack integration is co-located with the
-third-party integration library, and it leads to the greatest level of
-flexibility when dealing with DevStack based dev/test deployments.
-
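-For example, assuming a hypothetical ``networking-foo`` repo that ships a
-``devstack/plugin.sh``, a deployer could enable it from ``local.conf`` like so
-(a sketch, not from the original document)::
-
-    [[local|localrc]]
-    # Fetch and run the devstack plugin hosted in the networking-foo repo.
-    enable_plugin networking-foo https://git.openstack.org/openstack/networking-foo
-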
-One final consideration is worth making for third-party CI setups: if `Devstack
-Gate <https://git.openstack.org/cgit/openstack-infra/devstack-gate>`_ is used,
-it does provide hook functions that can be executed at specific times of the
-devstack-gate-wrap script run.  For example, the `Neutron Functional job
-<https://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/neutron.yaml>`_
-uses them. For more details see `devstack-vm-gate-wrap.sh
-<https://git.openstack.org/cgit/openstack-infra/devstack-gate/tree/devstack-vm-gate-wrap.sh>`_.
-
-
-Project Initial Setup
----------------------
-
-The how-to below assumes that the third-party library will be hosted on
-git.openstack.org. This lets you tap into the entire OpenStack CI infrastructure
-and can be a great place to start from to contribute your new or existing
-driver/plugin. The list of steps below is a summarized version of what you can
-find at http://docs.openstack.org/infra/manual/creators.html. They are meant to
-be the bare minimum you have to complete in order to get off the ground.
-
-* Create a public repository: this can be a personal git.openstack.org repo or any
-  publicly available git repo, e.g. ``https://github.com/john-doe/foo.git``. This
-  would be a temporary buffer to be used to feed the one on git.openstack.org.
-* Initialize the repository: if you are starting afresh, you may *optionally*
-  want to use cookiecutter to get a skeleton project. You can learn how to use
-  cookiecutter on https://git.openstack.org/cgit/openstack-dev/cookiecutter.
-  If you want to build the repository from an existing Neutron module, you may
-  want to skip this step now, build the history first (next step), and come back
-  here to initialize the remainder of the repository with other files being
-  generated by the cookiecutter (like tox.ini, setup.cfg, setup.py, etc.).
-* Create a repository on git.openstack.org (see `Official Sub-Projects
-  <http://docs.openstack.org/developer/neutron/devref/sub_projects.html>`_). For
-  this you need the help of the OpenStack infra team. It is worth noting that
-  you only get one shot at creating the repository on git.openstack.org. This
-  is the time you get to choose whether you want to start from a clean slate,
-  or you want to import the repo created during the previous step. In the
-  latter case, you can do so by specifying the upstream section for your
-  project in project-config/gerrit/project.yaml.  Steps are documented on the
-  `Repository Creator's Guide
-  <http://docs.openstack.org/infra/manual/creators.html>`_.
-* Ask for your Launchpad user to be added to the newly created core team. Steps are
-  documented in `this section
-  <http://docs.openstack.org/infra/manual/creators.html#update-the-gerrit-group-members>`_.
-* Fix, fix, fix: at this point you have an external base to work on. You can
-  develop against the new git.openstack.org project, the same way you work with
-  any other OpenStack project: you have pep8, docs, and python27 CI jobs that
-  validate your patches when posted to Gerrit. For instance, one thing you
-  would need to do is to define an entry point for your plugin or driver in
-  your own setup.cfg similarly as to how it is done in the `setup.cfg for ODL
-  <https://git.openstack.org/cgit/openstack/networking-odl/tree/setup.cfg#n31>`_.
-* Create third-party CI account: if you do not already have one, follow
-  instructions for `third-party CI
-  <http://docs.openstack.org/infra/system-config/third_party.html>`_ to get
-  one.
-
-Internationalization support
-----------------------------
-
-OpenStack is committed to broad international support.
-Internationalization (I18n) is one of the important areas in making OpenStack
-ubiquitous. Each project is encouraged to support I18n.
-
-This section describes how to set up translation support.
-The description in this section uses the following variables.
-
-* repository : ``openstack/${REPOSITORY}`` (e.g., ``openstack/networking-foo``)
-* top level python path : ``${MODULE_NAME}`` (e.g., ``networking_foo``)
-
-oslo.i18n
-~~~~~~~~~
-
-* Each subproject repository should have its own oslo.i18n integration
-  wrapper module ``${MODULE_NAME}/_i18n.py``. The detail is found at
-  http://docs.openstack.org/developer/oslo.i18n/usage.html.
-
-  .. note::
-
-     **DOMAIN** name should match your **repository** name ``${REPOSITORY}``.
-     (Note that it is not a top level python path name ``${MODULE_NAME}``.)
-
-* Import ``_()`` from your ``${MODULE_NAME}/_i18n.py``.
-
-  .. warning::
-
-     Do not use ``_()`` in the builtins namespace which is
-     registered by **gettext.install()** in ``neutron/__init__.py``.
-     It is now deprecated, as described in the oslo.i18n documentation.
-
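-A minimal ``${MODULE_NAME}/_i18n.py`` might look like the following sketch,
-based on the oslo.i18n usage documentation (``networking-foo`` is a placeholder
-domain)::
-
-    import oslo_i18n
-
-    # NOTE: the domain matches the repository name ${REPOSITORY},
-    # not the top level python path ${MODULE_NAME}.
-    DOMAIN = "networking-foo"
-
-    _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
-
-    # The primary translation function using the well-known name "_"
-    _ = _translators.primary
-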
-Setting up translation support
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You need to create or edit the following files to start translation support:
-
-* setup.cfg
-* babel.cfg
-* skeleton POT file
-
-We have a good example for an oslo project at
-https://review.openstack.org/#/c/98248/.
-
-Add the following to ``setup.cfg``::
-
-    [extract_messages]
-    keywords = _ gettext ngettext l_ lazy_gettext
-    mapping_file = babel.cfg
-    output_file = ${REPOSITORY}/locale/${REPOSITORY}.pot
-
-    [compile_catalog]
-    directory = ${REPOSITORY}/locale
-    domain = ${REPOSITORY}
-
-    [update_catalog]
-    domain = ${REPOSITORY}
-    output_dir = ${REPOSITORY}/locale
-    input_file = ${REPOSITORY}/locale/${REPOSITORY}.pot
-
-Note that ``${REPOSITORY}`` is used in all names. This convention comes from
-the implementation of the current infra scripts. Changing it affects many
-projects, so it is not a good idea.
-
-Create ``babel.cfg`` with the following contents::
-
-    [python: **.py]
-
-Finally, create a skeleton POT file.
-To import translations, it needs to be placed in the proper location.
-Run the following commands in the top directory of your repository::
-
-  $ mkdir -p ${REPOSITORY}/locale
-  $ tox -e venv -- python setup.py extract_messages
-
-Now you see ``${REPOSITORY}/locale/${REPOSITORY}.pot``.
-
-Enable Translation
-~~~~~~~~~~~~~~~~~~
-
-To update and import translations, you need to make a change in project-config.
-A good example is found at https://review.openstack.org/#/c/224222/.
-After doing this, the necessary jobs will be run and push/pull a
-message catalog to/from the translation infrastructure.
-
-Integrating with the Neutron system
------------------------------------
-
-Configuration Files
-~~~~~~~~~~~~~~~~~~~
-
-The ``data_files`` in the ``[files]`` section of ``setup.cfg`` of Neutron shall
-not contain any third-party references. These shall be located in the same
-section of the third-party repo's own ``setup.cfg`` file.
-
-* Note: Care should be taken when naming sections in configuration files. When
-  the Neutron service or an agent starts, oslo.config loads sections from all
-  specified config files. This means that if a section [foo] exists in multiple
-  config files, duplicate settings will collide. It is therefore recommended to
-  prefix section names with a third-party string, e.g. [vendor_foo].
-
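-  For example, a hypothetical third-party config file might use a prefixed
-  section name like this (a sketch only)::
-
-      [vendor_foo]
-      # Options private to the networking-foo plugin/driver.
-      foo_api_endpoint = http://127.0.0.1:8080
-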
-Since Mitaka, configuration files are not maintained in the git repository but
-should be generated as follows::
-
-    tox -e genconfig
-
-If a 'tox' environment is unavailable, then you can run the following script
-instead to generate the configuration files::
-
-    ./tools/generate_config_file_samples.sh
-
-It is advised that subprojects do not keep their configuration files in their
-respective trees and instead generate them using a similar approach as Neutron
-does.
-
-**ToDo: Inclusion in OpenStack documentation?**
-    Is there a recommended way to have third-party config options listed in the
-    configuration guide in docs.openstack.org?
-
-
-Database Models and Migrations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A third-party repo may contain database models for its own tables. Although
-these tables are in the Neutron database, they are independently managed
-entirely within the third-party code. Third-party code shall **never** modify
-neutron core tables in any way.
-
-Each repo has its own *expand* and *contract* `alembic migration branches
-<alembic_migrations.html#migration-branches>`_. A third-party repo's alembic
-migration branches may operate only on tables that are owned by the repo.
-
-* Note: Care should be taken when adding new tables. To prevent collision of
-  table names it is **required** to prefix them with a vendor/plugin string.
-
-* Note: A third-party maintainer may opt to use a separate database for their
-  tables. This may complicate cases where there are foreign key constraints
-  across schemas for DBMS that do not support this well. Third-party maintainer
-  discretion advised.
-
-The database tables owned by a third-party repo can have references to fields
-in neutron core tables. However, the alembic branch for a plugin/driver repo
-shall never update any part of a table that it does not own.
-
-**Note: What happens when a referenced item changes?**
-
-* **Q:** If a driver's table has a reference (for example a foreign key) to a
-  neutron core table, and the referenced item is changed in neutron, what
-  should you do?
-
-* **A:** Fortunately, this should be an extremely rare occurrence. Neutron core
-  reviewers will not allow such a change unless there is a very carefully
-  thought-out design decision behind it. That design will include how to
-  address any third-party code affected. (This is another good reason why you
-  should stay actively involved with the Neutron developer community.)
-
-The ``neutron-db-manage`` alembic wrapper script for neutron detects alembic
-branches for installed third-party repos, and the upgrade command automatically
-applies to all of them. A third-party repo must register its alembic migrations
-at installation time. This is done by providing an entrypoint in setup.cfg as
-follows:
-
-For a third-party repo named ``networking-foo``, add the alembic_migrations
-directory as an entrypoint in the ``neutron.db.alembic_migrations`` group::
-
-    [entry_points]
-    neutron.db.alembic_migrations =
-        networking-foo = networking_foo.db.migration:alembic_migrations
-
-**ToDo: neutron-db-manage autogenerate**
-    The alembic autogenerate command needs to support branches in external
-    repos. Bug #1471333 has been filed for this.
-
-
-DB Model/Migration Testing
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Here is a `template functional test
-<http://docs.openstack.org/developer/neutron/devref/template_model_sync_test.html>`_ third-party
-maintainers can use to develop tests for model-vs-migration sync in their
-repos. It is recommended that each third-party CI sets up such a test, and runs
-it regularly against Neutron master.
-
-Entry Points
-~~~~~~~~~~~~
-
-The `Python setuptools <https://pythonhosted.org/setuptools>`_ installs all
-entry points for packages in one global namespace for an environment. Thus each
-third-party repo can define its package's own ``[entry_points]`` in its own
-``setup.cfg`` file.
-
-For example, for the ``networking-foo`` repo::
-
-    [entry_points]
-    console_scripts =
-        neutron-foo-agent = networking_foo.cmd.eventlet.agents.foo:main
-    neutron.core_plugins =
-        foo_monolithic = networking_foo.plugins.monolithic.plugin:FooPluginV2
-    neutron.service_plugins =
-        foo_l3 = networking_foo.services.l3_router.l3_foo:FooL3ServicePlugin
-    neutron.ml2.type_drivers =
-        foo_type = networking_foo.plugins.ml2.drivers.foo:FooType
-    neutron.ml2.mechanism_drivers =
-        foo_ml2 = networking_foo.plugins.ml2.drivers.foo:FooDriver
-    neutron.ml2.extension_drivers =
-        foo_ext = networking_foo.plugins.ml2.drivers.foo:FooExtensionDriver
-
-* Note: It is advisable to include ``foo`` in the names of these entry points to
-  avoid conflicts with other third-party packages that may get installed in the
-  same environment.
-
-
-API Extensions
-~~~~~~~~~~~~~~
-
-Extensions can be loaded in two ways:
-
-#. Use the ``append_api_extensions_path()`` library API. This method is defined
-   in ``neutron/api/extensions.py`` in the neutron tree.
-#. Leverage the ``api_extensions_path`` config variable when deploying. See the
-   example config file ``etc/neutron.conf`` in the neutron tree where this
-   variable is commented.
-
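-For example, with the first method, a third-party package could append its own
-extensions directory at plugin initialization time; this is a sketch only, and
-``networking_foo`` with its ``extensions`` directory is hypothetical::
-
-    import os
-
-    from neutron.api import extensions
-
-    import networking_foo
-
-    # Directory inside the networking-foo package holding its API
-    # extension modules.
-    EXT_PATH = os.path.join(os.path.dirname(networking_foo.__file__),
-                            'extensions')
-
-    extensions.append_api_extensions_path([EXT_PATH])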
-
-Service Providers
-~~~~~~~~~~~~~~~~~
-
-If your project uses service provider(s) the same way VPNaaS and LBaaS do,
-specify your service provider in your ``project_name.conf`` file like so::
-
-    [service_providers]
-    # Must be in form:
-    # service_provider=<service_type>:<name>:<driver>[:default][,...]
-
-In order for Neutron to load this correctly, make sure you do the following in
-your code::
-
-    from neutron.db import servicetype_db
-    from neutron.services import provider_configuration as pconf
-
-    service_type_manager = servicetype_db.ServiceTypeManager.get_instance()
-    service_type_manager.add_provider_configuration(
-        YOUR_SERVICE_TYPE,
-        pconf.ProviderConfiguration(YOUR_SERVICE_MODULE))
-
-This is typically required when you instantiate your service plugin class.
-
-
-Interface Drivers
-~~~~~~~~~~~~~~~~~
-
-Interface (VIF) drivers for the reference implementations are defined in
-``neutron/agent/linux/interface.py``. Third-party interface drivers shall be
-defined in a similar location within their own repo.
-
-The entry point for the interface driver is a Neutron config option. It is up to
-the installer to configure this item in the ``[DEFAULT]`` section. For example::
-
-    [DEFAULT]
-    interface_driver = networking_foo.agent.linux.interface.FooInterfaceDriver
-
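-A third-party driver is typically a subclass of ``LinuxInterfaceDriver``. The
-sketch below is illustrative only; the exact abstract method signatures should
-be checked against the base class in the targeted Neutron release::
-
-    from neutron.agent.linux import interface
-
-
-    class FooInterfaceDriver(interface.LinuxInterfaceDriver):
-
-        def plug_new(self, network_id, port_id, device_name, mac_address,
-                     bridge=None, namespace=None, prefix=None):
-            # Create the foo device and attach it to the backend here.
-            pass
-
-        def unplug(self, device_name, bridge=None, namespace=None,
-                   prefix=None):
-            # Detach and clean up the foo device here.
-            pass
-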
-**ToDo: Interface Driver port bindings.**
-    ``VIF_TYPE_*`` constants in ``neutron/extensions/portbindings.py`` should be
-    moved from neutron core to the repositories where their drivers are
-    implemented. We need to provide some config or hook mechanism for VIF types
-    to be registered by external interface drivers. For Nova, selecting the VIF
-    driver can be done outside of
-    Neutron (using the new `os-vif python library
-    <https://review.openstack.org/193668>`_?). Armando and Akihiro to discuss.
-
-
-Rootwrap Filters
-~~~~~~~~~~~~~~~~
-
-If a third-party repo needs a rootwrap filter for a command that is not used by
-Neutron core, then the filter shall be defined in the third-party repo.
-
-For example, to add rootwrap filters for commands in the repo ``networking-foo``:
-
-* In the repo, create the file:
-  ``etc/neutron/rootwrap.d/foo.filters``
-
-* In the repo's ``setup.cfg`` add the filters to data_files::
-
-    [files]
-    data_files =
-        etc/neutron/rootwrap.d =
-            etc/neutron/rootwrap.d/foo.filters
-
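-A filters file uses the oslo.rootwrap INI format; for a hypothetical
-``foo_cmd`` utility it could look like the following sketch::
-
-    [Filters]
-    # Allow the agent to run foo_cmd as root.
-    foo_cmd: CommandFilter, foo_cmd, root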
-
-Extending python-neutronclient
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The maintainer of a third-party component may wish to add extensions to the
-Neutron CLI client. Thanks to https://review.openstack.org/148318 this can now
-be accomplished. See `Client Command Extensions
-<client_command_extensions.html>`_.
-
-
-Other repo-split items
-~~~~~~~~~~~~~~~~~~~~~~
-
-(These are still TBD.)
-
-* Splitting policy.json? **ToDo** Armando will investigate.
-
-* Generic instructions (or a template) for installing an out-of-tree plugin or
-  driver for Neutron. Possibly something for the networking guide, and/or a
-  template that plugin/driver maintainers can modify and include with their
-  package.
diff --git a/doc/source/devref/db_layer.rst b/doc/source/devref/db_layer.rst
deleted file mode 100644 (file)
index 3edad0e..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Neutron Database Layer
-======================
-
-This section contains some common information that will be useful for
-developers who need to make database changes.
-
-Difference between 'default' and 'server_default' parameters for columns
-------------------------------------------------------------------------
-
-For columns it is possible to set 'default' or 'server_default'. What is the
-difference between them and why should they be used?
-
-The explanation is quite simple:
-
-*  `default <http://docs.sqlalchemy.org/en/rel_0_9/core/metadata.html#sqlalchemy.schema.Column.params.default>`_ - the default value that SQLAlchemy will specify in queries for creating instances of a given model;
-*  `server_default <http://docs.sqlalchemy.org/en/rel_0_9/core/metadata.html#sqlalchemy.schema.Column.params.server_default>`_ - the default value for a column that SQLAlchemy will specify in DDL.
-
-Summarizing, 'default' is useless in migrations and only 'server_default'
-should be used. To keep migrations in sync with models, the server_default
-parameter should also be set in the model. If a default value in the database
-is not needed, 'server_default' should not be used. The declarative approach
-can be bypassed (i.e. 'default' may be omitted in the model) if the default is
-enforced through business logic.
-
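-For example, a model column sketch (illustrative only) where the default is
-rendered into the DDL so that existing rows receive a value during migration::
-
-    import sqlalchemy as sa
-
-    # 'server_default' ends up in the CREATE/ALTER TABLE statement;
-    # 'default' would only be applied client-side by SQLAlchemy on INSERT.
-    admin_state_up = sa.Column(sa.Boolean,
-                               server_default=sa.sql.true(),
-                               nullable=False)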
-
-Database migrations
--------------------
-
-For details on the neutron-db-manage wrapper and alembic migrations, see
-`Alembic Migrations <alembic_migrations.html>`_.
-
-
-Tests to verify that database migrations and models are in sync
----------------------------------------------------------------
-
-.. automodule:: neutron.tests.functional.db.test_migrations
-
-.. autoclass:: _TestModelsMigrations
-   :members:
-
-
-The Standard Attribute Table
-----------------------------
-
-There are many attributes that we would like to store in the database which
-are common across many Neutron objects (e.g. tags, timestamps, rbac entries).
-We have previously been handling this by duplicating the schema to every table
-via model mixins. This means that a DB migration is required for each object
-that wants to adopt one of these common attributes. This becomes even more
-cumbersome when the relationship between the attribute and the object is
-many-to-one because each object then needs its own table for the attributes
-(assuming referential integrity is a concern).
-
-To address this issue, the 'standardattribute' table is available. Any model
-can add support for this table by inheriting the 'HasStandardAttributes' mixin
-in neutron.db.model_base. This mixin will add a standard_attr_id BigInteger
-column to the model with a foreign key relationship to the 'standardattribute'
-table. The model will then be able to access any columns of the
-'standardattribute' table and any tables related to it.
-
-The introduction of a new standard attribute only requires one column addition
-to the 'standardattribute' table for one-to-one relationships or a new table
-for one-to-many or one-to-zero relationships. Then all of the models using the
-'HasStandardAttributes' mixin will automatically gain access to the new attribute.
-
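-For example, a minimal model sketch (the ``FooResource`` name and its column
-are hypothetical)::
-
-    import sqlalchemy as sa
-
-    from neutron.db import model_base
-
-
-    class FooResource(model_base.HasStandardAttributes, model_base.BASEV2,
-                      model_base.HasId):
-        """Model that gains standard attributes via standard_attr_id."""
-        name = sa.Column(sa.String(255))
-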
-Any attributes that will apply to every neutron resource (e.g. timestamps)
-can be added directly to the 'standardattribute' table. For things that will
-frequently be NULL for most entries (e.g. a column to store an error reason),
-a new table should be added and joined to in queries, to avoid a large number
-of NULL entries in the database.
diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst
deleted file mode 100644 (file)
index 4a74e12..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-..
-      Copyright 2010-2013 United States Government as represented by the
-      Administrator of the National Aeronautics and Space Administration.
-      All Rights Reserved.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Setting Up a Development Environment
-====================================
-
-This page describes how to set up a working Python development
-environment that can be used to develop Neutron on Ubuntu, Fedora or
-Mac OS X. These instructions assume you're already familiar with
-Git and Gerrit, a code repository mirror and code review toolset;
-if you aren't, please see `this Git tutorial`_ for an introduction
-to using Git and `this guide`_ for a tutorial on using Gerrit and Git for
-code contribution to OpenStack projects.
-
-.. _this Git tutorial: http://git-scm.com/book/en/Getting-Started
-.. _this guide: http://docs.openstack.org/infra/manual/developers.html#development-workflow
-
-Following these instructions will allow you to run the Neutron unit
-tests. If you want to be able to run Neutron in a full OpenStack environment,
-you can use the excellent `DevStack`_ project to do so. There is a wiki page
-that describes `setting up Neutron using DevStack`_.
-
-.. _DevStack: https://git.openstack.org/cgit/openstack-dev/devstack
-.. _setting up Neutron using Devstack: https://wiki.openstack.org/wiki/NeutronDevstack
-
-Getting the code
-----------------
-
-Grab the code::
-
-    git clone git://git.openstack.org/openstack/neutron.git
-    cd neutron
-
-
-.. include:: ../../../TESTING.rst
diff --git a/doc/source/devref/dns_order.rst b/doc/source/devref/dns_order.rst
deleted file mode 100644 (file)
index cdac014..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Keep DNS Nameserver Order Consistency In Neutron
-================================================
-
-In Neutron subnets, the order of DNS nameservers is significant and is
-preserved when a subnet is created or updated. This means that if you create a
-subnet with multiple DNS servers, the order will be retained and guests will
-receive the DNS servers in the order in which you specified them. The same
-applies to update operations on subnets that add, remove, or update DNS
-servers.
-
-Get Subnet Details Info
------------------------
-::
-
-        changzhi@stack:~/devstack$ neutron subnet-list
-        +--------------------------------------+------+-------------+--------------------------------------------+
-        | id                                   | name | cidr        | allocation_pools                           |
-        +--------------------------------------+------+-------------+--------------------------------------------+
-        | 1a2d261b-b233-3ab9-902e-88576a82afa6 |      | 10.0.0.0/24 | {"start": "10.0.0.2", "end": "10.0.0.254"} |
-        +--------------------------------------+------+-------------+--------------------------------------------+
-
-        changzhi@stack:~/devstack$ neutron subnet-show 1a2d261b-b233-3ab9-902e-88576a82afa6
-        +------------------+--------------------------------------------+
-        | Field            | Value                                      |
-        +------------------+--------------------------------------------+
-        | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} |
-        | cidr             | 10.0.0.0/24                                |
-        | dns_nameservers  | 1.1.1.1                                    |
-        |                  | 2.2.2.2                                    |
-        |                  | 3.3.3.3                                    |
-        | enable_dhcp      | True                                       |
-        | gateway_ip       | 10.0.0.1                                   |
-        | host_routes      |                                            |
-        | id               | 1a2d261b-b233-3ab9-902e-88576a82afa6       |
-        | ip_version       | 4                                          |
-        | name             |                                            |
-        | network_id       | a404518c-800d-2353-9193-57dbb42ac5ee       |
-        | tenant_id        | 3868290ab10f417390acbb754160dbb2           |
-        +------------------+--------------------------------------------+
-
-Update Subnet DNS Nameservers
------------------------------
-::
-
-    neutron subnet-update 1a2d261b-b233-3ab9-902e-88576a82afa6 \
-    --dns_nameservers list=true 3.3.3.3 2.2.2.2 1.1.1.1
-
-    changzhi@stack:~/devstack$ neutron subnet-show 1a2d261b-b233-3ab9-902e-88576a82afa6
-    +------------------+--------------------------------------------+
-    | Field            | Value                                      |
-    +------------------+--------------------------------------------+
-    | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} |
-    | cidr             | 10.0.0.0/24                                |
-    | dns_nameservers  | 3.3.3.3                                    |
-    |                  | 2.2.2.2                                    |
-    |                  | 1.1.1.1                                    |
-    | enable_dhcp      | True                                       |
-    | gateway_ip       | 10.0.0.1                                   |
-    | host_routes      |                                            |
-    | id               | 1a2d261b-b233-3ab9-902e-88576a82afa6       |
-    | ip_version       | 4                                          |
-    | name             |                                            |
-    | network_id       | a404518c-800d-2353-9193-57dbb42ac5ee       |
-    | tenant_id        | 3868290ab10f417390acbb754160dbb2           |
-    +------------------+--------------------------------------------+
-
-As shown in above output, the order of the DNS nameservers has been updated.
-New virtual machines deployed to this subnet will receive the DNS nameservers
-in this new priority order. Existing virtual machines that have already been
-deployed will not be immediately affected by changing the DNS nameserver order
-on the neutron subnet. Virtual machines that are configured to get their IP
-address via DHCP will detect the DNS nameserver order change
-when their DHCP lease expires or when the virtual machine is restarted.
-Existing virtual machines configured with a static IP address will never
-detect the updated DNS nameserver order.
diff --git a/doc/source/devref/effective_neutron.rst b/doc/source/devref/effective_neutron.rst
deleted file mode 100644 (file)
index 6d4e4a5..0000000
+++ /dev/null
@@ -1,396 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Effective Neutron: 100 specific ways to improve your Neutron contributions
-==========================================================================
-
-There are a number of skills that make a great Neutron developer: writing good
-code, reviewing effectively, listening to peer feedback, etc. The objective of
-this document is to describe, by means of examples, the pitfalls and the good
-and bad practices that 'we' as a project encounter on a daily basis and that
-make us either go slower or accelerate while contributing to Neutron.
-
-By reading and collaboratively contributing to such a knowledge base, your
-development and review cycle becomes shorter, because you will learn (and teach
-to others after you) what to watch out for, and how to be proactive in order
-to prevent negative feedback, minimize programming errors, write better
-tests, and so on and so forth...in a nutshell, how to become an effective Neutron
-developer.
-
-The notes below are meant to be free-form and brief by design. They are not meant
-to replace or duplicate `OpenStack documentation <http://docs.openstack.org>`_,
-or any project-wide documentation initiative like `peer-review notes <http://docs.openstack.org/infra/manual/developers.html#peer-review>`_
-or the `team guide <http://docs.openstack.org/project-team-guide/>`_. For this
-reason, references are acceptable and should be favored, if the shortcut is
-deemed useful to expand on the distilled information.
-We will try to keep these notes tidy by breaking them down into sections if it
-makes sense. Feel free to add, adjust, remove as you see fit. Please do so,
-taking into consideration yourself and other Neutron developers as readers.
-Capture your experience during development and review and add any comment that
-you believe will make your life and others' easier.
-
-Happy hacking!
-
-Developing better software
---------------------------
-
-Plugin development
-~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices done during plugin development.
-
-* Use mixin classes as a last resort. They can be a powerful tool to add behavior
-  but their strength is also a weakness, as they can introduce `unpredictable <https://review.openstack.org/#/c/121290/>`_
-  behavior to the `MRO <https://www.python.org/download/releases/2.3/mro/>`_,
-  amongst other issues.
-* In lieu of mixins, if you need to add behavior that is relevant for ML2,
-  consider using the `extension manager <http://specs.openstack.org/openstack/neutron-specs/specs/juno/neutron-ml2-mechanismdriver-extensions.html>`_.
-* If you make changes to the DB class methods, like calling methods that can
-  be inherited, think about what effect that may have on plugins that have
-  controller `backends <https://review.openstack.org/#/c/116924/>`_.
-* If you make changes to the ML2 plugin or components used by the ML2 plugin,
-  think about the `effect <http://lists.openstack.org/pipermail/openstack-dev/2015-October/076134.html>`_
-  that may have on other plugins.
-* When adding behavior to the L2 and L3 db base classes, do not assume that
-  there is an agent on the other side of the message broker that interacts
-  with the server. Plugins may not rely on `agents <https://review.openstack.org/#/c/174020/>`_ at all.
-
-Database interaction
-~~~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices done during database development.
-
-* `first() <http://docs.sqlalchemy.org/en/rel_1_0/orm/query.html#sqlalchemy.orm.query.Query.first>`_
-  does not raise an exception when no row matches (see the sketch after this list).
-* Do not fetch an object just to delete it. If you can, call `delete() <http://docs.sqlalchemy.org/en/rel_1_0/orm/query.html#sqlalchemy.orm.query.Query.delete>`_
-  on the query object instead. Read the warnings for more details about in-python cascades.
-* For PostgreSQL, if you're using GROUP BY, everything in the SELECT list must
-  be an aggregate (SUM(...), COUNT(...), etc.) or used in the GROUP BY clause.
-
-  The incorrect variant:
-
-  .. code:: python
-
-     q = query(Object.id, Object.name,
-               func.count(Object.number)).group_by(Object.name)
-
-  The correct variant:
-
-  .. code:: python
-
-     q = query(Object.id, Object.name,
-               func.count(Object.number)).group_by(Object.id, Object.name)
-* Beware of the `InvalidRequestError <http://docs.sqlalchemy.org/en/rel_0_8/faq.html#this-session-s-transaction-has-been-rolled-back-due-to-a-previous-exception-during-flush-or-similar>`_ exception.
-  There is even a `Neutron bug <https://bugs.launchpad.net/neutron/+bug/1409774>`_
-  registered for it. Bear in mind that this error may also occur when nesting
-  transaction blocks, and the innermost block raises an error without proper
-  rollback. Consider if `savepoints <http://docs.sqlalchemy.org/en/rel_1_0/orm/session_transaction.html#using-savepoint>`_
-  can fit your use case.
-* When designing data models that are related to each other, be careful about
-  how you model the relationships' loading `strategy <http://docs.sqlalchemy.org/en/latest/orm/loading_relationships.html#using-loader-strategies-lazy-loading-eager-loading>`_. For instance, a joined relationship
-  can be much more efficient than the alternatives (some examples include `router gateways <https://review.openstack.org/#/c/88665/>`_
-  or `network availability zones <https://review.openstack.org/#/c/257086/>`_).
-* If you add a relationship to a Neutron object that will be referenced in the
-  majority of cases where the object is retrieved, be sure to use the
-  lazy='joined' parameter to the relationship so the related objects are loaded
-  as part of the same query. Otherwise, the default method is 'select', which
-  emits a new DB query to retrieve each related object adversely impacting
-  performance. For example, see `patch 88665 <https://review.openstack.org/#/c/88665/>`_
-  which resulted in a significant improvement since router retrieval functions
-  always include the gateway interface.
-* Conversely, do not use lazy='joined' if the relationship is only used in
-  corner cases because the JOIN statement comes at a cost that may be
-  significant if the relationship contains many objects. For example, see
-  `patch 168214 <https://review.openstack.org/#/c/168214/>`_ which reduced a
-  subnet retrieval by ~90% by avoiding a join to the IP allocation table.
-* When writing extensions to existing objects (e.g. Networks), ensure that
-  they are written in a way that the data on the object can be calculated
-  without additional DB lookup. If that's not possible, ensure the DB lookup
-  is performed once in bulk during a list operation. Otherwise a list call
-  for a 1000 objects will change from a constant small number of DB queries
-  to 1000 DB queries. For example, see
-  `patch 257086 <https://review.openstack.org/#/c/257086/>`_ which changed the
-  availability zone code from the incorrect style to a database friendly one.
-
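-As referenced above, a short sketch of the ``first()`` and ``delete()``
-guidance (``FooModel``, ``FooNotFound`` and ``session`` are hypothetical):
-
-.. code:: python
-
-   # first() returns None instead of raising when there is no match:
-   obj = session.query(FooModel).filter_by(id=some_id).first()
-   if obj is None:
-       raise FooNotFound(id=some_id)
-
-   # Bulk-delete on the query object, without fetching the rows first.
-   # Note: this bypasses in-python cascades; read the SQLAlchemy warnings.
-   session.query(FooModel).filter_by(id=some_id).delete()
-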
-System development
-~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices done when invoking system commands
-and interacting with linux utils.
-
-Eventlet concurrent model
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices done when using eventlet and monkey
-patching.
-
-* Do not use with_lockmode('update') on SQL queries without protecting the operation
-  with a lockutils semaphore. For some SQLAlchemy database drivers that operators may
-  choose (e.g. MySQLdb) it may result in a temporary deadlock by yielding to another
-  coroutine while holding the DB lock. The following wiki provides more details:
-  https://wiki.openstack.org/wiki/OpenStack_and_SQLAlchemy#MySQLdb_.2B_eventlet_.3D_sad
-
-Mocking and testing
-~~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices done when writing tests, any test.
-For anything more elaborate, please visit the testing section.
-
-* Prefer low-level testing to full-path testing (e.g. do not test the database
-  via client calls). The former is to be favored in unit testing, whereas the latter
-  is to be favored in functional testing.
-* Prefer specific assertions (assert(Not)In, assert(Not)IsInstance, assert(Not)IsNone,
-  etc) over generic ones (assertTrue/False, assertEqual) because they raise more
-  meaningful errors:
-
-  .. code:: python
-
-     def test_specific(self):
-         self.assertIn(3, [1, 2])
-         # raise meaningful error: "MismatchError: 3 not in [1, 2]"
-
-     def test_generic(self):
-         self.assertTrue(3 in [1, 2])
-         # raise meaningless error: "AssertionError: False is not true"
-
-* Use the pattern "self.assertEqual(expected, observed)" not the opposite, it helps
-  reviewers to understand which one is the expected/observed value in non-trivial
-  assertions. The expected and observed values are also labeled in the output when
-  the assertion fails.
-* Prefer specific assertions (assertTrue, assertFalse) over assertEqual(True/False, observed).
-* Don't write tests that don't test the intended code. This might seem silly but
-  it's easy to do with a lot of mocks in place. Ensure that your tests break as
-  expected before your code change.
-* Avoid heavy use of the mock library to test your code. If your code requires more
-  than one mock to ensure that it does the correct thing, it needs to be refactored
-  into smaller, testable units. Otherwise we depend on fullstack/tempest/api tests
-  to test all of the real behavior and we end up with code containing way too many
-  hidden dependencies and side effects.
-* All behavior changes to fix bugs should include a test that prevents a
-  regression. If you made a change and it didn't break a test, it means the
-  code was not adequately tested in the first place, it's not an excuse to leave
-  it untested.
-* Test the failure cases. Use a mock side effect to raise the necessary
-  exceptions to test your 'except' clauses (see the sketch after this list).
-* Don't mimic existing tests that violate these guidelines. We are attempting to
-  replace all of these so more tests like them create more work. If you need help
-  writing a test, reach out to the testing lieutenants and the team on IRC.
-* Mocking open() is a dangerous practice because it can lead to unexpected
-  bugs like `bug 1503847 <https://bugs.launchpad.net/neutron/+bug/1503847>`_.
-  In fact, when the built-in open method is mocked during tests, some
-  utilities (like debtcollector) may still rely on the real thing, and may
-  end up using the mock rather than what they are really looking for. If you must,
-  consider using `OpenFixture <https://review.openstack.org/#/c/232716/>`_, but
-  it is better not to mock open() at all.
-
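-As mentioned in the list above, here is a failure-path test sketch using a
-mock side effect (the plugin method and exception names are hypothetical):
-
-.. code:: python
-
-   import mock
-
-   def test_create_resource_db_failure(self):
-       with mock.patch.object(self.plugin, '_save_resource',
-                              side_effect=RuntimeError):
-           # The 'except' clause under test should translate the error.
-           self.assertRaises(FooCreateError,
-                             self.plugin.create_resource, self.ctx, {})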
-
-Backward compatibility
-~~~~~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices done when extending the RPC Interfaces.
-
-* Make yourself familiar with :ref:`Upgrade review guidelines <upgrade_review_guidelines>`.
-
-
-Scalability issues
-~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices done when writing code that needs to process
-a lot of data.
-
-Translation and logging
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices done when instrumenting your code.
-
-* Make yourself familiar with `OpenStack logging guidelines <http://specs.openstack.org/openstack/openstack-specs/specs/log-guidelines.html#definition-of-log-levels>`_
-  to avoid littering the logs with traces logged at inappropriate levels.
-* The logger should only be passed unicode values. For example, do not pass it
-  exceptions or other objects directly (LOG.error(exc), LOG.error(port), etc.).
-  See http://docs.openstack.org/developer/oslo.log/usage.html#no-more-implicit-conversion-to-unicode-str
-  for more details.
-* Don't pass exceptions into LOG.exception: the exception is already implicitly
-  included in the log message by the Python logging module.
-* Don't use LOG.exception when there is no exception registered in the current
-  thread context: Python 3.x versions before 3.5 are known to fail on it.
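-
-A minimal sketch of these rules (``process_port`` and ``port_id`` are
-hypothetical):
-
-.. code:: python
-
-    from oslo_log import log as logging
-
-    LOG = logging.getLogger(__name__)
-
-    def handle_port(port_id):
-        try:
-            process_port(port_id)  # hypothetical helper
-        except ValueError:
-            # do not pass the exception object; LOG.exception appends
-            # the current traceback automatically
-            LOG.exception("Failed to process port %s", port_id)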
-
-Project interfaces
-~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices when writing code that is
-used to interface with other projects, like Keystone or Nova.
-
-Documenting your code
-~~~~~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices when writing docstrings.
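-
-A minimal sketch of a descriptive docstring (the function and its parameters
-are hypothetical):
-
-.. code:: python
-
-    def release_segment(session, segment):
-        """Release a previously allocated network segment.
-
-        :param session: database session used to perform the release
-        :param segment: dict describing the segment to release
-        """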
-
-Landing patches more rapidly
-----------------------------
-
-Scoping your patch appropriately
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* Do not make multiple changes in one patch unless absolutely necessary.
-  Cleaning up nearby functions or fixing a small bug you noticed while working
-  on something else makes the patch very difficult to review. It also makes
-  cherry-picking and reverting very difficult.  Even apparently minor changes
-  such as reformatting whitespace around your change can burden reviewers and
-  cause merge conflicts.
-* If a fix or feature requires code refactoring, submit the refactoring as a
-  separate patch from the one that changes the logic. Otherwise
-  it's difficult for a reviewer to tell the difference between mistakes
-  in the refactor and changes required for the fix/feature. If it's a bug fix,
-  try to implement the fix before the refactor to avoid making cherry-picks to
-  stable branches difficult.
-* Consider your reviewers' time before submitting your patch. A patch that
-  requires many hours or days to review will sit in the "todo" list until
-  someone has many hours or days free (which may never happen). If you can
-  deliver your patch in small but incrementally understandable and testable
-  pieces you will be more likely to attract reviewers.
-
-Nits and pedantic comments
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Document common nits and pedantic comments to watch out for.
-
-* Make sure you spell correctly, as best you can; no one wants rebase
-  generators at the end of the release cycle!
-* The odd pep8 error may cause an entire CI run to be wasted. Consider running
-  validation (pep8 and/or tests) before submitting your patch. If you keep
-  forgetting, consider installing a git
-  `hook <https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks>`_
-  so that Git will do it for you.
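-
-  A minimal pre-commit hook sketch in Python (assuming tox is installed and
-  that a pep8 tox environment exists, as it does in the Neutron tree):
-
-  .. code:: python
-
-     #!/usr/bin/env python
-     # save as .git/hooks/pre-commit and mark it executable
-     import subprocess
-     import sys
-
-     # abort the commit when the pep8 checks fail
-     sys.exit(subprocess.call(['tox', '-e', 'pep8']))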
-* Sometimes, new contributors want to dip their toes with trivial patches, but we
-  at OpenStack *love* bike shedding and their patches may sometimes stall. In
-  some extreme cases, the more trivial the patch, the higher the chances it fails
-  to merge. To ensure we as a team provide a frustration-free experience,
-  new contributors should be redirected to fixing `low-hanging-fruit bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=low-hanging-fruit>`_
-  that have a tangible positive impact on the codebase. Spelling mistakes and
-  docstring fixes are fine, but there is a lot more that is relatively easy to fix
-  and has a direct impact on Neutron users.
-
-Reviewer comments
-~~~~~~~~~~~~~~~~~
-
-* Acknowledge them one by one by either clicking 'Done' or by replying extensively.
-  If you do not, the reviewer won't know whether you thought it was not important,
-  or you simply forgot. If the reply satisfies the reviewer, consider capturing the
-  input in the code/document itself so that it's visible to reviewers of newer
-  patchsets (and to other developers when the patch merges).
-* Watch for the feedback on your patches. Acknowledge it promptly and act on it
-  quickly, so that the reviewer remains engaged. If you disappear for a week after
-  you posted a patchset, it is very likely that the patch will end up being
-  neglected.
-* Do not take negative feedback personally. Neutron is a large project with lots
-  of contributors with different opinions on how things should be done. Many come
-  from widely varying cultures and languages, so English, text-only feedback
-  can unintentionally come across as harsh. Getting a -1 means reviewers are
-  trying to help get the patch into a state that can be merged; it doesn't
-  mean they are trying to block it. It's very rare for a patch that makes
-  everyone happy to merge on the first iteration.
-
-Code Review
-~~~~~~~~~~~
-
-* You should visit the `OpenStack How To Review wiki <https://wiki.openstack.org/wiki/How_To_Contribute#Reviewing>`_
-  for an overview of the review workflow.
-
-IRC
-~~~~
-
-* IRC is a place where you can speak with many of the Neutron developers and core
-  reviewers. For more information you should visit the
-  `OpenStack IRC wiki <http://wiki.openstack.org/wiki/IRC>`_.
-  The Neutron IRC channel is #openstack-neutron.
-* There are weekly IRC meetings related to many different projects/teams
-  in Neutron.
-  A full list of these meetings and their date/time can be found in
-  `OpenStack IRC Meetings <http://eavesdrop.openstack.org>`_.
-  It is important to attend these meetings in the area of your contribution
-  and possibly mention your work and patches.
-* When you have questions regarding an idea or a specific patch of yours, it
-  can be helpful to find a relevant person in IRC and speak with them about
-  it.
-  You can find a user's IRC nickname in their launchpad account.
-* Being available on IRC is useful, since reviewers can contact
-  you directly to quickly clarify a review issue. This speeds
-  up the feedback loop.
-* Each area of Neutron or sub-project of Neutron has a specific lieutenant
-  in charge of it.
-  You can most likely find these lieutenants on IRC; it is advised, however, to
-  send public questions to the channel rather than to a specific person if
-  possible. (This increases the chances of getting faster answers to your
-  questions.)
-  A list of the areas and lieutenants' nicknames can be found at
-  `Core Reviewers <http://docs.openstack.org/developer/neutron/policies/core-reviewers.html>`_.
-
-Commit messages
-~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices when writing commit messages.
-For more details see `Git commit message best practices <https://wiki.openstack.org/wiki/GitCommitMessages>`_.
-This is the TL;DR version with the important points for committing to Neutron.
-
-
-* One liners are bad, unless the change is trivial.
-* Remember to use DocImpact, APIImpact, UpgradeImpact appropriately.
-* Make sure the commit message doesn't have any spelling/grammar errors. This
-  is the first thing reviewers read and they can be distracting enough to
-  invite -1's.
-* Describe what the change accomplishes. If it's a bug fix, explain how this
-  code will fix the problem. If it's part of a feature implementation, explain
-  what component of the feature the patch implements. Do not just describe the
-  bug, that's what launchpad is for.
-* Use the "Closes-Bug: #BUG-NUMBER" tag if the patch addresses a bug. Submitting
-  a bugfix without a launchpad bug reference is unacceptable, even if it's
-  trivial. Launchpad is how bugs are tracked so fixes without a launchpad bug are
-  a nightmare when users report the bug from an older version and the Neutron team
-  can't tell if/why/how it's been fixed. Launchpad is also how backports are
-  identified and tracked so patches without a bug report cannot be picked to stable
-  branches.
-* Use the "Implements: blueprint NAME-OF-BLUEPRINT" or "Partially-Implements:
-  blueprint NAME-OF-BLUEPRINT" for features so reviewers can determine if the
-  code matches the spec that was agreed upon. This also updates the blueprint
-  on launchpad so it's easy to see all patches that are related to a feature.
-* If it's not immediately obvious, explain what the previous code was doing
-  that was incorrect. (e.g. code assumed it would never get 'None' from
-  a function call)
-* Be specific in your commit message about what the patch does and why it does
-  this. For example, "Fixes incorrect logic in security groups" is not helpful
-  because the code diff already shows that you are modifying security groups.
-  The message should be specific enough that a reviewer looking at the code can
-  tell if the patch does what the commit says in the most appropriate manner.
-  If the reviewer has to guess why you did something, lots of your time will be
-  wasted explaining why certain changes were made.
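-
-A hypothetical example that follows these points (the bug number and the
-change described are invented for illustration)::
-
-    Fix security group rule creation for deleted remote groups
-
-    The rule creation logic assumed the remote group always existed and
-    raised a KeyError when it had been deleted concurrently. Return a
-    proper error response to the API caller in that case instead.
-
-    Closes-Bug: #1234567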
-
-
-Dealing with Zuul
-~~~~~~~~~~~~~~~~~
-
-Document common pitfalls as well as good practices when dealing with OpenStack CI.
-
-* When you submit a patch, consider checking its `status <http://status.openstack.org/zuul/>`_
-  in the queue. If you see job failures, you might as well save time and try to figure out
-  in advance why they are failing.
-* Excessive use of 'recheck' to get tests to pass is discouraged. Please examine the logs for
-  the failing test(s) and make sure your change has not tickled anything that might be causing
-  a new failure or race condition. Getting your change in could make it even harder to debug
-  what is actually broken later on.
diff --git a/doc/source/devref/fullstack_testing.rst b/doc/source/devref/fullstack_testing.rst
deleted file mode 100644 (file)
index fca5d43..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Full Stack Testing
-==================
-
-Goals
------
-
-* Stabilize the job:
-    - Fix L3 HA failure
-    - Look into non-deterministic failures when adding a large number of
-      tests (possibly bug 1486199).
-    - Switch to kill signal 15 to terminate neutron-server & agents (Bugs
-      1487548 and 1494363).
-* Convert the L3 HA failover functional test to a full stack test
-* Write a test for DHCP HA / Multiple DHCP agents per network
-* Write DVR tests
-* Write additional L3 HA tests
-* Write a test that validates DVR + L3 HA integration after
-  https://bugs.launchpad.net/neutron/+bug/1365473 is fixed.
diff --git a/doc/source/devref/i18n.rst b/doc/source/devref/i18n.rst
deleted file mode 100644 (file)
index 1f0d698..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Neutron Stadium i18n
-====================
-
-* Refer to oslo_i18n documentation for the general mechanisms that should
-  be used: http://docs.openstack.org/developer/oslo.i18n/usage.html
-
-* Do NOT use the _i18n module in neutron-lib or neutron.
-
-* It is recommended that you create a {package_name}/_i18n.py file
-  in your repo and use that; a minimal sketch follows after this list.
-  Your localization strings will also live in your repo.
-
-* The neutron.i18n module will be around for a release or two, with
-  shared localization strings, but migration is encouraged.
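-
-A minimal sketch of such a _i18n.py file, following the oslo.i18n usage
-documentation (the domain name is a placeholder for your package):
-
-.. code:: python
-
-    import oslo_i18n
-
-    _translators = oslo_i18n.TranslatorFactory(domain='yourpackage')
-
-    # the primary translation function, bound to the well-known name "_"
-    _ = _translators.primary
-
-    # translation functions for log messages at various levels
-    _LI = _translators.log_info
-    _LW = _translators.log_warning
-    _LE = _translators.log_error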
diff --git a/doc/source/devref/images/fullstack_multinode_simulation.png b/doc/source/devref/images/fullstack_multinode_simulation.png
deleted file mode 100644 (file)
index 9736944..0000000
Binary files a/doc/source/devref/images/fullstack_multinode_simulation.png and /dev/null differ
diff --git a/doc/source/devref/images/under-the-hood-scenario-1-ovs-compute.png b/doc/source/devref/images/under-the-hood-scenario-1-ovs-compute.png
deleted file mode 100644 (file)
index f3f0972..0000000
Binary files a/doc/source/devref/images/under-the-hood-scenario-1-ovs-compute.png and /dev/null differ
diff --git a/doc/source/devref/images/under-the-hood-scenario-1-ovs-netns.png b/doc/source/devref/images/under-the-hood-scenario-1-ovs-netns.png
deleted file mode 100644 (file)
index 250ef5b..0000000
Binary files a/doc/source/devref/images/under-the-hood-scenario-1-ovs-netns.png and /dev/null differ
diff --git a/doc/source/devref/images/under-the-hood-scenario-1-ovs-network.png b/doc/source/devref/images/under-the-hood-scenario-1-ovs-network.png
deleted file mode 100644 (file)
index 3c21c84..0000000
Binary files a/doc/source/devref/images/under-the-hood-scenario-1-ovs-network.png and /dev/null differ
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
deleted file mode 100644 (file)
index d72bdad..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-..
-      Copyright 2010-2011 United States Government as represented by the
-      Administrator of the National Aeronautics and Space Administration.
-      All Rights Reserved.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Developer Guide
-===============
-
-In the Developer Guide, you will find information on Neutron's lower level
-programming APIs. There are sections that cover the core pieces of Neutron,
-including its database, message queue, and scheduler components. There are
-also subsections that describe specific plugins inside Neutron. Finally,
-the developer guide includes information about Neutron testing infrastructure.
-
-
-Programming HowTos and Tutorials
---------------------------------
-.. toctree::
-    :maxdepth: 3
-
-    effective_neutron
-    development.environment
-    contribute
-    neutron_api
-    client_command_extensions
-    alembic_migrations
-
-
-Neutron Internals
------------------
-.. toctree::
-   :maxdepth: 3
-
-   services_and_agents
-   api_layer
-   ml2_ext_manager
-   quota
-   api_extensions
-   plugin-api
-   db_layer
-   policy
-   rpc_api
-   rpc_callbacks
-   layer3
-   l2_agents
-   ovs_vhostuser
-   quality_of_service
-   advanced_services
-   oslo-incubator
-   callbacks
-   dns_order
-   upgrade
-   i18n
-   instrumentation
-   address_scopes
-
-Testing
--------
-.. toctree::
-   :maxdepth: 3
-
-   fullstack_testing
-   testing_coverage
-   template_model_sync_test
-
-Module Reference
-----------------
-.. toctree::
-   :maxdepth: 3
-
-.. todo::
-
-    Add in all the big modules as automodule indexes.
-
-
-Indices and tables
-------------------
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/doc/source/devref/instrumentation.rst b/doc/source/devref/instrumentation.rst
deleted file mode 100644 (file)
index 5f2eeb7..0000000
+++ /dev/null
@@ -1,336 +0,0 @@
-Neutron Instrumentation
-=======================
-
-OpenStack operators require information about the status and health
-of the Neutron system. While it is possible for an operator to pull
-all of the interface counters from compute and network nodes, today
-there is no capability to aggregate that information to provide
-comprehensive counters for each project within Neutron. Neutron
-instrumentation sets out to meet this need.
-
-Neutron instrumentation can be broken down into three major pieces:
-
-#. Data Collection (i.e. what data should be collected and how),
-#. Data Aggregation (i.e. how and where raw data should be aggregated
-   into project information)
-#. Data Consumption (i.e. how is aggregated data consumed)
-
-While instrumentation might also be considered to include asynchronous event
-notifications, like fault detection, this is considered out of scope
-for the following two reasons:
-
-#. In Kilo, Neutron added the ProcessManager class to allow agents to
-   spawn a monitor thread that would either respawn or exit the agent.
-   While this is a useful feature for ensuring that the agent gets
-   restarted, the only notification of this event is an error log entry.
-   To ensure that this event is asynchronously passed up to an upstream
-   consumer, the Neutron logger object should have its publish_errors
-   option set to True and the transport URL set to point at the
-   upstream consumer. As the particular URL is consumer specific, further
-   discussion is outside the scope of this section.
-#. For the data plane, it is necessary to have visibility into the hardware
-   status of the compute and networking nodes. As some upstream consumers
-   already support this (even incompletely) it is considered to be within
-   the scope of the upstream consumer and not Neutron itself.
-
-How does Instrumentation differ from Metering Labels and Rules
---------------------------------------------------------------
-
-The existing metering label and rule extension provides the ability to
-collect traffic information on a per CIDR basis. Therefore, a possible
-implementation of instrumentation would be to use per-instance metering
-rules for all IP addresses in both directions. However, the information
-collected by metering rules is focused more on billing and so does not
-have the desired granularity (i.e. it counts transmitted packets without
-keeping track of what caused packets to fail).
-
-What Data to Collect
---------------------
-
-The first step is to consider what data to collect. In the absence of a
-standard, it is proposed to use the information set defined in
-[RFC2863]_ and [RFC4293]_. This proposal should not be read as implying
-that Neutron instrumentation data will be browsable via a MIB browser as
-that would be a potential Data Consumption model.
-
-.. [RFC2863] https://tools.ietf.org/html/rfc2863
-.. [RFC4293] https://tools.ietf.org/html/rfc4293
-
-For the reference implementation (Nova/VIF, OVS, and Linux Bridge), this
-section identifies what data is already available and how it can be
-mapped into the structures defined by the RFC. Other plugins are welcome
-to define either their own data sets and/or their own mappings
-to the data sets defined in the referenced RFCs.
-
-Focus here is on what is available from "stock" Linux and OpenStack.
-Additional statistics may become available if other items like NetFlow or
-sFlow are added to the mix, but those should be covered as an addition to
-the basic information discussed here.
-
-What is Available from Nova
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Within Nova, the libvirt driver makes the following host traffic statistics
-available under the get_diagnostics() and get_instance_diagnostics() calls
-on a per-virtual NIC basis:
-
-* Receive bytes, packets, errors and drops
-* Transmit bytes, packets, errors and drops
-
-There continues to be a long-running effort to get these counters into
-Ceilometer (the wiki page at [#]_ attempted to do this via a direct call
-while [#]_ is trying to accomplish this via notifications from Nova).
-Rather than propose another way of collecting these statistics from Nova,
-this devref takes the approach of declaring them out of scope until there is
-an agreed-upon method for getting the counters from Nova to Ceilometer, and
-then seeing if Neutron can/should piggy-back off of that.
-
-.. [#] https://wiki.openstack.org/wiki/EfficientMetering/FutureNovaInteractionModel
-.. [#] http://lists.openstack.org/pipermail/openstack-dev/2015-June/067589.html
-
-What is Available from Linux Bridge
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-For the Linux bridge, a check of [#]_ shows that IEEE 802.1d
-mandated statistics are only a "wishlist" item. The alternative
-is to use NETLINK/shell to list the interfaces attached to
-a particular bridge and then to collect statistics for each
-interface attached to the bridge. These statistics could then
-be mapped to appropriate places, as discussed below.
-
-Note: the examples below talk in terms of mapping counters
-available from the Linux operating system:
-
-* Receive bytes, packets, errors, dropped, overrun and multicast
-* Transmit bytes, packets, errors, dropped, carrier and collisions
-
-Available counters for interfaces on other operating systems
-can be mapped in a similar fashion.
-
-.. [#] http://git.kernel.org/cgit/linux/kernel/git/shemminger/bridge-utils.git/tree/doc/WISHLIST
-
-Of interest are counters from each of the following (as of this writing,
-Linux Bridge only supports legacy routers, so the DVR case need not be
-considered):
-
-* Compute node
-
-  * Instance tap interface
-
-* Network node
-
-  * DHCP namespace tap interface (if defined)
-  * Router namespace qr interface
-  * Router namespace qg interface
-
-What is Available from Open vSwitch
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Like Linux Bridge, the Open vSwitch implementation has interface counters
-that will be collected. Of interest are the receive and transmit counters
-from the following:
-
-Legacy Routing
-++++++++++++++
-
-* Compute node
-
-* * Instance tap interface
-
-* Network node
-
-  * DHCP namespace tap interface (if defined)
-  * Router namespace qr interface
-  * Router namespace qg interface
-
-Distributed Routing (DVR)
-+++++++++++++++++++++++++
-
-* Compute node
-
-  * Instance tap interface
-  * Router namespace qr interface
-  * FIP namespace fg interface
-
-* Network node
-
-  * DHCP tap interface (if defined)
-  * Router namespace qr interface
-  * SNAT namespace qg interface
-
-Mapping from Available Information to MIB Data Set
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following table summarizes how the interface counters are mapped
-into each MIB Data Set. Specific details are covered in the sections
-below:
-
-+---------+--------------+----------------------+
-| Node    | Interface    | Included in Data Set |
-|         |              +-----------+----------+
-|         |              | RFC2863   | RFC4293  |
-+=========+==============+===========+==========+
-| Compute | Instance tap | Yes       | No       |
-|         +--------------+-----------+----------+
-|         | Router qr    | Yes       | Yes      |
-|         +--------------+-----------+----------+
-|         | FIP fg       | No        | Yes      |
-+---------+--------------+-----------+----------+
-| Network | DHCP tap     | Yes       | No       |
-|         +--------------+-----------+----------+
-|         | Router qr    | Yes       | Yes      |
-|         +--------------+-----------+----------+
-|         | Router qg    | No        | Yes      |
-|         +--------------+-----------+----------+
-|         | SNAT sg      | No        | Yes      |
-+---------+--------------+-----------+----------+
-
-Note: because of the replication of the router qg interface when running
-distributed routing, aggregation of the individual counter information
-will be necessary to fill in the appropriate data set entries. This
-will be covered in the Data Aggregation section below.
-
-RFC 2863 Structures
-+++++++++++++++++++
-
-For each compute host, each network will be represented with a
-"switch", modeled by instances of ifTable and ifXTable. This
-mapping has the advantage that for a particular network, the
-view to the project or the operator is identical - the only
-difference is that the operator can see all networks, while a
-project will only see the networks under their project id.
-
-The current reference implementation identifies tap interface names with
-the Neutron port they are associated with. In turn, the Neutron port
-identifies the Neutron network. Therefore, it is possible to take counters
-from each tap interface and map them into entries in the appropriate tables,
-using the following proposed assignments:
-
-* ifTable
-
-  * ifInOctets = low 32 bits of interface received byte count
-  * ifInUcastPkts = low 32 bits of interface received packet count
-  * ifInDiscards = interface received dropped count
-  * ifInErrors = interface received errors count
-  * ifOutOctets = low 32 bits of interface transmit byte count
-  * ifOutUcastPkts = low 32 bits of interface transmit packet count
-  * ifOutDiscards = interface transmit dropped count
-  * ifOutErrors = interface transmit errors count
-
-* ifXTable
-
-  * ifHCInOctets = 64 bits of interface received byte count
-  * ifHCInUcastPkts = 64 bits of interface received packet count
-  * ifHCOutOctets = 64 bits of interface transmit byte count
-  * ifHCOutUcastPkts = 64 bits of interface transmit packet count
-
-Section 3.1.6 of [RFC2863]_ provides the details of why 64-bit sized
-counters need to be supported. The summary is that with increasing
-transmission bandwidth, use of 32-bit counters would require a
-problematic increase in counter polling frequency: a 1 Gb/s stream of
-full-sized packets carries 125 MB/s, so a 32-bit octet counter wraps
-after 2^32 / (1.25 * 10^8) seconds, i.e. roughly 34 seconds.
-
-RFC 4293 Structures
-+++++++++++++++++++
-
-Counters tracked by RFC 4293 come in two flavors: ones that are
-inherited from the interface, and those that track L3 events,
-such as fragmentation, re-assembly, truncations, etc. As the current
-instrumentation available from the reference implementation does not
-provide appropriate source information, the following counters are
-declared out of scope for this devref:
-
-* ipSystemStatsInHdrErrors, ipIfStatsInHdrErrors
-* ipSystemStatsInNoRoutes, ipIfStatsInNoRoutes
-* ipSystemStatsInAddrErrors, ipIfStatsInAddrErrors
-* ipSystemStatsInUnknownProtos, ipIfStatsInUnknownProtos
-* ipSystemStatsInTruncatedPkts, ipIfStatsInTruncatedPkts
-* ipSystemStatsInForwDatagrams, ipIfStatsInForwDatagrams
-* ipSystemStatsHCInForwDatagrams, ipIfStatsHCInForwDatagrams
-* ipSystemStatsReasmReqds, ipIfStatsReasmReqds
-* ipSystemStatsReasmOKs, ipIfStatsReasmOKs
-* ipSystemStatsReasmFails, ipIfStatsReasmFails
-* ipSystemStatsInDelivers, ipIfStatsInDelivers
-* ipSystemStatsHCInDelivers, ipIfStatsHCInDelivers
-* ipSystemStatsOutRequests, ipIfStatsOutRequests
-* ipSystemStatsHCOutRequests, ipIfStatsHCOutRequests
-* ipSystemStatsOutNoRoutes, ipIfStatsOutNoRoutes
-* ipSystemStatsOutForwDatagrams, ipIfStatsOutForwDatagrams
-* ipSystemStatsHCOutForwDatagrams, ipIfStatsHCOutForwDatagrams
-* ipSystemStatsOutFragReqds, ipIfStatsOutFragReqds
-* ipSystemStatsOutFragOKs, ipIfStatsOutFragOKs
-* ipSystemStatsOutFragFails, ipIfStatsOutFragFails
-* ipSystemStatsOutFragCreates, ipIfStatsOutFragCreates
-
-In ipIfStatsTable, the following counters will hold the same
-value as the referenced counter from RFC 2863:
-
-* ipIfStatsInReceives :== ifInUcastPkts
-* ipIfStatsHCInReceives :== ifHCInUcastPkts
-* ipIfStatsInOctets :== ifInOctets
-* ipIfStatsHCInOctets :== ifHCInOctets
-* ipIfStatsInDiscard :== ifInDiscards
-* ipIfStatsOutDiscard :== ifOutDiscards
-* ipIfStatsOutTransmits :== ifOutUcastPkts
-* ipIfStatsHCOutTransmits :== ifHCOutUcastPkts
-* ipIfStatsOutOctets :== ifOutOctets
-* ipIfStatsHCOutOctets :== ifHCOutOctets
-
-For ipSystemStatsTable, the following counters will hold values based
-on the following assignments. These summations are covered in more detail
-in the Data Aggregation section below:
-
-* ipSystemStatsInReceives :== sum of all ipIfStatsInReceives for the router
-* ipSystemStatsHCInReceives :== sum of all ipIfStatsHCInReceives for the router
-* ipSystemStatsInOctets :== sum of all ipIfStatsInOctets for the router
-* ipSystemStatsHCInOctets :== sum of all ipIfStatsHCInOctets for the router
-* ipSystemStatsInDiscard :== sum of all ipIfStatsInDiscard for the router
-* ipSystemStatsOutDiscard :== sum of all ipIfStatsOutDiscard for the router
-* ipSystemStatsOutTransmits :== sum of all ipIfStatsOutTransmits for the router
-* ipSystemStatsHCOutTransmits :== sum of all ipIfStatsHCOutTransmits for the
-  router
-* ipSystemStatsOutOctets :== sum of all ipIfStatsOutOctets for the router
-* ipSystemStatsHCOutOctets :== sum of all ipIfStatsHCOutOctets for the router
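-
-A minimal sketch of this aggregation (each element of the input is assumed to
-be a mapping of counter name to value for one interface of a router):
-
-.. code:: python
-
-    import collections
-
-    def aggregate_router_stats(per_interface_stats):
-        """Sum per-interface ipIfStats rows into ipSystemStats totals."""
-        totals = collections.Counter()
-        for counters in per_interface_stats:
-            totals.update(counters)
-        # e.g. totals['InReceives'] feeds ipSystemStatsInReceives
-        return dict(totals)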
-
-Data Collection
----------------
-
-There are two options for how data can be collected:
-
-#. The Neutron L3 and ML2 agents could collect the counters themselves.
-#. A separate collection agent could be started on each compute/network node
-   to collect counters.
-
-Because of the number of counters that need to be collected (for example,
-a cloud running legacy routing would need to collect, for each project,
-three counters from a network node plus a tap counter for each running
-instance), the initial proof of concept will run a separate agent, even
-though it would be desirable to reuse the existing L3 and ML2 agents. The
-separate agent will use separate threads to isolate the effects of counter
-collection from reporting. Once the performance of the collection agent is
-understood, merging the functionality into the L3 or ML2 agents can be
-considered. The collection thread will initially use shell commands via
-rootwrap, with the plan of moving to native Python libraries when support
-for them is available; a sketch of the collection step follows.
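-
-A minimal sketch of what the collection step might read, using the standard
-per-interface statistics files Linux exposes (the device name is a
-placeholder):
-
-.. code:: python
-
-    import os
-
-    STATS = ('rx_bytes', 'rx_packets', 'rx_errors', 'rx_dropped',
-             'tx_bytes', 'tx_packets', 'tx_errors', 'tx_dropped')
-
-    def read_interface_counters(device):
-        """Read raw counters for one interface from /sys/class/net."""
-        base = os.path.join('/sys/class/net', device, 'statistics')
-        counters = {}
-        for name in STATS:
-            with open(os.path.join(base, name)) as f:
-                counters[name] = int(f.read())
-        return counters
-
-Note that interfaces inside router or DHCP namespaces are not visible from
-the root namespace, which is one reason the equivalent reads initially go
-through shell commands (e.g. ip netns exec) via rootwrap.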
-
-In addition, there are two options for how to report counters back to the
-Neutron server: push or pull (or asynchronous notification vs polling).
-On the one hand, pull/polling eases the Neutron server's task in that it
-only needs to store/aggregate the results from the current polling cycle.
-However, this comes at the cost of dealing with the stale data issues that
-scaling a polling cycle will entail. On the other hand, asynchronous
-notification requires that the Neutron server has the capability to hold
-the current results from each collector. As the L3 and ML2 agents already
-use asynchronous notification to report status back to the Neutron
-server, the proof of concept will follow the same model to ease a future
-merging of functionality.
-
-Data Aggregation
-----------------
-
-Will be covered in a follow-on patch set.
-
-Data Consumption
-----------------
-
-Will be covered in a follow-on patch set.
diff --git a/doc/source/devref/l2_agents.rst b/doc/source/devref/l2_agents.rst
deleted file mode 100644 (file)
index daa3b2a..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-L2 Agent Networking
--------------------
-.. toctree::
-   :maxdepth: 3
-
-   openvswitch_agent
-   linuxbridge_agent
-   sriov_nic_agent
diff --git a/doc/source/devref/layer3.rst b/doc/source/devref/layer3.rst
deleted file mode 100644 (file)
index e382c17..0000000
+++ /dev/null
@@ -1,222 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Layer 3 Networking in Neutron - via Layer 3 agent & OpenVSwitch
-===============================================================
-
-This page discusses the usage of Neutron with Layer 3 functionality enabled.
-
-Neutron logical network setup
------------------------------
-::
-
-        vagrant@precise64:~/devstack$ neutron net-list
-        +--------------------------------------+---------+--------------------------------------------------+
-        | id                                   | name    | subnets                                          |
-        +--------------------------------------+---------+--------------------------------------------------+
-        | 84b6b0cc-503d-448a-962f-43def05e85be | public  | 3a56da7c-2f6e-41af-890a-b324d7bc374d             |
-        | a4b4518c-800d-4357-9193-57dbb42ac5ee | private | 1a2d26fb-b733-4ab3-992e-88554a87afa6 10.0.0.0/24 |
-        +--------------------------------------+---------+--------------------------------------------------+
-        vagrant@precise64:~/devstack$ neutron subnet-list
-        +--------------------------------------+------+-------------+--------------------------------------------+
-        | id                                   | name | cidr        | allocation_pools                           |
-        +--------------------------------------+------+-------------+--------------------------------------------+
-        | 1a2d26fb-b733-4ab3-992e-88554a87afa6 |      | 10.0.0.0/24 | {"start": "10.0.0.2", "end": "10.0.0.254"} |
-        +--------------------------------------+------+-------------+--------------------------------------------+
-        vagrant@precise64:~/devstack$ neutron port-list
-        +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
-        | id                                   | name | mac_address       | fixed_ips                                                                       |
-        +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
-        | 0ba8700e-da06-4318-8fe9-00676dd994b8 |      | fa:16:3e:78:43:5b | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.1"} |
-        | b2044570-ad52-4f31-a2c3-5d767dc9a8a7 |      | fa:16:3e:5b:cf:4c | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.3"} |
-        | bb60d1bb-0cab-41cb-9678-30d2b2fdb169 |      | fa:16:3e:af:a9:bd | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.2"} |
-        +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
-
-        vagrant@precise64:~/devstack$ neutron subnet-show 1a2d26fb-b733-4ab3-992e-88554a87afa6
-        +------------------+--------------------------------------------+
-        | Field            | Value                                      |
-        +------------------+--------------------------------------------+
-        | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} |
-        | cidr             | 10.0.0.0/24                                |
-        | dns_nameservers  |                                            |
-        | enable_dhcp      | True                                       |
-        | gateway_ip       | 10.0.0.1                                   |
-        | host_routes      |                                            |
-        | id               | 1a2d26fb-b733-4ab3-992e-88554a87afa6       |
-        | ip_version       | 4                                          |
-        | name             |                                            |
-        | network_id       | a4b4518c-800d-4357-9193-57dbb42ac5ee       |
-        | tenant_id        | 3368290ab10f417390acbb754160dbb2           |
-        +------------------+--------------------------------------------+
-
-
-Neutron logical router setup
-----------------------------
-
-* http://docs.openstack.org/networking-guide/scenario_legacy_ovs.html
-
-
-::
-
-        vagrant@precise64:~/devstack$ neutron router-list
-        +--------------------------------------+---------+--------------------------------------------------------+
-        | id                                   | name    | external_gateway_info                                  |
-        +--------------------------------------+---------+--------------------------------------------------------+
-        | 569469c7-a2a5-4d32-9cdd-f0b18a13f45e | router1 | {"network_id": "84b6b0cc-503d-448a-962f-43def05e85be"} |
-        +--------------------------------------+---------+--------------------------------------------------------+
-        vagrant@precise64:~/devstack$ neutron router-show router1
-        +-----------------------+--------------------------------------------------------+
-        | Field                 | Value                                                  |
-        +-----------------------+--------------------------------------------------------+
-        | admin_state_up        | True                                                   |
-        | external_gateway_info | {"network_id": "84b6b0cc-503d-448a-962f-43def05e85be"} |
-        | id                    | 569469c7-a2a5-4d32-9cdd-f0b18a13f45e                   |
-        | name                  | router1                                                |
-        | routes                |                                                        |
-        | status                | ACTIVE                                                 |
-        | tenant_id             | 3368290ab10f417390acbb754160dbb2                       |
-        +-----------------------+--------------------------------------------------------+
-        vagrant@precise64:~/devstack$ neutron router-port-list router1
-        +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
-        | id                                   | name | mac_address       | fixed_ips                                                                       |
-        +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
-        | 0ba8700e-da06-4318-8fe9-00676dd994b8 |      | fa:16:3e:78:43:5b | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.1"} |
-        +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
-
-Neutron Routers are realized in OpenVSwitch
--------------------------------------------
-
-.. image:: images/under-the-hood-scenario-1-ovs-network.png
-
-
-"router1" in the Neutron logical network is realized through a port ("qr-0ba8700e-da") in OpenVSwitch - attached to "br-int"::
-
-        vagrant@precise64:~/devstack$ sudo ovs-vsctl show
-        b9b27fc3-5057-47e7-ba64-0b6afe70a398
-            Bridge br-int
-                Port "qr-0ba8700e-da"
-                    tag: 1
-                    Interface "qr-0ba8700e-da"
-                        type: internal
-                Port br-int
-                    Interface br-int
-                        type: internal
-                Port int-br-ex
-                    Interface int-br-ex
-                Port "tapbb60d1bb-0c"
-                    tag: 1
-                    Interface "tapbb60d1bb-0c"
-                        type: internal
-                Port "qvob2044570-ad"
-                    tag: 1
-                    Interface "qvob2044570-ad"
-                Port "int-br-eth1"
-                    Interface "int-br-eth1"
-            Bridge "br-eth1"
-                Port "phy-br-eth1"
-                    Interface "phy-br-eth1"
-                Port "br-eth1"
-                    Interface "br-eth1"
-                        type: internal
-            Bridge br-ex
-                Port phy-br-ex
-                    Interface phy-br-ex
-                Port "qg-0143bce1-08"
-                    Interface "qg-0143bce1-08"
-                        type: internal
-                Port br-ex
-                    Interface br-ex
-                        type: internal
-            ovs_version: "1.4.0+build0"
-
-
-        vagrant@precise64:~/devstack$ brctl show
-        bridge name    bridge id               STP enabled     interfaces
-        br-eth1                0000.e2e7fc5ccb4d       no
-        br-ex          0000.82ee46beaf4d       no              phy-br-ex
-                                                                qg-39efb3f9-f0
-                                                                qg-77e0666b-cd
-        br-int         0000.5e46cb509849       no              int-br-ex
-                                                                qr-54c9cd83-43
-                                                                qvo199abeb2-63
-                                                                qvo1abbbb60-b8
-                                                                tap74b45335-cc
-        qbr199abeb2-63         8000.ba06e5f8675c       no              qvb199abeb2-63
-                                                                tap199abeb2-63
-        qbr1abbbb60-b8         8000.46a87ed4fb66       no              qvb1abbbb60-b8
-                                                                tap1abbbb60-b8
-        virbr0         8000.000000000000       yes
-
-Finding the router in ip/ifconfig
----------------------------------
-
-* http://docs.openstack.org/admin-guide-cloud/networking.html
-
-        The neutron-l3-agent uses the Linux IP stack and iptables to perform L3 forwarding and NAT.
-        In order to support multiple routers with potentially overlapping IP addresses, neutron-l3-agent
-        defaults to using Linux network namespaces to provide isolated forwarding contexts. As a result,
-        the IP addresses of routers will not be visible simply by running "ip addr list" or "ifconfig" on
-        the node. Similarly, you will not be able to directly ping fixed IPs.
-
-        To do either of these things, you must run the command within a particular router's network
-        namespace. The namespace will have the name "qrouter-<UUID of the router>".
-
-.. image:: images/under-the-hood-scenario-1-ovs-netns.png
-
-For example::
-
-        vagrant@precise64:~$ neutron router-list
-        +--------------------------------------+---------+--------------------------------------------------------+
-        | id                                   | name    | external_gateway_info                                  |
-        +--------------------------------------+---------+--------------------------------------------------------+
-        | ad948c6e-afb6-422a-9a7b-0fc44cbb3910 | router1 | {"network_id": "e6634fef-03fa-482a-9fa7-e0304ce5c995"} |
-        +--------------------------------------+---------+--------------------------------------------------------+
-        vagrant@precise64:~/devstack$ sudo ip netns exec qrouter-ad948c6e-afb6-422a-9a7b-0fc44cbb3910 ip addr list
-        18: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
-            link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
-            inet 127.0.0.1/8 scope host lo
-            inet6 ::1/128 scope host
-               valid_lft forever preferred_lft forever
-        19: qr-54c9cd83-43: <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
-            link/ether fa:16:3e:dd:c1:8f brd ff:ff:ff:ff:ff:ff
-            inet 10.0.0.1/24 brd 10.0.0.255 scope global qr-54c9cd83-43
-            inet6 fe80::f816:3eff:fedd:c18f/64 scope link
-               valid_lft forever preferred_lft forever
-        20: qg-77e0666b-cd: <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
-            link/ether fa:16:3e:1f:d3:ec brd ff:ff:ff:ff:ff:ff
-            inet 192.168.27.130/28 brd 192.168.27.143 scope global qg-77e0666b-cd
-            inet6 fe80::f816:3eff:fe1f:d3ec/64 scope link
-               valid_lft forever preferred_lft forever
-
-
-Provider Networking
--------------------
-
-Neutron can also be configured to create `provider networks <http://docs.openstack.org/admin-guide-cloud/networking_adv-features.html#provider-networks>`_.
-
-Further Reading
----------------
-* `Packet Pushers - Neutron Network Implementation on Linux <http://packetpushers.net/openstack-quantum-network-implementation-in-linux/>`_
-* `OpenStack Cloud Administrator Guide <http://docs.openstack.org/admin-guide-cloud/networking.html>`_
-* `Neutron - Layer 3 API extension usage guide <http://docs.openstack.org/api/openstack-network/2.0/content/router_ext.html>`_
-* `Darragh O'Reilly - The Quantum L3 router and floating IPs <http://techbackground.blogspot.com/2013/05/the-quantum-l3-router-and-floating-ips.html>`_
diff --git a/doc/source/devref/linuxbridge_agent.rst b/doc/source/devref/linuxbridge_agent.rst
deleted file mode 100644 (file)
index e6a1371..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-L2 Networking with Linux Bridge
-===============================
-
-This Agent uses the `Linux Bridge
-<http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge>`_ to
-provide L2 connectivity for VM instances running on the compute node to the
-public network. A graphical illustration of the deployment can be found in
-the `Networking Guide
-<http://docs.openstack.org/networking-guide/scenario_legacy_lb.html>`_.
-
-In most common deployments, there is a compute and a network node. On both the
-compute and the network node, the Linux Bridge Agent will manage virtual
-switches, connectivity among them, and interaction via virtual ports with other
-network components such as namespaces and underlying interfaces. Additionally,
-on the compute node, the Linux Bridge Agent will manage security groups.
-
-Three use cases and their packet flow are documented as follows:
-
-1. `Legacy implementation with Linux Bridge
-   <http://docs.openstack.org/networking-guide/deploy_scenario1b.html>`_
-
-2. `High Availability using L3HA with Linux Bridge
-   <http://docs.openstack.org/networking-guide/deploy_scenario3b.html>`_
-
-3. `Provider networks with Linux Bridge
-   <http://docs.openstack.org/networking-guide/deploy_scenario4b.html>`_
diff --git a/doc/source/devref/ml2_ext_manager.rst b/doc/source/devref/ml2_ext_manager.rst
deleted file mode 100644 (file)
index 9f82784..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-ML2 Extension Manager
-=====================
-
-The extension manager for ML2 was introduced in Juno (more details
-can be found in the approved `spec <http://specs.openstack.org/openstack/neutron-specs/specs/juno/neutron-ml2-mechanismdriver-extensions.html>`_). The feature allows for extending ML2 resources without
-actually having to introduce cross-cutting concerns to ML2. The
-mechanism has been applied to a number of use cases, and extensions
-that currently use this framework are available under `ml2/extensions <https://github.com/openstack/neutron/tree/master/neutron/plugins/ml2/extensions>`_.
diff --git a/doc/source/devref/neutron_api.rst b/doc/source/devref/neutron_api.rst
deleted file mode 100644 (file)
index 28c779e..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Neutron public API
-==================
-
-The Neutron main tree serves as a library for multiple subprojects that rely on
-different modules from the neutron.* namespace to accommodate their needs.
-Specifically, advanced service repositories and open source or vendor
-plugin/driver repositories do so.
-
-Neutron modules differ a lot in their API stability, and no part of the tree
-is explicitly marked to be consumed by other projects.
-
-That said, there are modules that other projects should definitely avoid relying on.
-
-Specifically, no external repository should use anything located under the
-neutron.openstack.common.* import path. This code belongs to oslo-incubator
-modules and is not meant to work for consumers other than the neutron main tree
-itself. (The only exception is made for advanced service repositories that are
-tightly controlled by the neutron community.) Long story short, if your
-repository uses those modules, please switch to the corresponding oslo
-libraries or use your own copy of the oslo-incubator files, as sketched below.
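-
-A minimal sketch of the kind of import swap this implies (uuidutils is just
-one example of an oslo-incubator module that graduated to a library):
-
-.. code:: python
-
-    # discouraged: vendored oslo-incubator code, e.g.
-    #     from neutron.openstack.common import uuidutils
-    # preferred: the graduated oslo library
-    from oslo_utils import uuidutils
-
-    print(uuidutils.generate_uuid())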
-
-
-Breakages
----------
-
-Neutron API is not very stable, and there are cases when a desired change in
-neutron tree is expected to trigger breakage for one or more external
-repositories under the neutron tent. Below you can find a list of known
-incompatible changes that could or are known to trigger those breakages.
-The changes are listed in reverse chronological order (newer at the top).
-
-* change: Consume ConfigurableMiddleware from oslo_middleware.
-
-  - commit: If7360608f94625b7d0972267b763f3e7d7624fee
-  - solution: switch to oslo_middleware.base.ConfigurableMiddleware;
-              stop using neutron.wsgi.Middleware and neutron.wsgi.Debug.
-  - severity: Low (some out-of-tree plugins might be affected).
-
-* change: Consume sslutils and wsgi modules from oslo.service.
-
-  - commit: Ibfdf07e665fcfcd093a0e31274e1a6116706aec2
-  - solution: switch to oslo_service.wsgi.Router; stop using neutron.wsgi.Router.
-  - severity: Low (some out-of-tree plugins might be affected).
-
-* change: oslo.service adopted.
-
-  - commit: 6e693fc91dd79cfbf181e3b015a1816d985ad02c
-  - solution: switch to the oslo_service.* namespace; stop using ANY neutron.openstack.* contents.
-  - severity: low (plugins must not rely on that subtree).
-
-* change: oslo.utils.fileutils adopted.
-
-  - commit: I933d02aa48260069149d16caed02b020296b943a
-  - solution: switch to the oslo_utils.fileutils module; stop using the neutron.openstack.fileutils module.
-  - severity: low (plugins must not rely on that subtree).
-
-* change: Reuse caller's session in DB methods.
-
-  - commit: 47dd65cf986d712e9c6ca5dcf4420dfc44900b66
-  - solution: Add context to args and reuse.
-  - severity: High (mostly undetected, because 3rd party CI run Tempest tests only).
-
-* change: switches to oslo.log, removes neutron.openstack.common.log.
-
-  - commit: 22328baf1f60719fcaa5b0fbd91c0a3158d09c31
-  - solution: a) switch to oslo.log; b) copy log module into your tree and use it
-    (may not work due to conflicts between the module and oslo.log configuration options).
-  - severity: High (most CI systems are affected).
-
-* change: Implements reorganize-unit-test-tree spec.
-
-  - commit: 1105782e3914f601b8f4be64939816b1afe8fb54
-  - solution: update existing unit tests in affected code to reflect the new locations.
-  - severity: High (mostly undetected, because 3rd party CI run Tempest tests only).
-
-* change: drop linux/ovs_lib compat layer.
-
-  - commit: 3bbf473b49457c4afbfc23fd9f59be8aa08a257d
-  - solution: switch to using neutron/agent/common/ovs_lib.py.
-  - severity: High (most CI systems are affected).
diff --git a/doc/source/devref/openvswitch_agent.rst b/doc/source/devref/openvswitch_agent.rst
deleted file mode 100644 (file)
index 62104cd..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-OpenVSwitch L2 Agent
-====================
-
-This Agent uses the `OpenVSwitch`_ virtual switch to create L2
-connectivity for instances, along with bridges created in conjunction
-with OpenStack Nova for filtering.
-
-ovs-neutron-agent can be configured to use different networking technologies
-to create tenant isolation.
-These technologies are implemented as ML2 type drivers which are used in
-conjunction with the OpenVSwitch mechanism driver.
-
-VLAN Tags
----------
-
-.. image:: images/under-the-hood-scenario-1-ovs-compute.png
-
-.. _OpenVSwitch: http://openvswitch.org
-
-
-GRE Tunnels
------------
-
-GRE tunneling is documented in depth in the `Networking in too much
-detail <http://openstack.redhat.com/Networking_in_too_much_detail>`_
-article by Red Hat.
-
-VXLAN Tunnels
--------------
-
-VXLAN is an overlay technology which encapsulates layer 2 MAC frames
-within UDP packets.
-More information can be found on `the VXLAN wiki page
-<http://en.wikipedia.org/wiki/Virtual_Extensible_LAN>`_.
-
-Geneve Tunnels
---------------
-
-Geneve uses UDP as its transport protocol and is dynamic
-in size, using extensible option headers.
-It is important to note that it is currently only supported in
-newer kernels (kernel >= 3.18, OVS version >= 2.4).
-More information can be found in the `Geneve RFC document
-<https://tools.ietf.org/html/draft-ietf-nvo3-geneve-00>`_.
-
-
-Bridge Management
------------------
-
-In order to make the agent capable of handling more than one tunneling
-technology, to decouple the requirements of segmentation technology
-from tenant isolation, and to preserve backward compatibility for OVS
-agents working without tunneling, the agent relies on a tunneling bridge,
-or br-tun, and the well known integration bridge, or br-int.
-
-All VM VIFs are plugged into the integration bridge. VM VIFs on a given
-virtual network share a common "local" VLAN (i.e. not propagated
-externally). The VLAN id of this local VLAN is mapped to the physical
-networking details realizing that virtual network.
-
-For virtual networks realized as VXLAN/GRE tunnels, a Logical Switch
-(LS) identifier is used to differentiate tenant traffic on inter-HV
-tunnels. A mesh of tunnels is created to other Hypervisors in the
-cloud. These tunnels originate and terminate on the tunneling bridge
-of each hypervisor, leaving br-int unaffected. Port patching is done
-to connect local VLANs on the integration bridge to inter-hypervisor
-tunnels on the tunnel bridge.
-
-For each virtual network realized as a VLAN or flat network, a veth
-or a pair of patch ports is used to connect the local VLAN on
-the integration bridge with the physical network bridge, with flow
-rules adding, modifying, or stripping VLAN tags as necessary, thus
-preserving backward compatibility with the way the OVS agent used
-to work prior to the tunneling capability (for more details, please
-look at https://review.openstack.org/#/c/4367).
-
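-As a rough illustration of the bridge wiring described above, the
-following sketch shows how the patch ports between br-int and br-tun
-could be created with the agent's ``ovs_lib`` helpers. It is a
-simplified, hypothetical rendition of the agent's startup logic, not
-the actual implementation::
-
-  from neutron.agent.common import ovs_lib
-
-  int_br = ovs_lib.OVSBridge('br-int')
-  tun_br = ovs_lib.OVSBridge('br-tun')
-
-  # Each side of the patch pair names its peer, linking the two bridges.
-  patch_tun_ofport = int_br.add_patch_port('patch-tun', 'patch-int')
-  patch_int_ofport = tun_br.add_patch_port('patch-int', 'patch-tun')
-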
-Bear in mind that this design decision may be overhauled in the
-future to support existing VLAN-tagged traffic (coming from NFV VMs
-for instance) and/or to deal with potential QinQ support natively
-available in Open vSwitch.
-
-
-Further Reading
----------------
-
-* `Darragh O'Reilly - The Open vSwitch plugin with VLANs <http://techbackground.blogspot.com/2013/07/the-open-vswitch-plugin-with-vlans.html>`_
diff --git a/doc/source/devref/oslo-incubator.rst b/doc/source/devref/oslo-incubator.rst
deleted file mode 100644 (file)
index ddc2a5d..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-oslo-incubator
-==============
-
-A number of modules used are from the oslo-incubator project. Imported modules
-that are directly used by Neutron are listed in openstack-common.conf.
-
-More information can be found in `the corresponding policy
-<http://specs.openstack.org/openstack/oslo-specs/specs/policy/incubator.html>`_.
diff --git a/doc/source/devref/ovs_vhostuser.rst b/doc/source/devref/ovs_vhostuser.rst
deleted file mode 100644 (file)
index 769ad77..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Neutron Open vSwitch vhost-user support
-=======================================
-
-Neutron supports using Open vSwitch + DPDK vhost-user interfaces directly in
-the OVS ML2 driver and agent. The current implementation relies on multiple
-configuration values and includes runtime verification of Open vSwitch's
-capability to provide these interfaces.
-
-The OVS agent detects the capability of the underlying Open vSwitch
-installation and passes that information over RPC via the agent
-'configurations' dictionary. The ML2 driver uses this information to select
-the proper VIF type and binding details.
-
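-As a hypothetical illustration (the exact keys are assumptions based on the
-description above), the agent could report its datapath capability in the
-'configurations' dictionary sent over RPC::
-
-  # Simplified sketch of an agent state report; not the full payload.
-  agent_state = {
-      'binary': 'neutron-openvswitch-agent',
-      'configurations': {
-          'datapath_type': 'netdev',  # signals DPDK/vhost-user capability
-          'vhostuser_socket_dir': '/var/run/openvswitch',
-      },
-  }
-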
-Neutron+OVS+DPDK platform requirements
---------------------------------------
-
-* OVS 2.4.0+
-* DPDK 2.0+
-
-Neutron OVS+DPDK vhost-user config
-----------------------------------
-
-::
-
-  [OVS]
-  datapath_type=netdev
-  vhostuser_socket_dir=/var/run/openvswitch
-
-When OVS is running with DPDK support enabled, and the datapath_type is set to
-"netdev", then the OVS ML2 driver will use the vhost-user VIF type and pass
-the necessary binding details to use OVS+DPDK and vhost-user sockets. This
-includes the vhostuser_socket_dir setting, which must match the directory
-passed to ovs-vswitchd on startup.
-
-What about the networking-ovs-dpdk repo?
-----------------------------------------
-
-The networking-ovs-dpdk repo will continue to exist and undergo active
-development. This feature just removes the need for a separate ML2 driver
-and OVS agent in the networking-ovs-dpdk repo. The networking-ovs-dpdk
-project also provides a devstack plugin (which enables automated CI), a
-puppet module, and an OpenFlow-based security group implementation.
diff --git a/doc/source/devref/plugin-api.rst b/doc/source/devref/plugin-api.rst
deleted file mode 100644 (file)
index 7bb68d0..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Neutron Plugin Architecture
-===========================
-
-`Salvatore Orlando: How to write a Neutron Plugin (if you really need to) <http://www.slideshare.net/salv_orlando/how-to-write-a-neutron-plugin-if-you-really-need-to>`_
-
-Plugin API
-----------
-
-.. automodule:: neutron.neutron_plugin_base_v2
-
-.. autoclass:: NeutronPluginBaseV2
-    :members:
diff --git a/doc/source/devref/policy.rst b/doc/source/devref/policy.rst
deleted file mode 100644 (file)
index 83a1c89..0000000
+++ /dev/null
@@ -1,314 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Authorization Policy Enforcement
-==================================
-
-Like most OpenStack projects, Neutron leverages oslo_policy [#]_. However, since
-Neutron loves to be special and complicate every developer's life, it also
-"augments" oslo_policy capabilities with:
-
- * A wrapper module with its own API: neutron.policy;
- * The ability to add fine-grained checks on attributes for resources in
-   request bodies;
- * The ability to use the policy engine to filter out attributes in responses;
- * Some custom rule checks beyond those defined in oslo_policy.
-
-This document discusses Neutron-specific aspects of policy enforcement, and in
-particular how the enforcement logic is wired into API processing.
-For any other information please refer to the developer documentation for
-oslo_policy [#]_.
-
-Authorization workflow
------------------------
-
-The Neutron API controllers perform policy checks in two phases during the
-processing of an API request:
-
- * Request authorization, immediately before dispatching the request to the
-   plugin layer for ``POST``, ``PUT``, and ``DELETE``, and immediately after
-   returning from the plugin layer for ``GET`` requests;
- * Response filtering, when building the response to be returned to the API
-   consumer.
-
-Request authorization
-~~~~~~~~~~~~~~~~~~~~~~
-
-The aim of this step is to authorize processing for a request or reject it
-with an error status code.
-This step uses the ``neutron.policy.enforce`` routine. This routine raises
-``oslo_policy.PolicyNotAuthorized`` when policy enforcement fails. The Neutron
-REST API controllers catch this exception and return:
-
- * A 403 response code on a ``POST`` request or a ``PUT`` request for an
-   object owned by the tenant submitting the request;
- * A 403 response for failures while authorizing API actions such as
-   ``add_router_interface``;
- * A 404 response for ``DELETE``, ``GET`` and all other ``PUT`` requests.
-
-For ``DELETE`` operations the resource must first be fetched. This is done
-by invoking the same ``_item`` [#]_ method used for processing ``GET`` requests.
-This is also true for ``PUT`` operations, since the Neutron API implements
-``PATCH`` semantics for ``PUTs``.
-The criteria to evaluate are built in the ``_build_match_rule`` [#]_ routine.
-This routine takes the following input parameters:
-
- * The action to be performed, in the ``<operation>_<resource>`` form,
-   e.g. ``create_network``;
- * The data to use for performing checks. For ``POST`` operations this could
-   be a partial specification of the object, whereas it is always a full
-   specification for ``GET``, ``PUT``, and ``DELETE`` requests, as resource
-   data are retrieved before dispatching the call to the plugin layer;
- * The collection name for the resource specified in the previous parameter;
-   for instance, for a network it would be "networks".
-
-The ``_build_match_rule`` routine returns an ``oslo_policy.RuleCheck`` instance
-built in the following way:
-
- * Always add a check for the action being performed. This will match
-   a policy like ``create_network`` in ``policy.json``;
- * Return for ``GET`` operations; more detailed checks will be performed anyway
-   when building the response;
- * For each attribute which has been explicitly specified in the request,
-   create a rule matching policy names in the form
-   ``<operation>_<resource>:<attribute>``, and link it with the
-   previous rule with an 'And' relationship (using ``oslo_policy.AndCheck``);
-   this step will be performed only if the enforce_policy flag is set to
-   True in the resource attribute descriptor (usually found in a data
-   structure called ``RESOURCE_ATTRIBUTE_MAP``);
- * If the attribute is a composite one, then further rules will be created;
-   these will match policy names in the form ``<operation>_<resource>:
-   <attribute>:<sub_attribute>``. An 'And' relationship will be used in this
-   case too.
-
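-For example, ``policy.json`` entries matching the policy names above could
-look like the following sketch (the rule values are illustrative, not
-necessarily Neutron's defaults)::
-
-  {
-      "create_network": "rule:admin_or_owner",
-      "create_network:shared": "rule:admin_only",
-      "create_network:provider:physical_network": "rule:admin_only"
-  }
-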
-As all the rules to verify are linked by 'And' relationships, all the policy
-checks should succeed in order for a request to be authorized. Rule
-verification is performed by ``oslo_policy`` with no "customization" from the
-Neutron side.
-
-Response Filtering
-~~~~~~~~~~~~~~~~~~~
-
-Some Neutron extensions, like the provider networks one, add attributes
-to resources which are, however, not meant to be consumed by all clients. This
-might be because these attributes contain implementation details, or are
-meant only to be used when exchanging information between services, such
-as Nova and Neutron.
-
-For this reason the policy engine is invoked again when building API
-responses. This is achieved by the ``_exclude_attributes_by_policy`` [#]_
-method in ``neutron.api.v2.base.Controller``.
-
-This method, for each attribute in the response returned by the plugin layer,
-first checks if the ``is_visible`` flag is True. In that case it proceeds to
-check policies for the attribute; if the policy check fails, the attribute
-is added to a list of attributes that should be removed from the response
-before returning it to the API client.
-
-The neutron.policy API
-------------------------
-
-The ``neutron.policy`` module exposes a simple API whose main goal is to allow the
-REST API controllers to implement the authorization workflow discussed in this
-document. It is a bad practice to call the policy engine from within the plugin
-layer, as this would make request authorization dependent on configured
-plugins, and therefore make API behaviour dependent on the plugin itself, which
-defies Neutron's tenet of being backend agnostic.
-
-The neutron.policy API exposes the following routines:
-
- * ``init``
-   Initializes the policy engine, loading rules from the json policy file(s).
-   This method can safely be called several times.
- * ``reset``
-   Clears all the rules currently configured in the policy engine. It is
-   called in unit tests and at the end of the initialization of the core API
-   router [#]_ in order to ensure rules are loaded after all the extensions
-   are loaded.
- * ``refresh``
-   Combines init and reset. Called when a SIGHUP signal is sent to an API
-   worker.
- * ``set_rules``
-   Explicitly sets the policy engine's rules. Used only in unit tests.
- * ``check``
-   Performs a check using the policy engine. Builds match rules as described
-   in this document, and then evaluates the resulting rule using oslo_policy's
-   policy engine. Returns True if the check succeeds, False otherwise.
- * ``enforce``
-   Operates like the check routine, but raises an exception if the check in
-   oslo_policy fails.
- * ``check_is_admin``
-   Enforces the predefined context_is_admin rule; used to determine the
-   is_admin property for a neutron context.
- * ``check_is_advsvc``
-   Enforces the predefined context_is_advsvc rule; used to determine the
-   is_advsvc property for a neutron context.
-
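-As a minimal, hypothetical sketch of how a REST controller could use this
-API (the action name and target dictionary are illustrative)::
-
-  from neutron import policy
-
-  def update_network(context, id, network_data):
-      # Raises oslo_policy.PolicyNotAuthorized on failure; the REST
-      # controller maps that to a 403 or 404 as described above.
-      policy.enforce(context, 'update_network', network_data)
-      # ... dispatch to the plugin layer ...
-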
-Neutron specific policy rules
-------------------------------
-
-Neutron provides two additional policy rule classes in order to support the
-"augmented" authorization capabilities it provides. They both extend
-``oslo_policy.RuleCheck`` and are registered using the
-``oslo_policy.register`` decorator.
-
-OwnerCheck: Extended Checks for Resource Ownership
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This class is registered for rules matching the ``tenant_id`` keyword and
-overrides the generic check performed by oslo_policy in this case.
-It is used in those cases where Neutron needs to check whether the tenant
-submitting a request for a new resource owns the parent resource of the one
-being created. Current usages of ``OwnerCheck`` include, for instance,
-creating and updating a subnet.
-
-The check, performed in the ``__call__`` method, works as follows:
-
-  * verify if the target field is already in the target data. If yes, then
-    simply verify whether the value for the target field in target data
-    is equal to the value for the same field in credentials, just like
-    ``oslo_policy.GenericCheck`` would do. This is also the most frequent case
-    as the target field is usually ``tenant_id``;
-  * if the previous check failed, extract a parent resource type and a
-    parent field name from the target field. For instance
-    ``networks:tenant_id`` identifies the ``tenant_id`` attribute of the
-    ``network`` resource;
-  * if no parent resource or target field could be identified, raise a
-    ``PolicyCheckError`` exception;
-  * retrieve a 'parent foreign key' from the ``RESOURCE_FOREIGN_KEYS`` data
-    structure in ``neutron.api.v2.attributes``. This foreign key is simply the
-    attribute acting as a primary key in the parent resource. A
-    ``PolicyCheckError`` exception will be raised if such a 'parent foreign key'
-    cannot be retrieved;
-  * using the core plugin, retrieve an instance of the resource having the
-    'parent foreign key' as an identifier;
-  * finally, verify whether the target field in this resource matches the
-    one in the initial request data. For instance, for a port create request,
-    verify whether the ``tenant_id`` of the port data structure matches the
-    ``tenant_id`` of the network where this port is being created.
-
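-As an illustration, the default Neutron policy file contains rules of this
-kind; the following is a plausible sketch (exact values may differ across
-releases)::
-
-  {
-      "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
-      "create_port:mac_address": "rule:admin_or_network_owner"
-  }
-
-Here the ``tenant_id:%(network:tenant_id)s`` clause triggers ``OwnerCheck``,
-which fetches the network referenced in the request in order to compare its
-``tenant_id`` with the one in the request credentials.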
-
-FieldCheck: Verify Resource Attributes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This class is registered with the policy engine for rules matching the 'field'
-keyword, and provides a way to perform fine grained checks on resource
-attributes. For instance, using this class of rules it is possible to specify
-a rule for granting every tenant read access to shared resources.
-
-In policy.json, a FieldCheck rule is specified in the following way::
-
-  field: <resource>:<field>=<value>
-
-This will result in the initialization of a FieldCheck that will check for
-``<field>`` in the target resource data, and return ``True`` if it is equal
-to ``<value>``, or return ``False`` if the ``<field>`` either is not equal to
-``<value>`` or does not exist at all.
-
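-For instance, shared networks are typically made readable by every tenant
-with rules similar to the following sketch (values are illustrative, not
-necessarily the defaults)::
-
-  {
-      "shared": "field:networks:shared=True",
-      "get_network": "rule:admin_or_owner or rule:shared"
-  }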
-
-Guidance for API developers
-----------------------------
-
-When developing REST APIs for Neutron it is important to be aware of how the
-policy engine will authorize these requests. This is true both for APIs
-served by Neutron "core" and for the APIs served by the various Neutron
-"stadium" services.
-
- * If an attribute of a resource might be subject to authorization checks,
-   then the ``enforce_policy`` attribute should be set to ``True``. While
-   setting this flag to ``True`` for each attribute is a viable strategy,
-   it is worth noting that this will require a call to the policy engine
-   for each attribute, thus considerably increasing the time required to
-   complete policy checks for a resource. This could result in a scalability
-   issue, especially in the case of list operations retrieving a large
-   number of resources;
- * Some resource attributes, even if not directly used in policy checks,
-   might still be required by the policy engine. This is for instance the
-   case of the ``tenant_id`` attribute. For these attributes the
-   ``required_by_policy`` attribute should always be set to ``True``. This will
-   ensure that the attribute is included in the resource data sent to the
-   policy engine for evaluation;
- * The ``tenant_id`` attribute is a fundamental one in Neutron API request
-   authorization. The default policy, ``admin_or_owner``, uses it to validate
-   if a tenant owns the resource it is trying to operate on. To this aim,
-   if a resource without a tenant_id is created, it is important to ensure
-   that ad-hoc authZ policies are specified for this resource.
- * There is still only one check which is hardcoded in Neutron's API layer:
-   the check to verify that a tenant owns the network on which it is creating
-   a port. This check is hardcoded and is always executed when creating a
-   port, unless the network is shared. Unfortunately, a solution for performing
-   this check in an efficient way through the policy engine has not yet been
-   found. Due to its nature, there is no way to override this check using the
-   policy engine.
- * It is strongly advised not to perform policy checks in the plugin or in
-   the database management classes. This might lead to divergent API
-   behaviours across plugins. Also, it might leave the Neutron DB in an
-   inconsistent state if a request is not authorized after it has already
-   been dispatched to the backend.
-
-
-Notes
------------------------
-
- * No authorization checks are performed for requests coming from the RPC over
-   AMQP channel. For all these requests a neutron admin context is built, and
-   the plugins will process them as such.
- * For ``PUT`` and ``DELETE`` requests a 404 error is returned on request
-   authorization failures rather than a 403, unless the tenant submitting the
-   request owns the resource to update or delete. This is to avoid conditions
-   in which an API client might try to find out other tenants' resource
-   identifiers by sending out ``PUT`` and ``DELETE`` requests for random
-   resource identifiers.
- * There is no way at the moment to specify an ``OR`` relationship between two
-   attributes of a given resource (e.g. ``port.name == 'meh' or
-   port.status == 'DOWN'``), unless a rule with the OR condition is explicitly
-   added to the policy.json file.
- * ``OwnerCheck`` performs a plugin access; this will likely require a database
-   access, but since the behaviour is implementation specific it might also
-   imply a round-trip to the backend. This class of checks, when it involves
-   retrieving attributes for 'parent' resources, should be used very sparingly.
- * In order for ``OwnerCheck`` rules to work, parent resources should have an
-   entry in ``neutron.api.v2.attributes.RESOURCE_FOREIGN_KEYS``; moreover the
-   resource must be managed by the 'core' plugin (i.e. the one defined in the
-   core_plugin configuration variable).
-
-References
-----------
-
-.. [#] `Oslo policy module <http://git.openstack.org/cgit/openstack/oslo.policy/>`_
-.. [#] `Oslo policy developer documentation <http://docs.openstack.org/developer/oslo.policy/>`_
-.. [#] API controller item_ method
-
-.. _item: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/base.py?id=2015.1.1#n282
-
-.. [#] Policy engine's build_match_rule_ method
-
-.. _build_match_rule: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/policy.py?id=2015.1.1#n187
-
-.. [#] exclude_attributes_by_policy_ method
-
-.. _exclude_attributes_by_policy: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/base.py?id=2015.1.1#n132
-
-.. [#] Policy reset_ in neutron.api.v2.router
-
-.. _reset: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/router.py?id=2015.1.1#n122
diff --git a/doc/source/devref/quality_of_service.rst b/doc/source/devref/quality_of_service.rst
deleted file mode 100644 (file)
index 8e3e6d8..0000000
+++ /dev/null
@@ -1,387 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Quality of Service
-==================
-
-Quality of Service advanced service is designed as a service plugin. The
-service is decoupled from the rest of Neutron code on multiple levels (see
-below).
-
-QoS extends core resources (ports, networks) without using mixins inherited
-from plugins but through an ml2 extension driver.
-
-Details about the DB models, API extension, and use cases can be found in the
-`qos spec <http://specs.openstack.org/openstack/neutron-specs/specs/liberty/qos-api-extension.html>`_.
-
-Service side design
--------------------
-
-* neutron.extensions.qos:
-  base extension + API controller definition. Note that rules are subattributes
-  of policies and hence embedded into their URIs.
-
-* neutron.services.qos.qos_plugin:
-  QoSPlugin, service plugin that implements 'qos' extension, receiving and
-  handling API calls to create/modify policies and rules.
-
-* neutron.services.qos.notification_drivers.manager:
-  the manager that passes object notifications down to every enabled
-  notification driver.
-
-* neutron.services.qos.notification_drivers.qos_base:
-  the interface class for pluggable notification drivers that are used to
-  update backends about new {create, update, delete} events on any rule or
-  policy change.
-
-* neutron.services.qos.notification_drivers.message_queue:
-  MQ-based reference notification driver which updates agents via messaging
-  bus, using `RPC callbacks <rpc_callbacks.html>`_.
-
-* neutron.core_extensions.base:
-  Contains an interface class to implement core resource (port/network)
-  extensions. Core resource extensions are then easily integrated into
-  interested plugins. We may need to have a core resource extension manager
-  that would utilize those extensions, to avoid plugin modifications for every
-  new core resource extension.
-
-* neutron.core_extensions.qos:
-  Contains QoS core resource extension that conforms to the interface described
-  above.
-
-* neutron.plugins.ml2.extensions.qos:
-  Contains ml2 extension driver that handles core resource updates by reusing
-  the core_extensions.qos module mentioned above. In the future, we would like
-  to see a plugin-agnostic core resource extension manager that could be
-  integrated into other plugins with ease.
-
-
-Supported QoS rule types
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Any plugin or Ml2 mechanism driver can claim support for some QoS rule types by
-providing a plugin/driver class property called 'supported_qos_rule_types' that
-should return a list of strings that correspond to QoS rule types (for the list
-of all rule types, see: neutron.services.qos.qos_consts.VALID_RULE_TYPES).
-
-In the simplest case, the property can be represented by a simple Python
-list defined on the class.
-
-For the Ml2 plugin, the list of supported QoS rule types is defined as a common
-subset of rules supported by all active mechanism drivers.
-
-Note: the list of supported rule types reported by the core plugin is not
-enforced when accessing QoS rule resources. This is mostly because then we
-would not be able to create any rules while at least one ml2 driver in gate
-lacks support for QoS (at the moment of writing, linuxbridge is such a driver).
-
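-As a minimal sketch, a hypothetical mechanism driver could claim support for
-the bandwidth limit rule type as follows (the driver class is illustrative;
-the constant lives in neutron.services.qos.qos_consts)::
-
-  from neutron.services.qos import qos_consts
-
-  class MyMechanismDriver(object):
-      # Claim support for the bandwidth limit rule type only.
-      supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT]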
-
-Database models
-~~~~~~~~~~~~~~~
-
-QoS design defines the following two conceptual resources to apply QoS rules
-for a port or a network:
-
-* QoS policy
-* QoS rule (type specific)
-
-Each QoS policy contains zero or more QoS rules. A policy is then applied to a
-network or a port, making all rules of the policy applied to the corresponding
-Neutron resource.
-
-When applied through a network association, policy rules may or may not apply
-to neutron internal ports (like router, dhcp, load balancer, etc.). The QosRule
-base object provides a default should_apply_to_port method which could be
-overridden. In the future we may want to have a flag in QoSNetworkPolicyBinding
-or QosRule to enforce such type of application (for example when limiting all
-the ingress of router devices on an external network automatically).
-
-From the database point of view, the following objects are defined in the schema:
-
-* QosPolicy: directly maps to the conceptual policy resource.
-* QosNetworkPolicyBinding, QosPortPolicyBinding: defines attachment between a
-  Neutron resource and a QoS policy.
-* QosBandwidthLimitRule: defines the only rule type available at the moment.
-
-
-All database models are defined under:
-
-* neutron.db.qos.models
-
-
-QoS versioned objects
-~~~~~~~~~~~~~~~~~~~~~
-
-There is a long history of passing database dictionaries directly into the
-business logic of Neutron. This path is not the one we wanted to take for the
-QoS effort, so we've also introduced a new objects middleware to encapsulate
-the database logic from the rest of the Neutron code that works with QoS
-resources. For this, we've adopted the oslo.versionedobjects library and
-introduced a new NeutronObject class that is a base for all other objects
-that will belong to the middle layer. There is an expectation that Neutron
-will evolve into using objects for all resources it handles, though that part
-was obviously out of scope for the QoS effort.
-
-Every NeutronObject supports the following operations:
-
-* get_by_id: returns specific object that is represented by the id passed as an
-  argument.
-* get_objects: returns all objects of the type, potentially with a filter
-  applied.
-* create/update/delete: usual persistence operations.
-
-Base object class is defined in:
-
-* neutron.objects.base
-
-For QoS, new neutron objects were implemented:
-
-* QosPolicy: directly maps to the conceptual policy resource, as defined above.
-* QosBandwidthLimitRule: class that represents the only rule type supported by
-  initial QoS design.
-
-Those are defined in:
-
-* neutron.objects.qos.policy
-* neutron.objects.qos.rule
-
-For QosPolicy neutron object, the following public methods were implemented:
-
-* get_network_policy/get_port_policy: returns a policy object that is attached
-  to the corresponding Neutron resource.
-* attach_network/attach_port: attach a policy to the corresponding Neutron
-  resource.
-* detach_network/detach_port: detach a policy from the corresponding Neutron
-  resource.
-
-In addition to the fields that belong to QoS policy database object itself,
-synthetic fields were added to the object that represent lists of rules that
-belong to the policy. To get a list of all rules for a specific policy, a
-consumer of the object can just access the corresponding attribute via:
-
-* policy.rules
-
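-The following is a minimal usage sketch of the object API described above
-(the context and identifiers are assumed to be available)::
-
-  from neutron.objects.qos import policy as policy_object
-
-  # Fetch a policy together with its rules (rules are loaded eagerly).
-  policy = policy_object.QosPolicy.get_by_id(context, policy_id)
-  for rule in policy.rules:
-      print(rule.id)
-
-  # Attach the policy to a port.
-  policy.attach_port(port_id)
-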
-Implementation is done in a way that will allow adding a new rule list field
-with little or no modifications in the policy object itself. This is achieved
-by smart introspection of existing available rule object definitions and
-automatic definition of those fields on the policy class.
-
-Note that rules are loaded in a non-lazy way, meaning they are all fetched
-from the database on policy fetch.
-
-For Qos<type>Rule objects, an extendable approach was taken to allow easy
-addition of objects for new rule types. To accommodate this, fields common to
-all types are put into a base class called QosRule that is then inherited into
-type-specific rule implementations that, ideally, only define additional fields
-and some other minor things.
-
-Note that the QosRule base class is not registered with oslo.versionedobjects
-registry, because it's not expected that 'generic' rules should be
-instantiated (and to suggest just that, the base rule class is marked as ABC).
-
-QoS objects rely on some primitive database API functions that are added in:
-
-* neutron.db.api: those can be reused to fetch other models that do not have
-  corresponding versioned objects yet, if needed.
-* neutron.db.qos.api: contains database functions that are specific to QoS
-  models.
-
-
-RPC communication
-~~~~~~~~~~~~~~~~~
-
-Details on RPC communication implemented in reference backend driver are
-discussed in `a separate page <rpc_callbacks.html>`_.
-
-One thing that should be mentioned here explicitly is that RPC callback
-endpoints communicate using real versioned objects (as defined by serialization
-for oslo.versionedobjects library), not vague json dictionaries. Meaning,
-oslo.versionedobjects are on the wire and not just used internally inside a
-component.
-
-One more thing to note is that though the RPC interface relies on versioned
-objects, it does not yet rely on the versioning features the
-oslo.versionedobjects library provides. This is because Liberty is the first
-release where we start using the RPC interface, so we have no way to get
-different versions in a cluster. That said, the versioning strategy for QoS
-is thought through and described in `the separate page <rpc_callbacks.html>`_.
-
-There is an expectation that after RPC callbacks are introduced in Neutron, we
-will be able to migrate propagation from server to agents for other resources
-(f.e. security groups) to the new mechanism. This will need to wait until those
-resources get proper NeutronObject implementations.
-
-The flow of updates is as follows:
-
-* if a port that is bound to the agent is attached to a QoS policy, then the
-  ML2 plugin detects the change by relying on the ML2 QoS extension driver, and
-  notifies the agent about a port change. The agent proceeds with the
-  notification by calling get_device_details() and getting the new port dict
-  that contains a new qos_policy_id. Each device details dict is passed into
-  the l2 agent extension manager, which passes it down into every enabled
-  extension, including QoS. The QoS extension sees that there is a new unknown
-  QoS policy for a port, so it uses ResourcesPullRpcApi to fetch the current
-  state of the policy (with all the rules included) from the server. After
-  that, the QoS extension applies the rules by calling into the QoS driver
-  that corresponds to the agent.
-* on an existing QoS policy update (this includes any policy or rule change),
-  the server pushes the new policy object state through the ResourcesPushRpcApi
-  interface. The interface fans out the serialized (dehydrated) object to any
-  agent that is listening for QoS policy updates. If an agent has seen the
-  policy before (it is attached to one of the ports it maintains), then it
-  proceeds to apply the updates to the port. Otherwise, the agent silently
-  ignores the update.
-
-
-Agent side design
------------------
-
-To ease code reusability between agents and to avoid the need to patch an agent
-for each new core resource extension, pluggable L2 agent extensions were
-introduced. They can be especially interesting to third parties that don't want
-to maintain their code in Neutron tree.
-
-Extensions are meant to receive handle_port events, and do whatever they need
-with them.
-
-* neutron.agent.l2.agent_extension:
-  This module defines an abstract extension interface.
-
-* neutron.agent.l2.extensions.manager:
-  This module contains a manager that allows registering multiple extensions,
-  and passes handle_port events down to all enabled extensions.
-
-* neutron.agent.l2.extensions.qos:
-  This module defines the QoS L2 agent extension. It receives handle_port and
-  delete_port events and passes them down into the QoS agent backend driver
-  (see below). The file also defines the QosAgentDriver interface. Note: each
-  backend implements its own driver. The driver handles low level interaction
-  with the underlying networking technology, while the QoS extension handles
-  operations that are common to all agents.
-
-
-Agent backends
-~~~~~~~~~~~~~~
-
-At the moment, QoS is supported by Open vSwitch and SR-IOV ml2 drivers.
-
-Each agent backend defines a QoS driver that implements the QosAgentDriver
-interface:
-
-* Open vSwitch (QosOVSAgentDriver);
-* SR-IOV (QosSRIOVAgentDriver).
-
-
-Open vSwitch
-++++++++++++
-
-Open vSwitch implementation relies on the new ovs_lib OVSBridge functions:
-
-* get_egress_bw_limit_for_port
-* create_egress_bw_limit_for_port
-* delete_egress_bw_limit_for_port
-
-An egress bandwidth limit is effectively configured on the port by setting
-the port Interface parameters ingress_policing_rate and
-ingress_policing_burst.
-
-That approach is less flexible than linux-htb, Queues and OvS QoS profiles,
-which we may explore in the future, but which will need to be used in
-combination with openflow rules.
-
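-The following is a hedged sketch of how these functions might be used (the
-port name and values are illustrative)::
-
-  from neutron.agent.common import ovs_lib
-
-  br = ovs_lib.OVSBridge('br-int')
-  # Rate in kbps, burst in kb; applied via the Interface's
-  # ingress_policing_rate/ingress_policing_burst parameters.
-  br.create_egress_bw_limit_for_port('tap0', max_kbps=1000, max_burst_kbps=100)
-  print(br.get_egress_bw_limit_for_port('tap0'))
-  br.delete_egress_bw_limit_for_port('tap0')
-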
-SR-IOV
-++++++
-
-SR-IOV bandwidth limit implementation relies on the new pci_lib function:
-
-* set_vf_max_rate
-
-As the name of the function suggests, the limit is applied on a Virtual
-Function (VF).
-
-The ip link interface has the following limitation for bandwidth limits: it
-uses Mbps as the unit of bandwidth measurement, not kbps, and does not support
-float numbers. So in case the limit is set to something less than 1000 kbps,
-it is set to 1 Mbps only. If the limit is set to something that does not divide
-evenly into 1000 kbps chunks, then the effective limit is rounded to the
-nearest integer Mbps value.
-
-
-Configuration
--------------
-
-To enable the service, the following steps should be followed:
-
-On server side:
-
-* enable qos service in service_plugins;
-* set the needed notification_drivers in [qos] section (message_queue is the default);
-* for ml2, add 'qos' to extension_drivers in [ml2] section.
-
-On agent side (OVS):
-
-* add 'qos' to extensions in [agent] section.
-
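-Putting it together, a minimal sketch of the relevant configuration snippets
-(other options omitted; exact values may vary with your deployment)::
-
-  [DEFAULT]
-  service_plugins = qos
-
-  [qos]
-  notification_drivers = message_queue
-
-  [ml2]
-  extension_drivers = qos
-
-  [agent]
-  extensions = qos
-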
-
-Testing strategy
-----------------
-
-All the code added or extended as part of the effort got reasonable unit test
-coverage.
-
-
-Neutron objects
-~~~~~~~~~~~~~~~
-
-Base unit test classes to validate neutron objects were implemented in a way
-that allows code reuse when introducing a new object type.
-
-There are two test classes that are utilized for that:
-
-* BaseObjectIfaceTestCase: class to validate basic object operations (mostly
-  CRUD) with database layer isolated.
-* BaseDbObjectTestCase: class to validate the same operations with models in
-  place and database layer unmocked.
-
-Every new object implemented on top of one of those classes is expected to
-either inherit existing test cases as is, or reimplement them, if it makes
-sense in terms of how those objects are implemented. Specific test classes can
-obviously extend the set of test cases as needed (f.e. you need to
-define new test cases for those additional methods that you may add to your
-object implementations on top of base semantics common to all neutron objects).
-
-
-Functional tests
-~~~~~~~~~~~~~~~~
-
-Additions to ovs_lib to set bandwidth limits on ports are covered in:
-
-* neutron.tests.functional.agent.test_ovs_lib
-
-
-API tests
-~~~~~~~~~
-
-API tests for basic CRUD operations for ports, networks, policies, and rules were added in:
-
-* neutron.tests.api.test_qos
diff --git a/doc/source/devref/quota.rst b/doc/source/devref/quota.rst
deleted file mode 100644 (file)
index a32b12d..0000000
+++ /dev/null
@@ -1,348 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Quota Management and Enforcement
-================================
-
-Most resources exposed by the Neutron API are subject to quota limits.
-The Neutron API exposes an extension for managing such quotas. Quota limits are
-enforced at the API layer, before the request is dispatched to the plugin.
-
-Default values for quota limits are specified in neutron.conf. Admin users
-can override those default values on a per-tenant basis. Limits are stored
-in the Neutron database; if no limit is found for a given resource and tenant,
-then the default value for that resource is used.
-Configuration-based quota management, where every tenant gets the same quota
-limit specified in the configuration file, has been deprecated as of the
-Liberty release.
-
-Please note that Neutron supports neither specification of quota limits per
-user nor quota management for hierarchical multitenancy (as a matter of
-fact Neutron does not support hierarchical multitenancy at all). Also, quota
-limits are currently not enforced on RPC interfaces listening on the AMQP
-bus.
-
-Plugin and ML2 drivers are not supposed to enforce quotas for resources they
-manage. However, the subnet_allocation [#]_ extension is an exception and will
-be discussed below.
-
-The quota management and enforcement mechanisms discussed here apply to every
-resource which has been registered with the Quota engine, regardless of
-whether such resource belongs to the core Neutron API or one of its extensions.
-
-High Level View
----------------
-
-There are two main components in the Neutron quota system:
-
- * The Quota API extension;
- * The Quota Engine.
-
-Both components rely on a quota driver. The neutron codebase currently defines
-two quota drivers:
-
- * neutron.db.quota.driver.DbQuotaDriver
- * neutron.quota.ConfDriver
-
-The latter driver is however deprecated.
-
-The Quota API extension handles quota management, whereas the Quota Engine
-component handles quota enforcement. This API extension is loaded like any
-other extension. For this reason plugins must explicitly support it by including
-"quotas" in the support_extension_aliases attribute.
-
-In the Quota API, simple CRUD operations are used for managing tenant quotas.
-Please note that the current behaviour when deleting a tenant quota is to reset
-quota limits for that tenant to configuration defaults. The API
-extension does not validate the tenant identifier with the identity service.
-
-Performing quota enforcement is the responsibility of the Quota Engine.
-RESTful API controllers, before sending a request to the plugin, try to obtain
-a reservation from the quota engine for the resources specified in the client
-request. If the reservation is successful, then it proceeds to dispatch the
-operation to the plugin.
-
-For a reservation to be successful, the total amount of resources requested,
-plus the total amount of resources reserved, plus the total amount of resources
-already stored in the database should not exceed the tenant's quota limit.
-
-Finally, both quota management and enforcement rely on a "quota driver" [#]_,
-whose task is basically to perform database operations.
-
-Quota Management
-----------------
-
-The quota management component is fairly straightforward.
-
-However, unlike the vast majority of Neutron extensions, it uses its own
-controller class [#]_.
-This class does not implement the POST operation. List, get, update, and
-delete operations are implemented by the usual index, show, update and
-delete methods. These methods simply call into the quota driver for either
-fetching tenant quotas or updating them.
-
-The _update_attributes method is called only once in the controller lifetime.
-This method dynamically updates Neutron's resource attribute map [#]_ so that
-an attribute is added for every resource managed by the quota engine.
-Request authorisation is performed in this controller, and only 'admin' users
-are allowed to modify quotas for tenants. As the neutron policy engine is not
-used, it is not possible to configure which users should be allowed to manage
-quotas using policy.json.
-
-The driver operations dealing with quota management are:
-
- * delete_tenant_quota, which simply removes all entries from the 'quotas'
-   table for a given tenant identifier;
- * update_quota_limit, which adds or updates an entry in the 'quotas' table for
-   a given tenant identifier and a given resource name;
- * _get_quotas, which fetches limits for a set of resources and a given tenant
-   identifier;
- * _get_all_quotas, which behaves like _get_quotas, but for all tenants.
-
-
-Resource Usage Info
--------------------
-
-Neutron has two ways of tracking resource usage info:
-
- * CountableResource, where resource usage is calculated every time quotas
-   limits are enforced by counting rows in the resource table and reservations
-   for that resource.
- * TrackedResource, which instead relies on a specific table tracking usage
-   data, and performs explicit counting only when the data in this table are
-   not in sync with actual used and reserved resources.
-
-Another difference between CountableResource and TrackedResource is that the
-former invokes a plugin method to count resources. CountableResource should
-therefore be employed for plugins which do not leverage the Neutron database.
-The actual class that the Neutron quota engine will use is determined by the
-track_quota_usage variable in the quota configuration section. If True,
-TrackedResource instances will be created, otherwise the quota engine will
-use CountableResource instances.
-Resource creation is performed by the create_resource_instance factory method
-in the neutron.quota.resource module.
-
-From a performance perspective, having a table tracking resource usage
-has some advantages, albeit not fundamental. Indeed the time required for
-executing queries to explicitly count objects will increase with the number of
-records in the table. On the other hand, using TrackedResource will fetch a
-single record, but has the drawback of having to execute an UPDATE statement
-once the operation is completed.
-Nevertheless, CountableResource instances do not simply perform a SELECT query
-on the relevant table for a resource, but invoke a plugin method, which might
-execute several statements and sometimes even interact with the backend
-before returning.
-Resource usage tracking also becomes important for operational correctness
-when coupled with the concept of resource reservation, discussed in another
-section of this chapter.
-
-Tracking quota usage is not as simple as updating a counter every time
-resources are created or deleted.
-Indeed a quota-limited resource in Neutron can be created in several ways.
-While a RESTful API request is the most common one, resources can be created
-by RPC handlers listening on the AMQP bus, such as those which create DHCP
-ports, or by plugin operations, such as those which create router ports.
-
-To this aim, TrackedResource instances are initialised with a reference to
-the model class for the resource for which they track usage data. During
-object initialisation, SqlAlchemy event handlers are installed for this class.
-The event handler is executed after a record is inserted or deleted.
-As a result, usage data for that resource will be marked as 'dirty' once
-the operation completes, so that the next time usage data is requested,
-it will be synchronised by counting resource usage from the database.
-Even if this solution has some drawbacks, listed in the 'exceptions and
-caveats' section, it is more reliable than solutions such as:
-
- * Updating the usage counters with the new 'correct' value every time an
-   operation completes.
- * Having a periodic task synchronising quota usage data with actual data in
-   the Neutron DB.
-
-Finally, regardless of whether CountableResource or TrackedResource is used,
-the quota engine always invokes its count() method to retrieve resource usage.
-Therefore, from the perspective of the Quota engine there is absolutely no
-difference between CountableResource and TrackedResource.
-
-Quota Enforcement
------------------
-
-Before dispatching a request to the plugin, the Neutron 'base' controller [#]_
-attempts to make a reservation for requested resource(s).
-Reservations are made by calling the make_reservation method in
-neutron.quota.QuotaEngine.
-The process of making a reservation is fairly straightforward:
-
- * Get current resource usages. This is achieved by invoking the count method
-   on every requested resource, and then retrieving the amount of reserved
-   resources.
- * Fetch current quota limits for requested resources, by invoking the
-   _get_tenant_quotas method.
- * Fetch expired reservations for selected resources. This amount will be
-   subtracted from resource usage. As in most cases there won't be any
-   expired reservations, this approach actually requires fewer DB operations
-   than summing non-expired, reserved resources for each request.
- * For each resource calculate its headroom, and verify the requested
-   amount of resources is less than the headroom.
- * If the above is true for all resources, the reservation is saved in the DB,
-   otherwise an OverQuotaLimit exception is raised.
-
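-The following is a hedged sketch of this flow from a controller's perspective
-(the resource delta and the reservation_id attribute are assumptions based on
-the description above)::
-
-  from neutron import quota
-
-  # Reserve one port for the tenant; raises on quota exhaustion.
-  reservation = quota.QUOTAS.make_reservation(
-      context, tenant_id, {'port': 1}, plugin)
-  try:
-      port = plugin.create_port(context, port_data)
-      quota.QUOTAS.commit_reservation(context, reservation.reservation_id)
-  except Exception:
-      quota.QUOTAS.cancel_reservation(context, reservation.reservation_id)
-      raise
-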
-The quota engine is able to make a reservation for multiple resources.
-However, it is worth noting that because of the current structure of the
-Neutron API layer, there will not be any practical case in which a reservation
-for multiple resources is made. For this reason performance optimisations
-avoiding repeated queries for every resource are not part of the current
-implementation.
-
-In order to ensure correct operations, a row-level lock is acquired in
-the transaction which creates the reservation. The lock is acquired when
-reading usage data. In case of write-set certification failures,
-which can occur in active/active clusters such as MySQL galera, the decorator
-oslo_db.api.wrap_db_retry will retry the transaction if a DBDeadLock
-exception is raised.
-While non-locking approaches are possible, it has been found that, since
-a non-locking algorithm increases the chances of collision, the cost of
-handling a DBDeadlock is still lower than the cost of retrying the operation
-when a collision is detected. A study in this direction was conducted for
-IP allocation operations, but the same principles apply here as well [#]_.
-Nevertheless, moving away from DB-level locks is something that must happen
-for quota enforcement in the future.
-
-Committing and cancelling a reservation is as simple as deleting the
-reservation itself. When a reservation is committed, the resources which
-were reserved are now stored in the database, so the reservation itself
-should be deleted. The Neutron quota engine simply removes the record when
-cancelling a reservation (i.e. the request failed to complete), and also
-marks quota usage info as dirty when the reservation is committed (i.e.
-the request completed correctly).
-Reservations are committed or cancelled by respectively calling the
-commit_reservation and cancel_reservation methods in neutron.quota.QuotaEngine.
-
-Reservations are not perennial. Eternal reservations would eventually exhaust
-tenants' quotas because they would never be removed when an API worker crashes
-whilst in the middle of an operation.
-Reservation expiration is currently set to 120 seconds, and is not
-configurable, not yet at least. Expired reservations are not counted when
-calculating resource usage. While creating a reservation, if any expired
-reservation is found, all expired reservations for that tenant and resource
-will be removed from the database, thus avoiding build-up of expired
-reservations.
-
-Setting up Resource Tracking for a Plugin
-------------------------------------------
-
-By default plugins do not leverage resource tracking. Having the plugin
-explicitly declare which resources should be tracked is a precise design
-choice aimed at limiting as much as possible the chance of introducing
-errors in existing plugins.
-
-For this reason a plugin must declare which resource it intends to track.
-This can be achieved using the tracked_resources decorator available in the
-neutron.quota.resource_registry module.
-The decorator should ideally be applied to the plugin's __init__ method.
-
-The decorator accepts a list of keyword arguments as input. The name of each
-argument must be a resource name, and the value of each argument must be
-a DB model class. For example:
-
-::
-
-  @resource_registry.tracked_resources(network=models_v2.Network,
-                                       port=models_v2.Port,
-                                       subnet=models_v2.Subnet,
-                                       subnetpool=models_v2.SubnetPool)
-
-This will ensure network, port, subnet and subnetpool resources are tracked.
-In theory, it is possible to use this decorator multiple times, and not
-exclusively on __init__ methods. However, this would eventually lead to
-code readability and maintainability problems, so developers are strongly
-encouraged to apply this decorator exclusively to the plugin's __init__
-method (or any other method which is called by the plugin only once
-during its initialization).
-
-Notes for Implementors of RPC Interfaces and RESTful Controllers
--------------------------------------------------------------------------------
-
-Neutron unfortunately does not have a layer which is called before dispatching
-the operation to the plugin and which can be leveraged both from RESTful and
-RPC over AMQP APIs. In particular the RPC handlers call straight into the
-plugin, without doing any request authorisation or quota enforcement.
-
-Therefore RPC handlers must explicitly indicate if they are going to call the
-plugin to create or delete any sort of resources. This is achieved in a simple
-way, by ensuring modified resources are marked as dirty after the RPC handler
-execution terminates. To this aim developers can use the mark_resources_dirty
-decorator available in the module neutron.quota.resource_registry.
-
-The decorator would scan the whole list of registered resources, and store
-the dirty status for their usage trackers in the database for those resources
-for which items have been created or destroyed during the plugin operation.
-
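-The following is a hedged sketch of an RPC handler using this decorator (the
-handler class and method body are illustrative)::
-
-  from neutron.quota import resource_registry
-
-  class DhcpRpcCallback(object):
-
-      @resource_registry.mark_resources_dirty
-      def create_dhcp_port(self, context, **kwargs):
-          # The plugin call below may create a port; the decorator marks
-          # the usage tracker for 'port' as dirty once it returns.
-          return self.plugin.create_port(context, kwargs['port'])
-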
-Exceptions and Caveats
------------------------
-
-Please be aware of the following limitations of the quota enforcement engine:
-
- * Subnet allocation from subnet pools, in particular shared pools, is also
-   subject to quota limit checks. However, these checks are not enforced by the
-   quota engine, but through a mechanism implemented in the
-   neutron.ipam.subnetalloc module. This is because the Quota engine is not
-   able to satisfy the requirements for quotas on subnet allocation.
- * The quota engine also provides a limit_check routine which enforces quota
-   checks without creating reservations. This way of doing quota enforcement
-   is extremely unreliable and superseded by the reservation mechanism. It
-   has not been removed to ensure off-tree plugins and extensions which
-   leverage it are not broken.
- * SqlAlchemy events might not be the most reliable way for detecting changes
-   in resource usage. Since the event mechanism monitors the data model class,
-   it is paramount for correct quota enforcement that resources are always
-   created and deleted using object relational mappings. For instance, deleting
-   a resource with a query.delete call will not trigger the event. SQLAlchemy
-   events should be considered as a temporary measure adopted as Neutron lacks
-   persistent API objects.
- * As CountableResource instances do not track usage data, no write-intent
-   lock is acquired when making a reservation. Therefore the quota engine
-   with CountableResource is not concurrency-safe.
- * The mechanism for specifying for which resources usage tracking should be
-   enabled relies on the fact that the plugin is loaded before quota-limited
-   resources are registered. For this reason it is not possible to validate
-   whether a resource actually exists or not when enabling tracking for it.
-   Developers should pay particular attention to ensuring resource names are
-   correctly specified.
- * The code assumes usage trackers are a trusted source of truth: if they
-   report a usage counter and the dirty bit is not set, that counter is
-   correct. If it's dirty, then surely that counter is out of sync.
-   This is not very robust, as there might be issues upon restart when toggling
-   the track_quota_usage configuration variable, as stale counters might be
-   trusted for making reservations. Also, the same situation might occur
-   if a server crashes after the API operation is completed but before the
-   reservation is committed, as the actual resource usage is changed but
-   the corresponding usage tracker is not marked as dirty.
-
-References
-----------
-
-.. [#] Subnet allocation extension: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/subnetallocation.py
-.. [#] DB Quota driver class: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/quota_db.py#n33
-.. [#] Quota API extension controller: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/quotasv2.py#n40
-.. [#] Neutron resource attribute map: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/attributes.py#n639
-.. [#] Base controller class: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/base.py#n50
-.. [#] http://lists.openstack.org/pipermail/openstack-dev/2015-February/057534.html
diff --git a/doc/source/devref/rpc_api.rst b/doc/source/devref/rpc_api.rst
deleted file mode 100644 (file)
index 5be9978..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Neutron RPC API Layer
-=====================
-
-Neutron uses the oslo.messaging library to provide an internal communication
-channel between Neutron services.  This communication is typically done via
-AMQP, but those details are mostly hidden by the use of oslo.messaging and it
-could be some other protocol in the future.
-
-RPC APIs are defined in Neutron in two parts: client side and server side.
-
-Client Side
------------
-
-Here is an example of an rpc client definition:
-
-::
-
-  import oslo_messaging
-
-  from neutron.common import rpc as n_rpc
-
-
-  class ClientAPI(object):
-      """Client side RPC interface definition.
-
-      API version history:
-          1.0 - Initial version
-          1.1 - Added my_remote_method_2
-      """
-
-      def __init__(self, topic):
-          target = oslo_messaging.Target(topic=topic, version='1.0')
-          self.client = n_rpc.get_client(target)
-
-      def my_remote_method(self, context, arg1, arg2):
-          cctxt = self.client.prepare()
-          return cctxt.call(context, 'my_remote_method', arg1=arg1, arg2=arg2)
-
-      def my_remote_method_2(self, context, arg1):
-          cctxt = self.client.prepare(version='1.1')
-          return cctxt.call(context, 'my_remote_method_2', arg1=arg1)
-
-
-This class defines the client side interface for an rpc API.  The interface
-has two methods.  The first method existed in version 1.0 of the interface.
-The second method was added in version 1.1.  When the newer method is called,
-it specifies that the remote side must implement at least version 1.1 to
-handle this request.
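-
-For illustration, a usage sketch of this client; the topic name, context and
-argument values here are hypothetical::
-
-  client = ClientAPI(topic='example-topic')
-
-  # Works against any server implementing at least version 1.0.
-  result = client.my_remote_method(context, 'a', 'b')
-
-  # Requires a server implementing at least version 1.1.
-  result_2 = client.my_remote_method_2(context, 'a')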
-
-Server Side
------------
-
-The server side of an rpc interface looks like this:
-
-::
-
-  import oslo_messaging
-
-
-  class ServerAPI(object):
-
-      target = oslo_messaging.Target(version='1.1')
-
-      def my_remote_method(self, context, arg1, arg2):
-          return 'foo'
-
-      def my_remote_method_2(self, context, arg1):
-          return 'bar'
-
-
-This class implements the server side of the interface.  The
-oslo_messaging.Target() definition declares that this class currently
-implements version 1.1 of the interface.
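-
-How such an endpoint is attached to a messaging server is up to the service;
-a minimal sketch, assuming the connection helpers in neutron.common.rpc and
-an illustrative topic name, could look like this::
-
-  from neutron.common import rpc as n_rpc
-
-  conn = n_rpc.create_connection()
-  conn.create_consumer('example-topic', [ServerAPI()], fanout=False)
-  conn.consume_in_threads()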
-
-.. _rpc_versioning:
-
-Versioning
-----------
-
-Note that changes to rpc interfaces must always be done in a backwards
-compatible way.  The server side should always be able to handle older clients
-(within the same major version series, such as 1.X).
-
-It is possible to bump the major version number and drop some code only needed
-for backwards compatibility.  For more information about how to do that, see
-https://wiki.openstack.org/wiki/RpcMajorVersionUpdates.
-
-Example Change
-~~~~~~~~~~~~~~
-
-As an example minor API change, let's assume we want to add a new parameter to
-my_remote_method_2.  First, we add the argument on the server side.  To be
-backwards compatible, the new argument must have a default value set so that the
-interface will still work even if the argument is not supplied.  Also, the
-interface's minor version number must be incremented.  So, the new server side
-code would look like this:
-
-::
-
-  import oslo_messaging
-
-
-  class ServerAPI(object):
-
-      target = oslo_messaging.Target(version='1.2')
-
-      def my_remote_method(self, context, arg1, arg2):
-          return 'foo'
-
-      def my_remote_method_2(self, context, arg1, arg2=None):
-          if not arg2:
-              # Deal with the fact that arg2 was not specified if needed.
-              pass
-          return 'bar'
-
-We can now update the client side to pass the new argument.  The client must
-also specify that version '1.2' is required for this method call to be
-successful.  The updated client side would look like this:
-
-::
-
-  import oslo_messaging
-
-  from neutron.common import rpc as n_rpc
-
-
-  class ClientAPI(object):
-      """Client side RPC interface definition.
-
-      API version history:
-          1.0 - Initial version
-          1.1 - Added my_remote_method_2
-          1.2 - Added arg2 to my_remote_method_2
-      """
-
-      def __init__(self, topic):
-          target = oslo_messaging.Target(topic=topic, version='1.0')
-          self.client = n_rpc.get_client(target)
-
-      def my_remote_method(self, context, arg1, arg2):
-          cctxt = self.client.prepare()
-          return cctxt.call(context, 'my_remote_method', arg1=arg1, arg2=arg2)
-
-      def my_remote_method_2(self, context, arg1, arg2):
-          cctxt = self.client.prepare(version='1.2')
-          return cctxt.call(context, 'my_remote_method_2',
-                            arg1=arg1, arg2=arg2)
-
-Neutron RPC APIs
-----------------
-
-As discussed before, RPC APIs are defined in two parts: a client side and a
-server side.  Several of these pairs exist in the Neutron code base.  The code
-base is being updated with documentation on every rpc interface implementation
-that indicates where the corresponding server or client code is located.
-
-Example: DHCP
-~~~~~~~~~~~~~
-
-The DHCP agent includes a client API, neutron.agent.dhcp.agent.DhcpPluginAPI.
-The DHCP agent uses this class to call remote methods back in the Neutron
-server.  The server side is defined in
-neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback.  It is up to the Neutron
-plugin in use to decide whether the DhcpRpcCallback interface should be
-exposed.
-
-Similarly, there is an RPC interface defined that allows the Neutron plugin to
-remotely invoke methods in the DHCP agent.  The client side is defined in
-neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi.  The
-server side of this interface that runs in the DHCP agent is
-neutron.agent.dhcp.agent.DhcpAgent.
-
-More Info
----------
-
-For more information, see the oslo.messaging documentation:
-http://docs.openstack.org/developer/oslo.messaging/.
diff --git a/doc/source/devref/rpc_callbacks.rst b/doc/source/devref/rpc_callbacks.rst
deleted file mode 100644 (file)
index bb0aeac..0000000
+++ /dev/null
@@ -1,285 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-.. _rpc_callbacks:
-
-Neutron Messaging Callback System
-=================================
-
-Neutron already has a :doc:`callback system <callbacks>` for
-in-process resource callbacks where publishers and subscribers are able
-to publish and subscribe for resource events.
-
-This system is different, and is intended to be used for inter-process
-callbacks, via the messaging fanout mechanisms.
-
-In Neutron, agents may need to subscribe to specific resource details which
-may change over time. The purpose of this messaging callback system is to
-allow agents to subscribe to those resources without the need to extend or
-modify existing RPC calls, or to create new RPC messages.
-
-A few resources which can benefit from this system:
-
-* QoS policies;
-* Security Groups.
-
-Using a remote publisher/subscriber pattern, the information about such
-resources could be published using fanout messages to all interested nodes,
-minimizing messaging requests from agents to the server, since the agents
-get subscribed for their whole lifecycle (unless they unsubscribe).
-
-Within an agent, there could be multiple subscriber callbacks for the same
-resource events; the resource updates would be dispatched to the subscriber
-callbacks from a single message. Any update comes in a single message,
-requiring only a single oslo versioned objects deserialization on each
-receiving agent.
-
-This publishing/subscription mechanism is highly dependent on the format
-of the resources passed around. This is why the library only allows
-versioned objects to be published and subscribed. Oslo versioned objects
-allow object version down/up conversion. [#vo_mkcompat]_ [#vo_mkcptests]_
-
-For the versioned objects' versioning schema, look here: [#vo_versioning]_
-
-Versioned object serialization/deserialization with the
-obj_to_primitive(target_version=..) and primitive_to_obj() [#ov_serdes]_
-methods is used internally to convert/retrieve objects before/after messaging.
-
-Serialized versioned objects look like::
-
-   {'versioned_object.version': '1.0',
-    'versioned_object.name': 'QoSPolicy',
-    'versioned_object.data': {'rules': [
-                                        {'versioned_object.version': '1.0',
-                                         'versioned_object.name': 'QoSBandwidthLimitRule',
-                                         'versioned_object.data': {'name': u'a'},
-                                         'versioned_object.namespace': 'versionedobjects'}
-                                        ],
-                              'uuid': u'abcde',
-                              'name': u'aaa'},
-    'versioned_object.namespace': 'versionedobjects'}
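-
-As a hedged sketch of the round trip, assuming ``policy`` is such a QoSPolicy
-versioned object instance (``obj_from_primitive`` is the deserialization
-classmethod provided by the library)::
-
-    primitive = policy.obj_to_primitive(target_version='1.0')
-    policy_copy = policy.__class__.obj_from_primitive(primitive)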
-
-Rolling upgrades strategy
--------------------------
-In this section we assume the standard Neutron upgrade process, which means
-upgrading the server first and then upgrading the agents:
-
-:doc:`More information about the upgrade strategy <upgrade>`.
-
-The plan is to provide a semi-automatic method which avoids the error-prone
-manual pinning and unpinning of versions by the administrator.
-
-Resource pull requests
-~~~~~~~~~~~~~~~~~~~~~~
-Resource pull requests will always work because the underlying resource RPC
-provides the version of the requested resource id/ids. The server will be
-upgraded first, so it will always be able to satisfy any version the agents
-request.
-
-Resource push notifications
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Agents will subscribe to the neutron-vo-<resource_type>-<version> fanout queue
-which carries updated objects for the version they know about. The versions
-they know about depend on the runtime Neutron versioned objects they started with.
-
-When the server upgrades, it should be able to instantly calculate a census
-of agent versions per object (we will define a mechanism for this in a later
-section). It will use the census to send fanout messages across the whole
-version span of a resource type.
-
-For example, if neutron-server knew it had rpc-callback aware agents with
-versions 1.0 and 1.2 of resource type "A", any update would be sent to
-neutron-vo-A_1.0 and neutron-vo-A_1.2.
-
-TODO(mangelajo): Verify that after upgrade is finished any unused messaging
-resources (queues, exchanges, and so on) are released as older agents go away
-and neutron-server stops producing new message casts. Otherwise document the
-need for a neutron-server restart after rolling upgrade has finished if we
-want the queues cleaned up.
-
-
-Leveraging agent state reports for object version discovery
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-We would add a column to the agent DB for tracking the object types and
-version numbers each agent knows about. This would resemble the
-implementation of the configuration column.
-
-At startup, agents would report not only their configuration, but also
-their subscribed object type / version pairs, which would be stored in the
-database and be available to any neutron-server requesting them::
-
-    'subscribed_versions': {'QoSPolicy': '1.1',
-                            'SecurityGroup': '1.0',
-                            'Port': '1.0'}
-
-There's a subset of Liberty agents depending on QoSPolicy that will
-require 'QoSPolicy': '1.0' if the qos plugin is installed. We will be able
-to identify those by the binary name (included in the report):
-
-* 'neutron-openvswitch-agent'
-* 'neutron-sriov-nic-agent'
-
-Version discovery
-+++++++++++++++++
-With the above mechanism in place, and considering the exception of
-neutron-openvswitch-agent and neutron-sriov-nic-agent requiring QoSPolicy
-1.0, we could discover the subset of versions to be sent on every push
-notification.
-
-Agents that are in the down state would be excluded from this calculation.
-We would use an extended timeout for agents in this calculation to make sure
-we're on the safe side, especially if the deployer marked agents with low
-timeouts.
-
-Starting with Mitaka, any agent interested in versioned objects via this API
-should report the resource type / version pairs it is subscribed to.
-
-Caching mechanism
-'''''''''''''''''
-The version subset per object will be cached to avoid DB requests on every
-push, given that we assume all old agents are already registered at the time
-of upgrade.
-
-The cached subset will be re-evaluated (to cut down the version sets as
-agents upgrade) after a configured TTL.
-
-As a fast path to update this cache on all neutron-servers when upgraded
-agents come up (or old agents revive after a long timeout or even a
-downgrade), we could introduce a fanout queue consumed by servers, through
-which an agent additionally notifies all neutron-servers about the "versions
-of interest" of the agent just coming up.
-
-All notifications for all calculated version sets must be sent, as non-upgraded
-agents would otherwise not receive them.
-
-It is safe to send notifications to any fanout queue as they will be discarded
-if no agent is listening.
-
-Topic names for every resource type RPC endpoint
-------------------------------------------------
-
-neutron-vo-<resource_class_name>-<version>
-
-In the future, we may want to get oslo.messaging to support subscribing to
-topics dynamically; then we may want to use:
-
-neutron-vo-<resource_class_name>-<resource_id>-<version>
-
-instead, or something equivalent which would allow fine enough granularity
-for the receivers to only get the information that interests them.
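-
-For illustration, a topic name under the current scheme could be composed as
-follows (the helper function is hypothetical, not an actual Neutron API)::
-
-    def versioned_topic(resource_class_name, version):
-        return 'neutron-vo-%s-%s' % (resource_class_name, version)
-
-    versioned_topic('QoSPolicy', '1.0')  # -> 'neutron-vo-QoSPolicy-1.0'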
-
-Subscribing to resources
-------------------------
-
-Imagine you have agent A, which has just started handling a new port that
-has an associated security group and QoS policy.
-
-The agent code processing port updates may look like::
-
-    from neutron.api.rpc.callbacks.consumer import registry
-    from neutron.api.rpc.callbacks import events
-    from neutron.api.rpc.callbacks import resources
-
-
-    def process_resource_updates(resource_type, resource, event_type):
-        # Send the update to the right handler, which will update any
-        # control plane details related to the updated resource...
-        pass
-
-
-    def subscribe_resources():
-        registry.subscribe(process_resource_updates, resources.SEC_GROUP)
-
-        registry.subscribe(process_resource_updates, resources.QOS_POLICY)
-
-    def port_update(port):
-
-        # here we extract sg_id and qos_policy_id from port..
-
-        sec_group = registry.pull(resources.SEC_GROUP, sg_id)
-        qos_policy = registry.pull(resources.QOS_POLICY, qos_policy_id)
-
-
-The relevant function is:
-
-* subscribe(callback, resource_type): subscribes callback to a resource type.
-
-
-The callback function will receive the following arguments:
-
-* resource_type: the type of resource which is receiving the update.
-* resource: the updated resource object (a versioned object of one of the
-  supported types).
-* event_type: one of CREATED, UPDATED, or DELETED; see
-  neutron.api.rpc.callbacks.events for details.
-
-With the current oslo_messaging support for dynamic topics on the receiver
-side, we cannot implement a per "resource type + resource id" topic: rabbitmq
-seems to handle tens of thousands of topics without suffering, but creating
-hundreds of oslo_messaging receivers on different topics seems to crash.
-
-We may want to look into that later, to avoid agents receiving resource updates
-which are uninteresting to them.
-
-Unsubscribing from resources
-----------------------------
-
-To unsubscribe registered callbacks:
-
-* unsubscribe(callback, resource_type): unsubscribe from a specific resource type.
-* unsubscribe_all(): unsubscribe from all resources.
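-
-For example, tearing down the subscriptions made in the previous section::
-
-    # Unsubscribe a single callback from one resource type...
-    registry.unsubscribe(process_resource_updates, resources.SEC_GROUP)
-    # ...or drop every remaining subscription at once.
-    registry.unsubscribe_all()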
-
-
-Sending resource events
------------------------
-
-On the server side, resource updates could come from anywhere: a service
-plugin, an extension, anything that updates, creates, or destroys a resource
-that is of any interest to subscribed agents.
-
-The server/publisher side may look like::
-
-    from neutron.api.rpc.callbacks.producer import registry
-    from neutron.api.rpc.callbacks import events
-
-    def create_qos_policy(...):
-        policy = fetch_policy(...)
-        update_the_db(...)
-        registry.push(policy, events.CREATED)
-
-    def update_qos_policy(...):
-        policy = fetch_policy(...)
-        update_the_db(...)
-        registry.push(policy, events.UPDATED)
-
-    def delete_qos_policy(...):
-        policy = fetch_policy(...)
-        update_the_db(...)
-        registry.push(policy, events.DELETED)
-
-
-References
-----------
-.. [#ov_serdes] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/tests/test_objects.py#L410
-.. [#vo_mkcompat] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/base.py#L474
-.. [#vo_mkcptests] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/tests/test_objects.py#L114
-.. [#vo_versioning] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/base.py#L248
diff --git a/doc/source/devref/security_group_api.rst b/doc/source/devref/security_group_api.rst
deleted file mode 100644 (file)
index 72bc397..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Guided Tour: The Neutron Security Group API
-===========================================
-
-https://wiki.openstack.org/wiki/Neutron/SecurityGroups
-
-
-API Extension
--------------
-
-The API extension is the 'front end' portion of the code, which handles defining a `REST-ful API`_ that is used by tenants.
-
-
-.. _`REST-ful API`: https://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/securitygroup.py
-
-
-Database API
-------------
-
-The Security Group API extension adds a number of `methods to the database layer`_ of Neutron.
-
-.. _`methods to the database layer`: https://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/securitygroups_db.py
-
-Agent RPC
----------
-
-This portion of the code handles processing requests from tenants after they have been stored in the database. It involves messaging all the L2 agents
-running on the compute nodes, and modifying the iptables rules on each hypervisor.
-
-
-* `Plugin RPC classes <https://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/securitygroups_rpc_base.py>`_
-
-  * `SecurityGroupServerRpcMixin <https://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/securitygroups_rpc_base.py>`_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes
-  * SecurityGroupServerRpcMixin - defines the API methods used to fetch data from the database, in order to return responses to agents via the RPC API
-
-* `Agent RPC classes <https://git.openstack.org/cgit/openstack/neutron/tree/neutron/agent/securitygroups_rpc.py>`_
-
-  * The SecurityGroupServerRpcApi defines the API methods that agents can call back to the plugin running on the Neutron controller
-  * The SecurityGroupAgentRpcCallbackMixin defines methods that the plugin uses to call back into an agent after performing an action requested by an agent.
-
-
-IPTables Driver
----------------
-
-*  ``prepare_port_filter`` takes a ``port`` argument, which is a ``dictionary`` object that contains information about the port, including its ``security_group_rules``.
-
-*  ``prepare_port_filter`` appends the port to an internal dictionary, ``filtered_ports``, which is used to track the internal state.
-
-* Each security group has a `chain <http://www.thegeekstuff.com/2011/01/iptables-fundamentals/>`_ in iptables.
-
-* The ``IptablesFirewallDriver`` has a method to convert security group rules into iptables statements.
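-
-For illustration, the ``port`` dictionary described above might look roughly
-like this (the keys shown are a hypothetical subset; the rule fields follow
-the security group rule attributes of the Neutron API)::
-
-  port = {
-      'device': 'tap1234',  # illustrative device name
-      'security_group_rules': [
-          {'direction': 'ingress',
-           'ethertype': 'IPv4',
-           'protocol': 'tcp',
-           'port_range_min': 22,
-           'port_range_max': 22},
-      ],
-  }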
diff --git a/doc/source/devref/services_and_agents.rst b/doc/source/devref/services_and_agents.rst
deleted file mode 100644 (file)
index fcb9fa7..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Services and agents
-===================
-
-A typical Neutron setup consists of multiple services and agents running on
-one or multiple nodes (though some exotic setups potentially may not need any
-agents). Each of those services provides some of the networking or API
-services. Of special interest are:
-
-#. neutron-server that provides API endpoints and serves as a single point of
-   access to the database. It usually runs on nodes called Controllers.
-#. Layer2 agent that can utilize Open vSwitch, Linuxbridge or other
-   vendor-specific technology to provide network segmentation and isolation
-   for tenant networks. The L2 agent should run on every node where it is
-   deemed responsible for wiring and securing virtual interfaces (usually
-   both Compute and Network nodes).
-#. Layer3 agent that runs on Network node and provides East-West and
-   North-South routing plus some advanced services such as FWaaS or VPNaaS.
-
-For the purpose of this document, we refer to all services, servers and
-agents that run on any node simply as "services".
-
-
-Entry points
-------------
-
-Entry points for services are defined in setup.cfg under the
-"console_scripts" section.  Those entry points should generally point to
-main() functions located under the neutron/cmd/... path.
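-
-For example, a console_scripts entry in setup.cfg takes this shape (the
-service and module names shown are illustrative)::
-
-  [entry_points]
-  console_scripts =
-      neutron-example-agent = neutron.cmd.eventlet.example_agent:main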
-
-Note: some existing vendor/plugin agents still maintain their entry points in
-other locations. Developers responsible for those agents are welcome to apply
-the guideline above.
-
-
-Interacting with Eventlet
--------------------------
-
-Neutron extensively utilizes the eventlet library to provide an asynchronous
-concurrency model for its services. To utilize it correctly, the following
-should be kept in mind.
-
-If a service utilizes the eventlet library, then it should not call
-eventlet.monkey_patch() directly, but should instead maintain its entry point
-main() function under neutron/cmd/eventlet/... In that case, the standard
-Python library will be automatically patched for the service on entry point
-import (monkey patching is done inside the `python package file
-<http://git.openstack.org/cgit/openstack/neutron/tree/neutron/cmd/eventlet/__init__.py>`_).
-
-Note: an entry point 'main()' function may just be an indirection to a real
-callable located elsewhere, as is done for reference services such as DHCP, L3
-and the neutron-server.
-
-For more info on the rationale behind the code tree setup, see `the
-corresponding cross-project spec <https://review.openstack.org/154642>`_.
-
-
-Connecting to the Database
---------------------------
-
-Only neutron-server connects to the neutron database. Agents must never
-connect directly to the database, as this would break the ability to do
-rolling upgrades.
diff --git a/doc/source/devref/sriov_nic_agent.rst b/doc/source/devref/sriov_nic_agent.rst
deleted file mode 100644 (file)
index bc63e96..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-L2 Networking with SR-IOV enabled NICs
-======================================
-SR-IOV (Single Root I/O Virtualization) is a specification that allows
-a PCIe device to appear to be multiple separate physical PCIe devices.
-SR-IOV works by introducing the idea of physical functions (PFs) and virtual functions (VFs).
-Physical functions (PFs) are full-featured PCIe functions.
-Virtual functions (VFs) are “lightweight” functions that lack configuration resources.
-
-SR-IOV supports VLANs for L2 network isolation; other networking technologies
-such as VXLAN/GRE may be supported in the future.
-
-The SR-IOV NIC agent manages the configuration of SR-IOV Virtual Functions
-that connect VM instances running on the compute node to the public network.
-
-In the most common deployments, there are compute nodes and a network node.
-A compute node can support VM connectivity via an SR-IOV enabled NIC. The
-SR-IOV NIC agent manages the admin state of Virtual Functions. In the future
-it will manage additional settings, such as quality of service, rate limits,
-spoof checking and more. The network node will usually be deployed with
-either Open vSwitch or Linux Bridge to support network node functionality.
-
-
-Further Reading
----------------
-
-* `Nir Yechiel - SR-IOV Networking – Part I: Understanding the Basics <http://redhatstackblog.redhat.com/2015/03/05/red-hat-enterprise-linux-openstack-platform-6-sr-iov-networking-part-i-understanding-the-basics/>`_
-* `SR-IOV Passthrough For Networking <https://wiki.openstack.org/wiki/SR-IOV-Passthrough-For-Networking/>`_
diff --git a/doc/source/devref/template_model_sync_test.rst b/doc/source/devref/template_model_sync_test.rst
deleted file mode 100644 (file)
index 43f7b87..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Template for ModelMigrationSync for external repos
-==================================================
-
-This section contains a template for a test which checks that the Python models
-for database tables are synchronized with the alembic migrations that create
-the database schema. This test should be implemented in all driver/plugin
-repositories that were split out from Neutron.
-
-What does the test do?
-----------------------
-
-This test compares models with the result of existing migrations. It is based on
-`ModelsMigrationsSync
-<http://docs.openstack.org/developer/oslo.db/api/sqlalchemy/test_migrations.html#oslo_db.sqlalchemy.test_migrations.ModelsMigrationsSync>`_
-which is provided by oslo.db and was adapted for Neutron. It compares core
-Neutron models and vendor specific models with migrations from Neutron core and
-migrations from the driver/plugin repo. This test is functional: it runs
-against the MySQL and PostgreSQL dialects. A detailed description of this
-test can be found in the Neutron Database Layer section - `Tests to verify that database
-migrations and models are in sync
-<http://docs.openstack.org/developer/neutron/devref/db_layer.html#module-neutron.tests.functional.db.test_migrations>`_.
-
-Steps for implementing the test
--------------------------------
-
-1. Import all models in one place
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Create a module ``networking_foo/db/models/head.py`` with the following
-content: ::
-
- from neutron.db.migration.models import head
- from networking_foo import models  # noqa
- # Alternatively, import separate modules here if the models are not in one
- # models.py file
-
-
- def get_metadata():
-     return head.model_base.BASEV2.metadata
-
-
-2. Implement the test module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The test uses external.py from Neutron. This file contains lists of table
-names, which were moved out of Neutron: ::
-
- VPNAAS_TABLES = [...]
-
- LBAAS_TABLES = [...]
-
- FWAAS_TABLES = [...]
-
- # Arista ML2 driver Models moved to openstack/networking-arista
- REPO_ARISTA_TABLES = [...]
-
- # Models moved to openstack/networking-cisco
- REPO_CISCO_TABLES = [...]
-
- ...
-
- TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + ...
-           + REPO_ARISTA_TABLES + REPO_CISCO_TABLES)
-
-
-The test also uses **VERSION_TABLE**, the name of the table in the database
-which contains the revision id of the head migration. It is preferred to keep
-this variable in
-``networking_foo/db/migration/alembic_migrations/__init__.py`` so it will be
-easy to use in the test.
-
-Create a module ``networking_foo/tests/functional/db/test_migrations.py``
-with the following content: ::
-
- from oslo_config import cfg
-
- from neutron.db.migration.alembic_migrations import external
- from neutron.db.migration import cli as migration
- from neutron.tests.common import base
- from neutron.tests.functional.db import test_migrations
-
- from networking_foo.db.migration import alembic_migrations
- from networking_foo.db.models import head
-
- # EXTERNAL_TABLES should contain all names of tables that are not related to
- # current repo.
- EXTERNAL_TABLES = set(external.TABLES) - set(external.REPO_FOO_TABLES)
-
-
- class _TestModelsMigrationsFoo(test_migrations._TestModelsMigrations):
-
-     def db_sync(self, engine):
-         cfg.CONF.set_override('connection', engine.url, group='database')
-         for conf in migration.get_alembic_configs():
-             self.alembic_config = conf
-             self.alembic_config.neutron_config = cfg.CONF
-             migration.do_alembic_command(conf, 'upgrade', 'heads')
-
-     def get_metadata(self):
-         return head.get_metadata()
-
-     def include_object(self, object_, name, type_, reflected, compare_to):
-         if type_ == 'table' and (name == 'alembic' or
-                                  name == alembic_migrations.VERSION_TABLE or
-                                  name in EXTERNAL_TABLES):
-             return False
-         else:
-             return True
-
-
- class TestModelsMigrationsMysql(_TestModelsMigrationsFoo,
-                                 base.MySQLTestCase):
-     pass
-
-
- class TestModelsMigrationsPsql(_TestModelsMigrationsFoo,
-                                base.PostgreSQLTestCase):
-     pass
-
-
-3. Add functional requirements
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A separate file ``networking_foo/tests/functional/requirements.txt`` should be
-created containing the following requirements that are needed for successful
-test execution.
-
-::
-
- psutil>=1.1.1,<2.0.0
- psycopg2
- PyMySQL>=0.6.2  # MIT License
-
-
-Example implementation `in VPNaaS <https://review.openstack.org/209943>`_
diff --git a/doc/source/devref/testing_coverage.rst b/doc/source/devref/testing_coverage.rst
deleted file mode 100644 (file)
index bf0b44d..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Test Coverage
-=============
-
-The intention is to track merged features or areas of code that lack certain
-types of tests. This document may be used both by developers that want to
-contribute tests, and by operators that are considering adopting a feature.
-
-Coverage
---------
-
-Note that while both API and scenario tests target a deployed OpenStack cloud,
-API tests are under the Neutron tree and scenario tests are under the Tempest
-tree.
-
-It is expected that API changes involve API tests, agent features or
-modifications involve functional tests, and Neutron-wide features involve
-fullstack or scenario tests as appropriate.
-
-The table references tests that explicitly target a feature, and not a job
-that is configured to run against a specific backend (thereby testing it
-implicitly). So, for example, while the Linux bridge agent has a job that
-runs the API and scenario tests with the Linux bridge agent configured, it
-does not have functional tests that target the agent explicitly. The 'gate'
-column is about running API/scenario tests with Neutron configured in a
-certain way, such as which L2 agent to use or what type of routers to create.
-
-* V            - Merged
-* Blank        - Not applicable
-* X            - Absent or lacking
-* Patch number - Currently in review
-* A name       - That person has committed to work on an item
-
-+------------------------+------------+------------+------------+------------+------------+------------+
-| Area                   | Unit       | Functional | API        | Fullstack  | Scenario   | Gate       |
-+========================+============+============+============+============+============+============+
-| DVR                    | Partial*   | L3-V OVS-X | V          | amuller    | X          | V          |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| L3 HA                  | V          | V          | X          | 196393     | X          | X          |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| L2pop                  | V          | X          |            | X          |            |            |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| DHCP HA                | V          |            |            | amuller    |            |            |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| OVS ARP responder      | V          | X*         |            | X*         |            |            |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| OVS agent              | V          | Partial    |            | V          |            | V          |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| Linux Bridge agent     | V          | X          |            | X          |            | Non-voting |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| Metering               | V          | X          | V          | X          |            |            |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| DHCP agent             | V          | 136834     |            | amuller    |            | V          |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| rpc_workers            |            |            |            |            |            | X          |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| Reference ipam driver  | V          |            |            |            |            | X (?)      |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| MTU advertisement      | V          |            |            | X          |            |            |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| VLAN transparency      | V          |            | X          | X          |            |            |
-+------------------------+------------+------------+------------+------------+------------+------------+
-| Prefix delegation      | V          | X          |            | X          |            |            |
-+------------------------+------------+------------+------------+------------+------------+------------+
-
-* DVR DB unit tests often assert that internal methods were called instead of
-  testing functionality. A lot of our unit tests are flawed in this way,
-  and DVR unit tests especially so. An attempt to remedy this was made
-  in patch 178880.
-* The OVS ARP responder cannot be tested at the gate because the gate uses
-  Ubuntu 14.04, which only packages OVS 2.0. OVS added ARP manipulation
-  support in version 2.1.
-* Prefix delegation doesn't have functional tests for the dibbler and pd
-  layers, nor for the L3 agent changes.
-
-Missing Infrastructure
-----------------------
-
-The following section details missing test *types*. If you want to pick up
-an action item, please contact amuller for more context and guidance.
-
-* The Neutron team would like Rally to persist results over a window of time,
-  graph and visualize this data, so that reviewers could compare average runs
-  against a proposed patch.
-* It's possible to test RPC methods via the unit test infrastructure. This
-  was proposed in patch 162811. The goal is to provide developers a
-  lightweight way to rapidly run tests that target the RPC layer, so that a
-  patch that modifies an RPC method's signature can be verified quickly and
-  locally.
-* Neutron currently does not test an in-place upgrade (upgrading the server
-  first, followed by agents, one machine at a time). We manually make sure
-  that the RPC layer remains backwards compatible via the review process, but
-  have no CI that verifies this.
diff --git a/doc/source/devref/upgrade.rst b/doc/source/devref/upgrade.rst
deleted file mode 100644 (file)
index 9ddeee6..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-.. note::
-
-    Much of this document discusses upgrade considerations for the Neutron
-    reference implementation using Neutron's agents. It's expected that each
-    Neutron plugin provides its own documentation that discusses upgrade
-    considerations specific to that choice of backend. For example, OVN does
-    not use Neutron agents, but does have a local controller that runs on each
-    compute node. OVN supports rolling upgrades, but information about how that
-    works should be covered in the documentation for networking-ovn, the OVN
-    Neutron plugin.
-
-Upgrade strategy
-================
-
-There are two general upgrade scenarios supported by Neutron:
-
-#. All services are shut down, code upgraded, then all services are started again.
-#. Services are upgraded gradually, based on operator service windows.
-
-The latter is the preferred way to upgrade an OpenStack cloud, since it allows
-for more granularity and less service downtime. This scenario is usually called
-'rolling upgrade'.
-
-Rolling upgrade
----------------
-
-Rolling upgrades imply that for some interval of time there will be services
-of different code versions running and interacting in the same cloud. This
-puts multiple constraints on the software.
-
-#. older services should be able to talk with newer services.
-#. older services should not require the database to have an older schema
-   (otherwise newer services that require the newer schema would not work).
-
-`More info on rolling upgrades in OpenStack
-<http://governance.openstack.org/reference/tags/assert_supports-rolling-upgrade.html>`_.
-
-Those requirements are achieved in Neutron by:
-
-#. having backwards compatibility code in the Neutron server to deal with
-   older messaging payloads, if the backend makes use of Neutron agents.
-#. isolating a single service that accesses the database (neutron-server).
-
-To simplify the matter, it's always assumed that the order of service
-upgrades is as follows:
-
-#. first, all neutron-servers are upgraded.
-#. then, if applicable, neutron agents are upgraded.
-
-This approach allows us to avoid backwards compatibility code on agent side and
-is in line with other OpenStack projects that support rolling upgrades
-(specifically, nova).
-
-Server upgrade
-~~~~~~~~~~~~~~
-
-Neutron-server is the very first component that should be upgraded to the new
-code. It's also the only component that relies on the new database schema
-being present; other components communicate with the cloud through AMQP and
-hence do not depend on a particular database state.
-
-Database upgrades are implemented with alembic migration chains.
-
-Database upgrade is split into two parts:
-
-#. neutron-db-manage upgrade --expand
-#. neutron-db-manage upgrade --contract
-
-Each part represents a separate alembic branch.
-
-:ref:`More info on alembic scripts <alembic_migrations>`.
-
-The former step can be executed while old neutron-server code is running. The
-latter step requires *all* neutron-server instances to be shut down. Once it's
-complete, neutron-servers can be started again.
-
-Agents upgrade
-~~~~~~~~~~~~~~
-
-.. note::
-
-    This section does not apply when the cloud does not use AMQP agents to
-    provide networking services to instances. In that case, other backend
-    specific upgrade instructions may also apply.
-
-Once neutron-server services are restarted with the new database schema and the
-new code, it's time to upgrade Neutron agents.
-
-Note that in the meantime, neutron-server should be able to serve AMQP messages
-sent by older versions of agents which are part of the cloud.
-
-The recommended order of agent upgrade (per node) is:
-
-#. first, L2 agents (openvswitch, linuxbridge, sr-iov).
-#. then, all other agents (L3, DHCP, Metadata, ...).
-
-The rationale for the agent upgrade order is that the L2 agent is usually
-responsible for wiring ports for other agents to use, so it's better to allow
-it to do its job first and then proceed with the other agents, which will use
-the already configured ports for their needs.
-
-Each network/compute node can have its own upgrade schedule that is independent
-of other nodes.
-
-AMQP considerations
-+++++++++++++++++++
-
-Since it's always assumed that the neutron-server component is upgraded
-before agents, only the former should handle both old and new RPC versions.
-
-The implication is that no code that handles UnsupportedVersion
-oslo.messaging exceptions belongs in agent code.
-
-:ref:`More information about RPC versioning <rpc_versioning>`.
-
-Interface signature
-'''''''''''''''''''
-
-An RPC interface is defined by its name, version, and (named) arguments that
-it accepts. There are no strict guarantees that arguments will have expected
-types or meaning, as long as they are serializable.
-
-Message content versioning
-''''''''''''''''''''''''''
-
-To provide better compatibility guarantees for rolling upgrades, RPC
-interfaces can also define a specific format for the arguments they accept.
-In the OpenStack world, this is usually implemented using the
-oslo.versionedobjects library, relying on the library to define the
-serialized form of the arguments that are passed over the AMQP wire.
-
-Note that Neutron has *not* adopted the oslo.versionedobjects library for its
-RPC interfaces yet (except for the QoS feature).
-
-:ref:`More information about RPC callbacks used for QoS <rpc_callbacks>`.
-
-Networking backends
-~~~~~~~~~~~~~~~~~~~
-
-A backend software upgrade should not result in any data plane disruptions.
-For example, the Open vSwitch L2 agent should not reset flows or rewire
-ports; the Neutron L3 agent should not delete namespaces left by an older
-version of the agent; the Neutron DHCP agent should not require immediate
-DHCP lease renewal; etc.
-
-The same considerations apply to setups that do not rely on agents. For
-example, an OpenDaylight or OVN controller should not break data plane
-connectivity during its upgrade process.
-
-Upgrade testing
----------------
-
-`Grenade <https://github.com/openstack-dev/grenade>`_ is the OpenStack project
-that is designed to validate upgrade scenarios.
-
-Currently, only the offline (non-rolling) upgrade scenario is validated in
-the Neutron gate. The upgrade scenario follows these steps:
-
-#. the 'old' cloud is set up using latest stable release code
-#. all services are stopped
-#. code is updated to the patch under review
-#. new database migration scripts are applied, if needed
-#. all services are started
-#. the 'new' cloud is validated with a subset of tempest tests
-
-The scenario validates that no configuration option names are changed in one
-cycle. More generally, it validates that the 'new' cloud is capable of running
-using the 'old' configuration files. It also validates that database migration
-scripts can be executed.
-
-The scenario does *not* validate AMQP versioning compatibility.
-
-Other projects (for example Nova) have so-called 'partial' grenade jobs where
-some services are left running using the old version of the code. Such a job
-would be needed in the Neutron gate to validate rolling upgrades for the
-project. Until then, it's up to reviewers to catch compatibility issues in
-patches under review.
-
-Another gap in testing concerns the split migration script branches. It's
-assumed that an 'old' cloud can successfully run after 'expand' migration
-scripts from the 'new' cloud are applied to its database, but this is not
-validated in the gate.
-
-.. _upgrade_review_guidelines:
-
-Review guidelines
------------------
-
-There are several upgrade related gotchas that should be tracked by reviewers.
-
-First things first, some general advice to reviewers: make sure new code does
-not violate requirements set by the `global OpenStack deprecation policy
-<http://governance.openstack.org/reference/tags/assert_follows-standard-deprecation.html>`_.
-
-Now to specifics:
-
-#. Configuration options:
-
-   * options should not be dropped from the tree without waiting for the
-     deprecation period to pass (currently it's one development cycle long),
-     and a deprecation message should be issued if the deprecated option is
-     used.
-   * option values should not change their meaning between releases.
-
-#. Data plane:
-
-   * agent restart should not result in data plane disruption (no Open vSwitch
-     ports reset; no network namespaces deleted; no device names changed).
-
-#. RPC versioning:
-
-   * no RPC version major number should be bumped before all agents have had
-     a chance to upgrade (meaning at least one release cycle is needed before
-     compatibility code to handle old clients is stripped from the tree).
-   * no compatibility code should be added to agent side of AMQP interfaces.
-   * server code should be able to handle all previous versions of agents,
-     unless the major version of an interface is bumped.
-   * no RPC interface arguments should change their meaning, or names.
-   * new arguments added to RPC interfaces should not be mandatory. This
-     means that the server should be able to handle old requests without the
-     new argument specified. Also, if the argument is not passed, the old
-     behaviour from before the addition of the argument should be retained.
-
-#. Database migrations:
-
-   * migration code should be split into two branches (contract, expand) as
-     needed. No code that is unsafe to execute while neutron-server is running
-     should be added to expand branch.
-   * if possible, contract migrations should be minimized or avoided to reduce
-     the time when API endpoints must be down during database upgrade.
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644 (file)
index 8c3825a..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-..
-      Copyright 2011-2013 OpenStack Foundation
-      All Rights Reserved.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-Welcome to Neutron's developer documentation!
-=============================================
-
-Neutron is an OpenStack project to provide "network connectivity as a service"
-between interface devices (e.g., vNICs) managed by other OpenStack services
-(e.g., nova). It implements the `Neutron API`_.
-
-.. _`Neutron API`: http://docs.openstack.org/api/openstack-network/2.0/content/
-
-This document describes Neutron for contributors of the project, and assumes
-that you are already familiar with Neutron from an `end-user perspective`_.
-
-.. _`end-user perspective`: http://docs.openstack.org/trunk/openstack-network/admin/content/index.html
-
-This documentation is generated by the Sphinx toolkit and lives in the source
-tree.  Additional documentation on Neutron and other components of OpenStack
-can be found on the `OpenStack wiki`_ and the `Neutron section of the wiki`_.
-The `Neutron Development wiki`_ is also a good resource for new contributors.
-
-.. _`OpenStack wiki`: http://wiki.openstack.org
-.. _`Neutron section of the wiki`: http://wiki.openstack.org/Neutron
-.. _`Neutron Development wiki`: http://wiki.openstack.org/NeutronDevelopment
-
-Enjoy!
-
-Neutron Policies
-================
-
-.. toctree::
-   :maxdepth: 2
-
-   policies/index
-
-Neutron Stadium
-===============
-
-.. toctree::
-   :maxdepth: 2
-
-   stadium/index
-
-Developer Docs
-==============
-
-.. toctree::
-   :maxdepth: 3
-
-   devref/index
-
-Dashboards
-==========
-
-There is a collection of dashboards to help developers and reviewers,
-located below.
-
-.. toctree::
-   :maxdepth: 2
-
-   dashboards/index
-
-API Extensions
-==============
-
-Go to http://api.openstack.org for information about OpenStack Network API extensions.
diff --git a/doc/source/policies/blueprints.rst b/doc/source/policies/blueprints.rst
deleted file mode 100644 (file)
index e78ba84..0000000
+++ /dev/null
@@ -1,298 +0,0 @@
-Blueprints and Specs
-====================
-
-The Neutron team uses the `neutron-specs
-<http://git.openstack.org/cgit/openstack/neutron-specs>`_ repository for its
-specification reviews. Detailed information can be found on the `wiki
-<https://wiki.openstack.org/wiki/Blueprints>`_. Please also find
-additional information in the reviews.rst file.
-
-The Neutron team does not enforce deadlines for specs. These can be submitted
-throughout the release cycle. The drivers team will review them on a regular
-basis throughout the release and, based on the load for the milestones, will
-assign them to milestones or move them to the backlog for selection into a
-future release.
-
-Please note that we use a `template
-<http://git.openstack.org/cgit/openstack/neutron-specs/tree/specs/template.rst>`_
-for spec submissions. It is not required to fill out all sections of the
-template. Review of the spec may require filling in information left out by
-the submitter.
-
-Sub-Projects and Specs
-----------------------
-
-The `neutron-specs <http://git.openstack.org/cgit/openstack/neutron-specs>`_
-repository is meant only for specs from Neutron itself and the advanced
-services repositories, i.e. FWaaS, LBaaS, and VPNaaS. Other
-sub-projects are encouraged to fold their specs into their own devref code
-in their sub-project gerrit repositories. Please see the additional comments
-in the Neutron team's `section <http://docs.openstack.org/developer/neutron/policies/neutron-teams.html#neutron-specs-core-reviewer-team>`_
-on reviewer requirements for the neutron-specs repository.
-
-Neutron Request for Feature Enhancements
-----------------------------------------
-
-In Liberty the team introduced the concept of feature requests. Feature
-requests are tracked as Launchpad bugs tagged with the new 'rfe' tag,
-allowing them to be submitted and reviewed before any code is written.
-This allows the team to verify the validity of a feature request before the
-process of submitting a neutron-spec is undertaken, or code is written.  It
-also allows the community to express interest in a feature by subscribing to
-the bug and posting a comment in Launchpad. The 'rfe' tag should not be used
-for work that is already well-defined and has an assignee. If you are intending
-to submit code immediately, a simple bug report will suffice. Note that the
-temptation to game the system exists, but given Neutron's history with this
-type of activity, it will not be tolerated and will be called out publicly
-on the mailing list.
-
-RFEs can be submitted by anyone and by having the community vote on them in
-Launchpad, we can gauge interest in features. The drivers team will evaluate
-these on a weekly basis along with the specs. RFEs will be evaluated in the
-current cycle against existing project priorities and available resources.
-
-The process for moving work from RFEs into the code involves someone assigning
-themselves the RFE bug and filing a matching spec using the slimmed down
-template in the neutron-specs repository. The spec will then be reviewed by the
-community and approved by the drivers team before landing in a release. This is
-the same process as before RFEs existed in Neutron.
-
-The workflow for the life of an RFE in Launchpad is as follows:
-
-* The bug is submitted and will by default land in the "New" state.
-* As soon as a member of the neutron-drivers team acknowledges the bug, it will
-  be moved into the "Confirmed" state. No assignee or milestone is set at this
-  time. The importance will be set to 'Wishlist' to signal the fact that the
-  report is indeed a feature or enhancement and there is no severity associated
-  with it.
-* The bug goes into the "Triaged" state while the discussion is ongoing.
-* The neutron-drivers team will evaluate the RFE and may advise the submitter
-  to file a spec in neutron-specs to elaborate on the feature request, in case
-  the RFE requires extra scrutiny, more design discussion, etc.
-* The PTL will work with the Lieutenant for the area identified by the
-  RFE to evaluate resources against the current workload.
-* If a spec is necessary, a member of the Neutron release team will register
-  a matching Launchpad blueprint to be used for milestone tracking purposes,
-  and as a landing page for the spec document, as available on `specs.o.o. <http://specs.openstack.org/openstack/neutron-specs/>`_.
-  The blueprint will then be linked to the original RFE bug report. This
-  step will ensure higher visibility of the RFE over the other RFEs
-  and consistency across the various fields required during the blueprint
-  registration process (Approver, Drafter, etc.). More precisely, the
-  blueprint submitter will work with the RFE submitter to identify the
-  following:
-
-  * Priority: there will be only two priorities to choose from, High and Low.
-    It is worth noting that priority is not to be confused with `importance <https://wiki.openstack.org/wiki/Bugs#Importance>`_,
-    which is a property of Launchpad Bugs. Priority gives an indication of
-    how promptly a work item should be tackled to allow it to complete. High
-    priority is to be chosen for work items that must make substantial
-    progress in the span of the targeted release, and deal with the
-    following aspects:
-
-    * OpenStack cross-project interaction and interoperability issues;
-    * Issues that affect the existing system's usability;
-    * Stability and testability of the platform;
-    * Risky implementations that may require complex and/or pervasive
-      changes to API and the logical model;
-
-    Low priority is to be chosen for everything else. RFEs without an associated
-    blueprint are effectively equivalent to low priority items. Bear in mind that,
-    even though staffing should take priorities into account (i.e. by giving more
-    resources to high priority items over low priority ones), the open source
-    reality is that they can both proceed at their own pace and low priority items
-    can indeed complete faster than high priority ones, even though they are
-    given fewer resources.
-
-  * Drafter: who is going to submit and iterate on the spec proposal; he/she
-    may be the RFE submitter.
-  * Assignee: who is going to develop the bulk of the code, or the
-    go-to contributor, if more people are involved. Typically this is
-    the RFE submitter, but not necessarily.
-  * Approver: a member of the Neutron team who can commit enough time
-    during the ongoing release cycle to ensure that code posted for review
-    does not languish, and that all aspects of the feature development are
-    taken care of (client, server changes and/or support from other projects
-    if needed - tempest, nova, openstack-infra, devstack, etc.), as well as
-    comprehensive testing.
-    This is typically a core member who has enough experience with what it
-    takes to get code merged, but other resources amongst the wider team can
-    also be identified. Approvers are volunteers who show a specific interest
-    in the blueprint specification, and have enough insight in the area of
-    work so that they can make effective code reviews and provide design
-    feedback. An approver will not work in isolation, as he/she can and will
-    reach out for help to get the job done; however he/she is the main
-    point of contact with the following responsibilities:
-
-  * Pair up with the drafter/assignee in order to help clear development
-    blockers.
-  * Review patches associated with the blueprint: approver and assignee
-    should touch base regularly and ping each other when new code is
-    available for review, or if review feedback goes unaddressed.
-  * Reach out to other reviewers for feedback in areas that may step
-    out of the zone of her/his confidence.
-  * Escalate issues, and raise warnings to the release team/PTL if the
-    effort shows slow progress. Approver and assignee are key parts to land
-    a blueprint: should the approver and/or assignee be unable to continue
-    the commitment during the release cycle, it is the Approver's
-    responsibility to reach out to the release team/PTL so that replacements
-    can be identified.
-  * Provide a status update during the Neutron IRC meeting, if required.
-
-  Approver `assignments <https://blueprints.launchpad.net/neutron/+assignments>`_
-  must be carefully identified to ensure that no-one overcommits. A
-  Neutron contributor develops code himself/herself, and if he/she is an
-  approver of more than a couple of blueprints in a single cycle/milestone
-  (depending on the complexity of the specs), he/she is most likely
-  oversubscribed.
-  The Neutron team will review the status of blueprints targeted for the
-  milestone during their weekly meeting to ensure a smooth progression of
-  the work planned. Blueprints for which resources cannot be identified
-  will have to be deferred.
-
-* In either case (a spec being required or not), once the discussion has
-  happened and there is positive consensus on the RFE, the report is 'approved',
-  and its tag will move from 'rfe' to 'rfe-approved'.
-* At this point, the RFE needs resources, and if none are identified for
-  some time the report will be marked incomplete.
-* As for setting the milestone (both for RFE bugs or blueprints), the current
-  milestone is always chosen, assuming that work will start as soon as the feature
-  is approved. Work that fails to complete by the defined milestone will roll
-  over automatically until it gets completed or abandoned.
-* If the code fails to merge, the bug report may be marked as incomplete,
-  unassigned and untargeted, and it will be garbage collected by
-  the Launchpad Janitor if no-one takes over in time. Renewed interest in the
-  feature will have to go through the RFE submission process once again.
-
-In summary:
-
-+------------+-----------------------------------------------------------------------------+
-|State       | Meaning                                                                     |
-+============+=============================================================================+
-|New         | This is where all RFEs start, as filed by the community.                    |
-+------------+-----------------------------------------------------------------------------+
-|Incomplete  | Drivers/LTs - Move to this state to mean, "more needed before proceeding"   |
-+------------+-----------------------------------------------------------------------------+
-|Confirmed   | Drivers/LTs - Move to this state to mean, "yeah, I see that you filed it"   |
-+------------+-----------------------------------------------------------------------------+
-|Triaged     | Drivers/LTs - Move to this state to mean, "discussion is ongoing"           |
-+------------+-----------------------------------------------------------------------------+
-|Won't Fix   | Drivers/LTs - Move to this state to reject an RFE.                          |
-+------------+-----------------------------------------------------------------------------+
-
-Once triaging is complete (i.e. the discussion is over) and the RFE is approved, the tag goes from 'rfe'
-to 'rfe-approved', and at this point the bug report goes through the usual state transitions.
-Note that the importance will be set to 'wishlist', to reflect the fact that the bug report
-is indeed not a bug, but a new feature or enhancement. This will also help RFEs that are
-not followed up by a blueprint stand out in the Launchpad `milestone dashboards <https://launchpad.net/neutron/+milestones>`_.
-
-The drivers team will be discussing the following bug reports during their IRC meeting:
-
-* `New RFEs <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=NEW&field.tag=rfe>`_
-* `Incomplete RFEs <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INCOMPLETE&field.tag=rfe>`_
-* `Confirmed RFEs <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=CONFIRMED&field.tag=rfe>`_
-* `Triaged RFEs <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=TRIAGED&field.tag=rfe>`_
-
-
-RFE Submission Guidelines
--------------------------
-
-Before we dive into the guidelines for writing a good RFE, it is worth mentioning
-that depending on your level of engagement with the Neutron project and your role
-(user, developer, deployer, operator, etc.), you are more than welcome to have
-a preliminary discussion of a potential RFE by reaching out to other people involved
-in the project. This usually happens by posting mails on the relevant mailing
-lists (e.g. `openstack-dev <http://lists.openstack.org>`_ - include [neutron] in
-the subject) or on the #openstack-neutron IRC channel on Freenode. If ongoing
-code reviews are related to your feature, posting comments/questions on gerrit
-may also be a way to engage. Some amount of interaction with Neutron developers
-will give you an idea of the plausibility and form of your RFE before you submit
-it. That said, this is not mandatory.
-
-When you submit a bug report on https://bugs.launchpad.net/neutron/+filebug,
-there are two fields that must be filled: 'summary' and 'further information'.
-The 'summary' must be brief enough to fit in one line: if you can't describe it
-in a few words it may mean that you are either trying to capture more than one
-RFE at once, or that you are having a hard time defining what you are trying to
-solve at all.
-
-The 'further information' section must be a description of what you would like
-to see implemented in Neutron. The description should provide enough details for
-a knowledgeable developer to understand what existing problem in the
-current platform needs to be addressed, or what enhancement
-would make the platform more capable, from both a functional and a non-functional
-standpoint. To this aim it is important to describe 'why' you believe the RFE
-should be accepted, and to explain why Neutron would be a poorer
-platform without it. The description should be self-contained, and no external
-references should be necessary to further explain the RFE.
-
-In other words, when you write an RFE you should ask yourself the following
-questions:
-
-* What is it that I (specify what user - a user can be a human or another system)
-  cannot do today when interacting with Neutron? Alternatively, is there a
-  Neutron component X that is unable to accomplish something?
-* Is there something that you would like Neutron to handle better, i.e. in a more
-  scalable or more reliable way?
-* What is it that I would like to see happen after the RFE is accepted and
-  implemented?
-* Why do you think it is important?
-
-Once you are happy with what you wrote, add 'rfe' as a tag, and submit. Do not
-worry, we are here to help you get it right! Happy hacking.
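-
-As an illustration only (the feature, names, and details below are invented),
-an RFE following these guidelines might look like::
-
-    Summary: [RFE] Support bulk release of floating IPs
-
-    Further information:
-    As a tenant (a human user), I cannot release hundreds of floating
-    IPs in one operation today; I must issue one API call per IP, which
-    is slow and error-prone. I would like Neutron to accept a single
-    bulk-delete request. This is important because large deployments
-    recycle addresses routinely, and without it cleanups take minutes
-    instead of seconds.
-
-    Tags: rfe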
-
-
-Missing your target
--------------------
-
-There are occasions when a spec will be approved and the code will not land in
-the cycle it was targeted at. For these cases, the work flow to get the spec
-into the next release is as follows:
-
-* During the RC window, the PTL will create a directory named '<release>' under
-  the 'backlog' directory in the neutron specs repo, and he/she will move all
-  specs that did not make the release to this directory.
-* Anyone can propose a patch to neutron-specs which moves a spec from the
-  previous release into the new release directory.
-
-The specs which are moved in this way can be fast-tracked into the next
-release. Please note that it is required to re-propose the spec for the new
-release.
-
-
-Documentation
--------------
-
-The above process involves two places where any given feature can start to be
-documented - namely in the RFE bug, and in the spec - and in addition to those
-Neutron has a substantial `developer reference guide
-<http://docs.openstack.org/developer/neutron/devref/index.html>`_ (aka
-'devref'), and user-facing docs such as the `networking guide
-<http://docs.openstack.org/networking-guide/>`_.  So it might be asked:
-
-* What is the relationship between all of those?
-
-* What is the point of devref documentation, if everything has already been
-  described in the spec?
-
-The answers have been beautifully expressed in an `openstack-dev post
-<http://lists.openstack.org/pipermail/openstack-dev/2015-December/081458.html>`_:
-
-1. RFE: "I want X"
-2. Spec: "I plan to implement X like this"
-3. devref: "How X is implemented and how to extend it"
-4. OS docs: "API and guide for using X"
-
-Once a feature X has been implemented, we shouldn't have to go back to its
-RFE bug or spec to find information on it.  The devref may reuse a lot of
-content from the spec, but the spec is not maintained and the implementation
-may differ in some ways from what was intended when the spec was agreed.  The
-devref should be kept current with refactorings, etc., of the implementation.
-
-Devref content should be added as part of the implementation of a new feature.
-Since the spec is not maintained after the feature is implemented, the devref
-should include a maintained version of the information from the spec.
-
-If a feature requires OS docs (4), the commit message for the feature patch
-shall include 'DocImpact'.  If the feature is purely a developer facing thing,
-(4) is not needed.
diff --git a/doc/source/policies/bugs.rst b/doc/source/policies/bugs.rst
deleted file mode 100644 (file)
index 6d45114..0000000
+++ /dev/null
@@ -1,685 +0,0 @@
-Neutron Bugs
-============
-
-Neutron (client, core, FWaaS, LBaaS, VPNaaS) maintains all of its bugs in the following
-Launchpad projects:
-
-* `Launchpad Neutron <https://bugs.launchpad.net/neutron>`_
-* `Launchpad python-neutronclient <https://bugs.launchpad.net/python-neutronclient>`_
-
-
-Neutron Bugs Team In Launchpad
-------------------------------
-
-The `Neutron Bugs <https://launchpad.net/~neutron-bugs>`_ team in Launchpad
-is used to allow access to the projects above. Members of the above group
-have the ability to set bug priorities, target bugs to releases, and other
-administrative tasks around bugs. The administrators of this group are the
-members of the `neutron-drivers-core
-<https://review.openstack.org/#/admin/groups/464,members>`_ gerrit group.
-The non-administrator members of this group include anyone who is involved
-with the Neutron project and has a desire to assist with bug triage.
-
-If you would like to join this Launchpad group, it's best to reach out to a
-member of the above mentioned neutron-drivers-core team in #openstack-neutron
-on Freenode and let them know why you would like to be a member. The team is
-more than happy to add additional bug triage capability, but it helps to know
-who is requesting access, and IRC is a quick way to make the connection.
-
-As outlined below, the bug deputy is a volunteer who wants to help with defect
-management. Permissions will have to be granted as people sign up
-for the deputy role. Permission won't be given freely; a person must show
-some degree of prior involvement.
-
-
-Neutron Bug Deputy
-------------------
-
-Neutron maintains the notion of a "bug deputy". The bug deputy plays an
-important role in the Neutron community. As a large project, Neutron is
-routinely fielding many bug reports. The bug deputy is responsible for
-acting as a "first contact" for these bug reports and performing initial
-screening/triaging. The bug deputy is expected to communicate with the
-various Neutron teams when a bug has been triaged. In addition, the bug
-deputy should be reporting "High" and "Critical" priority bugs.
-
-To avoid burnout, and to give a chance to everyone to gain experience in
-defect management, the Neutron bug deputy is a rotating role. The rotation
-will be set on a period (typically one or two weeks) determined by the team
-during the weekly Neutron IRC meeting and/or according to holidays. During
-the Neutron IRC meeting we will expect a volunteer to step up for the period.
-Members of the Neutron core team are invited to fill the role;
-however, non-core Neutron contributors who are interested are also
-encouraged to take up the role.
-
-This contributor is going to be the bug deputy for the period, and he/she
-will be asked to report to the team during the subsequent IRC meeting. The
-PTL will also work with the team to ensure that everyone gets his/her fair
-share of fulfilling this duty. It is reasonable to expect some imbalance
-from time to time, and the team will work together to resolve it to ensure
-that everyone is 100% effective and well rounded in their role as
-*custodian* of Neutron quality. Should the duty load be too much in busy
-times of the release, the PTL and the team will work together to assess
-whether more than one deputy is necessary in a given period.
-
-The presence of a bug deputy does not mean the rest of the team is simply off
-the hook for the period; in fact, the bug deputy will have to actively work
-with the Lieutenants/Drivers, who should help in getting bug reports
-moving down the resolution pipeline.
-
-During the period a member acts as bug deputy, he/she is expected to watch
-bugs filed against the Neutron projects (as listed above) and do a first
-screening to determine potential severity, tagging, logstash queries, other
-affected projects, affected releases, etc.
-
-From time to time bugs will be filed and auto-assigned by members of the
-core team to get them to a swift resolution. Obviously, the deputy is exempt
-from screening these.
-
-Finally, the PTL will work with the deputy to produce a brief summary of the
-issues of the week to be shared with the larger team during the weekly IRC
-meeting and tracked in the meeting notes.
-
-
-Plugin and Driver Repositories
-------------------------------
-
-Many plugins and drivers have backend code that exists in another repository.
-These repositories may have their own Launchpad projects for bugs.  The teams
-working on the code in these repos assume full responsibility for bug handling
-in those projects. For this reason, bugs whose solution would exist solely in
-the plugin/driver repo should not have Neutron in the affected projects section.
-In other words, add Neutron (or any other project) to that list only if you
-expect that a patch to that repo is needed in order to solve the bug.
-
-It's also worth adding that some of these projects are part of the
-so-called Neutron `stadium <http://governance.openstack.org/reference/projects/neutron.html#deliverables-and-tags>`_.
-Because of that, their release is managed centrally by the Neutron
-release team; requests for releases need to be funnelled and screened
-properly before they can happen. To this aim, the process to request a release
-is as follows:
-
-* Create a bug report in your Launchpad project: provide details as to what
-  you would like to release.
-* Add Neutron to the list of affected projects.
-* Add 'release-subproject' tag to the list of tags for the bug report.
-* The Neutron release management team will watch these bugs, and work with
-  you to have the request fulfilled by following the instructions found `here <http://docs.openstack.org/developer/neutron/devref/sub_project_guidelines.html#sub-project-release-process>`_.
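-
-For illustration only (the project name, version, and details below are
-invented), such a release request might read::
-
-    Summary: Please release networking-foo 1.2.0
-
-    Further information: please tag and release networking-foo 1.2.0
-    from the current HEAD of master; it contains only bug fixes on top
-    of 1.1.0.
-
-    Affected projects: networking-foo, neutron
-    Tags: release-subproject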
-
-
-.. _guidelines:
-
-Bug Screening Best Practices
-----------------------------
-
-When screening bug reports, the first step for the bug deputy is to assess
-how well written the bug report is, and whether there is enough information
-for anyone else besides the bug submitter to reproduce the bug and come up
-with a fix. There is plenty of information on the `OpenStack wiki <https://wiki.openstack.org/wiki/Bugs>`_
-on how to write a good bug `report <https://wiki.openstack.org/wiki/BugFilingRecommendations>`_
-and to learn how to tell a good bug report from a bad one. Should the bug
-report not adhere to these best practices, the bug deputy's first step
-would be to redirect the submitter to this section, invite him/her to supply
-the missing information, and mark the bug report as 'Incomplete'. For future
-submissions, the reporter can then use the template provided below to ensure
-speedy triaging. Done often enough, this practice should (ideally) ensure that
-in the long run, only 'good' bug reports are going to be filed.
-
-Bug Report Template
-~~~~~~~~~~~~~~~~~~~
-
-The more information you provide, the higher the chance of speedy triaging and
-resolution: identifying the problem is half the solution. To this aim, when
-writing a bug report, please consider supplying the following details and
-following these suggestions (a short sketch of such a report follows the list):
-
-* Summary (Bug title): keep it small, possibly one line. If you cannot describe
-  the issue in less than 100 characters, you are probably submitting more than
-  one bug at once.
-* Further information (Bug description): unlike other bug trackers,
-  Launchpad does not provide a structured way of submitting bug-related
-  information; everything goes in this section. Therefore, you are invited
-  to break the description down into the following fields:
-
-  * High level description: provide a brief sentence (a couple of lines) on
-    what you are trying to accomplish, or would like to accomplish differently;
-    the 'why' is important, but can be omitted if obvious (not to you, of course).
-  * Pre-conditions: what is the initial state of your system? Please consider
-    enumerating resources available in the system, if useful in diagnosing
-    the problem. Who are you? A regular tenant or a super-user? Are you
-    describing service-to-service interaction?
-  * Step-by-step reproduction steps: these can be actual neutron client
-    commands or raw API requests; grab the output if you think it is useful.
-    Please consider using `paste.o.o <http://paste.openstack.org>`_ for long
-    outputs, as Launchpad formats the description field poorly, making the
-    reading experience somewhat painful.
-  * Expected output: what did you hope to see? How would you have expected the
-    system to behave? A specific error/success code? The output in a specific
-    format? More, or less, than a user was supposed to see?
-  * Actual output: did the system silently fail (in this case log traces are
-    useful)? Did you get a different response from what you expected?
-  * Version:
-
-    * OpenStack version (Specific stable branch, or git hash if from trunk);
-    * Linux distro, kernel. For a distro, it's also worth knowing specific
-      versions of client and server, not just major release;
-    * Relevant underlying processes such as openvswitch, iproute, etc.;
-    * DevStack or other *deployment* mechanism?
-
-  * Environment: what services are you running (core services like DB and
-    AMQP broker, as well as Nova/hypervisor if it matters), and which type
-    of deployment (clustered servers); if you are running DevStack, is it a
-    single node? Is it multi-node? Are you reporting an issue in your own
-    environment or something you encountered in the OpenStack CI
-    Infrastructure, aka the Gate?
-  * Perceived severity: what would you consider the `importance <https://wiki.openstack.org/wiki/Bugs#Importance>`_
-    to be?
-
-* Tags (Affected component): try to use the existing tags by relying on
-  auto-completion. Please refrain from creating new ones; if you need
-  new "official" tags_, please reach out to the PTL. If you would like
-  a fix to be backported, please add a backport-potential tag.
-  This does not mean you will get the backport, as the stable team needs
-  to follow the `stable branch policy <https://wiki.openstack.org/wiki/StableBranch#Stable_branch_policy>`_
-  for merging fixes to stable branches.
-* Attachments: consider attaching full logs; truncated log snippets are rarely
-  useful. Be proactive, and consider attaching redacted configuration files
-  if you can, as that will speed up the resolution process greatly.
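-
-As a short sketch of a report following this template (every detail below is
-invented for illustration)::
-
-    High level description: booting a VM on a vxlan network
-    intermittently leaves its port DOWN.
-    Pre-conditions: regular tenant, one vxlan network, one VM being booted.
-    Step-by-step reproduction steps: create a port, boot a VM on it,
-    then poll the port status; full command output on paste.o.o.
-    Expected output: port status becomes ACTIVE within a few seconds.
-    Actual output: port stays DOWN; the L2 agent log shows a traceback.
-    Version: stable/liberty, Ubuntu 14.04, openvswitch 2.x, DevStack.
-    Environment: single-node DevStack, own environment (not the Gate).
-    Perceived severity: Medium.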
-
-
-Bug Triage Process
-~~~~~~~~~~~~~~~~~~
-
-The process of bug triaging consists of the following steps:
-
-* Check if a bug was filed for a correct component (project). If not, either
-  change the project or mark it as "Invalid".
-* Check if a similar bug was filed before. Rely on your memory if Launchpad
-  is not clever enough to spot a duplicate upon submission. If so, mark it
-  as a duplicate of the previous bug.
-* Check if the bug meets the requirements of a good bug report, by checking
-  that the guidelines_ are being followed. Omitted information is still
-  acceptable if the issue is clear nonetheless; use your good judgement and
-  your experience. Consult another core member/PTL if in doubt. If the bug
-  report needs some love, mark the bug as 'Incomplete', point the submitter
-  to this document and hope he/she turns around quickly with the missing
-  information.
-
-If the bug report is sound, move next:
-
-* Revise tags as recommended by the submitter. Ensure they are 'official'
-  tags.
-* Depending on ease of reproduction (or if the issue can be spotted in the
-  code), mark it as 'Confirmed'. If you are unable to assess/triage the
-  issue because you do not have access to a repro environment, consider
-  reaching out to the Lieutenant, the go-to person for the affected component;
-  he/she may be able to help: assign the bug to him/her for further
-  screening. If the bug already has an assignee, check that a patch is
-  in progress. Sometimes more than one patch is required to address an
-  issue; make sure that there is at least one patch that 'Closes' the bug,
-  or document/question what it takes to mark the bug as fixed.
-* If the bug is the result of a misuse of the system, mark the bug either
-  as 'Won't fix', or 'Opinion' if you are still on the fence and need
-  other people's input.
-* Assign the importance after reviewing the proposed severity. Bugs that
-  obviously break core and widely used functionality should get assigned as
-  "High" or "Critical" importance. The same applies to bugs that were filed
-  for gate failures.
-* Choose a milestone, if you can. Targeted bugs are especially important
-  close to the end of the release.
-* (Optional). Add comments explaining the issue and possible strategy of
-  fixing/working around the bug. Also, as good as some are at adding all
-  thoughts to bugs, it is still helpful to share the in-progress items
-  that might not be captured in a bug description or during our weekly
-  meeting. In order to provide some guidance and reduce ramp-up time as
-  we rotate, tagging bugs with 'needs-attention' can be useful to quickly
-  identify what reports need further screening/eyes on.
-
-You are done! Iterate.
-
-
-Bug Expiration Policy and Bug Squashing
----------------------------------------
-
-More can be found at this `Launchpad page <https://help.launchpad.net/BugExpiry>`_.
-In a nutshell, in order to make a bug report expire automatically, it needs to be
-unassigned, untargeted, and marked as Incomplete.
-
-The OpenStack community has had `Bug Days <https://wiki.openstack.org/wiki/BugDays>`_
-but they have not been wildly successful. In order to keep the list of open bugs
-at a manageable number (closer to 100 than to 1000), at the end of
-each release (in feature freeze and/or during less busy times), the PTL, with the
-help of the team, will go through the list of open (namely new, opinion, in progress,
-confirmed, triaged) bugs, and do a major sweep to have the Launchpad Janitor pick
-them up. This gives a 60-day grace period for reporters/assignees to come back and
-revive the bug. Assuming that, at steady state, bugs are properly reported, acknowledged
-and fix-proposed, losing unaddressed issues is not going to be a major problem,
-but brief stats will be collected to assess how the team is doing over time.
-
-
-.. _tags:
-
-Tagging Bugs
-------------
-
-Launchpad's Bug Tracker allows you to create ad-hoc groups of bugs with tagging.
-
-In the Neutron team, we have a list of agreed tags that we may apply to bugs
-reported against various aspects of Neutron itself. The list of approved tags
-used to be available on the `wiki <https://wiki.openstack.org/wiki/Bug_Tags#Neutron>`_;
-however, the section has been moved here to improve collaborative editing and
-to keep the information more current. By using a standard set of tags, each
-explained on this page, we can avoid confusion. A bug report can have more than
-one tag at any given time.
-
-Proposing New Tags
-~~~~~~~~~~~~~~~~~~
-
-New tags, or changes in the meaning of existing tags (or deletion), are to be
-proposed via patch to this section. After discussion, and approval, a member of
-the bug team will create/delete the tag in Launchpad. Each tag covers an area
-with an identified go-to contact or `Lieutenant <http://docs.openstack.org/developer/neutron/policies/core-reviewers.html#core-review-hierarchy>`_,
-who can provide further insight. Bug queries are provided below for convenience;
-more will be added over time if needed.
-
-+-------------------------------+---------------------------------------+----------------------+
-| Tag                           | Description                           | Contact              |
-+===============================+=======================================+======================+
-| access-control_               | A bug affecting RBAC and policy.json  | Kevin Benton         |
-+-------------------------------+---------------------------------------+----------------------+
-| api_                          | A bug affecting the API layer         | Salvatore Orlando    |
-+-------------------------------+---------------------------------------+----------------------+
-| baremetal_                    | A bug affecting Ironic support        | Sukhdev Kapur        |
-+-------------------------------+---------------------------------------+----------------------+
-| db_                           | A bug affecting the DB layer          | Henry Gessau         |
-+-------------------------------+---------------------------------------+----------------------+
-| dns_                          | A bug affecting DNS integration       | Miguel Lavalle       |
-+-------------------------------+---------------------------------------+----------------------+
-| doc_                          | A bug affecting in-tree doc           | Edgar Magana         |
-+-------------------------------+---------------------------------------+----------------------+
-| fullstack_                    | A bug in the fullstack subtree        | Assaf Muller         |
-+-------------------------------+---------------------------------------+----------------------+
-| functional-tests_             | A bug in the functional tests subtree | Assaf Muller         |
-+-------------------------------+---------------------------------------+----------------------+
-| fwaas_                        | A bug affecting neutron-fwaas         | Sean Collins         |
-+-------------------------------+---------------------------------------+----------------------+
-| gate-failure_                 | A bug affecting gate stability        | Armando Migliaccio   |
-+-------------------------------+---------------------------------------+----------------------+
-| ipv6_                         | A bug affecting IPv6 support          | Henry Gessau         |
-+-------------------------------+---------------------------------------+----------------------+
-| l2-pop_                       | A bug in L2 Population mech driver    | Kevin Benton         |
-+-------------------------------+---------------------------------------+----------------------+
-| l3-dvr-backlog_               | A bug affecting distributed routing   | Ryan Moats           |
-+-------------------------------+---------------------------------------+----------------------+
-| l3-ha_                        | A bug affecting L3 HA (vrrp)          | Assaf Muller         |
-+-------------------------------+---------------------------------------+----------------------+
-| l3-ipam-dhcp_                 | A bug affecting L3/DHCP/metadata      | Miguel Lavalle       |
-+-------------------------------+---------------------------------------+----------------------+
-| lbaas_                        | A bug affecting neutron-lbaas         | Brandon Logan        |
-+-------------------------------+---------------------------------------+----------------------+
-| linuxbridge_                  | A bug affecting ML2/linuxbridge       | Sean Collins         |
-+-------------------------------+---------------------------------------+----------------------+
-| loadimpact_                   | Performance penalty/improvements      | Ryan Moats           |
-+-------------------------------+---------------------------------------+----------------------+
-| logging_                      | An issue with logging guidelines      | Matt Riedemann       |
-+-------------------------------+---------------------------------------+----------------------+
-| low-hanging-fruit_            | Starter bugs for new contributors     | N/A                  |
-+-------------------------------+---------------------------------------+----------------------+
-| metering_                     | A bug affecting the metering layer    | ?                    |
-+-------------------------------+---------------------------------------+----------------------+
-| needs-attention_              | A bug that needs further screening    | PTL/Bug Deputy       |
-+-------------------------------+---------------------------------------+----------------------+
-| opnfv_                        | Reported by/affecting OPNFV initiative| Drivers team         |
-+-------------------------------+---------------------------------------+----------------------+
-| ops_                          | Reported by or affecting operators    | Drivers Team         |
-+-------------------------------+---------------------------------------+----------------------+
-| oslo_                         | An interop/cross-project issue        | Ihar Hrachyshka      |
-+-------------------------------+---------------------------------------+----------------------+
-| ovs_                          | A bug affecting ML2/OVS               | Kevin Benton         |
-+-------------------------------+---------------------------------------+----------------------+
-| ovs-lib_                      | A bug affecting OVS Lib               | Terry Wilson         |
-+-------------------------------+---------------------------------------+----------------------+
-| py34_                         | Issues affecting the Python 3 porting | Cedric Brandily      |
-+-------------------------------+---------------------------------------+----------------------+
-| qos_                          | A bug affecting ML2/QoS               | Miguel Ajo           |
-+-------------------------------+---------------------------------------+----------------------+
-| released-neutronclient_       | A bug affecting released clients      | Kyle Mestery         |
-+-------------------------------+---------------------------------------+----------------------+
-| release-subproject_           | A request to release a subproject     | Kyle Mestery         |
-+-------------------------------+---------------------------------------+----------------------+
-| rfe_                          | Feature enhancements being screened   | Drivers Team         |
-+-------------------------------+---------------------------------------+----------------------+
-| rfe-approved_                 | Approved feature enhancements         | Drivers Team         |
-+-------------------------------+---------------------------------------+----------------------+
-| sg-fw_                        | A bug affecting security groups       | Kevin Benton         |
-+-------------------------------+---------------------------------------+----------------------+
-| sriov-pci-pt_                 | A bug affecting Sriov/PCI PassThrough | Moshe Levi           |
-+-------------------------------+---------------------------------------+----------------------+
-| troubleshooting_              | An issue affecting ease of debugging  | Assaf Muller         |
-+-------------------------------+---------------------------------------+----------------------+
-| unittest_                     | A bug affecting the unit test subtree | Cedric Brandily      |
-+-------------------------------+---------------------------------------+----------------------+
-| usability_                    | UX, interoperability, feature parity  | PTL/Drivers Team     |
-+-------------------------------+---------------------------------------+----------------------+
-| vpnaas_                       | A bug affecting neutron-vpnaas        | Paul Michali         |
-+-------------------------------+---------------------------------------+----------------------+
-| xxx-backport-potential_       | Cherry-pick request for stable team   | Ihar Hrachyshka      |
-+-------------------------------+---------------------------------------+----------------------+
-
-.. _access-control:
-
-Access Control
-++++++++++++++
-
-* `Access Control - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=access-control>`_
-* `Access Control - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=access-control>`_
-
-.. _api:
-
-API
-+++
-
-* `API - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=api>`_
-* `API - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=api>`_
-
-.. _baremetal:
-
-Baremetal
-+++++++++
-
-* `Baremetal - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=baremetal>`_
-* `Baremetal - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=baremetal>`_
-
-.. _db:
-
-DB
-++
-
-* `DB - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=db>`_
-* `DB - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=db>`_
-
-.. _dns:
-
-DNS
-+++
-
-* `DNS - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=dns>`_
-* `DNS - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=dns>`_
-
-.. _doc:
-
-DOC
-+++
-
-* `DOC - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=doc>`_
-* `DOC - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=doc>`_
-
-.. _fullstack:
-
-Fullstack
-+++++++++
-
-* `Fullstack - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=fullstack>`_
-* `Fullstack - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=fullstack>`_
-
-.. _functional-tests:
-
-Functional Tests
-++++++++++++++++
-
-* `Functional tests - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=functional-tests>`_
-* `Functional tests - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=functional-tests>`_
-
-.. _fwaas:
-
-FWAAS
-+++++
-
-* `FWaaS - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=fwaas>`_
-* `FWaaS - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=fwaas>`_
-
-.. _gate-failure:
-
-Gate Failure
-++++++++++++
-
-* `Gate failure - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=gate-failure>`_
-* `Gate failure - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=gate-failure>`_
-
-.. _ipv6:
-
-IPV6
-++++
-
-* `IPv6 - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=ipv6>`_
-* `IPv6 - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=ipv6>`_
-
-.. _l2-pop:
-
-L2 Population
-+++++++++++++
-
-* `L2 Pop - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=l2-pop>`_
-* `L2 Pop - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=l2-pop>`_
-
-.. _l3-dvr-backlog:
-
-L3 DVR Backlog
-++++++++++++++
-
-* `L3 DVR - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=l3-dvr-backlog>`_
-* `L3 DVR - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=l3-dvr-backlog>`_
-
-.. _l3-ha:
-
-L3 HA
-+++++
-
-* `L3 HA - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=l3-ha>`_
-* `L3 HA - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=l3-ha>`_
-
-.. _l3-ipam-dhcp:
-
-L3 IPAM DHCP
-++++++++++++
-
-* `L3 IPAM DHCP - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=l3-ipam-dhcp>`_
-* `L3 IPAM DHCP - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=l3-ipam-dhcp>`_
-
-.. _lbaas:
-
-LBAAS
-+++++
-
-* `LBaaS - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=lbaas>`_
-* `LBaaS - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=lbaas>`_
-
-.. _linuxbridge:
-
-LinuxBridge
-+++++++++++
-
-* `LinuxBridge - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=linuxbridge>`_
-* `LinuxBridge - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=linuxbridge>`_
-
-.. _loadimpact:
-
-Load Impact
-+++++++++++
-
-* `Load Impact - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=loadimpact>`_
-* `Load Impact - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=loadimpact>`_
-
-.. _logging:
-
-Logging
-+++++++
-
-* `Logging - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=logging>`_
-* `Logging - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=logging>`_
-
-.. _low-hanging-fruit:
-
-Low hanging fruit
-+++++++++++++++++
-
-* `Low hanging fruit - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=low-hanging-fruit>`_
-* `Low hanging fruit - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=low-hanging-fruit>`_
-
-.. _metering:
-
-Metering
-++++++++
-
-* `Metering - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=metering>`_
-* `Metering - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=metering>`_
-
-.. _needs-attention:
-
-Needs Attention
-+++++++++++++++
-
-* `Needs Attention - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=needs-attention>`_
-
-.. _opnfv:
-
-OPNFV
-+++++
-
-* `OPNFV - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=opnfv>`_
-
-.. _ops:
-
-Operators/Operations (ops)
-++++++++++++++++++++++++++
-
-* `Ops - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=ops>`_
-
-.. _oslo:
-
-OSLO
-++++
-
-* `Oslo - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=oslo>`_
-* `Oslo - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=oslo>`_
-
-.. _ovs:
-
-OVS
-+++
-
-* `OVS - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=ovs>`_
-* `OVS - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=ovs>`_
-
-.. _ovs-lib:
-
-OVS Lib
-+++++++
-
-* `OVS Lib - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=ovs-lib>`_
-* `OVS Lib - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=ovs-lib>`_
-
-.. _py34:
-
-PY34
-++++
-
-* `Py34 - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=py34>`_
-* `Py34 - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=py34>`_
-
-.. _qos:
-
-QoS
-+++
-
-* `QoS - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=qos>`_
-* `QoS - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=qos>`_
-
-.. _released-neutronclient:
-
-Released Neutron Client
-+++++++++++++++++++++++
-
-* `Released Neutron Client - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=released-neutronclient>`_
-* `Released Neutron Client - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=released-neutronclient>`_
-
-.. _release-subproject:
-
-Release Subproject
-++++++++++++++++++
-
-* `Release Subproject - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=release-subproject>`_
-* `Release Subproject - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=release-subproject>`_
-
-.. _rfe:
-
-RFE
-+++
-
-* `RFE - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=rfe>`_
-* `RFE - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=rfe>`_
-
-.. _rfe-approved:
-
-RFE-Approved
-++++++++++++
-
-* `RFE-Approved - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=rfe-approved>`_
-* `RFE-Approved - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=rfe-approved>`_
-
-.. _sriov-pci-pt:
-
-SRIOV-PCI PASSTHROUGH
-+++++++++++++++++++++
-
-* `SRIOV/PCI-PT - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=sriov-pci-pt>`_
-* `SRIOV/PCI-PT - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=sriov-pci-pt>`_
-
-.. _sg-fw:
-
-SG-FW
-+++++
-
-* `Security groups - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=sg-fw>`_
-* `Security groups - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=sg-fw>`_
-
-.. _troubleshooting:
-
-Troubleshooting
-+++++++++++++++
-
-* `Troubleshooting - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=Troubleshooting>`_
-* `Troubleshooting - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=Troubleshooting>`_
-
-.. _unittest:
-
-Unit test
-+++++++++
-
-* `Unit test - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=unittest>`_
-* `Unit test - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=unittest>`_
-
-.. _usability:
-
-Usability
-+++++++++
-
-* `UX - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=usability>`_
-* `UX - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=usability>`_
-
-.. _vpnaas:
-
-VPNAAS
-++++++
-
-* `VPNaaS - All bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=vpnaas>`_
-* `VPNaaS - In progress <https://bugs.launchpad.net/neutron/+bugs?field.status%3Alist=INPROGRESS&field.tag=vpnaas>`_
-
-.. _xxx-backport-potential:
-
-Backport/RC potential
-+++++++++++++++++++++
-
-* `All Liberty bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=liberty-backport-potential>`_
-* `All Kilo bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=kilo-backport-potential>`_
-* `All Juno bugs <https://bugs.launchpad.net/neutron/+bugs?field.tag=juno-backport-potential>`_
diff --git a/doc/source/policies/code-reviews.rst b/doc/source/policies/code-reviews.rst
deleted file mode 100644 (file)
index bdc102c..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-Neutron Code Reviews
-====================
-
-Code reviews are a critical component of all OpenStack projects. Neutron accepts patches from many
-diverse people with diverse backgrounds, employers, and experience levels. Code reviews provide a
-way to enforce a level of consistency across the project, and also allow for the careful
-onboarding of contributions from new contributors.
-
-Neutron Code Review Practices
------------------------------
-Neutron follows the `code review guidelines <https://wiki.openstack.org/wiki/ReviewChecklist>`_ as
-set forth for all OpenStack projects. It is expected that all reviewers are following the guidelines
-set forth on that page.
-
-Neutron Spec Review Practices
------------------------------
-In addition to code reviews, Neutron also maintains a BP specification git repository. Detailed
-instructions for the use of this repository are provided `here <https://wiki.openstack.org/wiki/Blueprints>`_.
-It is expected that Neutron core team members are actively reviewing specifications which are pushed out
-for review to the specification repository. In addition, there is a neutron-drivers team, composed of a
-handful of Neutron core reviewers, who can approve and merge Neutron specs.
-
-Some guidelines around this process are provided below:
-
-* Once a specification has been pushed, it is expected that it will not be approved for at least 3 days
-  after a first Neutron core reviewer has reviewed it. This allows additional cores to review the
-  specification.
-* For blueprints which the core team deems of High or Critical importance, core reviewers may be assigned
-  based on their subject matter expertise.
-* Specification priority will be set by the PTL with review by the core team once the specification is
-  approved.
-
-Tracking Review Statistics
---------------------------
-Stackalytics provides some nice interfaces to track review statistics. The links are provided below. These
-statistics are used to track not only Neutron core reviewer statistics, but also to track review statistics
-for potential future core members.
-
-* `30 day review stats <http://stackalytics.com/report/contribution/neutron-group/30>`_
-* `60 day review stats <http://stackalytics.com/report/contribution/neutron-group/60>`_
-* `90 day review stats <http://stackalytics.com/report/contribution/neutron-group/90>`_
-* `180 day review stats <http://stackalytics.com/report/contribution/neutron-group/180>`_
diff --git a/doc/source/policies/contributor-onboarding.rst b/doc/source/policies/contributor-onboarding.rst
deleted file mode 100644 (file)
index 6c792bb..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-Contributor Onboarding
-======================
-
-For new contributors, the following is useful onboarding information.
-
-Contributing to Neutron
------------------------
-
-Work within Neutron is discussed on the openstack-dev mailing list, as well as in the
-#openstack-neutron IRC channel. While these are great channels for engaging with the
-Neutron community, the bulk of discussion of patches and code happens in gerrit itself.
-
-With regards to gerrit, code reviews are a great way to learn about the project. There
-is also a list of `low or wishlist <https://bugs.launchpad.net/neutron/+bugs?field.searchtext=&orderby=-importance&field.status%3Alist=NEW&field.status%3Alist=CONFIRMED&field.status%3Alist=TRIAGED&field.status%3Alist=INPROGRESS&field.status%3Alist=FIXCOMMITTED&field.status%3Alist=INCOMPLETE_WITH_RESPONSE&field.status%3Alist=INCOMPLETE_WITHOUT_RESPONSE&field.importance%3Alist=LOW&field.importance%3Alist=WISHLIST&assignee_option=any&field.assignee=&field.bug_reporter=&field.bug_commenter=&field.subscriber=&field.structural_subscriber=&field.tag=&field.tags_combinator=ANY&field.has_cve.used=&field.omit_dupes.used=&field.omit_dupes=on&field.affects_me.used=&field.has_patch.used=&field.has_branches.used=&field.has_branches=on&field.has_no_branches.used=&field.has_no_branches=on&field.has_blueprints.used=&field.has_blueprints=on&field.has_no_blueprints.used=&field.has_no_blueprints=on&search=Search>`_ priority bugs which are ideal for a new contributor to take
-on. If you haven't done so, you should set up a Neutron development environment so you
-can actually run the code. Devstack is the usual convenient way to set up such
-an environment. See `devstack.org <http://devstack.org/>`_ or `NeutronDevstack <https://wiki.openstack.org/wiki/NeutronDevstack#Basic_Setup>`_
-for more information on using Neutron with devstack.
-
-Helping with documentation can also be a useful first step for a newcomer. `Here <https://bugs.launchpad.net/openstack-manuals/+bugs?field.tag=neutron>`_
-is a list of documentation bugs that are tagged with 'neutron'; bug reports are created
-there for neutron reviews with 'DocImpact' in the commit message.
-
-IRC Information and Etiquette
------------------------------
-
-The main IRC channel for Neutron is #openstack-neutron. We also utilize #openstack-lbaas
-for LBaaS specific discussions. The weekly meeting is documented in the `list of meetings <https://wiki.openstack.org/wiki/Meetings#OpenStack_Networking_.28Neutron.29>`_ wiki page.
diff --git a/doc/source/policies/gate-failure-triage.rst b/doc/source/policies/gate-failure-triage.rst
deleted file mode 100644 (file)
index db757b0..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-Neutron Gate Failure Triage
-===========================
-
-This page provides guidelines for spotting and assessing neutron gate failures. Some hints for triaging
-failures are also provided.
-
-Spotting Gate Failures
-----------------------
-This can be achieved using several tools:
-
-* `Joe Gordon's github.io pages <http://jogo.github.io/gate/>`_
-* `logstash <http://logstash.openstack.org/>`_
-
-Even though Joe's page is not an "official" OpenStack page, it provides a quick snapshot of the current
-status for the most important jobs. The page is built using data available at graphite.openstack.org.
-If you want to check how that is done go `here <https://github.com/jogo/jogo.github.io/tree/master/gate>`_
-(caveat: the color of the neutron job is very similar to that of the full job with nova-network).
-
-For checking gate failures with logstash the following query will return failures for a specific job:
-
-> build_status:FAILURE AND message:Finished  AND build_name:"check-tempest-dsvm-neutron" AND build_queue:"gate"
-
-Divide that count by the number of hits for the following query, which counts all runs of the job:
-
-> message:Finished  AND build_name:"check-tempest-dsvm-neutron" AND build_queue:"gate"
-
-It will return the failure rate in the selected period for a given job. It is important to remark that
-failures in the check queue might be misleading as the problem causing the failure is most of the time in
-the patch being checked. Therefore it is always advisable to work on failures occurred in the gate queue.
-However, these failures are a precious resource for assessing frequency and determining root cause of
-failures which manifest in the gate queue.
-
-The steps above will provide a quick outlook of where things stand. When the failure rate rises above 10% for
-a job in 24 hours, it's time to be on alert. 25% is amber alert. 33% is red alert. Anything above 50% means
-that somebody from the infra team probably already has a contract out on you. Whether you are relaxed, in
-alert mode, or freaking out because you see a red dot on your chest, it is always a good idea to check the
-elastic-recheck pages on a daily basis.
-
-Under the `gate pipeline <http://status.openstack.org/elastic-recheck/gate.html>`_ tab, you can see gate
-failure rates for already known bugs. The bugs on this page are ordered by decreasing failure rate (for the
-past 24 hours). If one of the bugs affecting Neutron is among those at the top of that list, you should check
-that the corresponding bug is already assigned and somebody is working on it. If not, and there is no good
-reason for that, ensure that somebody gets a crack at it as soon as possible. The other part of the
-story is to check for `uncategorized <http://status.openstack.org/elastic-recheck/data/uncategorized.html>`_
-failures. This is where failures for new (unknown) gate-breaking bugs end up; infra errors
-causing job failures also end up here. It is the duty of the diligent Neutron developer to ensure the
-classification rate for neutron jobs is as close as possible to 100%. To this end, the diligent Neutron
-developer should adopt the following procedure:
-
-1. Open logs for failed jobs and look for logs/testr_results.html.gz.
-2. If that file is missing, check console.html and see where the job failed.
-
-   1. If there is a failure in devstack-gate-cleanup-host.txt it's likely to be an infra issue.
-   2. If the failure is in devstacklog.txt it could be a devstack, neutron, or infra issue.
-
-3. However, most of the time the failure is in one of the tempest tests. Take note of the error message and go to
-   logstash.
-4. On logstash, search for occurrences of this error message, and try to identify the root cause of the failure
-   (see below).
-5. File a bug for this failure, and push an elastic-recheck query for it (see below).
-6. If you are confident with the area of this bug, and you have time, assign it to yourself; otherwise look for an
-   assignee or talk to the Neutron bug czar to find an assignee.
-
-Root Causing a Gate Failure
----------------------------
-Time-based identification, i.e. finding the naughty patch by log scavenging.
-
-Filing An Elastic Recheck Query
--------------------------------
-The `elastic recheck <http://status.openstack.org/elastic-recheck/>`_ page has all the current open ER queries.
-To file one, please see the `ER Wiki <https://wiki.openstack.org/wiki/ElasticRecheck>`_.
diff --git a/doc/source/policies/index.rst b/doc/source/policies/index.rst
deleted file mode 100644 (file)
index b849baa..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-..
-      Copyright 2014 Hewlett-Packard Development Company, L.P.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-Neutron Policies
-================
-
-In the Policies Guide, you will find documented policies for developing with
-Neutron. This includes the processes we use for blueprints and specs, bugs,
-contributor onboarding, core reviewer memberships, and other procedural
-items.
-
-.. toctree::
-   :maxdepth: 3
-
-   blueprints
-   bugs
-   contributor-onboarding
-   neutron-teams
-   gate-failure-triage
-   code-reviews
-   thirdparty-ci
-   office-hours
diff --git a/doc/source/policies/neutron-teams.rst b/doc/source/policies/neutron-teams.rst
deleted file mode 100644 (file)
index fe5daf2..0000000
+++ /dev/null
@@ -1,357 +0,0 @@
-Neutron Core Reviewers
-======================
-
-The `Neutron Core Reviewer Team <https://review.openstack.org/#/admin/groups/38,members>`_
-is responsible for many things related to Neutron. Many of these are mundane
-tasks, such as the following:
-
-* Ensuring the bug count is low
-* Curating the gate and triaging failures
-* Working on integrating shared code from projects such as Oslo
-* Ensuring documentation is up to date and remains relevant
-* Ensuring the level of testing for Neutron is adequate and remains relevant
-  as features are added
-* Helping new contributors with questions as they peel back the covers of
-  Neutron
-* Answering questions and participating in mailing list discussions
-* Interfacing with other OpenStack teams and ensuring they are going in the
-  same parallel direction
-* Reviewing and merging code into the neutron tree
-
-In essence, core reviewers share the following common ideals:
-
-1. They share responsibility in the project's success.
-2. They have made a long-term, recurring time investment to improve the
-   project.
-3. They spend their time doing what needs to be done to ensure the project's
-   success, not necessarily what is the most interesting or fun.
-
-A core reviewer's responsibility doesn't end with merging code. The lists
-above add context around these responsibilities.
-
-Core Review Hierarchy
----------------------
-
-As Neutron has grown in complexity, it has become impossible for any one
-person to know enough to merge changes across the entire codebase. Areas of
-expertise have developed organically, and it is not uncommon for existing
-cores to defer to these experts when changes are proposed. Existing cores
-should be aware of the implications when they do merge changes outside the
-scope of their knowledge. It is with this in mind that we propose a new system
-built around Lieutenants through a model of trust.
-
-In order to scale development and responsibility in Neutron, we have adopted
-a Lieutenant system. The PTL is the leader of the Neutron project, and
-ultimately responsible for decisions made in the project. The PTL has
-designated Lieutenants in place to help run portions of the Neutron project.
-The Lieutenants are in charge of their own areas, and they can propose core
-reviewers for their areas as well. The core reviewer addition and removal
-policies are documented below. The Lieutenants for each system, while responsible
-for their area, ultimately report to the PTL. The PTL may opt to have regular
-one on one meetings with the lieutenants. The PTL will resolve disputes in
-the project that arise between areas of focus, core reviewers, and other
-projects. Please note Lieutenants should be leading their own area of focus,
-not doing all the work themselves.
-
-As was mentioned in the previous section, a core's responsibilities do not
-end with merging code. They are responsible for bug triage and gate issues
-among other things. Lieutenants have an increased responsibility to ensure
-gate and bug triage for their area of focus is under control.
-
-The following are the current Neutron Lieutenants.
-
-+------------------------+---------------------------+----------------------+
-| Area                   | Lieutenant                | IRC nick             |
-+========================+===========================+======================+
-| API and DB             | Akihiro Motoki            | amotoki              |
-|                        +---------------------------+----------------------+
-|                        | Henry Gessau              | HenryG               |
-+------------------------+---------------------------+----------------------+
-| Built-In Control Plane | Kevin Benton              | kevinbenton          |
-+------------------------+---------------------------+----------------------+
-| Client                 | Akihiro Motoki            | amotoki              |
-+------------------------+---------------------------+----------------------+
-| Docs                   | Edgar Magana              | emagana              |
-+------------------------+---------------------------+----------------------+
-| Infra                  | Armando Migliaccio        | armax                |
-|                        +---------------------------+----------------------+
-|                        | Doug Wiegley              | dougwig              |
-+------------------------+---------------------------+----------------------+
-| L3                     | Carl Baldwin              | carl_baldwin         |
-+------------------------+---------------------------+----------------------+
-| Services               | Doug Wiegley              | dougwig              |
-+------------------------+---------------------------+----------------------+
-| Testing                | Assaf Muller              | amuller              |
-+------------------------+---------------------------+----------------------+
-
-Some notes on the above:
-
-* "Built-In Control Plane" means the L2 agents, DHCP agents, SGs, metadata
-  agents and ML2.
-* The client includes commands installed server side.
-* L3 includes the L3 agent, DVR, and IPAM.
-* Services includes FWaaS, LBaaS, and VPNaaS.
-* Note these areas may change as the project evolves due to code refactoring,
-  new feature areas, and libification of certain pieces of code.
-* Infra means interactions with infra from a neutron perspective.
-
-Neutron also consists of several plugins, drivers, and agents that are developed
-effectively as sub-projects within Neutron in their own git repositories.
-Lieutenants are also named for these sub-projects to identify a clear point of
-contact and leader for that area.  The Lieutenant is also responsible for
-updating the core review team for the sub-project's repositories.
-
-+------------------------+---------------------------+----------------------+
-| Area                   | Lieutenant                | IRC nick             |
-+========================+===========================+======================+
-| dragonflow             | Eran Gampel               | gampel               |
-|                        +---------------------------+----------------------+
-|                        | Gal Sagie                 | gsagie               |
-+------------------------+---------------------------+----------------------+
-| kuryr                  | Antoni Segura Puimedon    | apuimedo             |
-|                        +---------------------------+----------------------+
-|                        | Gal Sagie                 | gsagie               |
-+------------------------+---------------------------+----------------------+
-| networking-bgpvpn      | Mathieu Rohon             | matrohon             |
-|                        +---------------------------+----------------------+
-|                        | Thomas Morin              | tmorin               |
-+------------------------+---------------------------+----------------------+
-| networking-calico      | Neil Jerram               | neiljerram           |
-+------------------------+---------------------------+----------------------+
-| networking-infoblox    | John Belamaric            | johnbelamaric        |
-+------------------------+---------------------------+----------------------+
-| networking-l2gw        | Sukhdev Kapur             | sukhdev              |
-+------------------------+---------------------------+----------------------+
-| networking-midonet     | Ryu Ishimoto              | ryu25                |
-|                        +---------------------------+----------------------+
-|                        | Jaume Devesa              | devvesa              |
-|                        +---------------------------+----------------------+
-|                        | YAMAMOTO Takashi          | yamamoto             |
-+------------------------+---------------------------+----------------------+
-| networking-odl         | Flavio Fernandes          | flaviof              |
-|                        +---------------------------+----------------------+
-|                        | Kyle Mestery              | mestery              |
-+------------------------+---------------------------+----------------------+
-| networking-ofagent     | YAMAMOTO Takashi          | yamamoto             |
-+------------------------+---------------------------+----------------------+
-| networking-onos        | Vikram Choudhary          | vikram               |
-|                        +---------------------------+----------------------+
-|                        | Albert Dongfeng           | albert_dongfeng      |
-+------------------------+---------------------------+----------------------+
-| networking-ovn         | Russell Bryant            | russellb             |
-+------------------------+---------------------------+----------------------+
-| networking-plumgrid    | Fawad Khaliq              | fawadkhaliq          |
-+------------------------+---------------------------+----------------------+
-| networking-sfc         | Cathy Zhang               | cathy                |
-+------------------------+---------------------------+----------------------+
-| networking-vsphere    | Vivekanandan Narasimhan   | viveknarasimhan      |
-+------------------------+---------------------------+----------------------+
-| octavia                | German Eichberger         | xgerman              |
-+------------------------+---------------------------+----------------------+
-| vmware-nsx             | Gary Kotton               | garyk                |
-+------------------------+---------------------------+----------------------+
-
-Existing Core Reviewers
------------------------
-
-Existing core reviewers have been reviewing code for a varying number of
-cycles. With the new plan of Lieutenants and ownership, it's fair to try to
-understand how they fit into the new model. Existing core reviewers seem
-to mostly focus in particular areas and are cognizant of their own strengths
-and weaknesses. These members may not be experts in all areas, but know their
-limits, and will not exceed those limits when reviewing changes outside their
-area of expertise. The model is built on trust, and when that trust is broken,
-responsibilities will be taken away.
-
-Lieutenant Responsibilities
----------------------------
-
-In the hierarchy of Neutron responsibilities, Lieutenants are expected to
-partake in the following additional activities compared to other core
-reviewers:
-
-* Ensuring feature requests for their areas have adequate testing and
-  documentation coverage.
-* Gate triage and resolution. Lieutenants are expected to work to keep the
-  Neutron gate running smoothly by triaging issues, filing elastic recheck
-  queries, and closing gate bugs.
-* Triaging bugs for their specific areas.
-
-Neutron Teams
-=============
-
-Given all of the above, Neutron has the following core reviewer teams with
-responsibility over the areas of code listed below:
-
-Neutron Core Reviewer Team
---------------------------
-`Neutron core reviewers <https://review.openstack.org/#/admin/groups/38,members>`_ have
-merge rights to the following git repositories:
-
-* `openstack/neutron <https://git.openstack.org/cgit/openstack/neutron/>`_
-* `openstack/python-neutronclient <https://git.openstack.org/cgit/openstack/python-neutronclient/>`_
-
-Please note that as we adopt the system above with core specialty in
-particular areas, we expect this broad core team to shrink as people naturally
-evolve into an area of specialization.
-
-Neutron FWaaS Core Reviewer Team
---------------------------------
-Neutron `FWaaS core reviewers <https://review.openstack.org/#/admin/groups/500,members>`_
-have merge rights to the following git repositories:
-
-* `openstack/neutron-fwaas <https://git.openstack.org/cgit/openstack/neutron-fwaas/>`_
-
-Neutron LBaaS Core Reviewer Team
---------------------------------
-Neutron `LBaaS core reviewers <https://review.openstack.org/#/admin/groups/501,members>`_
-have merge rights to the following git repositories:
-
-* `openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas/>`_
-
-Neutron VPNaaS Core Reviewer Team
----------------------------------
-Neutron `VPNaaS core reviewers <https://review.openstack.org/#/admin/groups/502,members>`_
-have merge rights to the following git repositories:
-
-* `openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas/>`_
-
-Neutron Core Reviewer Teams for Plugins and Drivers
----------------------------------------------------
-The plugin decomposition effort has led to having many drivers with code in
-separate repositories with their own core reviewer teams. For each of the
-repositories in the following list, there is an associated core team:
-
-* `Neutron project team <http://governance.openstack.org/reference/projects/neutron.html>`_
-
-These teams are also responsible for handling their own specs/RFEs/features if
-they choose to use them.  However, by choosing to be a part of the Neutron
-project, they submit to oversight and veto by the Neutron PTL if any issues
-arise.
-
-Neutron Specs Core Reviewer Team
---------------------------------
-Neutron `specs core reviewers <https://review.openstack.org/#/admin/groups/314,members>`_
-have +2 rights to the following git repositories:
-
-* `openstack/neutron-specs <https://git.openstack.org/cgit/openstack/neutron-specs/>`_
-
-The Neutron specs core reviewer team is responsible for reviewing specs targeted to
-all Neutron git repositories (Neutron + Advanced Services). It is worth noting that
-specs reviewers have the following attributes which potentially differ from
-those of code reviewers:
-
-* Broad understanding of cloud and networking technologies
-* Broad understanding of core OpenStack projects and technologies
-* An understanding of the effect approved specs have on the team's development
-  capacity for each cycle
-
-Specs core reviewers may match core members of the above-mentioned groups, but
-the group can be extended to other individuals, if required.
-
-Drivers Team
-------------
-
-The `drivers team <https://review.openstack.org/#/admin/groups/464,members>`_ is
-the group of people who have full rights to the specs repo. This team, which matches
-`Launchpad Neutron Drivers team <https://launchpad.net/~neutron-drivers>`_, is
-instituted to ensure a consistent architectural vision for the Neutron project, and
-to continue to disaggregate and share the responsibilities of the Neutron PTL.
-The team is in charge of reviewing and commenting on
-`RFEs <http://docs.openstack.org/developer/neutron/policies/blueprints.html#neutron-request-for-feature-enhancements>`_,
-and working with specification contributors to provide guidance on the
-processes that govern contributions to the Neutron project as a whole. The team
-`meets regularly <https://wiki.openstack.org/wiki/Meetings/NeutronDrivers>`_
-to go over RFEs and discuss the project roadmap. Anyone is welcome to join
-and/or read the meeting notes.
-
-Release Team
-------------
-
-The `release team <https://review.openstack.org/#/admin/groups/150,members>`_ is
-a group of people with some additional gerrit permissions primarily aimed at
-allowing release management of Neutron sub-projects.  These permissions include:
-
-* Ability to push signed tags to sub-projects whose releases are managed by the
-  Neutron release team as opposed to the OpenStack release team.
-* Ability to push merge commits for Neutron or other sub-projects.
-* Ability to approve changes in all Neutron git repositories.  This is required
-  as the team needs to be able to quickly unblock things if needed, especially
-  at release time.
-
-Code Merge Responsibilities
-===========================
-
-While everyone is encouraged to review changes for these repositories, members
-of the Neutron core reviewer group have the ability to +2/-2 and +A changes to
-these repositories. This is an extra level of responsibility not to be taken
-lightly. Correctly merging code requires not only understanding the code
-itself, but also how the code affects things like documentation, testing, and
-interactions with other projects. It also means paying attention to release
-milestones and understanding whether a patch you're merging is targeted at the
-release, which is especially critical during feature freeze.
-
-The bottom line here is that merging code is a responsibility that Neutron
-core reviewers have.
-
-Adding or Removing Core Reviewers
----------------------------------
-
-A new Neutron core reviewer may be proposed at any time on the openstack-dev
-mailing list. Typically, the Lieutenant for a given area will propose a new
-core reviewer for their specific area of coverage, though the Neutron PTL may
-propose new core reviewers as well. The proposal is typically made after
-discussions with existing core reviewers. Once a proposal has been made,
-three existing Neutron core reviewers from the Lieutenant's area of focus must
-respond to the email with a +1. If the member is being added by a Lieutenant
-from an area of focus with fewer than three members, a simple majority will be
-used to determine if the vote is successful. Another Neutron core reviewer
-from the same area of focus can vote -1 to veto the proposed new core
-reviewer. The PTL will mediate all disputes for core reviewer additions.
-
-The PTL may remove a Neutron core reviewer at any time. Typically when a
-member has decreased their involvement with the project through a drop in
-reviews and participation in general project development, the PTL will propose
-and carry out their removal. Please note there is no voting or vetoing of
-core reviewer removal. Members who have previously been a core reviewer may be
-fast-tracked back into a core reviewer role if their involvement picks back up
-and the existing core reviewers support their re-instatement.
-
-Neutron Core Reviewer Membership Expectations
----------------------------------------------
-
-Neutron core reviewers have the following expectations:
-
-* Reasonable attendance at the weekly Neutron IRC meetings.
-* Participation in Neutron discussions on the mailing list, as well as
-  in-channel in #openstack-neutron.
-* Participation in Neutron related design summit sessions at the OpenStack
-  Summits.
-
-Please note in-person attendance at design summits, mid-cycles, and other code
-sprints is not a requirement to be a Neutron core reviewer. The Neutron team
-will do its best to facilitate virtual attendance at all events. Travel is not
-to be taken lightly, and we realize the costs involved for those who partake
-in attending these events.
-
-In addition to the above, code reviews are the most important requirement of
-Neutron core reviewers. Neutron follows the documented OpenStack `code review
-guidelines <https://wiki.openstack.org/wiki/ReviewChecklist>`_. We encourage
-all people to review Neutron patches, but core reviewers are required to
-maintain a level of review numbers relatively close to other core reviewers.
-There are no hard statistics around code review numbers, but in general we
-use 30, 60, 90 and 180 day stats when examining review stats.
-
-* `30 day review stats <http://stackalytics.com/report/contribution/neutron-group/30>`_
-* `60 day review stats <http://stackalytics.com/report/contribution/neutron-group/60>`_
-* `90 day review stats <http://stackalytics.com/report/contribution/neutron-group/90>`_
-* `180 day review stats <http://stackalytics.com/report/contribution/neutron-group/180>`_
-
-There are soft-touch items around being a Neutron core reviewer as well.
-Gaining trust with the existing Neutron core reviewers is important. Being
-able to work together with the existing Neutron core reviewer team is
-critical as well. Being a Neutron core reviewer means spending a significant
-amount of time with the existing Neutron core reviewers team on IRC, the
-mailing list, at Summits, and in reviews. Ensuring you participate and engage
-here is critical to becoming and remaining a core reviewer.
diff --git a/doc/source/policies/office-hours.rst b/doc/source/policies/office-hours.rst
deleted file mode 100644 (file)
index 4771e36..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-Neutron PTL Office Hours
-------------------------
-
-Neutron has evolved into a platform. As part of the broader `Big Tent <http://superuser.openstack.org/articles/openstack-as-layers-but-also-a-big-tent-but-also-a-bunch-of-cats>`_ initiative, Neutron has also opened its doors to the Neutron "Stadium"
-effort.
-
-This, combined with the new `Lieutenant System <http://docs.openstack.org/developer/neutron/policies/core-reviewers.html#core-review-hierarchy>`_,
-means the PTL is now responsible for leading an increasingly large and
-diverse group of contributors. To ensure weekly syncs between the PTL and
-the Lieutenants, as well as to allow for projects under the Neutron Stadium
-to have a sync point with the PTL, the project is setting up office hours
-in the #openstack-neutron-release IRC channel. The PTL will use these
-office hours to allow for questions and syncing with Lieutenants or anyone
-else.
-
-The current office hours can be seen on the `OpenStack eavesdrop page <http://eavesdrop.openstack.org/#Neutron_PTL_Office_Hours>`_.
-Please note the #openstack-neutron-release channel is logged to allow the
-consumption of these discussions by those who cannot make the times above.
diff --git a/doc/source/policies/thirdparty-ci.rst b/doc/source/policies/thirdparty-ci.rst
deleted file mode 100644 (file)
index 637d364..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-Neutron Third-party CI
-======================
-
-What Is Expected of Third-Party CI Systems for Neutron
--------------------------------------------------------
-
-As of the Liberty summit, Neutron no longer *requires* a third-party CI,
-but it is strongly encouraged, as internal neutron refactoring can break
-external plugins and drivers at any time.
-
-Neutron expects any Third Party CI system that interacts with gerrit to
-follow the requirements set by the Infrastructure team [1]_ as well as the
-Neutron Third Party CI guidelines below. Please ping the PTL in
-#openstack-neutron or send an email to the openstack-dev ML (with subject
-[neutron]) with any questions. Be aware that the Infrastructure documentation
-as well as this document are living documents and undergo changes. Track
-changes to the infrastructure documentation using this URL [2]_ (and please
-review the patches) and check this doc on a regular basis for updates.
-
-What Changes to Run Against
----------------------------
-
-If your code is a neutron plugin or driver, you should run against every
-neutron change submitted, except for docs, tests, tools, and top-level
-setup files. You can skip your CI runs for such exceptions by using
-``skip-if`` and ``all-files-match-any`` directives in Zuul.
-You can see a programmatic example of the exceptions here [3]_.
-
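-As a sketch, assuming the Zuul layout.yaml format used in project-config [3]_
-and a hypothetical job name, such a filter might look like::
-
-   jobs:
-     - name: ^.*-networking-foo-ci$
-       skip-if:
-         - project: ^openstack/neutron$
-           all-files-match-any:
-             - ^.*\.rst$
-             - ^doc/.*$
-             - ^neutron/tests/.*$
-             - ^tools/.*$
-             - ^setup\.cfg$
-             - ^tox\.ini$
-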
-If your code is in a neutron-\*aas repo, you should run against the tests
-for that repo. You may also run against every neutron change, if your service
-driver is using neutron interfaces that are not provided by your service
-plugin (e.g. loadbalancer/plugin.py). If you are using only plugin interfaces,
-it should be safe to test against only the service repo tests.
-
-What Tests To Run
------------------
-
-* Network API tests:
-  http://git.openstack.org/cgit/openstack/tempest/tree/tempest/api/network
-* Network scenario tests (the test_network_* tests).
-* Any tests written specifically for your setup.
-
-Run with the test filter: 'network'. This will include all neutron-specific
-tests as well as any other tests that are tagged as requiring networking. An
-example tempest setup for devstack-gate::
-
-   export DEVSTACK_GATE_NEUTRON=1
-   export DEVSTACK_GATE_TEMPEST_REGEX='(?!.*\[.*\bslow\b.*\])((network)|(neutron))'
-
-An example setup for LBaaS::
-
-   export DEVSTACK_GATE_NEUTRON=1
-   export DEVSTACK_GATE_TEMPEST_REGEX='(?!.*\[.*\bslow\b.*\])(alancer|SimpleReadOnlyNeutron|tempest.api.network)'
-
-Third Party CI Voting
----------------------
-
-The Neutron team encourages you NOT to vote -1 with a third-party CI. False
-negatives are noisy to the community, and have given -1 votes from third-party
-CIs a bad reputation. Really bad, to the point of people ignoring them all.
-Failure messages are useful to those doing refactors, and provide you with
-feedback on the state of your plugin.
-
-If you insist on voting, note that by default the infra team does not allow
-new third-party CI systems to vote. The way to get your third-party CI system
-voting rights is to talk with the Neutron PTL, who will let infra know the
-system is ready to vote. The requirements for a new system to be given voting
-rights are as follows:
-
-* A new system must be up and running for a month, with a track record of
-  voting on the sandbox system.
-* A new system must correctly run and pass tests on patches for the third
-  party driver/plugin for a month.
-* A new system must have a logfile layout and retention policy similar to
-  those described below.
-
-Once the system has been running for a month, the owner of the third party CI
-system can contact the Neutron PTL to have a conversation about getting voting
-rights upstream.
-
-The general process to get these voting rights is outlined here. Please follow
-that, taking note of the guidelines Neutron also places on voting for its CI
-systems.
-
-A third party system can have its voting rights removed as well. If the
-system becomes unstable (stops running, stops voting, or starts providing inaccurate
-results), the Neutron PTL or any core reviewer will make an attempt to contact
-the owner and copy the openstack-dev mailing list. If no response is received
-within 2 days, the Neutron PTL will remove voting rights for the third party
-CI system. If a response is received, the owner will work to correct the
-issue. If the issue cannot be addressed in a reasonable amount of time, the
-voting rights will be temporarily removed.
-
-Log & Test Results Filesystem Layout
-------------------------------------
-
-Third-Party CI systems MUST provide logs and configuration data to help
-developers troubleshoot test failures. A third-party CI that DOES NOT post
-logs should be a candidate for removal, and new CI systems MUST post logs
-before they can be awarded voting privileges.
-
-Third party CI systems should follow the filesystem layout convention of the
-OpenStack CI system. Please store your logs so they are viewable in a web
-browser, in a directory structure. Requiring the user to download a giant
-tarball is not acceptable, and will be reason not to allow your system to vote
-from the start, or to cancel its voting rights if this changes while the
-system is running.
-
-At the root of the results, there should be the following (a combined sketch follows these lists):
-
-* console.html.gz - contains the stdout output of the test run
-* local.conf / localrc - contains the setup used for this run
-* logs - contains the detailed test logs of the test run
-
-The above "logs" must be a directory, which contains the following:
-
-* Log files for each screen session in which DevStack launches an OpenStack
-  component
-* Test result files
-* testr_results.html.gz
-* tempest.txt.gz
-
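-For instance, a results tree following this convention might look like the
-sketch below (the screen log names are illustrative and depend on which
-services DevStack launches)::
-
-   <job-result-root>/
-      console.html.gz
-      local.conf.txt
-      logs/
-         screen-q-svc.txt.gz
-         screen-q-agt.txt.gz
-         testr_results.html.gz
-         tempest.txt.gz
-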
-List of existing plugins and drivers
-------------------------------------
-
-https://wiki.openstack.org/wiki/Neutron_Plugins_and_Drivers#Existing_Plugin_and_Drivers
-
-References
-----------
-
-.. [1] http://ci.openstack.org/third_party.html
-.. [2] https://review.openstack.org/#/q/status:open+project:openstack-infra/system-config+branch:master+topic:third-party,n,z
-.. [3] https://github.com/openstack-infra/project-config/blob/master/zuul/layout.yaml
diff --git a/doc/source/stadium/index.rst b/doc/source/stadium/index.rst
deleted file mode 100644 (file)
index e17648c..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-..
-      Copyright 2014 Hewlett-Packard Development Company, L.P.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-Neutron Stadium
-================
-
-The Stadium Guide contains information on policies and procedures for the
-Neutron Stadium.
-
-.. toctree::
-   :maxdepth: 3
-
-   sub_projects
-   sub_project_guidelines
diff --git a/doc/source/stadium/sub_project_guidelines.rst b/doc/source/stadium/sub_project_guidelines.rst
deleted file mode 100644 (file)
index 79abf69..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Sub-Project Guidelines
-======================
-
-This document provides guidance for those who maintain projects that consume
-the main neutron or neutron advanced services repositories as a dependency. It is
-not meant to describe projects that are not tightly coupled with Neutron code.
-
-Code Reuse
-----------
-
-At all times, avoid using any Neutron symbols that are explicitly marked as
-private (those have an underscore at the start of their names).
-
-Oslo Incubator
-~~~~~~~~~~~~~~
-
-Don't ever reuse neutron code that comes from oslo-incubator in your
-subprojects. For the neutron repository, the code is usually located under the
-following path: neutron.openstack.common.*
-
-If you need any oslo-incubator code in your repository, copy it into your
-repository from oslo-incubator and then use it from there.
-
-The Neutron team does not maintain any backwards compatibility strategy for the
-code subtree and can break anyone who relies on it at any time.
-
-Requirements
-------------
-
-Neutron dependency
-~~~~~~~~~~~~~~~~~~
-
-Subprojects usually depend on neutron repositories, using the -e git://...
-scheme to define such a dependency. The dependency *must not* be present in
-requirements lists though; instead it belongs in the tox.ini deps section. This is
-because future pbr library releases do not guarantee that -e git://... dependencies
-will work.
-
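-As a sketch, such a tox.ini entry might look like the following (the exact
-URL and egg name depend on your subproject's needs)::
-
-   [testenv]
-   deps = -r{toxinidir}/requirements.txt
-          -r{toxinidir}/test-requirements.txt
-          -e git+https://git.openstack.org/openstack/neutron#egg=neutron
-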
-You may still put some versioned neutron dependency in your requirements list
-to indicate the dependency for anyone who packages your subproject.
-
-Explicit dependencies
-~~~~~~~~~~~~~~~~~~~~~
-
-Each neutron project maintains its own lists of requirements. Subprojects that
-depend on neutron while directly using some of the libraries that neutron
-maintains as its dependencies must not rely on the fact that neutron will pull
-in the needed dependencies for them. Direct library usage requires that the
-library be mentioned in the requirements lists of the subproject.
-
-The reason to duplicate those dependencies is that the neutron team does not
-stick to any backwards compatibility strategy with regard to requirements lists, and
-is free to drop any of those dependencies at any time, breaking anyone who
-relies on those libraries being pulled in by neutron itself.
-
-Automated requirements updates
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-At all times, subprojects that use neutron as a dependency should make sure
-their dependencies do not conflict with neutron's.
-
-Core neutron projects maintain their requirements lists by utilizing a
-so-called proposal bot. To keep your subproject in sync with neutron, it is
-highly recommended that you register your project in the
-openstack/requirements:projects.txt file to enable the bot to update
-requirements for you.
-
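-The registration itself is a single line naming your repository, e.g. (with a
-hypothetical project name)::
-
-   openstack/networking-foo
-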
-Once a subproject opts in to global requirements synchronization, it should enable
-check-requirements jobs in project-config. For example, see `this patch
-<https://review.openstack.org/#/c/215671/>`_.
-
-Stable branches
----------------
-
-Stable branches for subprojects should be created at the same time as the
-corresponding neutron stable branches. This is to avoid situations
-where a postponed cut-off results in a stable branch that contains some patches
-that belong to the next release. This would require reverting patches, and that
-is something you should avoid.
-
-Make sure your neutron dependency uses the corresponding stable branch of
-neutron, not master.
-
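-For example, a tox.ini dependency pinned to a stable branch might look like
-this (the branch name is illustrative)::
-
-   -e git+https://git.openstack.org/openstack/neutron@stable/liberty#egg=neutron
-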
-Note that to keep requirements in sync with core neutron repositories in stable
-branches, you should make sure that your project is registered in
-openstack/requirements:projects.txt *for the branch in question*.
-
-Subproject stable branches are supervised by the horizontal `neutron-stable-maint
-team <https://review.openstack.org/#/admin/groups/539,members>`_.
-
-More info on stable branch process can be found on `the following page
-<https://wiki.openstack.org/wiki/StableBranch>`_.
-
-Stable merge requirements
--------------------------
-
-Merges into stable branches are handled by members of the `neutron-stable-maint
-gerrit group <https://review.openstack.org/#/admin/groups/539,members>`_. The
-reason for this is to ensure consistency among stable branches, and compliance
-with policies for stable backports.
-
-For sub-projects that participate in the Neutron Stadium effort and that also
-create and utilize stable branches, there is an expectation around what is
-allowed to be merged in these stable branches. The Stadium projects should be
-following the stable branch policies as defined on the `Stable Branch wiki
-<https://wiki.openstack.org/wiki/StableBranch#Stable_branch_policy>`_. This
-means that, among other things, no features are allowed to be backported into
-stable branches.
-
-Releases
---------
-
-It is suggested that sub-projects release new tarballs on PyPI from time to
-time, especially for stable branches. This will make the lives of packagers and
-other consumers of your code easier.
-
-It is highly suggested that you do not strip pieces of the source tree (tests,
-executables, tools) before releasing on PyPI: those missing pieces may be
-needed to validate the package, or make the packaging easier or more complete.
-As a rule of thumb, don't strip anything from the source tree unless absolutely
-necessary.
-
-Sub-Project Release Process
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Only members of the `neutron-release
-<https://review.openstack.org/#/admin/groups/150,members>`_ gerrit group can do
-releases. The same group can create stable branches. Make sure you talk to
-a member of neutron-release to perform your release.
-
-To release a sub-project, follow these steps:
-
-* First, follow the process found `here <http://docs.openstack.org/developer/neutron/policies/bugs.html#plugin-and-driver-repositories>`_
-  for creating a bug for your release and/or stable branch creation.
-* For projects which have not moved to post-versioning, we need to push an
-  alpha tag to avoid pbr complaining. A member of the neutron-release group
-  will handle this.
-* A sub-project owner should modify setup.cfg to remove the version (if you
-  have one), which moves your project to post-versioning, similar to all the
-  other Neutron projects. You can skip this step if you don't have a version in
-  setup.cfg.
-* A member of neutron-release will then `tag the release
-  <http://docs.openstack.org/infra/manual/drivers.html#tagging-a-release>`_,
-  which will release the code to PyPI (see the sketch after this list).
-* The releases will now be on PyPI. A sub-project owner should verify this by
-  going to a URL similar to
-  `this <https://pypi.python.org/pypi/networking-odl>`_.
-* A sub-project owner should next go to Launchpad and release this version
-  using the "Release Now" button for the release itself.
-* A sub-project owner should update any bugs that were fixed with this
-  release to "Fix Released" in Launchpad.
-* A sub-project owner should add the tarball to the Launchpad page for the
-  release using the "Add download file" link.
-* A sub-project owner should add the next milestone to the Launchpad series, or
-  if a new series is required, create the new series and a new milestone.
-* Finally, a sub-project owner should send an email to the openstack-announce
-  mailing list announcing the new release.
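-
-As a minimal sketch of the tagging step (the version, message, and commit hash
-are placeholders; the authoritative procedure is the infra manual linked in
-the list above)::
-
-   git tag -s 1.2.3 -m "networking-foo 1.2.3 release" <commit-sha>
-   git push gerrit 1.2.3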
diff --git a/doc/source/stadium/sub_projects.rst b/doc/source/stadium/sub_projects.rst
deleted file mode 100644 (file)
index 6707cd5..0000000
+++ /dev/null
@@ -1,431 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Official Sub-Projects
-=====================
-
-Neutron has a set of official sub-projects.  These projects are recognized as a
-part of the overall Neutron project.
-
-Inclusion Process
------------------
-
-The process for proposing a repo into openstack/ and under the Neutron
-project is to propose a patch to the openstack/governance repository.
-For example, to propose networking-foo, one would add the following entry
-under Neutron in reference/projects.yaml::
-
-    - repo: openstack/networking-foo
-      tags:
-        - name: release:independent
-
-For more information about the release:independent tag (and other
-currently defined tags) see:
-
-    http://governance.openstack.org/reference/tags/
-
-The Neutron PTL must approve the change.  The TC clarified that once a
-project has been approved (Neutron in this case), the project can add
-additional repos without needing TC approval as long as the added
-repositories are within the existing approved scope of the project.
-
-    http://git.openstack.org/cgit/openstack/governance/commit/?id=321a020cbcaada01976478ea9f677ebb4df7bd6d
-
-In order to create a project, in case it does not exist, follow the steps
-explained in:
-
-    http://docs.openstack.org/infra/manual/creators.html
-
-Responsibilities
-----------------
-
-All affected repositories already have their own review teams.  The
-sub-team working on the sub-project is entirely responsible for
-day-to-day development.  That includes reviews, bug tracking, and
-working on testing.
-
-By being included, the project accepts oversight by the TC as a part of
-being in OpenStack, and also accepts oversight by the Neutron PTL.
-
-It is also assumed the respective review teams will make sure their projects
-stay in line with `current best practices <sub_project_guidelines.html>`_.
-
-Inclusion Criteria
-------------------
-
-As mentioned before, the Neutron PTL must approve the inclusion of each
-additional repository under the Neutron project.  That evaluation will be
-primarily based on the new project requirements used for all new OpenStack
-projects, for whichever criteria are applicable:
-
-    http://governance.openstack.org/reference/new-projects-requirements.html
-
-Official Sub-Project List
--------------------------
-
-The official source of all repositories that exist under the Neutron project is:
-
-    http://governance.openstack.org/reference/projects/neutron.html
-
-Affiliated projects
-~~~~~~~~~~~~~~~~~~~
-
-This table shows the affiliated projects that integrate with Neutron,
-in one form or another.  These projects typically leverage the pluggable
-capabilities of Neutron, the Neutron API, or a combination of both.
-This list may contain projects that are already listed in the governance
-repo but are summarized here to describe the functionality they provide.
-
-+-------------------------------+-----------------------+
-| Name                          |    Functionality      |
-+===============================+=======================+
-| dragonflow_                   |           l3          |
-+-------------------------------+-----------------------+
-| kuryr_                        |         docker        |
-+-------------------------------+-----------------------+
-| networking-ale-omniswitch_    |          ml2          |
-+-------------------------------+-----------------------+
-| networking-arista_            |         ml2,l3        |
-+-------------------------------+-----------------------+
-| networking-bagpipe-l2_        |          ml2          |
-+-------------------------------+-----------------------+
-| networking-bgpvpn_            |          vpn          |
-+-------------------------------+-----------------------+
-| networking-bigswitch_         |      ml2,core,l3      |
-+-------------------------------+-----------------------+
-| networking-brocade_           |        ml2,l3         |
-+-------------------------------+-----------------------+
-| networking-calico_            |          ml2          |
-+-------------------------------+-----------------------+
-| networking-cisco_             |  core,ml2,l3,fw,vpn   |
-+-------------------------------+-----------------------+
-| networking-edge-vpn_          |          vpn          |
-+-------------------------------+-----------------------+
-| networking-fujitsu_           |          ml2          |
-+-------------------------------+-----------------------+
-| networking-hyperv_            |          ml2          |
-+-------------------------------+-----------------------+
-| networking-infoblox_          |         ipam          |
-+-------------------------------+-----------------------+
-| networking-l2gw_              |         l2            |
-+-------------------------------+-----------------------+
-| networking-midonet_           |  core,ml2,l3,lb,fw    |
-+-------------------------------+-----------------------+
-| networking-mlnx_              |          ml2          |
-+-------------------------------+-----------------------+
-| networking-nec_               |         core          |
-+-------------------------------+-----------------------+
-| nuage-openstack-neutron_      |         core          |
-+-------------------------------+-----------------------+
-| networking-odl_               |      ml2,l3,lb,fw     |
-+-------------------------------+-----------------------+
-| networking-ofagent_           |          ml2          |
-+-------------------------------+-----------------------+
-| networking-onos_              |        ml2,l3         |
-+-------------------------------+-----------------------+
-| networking-ovn_               |          ml2          |
-+-------------------------------+-----------------------+
-| networking-ovs-dpdk_          |          ml2          |
-+-------------------------------+-----------------------+
-| networking-plumgrid_          |          core         |
-+-------------------------------+-----------------------+
-| networking-powervm_           |          ml2          |
-+-------------------------------+-----------------------+
-| networking-sfc_               |  service composition  |
-+-------------------------------+-----------------------+
-| networking-vsphere_           |          ml2          |
-+-------------------------------+-----------------------+
-| vmware-nsx_                   |          core         |
-+-------------------------------+-----------------------+
-| octavia_                      |          lb           |
-+-------------------------------+-----------------------+
-
-Functionality legend
-++++++++++++++++++++
-
-- l2: a Layer 2 service;
-- ml2: an ML2 mechanism driver;
-- core: a monolithic plugin that can implement the API at multiple layers (L3-L7);
-- l3: a Layer 3 service plugin;
-- fw: a Firewall service plugin;
-- vpn: a VPN service plugin;
-- lb: a Load Balancer service plugin;
-- intent: a service plugin that provides a declarative API to realize networking;
-- docker: a Docker network plugin that uses Neutron to provide networking services to Docker containers;
-- ipam: an IP address management driver.
-
-.. _networking-ale-omniswitch:
-
-ALE Omniswitch
-++++++++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-ale-omniswitch
-* Launchpad: https://launchpad.net/networking-ale-omniswitch
-* Pypi: https://pypi.python.org/pypi/networking-ale-omniswitch
-
-.. _networking-arista:
-
-Arista
-++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-arista
-* Launchpad: https://launchpad.net/networking-arista
-* Pypi: https://pypi.python.org/pypi/networking-arista
-
-.. _networking-bagpipe-l2:
-
-BaGPipe
-+++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-bagpipe-l2
-* Launchpad: https://launchpad.net/bagpipe-l2
-* Pypi: https://pypi.python.org/pypi/bagpipe-l2
-
-.. _networking-bgpvpn:
-
-BGPVPN
-++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-bgpvpn
-
-.. _networking-bigswitch:
-
-Big Switch Networks
-+++++++++++++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-bigswitch
-* Pypi: https://pypi.python.org/pypi/bsnstacklib
-
-.. _networking-brocade:
-
-Brocade
-+++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-brocade
-* Launchpad: https://launchpad.net/networking-brocade
-* PyPI: https://pypi.python.org/pypi/networking-brocade
-
-.. _networking-calico:
-
-Calico
-++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-calico
-* Launchpad: https://launchpad.net/networking-calico
-* PyPI: https://pypi.python.org/pypi/networking-calico
-
-.. _networking-cisco:
-
-Cisco
-+++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-cisco
-* Launchpad: https://launchpad.net/networking-cisco
-* PyPI: https://pypi.python.org/pypi/networking-cisco
-
-.. _dragonflow:
-
-DragonFlow
-++++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/dragonflow
-* Launchpad: https://launchpad.net/dragonflow
-* PyPI: https://pypi.python.org/pypi/DragonFlow
-
-.. _networking-edge-vpn:
-
-Edge VPN
-++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-edge-vpn
-* Launchpad: https://launchpad.net/edge-vpn
-
-.. _networking-fujitsu:
-
-FUJITSU
-+++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-fujitsu
-* Launchpad: https://launchpad.net/networking-fujitsu
-* PyPI: https://pypi.python.org/pypi/networking-fujitsu
-
-.. _networking-hyperv:
-
-Hyper-V
-+++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-hyperv
-* Launchpad: https://launchpad.net/networking-hyperv
-* PyPI: https://pypi.python.org/pypi/networking-hyperv
-
-.. _networking-infoblox:
-
-Infoblox
-++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-infoblox
-* Launchpad: https://launchpad.net/networking-infoblox
-* PyPI: https://pypi.python.org/pypi/networking-infoblox
-
-.. _kuryr:
-
-Kuryr
-+++++
-
-* Git: https://git.openstack.org/cgit/openstack/kuryr/
-* Launchpad: https://launchpad.net/kuryr
-* PyPI: https://pypi.python.org/pypi/kuryr/
-
-.. _networking-l2gw:
-
-L2 Gateway
-++++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-l2gw
-* Launchpad: https://launchpad.net/networking-l2gw
-
-.. _networking-midonet:
-
-MidoNet
-+++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-midonet
-* Launchpad: https://launchpad.net/networking-midonet
-* PyPI: https://pypi.python.org/pypi/networking-midonet
-
-.. _networking-mlnx:
-
-Mellanox
-++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-mlnx
-* Launchpad: https://launchpad.net/networking-mlnx
-
-.. _networking-nec:
-
-NEC
-+++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-nec
-* Launchpad: https://launchpad.net/networking-nec
-* PyPI: https://pypi.python.org/pypi/networking-nec
-
-.. _nuage-openstack-neutron:
-
-Nuage
-+++++
-
-* Git: https://github.com/nuagenetworks/nuage-openstack-neutron
-
-.. _networking-odl:
-
-OpenDaylight
-++++++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-odl
-* Launchpad: https://launchpad.net/networking-odl
-
-.. _networking-ofagent:
-
-OpenFlow Agent (ofagent)
-++++++++++++++++++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-ofagent
-* Launchpad: https://launchpad.net/networking-ofagent
-* PyPI: https://pypi.python.org/pypi/networking-ofagent
-
-.. _networking-onos:
-
-Open Network Operating System (onos)
-++++++++++++++++++++++++++++++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-onos
-* Launchpad: https://launchpad.net/networking-onos
-* PyPI: https://pypi.python.org/pypi/networking-onos
-
-.. _networking-ovn:
-
-Open Virtual Network
-++++++++++++++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-ovn
-* Launchpad: https://launchpad.net/networking-ovn
-* PyPI: https://pypi.python.org/pypi/networking-ovn
-
-.. _networking-ovs-dpdk:
-
-Open DPDK
-+++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-ovs-dpdk
-* Launchpad: https://launchpad.net/networking-ovs-dpdk
-
-.. _networking-plumgrid:
-
-PLUMgrid
-++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-plumgrid
-* Launchpad: https://launchpad.net/networking-plumgrid
-* PyPI: https://pypi.python.org/pypi/networking-plumgrid
-
-.. _networking-powervm:
-
-PowerVM
-+++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-powervm
-* Launchpad: https://launchpad.net/networking-powervm
-* PyPI: https://pypi.python.org/pypi/networking-powervm
-
-.. _networking-sfc:
-
-SFC
-+++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-sfc
-
-.. _networking-vsphere:
-
-vSphere
-+++++++
-
-* Git: https://git.openstack.org/cgit/openstack/networking-vsphere
-* Launchpad: https://launchpad.net/networking-vsphere
-
-.. _vmware-nsx:
-
-VMware NSX
-++++++++++
-
-* Git: https://git.openstack.org/cgit/openstack/vmware-nsx
-* Launchpad: https://launchpad.net/vmware-nsx
-* PyPI: https://pypi.python.org/pypi/vmware-nsx
-
-.. _octavia:
-
-Octavia
-+++++++
-
-* Git: https://git.openstack.org/cgit/openstack/octavia
-* Launchpad: https://launchpad.net/octavia
diff --git a/etc/README.txt b/etc/README.txt
deleted file mode 100644 (file)
index 40690ee..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-To generate the sample neutron configuration files, run the following
-command from the top level of the neutron directory:
-
-tox -e genconfig
-
-If a 'tox' environment is unavailable, then you can run the following script
-instead to generate the configuration files:
-
-./tools/generate_config_file_samples.sh
diff --git a/etc/api-paste.ini b/etc/api-paste.ini
deleted file mode 100644 (file)
index 4884fe3..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-[composite:neutron]
-use = egg:Paste#urlmap
-/: neutronversions
-/v2.0: neutronapi_v2_0
-
-[composite:neutronapi_v2_0]
-use = call:neutron.auth:pipeline_factory
-noauth = cors request_id catch_errors extensions neutronapiapp_v2_0
-keystone = cors request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:catch_errors]
-paste.filter_factory = oslo_middleware:CatchErrors.factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = neutron
-latent_allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID
-latent_expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID
-latent_allow_methods = GET, PUT, POST, DELETE, PATCH
-
-[filter:keystonecontext]
-paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:extensions]
-paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
-
-[app:neutronversions]
-paste.app_factory = neutron.api.versions:Versions.factory
-
-[app:neutronapiapp_v2_0]
-paste.app_factory = neutron.api.v2.router:APIRouter.factory
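
The composite above dispatches on the configured auth strategy. A simplified
sketch of what a Paste pipeline factory such as neutron.auth:pipeline_factory
does (a hypothetical re-implementation, not the in-tree code; it assumes an
``auth_strategy`` option is registered on the config object)::

    # Pick the pipeline named by auth_strategy ('noauth' or 'keystone'
    # above) and wrap the app in its filters, right to left.
    from oslo_config import cfg

    def pipeline_factory(loader, global_conf, **local_conf):
        # local_conf carries the 'noauth' and 'keystone' lines from the ini
        pipeline = local_conf[cfg.CONF.auth_strategy].split()
        app = loader.get_app(pipeline[-1])
        for name in reversed(pipeline[:-1]):
            app = loader.get_filter(name)(app)
        return app
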
diff --git a/etc/neutron/plugins/cisco/cisco_vpn_agent.ini b/etc/neutron/plugins/cisco/cisco_vpn_agent.ini
deleted file mode 100644 (file)
index 0aee17e..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-[cisco_csr_ipsec]
-# Status check interval in seconds, for VPNaaS IPSec connections used on CSR
-# status_check_interval = 60
-
-# Cisco CSR management port information for REST access used by VPNaaS
-# TODO(pcm): Remove once CSR is integrated in as a Neutron router.
-#
-# Format is:
-# [cisco_csr_rest:<public IP>]
-# rest_mgmt = <mgmt port IP>
-# tunnel_ip = <tunnel IP>
-# username = <user>
-# password = <password>
-# timeout = <timeout>
-# host = <hostname>
-# tunnel_if = <tunnel I/F>
-#
-# where:
-#   public IP ----- Public IP address of router used with a VPN service (1:1 with CSR)
-#   tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel
-#   mgmt port IP -- IP address of CSR for REST API access
-#   user ---------- Username for REST management port access to Cisco CSR
-#   password ------ Password for REST management port access to Cisco CSR
-#   timeout ------- REST request timeout to Cisco CSR (optional)
-#   hostname ------ Name of host where CSR is running as a VM
-#   tunnel I/F ---- CSR port name used for tunnels' IP address
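
The per-CSR sections described above can be read with a plain INI parser; a
sketch, assuming a file laid out exactly as the comments show (the file name
and the field read here are illustrative)::

    # Sketch: enumerate [cisco_csr_rest:<public IP>] sections.
    import configparser

    parser = configparser.ConfigParser()
    parser.read('cisco_vpn_agent.ini')
    for section in parser.sections():
        if section.startswith('cisco_csr_rest:'):
            public_ip = section.split(':', 1)[1]
            print(public_ip, parser.get(section, 'rest_mgmt'))
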
diff --git a/etc/neutron/plugins/ml2/.placeholder b/etc/neutron/plugins/ml2/.placeholder
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/etc/neutron/rootwrap.d/debug.filters b/etc/neutron/rootwrap.d/debug.filters
deleted file mode 100644 (file)
index 8d72ce2..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-# neutron-rootwrap command filters for nodes on which neutron is
-# expected to control network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
-
-# This is needed because we should ping
-# from inside a namespace which requires root
-# _alt variants allow matching -c and -w in any order
-#   (used by NeutronDebugAgent.ping_all)
-ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
-ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
-ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
-ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
\ No newline at end of file
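
A rough sketch of the argument-by-argument matching that a RegExpFilter entry
like the ``ping`` one above implies (a hypothetical re-implementation, not
the oslo.rootwrap code)::

    # Each argument must exactly match its regular expression.
    import re

    ARG_PATTERNS = ['ping', '-w', r'\d+', '-c', r'\d+', r'[0-9\.]+']

    def matches(command):
        if len(command) != len(ARG_PATTERNS):
            return False
        return all(re.match(p + '$', a)
                   for p, a in zip(ARG_PATTERNS, command))

    print(matches(['ping', '-w', '5', '-c', '3', '10.0.0.1']))  # True
    print(matches(['ping', '-w', '5', '-c', '3', 'evil;rm']))   # False
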
diff --git a/etc/neutron/rootwrap.d/dhcp.filters b/etc/neutron/rootwrap.d/dhcp.filters
deleted file mode 100644 (file)
index 156c9cf..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-# neutron-rootwrap command filters for nodes on which neutron is
-# expected to control network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
-
-# dhcp-agent
-dnsmasq: CommandFilter, dnsmasq, root
-# dhcp-agent uses kill as well, that's handled by the generic KillFilter
-# it looks like these are the only signals needed, per
-# neutron/agent/linux/dhcp.py
-kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP
-kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
-
-ovs-vsctl: CommandFilter, ovs-vsctl, root
-ivs-ctl: CommandFilter, ivs-ctl, root
-mm-ctl: CommandFilter, mm-ctl, root
-dhcp_release: CommandFilter, dhcp_release, root
-
-# metadata proxy
-metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
-# RHEL invocation of the metadata proxy will report /usr/bin/python
-kill_metadata: KillFilter, root, python, -9
-kill_metadata7: KillFilter, root, python2.7, -9
-
-# ip_lib
-ip: IpFilter, ip, root
-find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
-ip_exec: IpNetnsExecFilter, ip, root
diff --git a/etc/neutron/rootwrap.d/dibbler.filters b/etc/neutron/rootwrap.d/dibbler.filters
deleted file mode 100644 (file)
index eea5525..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-# neutron-rootwrap command filters for nodes on which neutron is
-# expected to control network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
-
-# Filters for the dibbler-based reference implementation of the pluggable
-# Prefix Delegation driver. Other implementations using an alternative agent
-# should include a similar filter in this folder.
-
-# prefix_delegation_agent
-dibbler-client: CommandFilter, dibbler-client, root
diff --git a/etc/neutron/rootwrap.d/ebtables.filters b/etc/neutron/rootwrap.d/ebtables.filters
deleted file mode 100644 (file)
index 8e810e7..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-# neutron-rootwrap command filters for nodes on which neutron is
-# expected to control network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
-
-ebtables: CommandFilter, ebtables, root
diff --git a/etc/neutron/rootwrap.d/ipset-firewall.filters b/etc/neutron/rootwrap.d/ipset-firewall.filters
deleted file mode 100644 (file)
index 52c6637..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-# neutron-rootwrap command filters for nodes on which neutron is
-# expected to control network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
-# neutron/agent/linux/iptables_firewall.py
-#   "ipset", "-A", ...
-ipset: CommandFilter, ipset, root
diff --git a/etc/neutron/rootwrap.d/iptables-firewall.filters b/etc/neutron/rootwrap.d/iptables-firewall.filters
deleted file mode 100644 (file)
index 29c78da..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-# neutron-rootwrap command filters for nodes on which neutron is
-# expected to control network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
-
-# neutron/agent/linux/iptables_manager.py
-#   "iptables-save", ...
-iptables-save: CommandFilter, iptables-save, root
-iptables-restore: CommandFilter, iptables-restore, root
-ip6tables-save: CommandFilter, ip6tables-save, root
-ip6tables-restore: CommandFilter, ip6tables-restore, root
-
-# neutron/agent/linux/iptables_manager.py
-#   "iptables", "-A", ...
-iptables: CommandFilter, iptables, root
-ip6tables: CommandFilter, ip6tables, root
-
-# neutron/agent/linux/iptables_manager.py
-#   "sysctl", "-w", ...
-sysctl: CommandFilter, sysctl, root
-
-# neutron/agent/linux/ip_conntrack.py
-conntrack: CommandFilter, conntrack, root
\ No newline at end of file
diff --git a/etc/neutron/rootwrap.d/l3.filters b/etc/neutron/rootwrap.d/l3.filters
deleted file mode 100644 (file)
index f1abc26..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-# neutron-rootwrap command filters for nodes on which neutron is
-# expected to control network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
-
-# arping
-arping: CommandFilter, arping, root
-
-# l3_agent
-sysctl: CommandFilter, sysctl, root
-route: CommandFilter, route, root
-radvd: CommandFilter, radvd, root
-
-# metadata proxy
-metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
-# RHEL invocation of the metadata proxy will report /usr/bin/python
-kill_metadata: KillFilter, root, python, -9
-kill_metadata7: KillFilter, root, python2.7, -9
-kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP
-kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP
-
-# ip_lib
-ip: IpFilter, ip, root
-find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
-ip_exec: IpNetnsExecFilter, ip, root
-
-# For ip monitor
-kill_ip_monitor: KillFilter, root, ip, -9
-
-# ovs_lib (if OVSInterfaceDriver is used)
-ovs-vsctl: CommandFilter, ovs-vsctl, root
-
-# iptables_manager
-iptables-save: CommandFilter, iptables-save, root
-iptables-restore: CommandFilter, iptables-restore, root
-ip6tables-save: CommandFilter, ip6tables-save, root
-ip6tables-restore: CommandFilter, ip6tables-restore, root
-
-# Keepalived
-keepalived: CommandFilter, keepalived, root
-kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9
-
-# l3 agent to delete floatingip's conntrack state
-conntrack: CommandFilter, conntrack, root
-
-# keepalived state change monitor
-keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
-
-# For creating namespace local /etc
-rt_tables_mkdir: RegExpFilter, mkdir, root, mkdir, -p, /etc/netns/qrouter-[^/].*
-rt_tables_chown: RegExpFilter, chown, root, chown, [1-9][0-9].*, /etc/netns/qrouter-[^/].*
-rt_tables_rmdir: RegExpFilter, rm, root, rm, -r, -f, /etc/netns/qrouter-[^/].*
diff --git a/etc/neutron/rootwrap.d/linuxbridge-plugin.filters b/etc/neutron/rootwrap.d/linuxbridge-plugin.filters
deleted file mode 100644 (file)
index 1e0b891..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-# neutron-rootwrap command filters for nodes on which neutron is
-# expected to control network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
-
-# linuxbridge-agent
-# unclear whether both variants are necessary, but I'm transliterating
-# from the old mechanism
-brctl: CommandFilter, brctl, root
-bridge: CommandFilter, bridge, root
-
-# ip_lib
-ip: IpFilter, ip, root
-find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
-ip_exec: IpNetnsExecFilter, ip, root
diff --git a/etc/neutron/rootwrap.d/openvswitch-plugin.filters b/etc/neutron/rootwrap.d/openvswitch-plugin.filters
deleted file mode 100644 (file)
index c738733..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-# neutron-rootwrap command filters for nodes on which neutron is
-# expected to control network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
-
-# openvswitch-agent
-# unclear whether both variants are necessary, but I'm transliterating
-# from the old mechanism
-ovs-vsctl: CommandFilter, ovs-vsctl, root
-# NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
-ovs-ofctl: CommandFilter, ovs-ofctl, root
-kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
-ovsdb-client: CommandFilter, ovsdb-client, root
-xe: CommandFilter, xe, root
-
-# ip_lib
-ip: IpFilter, ip, root
-find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
-ip_exec: IpNetnsExecFilter, ip, root
diff --git a/etc/oslo-config-generator/dhcp_agent.ini b/etc/oslo-config-generator/dhcp_agent.ini
deleted file mode 100644 (file)
index 13a0de1..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-[DEFAULT]
-output_file = etc/dhcp_agent.ini.sample
-wrap_width = 79
-
-namespace = neutron.base.agent
-namespace = neutron.dhcp.agent
-namespace = oslo.log
diff --git a/etc/oslo-config-generator/l3_agent.ini b/etc/oslo-config-generator/l3_agent.ini
deleted file mode 100644 (file)
index 82bf2ca..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-[DEFAULT]
-output_file = etc/l3_agent.ini.sample
-wrap_width = 79
-
-namespace = neutron.base.agent
-namespace = neutron.l3.agent
-namespace = oslo.log
diff --git a/etc/oslo-config-generator/linuxbridge_agent.ini b/etc/oslo-config-generator/linuxbridge_agent.ini
deleted file mode 100644 (file)
index 060346b..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-[DEFAULT]
-output_file = etc/neutron/plugins/ml2/linuxbridge_agent.ini.sample
-wrap_width = 79
-
-namespace = neutron.ml2.linuxbridge.agent
-namespace = oslo.log
diff --git a/etc/oslo-config-generator/metadata_agent.ini b/etc/oslo-config-generator/metadata_agent.ini
deleted file mode 100644 (file)
index 5c27309..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-[DEFAULT]
-output_file = etc/metadata_agent.ini.sample
-wrap_width = 79
-
-namespace = neutron.metadata.agent
-namespace = oslo.log
diff --git a/etc/oslo-config-generator/metering_agent.ini b/etc/oslo-config-generator/metering_agent.ini
deleted file mode 100644 (file)
index 9c00d30..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-[DEFAULT]
-output_file = etc/metering_agent.ini.sample
-wrap_width = 79
-
-namespace = neutron.metering.agent
-namespace = oslo.log
diff --git a/etc/oslo-config-generator/ml2_conf.ini b/etc/oslo-config-generator/ml2_conf.ini
deleted file mode 100644 (file)
index ab97bd4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-[DEFAULT]
-output_file = etc/neutron/plugins/ml2/ml2_conf.ini.sample
-wrap_width = 79
-
-namespace = neutron.ml2
-namespace = oslo.log
diff --git a/etc/oslo-config-generator/ml2_conf_sriov.ini b/etc/oslo-config-generator/ml2_conf_sriov.ini
deleted file mode 100644 (file)
index ed03c78..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-[DEFAULT]
-output_file = etc/neutron/plugins/ml2/ml2_conf_sriov.ini.sample
-wrap_width = 79
-
-namespace = neutron.ml2.sriov
-namespace = oslo.log
diff --git a/etc/oslo-config-generator/neutron.conf b/etc/oslo-config-generator/neutron.conf
deleted file mode 100644 (file)
index 1d61c36..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-[DEFAULT]
-output_file = etc/neutron.conf.sample
-wrap_width = 79
-
-namespace = neutron
-namespace = neutron.agent
-namespace = neutron.db
-namespace = neutron.extensions
-namespace = neutron.qos
-namespace = nova.auth
-namespace = oslo.log
-namespace = oslo.db
-namespace = oslo.policy
-namespace = oslo.concurrency
-namespace = oslo.messaging
-namespace = oslo.middleware.cors
-namespace = oslo.service.sslutils
-namespace = oslo.service.wsgi
-namespace = keystonemiddleware.auth_token
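
Each ``namespace`` line names an entry point in the ``oslo.config.opts``
group, which the generator calls to obtain the option list for that
namespace. A sketch of inspecting one namespace, assuming setuptools'
pkg_resources is available::

    # List the option groups that the 'neutron' namespace exposes.
    import pkg_resources

    for ep in pkg_resources.iter_entry_points('oslo.config.opts'):
        if ep.name == 'neutron':
            list_opts = ep.load()
            for group, opts in list_opts():
                print(group, [o.name for o in opts])
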
diff --git a/etc/oslo-config-generator/openvswitch_agent.ini b/etc/oslo-config-generator/openvswitch_agent.ini
deleted file mode 100644 (file)
index 3fab70d..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-[DEFAULT]
-output_file = etc/neutron/plugins/ml2/openvswitch_agent.ini.sample
-wrap_width = 79
-
-namespace = neutron.ml2.ovs.agent
-namespace = oslo.log
diff --git a/etc/oslo-config-generator/sriov_agent.ini b/etc/oslo-config-generator/sriov_agent.ini
deleted file mode 100644 (file)
index 29864f5..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-[DEFAULT]
-output_file = etc/neutron/plugins/ml2/sriov_agent.ini.sample
-wrap_width = 79
-
-namespace = neutron.ml2.sriov.agent
-namespace = oslo.log
diff --git a/etc/policy.json b/etc/policy.json
deleted file mode 100644 (file)
index c551eb8..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-{
-    "context_is_admin":  "role:admin",
-    "owner": "tenant_id:%(tenant_id)s",
-    "admin_or_owner": "rule:context_is_admin or rule:owner",
-    "context_is_advsvc":  "role:advsvc",
-    "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
-    "admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner",
-    "admin_only": "rule:context_is_admin",
-    "regular_user": "",
-    "shared": "field:networks:shared=True",
-    "shared_firewalls": "field:firewalls:shared=True",
-    "shared_firewall_policies": "field:firewall_policies:shared=True",
-    "shared_subnetpools": "field:subnetpools:shared=True",
-    "shared_address_scopes": "field:address_scopes:shared=True",
-    "external": "field:networks:router:external=True",
-    "default": "rule:admin_or_owner",
-
-    "create_subnet": "rule:admin_or_network_owner",
-    "get_subnet": "rule:admin_or_owner or rule:shared",
-    "update_subnet": "rule:admin_or_network_owner",
-    "delete_subnet": "rule:admin_or_network_owner",
-
-    "create_subnetpool": "",
-    "create_subnetpool:shared": "rule:admin_only",
-    "create_subnetpool:is_default": "rule:admin_only",
-    "get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools",
-    "update_subnetpool": "rule:admin_or_owner",
-    "update_subnetpool:is_default": "rule:admin_only",
-    "delete_subnetpool": "rule:admin_or_owner",
-
-    "create_address_scope": "",
-    "create_address_scope:shared": "rule:admin_only",
-    "get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes",
-    "update_address_scope": "rule:admin_or_owner",
-    "update_address_scope:shared": "rule:admin_only",
-    "delete_address_scope": "rule:admin_or_owner",
-
-    "create_network": "",
-    "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
-    "get_network:router:external": "rule:regular_user",
-    "get_network:segments": "rule:admin_only",
-    "get_network:provider:network_type": "rule:admin_only",
-    "get_network:provider:physical_network": "rule:admin_only",
-    "get_network:provider:segmentation_id": "rule:admin_only",
-    "get_network:queue_id": "rule:admin_only",
-    "create_network:shared": "rule:admin_only",
-    "create_network:router:external": "rule:admin_only",
-    "create_network:segments": "rule:admin_only",
-    "create_network:provider:network_type": "rule:admin_only",
-    "create_network:provider:physical_network": "rule:admin_only",
-    "create_network:provider:segmentation_id": "rule:admin_only",
-    "update_network": "rule:admin_or_owner",
-    "update_network:segments": "rule:admin_only",
-    "update_network:shared": "rule:admin_only",
-    "update_network:provider:network_type": "rule:admin_only",
-    "update_network:provider:physical_network": "rule:admin_only",
-    "update_network:provider:segmentation_id": "rule:admin_only",
-    "update_network:router:external": "rule:admin_only",
-    "delete_network": "rule:admin_or_owner",
-
-    "network_device": "field:port:device_owner=~^network:",
-    "create_port": "",
-    "create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:binding:host_id": "rule:admin_only",
-    "create_port:binding:profile": "rule:admin_only",
-    "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:allowed_address_pairs": "rule:admin_or_network_owner",
-    "get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
-    "get_port:queue_id": "rule:admin_only",
-    "get_port:binding:vif_type": "rule:admin_only",
-    "get_port:binding:vif_details": "rule:admin_only",
-    "get_port:binding:host_id": "rule:admin_only",
-    "get_port:binding:profile": "rule:admin_only",
-    "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
-    "update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
-    "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:binding:host_id": "rule:admin_only",
-    "update_port:binding:profile": "rule:admin_only",
-    "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:allowed_address_pairs": "rule:admin_or_network_owner",
-    "delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
-
-    "get_router:ha": "rule:admin_only",
-    "create_router": "rule:regular_user",
-    "create_router:external_gateway_info:enable_snat": "rule:admin_only",
-    "create_router:distributed": "rule:admin_only",
-    "create_router:ha": "rule:admin_only",
-    "get_router": "rule:admin_or_owner",
-    "get_router:distributed": "rule:admin_only",
-    "update_router:external_gateway_info:enable_snat": "rule:admin_only",
-    "update_router:distributed": "rule:admin_only",
-    "update_router:ha": "rule:admin_only",
-    "delete_router": "rule:admin_or_owner",
-
-    "add_router_interface": "rule:admin_or_owner",
-    "remove_router_interface": "rule:admin_or_owner",
-
-    "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
-    "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
-
-    "create_firewall": "",
-    "get_firewall": "rule:admin_or_owner",
-    "create_firewall:shared": "rule:admin_only",
-    "get_firewall:shared": "rule:admin_only",
-    "update_firewall": "rule:admin_or_owner",
-    "update_firewall:shared": "rule:admin_only",
-    "delete_firewall": "rule:admin_or_owner",
-
-    "create_firewall_policy": "",
-    "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies",
-    "create_firewall_policy:shared": "rule:admin_or_owner",
-    "update_firewall_policy": "rule:admin_or_owner",
-    "delete_firewall_policy": "rule:admin_or_owner",
-
-    "insert_rule": "rule:admin_or_owner",
-    "remove_rule": "rule:admin_or_owner",
-
-    "create_firewall_rule": "",
-    "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
-    "update_firewall_rule": "rule:admin_or_owner",
-    "delete_firewall_rule": "rule:admin_or_owner",
-
-    "create_qos_queue": "rule:admin_only",
-    "get_qos_queue": "rule:admin_only",
-
-    "update_agent": "rule:admin_only",
-    "delete_agent": "rule:admin_only",
-    "get_agent": "rule:admin_only",
-
-    "create_dhcp-network": "rule:admin_only",
-    "delete_dhcp-network": "rule:admin_only",
-    "get_dhcp-networks": "rule:admin_only",
-    "create_l3-router": "rule:admin_only",
-    "delete_l3-router": "rule:admin_only",
-    "get_l3-routers": "rule:admin_only",
-    "get_dhcp-agents": "rule:admin_only",
-    "get_l3-agents": "rule:admin_only",
-    "get_loadbalancer-agent": "rule:admin_only",
-    "get_loadbalancer-pools": "rule:admin_only",
-    "get_agent-loadbalancers": "rule:admin_only",
-    "get_loadbalancer-hosting-agent": "rule:admin_only",
-
-    "create_floatingip": "rule:regular_user",
-    "create_floatingip:floating_ip_address": "rule:admin_only",
-    "update_floatingip": "rule:admin_or_owner",
-    "delete_floatingip": "rule:admin_or_owner",
-    "get_floatingip": "rule:admin_or_owner",
-
-    "create_network_profile": "rule:admin_only",
-    "update_network_profile": "rule:admin_only",
-    "delete_network_profile": "rule:admin_only",
-    "get_network_profiles": "",
-    "get_network_profile": "",
-    "update_policy_profiles": "rule:admin_only",
-    "get_policy_profiles": "",
-    "get_policy_profile": "",
-
-    "create_metering_label": "rule:admin_only",
-    "delete_metering_label": "rule:admin_only",
-    "get_metering_label": "rule:admin_only",
-
-    "create_metering_label_rule": "rule:admin_only",
-    "delete_metering_label_rule": "rule:admin_only",
-    "get_metering_label_rule": "rule:admin_only",
-
-    "get_service_provider": "rule:regular_user",
-    "get_lsn": "rule:admin_only",
-    "create_lsn": "rule:admin_only",
-
-    "create_flavor": "rule:admin_only",
-    "update_flavor": "rule:admin_only",
-    "delete_flavor": "rule:admin_only",
-    "get_flavors": "rule:regular_user",
-    "get_flavor": "rule:regular_user",
-    "create_service_profile": "rule:admin_only",
-    "update_service_profile": "rule:admin_only",
-    "delete_service_profile": "rule:admin_only",
-    "get_service_profiles": "rule:admin_only",
-    "get_service_profile": "rule:admin_only",
-
-    "get_policy": "rule:regular_user",
-    "create_policy": "rule:admin_only",
-    "update_policy": "rule:admin_only",
-    "delete_policy": "rule:admin_only",
-    "get_policy_bandwidth_limit_rule": "rule:regular_user",
-    "create_policy_bandwidth_limit_rule": "rule:admin_only",
-    "delete_policy_bandwidth_limit_rule": "rule:admin_only",
-    "update_policy_bandwidth_limit_rule": "rule:admin_only",
-    "get_rule_type": "rule:regular_user",
-
-    "restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only",
-    "create_rbac_policy": "",
-    "create_rbac_policy:target_tenant": "rule:restrict_wildcard",
-    "update_rbac_policy": "rule:admin_or_owner",
-    "update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner",
-    "get_rbac_policy": "rule:admin_or_owner",
-    "delete_rbac_policy": "rule:admin_or_owner",
-
-    "create_flavor_service_profile": "rule:admin_only",
-    "delete_flavor_service_profile": "rule:admin_only",
-    "get_flavor_service_profile": "rule:regular_user"
-}
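
These rules are evaluated by oslo.policy. A sketch of checking one of them
directly; the credential and target dicts are illustrative::

    # Evaluate "get_network" for a non-admin owner.
    from oslo_config import cfg
    from oslo_policy import policy

    cfg.CONF([])  # initialize the config object before first use
    enforcer = policy.Enforcer(cfg.CONF, policy_file='policy.json')
    creds = {'roles': ['member'], 'tenant_id': 'tenant-1'}
    target = {'tenant_id': 'tenant-1'}
    # "get_network" allows admin_or_owner (or shared/external/advsvc);
    # the owner rule compares the target tenant_id with the caller's.
    print(enforcer.enforce('get_network', target, creds))  # True
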
diff --git a/etc/rootwrap.conf b/etc/rootwrap.conf
deleted file mode 100644 (file)
index 3a6b11f..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-# Configuration for neutron-rootwrap
-# This file should be owned by (and only-writeable by) the root user
-
-[DEFAULT]
-# List of directories to load filter definitions from (separated by ',').
-# These directories MUST all be only writeable by root !
-filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap
-
-# List of directories to search executables in, in case filters do not
-# explicitly specify a full path (separated by ',')
-# If not specified, defaults to system PATH environment variable.
-# These directories MUST all be only writeable by root !
-exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin
-
-# Enable logging to syslog
-# Default value is False
-use_syslog=False
-
-# Which syslog facility to use.
-# Valid values include auth, authpriv, syslog, local0, local1...
-# Default value is 'syslog'
-syslog_log_facility=syslog
-
-# Which messages to log.
-# INFO means log all usage
-# ERROR means only log unsuccessful attempts
-syslog_log_level=ERROR
-
-[xenapi]
-# XenAPI configuration is only required by the L2 agent if it is to
-# target a XenServer/XCP compute host's dom0.
-xenapi_connection_url=<None>
-xenapi_connection_username=root
-xenapi_connection_password=<None>
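
How ``exec_dirs`` resolution behaves, as a simplified sketch (not the
oslo.rootwrap implementation): the first listed directory containing an
executable of that name wins, and PATH is consulted only when ``exec_dirs``
is unset::

    import os

    EXEC_DIRS = ['/sbin', '/usr/sbin', '/bin', '/usr/bin',
                 '/usr/local/bin', '/usr/local/sbin']

    def resolve(command):
        for d in EXEC_DIRS:
            path = os.path.join(d, command)
            if os.access(path, os.X_OK):
                return path
        return None

    print(resolve('ip'))  # e.g. /sbin/ip on most distributions
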
diff --git a/neutron/__init__.py b/neutron/__init__.py
deleted file mode 100644 (file)
index a796497..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import gettext
-
-from debtcollector import removals
-import six
-
-
-if six.PY2:
-    gettext.install('neutron', unicode=1)
-else:
-    gettext.install('neutron')
-
-
-six.moves.builtins.__dict__['_'] = removals.remove(
-    message='Builtin _ translation function is deprecated in OpenStack; '
-            'use the function from _i18n module for your project.')(_)
diff --git a/neutron/_i18n.py b/neutron/_i18n.py
deleted file mode 100644 (file)
index 5b9b5fa..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import oslo_i18n
-
-DOMAIN = "neutron"
-
-_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
-
-# The primary translation function using the well-known name "_"
-_ = _translators.primary
-
-# The contextual translation function using the name "_C"
-_C = _translators.contextual_form
-
-# The plural translation function using the name "_P"
-_P = _translators.plural_form
-
-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
-
-
-def get_available_languages():
-    return oslo_i18n.get_available_languages(DOMAIN)
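
A sketch of how these markers were used in-tree: ``_`` wraps user-facing
messages and the ``_L*`` variants wrap log messages at the matching level so
they can be translated separately (the ``check_quota`` helper is
hypothetical)::

    from oslo_log import log as logging

    from neutron._i18n import _, _LW

    LOG = logging.getLogger(__name__)

    def check_quota(used, limit):
        if used > limit:
            LOG.warning(_LW('Quota exceeded: %(used)s > %(limit)s'),
                        {'used': used, 'limit': limit})
            raise ValueError(_('Quota exceeded'))
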
diff --git a/neutron/agent/__init__.py b/neutron/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/common/__init__.py b/neutron/agent/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/common/base_polling.py b/neutron/agent/common/base_polling.py
deleted file mode 100644 (file)
index d654522..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2015 Cloudbase Solutions.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-class BasePollingManager(object):
-
-    def __init__(self):
-        self._force_polling = False
-        self._polling_completed = True
-
-    def force_polling(self):
-        self._force_polling = True
-
-    def polling_completed(self):
-        self._polling_completed = True
-
-    def _is_polling_required(self):
-        raise NotImplementedError()
-
-    @property
-    def is_polling_required(self):
-        # Always consume the updates to minimize polling.
-        polling_required = self._is_polling_required()
-
-        # Polling is required regardless of whether updates have been
-        # detected.
-        if self._force_polling:
-            self._force_polling = False
-            polling_required = True
-
-        # Polling is required if not yet done for previously detected
-        # updates.
-        if not self._polling_completed:
-            polling_required = True
-
-        if polling_required:
-            # Track whether polling has been completed to ensure that
-            # polling can be required until the caller indicates via a
-            # call to polling_completed() that polling has been
-            # successfully performed.
-            self._polling_completed = False
-
-        return polling_required
-
-
-class AlwaysPoll(BasePollingManager):
-
-    @property
-    def is_polling_required(self):
-        return True
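
A minimal sketch of the intended calling pattern, assuming the
``BasePollingManager`` class from the module above is importable; the queue
stands in for a real event source such as an ovsdb monitor::

    class QueuePollingManager(BasePollingManager):
        """Polls whenever queued updates are pending."""

        def __init__(self, queue):
            super(QueuePollingManager, self).__init__()
            self.queue = queue

        def _is_polling_required(self):
            return bool(self.queue)

    pm = QueuePollingManager(queue=['port-added'])
    if pm.is_polling_required:     # True: updates are pending
        pm.queue.clear()           # handle the updates...
        pm.polling_completed()     # ...then acknowledge the poll
    print(pm.is_polling_required)  # False until new updates arrive
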
diff --git a/neutron/agent/common/config.py b/neutron/agent/common/config.py
deleted file mode 100644 (file)
index d3594c4..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.common import config
-
-
-ROOT_HELPER_OPTS = [
-    cfg.StrOpt('root_helper', default='sudo',
-               help=_("Root helper application. "
-                      "Use 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' "
-                      "to use the real root filter facility. Change to 'sudo' "
-                      "to skip the filtering and just run the command "
-                      "directly.")),
-    cfg.BoolOpt('use_helper_for_ns_read',
-                default=True,
-                help=_("Use the root helper when listing the namespaces on a "
-                       "system. This may not be required depending on the "
-                       "security configuration. If the root helper is "
-                       "not required, set this to False for a performance "
-                       "improvement.")),
-    # We can't just use root_helper=sudo neutron-rootwrap-daemon $cfg because
-    # it isn't appropriate for long-lived processes spawned with
-    # create_process. Having a bool use_rootwrap_daemon option would preclude
-    # specifying the rootwrap daemon command, which may be necessary for Xen.
-    cfg.StrOpt('root_helper_daemon',
-               help=_('Root helper daemon application to use when possible.')),
-]
-
-AGENT_STATE_OPTS = [
-    cfg.FloatOpt('report_interval', default=30,
-                 help=_('Seconds between nodes reporting state to server; '
-                        'should be less than agent_down_time, best if it '
-                        'is half or less than agent_down_time.')),
-    cfg.BoolOpt('log_agent_heartbeats', default=False,
-                help=_('Log agent heartbeats')),
-]
-
-INTERFACE_DRIVER_OPTS = [
-    cfg.StrOpt('interface_driver',
-               help=_("The driver used to manage the virtual interface.")),
-]
-
-IPTABLES_OPTS = [
-    cfg.BoolOpt('comment_iptables_rules', default=True,
-                help=_("Add comments to iptables rules. "
-                       "Set to false to disallow the addition of comments to "
-                       "generated iptables rules that describe each rule's "
-                       "purpose. System must support the iptables comments "
-                       "module for addition of comments.")),
-]
-
-PROCESS_MONITOR_OPTS = [
-    cfg.StrOpt('check_child_processes_action', default='respawn',
-               choices=['respawn', 'exit'],
-               help=_('Action to be executed when a child process dies')),
-    cfg.IntOpt('check_child_processes_interval', default=60,
-               help=_('Interval between checks of child process liveness '
-                      '(seconds), use 0 to disable')),
-]
-
-AVAILABILITY_ZONE_OPTS = [
-    # The default AZ name "nova" is selected to match the default
-    # AZ name in Nova and Cinder.
-    cfg.StrOpt('availability_zone', max_length=255, default='nova',
-               help=_("Availability zone of this node")),
-]
-
-EXT_NET_BRIDGE_OPTS = [
-    cfg.StrOpt('external_network_bridge', default='br-ex',
-               deprecated_for_removal=True,
-               help=_("Name of bridge used for external network "
-                      "traffic. This should be set to an empty value for the "
-                      "Linux Bridge. When this parameter is set, each L3 "
-                      "agent can be associated with no more than one external "
-                      "network. This option is deprecated and will be removed "
-                      "in the M release.")),
-]
-
-
-def get_log_args(conf, log_file_name, **kwargs):
-    cmd_args = []
-    if conf.debug:
-        cmd_args.append('--debug')
-    if conf.verbose:
-        cmd_args.append('--verbose')
-    if (conf.log_dir or conf.log_file):
-        cmd_args.append('--log-file=%s' % log_file_name)
-        log_dir = None
-        if conf.log_dir and conf.log_file:
-            log_dir = os.path.dirname(
-                os.path.join(conf.log_dir, conf.log_file))
-        elif conf.log_dir:
-            log_dir = conf.log_dir
-        elif conf.log_file:
-            log_dir = os.path.dirname(conf.log_file)
-        if log_dir:
-            cmd_args.append('--log-dir=%s' % log_dir)
-        if kwargs.get('metadata_proxy_watch_log') is False:
-            cmd_args.append('--nometadata_proxy_watch_log')
-    else:
-        if conf.use_syslog:
-            cmd_args.append('--use-syslog')
-            if conf.syslog_log_facility:
-                cmd_args.append(
-                    '--syslog-log-facility=%s' % conf.syslog_log_facility)
-    return cmd_args
-
-
-def register_root_helper(conf):
-    conf.register_opts(ROOT_HELPER_OPTS, 'AGENT')
-
-
-def register_agent_state_opts_helper(conf):
-    conf.register_opts(AGENT_STATE_OPTS, 'AGENT')
-
-
-def register_interface_driver_opts_helper(conf):
-    conf.register_opts(INTERFACE_DRIVER_OPTS)
-
-
-def register_iptables_opts(conf):
-    conf.register_opts(IPTABLES_OPTS, 'AGENT')
-
-
-def register_process_monitor_opts(conf):
-    conf.register_opts(PROCESS_MONITOR_OPTS, 'AGENT')
-
-
-def register_availability_zone_opts_helper(conf):
-    conf.register_opts(AVAILABILITY_ZONE_OPTS, 'AGENT')
-
-
-def get_root_helper(conf):
-    return conf.AGENT.root_helper
-
-
-def setup_conf():
-    bind_opts = [
-        cfg.StrOpt('state_path',
-                   default='/var/lib/neutron',
-                   help=_("Where to store Neutron state files. "
-                          "This directory must be writable by the agent.")),
-    ]
-
-    conf = cfg.ConfigOpts()
-    conf.register_opts(bind_opts)
-    return conf
-
-# add a logging setup method here for convenience
-setup_logging = config.setup_logging
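
A sketch of wiring these helpers together the way an agent's ``main()``
typically does; the config-file path is illustrative::

    conf = setup_conf()
    register_root_helper(conf)
    register_agent_state_opts_helper(conf)
    conf(['--config-file', '/etc/neutron/neutron.conf'])

    print(get_root_helper(conf))       # e.g. 'sudo'
    print(conf.AGENT.report_interval)  # 30 by default
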
diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py
deleted file mode 100644 (file)
index a542bb1..0000000
+++ /dev/null
@@ -1,682 +0,0 @@
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import itertools
-import operator
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import excutils
-import retrying
-import six
-import uuid
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.agent.common import utils
-from neutron.agent.linux import ip_lib
-from neutron.agent.ovsdb import api as ovsdb
-from neutron.common import exceptions
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.openvswitch.agent.common \
-    import constants
-
-# Default timeout for ovs-vsctl command
-DEFAULT_OVS_VSCTL_TIMEOUT = 10
-
-# Special return value for an invalid OVS ofport
-INVALID_OFPORT = -1
-UNASSIGNED_OFPORT = []
-
-# OVS bridge fail modes
-FAILMODE_SECURE = 'secure'
-FAILMODE_STANDALONE = 'standalone'
-
-OPTS = [
-    cfg.IntOpt('ovs_vsctl_timeout',
-               default=DEFAULT_OVS_VSCTL_TIMEOUT,
-               help=_('Timeout in seconds for ovs-vsctl commands. '
-                      'If the timeout expires, ovs commands will fail with '
-                      'an ALARMCLOCK error.')),
-]
-cfg.CONF.register_opts(OPTS)
-
-LOG = logging.getLogger(__name__)
-
-OVS_DEFAULT_CAPS = {
-    'datapath_types': [],
-    'iface_types': [],
-}
-
-
-def _ofport_result_pending(result):
-    """Return True if ovs-vsctl indicates the result is still pending."""
-    # ovs-vsctl can return '[]' for an ofport that has not yet been assigned
-    try:
-        int(result)
-        return False
-    except (ValueError, TypeError):
-        return True
-
-
-def _ofport_retry(fn):
-    """Decorator for retrying when OVS has yet to assign an ofport.
-
-    The instance's vsctl_timeout is used as the max waiting time. This relies
-    on the fact that instance methods receive self as the first argument.
-    """
-    @six.wraps(fn)
-    def wrapped(*args, **kwargs):
-        self = args[0]
-        new_fn = retrying.retry(
-            retry_on_result=_ofport_result_pending,
-            stop_max_delay=self.vsctl_timeout * 1000,
-            wait_exponential_multiplier=10,
-            wait_exponential_max=1000,
-            retry_on_exception=lambda _: False)(fn)
-        return new_fn(*args, **kwargs)
-    return wrapped
-
-
-class VifPort(object):
-    def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
-        self.port_name = port_name
-        self.ofport = ofport
-        self.vif_id = vif_id
-        self.vif_mac = vif_mac
-        self.switch = switch
-
-    def __str__(self):
-        return ("iface-id=%s, vif_mac=%s, port_name=%s, ofport=%s, "
-                "bridge_name=%s") % (
-                    self.vif_id, self.vif_mac,
-                    self.port_name, self.ofport,
-                    self.switch.br_name)
-
-
-class BaseOVS(object):
-
-    def __init__(self):
-        self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout
-        self.ovsdb = ovsdb.API.get(self)
-
-    def add_bridge(self, bridge_name,
-                   datapath_type=constants.OVS_DATAPATH_SYSTEM):
-
-        self.ovsdb.add_br(bridge_name,
-                          datapath_type).execute()
-        br = OVSBridge(bridge_name)
-        # Don't return until vswitchd sets up the internal port
-        br.get_port_ofport(bridge_name)
-        return br
-
-    def delete_bridge(self, bridge_name):
-        self.ovsdb.del_br(bridge_name).execute()
-
-    def bridge_exists(self, bridge_name):
-        return self.ovsdb.br_exists(bridge_name).execute()
-
-    def port_exists(self, port_name):
-        cmd = self.ovsdb.db_get('Port', port_name, 'name')
-        return bool(cmd.execute(check_error=False, log_errors=False))
-
-    def get_bridge_for_iface(self, iface):
-        return self.ovsdb.iface_to_br(iface).execute()
-
-    def get_bridges(self):
-        return self.ovsdb.list_br().execute(check_error=True)
-
-    def get_bridge_external_bridge_id(self, bridge):
-        return self.ovsdb.br_get_external_id(bridge, 'bridge-id').execute()
-
-    def set_db_attribute(self, table_name, record, column, value,
-                         check_error=False, log_errors=True):
-        self.ovsdb.db_set(table_name, record, (column, value)).execute(
-            check_error=check_error, log_errors=log_errors)
-
-    def clear_db_attribute(self, table_name, record, column):
-        self.ovsdb.db_clear(table_name, record, column).execute()
-
-    def db_get_val(self, table, record, column, check_error=False,
-                   log_errors=True):
-        return self.ovsdb.db_get(table, record, column).execute(
-            check_error=check_error, log_errors=log_errors)
-
-    @property
-    def config(self):
-        """A dict containing the only row from the root Open_vSwitch table
-
-        This row contains several columns describing the Open vSwitch install
-        and the system on which it is installed. Useful keys include:
-            datapath_types: a list of supported datapath types
-            iface_types: a list of supported interface types
-            ovs_version: the OVS version
-        """
-        return self.ovsdb.db_list("Open_vSwitch").execute()[0]
-
-    @property
-    def capabilities(self):
-        _cfg = self.config
-        return {k: _cfg.get(k, OVS_DEFAULT_CAPS[k]) for k in OVS_DEFAULT_CAPS}
-
-
-class OVSBridge(BaseOVS):
-    def __init__(self, br_name, datapath_type=constants.OVS_DATAPATH_SYSTEM):
-        super(OVSBridge, self).__init__()
-        self.br_name = br_name
-        self.datapath_type = datapath_type
-        self.agent_uuid_stamp = 0
-
-    def set_agent_uuid_stamp(self, val):
-        self.agent_uuid_stamp = val
-
-    def set_controller(self, controllers):
-        self.ovsdb.set_controller(self.br_name,
-                                  controllers).execute(check_error=True)
-
-    def del_controller(self):
-        self.ovsdb.del_controller(self.br_name).execute(check_error=True)
-
-    def get_controller(self):
-        return self.ovsdb.get_controller(self.br_name).execute(
-            check_error=True)
-
-    def _set_bridge_fail_mode(self, mode):
-        self.ovsdb.set_fail_mode(self.br_name, mode).execute(check_error=True)
-
-    def set_secure_mode(self):
-        self._set_bridge_fail_mode(FAILMODE_SECURE)
-
-    def set_standalone_mode(self):
-        self._set_bridge_fail_mode(FAILMODE_STANDALONE)
-
-    def set_protocols(self, protocols):
-        self.set_db_attribute('Bridge', self.br_name, 'protocols', protocols,
-                              check_error=True)
-
-    def create(self, secure_mode=False):
-        with self.ovsdb.transaction() as txn:
-            txn.add(
-                self.ovsdb.add_br(self.br_name,
-                datapath_type=self.datapath_type))
-            if secure_mode:
-                txn.add(self.ovsdb.set_fail_mode(self.br_name,
-                                                 FAILMODE_SECURE))
-        # Don't return until vswitchd sets up the internal port
-        self.get_port_ofport(self.br_name)
-
-    def destroy(self):
-        self.delete_bridge(self.br_name)
-
-    def add_port(self, port_name, *interface_attr_tuples):
-        with self.ovsdb.transaction() as txn:
-            txn.add(self.ovsdb.add_port(self.br_name, port_name))
-            if interface_attr_tuples:
-                txn.add(self.ovsdb.db_set('Interface', port_name,
-                                          *interface_attr_tuples))
-        return self.get_port_ofport(port_name)
-
-    def replace_port(self, port_name, *interface_attr_tuples):
-        """Replace existing port or create it, and configure port interface."""
-
-        # NOTE(xiaohhui): If del_port is inside the transaction, there will
-        # only be one command for replace_port. This will cause the new port
-        # not be found by system, which will lead to Bug #1519926.
-        self.ovsdb.del_port(port_name).execute()
-        with self.ovsdb.transaction() as txn:
-            txn.add(self.ovsdb.add_port(self.br_name, port_name,
-                                        may_exist=False))
-            if interface_attr_tuples:
-                txn.add(self.ovsdb.db_set('Interface', port_name,
-                                          *interface_attr_tuples))
-        # Don't return until the port has been assigned by vswitchd
-        self.get_port_ofport(port_name)
-
-    def delete_port(self, port_name):
-        self.ovsdb.del_port(port_name, self.br_name).execute()
-
-    def run_ofctl(self, cmd, args, process_input=None):
-        full_args = ["ovs-ofctl", cmd, self.br_name] + args
-        try:
-            return utils.execute(full_args, run_as_root=True,
-                                 process_input=process_input)
-        except Exception as e:
-            LOG.error(_LE("Unable to execute %(cmd)s. Exception: "
-                          "%(exception)s"),
-                      {'cmd': full_args, 'exception': e})
-
-    def count_flows(self):
-        flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
-        return len(flow_list) - 1
-
-    def remove_all_flows(self):
-        self.run_ofctl("del-flows", [])
-
-    @_ofport_retry
-    def _get_port_ofport(self, port_name):
-        return self.db_get_val("Interface", port_name, "ofport")
-
-    def get_port_ofport(self, port_name):
-        """Get the port's assigned ofport, retrying if not yet assigned."""
-        ofport = INVALID_OFPORT
-        try:
-            ofport = self._get_port_ofport(port_name)
-        except retrying.RetryError:
-            LOG.exception(_LE("Timed out retrieving ofport on port %s."),
-                          port_name)
-        return ofport
-
-    def get_datapath_id(self):
-        return self.db_get_val('Bridge',
-                               self.br_name, 'datapath_id')
-
-    def do_action_flows(self, action, kwargs_list):
-        if action != 'del':
-            for kw in kwargs_list:
-                if 'cookie' not in kw:
-                    kw['cookie'] = self.agent_uuid_stamp
-        flow_strs = [_build_flow_expr_str(kw, action) for kw in kwargs_list]
-        self.run_ofctl('%s-flows' % action, ['-'], '\n'.join(flow_strs))
-
-    def add_flow(self, **kwargs):
-        self.do_action_flows('add', [kwargs])
-
-    def mod_flow(self, **kwargs):
-        self.do_action_flows('mod', [kwargs])
-
-    def delete_flows(self, **kwargs):
-        self.do_action_flows('del', [kwargs])
-
-    def dump_flows_for_table(self, table):
-        retval = None
-        flow_str = "table=%s" % table
-        flows = self.run_ofctl("dump-flows", [flow_str])
-        if flows:
-            retval = '\n'.join(item for item in flows.splitlines()
-                               if 'NXST' not in item)
-        return retval
-
-    def dump_all_flows(self):
-        return [f for f in self.run_ofctl("dump-flows", []).splitlines()
-                if 'NXST' not in f]
-
-    def deferred(self, **kwargs):
-        return DeferredOVSBridge(self, **kwargs)
-
-    def add_tunnel_port(self, port_name, remote_ip, local_ip,
-                        tunnel_type=p_const.TYPE_GRE,
-                        vxlan_udp_port=p_const.VXLAN_UDP_PORT,
-                        dont_fragment=True,
-                        tunnel_csum=False):
-        attrs = [('type', tunnel_type)]
-        # TODO(twilson) This is an OrderedDict solely to make a test happy
-        options = collections.OrderedDict()
-        vxlan_uses_custom_udp_port = (
-            tunnel_type == p_const.TYPE_VXLAN and
-            vxlan_udp_port != p_const.VXLAN_UDP_PORT
-        )
-        if vxlan_uses_custom_udp_port:
-            options['dst_port'] = vxlan_udp_port
-        options['df_default'] = str(dont_fragment).lower()
-        options['remote_ip'] = remote_ip
-        options['local_ip'] = local_ip
-        options['in_key'] = 'flow'
-        options['out_key'] = 'flow'
-        if tunnel_csum:
-            options['csum'] = str(tunnel_csum).lower()
-        attrs.append(('options', options))
-
-        return self.add_port(port_name, *attrs)
-
-    def add_patch_port(self, local_name, remote_name):
-        attrs = [('type', 'patch'),
-                 ('options', {'peer': remote_name})]
-        return self.add_port(local_name, *attrs)
-
-    def get_iface_name_list(self):
-        # get the interface name list for this bridge
-        return self.ovsdb.list_ifaces(self.br_name).execute(check_error=True)
-
-    def get_port_name_list(self):
-        # get the port name list for this bridge
-        return self.ovsdb.list_ports(self.br_name).execute(check_error=True)
-
-    def get_port_stats(self, port_name):
-        return self.db_get_val("Interface", port_name, "statistics")
-
-    def get_xapi_iface_id(self, xs_vif_uuid):
-        args = ["xe", "vif-param-get", "param-name=other-config",
-                "param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
-        try:
-            return utils.execute(args, run_as_root=True).strip()
-        except Exception as e:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to execute %(cmd)s. "
-                              "Exception: %(exception)s"),
-                          {'cmd': args, 'exception': e})
-
-    def get_ports_attributes(self, table, columns=None, ports=None,
-                             check_error=True, log_errors=True,
-                             if_exists=False):
-        port_names = ports or self.get_port_name_list()
-        if not port_names:
-            return []
-        return (self.ovsdb.db_list(table, port_names, columns=columns,
-                                   if_exists=if_exists).
-                execute(check_error=check_error, log_errors=log_errors))
-
-    # returns a VIF object for each VIF port
-    def get_vif_ports(self):
-        edge_ports = []
-        port_info = self.get_ports_attributes(
-            'Interface', columns=['name', 'external_ids', 'ofport'],
-            if_exists=True)
-        for port in port_info:
-            name = port['name']
-            external_ids = port['external_ids']
-            ofport = port['ofport']
-            if "iface-id" in external_ids and "attached-mac" in external_ids:
-                p = VifPort(name, ofport, external_ids["iface-id"],
-                            external_ids["attached-mac"], self)
-                edge_ports.append(p)
-            elif ("xs-vif-uuid" in external_ids and
-                  "attached-mac" in external_ids):
-                # if this is a xenserver and iface-id is not automatically
-                # synced to OVS from XAPI, we grab it from XAPI directly
-                iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
-                p = VifPort(name, ofport, iface_id,
-                            external_ids["attached-mac"], self)
-                edge_ports.append(p)
-
-        return edge_ports
-
-    def get_vif_port_to_ofport_map(self):
-        results = self.get_ports_attributes(
-            'Interface', columns=['name', 'external_ids', 'ofport'],
-            if_exists=True)
-        port_map = {}
-        for r in results:
-            # fall back to basic interface name
-            key = self.portid_from_external_ids(r['external_ids']) or r['name']
-            try:
-                port_map[key] = int(r['ofport'])
-            except TypeError:
-                # port doesn't yet have an ofport entry so we ignore it
-                pass
-        return port_map
-
-    def get_vif_port_set(self):
-        edge_ports = set()
-        results = self.get_ports_attributes(
-            'Interface', columns=['name', 'external_ids', 'ofport'],
-            if_exists=True)
-        for result in results:
-            if result['ofport'] == UNASSIGNED_OFPORT:
-                LOG.warn(_LW("Found not yet ready openvswitch port: %s"),
-                         result['name'])
-            elif result['ofport'] == INVALID_OFPORT:
-                LOG.warn(_LW("Found failed openvswitch port: %s"),
-                         result['name'])
-            elif 'attached-mac' in result['external_ids']:
-                port_id = self.portid_from_external_ids(result['external_ids'])
-                if port_id:
-                    edge_ports.add(port_id)
-        return edge_ports
-
-    def portid_from_external_ids(self, external_ids):
-        if 'iface-id' in external_ids:
-            return external_ids['iface-id']
-        if 'xs-vif-uuid' in external_ids:
-            iface_id = self.get_xapi_iface_id(
-                external_ids['xs-vif-uuid'])
-            return iface_id
-
-    def get_port_tag_dict(self):
-        """Get a dict of port names and associated vlan tags.
-
-        e.g. the returned dict is of the following form::
-
-            {u'int-br-eth2': [],
-             u'patch-tun': [],
-             u'qr-76d9e6b6-21': 1,
-             u'tapce5318ff-78': 1,
-             u'tape1400310-e6': 1}
-
-        The TAG ID is only available in the "Port" table and is not available
-        in the "Interface" table queried by the get_vif_port_set() method.
-
-        """
-        results = self.get_ports_attributes(
-            'Port', columns=['name', 'tag'], if_exists=True)
-        return {p['name']: p['tag'] for p in results}
-
-    def get_vifs_by_ids(self, port_ids):
-        interface_info = self.get_ports_attributes(
-            "Interface", columns=["name", "external_ids", "ofport"],
-            if_exists=True)
-        by_id = {x['external_ids'].get('iface-id'): x for x in interface_info}
-        result = {}
-        for port_id in port_ids:
-            result[port_id] = None
-            if port_id not in by_id:
-                LOG.info(_LI("Port %(port_id)s not present in bridge "
-                             "%(br_name)s"),
-                         {'port_id': port_id, 'br_name': self.br_name})
-                continue
-            pinfo = by_id[port_id]
-            if not self._check_ofport(port_id, pinfo):
-                continue
-            mac = pinfo['external_ids'].get('attached-mac')
-            result[port_id] = VifPort(pinfo['name'], pinfo['ofport'],
-                                      port_id, mac, self)
-        return result
-
-    @staticmethod
-    def _check_ofport(port_id, port_info):
-        if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]:
-            LOG.warn(_LW("ofport: %(ofport)s for VIF: %(vif)s is not a"
-                         " positive integer"),
-                     {'ofport': port_info['ofport'], 'vif': port_id})
-            return False
-        return True
-
-    def get_vif_port_by_id(self, port_id):
-        ports = self.ovsdb.db_find(
-            'Interface', ('external_ids', '=', {'iface-id': port_id}),
-            ('external_ids', '!=', {'attached-mac': ''}),
-            columns=['external_ids', 'name', 'ofport']).execute()
-        for port in ports:
-            if self.br_name != self.get_bridge_for_iface(port['name']):
-                continue
-            if not self._check_ofport(port_id, port):
-                continue
-            mac = port['external_ids'].get('attached-mac')
-            return VifPort(port['name'], port['ofport'], port_id, mac, self)
-        LOG.info(_LI("Port %(port_id)s not present in bridge %(br_name)s"),
-                 {'port_id': port_id, 'br_name': self.br_name})
-
-    def delete_ports(self, all_ports=False):
-        if all_ports:
-            port_names = self.get_port_name_list()
-        else:
-            port_names = (port.port_name for port in self.get_vif_ports())
-
-        for port_name in port_names:
-            self.delete_port(port_name)
-
-    def get_local_port_mac(self):
-        """Retrieve the mac of the bridge's local port."""
-        address = ip_lib.IPDevice(self.br_name).link.address
-        if address:
-            return address
-        else:
-            msg = _('Unable to determine mac address for %s') % self.br_name
-            raise Exception(msg)
-
-    def set_controllers_connection_mode(self, connection_mode):
-        """Set bridge controllers connection mode.
-
-        :param connection_mode: "out-of-band" or "in-band"
-        """
-        attr = [('connection_mode', connection_mode)]
-        controllers = self.db_get_val('Bridge', self.br_name, 'controller')
-        controllers = [controllers] if isinstance(
-            controllers, uuid.UUID) else controllers
-        with self.ovsdb.transaction(check_error=True) as txn:
-            for controller_uuid in controllers:
-                txn.add(self.ovsdb.db_set('Controller',
-                                          controller_uuid, *attr))
-
-    def _set_egress_bw_limit_for_port(self, port_name, max_kbps,
-                                      max_burst_kbps):
-        with self.ovsdb.transaction(check_error=True) as txn:
-            txn.add(self.ovsdb.db_set('Interface', port_name,
-                                      ('ingress_policing_rate', max_kbps)))
-            txn.add(self.ovsdb.db_set('Interface', port_name,
-                                      ('ingress_policing_burst',
-                                       max_burst_kbps)))
-
-    def create_egress_bw_limit_for_port(self, port_name, max_kbps,
-                                        max_burst_kbps):
-        self._set_egress_bw_limit_for_port(
-            port_name, max_kbps, max_burst_kbps)
-
-    def get_egress_bw_limit_for_port(self, port_name):
-
-        max_kbps = self.db_get_val('Interface', port_name,
-                                   'ingress_policing_rate')
-        max_burst_kbps = self.db_get_val('Interface', port_name,
-                                         'ingress_policing_burst')
-
-        max_kbps = max_kbps or None
-        max_burst_kbps = max_burst_kbps or None
-
-        return max_kbps, max_burst_kbps
-
-    def delete_egress_bw_limit_for_port(self, port_name):
-        self._set_egress_bw_limit_for_port(
-            port_name, 0, 0)
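-
-    # NOTE: illustrative usage sketch (editor's addition, not part of the
-    # original module). The three methods above map a Neutron egress limit
-    # onto the OVS Interface ingress_policing_* columns (egress from the VM
-    # is ingress to the bridge); values are in kbps. The bridge and port
-    # names below are placeholders:
-    #
-    #     br = OVSBridge('br-int')
-    #     br.create_egress_bw_limit_for_port('tap1234-56', 1000, 100)
-    #     br.get_egress_bw_limit_for_port('tap1234-56')   # -> (1000, 100)
-    #     br.delete_egress_bw_limit_for_port('tap1234-56')  # resets to 0/0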
-
-    def __enter__(self):
-        self.create()
-        return self
-
-    def __exit__(self, exc_type, exc_value, exc_tb):
-        self.destroy()
-
-
-class DeferredOVSBridge(object):
-    '''Deferred OVSBridge.
-
-    This class wraps add_flow, mod_flow and delete_flows calls to an OVSBridge
-    and defers their application until apply_flows is called, so that the
-    flows can be applied in bulk. It also passes the ALLOWED_PASSTHROUGHS
-    calls through, to avoid mixing OVSBridge and DeferredOVSBridge uses.
-    This class can be used as a context manager, in which case apply_flows is
-    called on __exit__ unless an exception is raised.
-    This class is not thread-safe; a new instance must be created for every
-    use.
-    '''
-    ALLOWED_PASSTHROUGHS = 'add_port', 'add_tunnel_port', 'delete_port'
-
-    def __init__(self, br, full_ordered=False,
-                 order=('add', 'mod', 'del')):
-        '''Constructor.
-
-        :param br: wrapped bridge
-        :param full_ordered: Optional, disable flow reordering (slower)
-        :param order: Optional, define in which order flows are applied
-        '''
-
-        self.br = br
-        self.full_ordered = full_ordered
-        self.order = order
-        if not self.full_ordered:
-            self.weights = dict((y, x) for x, y in enumerate(self.order))
-        self.action_flow_tuples = []
-
-    def __getattr__(self, name):
-        if name in self.ALLOWED_PASSTHROUGHS:
-            return getattr(self.br, name)
-        raise AttributeError(name)
-
-    def add_flow(self, **kwargs):
-        self.action_flow_tuples.append(('add', kwargs))
-
-    def mod_flow(self, **kwargs):
-        self.action_flow_tuples.append(('mod', kwargs))
-
-    def delete_flows(self, **kwargs):
-        self.action_flow_tuples.append(('del', kwargs))
-
-    def apply_flows(self):
-        action_flow_tuples = self.action_flow_tuples
-        self.action_flow_tuples = []
-        if not action_flow_tuples:
-            return
-
-        if not self.full_ordered:
-            action_flow_tuples.sort(key=lambda af: self.weights[af[0]])
-
-        grouped = itertools.groupby(action_flow_tuples,
-                                    key=operator.itemgetter(0))
-        itemgetter_1 = operator.itemgetter(1)
-        for action, action_flow_list in grouped:
-            flows = list(map(itemgetter_1, action_flow_list))
-            self.br.do_action_flows(action, flows)
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        if exc_type is None:
-            self.apply_flows()
-        else:
-            LOG.exception(_LE("OVS flows could not be applied on bridge %s"),
-                          self.br.br_name)
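-
-# NOTE: illustrative usage sketch (editor's addition, not part of the
-# original module). Assuming 'br' is an OVSBridge, the deferred wrapper
-# batches flow calls and applies them grouped by action on exit, 'add'
-# before 'mod' before 'del' unless full_ordered=True:
-#
-#     with DeferredOVSBridge(br) as deferred_br:
-#         deferred_br.add_flow(table=0, priority=1, actions='normal')
-#         deferred_br.delete_flows(table=23)
-#     # apply_flows() ran on __exit__, issuing one do_action_flows()
-#     # call per action type.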
-
-
-def _build_flow_expr_str(flow_dict, cmd):
-    flow_expr_arr = []
-    actions = None
-
-    if cmd == 'add':
-        flow_expr_arr.append("hard_timeout=%s" %
-                             flow_dict.pop('hard_timeout', '0'))
-        flow_expr_arr.append("idle_timeout=%s" %
-                             flow_dict.pop('idle_timeout', '0'))
-        flow_expr_arr.append("priority=%s" %
-                             flow_dict.pop('priority', '1'))
-    elif 'priority' in flow_dict:
-        msg = _("Cannot match priority on flow deletion or modification")
-        raise exceptions.InvalidInput(error_message=msg)
-
-    if cmd != 'del':
-        if "actions" not in flow_dict:
-            msg = _("Must specify one or more actions on flow addition"
-                    " or modification")
-            raise exceptions.InvalidInput(error_message=msg)
-        actions = "actions=%s" % flow_dict.pop('actions')
-
-    for key, value in six.iteritems(flow_dict):
-        if key == 'proto':
-            flow_expr_arr.append(value)
-        else:
-            flow_expr_arr.append("%s=%s" % (key, str(value)))
-
-    if actions:
-        flow_expr_arr.append(actions)
-
-    return ','.join(flow_expr_arr)
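-
-# NOTE: illustrative sketch (editor's addition, not part of the original
-# module). _build_flow_expr_str() turns a flow dict into the comma-separated
-# expression handed to ovs-ofctl. For cmd='add', the timeout and priority
-# defaults are filled in, so something like:
-#
-#     _build_flow_expr_str({'table': 0, 'proto': 'arp',
-#                           'actions': 'normal'}, 'add')
-#
-# is expected to yield (the order of the remaining match fields follows
-# dict iteration order):
-#
-#     'hard_timeout=0,idle_timeout=0,priority=1,table=0,arp,actions=normal'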
diff --git a/neutron/agent/common/polling.py b/neutron/agent/common/polling.py
deleted file mode 100644 (file)
index 45e51f0..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2015 Cloudbase Solutions.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-
-if os.name == 'nt':
-    from neutron.agent.windows import polling
-else:
-    from neutron.agent.linux import polling
-
-get_polling_manager = polling.get_polling_manager
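-
-# NOTE: illustrative sketch (editor's addition, not part of the original
-# module). This shim selects the platform-specific polling module at import
-# time so callers can stay platform-agnostic. Assuming the signature of the
-# Linux implementation of this era:
-#
-#     from neutron.agent.common import polling
-#
-#     with polling.get_polling_manager(minimize_polling=True) as pm:
-#         if pm.is_polling_required:
-#             # scan ports, program flows, ...
-#             pm.polling_completed()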
diff --git a/neutron/agent/common/utils.py b/neutron/agent/common/utils.py
deleted file mode 100644 (file)
index 08fdc68..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2015 Cloudbase Solutions.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _LE
-from neutron.agent.common import config
-from neutron.common import utils as neutron_utils
-
-
-if os.name == 'nt':
-    from neutron.agent.windows import utils
-else:
-    from neutron.agent.linux import utils
-
-
-LOG = logging.getLogger(__name__)
-config.register_root_helper(cfg.CONF)
-
-INTERFACE_NAMESPACE = 'neutron.interface_drivers'
-
-
-execute = utils.execute
-
-
-def load_interface_driver(conf):
-    """Load interface driver for agents like DHCP or L3 agent.
-
-    :param conf: driver configuration object
-    :raises SystemExit of 1 if driver cannot be loaded
-    """
-
-    try:
-        loaded_class = neutron_utils.load_class_by_alias_or_classname(
-                INTERFACE_NAMESPACE, conf.interface_driver)
-        return loaded_class(conf)
-    except ImportError:
-        LOG.error(_LE("Error loading interface driver '%s'"),
-                  conf.interface_driver)
-        raise SystemExit(1)
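-
-# NOTE: illustrative sketch (editor's addition, not part of the original
-# module). An agent passes its own config object, with
-# conf.interface_driver set to a stevedore alias (e.g. 'openvswitch') or a
-# full class path:
-#
-#     from oslo_config import cfg
-#
-#     cfg.CONF.set_override('interface_driver', 'openvswitch')
-#     driver = load_interface_driver(cfg.CONF)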
diff --git a/neutron/agent/dhcp/__init__.py b/neutron/agent/dhcp/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py
deleted file mode 100644 (file)
index 7b00de5..0000000
+++ /dev/null
@@ -1,597 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import os
-
-import eventlet
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_service import loopingcall
-from oslo_utils import importutils
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.agent.linux import dhcp
-from neutron.agent.linux import external_process
-from neutron.agent.metadata import driver as metadata_driver
-from neutron.agent import rpc as agent_rpc
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron import context
-from neutron import manager
-
-LOG = logging.getLogger(__name__)
-
-
-class DhcpAgent(manager.Manager):
-    """DHCP agent service manager.
-
-    Note that the public methods of this class are exposed as the server side
-    of an rpc interface.  The neutron server uses
-    neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the
-    client side to execute the methods here.  For more information about
-    changing rpc interfaces, see doc/source/devref/rpc_api.rst.
-    """
-    target = oslo_messaging.Target(version='1.0')
-
-    def __init__(self, host=None, conf=None):
-        super(DhcpAgent, self).__init__(host=host)
-        self.needs_resync_reasons = collections.defaultdict(list)
-        self.conf = conf or cfg.CONF
-        self.cache = NetworkCache()
-        self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
-        ctx = context.get_admin_context_without_session()
-        self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, ctx, self.conf.host)
-        # create dhcp dir to store dhcp info
-        dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
-        utils.ensure_dir(dhcp_dir)
-        self.dhcp_version = self.dhcp_driver_cls.check_version()
-        self._populate_networks_cache()
-        # keep track of mappings between networks and routers for
-        # metadata processing
-        self._metadata_routers = {}  # {network_id: router_id}
-        self._process_monitor = external_process.ProcessMonitor(
-            config=self.conf,
-            resource_type='dhcp')
-
-    def init_host(self):
-        self.sync_state()
-
-    def _populate_networks_cache(self):
-        """Populate the networks cache when the DHCP-agent starts."""
-        try:
-            existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
-                self.conf
-            )
-            for net_id in existing_networks:
-                net = dhcp.NetModel({"id": net_id, "subnets": [], "ports": []})
-                self.cache.put(net)
-        except NotImplementedError:
-            # just go ahead with an empty networks cache
-            LOG.debug("The '%s' DHCP-driver does not support retrieving of a "
-                      "list of existing networks",
-                      self.conf.dhcp_driver)
-
-    def after_start(self):
-        self.run()
-        LOG.info(_LI("DHCP agent started"))
-
-    def run(self):
-        """Activate the DHCP agent."""
-        self.sync_state()
-        self.periodic_resync()
-
-    def call_driver(self, action, network, **action_kwargs):
-        """Invoke an action on a DHCP driver instance."""
-        LOG.debug('Calling driver for network: %(net)s action: %(action)s',
-                  {'net': network.id, 'action': action})
-        try:
-            # The driver expects something duck-typed to resemble
-            # the base models.
-            driver = self.dhcp_driver_cls(self.conf,
-                                          network,
-                                          self._process_monitor,
-                                          self.dhcp_version,
-                                          self.plugin_rpc)
-            getattr(driver, action)(**action_kwargs)
-            return True
-        except exceptions.Conflict:
-            # No need to resync here, the agent will receive the event related
-            # to a status update for the network
-            LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there '
-                            'is a conflict with its current state; please '
-                            'check that the network and/or its subnet(s) '
-                            'still exist.'),
-                        {'net_id': network.id, 'action': action})
-        except Exception as e:
-            if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure':
-                # Don't resync if port could not be created because of an IP
-                # allocation failure. When the subnet is updated with a new
-                # allocation pool or a port is deleted to free up an IP, this
-                # will automatically be retried on the notification
-                self.schedule_resync(e, network.id)
-            if (isinstance(e, oslo_messaging.RemoteError)
-                and e.exc_type == 'NetworkNotFound'
-                or isinstance(e, exceptions.NetworkNotFound)):
-                LOG.warning(_LW("Network %s has been deleted."), network.id)
-            else:
-                LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
-                              {'net_id': network.id, 'action': action})
-
-    def schedule_resync(self, reason, network_id=None):
-        """Schedule a resync for a given network and reason. If no network is
-        specified, resync all networks.
-        """
-        self.needs_resync_reasons[network_id].append(reason)
-
-    @utils.synchronized('dhcp-agent')
-    def sync_state(self, networks=None):
-        """Sync the local DHCP state with Neutron. If no networks are passed,
-        or 'None' is one of the networks, sync all of the networks.
-        """
-        only_nets = set([] if (not networks or None in networks) else networks)
-        LOG.info(_LI('Synchronizing state'))
-        pool = eventlet.GreenPool(self.conf.num_sync_threads)
-        known_network_ids = set(self.cache.get_network_ids())
-
-        try:
-            active_networks = self.plugin_rpc.get_active_networks_info()
-            active_network_ids = set(network.id for network in active_networks)
-            for deleted_id in known_network_ids - active_network_ids:
-                try:
-                    self.disable_dhcp_helper(deleted_id)
-                except Exception as e:
-                    self.schedule_resync(e, deleted_id)
-                    LOG.exception(_LE('Unable to sync network state on '
-                                      'deleted network %s'), deleted_id)
-
-            for network in active_networks:
-                if (not only_nets or  # specifically resync all
-                        network.id not in known_network_ids or  # missing net
-                        network.id in only_nets):  # specific network to sync
-                    pool.spawn(self.safe_configure_dhcp_for_network, network)
-            pool.waitall()
-            LOG.info(_LI('Synchronizing state complete'))
-
-        except Exception as e:
-            if only_nets:
-                for network_id in only_nets:
-                    self.schedule_resync(e, network_id)
-            else:
-                self.schedule_resync(e)
-            LOG.exception(_LE('Unable to sync network state.'))
-
-    @utils.exception_logger()
-    def _periodic_resync_helper(self):
-        """Resync the dhcp state at the configured interval."""
-        while True:
-            eventlet.sleep(self.conf.resync_interval)
-            if self.needs_resync_reasons:
-                # be careful to avoid a race with additions to list
-                # from other threads
-                reasons = self.needs_resync_reasons
-                self.needs_resync_reasons = collections.defaultdict(list)
-                for net, r in reasons.items():
-                    if not net:
-                        net = "*"
-                    LOG.debug("resync (%(network)s): %(reason)s",
-                              {"reason": r, "network": net})
-                self.sync_state(reasons.keys())
-
-    def periodic_resync(self):
-        """Spawn a thread to periodically resync the dhcp state."""
-        eventlet.spawn(self._periodic_resync_helper)
-
-    def safe_get_network_info(self, network_id):
-        try:
-            network = self.plugin_rpc.get_network_info(network_id)
-            if not network:
-                LOG.warning(_LW('Network %s has been deleted.'), network_id)
-            return network
-        except Exception as e:
-            self.schedule_resync(e, network_id)
-            LOG.exception(_LE('Network %s info call failed.'), network_id)
-
-    def enable_dhcp_helper(self, network_id):
-        """Enable DHCP for a network that meets enabling criteria."""
-        network = self.safe_get_network_info(network_id)
-        if network:
-            self.configure_dhcp_for_network(network)
-
-    @utils.exception_logger()
-    def safe_configure_dhcp_for_network(self, network):
-        try:
-            self.configure_dhcp_for_network(network)
-        except (exceptions.NetworkNotFound, RuntimeError):
-            LOG.warning(_LW('Network %s may have been deleted and its '
-                            'resources may have already been disposed.'),
-                        network.id)
-
-    def configure_dhcp_for_network(self, network):
-        if not network.admin_state_up:
-            return
-
-        enable_metadata = self.dhcp_driver_cls.should_enable_metadata(
-                self.conf, network)
-        dhcp_network_enabled = False
-
-        for subnet in network.subnets:
-            if subnet.enable_dhcp:
-                if self.call_driver('enable', network):
-                    dhcp_network_enabled = True
-                    self.cache.put(network)
-                break
-
-        if enable_metadata and dhcp_network_enabled:
-            for subnet in network.subnets:
-                if subnet.ip_version == 4 and subnet.enable_dhcp:
-                    self.enable_isolated_metadata_proxy(network)
-                    break
-
-    def disable_dhcp_helper(self, network_id):
-        """Disable DHCP for a network known to the agent."""
-        network = self.cache.get_network_by_id(network_id)
-        if network:
-            if self.conf.enable_isolated_metadata:
-                # NOTE(jschwarz): In the case where a network is deleted, all
-                # the subnets and ports are deleted before this function is
-                # called, so checking whether 'should_enable_metadata' is
-                # True for any subnet would be incorrect here.
-                self.disable_isolated_metadata_proxy(network)
-            if self.call_driver('disable', network):
-                self.cache.remove(network)
-
-    def refresh_dhcp_helper(self, network_id):
-        """Refresh or disable DHCP for a network depending on the current state
-        of the network.
-        """
-        old_network = self.cache.get_network_by_id(network_id)
-        if not old_network:
-            # DHCP is not currently running for this network.
-            return self.enable_dhcp_helper(network_id)
-
-        network = self.safe_get_network_info(network_id)
-        if not network:
-            return
-
-        old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
-        new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)
-
-        if new_cidrs and old_cidrs == new_cidrs:
-            self.call_driver('reload_allocations', network)
-            self.cache.put(network)
-        elif new_cidrs:
-            if self.call_driver('restart', network):
-                self.cache.put(network)
-        else:
-            self.disable_dhcp_helper(network.id)
-
-    @utils.synchronized('dhcp-agent')
-    def network_create_end(self, context, payload):
-        """Handle the network.create.end notification event."""
-        network_id = payload['network']['id']
-        self.enable_dhcp_helper(network_id)
-
-    @utils.synchronized('dhcp-agent')
-    def network_update_end(self, context, payload):
-        """Handle the network.update.end notification event."""
-        network_id = payload['network']['id']
-        if payload['network']['admin_state_up']:
-            self.enable_dhcp_helper(network_id)
-        else:
-            self.disable_dhcp_helper(network_id)
-
-    @utils.synchronized('dhcp-agent')
-    def network_delete_end(self, context, payload):
-        """Handle the network.delete.end notification event."""
-        self.disable_dhcp_helper(payload['network_id'])
-
-    @utils.synchronized('dhcp-agent')
-    def subnet_update_end(self, context, payload):
-        """Handle the subnet.update.end notification event."""
-        network_id = payload['subnet']['network_id']
-        self.refresh_dhcp_helper(network_id)
-
-    # Use the update handler for the subnet create event.
-    subnet_create_end = subnet_update_end
-
-    @utils.synchronized('dhcp-agent')
-    def subnet_delete_end(self, context, payload):
-        """Handle the subnet.delete.end notification event."""
-        subnet_id = payload['subnet_id']
-        network = self.cache.get_network_by_subnet_id(subnet_id)
-        if network:
-            self.refresh_dhcp_helper(network.id)
-
-    @utils.synchronized('dhcp-agent')
-    def port_update_end(self, context, payload):
-        """Handle the port.update.end notification event."""
-        updated_port = dhcp.DictModel(payload['port'])
-        network = self.cache.get_network_by_id(updated_port.network_id)
-        if network:
-            LOG.info(_LI("Trigger reload_allocations for port %s"),
-                     updated_port)
-            driver_action = 'reload_allocations'
-            if self._is_port_on_this_agent(updated_port):
-                orig = self.cache.get_port_by_id(updated_port['id'])
-                # assume IP change if not in cache
-                old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []}
-                new_ips = {i['ip_address'] for i in updated_port['fixed_ips']}
-                if old_ips != new_ips:
-                    driver_action = 'restart'
-            self.cache.put_port(updated_port)
-            self.call_driver(driver_action, network)
-
-    def _is_port_on_this_agent(self, port):
-        thishost = utils.get_dhcp_agent_device_id(
-            port['network_id'], self.conf.host)
-        return port['device_id'] == thishost
-
-    # Use the update handler for the port create event.
-    port_create_end = port_update_end
-
-    @utils.synchronized('dhcp-agent')
-    def port_delete_end(self, context, payload):
-        """Handle the port.delete.end notification event."""
-        port = self.cache.get_port_by_id(payload['port_id'])
-        if port:
-            network = self.cache.get_network_by_id(port.network_id)
-            self.cache.remove_port(port)
-            self.call_driver('reload_allocations', network)
-
-    def enable_isolated_metadata_proxy(self, network):
-
-        # The proxy might work for either a single network
-        # or all the networks connected via a router
-        # to the one passed as a parameter
-        kwargs = {'network_id': network.id}
-        # When the metadata network is enabled, the proxy might
-        # be started for the router attached to the network
-        if self.conf.enable_metadata_network:
-            router_ports = [port for port in network.ports
-                            if (port.device_owner in
-                                constants.ROUTER_INTERFACE_OWNERS)]
-            if router_ports:
-                # Multiple router ports should not be allowed
-                if len(router_ports) > 1:
-                    LOG.warning(_LW("%(port_num)d router ports found on the "
-                                    "metadata access network. Only the port "
-                                    "%(port_id)s, for router %(router_id)s "
-                                    "will be considered"),
-                                {'port_num': len(router_ports),
-                                 'port_id': router_ports[0].id,
-                                 'router_id': router_ports[0].device_id})
-                kwargs = {'router_id': router_ports[0].device_id}
-                self._metadata_routers[network.id] = router_ports[0].device_id
-
-        metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
-            self._process_monitor, network.namespace, dhcp.METADATA_PORT,
-            self.conf, **kwargs)
-
-    def disable_isolated_metadata_proxy(self, network):
-        if (self.conf.enable_metadata_network and
-            network.id in self._metadata_routers):
-            uuid = self._metadata_routers[network.id]
-            is_router_id = True
-        else:
-            uuid = network.id
-            is_router_id = False
-        metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
-            self._process_monitor, uuid, self.conf)
-        if is_router_id:
-            del self._metadata_routers[network.id]
-
-
-class DhcpPluginApi(object):
-    """Agent side of the dhcp rpc API.
-
-    This class implements the client side of an rpc interface.  The server side
-    of this interface can be found in
-    neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback.  For more information
-    about changing rpc interfaces, see doc/source/devref/rpc_api.rst.
-
-    API version history:
-        1.0 - Initial version.
-        1.1 - Added get_active_networks_info, create_dhcp_port,
-              and update_dhcp_port methods.
-
-    """
-
-    def __init__(self, topic, context, host):
-        self.context = context
-        self.host = host
-        target = oslo_messaging.Target(
-                topic=topic,
-                namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN,
-                version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def get_active_networks_info(self):
-        """Make a remote process call to retrieve all network info."""
-        cctxt = self.client.prepare(version='1.1')
-        networks = cctxt.call(self.context, 'get_active_networks_info',
-                              host=self.host)
-        return [dhcp.NetModel(n) for n in networks]
-
-    def get_network_info(self, network_id):
-        """Make a remote process call to retrieve network info."""
-        cctxt = self.client.prepare()
-        network = cctxt.call(self.context, 'get_network_info',
-                             network_id=network_id, host=self.host)
-        if network:
-            return dhcp.NetModel(network)
-
-    def create_dhcp_port(self, port):
-        """Make a remote process call to create the dhcp port."""
-        cctxt = self.client.prepare(version='1.1')
-        port = cctxt.call(self.context, 'create_dhcp_port',
-                          port=port, host=self.host)
-        if port:
-            return dhcp.DictModel(port)
-
-    def update_dhcp_port(self, port_id, port):
-        """Make a remote process call to update the dhcp port."""
-        cctxt = self.client.prepare(version='1.1')
-        port = cctxt.call(self.context, 'update_dhcp_port',
-                          port_id=port_id, port=port, host=self.host)
-        if port:
-            return dhcp.DictModel(port)
-
-    def release_dhcp_port(self, network_id, device_id):
-        """Make a remote process call to release the dhcp port."""
-        cctxt = self.client.prepare()
-        return cctxt.call(self.context, 'release_dhcp_port',
-                          network_id=network_id, device_id=device_id,
-                          host=self.host)
-
-
-class NetworkCache(object):
-    """Agent cache of the current network state."""
-    def __init__(self):
-        self.cache = {}
-        self.subnet_lookup = {}
-        self.port_lookup = {}
-
-    def get_network_ids(self):
-        return self.cache.keys()
-
-    def get_network_by_id(self, network_id):
-        return self.cache.get(network_id)
-
-    def get_network_by_subnet_id(self, subnet_id):
-        return self.cache.get(self.subnet_lookup.get(subnet_id))
-
-    def get_network_by_port_id(self, port_id):
-        return self.cache.get(self.port_lookup.get(port_id))
-
-    def put(self, network):
-        if network.id in self.cache:
-            self.remove(self.cache[network.id])
-
-        self.cache[network.id] = network
-
-        for subnet in network.subnets:
-            self.subnet_lookup[subnet.id] = network.id
-
-        for port in network.ports:
-            self.port_lookup[port.id] = network.id
-
-    def remove(self, network):
-        del self.cache[network.id]
-
-        for subnet in network.subnets:
-            del self.subnet_lookup[subnet.id]
-
-        for port in network.ports:
-            del self.port_lookup[port.id]
-
-    def put_port(self, port):
-        network = self.get_network_by_id(port.network_id)
-        for index in range(len(network.ports)):
-            if network.ports[index].id == port.id:
-                network.ports[index] = port
-                break
-        else:
-            network.ports.append(port)
-
-        self.port_lookup[port.id] = network.id
-
-    def remove_port(self, port):
-        network = self.get_network_by_port_id(port.id)
-
-        for index in range(len(network.ports)):
-            if network.ports[index] == port:
-                del network.ports[index]
-                del self.port_lookup[port.id]
-                break
-
-    def get_port_by_id(self, port_id):
-        network = self.get_network_by_port_id(port_id)
-        if network:
-            for port in network.ports:
-                if port.id == port_id:
-                    return port
-
-    def get_state(self):
-        net_ids = self.get_network_ids()
-        num_nets = len(net_ids)
-        num_subnets = 0
-        num_ports = 0
-        for net_id in net_ids:
-            network = self.get_network_by_id(net_id)
-            num_subnets += len(network.subnets)
-            num_ports += len(network.ports)
-        return {'networks': num_nets,
-                'subnets': num_subnets,
-                'ports': num_ports}
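-
-# NOTE: illustrative usage sketch (editor's addition, not part of the
-# original module). The cache keeps one NetModel-like object per network
-# plus reverse lookups from subnet and port ids, e.g. for a 'net' object
-# with .id, .subnets and .ports attributes:
-#
-#     cache = NetworkCache()
-#     cache.put(net)
-#     cache.get_network_by_subnet_id(net.subnets[0].id)  # -> net
-#     cache.get_state()  # -> {'networks': 1, 'subnets': ..., 'ports': ...}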
-
-
-class DhcpAgentWithStateReport(DhcpAgent):
-    def __init__(self, host=None, conf=None):
-        super(DhcpAgentWithStateReport, self).__init__(host=host, conf=conf)
-        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
-        self.agent_state = {
-            'binary': 'neutron-dhcp-agent',
-            'host': host,
-            'availability_zone': self.conf.AGENT.availability_zone,
-            'topic': topics.DHCP_AGENT,
-            'configurations': {
-                'dhcp_driver': self.conf.dhcp_driver,
-                'dhcp_lease_duration': self.conf.dhcp_lease_duration,
-                'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
-            'start_flag': True,
-            'agent_type': constants.AGENT_TYPE_DHCP}
-        report_interval = self.conf.AGENT.report_interval
-        if report_interval:
-            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
-                self._report_state)
-            self.heartbeat.start(interval=report_interval)
-
-    def _report_state(self):
-        try:
-            self.agent_state.get('configurations').update(
-                self.cache.get_state())
-            ctx = context.get_admin_context_without_session()
-            agent_status = self.state_rpc.report_state(
-                ctx, self.agent_state, True)
-            if agent_status == constants.AGENT_REVIVED:
-                LOG.info(_LI("Agent has just been revived. "
-                             "Scheduling full sync"))
-                self.schedule_resync("Agent has just been revived")
-        except AttributeError:
-            # This means the server does not support report_state
-            LOG.warn(_LW("Neutron server does not support state report."
-                         " State report for this agent will be disabled."))
-            self.heartbeat.stop()
-            self.run()
-            return
-        except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
-            return
-        if self.agent_state.pop('start_flag', None):
-            self.run()
-
-    def agent_updated(self, context, payload):
-        """Handle the agent_updated notification event."""
-        self.schedule_resync(_("Agent updated: %(payload)s") %
-                             {"payload": payload})
-        LOG.info(_LI("agent_updated by server side %s!"), payload)
-
-    def after_start(self):
-        LOG.info(_LI("DHCP agent started"))
diff --git a/neutron/agent/dhcp/config.py b/neutron/agent/dhcp/config.py
deleted file mode 100644 (file)
index ab0d9b7..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-
-DHCP_AGENT_OPTS = [
-    cfg.IntOpt('resync_interval', default=5,
-               help=_("The DHCP agent will resync its state with Neutron to "
-                      "recover from any transient notification or RPC errors. "
-                      "The interval is number of seconds between attempts.")),
-    cfg.StrOpt('dhcp_driver',
-               default='neutron.agent.linux.dhcp.Dnsmasq',
-               help=_("The driver used to manage the DHCP server.")),
-    cfg.BoolOpt('enable_isolated_metadata', default=False,
-                help=_("The DHCP server can assist with providing metadata "
-                       "support on isolated networks. Setting this value to "
-                       "True will cause the DHCP server to append specific "
-                       "host routes to the DHCP request. The metadata service "
-                       "will only be activated when the subnet does not "
-                       "contain any router port. The guest instance must be "
-                       "configured to request host routes via DHCP (Option "
-                       "121). This option doesn't have any effect when "
-                       "force_metadata is set to True.")),
-    cfg.BoolOpt('force_metadata', default=False,
-                help=_("In some cases the Neutron router is not present to "
-                       "provide the metadata IP but the DHCP server can be "
-                       "used to provide this info. Setting this value will "
-                       "force the DHCP server to append specific host routes "
-                       "to the DHCP request. If this option is set, then the "
-                       "metadata service will be activated for all the "
-                       "networks.")),
-    cfg.BoolOpt('enable_metadata_network', default=False,
-                help=_("Allows for serving metadata requests coming from a "
-                       "dedicated metadata access network whose CIDR is "
-                       "169.254.169.254/16 (or larger prefix), and is "
-                       "connected to a Neutron router from which the VMs send "
-                       "metadata:1 request. In this case DHCP Option 121 will "
-                       "not be injected in VMs, as they will be able to reach "
-                       "169.254.169.254 through a router. This option "
-                       "requires enable_isolated_metadata = True.")),
-    cfg.IntOpt('num_sync_threads', default=4,
-               help=_('Number of threads to use during sync process. '
-                      'Should not exceed connection pool size configured on '
-                      'server.'))
-]
-
-DHCP_OPTS = [
-    cfg.StrOpt('dhcp_confs',
-               default='$state_path/dhcp',
-               help=_('Location to store DHCP server config files')),
-    cfg.StrOpt('dhcp_domain',
-               default='openstacklocal',
-               help=_('Domain to use for building the hostnames. '
-                      'This option is deprecated. It has been moved to '
-                      'neutron.conf as dns_domain. It will be removed from '
-                      'here in a future release.'),
-               deprecated_for_removal=True),
-]
-
-DNSMASQ_OPTS = [
-    cfg.StrOpt('dnsmasq_config_file',
-               default='',
-               help=_('Override the default dnsmasq settings with this file')),
-    cfg.ListOpt('dnsmasq_dns_servers',
-                help=_('Comma-separated list of the DNS servers which will be '
-                       'used as forwarders.'),
-                deprecated_name='dnsmasq_dns_server'),
-    cfg.StrOpt('dnsmasq_base_log_dir',
-               help=_("Base log dir for dnsmasq logging. "
-                      "The log contains DHCP and DNS log information and "
-                      "is useful for debugging issues with either DHCP or "
-                      "DNS. If this section is null, disable dnsmasq log.")),
-    cfg.BoolOpt('dnsmasq_local_resolv', default=True,
-                help=_("Enables the dnsmasq service to provide name "
-                       "resolution for instances via DNS resolvers on the "
-                       "host running the DHCP agent. Effectively removes the "
-                       "'--no-resolv' option from the dnsmasq process "
-                       "arguments. Adding custom DNS resolvers to the "
-                       "'dnsmasq_dns_servers' option disables this feature.")),
-    cfg.IntOpt(
-        'dnsmasq_lease_max',
-        default=(2 ** 24),
-        help=_('Limit number of leases to prevent a denial-of-service.')),
-    cfg.BoolOpt('dhcp_broadcast_reply', default=False,
-                help=_("Use broadcast in DHCP replies")),
-]
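-
-# NOTE: illustrative sketch (editor's addition, not part of the original
-# module). These options are registered in the DEFAULT group, so they
-# surface in the DHCP agent's config file (typically
-# /etc/neutron/dhcp_agent.ini), e.g.:
-#
-#     [DEFAULT]
-#     resync_interval = 5
-#     dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
-#     enable_isolated_metadata = True
-#     dnsmasq_dns_servers = 8.8.8.8,8.8.4.4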
diff --git a/neutron/agent/dhcp_agent.py b/neutron/agent/dhcp_agent.py
deleted file mode 100644 (file)
index 46245fc..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_service import service
-
-from neutron.agent.common import config
-from neutron.agent.dhcp import config as dhcp_config
-from neutron.agent.linux import interface
-from neutron.agent.metadata import config as metadata_config
-from neutron.common import config as common_config
-from neutron.common import topics
-from neutron import service as neutron_service
-
-
-def register_options(conf):
-    config.register_interface_driver_opts_helper(conf)
-    config.register_agent_state_opts_helper(conf)
-    config.register_availability_zone_opts_helper(conf)
-    conf.register_opts(dhcp_config.DHCP_AGENT_OPTS)
-    conf.register_opts(dhcp_config.DHCP_OPTS)
-    conf.register_opts(dhcp_config.DNSMASQ_OPTS)
-    conf.register_opts(metadata_config.DRIVER_OPTS)
-    conf.register_opts(metadata_config.SHARED_OPTS)
-    conf.register_opts(interface.OPTS)
-
-
-def main():
-    register_options(cfg.CONF)
-    common_config.init(sys.argv[1:])
-    config.setup_logging()
-    server = neutron_service.Service.create(
-        binary='neutron-dhcp-agent',
-        topic=topics.DHCP_AGENT,
-        report_interval=cfg.CONF.AGENT.report_interval,
-        manager='neutron.agent.dhcp.agent.DhcpAgentWithStateReport')
-    service.launch(cfg.CONF, server).wait()
diff --git a/neutron/agent/firewall.py b/neutron/agent/firewall.py
deleted file mode 100644 (file)
index 42f4a01..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import contextlib
-
-import six
-
-
-INGRESS_DIRECTION = 'ingress'
-EGRESS_DIRECTION = 'egress'
-
-
-@six.add_metaclass(abc.ABCMeta)
-class FirewallDriver(object):
-    """Firewall Driver base class.
-
-    Defines methods that any driver providing security groups
-    and provider firewall functionality should implement.
-    Note port attribute should have information of security group ids and
-    security group rules.
-
-    the dict of port should have
-      device : interface name
-      fixed_ips: ips of the device
-      mac_address: mac_address of the device
-      security_groups: [sgid, sgid]
-      security_group_rules : [ rule, rule ]
-      the rule must contain ethertype and direction
-      the rule may contain security_group_id,
-          protocol, port_min, port_max
-          source_ip_prefix, source_port_min,
-          source_port_max, dest_ip_prefix, and
-          remote_group_id
-      Note: source_group_ip in REST API should be converted by this rule
-      if direction is ingress:
-        remote_group_ip will be a source_ip_prefix
-      if direction is egress:
-        remote_group_ip will be a dest_ip_prefix
-      Note: remote_group_id in REST API should be converted by this rule
-      if direction is ingress:
-        remote_group_id will be a list of source_ip_prefix
-      if direction is egress:
-        remote_group_id will be a list of dest_ip_prefix
-      remote_group_id will also remaining membership update management
-    """
-
-    @abc.abstractmethod
-    def prepare_port_filter(self, port):
-        """Prepare filters for the port.
-
-        This method should be called before the port is created.
-        """
-
-    def apply_port_filter(self, port):
-        """Apply port filter.
-
-        Once this method returns, the port should be firewalled
-        appropriately. This method should as far as possible be a
-        no-op. It's vastly preferred to get everything set up in
-        prepare_port_filter.
-        """
-        raise NotImplementedError()
-
-    @abc.abstractmethod
-    def update_port_filter(self, port):
-        """Refresh security group rules from data store
-
-        Gets called when a port gets added to or removed from
-        the security group the port is a member of or if the
-        group gains or looses a rule.
-        """
-
-    def remove_port_filter(self, port):
-        """Stop filtering port."""
-        raise NotImplementedError()
-
-    def filter_defer_apply_on(self):
-        """Defer application of filtering rule."""
-        pass
-
-    def filter_defer_apply_off(self):
-        """Turn off deferral of rules and apply the rules now."""
-        pass
-
-    @property
-    def ports(self):
-        """Returns filtered ports."""
-        pass
-
-    @contextlib.contextmanager
-    def defer_apply(self):
-        """Defer apply context."""
-        self.filter_defer_apply_on()
-        try:
-            yield
-        finally:
-            self.filter_defer_apply_off()
-
-    def update_security_group_members(self, sg_id, ips):
-        """Update group members in a security group."""
-        raise NotImplementedError()
-
-    def update_security_group_rules(self, sg_id, rules):
-        """Update rules in a security group."""
-        raise NotImplementedError()
-
-    def security_group_updated(self, action_type, sec_group_ids,
-                               device_id=None):
-        """Called when a security group is updated.
-
-        Note: This method needs to be implemented by the firewall drivers
-        which use enhanced RPC for security_groups.
-        """
-        raise NotImplementedError()
-
-
-class NoopFirewallDriver(FirewallDriver):
-    """Noop Firewall Driver.
-
-    Firewall driver which does nothing.
-    This driver is for disabling the firewall functionality.
-    """
-
-    def prepare_port_filter(self, port):
-        pass
-
-    def apply_port_filter(self, port):
-        pass
-
-    def update_port_filter(self, port):
-        pass
-
-    def remove_port_filter(self, port):
-        pass
-
-    def filter_defer_apply_on(self):
-        pass
-
-    def filter_defer_apply_off(self):
-        pass
-
-    @property
-    def ports(self):
-        return {}
-
-    def update_security_group_members(self, sg_id, ips):
-        pass
-
-    def update_security_group_rules(self, sg_id, rules):
-        pass
-
-    def security_group_updated(self, action_type, sec_group_ids,
-                               device_id=None):
-        pass
diff --git a/neutron/agent/l2/__init__.py b/neutron/agent/l2/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/l2/agent_extension.py b/neutron/agent/l2/agent_extension.py
deleted file mode 100644 (file)
index d77ea84..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) 2015 Mellanox Technologies, Ltd
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AgentCoreResourceExtension(object):
-    """Define stable abstract interface for agent extensions.
-
-    An agent extension extends the agent core functionality.
-    """
-
-    def initialize(self, connection, driver_type):
-        """Perform agent core resource extension initialization.
-
-        :param connection: RPC connection that can be reused by the extension
-                           to define its RPC endpoints
-        :param driver_type: a string that defines the agent type to the
-                            extension. Can be used to choose the right backend
-                            implementation.
-
-        Called after all extensions have been loaded.
-        No port handling will be called before this method.
-        """
-
-    @abc.abstractmethod
-    def handle_port(self, context, data):
-        """Handle agent extension for port.
-
-        This can be called on either create or update, depending on the
-        code flow. Thus, it's this function's responsibility to check what
-        actually changed.
-
-        :param context: rpc context
-        :param data: port data
-        """
-
-    @abc.abstractmethod
-    def delete_port(self, context, data):
-        """Delete port from agent extension.
-
-        :param context: rpc context
-        :param data: port data
-        """
diff --git a/neutron/agent/l2/extensions/__init__.py b/neutron/agent/l2/extensions/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/l2/extensions/manager.py b/neutron/agent/l2/extensions/manager.py
deleted file mode 100644 (file)
index 2f0436d..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) 2015 Mellanox Technologies, Ltd
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-import stevedore
-
-from neutron._i18n import _, _LE, _LI
-
-LOG = log.getLogger(__name__)
-
-
-L2_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l2.extensions'
-L2_AGENT_EXT_MANAGER_OPTS = [
-    cfg.ListOpt('extensions',
-                default=[],
-                help=_('Extensions list to use')),
-]
-
-
-def register_opts(conf):
-    conf.register_opts(L2_AGENT_EXT_MANAGER_OPTS, 'agent')
-
-
-class AgentExtensionsManager(stevedore.named.NamedExtensionManager):
-    """Manage agent extensions."""
-
-    def __init__(self, conf):
-        super(AgentExtensionsManager, self).__init__(
-            L2_AGENT_EXT_MANAGER_NAMESPACE, conf.agent.extensions,
-            invoke_on_load=True, name_order=True)
-        LOG.info(_LI("Loaded agent extensions: %s"), self.names())
-
-    def initialize(self, connection, driver_type):
-        """Initialize enabled L2 agent extensions.
-
-        :param connection: RPC connection that can be reused by extensions to
-                           define their RPC endpoints
-        :param driver_type: a string that defines the agent type to the
-                            extension. Can be used by the extension to choose
-                            the right backend implementation.
-        """
-        # Initialize each agent extension in the list.
-        for extension in self:
-            LOG.info(_LI("Initializing agent extension '%s'"), extension.name)
-            extension.obj.initialize(connection, driver_type)
-
-    def handle_port(self, context, data):
-        """Notify all agent extensions to handle port."""
-        for extension in self:
-            try:
-                extension.obj.handle_port(context, data)
-            # TODO(QoS) add agent extensions exception and catch them here
-            except AttributeError:
-                LOG.exception(
-                    _LE("Agent Extension '%(name)s' failed "
-                        "while handling port update"),
-                    {'name': extension.name}
-                )
-
-    def delete_port(self, context, data):
-        """Notify all agent extensions to delete port."""
-        for extension in self:
-            try:
-                extension.obj.delete_port(context, data)
-            # TODO(QoS) add agent extensions exception and catch them here
-            # instead of AttributeError
-            except AttributeError:
-                LOG.exception(
-                    _LE("Agent Extension '%(name)s' failed "
-                        "while handling port deletion"),
-                    {'name': extension.name}
-                )
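-
-# NOTE: illustrative sketch (editor's addition, not part of the original
-# module). Extensions are discovered through stevedore in the
-# 'neutron.agent.l2.extensions' entry-point namespace and enabled via the
-# [agent] extensions option. A providing package would register, e.g.:
-#
-#     [entry_points]
-#     neutron.agent.l2.extensions =
-#         qos = neutron.agent.l2.extensions.qos:QosAgentExtension
-#
-# and the agent config would then enable it with:
-#
-#     [agent]
-#     extensions = qos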
diff --git a/neutron/agent/l2/extensions/qos.py b/neutron/agent/l2/extensions/qos.py
deleted file mode 100644 (file)
index 20ec764..0000000
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright (c) 2015 Mellanox Technologies, Ltd
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import collections
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-import six
-
-from neutron._i18n import _LW, _LI
-from neutron.agent.l2 import agent_extension
-from neutron.api.rpc.callbacks.consumer import registry
-from neutron.api.rpc.callbacks import events
-from neutron.api.rpc.callbacks import resources
-from neutron.api.rpc.handlers import resources_rpc
-from neutron.common import exceptions
-from neutron import manager
-
-LOG = logging.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class QosAgentDriver(object):
-    """Defines stable abstract interface for QoS Agent Driver.
-
-    QoS Agent driver defines the interface to be implemented by Agent
-    for applying QoS Rules on a port.
-    """
-
-    # Each QoS driver should define the set of rule types that it supports,
-    # and corresponding handlers that have the following names:
-    #
-    # create_<type>
-    # update_<type>
-    # delete_<type>
-    #
-    # where <type> is one of the types listed in SUPPORTED_RULES
-    SUPPORTED_RULES = set()
-
-    @abc.abstractmethod
-    def initialize(self):
-        """Perform QoS agent driver initialization.
-        """
-
-    def create(self, port, qos_policy):
-        """Apply QoS rules on port for the first time.
-
-        :param port: port object.
-        :param qos_policy: the QoS policy to be applied on port.
-        """
-        self._handle_update_create_rules('create', port, qos_policy)
-
-    def update(self, port, qos_policy):
-        """Apply QoS rules on port.
-
-        :param port: port object.
-        :param qos_policy: the QoS policy to be applied on port.
-        """
-        self._handle_update_create_rules('update', port, qos_policy)
-
-    def delete(self, port, qos_policy=None):
-        """Remove QoS rules from port.
-
-        :param port: port object.
-        :param qos_policy: the QoS policy to be removed from port.
-        """
-        if qos_policy is None:
-            rule_types = self.SUPPORTED_RULES
-        else:
-            rule_types = set(
-                [rule.rule_type
-                 for rule in self._iterate_rules(qos_policy.rules)])
-
-        for rule_type in rule_types:
-            self._handle_rule_delete(port, rule_type)
-
-    def _iterate_rules(self, rules):
-        for rule in rules:
-            rule_type = rule.rule_type
-            if rule_type in self.SUPPORTED_RULES:
-                yield rule
-            else:
-                LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
-                                '%(rule_type)s; skipping'),
-                            {'rule_id': rule.id, 'rule_type': rule_type})
-
-    def _handle_rule_delete(self, port, rule_type):
-        handler_name = "".join(("delete_", rule_type))
-        handler = getattr(self, handler_name)
-        handler(port)
-
-    def _handle_update_create_rules(self, action, port, qos_policy):
-        for rule in self._iterate_rules(qos_policy.rules):
-            if rule.should_apply_to_port(port):
-                handler_name = "".join((action, "_", rule.rule_type))
-                handler = getattr(self, handler_name)
-                handler(port, rule)
-            else:
-                LOG.debug("Port %(port)s excluded from QoS rule %(rule)s",
-                          {'port': port, 'rule': rule.id})
-
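To make the naming convention above concrete, a minimal sketch of a driver supporting one rule type (the type name 'bandwidth_limit' and the rule attribute max_kbps are assumptions for illustration):

    class SketchQosDriver(QosAgentDriver):
        """Hypothetical driver illustrating the handler naming scheme."""

        SUPPORTED_RULES = {'bandwidth_limit'}

        def initialize(self):
            pass

        # _handle_update_create_rules builds '<action>_<rule_type>' and
        # dispatches via getattr(), so these names must match exactly.
        def create_bandwidth_limit(self, port, rule):
            print('create', port['port_id'], rule.max_kbps)

        def update_bandwidth_limit(self, port, rule):
            print('update', port['port_id'], rule.max_kbps)

        # _handle_rule_delete passes only the port, not a rule object.
        def delete_bandwidth_limit(self, port):
            print('delete', port['port_id'])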
-
-class PortPolicyMap(object):
-    def __init__(self):
-        # we cannot use a dict of sets here because port dicts are not hashable
-        self.qos_policy_ports = collections.defaultdict(dict)
-        self.known_policies = {}
-        self.port_policies = {}
-
-    def get_ports(self, policy):
-        return self.qos_policy_ports[policy.id].values()
-
-    def get_policy(self, policy_id):
-        return self.known_policies.get(policy_id)
-
-    def update_policy(self, policy):
-        self.known_policies[policy.id] = policy
-
-    def has_policy_changed(self, port, policy_id):
-        return self.port_policies.get(port['port_id']) != policy_id
-
-    def get_port_policy(self, port):
-        policy_id = self.port_policies.get(port['port_id'])
-        if policy_id:
-            return self.get_policy(policy_id)
-
-    def set_port_policy(self, port, policy):
-        """Attach a port to policy and return any previous policy on port."""
-        port_id = port['port_id']
-        old_policy = self.get_port_policy(port)
-        self.known_policies[policy.id] = policy
-        self.port_policies[port_id] = policy.id
-        self.qos_policy_ports[policy.id][port_id] = port
-        if old_policy and old_policy.id != policy.id:
-            del self.qos_policy_ports[old_policy.id][port_id]
-        return old_policy
-
-    def clean_by_port(self, port):
-        """Detach port from policy and cleanup data we don't need anymore."""
-        port_id = port['port_id']
-        if port_id in self.port_policies:
-            del self.port_policies[port_id]
-            for qos_policy_id, port_dict in self.qos_policy_ports.items():
-                if port_id in port_dict:
-                    del port_dict[port_id]
-                    if not port_dict:
-                        self._clean_policy_info(qos_policy_id)
-                    return
-        raise exceptions.PortNotFound(port_id=port['port_id'])
-
-    def _clean_policy_info(self, qos_policy_id):
-        del self.qos_policy_ports[qos_policy_id]
-        del self.known_policies[qos_policy_id]
-
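A short usage sketch of PortPolicyMap above (the port dict and the policy objects, which only need an 'id' attribute here, are made up for illustration):

    pmap = PortPolicyMap()
    port = {'port_id': 'port-1'}

    # set_port_policy returns the previous policy, if any, so the caller
    # can tear its rules down before applying the new policy.
    assert pmap.set_port_policy(port, policy_a) is None
    assert pmap.set_port_policy(port, policy_b) is policy_a

    assert not pmap.has_policy_changed(port, policy_b.id)
    assert list(pmap.get_ports(policy_b)) == [port]

    # clean_by_port drops the port and, once a policy has no ports left,
    # forgets the policy too; unknown ports raise PortNotFound.
    pmap.clean_by_port(port)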
-
-class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
-    SUPPORTED_RESOURCES = [resources.QOS_POLICY]
-
-    def initialize(self, connection, driver_type):
-        """Perform Agent Extension initialization.
-
-        """
-        self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
-        self.qos_driver = manager.NeutronManager.load_class_for_provider(
-            'neutron.qos.agent_drivers', driver_type)()
-        self.qos_driver.initialize()
-
-        self.policy_map = PortPolicyMap()
-
-        registry.subscribe(self._handle_notification, resources.QOS_POLICY)
-        self._register_rpc_consumers(connection)
-
-    def _register_rpc_consumers(self, connection):
-        endpoints = [resources_rpc.ResourcesPushRpcCallback()]
-        for resource_type in self.SUPPORTED_RESOURCES:
-            # we assume that neutron-server always broadcasts the latest
-            # version known to the agent
-            topic = resources_rpc.resource_type_versioned_topic(resource_type)
-            connection.create_consumer(topic, endpoints, fanout=True)
-
-    @lockutils.synchronized('qos-port')
-    def _handle_notification(self, qos_policy, event_type):
-        # The server does not allow removing a policy that is attached to any
-        # port, so we ignore DELETED events. Also, if we receive a CREATED
-        # event for a policy, no ports are attached to it yet. That is why we
-        # are only interested in UPDATED events.
-        if event_type == events.UPDATED:
-            self._process_update_policy(qos_policy)
-
-    @lockutils.synchronized('qos-port')
-    def handle_port(self, context, port):
-        """Handle agent QoS extension for port.
-
-        This method applies a new policy to a port using the QoS driver.
-        Update events are handled in _handle_notification.
-        """
-        port_id = port['port_id']
-        port_qos_policy_id = port.get('qos_policy_id')
-        network_qos_policy_id = port.get('network_qos_policy_id')
-        qos_policy_id = port_qos_policy_id or network_qos_policy_id
-        if qos_policy_id is None:
-            self._process_reset_port(port)
-            return
-
-        if not self.policy_map.has_policy_changed(port, qos_policy_id):
-            return
-
-        qos_policy = self.resource_rpc.pull(
-            context, resources.QOS_POLICY, qos_policy_id)
-        if qos_policy is None:
-            LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port "
-                         "%(port_id)s is not available on server, "
-                         "it has been deleted. Skipping."),
-                     {'qos_policy_id': qos_policy_id, 'port_id': port_id})
-            self._process_reset_port(port)
-        else:
-            old_qos_policy = self.policy_map.set_port_policy(port, qos_policy)
-            if old_qos_policy:
-                self.qos_driver.delete(port, old_qos_policy)
-                self.qos_driver.update(port, qos_policy)
-            else:
-                self.qos_driver.create(port, qos_policy)
-
-    def delete_port(self, context, port):
-        self._process_reset_port(port)
-
-    def _policy_rules_modified(self, old_policy, policy):
-        return not (len(old_policy.rules) == len(policy.rules) and
-                    all(i in old_policy.rules for i in policy.rules))
-
-    def _process_update_policy(self, qos_policy):
-        old_qos_policy = self.policy_map.get_policy(qos_policy.id)
-        if old_qos_policy:
-            if self._policy_rules_modified(old_qos_policy, qos_policy):
-                for port in self.policy_map.get_ports(qos_policy):
-                    #NOTE(QoS): for now, just reflush the rules on the port.
-                    #           Later, we may want to apply the difference
-                    #           between the old and new rule lists.
-                    self.qos_driver.delete(port, old_qos_policy)
-                    self.qos_driver.update(port, qos_policy)
-            self.policy_map.update_policy(qos_policy)
-
-    def _process_reset_port(self, port):
-        try:
-            self.policy_map.clean_by_port(port)
-            self.qos_driver.delete(port)
-        except exceptions.PortNotFound:
-            LOG.info(_LI("QoS extension did have no information about the "
-                         "port %s that we were trying to reset"),
-                     port['port_id'])
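The policy-selection logic in handle_port above reduces to one rule; a standalone restatement (the dict keys are the same ones the agent payloads above use):

    def effective_policy_id(port):
        # A policy attached directly to the port wins over one inherited
        # from its network; None means QoS must be reset on the port.
        return port.get('qos_policy_id') or port.get('network_qos_policy_id')

    assert effective_policy_id({'qos_policy_id': 'p1',
                                'network_qos_policy_id': 'n1'}) == 'p1'
    assert effective_policy_id({'qos_policy_id': None,
                                'network_qos_policy_id': 'n1'}) == 'n1'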
diff --git a/neutron/agent/l3/__init__.py b/neutron/agent/l3/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py
deleted file mode 100644 (file)
index 7033379..0000000
+++ /dev/null
@@ -1,704 +0,0 @@
-# Copyright 2012 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import eventlet
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_service import loopingcall
-from oslo_service import periodic_task
-from oslo_utils import excutils
-from oslo_utils import timeutils
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.agent.common import utils as common_utils
-from neutron.agent.l3 import dvr
-from neutron.agent.l3 import dvr_edge_ha_router
-from neutron.agent.l3 import dvr_edge_router as dvr_router
-from neutron.agent.l3 import dvr_local_router as dvr_local_router
-from neutron.agent.l3 import ha
-from neutron.agent.l3 import ha_router
-from neutron.agent.l3 import legacy_router
-from neutron.agent.l3 import namespace_manager
-from neutron.agent.l3 import namespaces
-from neutron.agent.l3 import router_processing_queue as queue
-from neutron.agent.linux import external_process
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import pd
-from neutron.agent.metadata import driver as metadata_driver
-from neutron.agent import rpc as agent_rpc
-from neutron.callbacks import events
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants as l3_constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron import context as n_context
-from neutron import manager
-
-try:
-    from neutron_fwaas.services.firewall.agents.l3reference \
-        import firewall_l3_agent
-except Exception:
-    # TODO(dougw) - REMOVE THIS FROM NEUTRON; during l3_agent refactor only
-    from neutron.services.firewall.agents.l3reference import firewall_l3_agent
-
-LOG = logging.getLogger(__name__)
-# TODO(Carl) Following constants retained to increase SNR during refactoring
-NS_PREFIX = namespaces.NS_PREFIX
-INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
-EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
-
-# Number of routers to fetch from server at a time on resync.
-# Needed to reduce load on server side and to speed up resync on agent side.
-SYNC_ROUTERS_MAX_CHUNK_SIZE = 256
-SYNC_ROUTERS_MIN_CHUNK_SIZE = 32
-
-
-class L3PluginApi(object):
-    """Agent side of the l3 agent RPC API.
-
-    API version history:
-        1.0 - Initial version.
-        1.1 - Floating IP operational status updates
-        1.2 - DVR support: new L3 plugin methods added.
-              - get_ports_by_subnet
-              - get_agent_gateway_port
-              Needed by the agent when operating in DVR/DVR_SNAT mode
-        1.3 - Get the list of activated services
-        1.4 - Added L3 HA update_router_state. This method was later reworked
-              into update_ha_routers_states
-        1.5 - Added update_ha_routers_states
-        1.6 - Added process_prefix_update
-        1.7 - DVR support: new L3 plugin methods added.
-              - delete_agent_gateway_port
-        1.8 - Added address scope information
-        1.9 - Added get_router_ids
-    """
-
-    def __init__(self, topic, host):
-        self.host = host
-        target = oslo_messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def get_routers(self, context, router_ids=None):
-        """Make a remote process call to retrieve the sync data for routers."""
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'sync_routers', host=self.host,
-                          router_ids=router_ids)
-
-    def get_router_ids(self, context):
-        """Make a remote process call to retrieve scheduled routers ids."""
-        cctxt = self.client.prepare(version='1.9')
-        return cctxt.call(context, 'get_router_ids', host=self.host)
-
-    def get_external_network_id(self, context):
-        """Make a remote process call to retrieve the external network id.
-
-        @raise oslo_messaging.RemoteError: with TooManyExternalNetworks as
-                                           exc_type if there is more than one
-                                           external network
-        """
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'get_external_network_id', host=self.host)
-
-    def update_floatingip_statuses(self, context, router_id, fip_statuses):
-        """Call the plugin update floating IPs's operational status."""
-        cctxt = self.client.prepare(version='1.1')
-        return cctxt.call(context, 'update_floatingip_statuses',
-                          router_id=router_id, fip_statuses=fip_statuses)
-
-    def get_ports_by_subnet(self, context, subnet_id):
-        """Retrieve ports by subnet id."""
-        cctxt = self.client.prepare(version='1.2')
-        return cctxt.call(context, 'get_ports_by_subnet', host=self.host,
-                          subnet_id=subnet_id)
-
-    def get_agent_gateway_port(self, context, fip_net):
-        """Get or create an agent_gateway_port."""
-        cctxt = self.client.prepare(version='1.2')
-        return cctxt.call(context, 'get_agent_gateway_port',
-                          network_id=fip_net, host=self.host)
-
-    def get_service_plugin_list(self, context):
-        """Make a call to get the list of activated services."""
-        cctxt = self.client.prepare(version='1.3')
-        return cctxt.call(context, 'get_service_plugin_list')
-
-    def update_ha_routers_states(self, context, states):
-        """Update HA routers states."""
-        cctxt = self.client.prepare(version='1.5')
-        return cctxt.call(context, 'update_ha_routers_states',
-                          host=self.host, states=states)
-
-    def process_prefix_update(self, context, prefix_update):
-        """Process prefix update whenever prefixes get changed."""
-        cctxt = self.client.prepare(version='1.6')
-        return cctxt.call(context, 'process_prefix_update',
-                          subnets=prefix_update)
-
-    def delete_agent_gateway_port(self, context, fip_net):
-        """Delete Floatingip_agent_gateway_port."""
-        cctxt = self.client.prepare(version='1.7')
-        return cctxt.call(context, 'delete_agent_gateway_port',
-                          host=self.host, network_id=fip_net)
-
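The version pinning used throughout L3PluginApi above is plain oslo.messaging; a reduced sketch of the pattern (topic and host values are placeholders):

    import oslo_messaging

    from neutron.common import rpc as n_rpc

    target = oslo_messaging.Target(topic='some-topic', version='1.0')
    client = n_rpc.get_client(target)

    # prepare() pins the version cap for one call; a server older than
    # the requested version fails the call with UnsupportedVersion.
    cctxt = client.prepare(version='1.9')
    # cctxt.call(context, 'get_router_ids', host='agent-host')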
-
-class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
-                 ha.AgentMixin,
-                 dvr.AgentMixin,
-                 manager.Manager):
-    """Manager for L3NatAgent
-
-        API version history:
-        1.0 initial Version
-        1.1 changed the type of the routers parameter
-            to the routers_updated method.
-            It was previously a list of routers in dict format.
-            It is now a list of router IDs only.
-            Per rpc versioning rules,  it is backwards compatible.
-        1.2 - DVR support: new L3 agent methods added.
-              - add_arp_entry
-              - del_arp_entry
-        1.3 - fipnamespace_delete_on_ext_net - to delete fipnamespace
-              after the external network is removed
-              Needed by the L3 service when dealing with DVR
-    """
-    target = oslo_messaging.Target(version='1.3')
-
-    def __init__(self, host, conf=None):
-        if conf:
-            self.conf = conf
-        else:
-            self.conf = cfg.CONF
-        self.router_info = {}
-
-        self._check_config_params()
-
-        self.process_monitor = external_process.ProcessMonitor(
-            config=self.conf,
-            resource_type='router')
-
-        self.driver = common_utils.load_interface_driver(self.conf)
-
-        self.context = n_context.get_admin_context_without_session()
-        self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
-        self.fullsync = True
-        self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE
-
-        # Get the list of service plugins from Neutron Server
-        # This is the first place where we contact neutron-server on startup
-        # so retry in case it's not ready to respond.
-        retry_count = 5
-        while True:
-            retry_count = retry_count - 1
-            try:
-                self.neutron_service_plugins = (
-                    self.plugin_rpc.get_service_plugin_list(self.context))
-            except oslo_messaging.RemoteError as e:
-                with excutils.save_and_reraise_exception() as ctx:
-                    ctx.reraise = False
-                    LOG.warning(_LW('l3-agent cannot check the service '
-                                    'plugins enabled on the neutron server '
-                                    'at startup because of an RPC error. '
-                                    'This happens when the server does not '
-                                    'support this RPC API. If the error is '
-                                    'UnsupportedVersion you can ignore this '
-                                    'warning. Detail message: %s'), e)
-                self.neutron_service_plugins = None
-            except oslo_messaging.MessagingTimeout as e:
-                with excutils.save_and_reraise_exception() as ctx:
-                    if retry_count > 0:
-                        ctx.reraise = False
-                        LOG.warning(_LW('l3-agent cannot check service '
-                                        'plugins enabled on the neutron '
-                                        'server. Retrying. '
-                                        'Detail message: %s'), e)
-                        continue
-            break
-
-        self.metadata_driver = None
-        if self.conf.enable_metadata_proxy:
-            self.metadata_driver = metadata_driver.MetadataDriver(self)
-
-        self.namespaces_manager = namespace_manager.NamespaceManager(
-            self.conf,
-            self.driver,
-            self.metadata_driver)
-
-        self._queue = queue.RouterProcessingQueue()
-        super(L3NATAgent, self).__init__(conf=self.conf)
-
-        self.target_ex_net_id = None
-        self.use_ipv6 = ipv6_utils.is_enabled()
-
-        self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
-                                      self.driver,
-                                      self.plugin_rpc.process_prefix_update,
-                                      self.create_pd_router_update,
-                                      self.conf)
-
-    def _check_config_params(self):
-        """Check items in configuration files.
-
-        Check for required and invalid configuration items.
-        The actual values are not verified for correctness.
-        """
-        if not self.conf.interface_driver:
-            msg = _LE('An interface driver must be specified')
-            LOG.error(msg)
-            raise SystemExit(1)
-
-        if self.conf.ipv6_gateway:
-            # ipv6_gateway configured. Check for valid v6 link-local address.
-            try:
-                msg = _LE("%s used in config as ipv6_gateway is not a valid "
-                          "IPv6 link-local address."),
-                ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
-                if ip_addr.version != 6 or not ip_addr.is_link_local():
-                    LOG.error(msg, self.conf.ipv6_gateway)
-                    raise SystemExit(1)
-            except netaddr.AddrFormatError:
-                LOG.error(msg, self.conf.ipv6_gateway)
-                raise SystemExit(1)
-
-    def _fetch_external_net_id(self, force=False):
-        """Find UUID of single external network for this agent."""
-        if self.conf.gateway_external_network_id:
-            return self.conf.gateway_external_network_id
-
-        # When external_network_bridge is not set, the L3 agent does not
-        # rely on a bridge to handle external networks; bridge_mappings
-        # with provider networks are used instead, so the agent can handle
-        # any external network.
-        if not self.conf.external_network_bridge:
-            return
-
-        if not force and self.target_ex_net_id:
-            return self.target_ex_net_id
-
-        try:
-            self.target_ex_net_id = self.plugin_rpc.get_external_network_id(
-                self.context)
-            return self.target_ex_net_id
-        except oslo_messaging.RemoteError as e:
-            with excutils.save_and_reraise_exception() as ctx:
-                if e.exc_type == 'TooManyExternalNetworks':
-                    ctx.reraise = False
-                    msg = _(
-                        "The 'gateway_external_network_id' option must be "
-                        "configured for this agent as Neutron has more than "
-                        "one external network.")
-                    raise Exception(msg)
-
-    def _create_router(self, router_id, router):
-        args = []
-        kwargs = {
-            'router_id': router_id,
-            'router': router,
-            'use_ipv6': self.use_ipv6,
-            'agent_conf': self.conf,
-            'interface_driver': self.driver,
-        }
-
-        if router.get('distributed'):
-            kwargs['agent'] = self
-            kwargs['host'] = self.host
-
-        if router.get('distributed') and router.get('ha'):
-            if self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT:
-                kwargs['state_change_callback'] = self.enqueue_state_change
-                return dvr_edge_ha_router.DvrEdgeHaRouter(*args, **kwargs)
-
-        if router.get('distributed'):
-            if self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT:
-                return dvr_router.DvrEdgeRouter(*args, **kwargs)
-            else:
-                return dvr_local_router.DvrLocalRouter(*args, **kwargs)
-
-        if router.get('ha'):
-            kwargs['state_change_callback'] = self.enqueue_state_change
-            return ha_router.HaRouter(*args, **kwargs)
-
-        return legacy_router.LegacyRouter(*args, **kwargs)
-
-    def _router_added(self, router_id, router):
-        ri = self._create_router(router_id, router)
-        registry.notify(resources.ROUTER, events.BEFORE_CREATE,
-                        self, router=ri)
-
-        self.router_info[router_id] = ri
-
-        ri.initialize(self.process_monitor)
-
-        # TODO(Carl) This is a hook in to fwaas.  It should be cleaned up.
-        self.process_router_add(ri)
-
-    def _safe_router_removed(self, router_id):
-        """Try to delete a router and return True if successful."""
-
-        try:
-            self._router_removed(router_id)
-        except Exception:
-            LOG.exception(_LE('Error while deleting router %s'), router_id)
-            return False
-        else:
-            return True
-
-    def _router_removed(self, router_id):
-        ri = self.router_info.get(router_id)
-        if ri is None:
-            LOG.warn(_LW("Info for router %s was not found. "
-                         "Performing router cleanup"), router_id)
-            self.namespaces_manager.ensure_router_cleanup(router_id)
-            return
-
-        registry.notify(resources.ROUTER, events.BEFORE_DELETE,
-                        self, router=ri)
-
-        ri.delete(self)
-        del self.router_info[router_id]
-
-        registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri)
-
-    def router_deleted(self, context, router_id):
-        """Deal with router deletion RPC message."""
-        LOG.debug('Got router deleted notification for %s', router_id)
-        update = queue.RouterUpdate(router_id,
-                                    queue.PRIORITY_RPC,
-                                    action=queue.DELETE_ROUTER)
-        self._queue.add(update)
-
-    def routers_updated(self, context, routers):
-        """Deal with routers modification and creation RPC message."""
-        LOG.debug('Got routers updated notification: %s', routers)
-        if routers:
-            # This is needed for backward compatibility
-            if isinstance(routers[0], dict):
-                routers = [router['id'] for router in routers]
-            for id in routers:
-                update = queue.RouterUpdate(id, queue.PRIORITY_RPC)
-                self._queue.add(update)
-
-    def router_removed_from_agent(self, context, payload):
-        LOG.debug('Got router removed from agent: %r', payload)
-        router_id = payload['router_id']
-        update = queue.RouterUpdate(router_id,
-                                    queue.PRIORITY_RPC,
-                                    action=queue.DELETE_ROUTER)
-        self._queue.add(update)
-
-    def router_added_to_agent(self, context, payload):
-        LOG.debug('Got router added to agent: %r', payload)
-        self.routers_updated(context, payload)
-
-    def _process_router_if_compatible(self, router):
-        if (self.conf.external_network_bridge and
-            not ip_lib.device_exists(self.conf.external_network_bridge)):
-            LOG.error(_LE("The external network bridge '%s' does not exist"),
-                      self.conf.external_network_bridge)
-            return
-
-        if self.conf.router_id and router['id'] != self.conf.router_id:
-            raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])
-
-        # Either ex_net_id or handle_internal_only_routers must be set
-        ex_net_id = (router['external_gateway_info'] or {}).get('network_id')
-        if not ex_net_id and not self.conf.handle_internal_only_routers:
-            raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])
-
-        # If target_ex_net_id and ex_net_id are set they must be equal
-        target_ex_net_id = self._fetch_external_net_id()
-        if (target_ex_net_id and ex_net_id and ex_net_id != target_ex_net_id):
-            # Double check that our single external_net_id has not changed
-            # by forcing a check by RPC.
-            if ex_net_id != self._fetch_external_net_id(force=True):
-                raise n_exc.RouterNotCompatibleWithAgent(
-                    router_id=router['id'])
-
-        if router['id'] not in self.router_info:
-            self._process_added_router(router)
-        else:
-            self._process_updated_router(router)
-
-    def _process_added_router(self, router):
-        self._router_added(router['id'], router)
-        ri = self.router_info[router['id']]
-        ri.router = router
-        ri.process(self)
-        registry.notify(resources.ROUTER, events.AFTER_CREATE, self, router=ri)
-
-    def _process_updated_router(self, router):
-        ri = self.router_info[router['id']]
-        ri.router = router
-        registry.notify(resources.ROUTER, events.BEFORE_UPDATE,
-                        self, router=ri)
-        ri.process(self)
-        registry.notify(resources.ROUTER, events.AFTER_UPDATE, self, router=ri)
-
-    def _resync_router(self, router_update,
-                       priority=queue.PRIORITY_SYNC_ROUTERS_TASK):
-        router_update.timestamp = timeutils.utcnow()
-        router_update.priority = priority
-        router_update.router = None  # Force the agent to resync the router
-        self._queue.add(router_update)
-
-    def _process_router_update(self):
-        for rp, update in self._queue.each_update_to_next_router():
-            LOG.debug("Starting router update for %s, action %s, priority %s",
-                      update.id, update.action, update.priority)
-            if update.action == queue.PD_UPDATE:
-                self.pd.process_prefix_update()
-                LOG.debug("Finished a router update for %s", update.id)
-                continue
-            router = update.router
-            if update.action != queue.DELETE_ROUTER and not router:
-                try:
-                    update.timestamp = timeutils.utcnow()
-                    routers = self.plugin_rpc.get_routers(self.context,
-                                                          [update.id])
-                except Exception:
-                    msg = _LE("Failed to fetch router information for '%s'")
-                    LOG.exception(msg, update.id)
-                    self._resync_router(update)
-                    continue
-
-                if routers:
-                    router = routers[0]
-
-            if not router:
-                removed = self._safe_router_removed(update.id)
-                if not removed:
-                    self._resync_router(update)
-                else:
-                    # Update the timestamp of the removed router in case
-                    # older events for the same router are still in the
-                    # processing queue (e.g. events from a fullsync), to
-                    # prevent re-creation of the deleted router.
-                    rp.fetched_and_processed(update.timestamp)
-                LOG.debug("Finished a router update for %s", update.id)
-                continue
-
-            try:
-                self._process_router_if_compatible(router)
-            except n_exc.RouterNotCompatibleWithAgent as e:
-                LOG.exception(e.msg)
-                # Was the router previously handled by this agent?
-                if router['id'] in self.router_info:
-                    LOG.error(_LE("Removing incompatible router '%s'"),
-                              router['id'])
-                    self._safe_router_removed(router['id'])
-            except Exception:
-                msg = _LE("Failed to process compatible router '%s'")
-                LOG.exception(msg, update.id)
-                self._resync_router(update)
-                continue
-
-            LOG.debug("Finished a router update for %s", update.id)
-            rp.fetched_and_processed(update.timestamp)
-
-    def _process_routers_loop(self):
-        LOG.debug("Starting _process_routers_loop")
-        pool = eventlet.GreenPool(size=8)
-        while True:
-            pool.spawn_n(self._process_router_update)
-
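The loop above leans on GreenPool backpressure: spawn_n() blocks once all eight green threads are busy, so at most eight router updates run concurrently. A standalone sketch of the same pattern, with a stand-in worker:

    import eventlet

    def worker(n):
        eventlet.sleep(0.1)  # stand-in for processing one router update
        print('done', n)

    pool = eventlet.GreenPool(size=8)
    for n in range(20):
        pool.spawn_n(worker, n)  # blocks while the pool is saturated
    pool.waitall()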
-    # NOTE(kevinbenton): this is set to 1 second because the actual interval
-    # is controlled by a FixedIntervalLoopingCall in neutron/service.py that
-    # is responsible for task execution.
-    @periodic_task.periodic_task(spacing=1, run_immediately=True)
-    def periodic_sync_routers_task(self, context):
-        self.process_services_sync(context)
-        if not self.fullsync:
-            return
-        LOG.debug("Starting fullsync periodic_sync_routers_task")
-
-        # self.fullsync is True at this point. If an exception -- caught or
-        # uncaught -- prevents setting it to False below then the next call
-        # to periodic_sync_routers_task will re-enter this code and try again.
-
-        # Context manager self.namespaces_manager captures a picture of
-        # namespaces *before* fetch_and_sync_all_routers fetches the full list
-        # of routers from the database.  This is important to correctly
-        # identify stale ones.
-
-        try:
-            with self.namespaces_manager as ns_manager:
-                self.fetch_and_sync_all_routers(context, ns_manager)
-        except n_exc.AbortSyncRouters:
-            self.fullsync = True
-
-    def fetch_and_sync_all_routers(self, context, ns_manager):
-        prev_router_ids = set(self.router_info)
-        curr_router_ids = set()
-        timestamp = timeutils.utcnow()
-
-        try:
-            router_ids = ([self.conf.router_id] if self.conf.router_id else
-                          self.plugin_rpc.get_router_ids(context))
-            # fetch routers by chunks to reduce the load on server and to
-            # start router processing earlier
-            for i in range(0, len(router_ids), self.sync_routers_chunk_size):
-                routers = self.plugin_rpc.get_routers(
-                    context, router_ids[i:i + self.sync_routers_chunk_size])
-                LOG.debug('Processing: %r', routers)
-                for r in routers:
-                    curr_router_ids.add(r['id'])
-                    ns_manager.keep_router(r['id'])
-                    if r.get('distributed'):
-                        # need to keep fip namespaces as well
-                        ext_net_id = (r['external_gateway_info'] or {}).get(
-                            'network_id')
-                        if ext_net_id:
-                            ns_manager.keep_ext_net(ext_net_id)
-                    update = queue.RouterUpdate(
-                        r['id'],
-                        queue.PRIORITY_SYNC_ROUTERS_TASK,
-                        router=r,
-                        timestamp=timestamp)
-                    self._queue.add(update)
-        except oslo_messaging.MessagingTimeout:
-            if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:
-                self.sync_routers_chunk_size = max(
-                    self.sync_routers_chunk_size / 2,
-                    SYNC_ROUTERS_MIN_CHUNK_SIZE)
-                LOG.error(_LE('Server failed to return info for routers in '
-                              'required time, decreasing chunk size to: %s'),
-                          self.sync_routers_chunk_size)
-            else:
-                LOG.error(_LE('Server failed to return info for routers in '
-                              'required time even with min chunk size: %s. '
-                              'It might be under very high load or '
-                              'just inoperable'),
-                          self.sync_routers_chunk_size)
-            raise
-        except oslo_messaging.MessagingException:
-            LOG.exception(_LE("Failed synchronizing routers due to RPC error"))
-            raise n_exc.AbortSyncRouters()
-
-        self.fullsync = False
-        LOG.debug("periodic_sync_routers_task successfully completed")
-        # adjust chunk size after successful sync
-        if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE:
-            self.sync_routers_chunk_size = min(
-                self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
-                SYNC_ROUTERS_MAX_CHUNK_SIZE)
-
-        # Delete routers that have disappeared since the last sync
-        for router_id in prev_router_ids - curr_router_ids:
-            ns_manager.keep_router(router_id)
-            update = queue.RouterUpdate(router_id,
-                                        queue.PRIORITY_SYNC_ROUTERS_TASK,
-                                        timestamp=timestamp,
-                                        action=queue.DELETE_ROUTER)
-            self._queue.add(update)
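The chunk-size adaptation in fetch_and_sync_all_routers above is a simple halve-on-timeout, creep-up-on-success scheme; isolated, it behaves like this sketch (constants as defined near the top of this module):

    MAX_CHUNK, MIN_CHUNK = 256, 32

    def shrink(chunk):
        # On MessagingTimeout: halve, but never drop below the minimum.
        return max(chunk // 2, MIN_CHUNK)

    def grow(chunk):
        # After a successful sync: grow one minimum-step at a time,
        # capped at the maximum.
        return min(chunk + MIN_CHUNK, MAX_CHUNK)

    assert shrink(256) == 128 and shrink(40) == 32
    assert grow(128) == 160 and grow(256) == 256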
-
-    def after_start(self):
-        # Note: FWaaS' vArmourL3NATAgent is a subclass of L3NATAgent and
-        # calls this method, so removing this after_start() would break
-        # vArmourL3NATAgent. We need to find out whether vArmourL3NATAgent
-        # can use L3NATAgentWithStateReport as its base class instead of
-        # L3NATAgent.
-        eventlet.spawn_n(self._process_routers_loop)
-        LOG.info(_LI("L3 agent started"))
-
-    def create_pd_router_update(self):
-        router_id = None
-        update = queue.RouterUpdate(router_id,
-                                    queue.PRIORITY_PD_UPDATE,
-                                    timestamp=timeutils.utcnow(),
-                                    action=queue.PD_UPDATE)
-        self._queue.add(update)
-
-
-class L3NATAgentWithStateReport(L3NATAgent):
-
-    def __init__(self, host, conf=None):
-        super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
-        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
-        self.agent_state = {
-            'binary': 'neutron-l3-agent',
-            'host': host,
-            'availability_zone': self.conf.AGENT.availability_zone,
-            'topic': topics.L3_AGENT,
-            'configurations': {
-                'agent_mode': self.conf.agent_mode,
-                'router_id': self.conf.router_id,
-                'handle_internal_only_routers':
-                self.conf.handle_internal_only_routers,
-                'external_network_bridge': self.conf.external_network_bridge,
-                'gateway_external_network_id':
-                self.conf.gateway_external_network_id,
-                'interface_driver': self.conf.interface_driver,
-                'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
-            'start_flag': True,
-            'agent_type': l3_constants.AGENT_TYPE_L3}
-        report_interval = self.conf.AGENT.report_interval
-        if report_interval:
-            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
-                self._report_state)
-            self.heartbeat.start(interval=report_interval)
-
-    def _report_state(self):
-        num_ex_gw_ports = 0
-        num_interfaces = 0
-        num_floating_ips = 0
-        router_infos = self.router_info.values()
-        num_routers = len(router_infos)
-        for ri in router_infos:
-            ex_gw_port = ri.get_ex_gw_port()
-            if ex_gw_port:
-                num_ex_gw_ports += 1
-            num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
-                                                []))
-            num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
-                                                  []))
-        configurations = self.agent_state['configurations']
-        configurations['routers'] = num_routers
-        configurations['ex_gw_ports'] = num_ex_gw_ports
-        configurations['interfaces'] = num_interfaces
-        configurations['floating_ips'] = num_floating_ips
-        try:
-            agent_status = self.state_rpc.report_state(self.context,
-                                                       self.agent_state,
-                                                       True)
-            if agent_status == l3_constants.AGENT_REVIVED:
-                LOG.info(_LI('Agent has just been revived. '
-                             'Doing a full sync.'))
-                self.fullsync = True
-            self.agent_state.pop('start_flag', None)
-        except AttributeError:
-            # This means the server does not support report_state
-            LOG.warn(_LW("Neutron server does not support state report. "
-                         "State report for this agent will be disabled."))
-            self.heartbeat.stop()
-            return
-        except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
-
-    def after_start(self):
-        eventlet.spawn_n(self._process_routers_loop)
-        LOG.info(_LI("L3 agent started"))
-        # Do the report state before we do the first full sync.
-        self._report_state()
-
-        self.pd.after_start()
-
-    def agent_updated(self, context, payload):
-        """Handle the agent_updated notification event."""
-        self.fullsync = True
-        LOG.info(_LI("agent_updated by server side %s!"), payload)
diff --git a/neutron/agent/l3/config.py b/neutron/agent/l3/config.py
deleted file mode 100644 (file)
index c54f7f6..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.agent.common import config
-from neutron.common import constants
-
-
-OPTS = [
-    cfg.StrOpt('agent_mode', default=constants.L3_AGENT_MODE_LEGACY,
-               choices=(constants.L3_AGENT_MODE_DVR,
-                        constants.L3_AGENT_MODE_DVR_SNAT,
-                        constants.L3_AGENT_MODE_LEGACY),
-               help=_("The working mode for the agent. Allowed modes are: "
-                      "'legacy' - this preserves the existing behavior "
-                      "where the L3 agent is deployed on a centralized "
-                      "networking node to provide L3 services like DNAT, "
-                      "and SNAT. Use this mode if you do not want to "
-                      "adopt DVR. 'dvr' - this mode enables DVR "
-                      "functionality and must be used for an L3 agent "
-                      "that runs on a compute host. 'dvr_snat' - this "
-                      "enables centralized SNAT support in conjunction "
-                      "with DVR.  This mode must be used for an L3 agent "
-                      "running on a centralized node (or in single-host "
-                      "deployments, e.g. devstack)")),
-    cfg.PortOpt('metadata_port',
-                default=9697,
-                help=_("TCP Port used by Neutron metadata namespace proxy.")),
-    cfg.IntOpt('send_arp_for_ha',
-               default=3,
-               help=_("Send this many gratuitous ARPs for HA setup, if "
-                      "less than or equal to 0, the feature is disabled")),
-    cfg.StrOpt('router_id', default='',
-               deprecated_for_removal=True,
-               help=_("If non-empty, the l3 agent can only configure a router "
-                      "that has the matching router ID.")),
-    cfg.BoolOpt('handle_internal_only_routers',
-                default=True,
-                help=_("Indicates that this L3 agent should also handle "
-                       "routers that do not have an external network gateway "
-                       "configured. This option should be True only for a "
-                       "single agent in a Neutron deployment, and may be "
-                       "False for all agents if all routers must have an "
-                       "external network gateway.")),
-    cfg.StrOpt('gateway_external_network_id', default='',
-               help=_("When external_network_bridge is set, each L3 agent can "
-                      "be associated with no more than one external network. "
-                      "This value should be set to the UUID of that external "
-                      "network. To allow L3 agent support multiple external "
-                      "networks, both the external_network_bridge and "
-                      "gateway_external_network_id must be left empty.")),
-    cfg.StrOpt('ipv6_gateway', default='',
-               help=_("With IPv6, the network used for the external gateway "
-                      "does not need to have an associated subnet, since the "
-                      "automatically assigned link-local address (LLA) can "
-                      "be used. However, an IPv6 gateway address is needed "
-                      "for use as the next-hop for the default route. "
-                      "If no IPv6 gateway address is configured here, "
-                      "(and only then) the neutron router will be configured "
-                      "to get its default route from router advertisements "
-                      "(RAs) from the upstream router; in which case the "
-                      "upstream router must also be configured to send "
-                      "these RAs. "
-                      "The ipv6_gateway, when configured, should be the LLA "
-                      "of the interface on the upstream router. If a "
-                      "next-hop using a global unique address (GUA) is "
-                      "desired, it needs to be done via a subnet allocated "
-                      "to the network and not through this parameter. ")),
-    cfg.StrOpt('prefix_delegation_driver',
-               default='dibbler',
-               help=_('Driver used for ipv6 prefix delegation. This needs to '
-                      'be an entry point defined in the '
-                      'neutron.agent.linux.pd_drivers namespace. See '
-                      'setup.cfg for entry points included with the neutron '
-                      'source.')),
-    cfg.BoolOpt('enable_metadata_proxy', default=True,
-                help=_("Allow running metadata proxy.")),
-    cfg.StrOpt('metadata_access_mark',
-               default='0x1',
-               help=_('Iptables mangle mark used to mark metadata valid '
-                      'requests. This mark will be masked with 0xffff so '
-                      'that only the lower 16 bits will be used.')),
-    cfg.StrOpt('external_ingress_mark',
-               default='0x2',
-               help=_('Iptables mangle mark used to mark ingress from '
-                      'external network. This mark will be masked with '
-                      '0xffff so that only the lower 16 bits will be used.')),
-]
-
-OPTS += config.EXT_NET_BRIDGE_OPTS
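One plausible way to exercise the options above outside the agent, assuming they are loaded from a config file at the usual (here hypothetical) path:

    from oslo_config import cfg

    from neutron.agent.l3 import config as l3_config

    conf = cfg.ConfigOpts()
    conf.register_opts(l3_config.OPTS)
    conf(['--config-file', '/etc/neutron/l3_agent.ini'])

    # Defaults apply when unset: agent_mode is 'legacy',
    # handle_internal_only_routers is True.
    print(conf.agent_mode, conf.handle_internal_only_routers)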
diff --git a/neutron/agent/l3/dvr.py b/neutron/agent/l3/dvr.py
deleted file mode 100644 (file)
index bb4c1a4..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (c) 2014 Openstack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import weakref
-
-from neutron.agent.l3 import dvr_fip_ns
-from neutron.agent.l3 import dvr_snat_ns
-
-
-# TODO(Carl) Following constants retained to increase SNR during refactoring
-SNAT_INT_DEV_PREFIX = dvr_snat_ns.SNAT_INT_DEV_PREFIX
-SNAT_NS_PREFIX = dvr_snat_ns.SNAT_NS_PREFIX
-
-
-class AgentMixin(object):
-    def __init__(self, host):
-        # dvr data
-        self._fip_namespaces = weakref.WeakValueDictionary()
-        super(AgentMixin, self).__init__(host)
-
-    def get_fip_ns(self, ext_net_id):
-        # TODO(Carl) Is this necessary? The code this replaced was careful
-        # to convert ext_net_id to a string, so that behavior is preserved.
-        ext_net_id = str(ext_net_id)
-
-        fip_ns = self._fip_namespaces.get(ext_net_id)
-        if fip_ns and not fip_ns.destroyed:
-            return fip_ns
-
-        fip_ns = dvr_fip_ns.FipNamespace(ext_net_id,
-                                         self.conf,
-                                         self.driver,
-                                         self.use_ipv6)
-        self._fip_namespaces[ext_net_id] = fip_ns
-
-        return fip_ns
-
-    def get_ports_by_subnet(self, subnet_id):
-        return self.plugin_rpc.get_ports_by_subnet(self.context, subnet_id)
-
-    def _update_arp_entry(self, context, payload, action):
-        router_id = payload['router_id']
-        ri = self.router_info.get(router_id)
-        if not ri:
-            return
-
-        arp_table = payload['arp_table']
-        ip = arp_table['ip_address']
-        mac = arp_table['mac_address']
-        subnet_id = arp_table['subnet_id']
-
-        ri._update_arp_entry(ip, mac, subnet_id, action)
-
-    def add_arp_entry(self, context, payload):
-        """Add arp entry into router namespace.  Called from RPC."""
-        self._update_arp_entry(context, payload, 'add')
-
-    def del_arp_entry(self, context, payload):
-        """Delete arp entry from router namespace.  Called from RPC."""
-        self._update_arp_entry(context, payload, 'delete')
-
-    def fipnamespace_delete_on_ext_net(self, context, ext_net_id):
-        """Delete fip namespace after external network removed."""
-        fip_ns = self.get_fip_ns(ext_net_id)
-        if fip_ns.agent_gateway_port and not fip_ns.destroyed:
-            fip_ns.delete()
diff --git a/neutron/agent/l3/dvr_edge_ha_router.py b/neutron/agent/l3/dvr_edge_ha_router.py
deleted file mode 100644 (file)
index 377d51e..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.l3.dvr_edge_router import DvrEdgeRouter
-from neutron.agent.l3 import dvr_snat_ns
-from neutron.agent.l3.ha_router import HaRouter
-from neutron.agent.l3.router_info import RouterInfo
-from neutron.common import constants as l3_constants
-
-
-class DvrEdgeHaRouter(DvrEdgeRouter, HaRouter):
-    """Router class which represents a centralized SNAT
-       DVR router with HA capabilities.
-    """
-
-    def __init__(self, agent, host, *args, **kwargs):
-        super(DvrEdgeHaRouter, self).__init__(agent, host,
-                                              *args, **kwargs)
-        self.enable_snat = None
-        self.snat_ports = None
-
-    @property
-    def ha_namespace(self):
-        if self.snat_namespace:
-            return self.snat_namespace.name
-        return None
-
-    def internal_network_added(self, port):
-        # Call RouterInfo's internal_network_added (Plugs the port, adds IP)
-        RouterInfo.internal_network_added(self, port)
-
-        for subnet in port['subnets']:
-            self._set_subnet_arp_info(subnet['id'])
-        self._snat_redirect_add_from_port(port)
-
-        if not self.get_ex_gw_port() or not self._is_this_snat_host():
-            return
-
-        sn_port = self.get_snat_port_for_internal_port(port)
-        if not sn_port:
-            return
-
-        self._plug_ha_router_port(
-            sn_port,
-            self._get_snat_int_device_name,
-            dvr_snat_ns.SNAT_INT_DEV_PREFIX)
-
-    def external_gateway_added(self, ex_gw_port, interface_name):
-        super(DvrEdgeHaRouter, self).external_gateway_added(
-            ex_gw_port, interface_name)
-        for port in self.get_snat_interfaces():
-            snat_interface_name = self._get_snat_int_device_name(port['id'])
-            self._disable_ipv6_addressing_on_interface(snat_interface_name)
-            self._add_vips(
-                self.get_snat_port_for_internal_port(port),
-                snat_interface_name)
-
-        self._add_gateway_vip(ex_gw_port, interface_name)
-        self._disable_ipv6_addressing_on_interface(interface_name)
-
-    def external_gateway_removed(self, ex_gw_port, interface_name):
-        for port in self.snat_ports:
-            snat_interface = self._get_snat_int_device_name(port['id'])
-            self.driver.unplug(snat_interface,
-                               namespace=self.ha_namespace,
-                               prefix=l3_constants.SNAT_INT_DEV_PREFIX)
-            self._clear_vips(snat_interface)
-        super(DvrEdgeHaRouter, self)._external_gateway_removed(
-            ex_gw_port, interface_name)
-        self._clear_vips(interface_name)
-
-    def external_gateway_updated(self, ex_gw_port, interface_name):
-        HaRouter.external_gateway_updated(self, ex_gw_port, interface_name)
-
-    def initialize(self, process_monitor):
-        self._create_snat_namespace()
-        super(DvrEdgeHaRouter, self).initialize(process_monitor)
-
-    def process(self, agent):
-        super(DvrEdgeHaRouter, self).process(agent)
-        if self.ha_port:
-            self.enable_keepalived()
-
-    def get_router_cidrs(self, device):
-        return RouterInfo.get_router_cidrs(self, device)
-
-    def _external_gateway_added(self, ex_gw_port, interface_name,
-                                ns_name, preserve_ips):
-        self._plug_external_gateway(ex_gw_port, interface_name, ns_name)
-
-    def _is_this_snat_host(self):
-        return (self.agent_conf.agent_mode
-                == l3_constants.L3_AGENT_MODE_DVR_SNAT)
-
-    def _dvr_internal_network_removed(self, port):
-        super(DvrEdgeHaRouter, self)._dvr_internal_network_removed(port)
-        sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
-        if not sn_port:
-            return
-        self._clear_vips(self._get_snat_int_device_name(sn_port['id']))
-
-    def _plug_snat_port(self, port):
-        """Used by _create_dvr_gateway in DvrEdgeRouter."""
-        interface_name = self._get_snat_int_device_name(port['id'])
-        self.driver.plug(port['network_id'], port['id'],
-                         interface_name, port['mac_address'],
-                         namespace=self.snat_namespace.name,
-                         prefix=dvr_snat_ns.SNAT_INT_DEV_PREFIX)
diff --git a/neutron/agent/l3/dvr_edge_router.py b/neutron/agent/l3/dvr_edge_router.py
deleted file mode 100644 (file)
index 4e45beb..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright (c) 2015 Openstack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LE
-from neutron.agent.l3 import dvr_local_router
-from neutron.agent.l3 import dvr_snat_ns
-from neutron.agent.l3 import router_info as router
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import iptables_manager
-
-LOG = logging.getLogger(__name__)
-
-
-class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
-
-    def __init__(self, agent, host, *args, **kwargs):
-        super(DvrEdgeRouter, self).__init__(agent, host, *args, **kwargs)
-        self.snat_namespace = None
-        self.snat_iptables_manager = None
-
-    def external_gateway_added(self, ex_gw_port, interface_name):
-        super(DvrEdgeRouter, self).external_gateway_added(
-            ex_gw_port, interface_name)
-        if self._is_this_snat_host():
-            self._create_dvr_gateway(ex_gw_port, interface_name)
-            # NOTE: When a router is created without a gateway, the routes
-            # are added to the router namespace. To populate the same routes
-            # in the snat namespace after the gateway port is added, we need
-            # to call routes_updated here.
-            self.routes_updated([], self.router['routes'])
-
-    def external_gateway_updated(self, ex_gw_port, interface_name):
-        if not self._is_this_snat_host():
-            # no centralized SNAT gateway for this node/agent
-            LOG.debug("not hosting snat for router: %s", self.router['id'])
-            if self.snat_namespace:
-                LOG.debug("SNAT was rescheduled to host %s. Clearing snat "
-                          "namespace.", self.router.get('gw_port_host'))
-                return self.external_gateway_removed(
-                    ex_gw_port, interface_name)
-            return
-
-        if not self.snat_namespace:
-            # SNAT might be rescheduled to this agent; need to process like
-            # newly created gateway
-            return self.external_gateway_added(ex_gw_port, interface_name)
-        else:
-            self._external_gateway_added(ex_gw_port,
-                                         interface_name,
-                                         self.snat_namespace.name,
-                                         preserve_ips=[])
-
-    def _external_gateway_removed(self, ex_gw_port, interface_name):
-        super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port,
-                                                            interface_name)
-        if not self._is_this_snat_host() and not self.snat_namespace:
-            # no centralized SNAT gateway for this node/agent
-            LOG.debug("not hosting snat for router: %s", self.router['id'])
-            return
-
-        self.driver.unplug(interface_name,
-                           bridge=self.agent_conf.external_network_bridge,
-                           namespace=self.snat_namespace.name,
-                           prefix=router.EXTERNAL_DEV_PREFIX)
-
-    def external_gateway_removed(self, ex_gw_port, interface_name):
-        self._external_gateway_removed(ex_gw_port, interface_name)
-        if self.snat_namespace:
-            self.snat_namespace.delete()
-            self.snat_namespace = None
-
-    def internal_network_added(self, port):
-        super(DvrEdgeRouter, self).internal_network_added(port)
-
-        # TODO(gsagie) some of these checks are already implemented
-        # in the base class; think about how to avoid redoing them
-        if not self._is_this_snat_host():
-            return
-
-        sn_port = self.get_snat_port_for_internal_port(port)
-        if not sn_port:
-            return
-
-        ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id'])
-        interface_name = self._get_snat_int_device_name(sn_port['id'])
-        self._internal_network_added(
-            ns_name,
-            sn_port['network_id'],
-            sn_port['id'],
-            sn_port['fixed_ips'],
-            sn_port['mac_address'],
-            interface_name,
-            dvr_snat_ns.SNAT_INT_DEV_PREFIX)
-
-    def _dvr_internal_network_removed(self, port):
-        super(DvrEdgeRouter, self)._dvr_internal_network_removed(port)
-
-        if not self.ex_gw_port:
-            return
-
-        sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
-        if not sn_port:
-            return
-
-        if not self._is_this_snat_host():
-            return
-
-        snat_interface = self._get_snat_int_device_name(sn_port['id'])
-        ns_name = self.snat_namespace.name
-        prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX
-        if ip_lib.device_exists(snat_interface, namespace=ns_name):
-            self.driver.unplug(snat_interface, namespace=ns_name,
-                               prefix=prefix)
-
-    def _plug_snat_port(self, port):
-        interface_name = self._get_snat_int_device_name(port['id'])
-        self._internal_network_added(
-            self.snat_namespace.name, port['network_id'],
-            port['id'], port['fixed_ips'],
-            port['mac_address'], interface_name,
-            dvr_snat_ns.SNAT_INT_DEV_PREFIX)
-
-    def _create_dvr_gateway(self, ex_gw_port, gw_interface_name):
-        """Create SNAT namespace."""
-        snat_ns = self._create_snat_namespace()
-        # connect snat_ports to br_int from SNAT namespace
-        for port in self.get_snat_interfaces():
-            # create interface_name
-            self._plug_snat_port(port)
-        self._external_gateway_added(ex_gw_port, gw_interface_name,
-                                     snat_ns.name, preserve_ips=[])
-        self.snat_iptables_manager = iptables_manager.IptablesManager(
-            namespace=snat_ns.name,
-            use_ipv6=self.use_ipv6)
-        # kicks the FW Agent to add rules for the snat namespace
-        self.agent.process_router_add(self)
-
-    def _create_snat_namespace(self):
-        # TODO(mlavalle): in the near future, this method should contain the
-        # code in the L3 agent that creates a gateway for a dvr. The first step
-        # is to move the creation of the snat namespace here
-        self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'],
-                                                        self.agent_conf,
-                                                        self.driver,
-                                                        self.use_ipv6)
-        self.snat_namespace.create()
-        return self.snat_namespace
-
-    def _get_snat_int_device_name(self, port_id):
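-        # truncated to the driver's DEV_NAME_LEN so the name fits the
-        # kernel's interface name length limit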
-        long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id
-        return long_name[:self.driver.DEV_NAME_LEN]
-
-    def _is_this_snat_host(self):
-        host = self.router.get('gw_port_host')
-        if not host:
-            LOG.debug("gw_port_host missing from router: %s",
-                      self.router['id'])
-        return host == self.host
-
-    def _handle_router_snat_rules(self, ex_gw_port, interface_name):
-        if not self._is_this_snat_host():
-            return
-        if not self.get_ex_gw_port():
-            return
-
-        if not self.snat_iptables_manager:
-            LOG.debug("DVR router: no snat rules to be handled")
-            return
-
-        with self.snat_iptables_manager.defer_apply():
-            self._empty_snat_chains(self.snat_iptables_manager)
-
-            # NOTE: DVR does not add the jump to float-snat that the super
-            # class adds.
-
-            self._add_snat_rules(ex_gw_port, self.snat_iptables_manager,
-                                 interface_name)
-
-    def update_routing_table(self, operation, route):
-        if self.get_ex_gw_port() and self._is_this_snat_host():
-            ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
-                self.router['id'])
-            # NOTE: For now let us apply the static routes in both the SNAT
-            # namespace and the router namespace, to reduce the complexity.
-            ip_wrapper = ip_lib.IPWrapper(namespace=ns_name)
-            if ip_wrapper.netns.exists(ns_name):
-                super(DvrEdgeRouter, self)._update_routing_table(
-                    operation, route, namespace=ns_name)
-            else:
-                LOG.error(_LE("The SNAT namespace %s does not exist for "
-                              "the router."), ns_name)
-        super(DvrEdgeRouter, self).update_routing_table(operation, route)
-
-    def delete(self, agent):
-        super(DvrEdgeRouter, self).delete(agent)
-        if self.snat_namespace:
-            self.snat_namespace.delete()
diff --git a/neutron/agent/l3/dvr_fip_ns.py b/neutron/agent/l3/dvr_fip_ns.py
deleted file mode 100644 (file)
index 6d4cf7b..0000000
+++ /dev/null
@@ -1,258 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from neutron.agent.l3 import fip_rule_priority_allocator as frpa
-from neutron.agent.l3 import link_local_allocator as lla
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import iptables_manager
-from neutron.common import utils as common_utils
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
-
-FIP_NS_PREFIX = 'fip-'
-FIP_EXT_DEV_PREFIX = 'fg-'
-FIP_2_ROUTER_DEV_PREFIX = 'fpr-'
-ROUTER_2_FIP_DEV_PREFIX = namespaces.ROUTER_2_FIP_DEV_PREFIX
-# Route Table index for FIPs
-FIP_RT_TBL = 16
-FIP_LL_SUBNET = '169.254.30.0/23'
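-# NOTE: the LinkLocalAllocator below carves this range into /31 pairs,
-# one pair per router for the rtr-2-fip/fip-2-rtr veth link.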
-# Rule priority range for FIPs
-FIP_PR_START = 32768
-FIP_PR_END = FIP_PR_START + 40000
-
-
-class FipNamespace(namespaces.Namespace):
-
-    def __init__(self, ext_net_id, agent_conf, driver, use_ipv6):
-        name = self._get_ns_name(ext_net_id)
-        super(FipNamespace, self).__init__(
-            name, agent_conf, driver, use_ipv6)
-
-        self._ext_net_id = ext_net_id
-        self.agent_conf = agent_conf
-        self.driver = driver
-        self.use_ipv6 = use_ipv6
-        self.agent_gateway_port = None
-        self._subscribers = set()
-        path = os.path.join(agent_conf.state_path, 'fip-priorities')
-        self._rule_priorities = frpa.FipRulePriorityAllocator(path,
-                                                              FIP_PR_START,
-                                                              FIP_PR_END)
-        self._iptables_manager = iptables_manager.IptablesManager(
-            namespace=self.get_name(),
-            use_ipv6=self.use_ipv6)
-        path = os.path.join(agent_conf.state_path, 'fip-linklocal-networks')
-        self.local_subnets = lla.LinkLocalAllocator(path, FIP_LL_SUBNET)
-        self.destroyed = False
-
-    @classmethod
-    def _get_ns_name(cls, ext_net_id):
-        return namespaces.build_ns_name(FIP_NS_PREFIX, ext_net_id)
-
-    def get_name(self):
-        return self._get_ns_name(self._ext_net_id)
-
-    def get_ext_device_name(self, port_id):
-        return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
-
-    def get_int_device_name(self, router_id):
-        return (FIP_2_ROUTER_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN]
-
-    def get_rtr_ext_device_name(self, router_id):
-        return (ROUTER_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN]
-
-    def has_subscribers(self):
-        return len(self._subscribers) != 0
-
-    def subscribe(self, router_id):
-        is_first = not self.has_subscribers()
-        self._subscribers.add(router_id)
-        return is_first
-
-    def unsubscribe(self, router_id):
-        self._subscribers.discard(router_id)
-        return not self.has_subscribers()
-
-    def allocate_rule_priority(self, floating_ip):
-        return self._rule_priorities.allocate(floating_ip)
-
-    def deallocate_rule_priority(self, floating_ip):
-        self._rule_priorities.release(floating_ip)
-
-    def _gateway_added(self, ex_gw_port, interface_name):
-        """Add Floating IP gateway port."""
-        LOG.debug("add gateway interface(%s)", interface_name)
-        ns_name = self.get_name()
-        self.driver.plug(ex_gw_port['network_id'],
-                         ex_gw_port['id'],
-                         interface_name,
-                         ex_gw_port['mac_address'],
-                         bridge=self.agent_conf.external_network_bridge,
-                         namespace=ns_name,
-                         prefix=FIP_EXT_DEV_PREFIX)
-
-        ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
-        self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name,
-                            clean_connections=True)
-
-        for fixed_ip in ex_gw_port['fixed_ips']:
-            ip_lib.send_ip_addr_adv_notif(ns_name,
-                                          interface_name,
-                                          fixed_ip['ip_address'],
-                                          self.agent_conf)
-
-        for subnet in ex_gw_port['subnets']:
-            gw_ip = subnet.get('gateway_ip')
-            if gw_ip:
-                ipd = ip_lib.IPDevice(interface_name,
-                                      namespace=ns_name)
-                ipd.route.add_gateway(gw_ip)
-
-        cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name]
-        # TODO(Carl) mlavalle's work has self.ip_wrapper
-        ip_wrapper = ip_lib.IPWrapper(namespace=ns_name)
-        ip_wrapper.netns.execute(cmd, check_exit_code=False)
-
-    def create(self):
-        # TODO(Carl) Get this functionality from mlavalle's namespace base class
-        LOG.debug("DVR: add fip namespace: %s", self.name)
-        ip_wrapper_root = ip_lib.IPWrapper()
-        ip_wrapper = ip_wrapper_root.ensure_namespace(self.get_name())
-        # Somewhere in the 3.19 kernel timeframe ip_nonlocal_bind was
-        # changed to be a per-namespace attribute.  To be backwards
-        # compatible we need to try both if at first we fail.
-        try:
-            ip_wrapper.netns.execute(['sysctl',
-                                      '-w',
-                                      'net.ipv4.ip_nonlocal_bind=1'],
-                                     log_fail_as_error=False,
-                                     run_as_root=True)
-        except RuntimeError:
-            LOG.debug('DVR: fip namespace (%s) does not support setting '
-                      'net.ipv4.ip_nonlocal_bind, trying in root namespace',
-                      self.name)
-            ip_wrapper_root.netns.execute(['sysctl',
-                                           '-w',
-                                           'net.ipv4.ip_nonlocal_bind=1'],
-                                          run_as_root=True)
-
-        ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1'])
-        if self.use_ipv6:
-            ip_wrapper.netns.execute(['sysctl', '-w',
-                                      'net.ipv6.conf.all.forwarding=1'])
-
-        # no connection tracking needed in fip namespace
-        self._iptables_manager.ipv4['raw'].add_rule('PREROUTING',
-                                                    '-j CT --notrack')
-        self._iptables_manager.apply()
-
-    def delete(self):
-        self.destroyed = True
-        ip_wrapper = ip_lib.IPWrapper(namespace=self.name)
-        for d in ip_wrapper.get_devices(exclude_loopback=True):
-            if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX):
-                # internal link between IRs and FIP NS
-                ip_wrapper.del_veth(d.name)
-            elif d.name.startswith(FIP_EXT_DEV_PREFIX):
-                # single port from FIP NS to br-ext
-                # TODO(carl) Where does the port get deleted?
-                LOG.debug('DVR: unplug: %s', d.name)
-                ext_net_bridge = self.agent_conf.external_network_bridge
-                self.driver.unplug(d.name,
-                                   bridge=ext_net_bridge,
-                                   namespace=self.name,
-                                   prefix=FIP_EXT_DEV_PREFIX)
-        self.agent_gateway_port = None
-
-        # TODO(mrsmith): add LOG warn if fip count != 0
-        LOG.debug('DVR: destroy fip namespace: %s', self.name)
-        super(FipNamespace, self).delete()
-
-    def create_gateway_port(self, agent_gateway_port):
-        """Create Floating IP gateway port.
-
-           Requests port creation from the plugin, then creates the
-           Floating IP namespace and adds the gateway port.
-        """
-        self.agent_gateway_port = agent_gateway_port
-
-        self.create()
-
-        iface_name = self.get_ext_device_name(agent_gateway_port['id'])
-        self._gateway_added(agent_gateway_port, iface_name)
-
-    def _internal_ns_interface_added(self, ip_cidr,
-                                     interface_name, ns_name):
-        ip_wrapper = ip_lib.IPWrapper(namespace=ns_name)
-        ip_wrapper.netns.execute(['ip', 'addr', 'add',
-                                  ip_cidr, 'dev', interface_name])
-
-    def create_rtr_2_fip_link(self, ri):
-        """Create interface between router and Floating IP namespace."""
-        LOG.debug("Create FIP link interfaces for router %s", ri.router_id)
-        rtr_2_fip_name = self.get_rtr_ext_device_name(ri.router_id)
-        fip_2_rtr_name = self.get_int_device_name(ri.router_id)
-        fip_ns_name = self.get_name()
-
-        # add link local IP to interface
-        if ri.rtr_fip_subnet is None:
-            ri.rtr_fip_subnet = self.local_subnets.allocate(ri.router_id)
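-        # get_pair() returns the two link-local addresses of the allocated
-        # subnet, one for each end of the veth link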
-        rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair()
-        ip_wrapper = ip_lib.IPWrapper(namespace=ri.ns_name)
-        device_exists = ip_lib.device_exists(rtr_2_fip_name,
-                                             namespace=ri.ns_name)
-        if not device_exists:
-            int_dev = ip_wrapper.add_veth(rtr_2_fip_name,
-                                          fip_2_rtr_name,
-                                          fip_ns_name)
-            self._internal_ns_interface_added(str(rtr_2_fip),
-                                              rtr_2_fip_name,
-                                              ri.ns_name)
-            self._internal_ns_interface_added(str(fip_2_rtr),
-                                              fip_2_rtr_name,
-                                              fip_ns_name)
-            if self.agent_conf.network_device_mtu:
-                int_dev[0].link.set_mtu(self.agent_conf.network_device_mtu)
-                int_dev[1].link.set_mtu(self.agent_conf.network_device_mtu)
-            int_dev[0].link.set_up()
-            int_dev[1].link.set_up()
-
-        # add default route for the link local interface
-        device = ip_lib.IPDevice(rtr_2_fip_name, namespace=ri.ns_name)
-        device.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL)
-        # set up the NAT rules and chains
-        ri._handle_fip_nat_rules(rtr_2_fip_name)
-
-    def scan_fip_ports(self, ri):
-        # scan only once per router: dist_fip_count is None until the
-        # first scan
-        if ri.dist_fip_count is not None:
-            return
-
-        # scan system for any existing fip ports
-        ri.dist_fip_count = 0
-        rtr_2_fip_interface = self.get_rtr_ext_device_name(ri.router_id)
-        device = ip_lib.IPDevice(rtr_2_fip_interface, namespace=ri.ns_name)
-        if device.exists():
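-            # floating IPs appear as /32 (host) addresses on this device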
-            existing_cidrs = [addr['cidr'] for addr in device.addr.list()]
-            fip_cidrs = [c for c in existing_cidrs if
-                         common_utils.is_cidr_host(c)]
-            for fip_cidr in fip_cidrs:
-                fip_ip = fip_cidr.split('/')[0]
-                rule_pr = self._rule_priorities.allocate(fip_ip)
-                ri.floating_ips_dict[fip_ip] = rule_pr
-            ri.dist_fip_count = len(fip_cidrs)
diff --git a/neutron/agent/l3/dvr_local_router.py b/neutron/agent/l3/dvr_local_router.py
deleted file mode 100644 (file)
index 563c88a..0000000
+++ /dev/null
@@ -1,464 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import binascii
-import collections
-import netaddr
-
-from oslo_log import log as logging
-from oslo_utils import excutils
-import six
-
-from neutron._i18n import _LE, _LW
-from neutron.agent.l3 import dvr_fip_ns
-from neutron.agent.l3 import dvr_router_base
-from neutron.agent.linux import ip_lib
-from neutron.common import constants as l3_constants
-from neutron.common import exceptions
-from neutron.common import utils as common_utils
-
-LOG = logging.getLogger(__name__)
-# xor-folding mask used for IPv6 rule index
-MASK_30 = 0x3fffffff
-
-# Tracks the arp entry cache
-Arp_entry = collections.namedtuple(
-    'Arp_entry', 'ip mac subnet_id operation')
-
-
-class DvrLocalRouter(dvr_router_base.DvrRouterBase):
-    def __init__(self, agent, host, *args, **kwargs):
-        super(DvrLocalRouter, self).__init__(agent, host, *args, **kwargs)
-
-        self.floating_ips_dict = {}
-        # Linklocal subnet for router and floating IP namespace link
-        self.rtr_fip_subnet = None
-        self.dist_fip_count = None
-        self.fip_ns = None
-        self._pending_arp_set = set()
-
-    def get_floating_ips(self):
-        """Filter Floating IPs to be hosted on this agent."""
-        floating_ips = super(DvrLocalRouter, self).get_floating_ips()
-        return [i for i in floating_ips if i['host'] == self.host]
-
-    def _handle_fip_nat_rules(self, interface_name):
-        """Configures NAT rules for Floating IPs for DVR."""
-        self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
-        self.iptables_manager.ipv4['nat'].empty_chain('snat')
-
-        # Add back the jump to float-snat
-        self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
-
-        # And add the NAT rule back: traffic that neither enters nor leaves
-        # via the rtr-fip link and is not DNATed is accepted, so it
-        # bypasses the float-snat processing
-        rule = ('POSTROUTING', '! -i %(interface_name)s '
-                '! -o %(interface_name)s -m conntrack ! '
-                '--ctstate DNAT -j ACCEPT' %
-                {'interface_name': interface_name})
-        self.iptables_manager.ipv4['nat'].add_rule(*rule)
-
-        self.iptables_manager.apply()
-
-    def floating_ip_added_dist(self, fip, fip_cidr):
-        """Add floating IP to FIP namespace."""
-        floating_ip = fip['floating_ip_address']
-        fixed_ip = fip['fixed_ip_address']
-        rule_pr = self.fip_ns.allocate_rule_priority(floating_ip)
-        self.floating_ips_dict[floating_ip] = rule_pr
-        fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
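-        # policy rule in the router namespace: traffic sourced from the
-        # fixed IP is looked up in the FIP route table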
-        ip_rule = ip_lib.IPRule(namespace=self.ns_name)
-        ip_rule.rule.add(ip=fixed_ip,
-                         table=dvr_fip_ns.FIP_RT_TBL,
-                         priority=rule_pr)
-        # Add routing rule in the fip namespace
-        fip_ns_name = self.fip_ns.get_name()
-        if self.rtr_fip_subnet is None:
-            self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
-                self.router_id)
-        rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()
-        device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
-        device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
-        interface_name = (
-            self.fip_ns.get_ext_device_name(
-                self.fip_ns.agent_gateway_port['id']))
-        ip_lib.send_ip_addr_adv_notif(fip_ns_name,
-                                      interface_name,
-                                      floating_ip,
-                                      self.agent_conf)
-        # update internal structures
-        self.dist_fip_count = self.dist_fip_count + 1
-
-    def floating_ip_removed_dist(self, fip_cidr):
-        """Remove floating IP from FIP namespace."""
-        floating_ip = fip_cidr.split('/')[0]
-        rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
-        fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
-        if self.rtr_fip_subnet is None:
-            self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
-                self.router_id)
-
-        rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
-        fip_ns_name = self.fip_ns.get_name()
-        if floating_ip in self.floating_ips_dict:
-            rule_pr = self.floating_ips_dict[floating_ip]
-            ip_rule = ip_lib.IPRule(namespace=self.ns_name)
-            ip_rule.rule.delete(ip=floating_ip,
-                                table=dvr_fip_ns.FIP_RT_TBL,
-                                priority=rule_pr)
-            self.fip_ns.deallocate_rule_priority(floating_ip)
-            # TODO(rajeev): Handle else case - exception/log?
-
-        device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
-
-        device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
-        # check if this is the last FIP for this router
-        self.dist_fip_count = self.dist_fip_count - 1
-        if self.dist_fip_count == 0:
-            # remove default route entry
-            device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name)
-            ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)
-            device.route.delete_gateway(str(fip_2_rtr.ip),
-                                        table=dvr_fip_ns.FIP_RT_TBL)
-            self.fip_ns.local_subnets.release(self.router_id)
-            self.rtr_fip_subnet = None
-            ns_ip.del_veth(fip_2_rtr_name)
-            self.fip_ns.unsubscribe(self.router_id)
-            # NOTE (Swami): The fg interface and the namespace will be deleted
-            # when the external gateway port is removed. This will be
-            # initiated from the server through an RPC call.
-
-    def add_floating_ip(self, fip, interface_name, device):
-        if not self._add_fip_addr_to_device(fip, device):
-            return l3_constants.FLOATINGIP_STATUS_ERROR
-
-        # Special Handling for DVR - update FIP namespace
-        ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
-        self.floating_ip_added_dist(fip, ip_cidr)
-        return l3_constants.FLOATINGIP_STATUS_ACTIVE
-
-    def remove_floating_ip(self, device, ip_cidr):
-        super(DvrLocalRouter, self).remove_floating_ip(device, ip_cidr)
-        self.floating_ip_removed_dist(ip_cidr)
-
-    def _get_internal_port(self, subnet_id):
-        """Return internal router port based on subnet_id."""
-        router_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
-        for port in router_ports:
-            fips = port['fixed_ips']
-            for f in fips:
-                if f['subnet_id'] == subnet_id:
-                    return port
-
-    def _cache_arp_entry(self, ip, mac, subnet_id, operation):
-        """Cache the arp entries if device not ready."""
-        arp_entry_tuple = Arp_entry(ip=ip,
-                                    mac=mac,
-                                    subnet_id=subnet_id,
-                                    operation=operation)
-        self._pending_arp_set.add(arp_entry_tuple)
-
-    def _process_arp_cache_for_internal_port(self, subnet_id):
-        """Function to process the cached arp entries."""
-        arp_remove = set()
-        for arp_entry in self._pending_arp_set:
-            if subnet_id == arp_entry.subnet_id:
-                try:
-                    state = self._update_arp_entry(
-                        arp_entry.ip, arp_entry.mac,
-                        arp_entry.subnet_id, arp_entry.operation)
-                except Exception:
-                    state = False
-                if state:
-                    # If the arp update was successful, then
-                    # go ahead and add it to the remove set
-                    arp_remove.add(arp_entry)
-
-        self._pending_arp_set -= arp_remove
-
-    def _delete_arp_cache_for_internal_port(self, subnet_id):
-        """Function to delete the cached arp entries."""
-        arp_delete = set()
-        for arp_entry in self._pending_arp_set:
-            if subnet_id == arp_entry.subnet_id:
-                arp_delete.add(arp_entry)
-        self._pending_arp_set -= arp_delete
-
-    def _update_arp_entry(self, ip, mac, subnet_id, operation):
-        """Add or delete arp entry into router namespace for the subnet."""
-        port = self._get_internal_port(subnet_id)
-        # update arp entry only if the subnet is attached to the router
-        if not port:
-            return False
-
-        try:
-            # TODO(mrsmith): optimize the calls below for bulk calls
-            interface_name = self.get_internal_device_name(port['id'])
-            device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
-            if device.exists():
-                if operation == 'add':
-                    device.neigh.add(ip, mac)
-                elif operation == 'delete':
-                    device.neigh.delete(ip, mac)
-                return True
-            else:
-                if operation == 'add':
-                    LOG.warn(_LW("Device %s does not exist so ARP entry "
-                                 "cannot be updated, will cache information "
-                                 "to be applied later when the device exists"),
-                             device)
-                    self._cache_arp_entry(ip, mac, subnet_id, operation)
-                return False
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("DVR: Failed updating arp entry"))
-
-    def _set_subnet_arp_info(self, subnet_id):
-        """Set ARP info retrieved from Plugin for existing ports."""
-        # TODO(Carl) Can we eliminate the need to make this RPC while
-        # processing a router?
-        subnet_ports = self.agent.get_ports_by_subnet(subnet_id)
-
-        for p in subnet_ports:
-            if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS:
-                for fixed_ip in p['fixed_ips']:
-                    self._update_arp_entry(fixed_ip['ip_address'],
-                                           p['mac_address'],
-                                           subnet_id,
-                                           'add')
-        self._process_arp_cache_for_internal_port(subnet_id)
-
-    @staticmethod
-    def _get_snat_idx(ip_cidr):
-        """Generate index for DVR snat rules and route tables.
-
-        The index value has to fit in 32 bits but be greater than the
-        system generated entries, i.e. 32768. For IPv4, use the numeric
-        value of the cidr. For IPv6, generate a crc32 hash and xor-fold it
-        to 30 bits.
-        Use the freed range to extend smaller values so that they become
-        greater than system generated entries.
-        """
-        net = netaddr.IPNetwork(ip_cidr)
-        if net.version == 6:
-            if isinstance(ip_cidr, six.text_type):
-                ip_cidr = ip_cidr.encode()  # Needed for Python 3.x
-            # the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility
-            snat_idx = binascii.crc32(ip_cidr) & 0xffffffff
-            # xor-fold the hash to reserve upper range to extend smaller values
-            snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30)
-            if snat_idx < 32768:
-                snat_idx = snat_idx + MASK_30
-        else:
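-            # e.g. '192.168.1.0/24' yields 3232235776, well above the
-            # 32768 floor of system generated entries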
-            snat_idx = net.value
-        return snat_idx
-
-    def _delete_gateway_device_if_exists(self, ns_ip_device, gw_ip_addr,
-                                         snat_idx):
-        try:
-            ns_ip_device.route.delete_gateway(gw_ip_addr,
-                                              table=snat_idx)
-        except exceptions.DeviceNotFoundError:
-            pass
-
-    def _snat_redirect_modify(self, gateway, sn_port, sn_int, is_add):
-        """Adds or removes rules and routes for SNAT redirection."""
-        try:
-            ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
-            ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
-            if is_add:
-                ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)
-            for port_fixed_ip in sn_port['fixed_ips']:
-                # Iterate and find the gateway IP address matching
-                # the IP version
-                port_ip_addr = port_fixed_ip['ip_address']
-                port_ip_vers = netaddr.IPAddress(port_ip_addr).version
-                for gw_fixed_ip in gateway['fixed_ips']:
-                    gw_ip_addr = gw_fixed_ip['ip_address']
-                    if netaddr.IPAddress(gw_ip_addr).version == port_ip_vers:
-                        sn_port_cidr = common_utils.ip_to_cidr(
-                            port_ip_addr, port_fixed_ip['prefixlen'])
-                        snat_idx = self._get_snat_idx(sn_port_cidr)
-                        if is_add:
-                            ns_ipd.route.add_gateway(gw_ip_addr,
-                                                     table=snat_idx)
-                            ns_ipr.rule.add(ip=sn_port_cidr,
-                                            table=snat_idx,
-                                            priority=snat_idx)
-                            ns_ipwrapr.netns.execute(
-                                ['sysctl', '-w',
-                                 'net.ipv4.conf.%s.send_redirects=0' % sn_int])
-                        else:
-                            self._delete_gateway_device_if_exists(ns_ipd,
-                                                                  gw_ip_addr,
-                                                                  snat_idx)
-                            ns_ipr.rule.delete(ip=sn_port_cidr,
-                                               table=snat_idx,
-                                               priority=snat_idx)
-        except Exception:
-            if is_add:
-                exc = _LE('DVR: error adding redirection logic')
-            else:
-                exc = _LE('DVR: snat remove failed to clear the rule '
-                          'and device')
-            LOG.exception(exc)
-
-    def _snat_redirect_add(self, gateway, sn_port, sn_int):
-        """Adds rules and routes for SNAT redirection."""
-        self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=True)
-
-    def _snat_redirect_remove(self, gateway, sn_port, sn_int):
-        """Removes rules and routes for SNAT redirection."""
-        self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False)
-
-    def internal_network_added(self, port):
-        super(DvrLocalRouter, self).internal_network_added(port)
-
-        # NOTE: _set_subnet_arp_info must be called to dynamically
-        # populate the arp entries for the dvr service ports into the
-        # router namespace. It does not depend on the external gateway
-        # port or the agent_mode.
-        for subnet in port['subnets']:
-            self._set_subnet_arp_info(subnet['id'])
-        self._snat_redirect_add_from_port(port)
-
-    def _snat_redirect_add_from_port(self, port):
-        ex_gw_port = self.get_ex_gw_port()
-        if not ex_gw_port:
-            return
-
-        sn_port = self.get_snat_port_for_internal_port(port)
-        if not sn_port:
-            return
-
-        interface_name = self.get_internal_device_name(port['id'])
-        self._snat_redirect_add(sn_port, port, interface_name)
-
-    def _dvr_internal_network_removed(self, port):
-        if not self.ex_gw_port:
-            return
-
-        sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
-        if not sn_port:
-            return
-
-        # DVR handling code for SNAT
-        interface_name = self.get_internal_device_name(port['id'])
-        self._snat_redirect_remove(sn_port, port, interface_name)
-        # Clean up the cached arp entries related to the port subnet
-        for subnet in port['subnets']:
-            self._delete_arp_cache_for_internal_port(subnet['id'])
-
-    def internal_network_removed(self, port):
-        self._dvr_internal_network_removed(port)
-        super(DvrLocalRouter, self).internal_network_removed(port)
-
-    def get_floating_agent_gw_interface(self, ext_net_id):
-        """Filter Floating Agent GW port for the external network."""
-        fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
-        return next(
-            (p for p in fip_ports if p['network_id'] == ext_net_id), None)
-
-    def get_external_device_interface_name(self, ex_gw_port):
-        fip_int = self.fip_ns.get_int_device_name(self.router_id)
-        if ip_lib.device_exists(fip_int, namespace=self.fip_ns.get_name()):
-            return self.fip_ns.get_rtr_ext_device_name(self.router_id)
-
-    def external_gateway_added(self, ex_gw_port, interface_name):
-        # TODO(Carl) Refactor external_gateway_added/updated/removed to use
-        # super class implementation where possible. Looks like preserve_ips
-        # and ns_name are the key differences.
-        ip_wrapr = ip_lib.IPWrapper(namespace=self.ns_name)
-        ip_wrapr.netns.execute(['sysctl', '-w',
-                               'net.ipv4.conf.all.send_redirects=0'])
-        for p in self.internal_ports:
-            gateway = self.get_snat_port_for_internal_port(p)
-            id_name = self.get_internal_device_name(p['id'])
-            if gateway:
-                self._snat_redirect_add(gateway, p, id_name)
-
-        for port in self.get_snat_interfaces():
-            for ip in port['fixed_ips']:
-                self._update_arp_entry(ip['ip_address'],
-                                       port['mac_address'],
-                                       ip['subnet_id'],
-                                       'add')
-
-    def external_gateway_updated(self, ex_gw_port, interface_name):
-        pass
-
-    def external_gateway_removed(self, ex_gw_port, interface_name):
-        # TODO(Carl) Should this be calling process_snat_dnat_for_fip?
-        self.process_floating_ip_nat_rules()
-        if self.fip_ns:
-            to_fip_interface_name = (
-                self.get_external_device_interface_name(ex_gw_port))
-            self.process_floating_ip_addresses(to_fip_interface_name)
-        for p in self.internal_ports:
-            # NOTE: When removing the gateway port, pass in the snat_port
-            # cache along with the current ports.
-            gateway = self.get_snat_port_for_internal_port(p, self.snat_ports)
-            internal_interface = self.get_internal_device_name(p['id'])
-            self._snat_redirect_remove(gateway, p, internal_interface)
-
-    def _handle_router_snat_rules(self, ex_gw_port, interface_name):
-        pass
-
-    def process_external(self, agent):
-        ex_gw_port = self.get_ex_gw_port()
-        if ex_gw_port:
-            self.create_dvr_fip_interfaces(ex_gw_port)
-        super(DvrLocalRouter, self).process_external(agent)
-
-    def create_dvr_fip_interfaces(self, ex_gw_port):
-        floating_ips = self.get_floating_ips()
-        fip_agent_port = self.get_floating_agent_gw_interface(
-            ex_gw_port['network_id'])
-        if fip_agent_port:
-            LOG.debug("FloatingIP agent gateway port received from the "
-                "plugin: %s", fip_agent_port)
-        is_first = False
-        if floating_ips:
-            is_first = self.fip_ns.subscribe(self.router_id)
-            if is_first and not fip_agent_port:
-                LOG.debug("No FloatingIP agent gateway port possibly due to "
-                          "late binding of the private port to the host, "
-                          "requesting agent gateway port for 'network-id' :"
-                          "%s", ex_gw_port['network_id'])
-                fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port(
-                    self.agent.context, ex_gw_port['network_id'])
-                if not fip_agent_port:
-                    LOG.error(_LE("No FloatingIP agent gateway port "
-                                  "returned from server for 'network-id': "
-                                  "%s"), ex_gw_port['network_id'])
-            if is_first and fip_agent_port:
-                if 'subnets' not in fip_agent_port:
-                    LOG.error(_LE('Missing subnet/agent_gateway_port'))
-                else:
-                    self.fip_ns.create_gateway_port(fip_agent_port)
-
-            if (self.fip_ns.agent_gateway_port and
-                (self.dist_fip_count == 0 or is_first)):
-                self.fip_ns.create_rtr_2_fip_link(self)
-
-                # kicks the FW Agent to add rules for the IR namespace if
-                # configured
-                self.agent.process_router_add(self)
-
-    def process(self, agent):
-        ex_gw_port = self.get_ex_gw_port()
-        if ex_gw_port:
-            self.fip_ns = agent.get_fip_ns(ex_gw_port['network_id'])
-            self.fip_ns.scan_fip_ports(self)
-
-        super(DvrLocalRouter, self).process(agent)
diff --git a/neutron/agent/l3/dvr_router_base.py b/neutron/agent/l3/dvr_router_base.py
deleted file mode 100644 (file)
index 781085a..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LE
-from neutron.agent.l3 import router_info as router
-from neutron.common import constants as l3_constants
-
-LOG = logging.getLogger(__name__)
-
-
-class DvrRouterBase(router.RouterInfo):
-    def __init__(self, agent, host, *args, **kwargs):
-        super(DvrRouterBase, self).__init__(*args, **kwargs)
-
-        self.agent = agent
-        self.host = host
-
-    def process(self, agent):
-        super(DvrRouterBase, self).process(agent)
-        # NOTE: Keep a copy of the interfaces around for when they are removed
-        self.snat_ports = self.get_snat_interfaces()
-
-    def get_snat_interfaces(self):
-        return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
-
-    def get_snat_port_for_internal_port(self, int_port, snat_ports=None):
-        """Return the SNAT port for the given internal interface port."""
-        if snat_ports is None:
-            snat_ports = self.get_snat_interfaces()
-        fixed_ip = int_port['fixed_ips'][0]
-        subnet_id = fixed_ip['subnet_id']
-        if snat_ports:
-            match_port = [p for p in snat_ports
-                          if p['fixed_ips'][0]['subnet_id'] == subnet_id]
-            if match_port:
-                return match_port[0]
-            else:
-                LOG.error(_LE('DVR: SNAT port not found in the list '
-                              '%(snat_list)s for the given router '
-                              'internal port %(int_p)s'), {
-                                  'snat_list': snat_ports,
-                                  'int_p': int_port})
diff --git a/neutron/agent/l3/dvr_snat_ns.py b/neutron/agent/l3/dvr_snat_ns.py
deleted file mode 100644 (file)
index 2e360cc..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.common import constants
-
-LOG = logging.getLogger(__name__)
-SNAT_NS_PREFIX = 'snat-'
-SNAT_INT_DEV_PREFIX = constants.SNAT_INT_DEV_PREFIX
-
-
-class SnatNamespace(namespaces.Namespace):
-
-    def __init__(self, router_id, agent_conf, driver, use_ipv6):
-        self.router_id = router_id
-        name = self.get_snat_ns_name(router_id)
-        super(SnatNamespace, self).__init__(
-            name, agent_conf, driver, use_ipv6)
-
-    @classmethod
-    def get_snat_ns_name(cls, router_id):
-        return namespaces.build_ns_name(SNAT_NS_PREFIX, router_id)
-
-    def delete(self):
-        ns_ip = ip_lib.IPWrapper(namespace=self.name)
-        for d in ns_ip.get_devices(exclude_loopback=True):
-            if d.name.startswith(SNAT_INT_DEV_PREFIX):
-                LOG.debug('Unplugging DVR device %s', d.name)
-                self.driver.unplug(d.name, namespace=self.name,
-                                   prefix=SNAT_INT_DEV_PREFIX)
-
-        # TODO(mrsmith): delete ext-gw-port
-        LOG.debug('DVR: destroy snat ns: %s', self.name)
-        super(SnatNamespace, self).delete()
diff --git a/neutron/agent/l3/fip_rule_priority_allocator.py b/neutron/agent/l3/fip_rule_priority_allocator.py
deleted file mode 100644 (file)
index 016f12c..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2015 IBM Corporation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.l3.item_allocator import ItemAllocator
-
-
-class FipPriority(object):
-    def __init__(self, index):
-        self.index = index
-
-    def __repr__(self):
-        return str(self.index)
-
-    def __hash__(self):
-        return hash(self.__repr__())
-
-    def __eq__(self, other):
-        if isinstance(other, FipPriority):
-            return (self.index == other.index)
-        else:
-            return False
-
-
-class FipRulePriorityAllocator(ItemAllocator):
-    """Manages allocation of floating ips rule priorities.
-        IP rule priorities assigned to DVR floating IPs need
-        to be preserved over L3 agent restarts.
-        This class provides an allocator which saves the prirorities
-        to a datastore which will survive L3 agent restarts.
-    """
-    def __init__(self, data_store_path, priority_rule_start,
-                 priority_rule_end):
-        """Create the necessary pool and create the item allocator
-            using ',' as the delimiter and FipRulePriorityAllocator as the
-            class type
-        """
-        pool = set(FipPriority(str(s)) for s in range(priority_rule_start,
-                                                      priority_rule_end))
-
-        super(FipRulePriorityAllocator, self).__init__(data_store_path,
-                                                       FipPriority,
-                                                       pool)
diff --git a/neutron/agent/l3/ha.py b/neutron/agent/l3/ha.py
deleted file mode 100644 (file)
index c6cc9c4..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-import eventlet
-from oslo_config import cfg
-from oslo_log import log as logging
-import webob
-
-from neutron._i18n import _, _LI
-from neutron.agent.linux import keepalived
-from neutron.agent.linux import utils as agent_utils
-from neutron.common import utils as common_utils
-from neutron.notifiers import batch_notifier
-
-LOG = logging.getLogger(__name__)
-
-KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG = 4096
-
-OPTS = [
-    cfg.StrOpt('ha_confs_path',
-               default='$state_path/ha_confs',
-               help=_('Location to store keepalived/conntrackd '
-                      'config files')),
-    cfg.StrOpt('ha_vrrp_auth_type',
-               default='PASS',
-               choices=keepalived.VALID_AUTH_TYPES,
-               help=_('VRRP authentication type')),
-    cfg.StrOpt('ha_vrrp_auth_password',
-               help=_('VRRP authentication password'),
-               secret=True),
-    cfg.IntOpt('ha_vrrp_advert_int',
-               default=2,
-               help=_('The advertisement interval in seconds')),
-]
-
-
-class KeepalivedStateChangeHandler(object):
-    def __init__(self, agent):
-        self.agent = agent
-
-    @webob.dec.wsgify(RequestClass=webob.Request)
-    def __call__(self, req):
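-        # requests arrive over a unix domain socket from the
-        # neutron-keepalived-state-change helper; the router id and new
-        # state are passed as headers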
-        router_id = req.headers['X-Neutron-Router-Id']
-        state = req.headers['X-Neutron-State']
-        self.enqueue(router_id, state)
-
-    def enqueue(self, router_id, state):
-        LOG.debug('Handling notification for router '
-                  '%(router_id)s, state %(state)s', {'router_id': router_id,
-                                                     'state': state})
-        self.agent.enqueue_state_change(router_id, state)
-
-
-class L3AgentKeepalivedStateChangeServer(object):
-    def __init__(self, agent, conf):
-        self.agent = agent
-        self.conf = conf
-
-        agent_utils.ensure_directory_exists_without_file(
-            self.get_keepalived_state_change_socket_path(self.conf))
-
-    @classmethod
-    def get_keepalived_state_change_socket_path(cls, conf):
-        return os.path.join(conf.state_path, 'keepalived-state-change')
-
-    def run(self):
-        server = agent_utils.UnixDomainWSGIServer(
-            'neutron-keepalived-state-change')
-        server.start(KeepalivedStateChangeHandler(self.agent),
-                     self.get_keepalived_state_change_socket_path(self.conf),
-                     workers=0,
-                     backlog=KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG)
-        server.wait()
-
-
-class AgentMixin(object):
-    def __init__(self, host):
-        self._init_ha_conf_path()
-        super(AgentMixin, self).__init__(host)
-        self.state_change_notifier = batch_notifier.BatchNotifier(
-            self._calculate_batch_duration(), self.notify_server)
-        eventlet.spawn(self._start_keepalived_notifications_server)
-
-    def _start_keepalived_notifications_server(self):
-        state_change_server = (
-            L3AgentKeepalivedStateChangeServer(self, self.conf))
-        state_change_server.run()
-
-    def _calculate_batch_duration(self):
-        # A backup becomes the master after missing 3 advertisements from
-        # the current master
-        detection_time = self.conf.ha_vrrp_advert_int * 3
-
-        # Keepalived takes a couple of seconds to configure the VIPs
-        configuration_time = 2
-
-        # Give it enough slack to batch all events due to the same failure
-        return (detection_time + configuration_time) * 2
-
-    def enqueue_state_change(self, router_id, state):
-        LOG.info(_LI('Router %(router_id)s transitioned to %(state)s'),
-                 {'router_id': router_id,
-                  'state': state})
-
-        try:
-            ri = self.router_info[router_id]
-        except KeyError:
-            LOG.info(_LI('Router %s is not managed by this agent. It was '
-                         'possibly deleted concurrently.'), router_id)
-            return
-
-        self._configure_ipv6_ra_on_ext_gw_port_if_necessary(ri, state)
-        if self.conf.enable_metadata_proxy:
-            self._update_metadata_proxy(ri, router_id, state)
-        self._update_radvd_daemon(ri, state)
-        self.state_change_notifier.queue_event((router_id, state))
-
-    def _configure_ipv6_ra_on_ext_gw_port_if_necessary(self, ri, state):
-        # If IPv6 is enabled on the platform, the ipv6_gateway config flag
-        # is not set, and the external network associated with the router
-        # does not include any IPv6 subnet, enable the gateway interface to
-        # accept Router Advertisements from the upstream router for the
-        # default route.
-        ex_gw_port_id = ri.ex_gw_port and ri.ex_gw_port['id']
-        if state == 'master' and ex_gw_port_id and ri.use_ipv6:
-            gateway_ips = ri._get_external_gw_ips(ri.ex_gw_port)
-            if not ri.is_v6_gateway_set(gateway_ips):
-                interface_name = ri.get_external_device_name(ex_gw_port_id)
-                if ri.router.get('distributed', False):
-                    namespace = ri.ha_namespace
-                else:
-                    namespace = ri.ns_name
-                ri.driver.configure_ipv6_ra(namespace, interface_name)
-
-    def _update_metadata_proxy(self, ri, router_id, state):
-        if state == 'master':
-            LOG.debug('Spawning metadata proxy for router %s', router_id)
-            self.metadata_driver.spawn_monitored_metadata_proxy(
-                self.process_monitor, ri.ns_name, self.conf.metadata_port,
-                self.conf, router_id=ri.router_id)
-        else:
-            LOG.debug('Closing metadata proxy for router %s', router_id)
-            self.metadata_driver.destroy_monitored_metadata_proxy(
-                self.process_monitor, ri.router_id, self.conf)
-
-    def _update_radvd_daemon(self, ri, state):
-        # Radvd has to be spawned only on the Master HA Router. If there are
-        # any state transitions, we enable/disable radvd accordingly.
-        if state == 'master':
-            ri.enable_radvd()
-        else:
-            ri.disable_radvd()
-
-    def notify_server(self, batched_events):
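-        # map raw keepalived VRRP states to the active/standby values the
-        # server-side API expects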
-        translation_map = {'master': 'active',
-                           'backup': 'standby',
-                           'fault': 'standby'}
-        translated_states = dict((router_id, translation_map[state]) for
-                                 router_id, state in batched_events)
-        LOG.debug('Updating server with HA routers states %s',
-                  translated_states)
-        self.plugin_rpc.update_ha_routers_states(
-            self.context, translated_states)
-
-    def _init_ha_conf_path(self):
-        ha_full_path = os.path.dirname("/%s/" % self.conf.ha_confs_path)
-        common_utils.ensure_dir(ha_full_path)
diff --git a/neutron/agent/l3/ha_router.py b/neutron/agent/l3/ha_router.py
deleted file mode 100644 (file)
index 3e4389b..0000000
+++ /dev/null
@@ -1,393 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import shutil
-
-import netaddr
-from oslo_log import log as logging
-
-from neutron._i18n import _LE
-from neutron.agent.l3 import router_info as router
-from neutron.agent.linux import external_process
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import keepalived
-from neutron.common import constants as n_consts
-from neutron.common import utils as common_utils
-from neutron.extensions import portbindings
-
-LOG = logging.getLogger(__name__)
-HA_DEV_PREFIX = 'ha-'
-IP_MONITOR_PROCESS_SERVICE = 'ip_monitor'
-
-
-class HaRouter(router.RouterInfo):
-    def __init__(self, state_change_callback, *args, **kwargs):
-        super(HaRouter, self).__init__(*args, **kwargs)
-
-        self.ha_port = None
-        self.keepalived_manager = None
-        self.state_change_callback = state_change_callback
-
-    @property
-    def ha_priority(self):
-        return self.router.get('priority', keepalived.HA_DEFAULT_PRIORITY)
-
-    @property
-    def ha_vr_id(self):
-        return self.router.get('ha_vr_id')
-
-    @property
-    def ha_state(self):
-        ha_state_path = self.keepalived_manager.get_full_config_file_path(
-            'state')
-        try:
-            with open(ha_state_path, 'r') as f:
-                return f.read()
-        except (OSError, IOError):
-            LOG.debug('Error while reading HA state for %s', self.router_id)
-            return None
-
-    @ha_state.setter
-    def ha_state(self, new_state):
-        ha_state_path = self.keepalived_manager.get_full_config_file_path(
-            'state')
-        try:
-            with open(ha_state_path, 'w') as f:
-                f.write(new_state)
-        except (OSError, IOError):
-            LOG.error(_LE('Error while writing HA state for %s'),
-                      self.router_id)
-
-    @property
-    def ha_namespace(self):
-        return self.ns_name
-
-    def initialize(self, process_monitor):
-        super(HaRouter, self).initialize(process_monitor)
-        ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
-        if not ha_port:
-            LOG.error(_LE('Unable to process HA router %s without HA port'),
-                      self.router_id)
-            return
-
-        self.ha_port = ha_port
-        self._init_keepalived_manager(process_monitor)
-        self.ha_network_added()
-        self.update_initial_state(self.state_change_callback)
-        self.spawn_state_change_monitor(process_monitor)
-
-    def _init_keepalived_manager(self, process_monitor):
-        self.keepalived_manager = keepalived.KeepalivedManager(
-            self.router['id'],
-            keepalived.KeepalivedConf(),
-            process_monitor,
-            conf_path=self.agent_conf.ha_confs_path,
-            namespace=self.ha_namespace)
-
-        config = self.keepalived_manager.config
-
-        interface_name = self.get_ha_device_name()
-        subnets = self.ha_port.get('subnets', [])
-        ha_port_cidrs = [subnet['cidr'] for subnet in subnets]
-        instance = keepalived.KeepalivedInstance(
-            'BACKUP',
-            interface_name,
-            self.ha_vr_id,
-            ha_port_cidrs,
-            nopreempt=True,
-            advert_int=self.agent_conf.ha_vrrp_advert_int,
-            priority=self.ha_priority)
-        instance.track_interfaces.append(interface_name)
-
-        if self.agent_conf.ha_vrrp_auth_password:
-            # TODO(safchain): use oslo.config types once they are available
-            # in order to check the validity of ha_vrrp_auth_type
-            instance.set_authentication(self.agent_conf.ha_vrrp_auth_type,
-                                        self.agent_conf.ha_vrrp_auth_password)
-
-        config.add_instance(instance)
-
-    def enable_keepalived(self):
-        self.keepalived_manager.spawn()
-
-    def disable_keepalived(self):
-        self.keepalived_manager.disable()
-        conf_dir = self.keepalived_manager.get_conf_dir()
-        shutil.rmtree(conf_dir)
-
-    def _get_keepalived_instance(self):
-        return self.keepalived_manager.config.get_instance(self.ha_vr_id)
-
-    def _get_primary_vip(self):
-        return self._get_keepalived_instance().get_primary_vip()
-
-    def get_ha_device_name(self):
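-        # e.g. (illustrative): with the driver's typical DEV_NAME_LEN of 14,
-        # port id '1234567890abcdef' yields the device name 'ha-1234567890a'.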
-        return (HA_DEV_PREFIX + self.ha_port['id'])[:self.driver.DEV_NAME_LEN]
-
-    def ha_network_added(self):
-        interface_name = self.get_ha_device_name()
-
-        self.driver.plug(self.ha_port['network_id'],
-                         self.ha_port['id'],
-                         interface_name,
-                         self.ha_port['mac_address'],
-                         namespace=self.ha_namespace,
-                         prefix=HA_DEV_PREFIX)
-        ip_cidrs = common_utils.fixed_ip_cidrs(self.ha_port['fixed_ips'])
-        self.driver.init_l3(interface_name, ip_cidrs,
-                            namespace=self.ha_namespace,
-                            preserve_ips=[self._get_primary_vip()])
-
-    def ha_network_removed(self):
-        self.driver.unplug(self.get_ha_device_name(),
-                           namespace=self.ha_namespace,
-                           prefix=HA_DEV_PREFIX)
-        self.ha_port = None
-
-    def _add_vips(self, port, interface_name):
-        for ip_cidr in common_utils.fixed_ip_cidrs(port['fixed_ips']):
-            self._add_vip(ip_cidr, interface_name)
-
-    def _add_vip(self, ip_cidr, interface, scope=None):
-        instance = self._get_keepalived_instance()
-        instance.add_vip(ip_cidr, interface, scope)
-
-    def _remove_vip(self, ip_cidr):
-        instance = self._get_keepalived_instance()
-        instance.remove_vip_by_ip_address(ip_cidr)
-
-    def _clear_vips(self, interface):
-        instance = self._get_keepalived_instance()
-        instance.remove_vips_vroutes_by_interface(interface)
-
-    def _get_cidrs_from_keepalived(self, interface_name):
-        instance = self._get_keepalived_instance()
-        return instance.get_existing_vip_ip_addresses(interface_name)
-
-    def get_router_cidrs(self, device):
-        return set(self._get_cidrs_from_keepalived(device.name))
-
-    def routes_updated(self, old_routes, new_routes):
-        instance = self._get_keepalived_instance()
-        instance.virtual_routes.extra_routes = [
-            keepalived.KeepalivedVirtualRoute(
-                route['destination'], route['nexthop'])
-            for route in new_routes]
-
-    def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
-        gateway_ips = self._get_external_gw_ips(ex_gw_port)
-        if not gateway_ips:
-            return
-
-        default_gw_rts = []
-        instance = self._get_keepalived_instance()
-        for gw_ip in gateway_ips:
-            # TODO(Carl) This is repeated everywhere.  A method would
-            # be nice.
-            default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version]
-            default_gw_rts.append(keepalived.KeepalivedVirtualRoute(
-                default_gw, gw_ip, interface_name))
-        instance.virtual_routes.gateway_routes = default_gw_rts
-
-    def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name):
-        extra_subnets = ex_gw_port.get('extra_subnets', [])
-        instance = self._get_keepalived_instance()
-        onlink_route_cidrs = set(s['cidr'] for s in extra_subnets)
-        instance.virtual_routes.extra_subnets = [
-            keepalived.KeepalivedVirtualRoute(
-                onlink_route_cidr, None, interface_name, scope='link') for
-            onlink_route_cidr in onlink_route_cidrs]
-
-    def _should_delete_ipv6_lladdr(self, ipv6_lladdr):
-        """Only the master should have any IP addresses configured.
-        Let keepalived manage IPv6 link local addresses, the same way we let
-        it manage IPv4 addresses. If the router is not in the master state,
-        we must delete the address first as it is autoconfigured by the kernel.
-        """
-        manager = self.keepalived_manager
-        if manager.get_process().active:
-            if self.ha_state != 'master':
-                conf = manager.get_conf_on_disk()
-                managed_by_keepalived = conf and ipv6_lladdr in conf
-                if managed_by_keepalived:
-                    return False
-            else:
-                return False
-        return True
-
-    def _disable_ipv6_addressing_on_interface(self, interface_name):
-        """Disable IPv6 link local addressing on the device and add it as
-        a VIP to keepalived. This means that the IPv6 link local address
-        will only be present on the master.
-        """
-        device = ip_lib.IPDevice(interface_name, namespace=self.ha_namespace)
-        ipv6_lladdr = ip_lib.get_ipv6_lladdr(device.link.address)
-
-        if self._should_delete_ipv6_lladdr(ipv6_lladdr):
-            device.addr.flush(n_consts.IP_VERSION_6)
-
-        self._remove_vip(ipv6_lladdr)
-        self._add_vip(ipv6_lladdr, interface_name, scope='link')
-
-    def _add_gateway_vip(self, ex_gw_port, interface_name):
-        self._add_vips(ex_gw_port, interface_name)
-        self._add_default_gw_virtual_route(ex_gw_port, interface_name)
-        self._add_extra_subnet_onlink_routes(ex_gw_port, interface_name)
-
-    def add_floating_ip(self, fip, interface_name, device):
-        fip_ip = fip['floating_ip_address']
-        ip_cidr = common_utils.ip_to_cidr(fip_ip)
-        self._add_vip(ip_cidr, interface_name)
-        # TODO(Carl) Should this return status?
-        # return l3_constants.FLOATINGIP_STATUS_ACTIVE
-
-    def remove_floating_ip(self, device, ip_cidr):
-        self._remove_vip(ip_cidr)
-        if self.ha_state == 'master' and device.addr.list():
-            super(HaRouter, self).remove_floating_ip(device, ip_cidr)
-
-    def internal_network_updated(self, interface_name, ip_cidrs):
-        self._clear_vips(interface_name)
-        self._disable_ipv6_addressing_on_interface(interface_name)
-        for ip_cidr in ip_cidrs:
-            self._add_vip(ip_cidr, interface_name)
-
-    def _plug_ha_router_port(self, port, name_getter, prefix):
-        port_id = port['id']
-        interface_name = name_getter(port_id)
-
-        self.driver.plug(port['network_id'],
-                         port_id,
-                         interface_name,
-                         port['mac_address'],
-                         namespace=self.ha_namespace,
-                         prefix=prefix)
-
-        self._disable_ipv6_addressing_on_interface(interface_name)
-        self._add_vips(port, interface_name)
-
-    def internal_network_added(self, port):
-        self._plug_ha_router_port(
-            port, self.get_internal_device_name, router.INTERNAL_DEV_PREFIX)
-
-    def internal_network_removed(self, port):
-        super(HaRouter, self).internal_network_removed(port)
-
-        interface_name = self.get_internal_device_name(port['id'])
-        self._clear_vips(interface_name)
-
-    def _get_state_change_monitor_process_manager(self):
-        return external_process.ProcessManager(
-            self.agent_conf,
-            '%s.monitor' % self.router_id,
-            self.ha_namespace,
-            default_cmd_callback=self._get_state_change_monitor_callback())
-
-    def _get_state_change_monitor_callback(self):
-        ha_device = self.get_ha_device_name()
-        ha_cidr = self._get_primary_vip()
-
-        def callback(pid_file):
-            cmd = [
-                'neutron-keepalived-state-change',
-                '--router_id=%s' % self.router_id,
-                '--namespace=%s' % self.ha_namespace,
-                '--conf_dir=%s' % self.keepalived_manager.get_conf_dir(),
-                '--monitor_interface=%s' % ha_device,
-                '--monitor_cidr=%s' % ha_cidr,
-                '--pid_file=%s' % pid_file,
-                '--state_path=%s' % self.agent_conf.state_path,
-                '--user=%s' % os.geteuid(),
-                '--group=%s' % os.getegid()]
-            return cmd
-
-        return callback
-
-    def spawn_state_change_monitor(self, process_monitor):
-        pm = self._get_state_change_monitor_process_manager()
-        pm.enable()
-        process_monitor.register(
-            self.router_id, IP_MONITOR_PROCESS_SERVICE, pm)
-
-    def destroy_state_change_monitor(self, process_monitor):
-        pm = self._get_state_change_monitor_process_manager()
-        process_monitor.unregister(
-            self.router_id, IP_MONITOR_PROCESS_SERVICE)
-        pm.disable()
-
-    def update_initial_state(self, callback):
-        ha_device = ip_lib.IPDevice(
-            self.get_ha_device_name(),
-            self.ha_namespace)
-        addresses = ha_device.addr.list()
-        cidrs = (address['cidr'] for address in addresses)
-        ha_cidr = self._get_primary_vip()
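-        # The primary VIP is configured by keepalived only on the master, so
-        # its presence on the HA device reveals this router's current state.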
-        state = 'master' if ha_cidr in cidrs else 'backup'
-        self.ha_state = state
-        callback(self.router_id, state)
-
-    @staticmethod
-    def _gateway_ports_equal(port1, port2):
-        def _get_filtered_dict(d, ignore):
-            return {k: v for k, v in d.items() if k not in ignore}
-
-        keys_to_ignore = set([portbindings.HOST_ID])
-        port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
-        port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
-        return port1_filtered == port2_filtered
-
-    def external_gateway_added(self, ex_gw_port, interface_name):
-        self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name)
-        self._add_gateway_vip(ex_gw_port, interface_name)
-        self._disable_ipv6_addressing_on_interface(interface_name)
-
-    def external_gateway_updated(self, ex_gw_port, interface_name):
-        self._plug_external_gateway(
-            ex_gw_port, interface_name, self.ha_namespace)
-        ip_cidrs = common_utils.fixed_ip_cidrs(self.ex_gw_port['fixed_ips'])
-        for old_gateway_cidr in ip_cidrs:
-            self._remove_vip(old_gateway_cidr)
-        self._add_gateway_vip(ex_gw_port, interface_name)
-
-    def external_gateway_removed(self, ex_gw_port, interface_name):
-        self._clear_vips(interface_name)
-
-        if self.ha_state == 'master':
-            super(HaRouter, self).external_gateway_removed(ex_gw_port,
-                                                           interface_name)
-        else:
-            # We are not the master node, so no need to delete ip addresses.
-            self.driver.unplug(interface_name,
-                               bridge=self.agent_conf.external_network_bridge,
-                               namespace=self.ns_name,
-                               prefix=router.EXTERNAL_DEV_PREFIX)
-
-    def delete(self, agent):
-        super(HaRouter, self).delete(agent)
-        self.destroy_state_change_monitor(self.process_monitor)
-        self.ha_network_removed()
-        self.disable_keepalived()
-
-    def process(self, agent):
-        super(HaRouter, self).process(agent)
-
-        if self.ha_port:
-            self.enable_keepalived()
-
-    @common_utils.synchronized('enable_radvd')
-    def enable_radvd(self, internal_ports=None):
-        if (self.keepalived_manager.get_process().active and
-                self.ha_state == 'master'):
-            super(HaRouter, self).enable_radvd(internal_ports)
diff --git a/neutron/agent/l3/item_allocator.py b/neutron/agent/l3/item_allocator.py
deleted file mode 100644 (file)
index 79e8ee1..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2015 IBM Corporation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-
-class ItemAllocator(object):
-    """Manages allocation of items from a pool
-
-    Some of the allocations such as link local addresses used for routing
-    inside the fip namespaces need to persist across agent restarts to maintain
-    consistency. Persisting such allocations in the neutron database is
-    unnecessary and would degrade performance. ItemAllocator utilizes local
-    file system to track allocations made for objects of a given class.
-
-    The persistent datastore is a file with one record per line in the
-    format key<delimiter>value.  For example, if the delimiter is ','
-    (the default), a record looks like: key,value (one per line).
-    """
-
-    def __init__(self, state_file, ItemClass, item_pool, delimiter=','):
-        """Read the file with previous allocations recorded.
-
-        See the note in the allocate method for more detail.
-        """
-        self.ItemClass = ItemClass
-        self.state_file = state_file
-
-        self.allocations = {}
-
-        self.remembered = {}
-        self.pool = item_pool
-
-        for line in self._read():
-            key, saved_value = line.strip().split(delimiter)
-            self.remembered[key] = self.ItemClass(saved_value)
-
-        self.pool.difference_update(self.remembered.values())
-
-    def allocate(self, key):
-        """Try to allocate an item of ItemClass type.
-
-        I expect this to work in all cases because I expect the pool size to be
-        large enough for any situation.  Nonetheless, there is some defensive
-        programming in here.
-
-        Since the allocations are persisted, there is the chance to leak
-        allocations which should have been released but were not.  This leak
-        could eventually exhaust the pool.
-
-        So, if a new allocation is needed, the code first checks to see if
-        there are any remembered allocations for the key.  If not, it checks
-        the free pool.  If the free pool is empty then it dumps the remembered
-        allocations to free the pool.  This final desperate step will not
-        happen often in practice.
-        """
-        if key in self.remembered:
-            self.allocations[key] = self.remembered.pop(key)
-            return self.allocations[key]
-
-        if not self.pool:
-            # Desperate times.  Try to get more in the pool.
-            self.pool.update(self.remembered.values())
-            self.remembered.clear()
-            if not self.pool:
-                # More than 256 routers on a compute node!
-                raise RuntimeError("Cannot allocate item of type:"
-                                   " %s from pool using file %s"
-                                   % (self.ItemClass, self.state_file))
-
-        self.allocations[key] = self.pool.pop()
-        self._write_allocations()
-        return self.allocations[key]
-
-    def release(self, key):
-        self.pool.add(self.allocations.pop(key))
-        self._write_allocations()
-
-    def _write_allocations(self):
-        current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()]
-        remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()]
-        current.extend(remembered)
-        self._write(current)
-
-    def _write(self, lines):
-        with open(self.state_file, "w") as f:
-            f.writelines(lines)
-
-    def _read(self):
-        if not os.path.exists(self.state_file):
-            return []
-        with open(self.state_file) as f:
-            return f.readlines()
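-
-# A minimal usage sketch (illustrative; the state file path, item class and
-# pool below are hypothetical):
-#
-#     alloc = ItemAllocator('/tmp/alloc_state', str, {'a', 'b', 'c'})
-#     item = alloc.allocate('router-1')   # persists "router-1,<item>" to disk
-#     alloc.release('router-1')           # returns the item to the pool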
diff --git a/neutron/agent/l3/keepalived_state_change.py b/neutron/agent/l3/keepalived_state_change.py
deleted file mode 100644 (file)
index 1d986ef..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import sys
-
-import httplib2
-from oslo_config import cfg
-from oslo_log import log as logging
-import requests
-
-from neutron._i18n import _, _LE
-from neutron.agent.l3 import ha
-from neutron.agent.linux import daemon
-from neutron.agent.linux import ip_monitor
-from neutron.agent.linux import utils as agent_utils
-from neutron.common import config
-
-
-LOG = logging.getLogger(__name__)
-
-
-class KeepalivedUnixDomainConnection(agent_utils.UnixDomainHTTPConnection):
-    def __init__(self, *args, **kwargs):
-        # Old style super initialization is required!
-        agent_utils.UnixDomainHTTPConnection.__init__(
-            self, *args, **kwargs)
-        self.socket_path = (
-            ha.L3AgentKeepalivedStateChangeServer.
-            get_keepalived_state_change_socket_path(cfg.CONF))
-
-
-class MonitorDaemon(daemon.Daemon):
-    def __init__(self, pidfile, router_id, user, group, namespace, conf_dir,
-                 interface, cidr):
-        self.router_id = router_id
-        self.namespace = namespace
-        self.conf_dir = conf_dir
-        self.interface = interface
-        self.cidr = cidr
-        super(MonitorDaemon, self).__init__(pidfile, uuid=router_id,
-                                            user=user, group=group)
-
-    def run(self, run_as_root=False):
-        monitor = ip_monitor.IPMonitor(namespace=self.namespace,
-                                       run_as_root=run_as_root)
-        monitor.start()
-        # Only drop privileges if the process is currently running as root.
-        # (The run_as_root variable name here is unfortunate - it means to
-        # use a root helper when the running process is NOT already running
-        # as root.)
-        if not run_as_root:
-            super(MonitorDaemon, self).run()
-        for iterable in monitor:
-            self.parse_and_handle_event(iterable)
-
-    def parse_and_handle_event(self, iterable):
-        try:
-            event = ip_monitor.IPMonitorEvent.from_text(iterable)
-            if event.interface == self.interface and event.cidr == self.cidr:
-                new_state = 'master' if event.added else 'backup'
-                self.write_state_change(new_state)
-                self.notify_agent(new_state)
-        except Exception:
-            LOG.exception(_LE(
-                'Failed to process or handle event for line %s'), iterable)
-
-    def write_state_change(self, state):
-        with open(os.path.join(
-                self.conf_dir, 'state'), 'w') as state_file:
-            state_file.write(state)
-        LOG.debug('Wrote router %s state %s', self.router_id, state)
-
-    def notify_agent(self, state):
-        resp, content = httplib2.Http().request(
-            # Note that the message is sent via a Unix domain socket so that
-            # the URL doesn't matter.
-            'http://127.0.0.1/',
-            headers={'X-Neutron-Router-Id': self.router_id,
-                     'X-Neutron-State': state},
-            connection_type=KeepalivedUnixDomainConnection)
-
-        if resp.status != requests.codes.ok:
-            raise Exception(_('Unexpected response: %s') % resp)
-
-        LOG.debug('Notified agent router %s, state %s', self.router_id, state)
-
-
-def register_opts(conf):
-    conf.register_cli_opt(
-        cfg.StrOpt('router_id', help=_('ID of the router')))
-    conf.register_cli_opt(
-        cfg.StrOpt('namespace', help=_('Namespace of the router')))
-    conf.register_cli_opt(
-        cfg.StrOpt('conf_dir', help=_('Path to the router directory')))
-    conf.register_cli_opt(
-        cfg.StrOpt('monitor_interface', help=_('Interface to monitor')))
-    conf.register_cli_opt(
-        cfg.StrOpt('monitor_cidr', help=_('CIDR to monitor')))
-    conf.register_cli_opt(
-        cfg.StrOpt('pid_file', help=_('Path to PID file for this process')))
-    conf.register_cli_opt(
-        cfg.StrOpt('user', help=_('User (uid or name) running this process '
-                                  'after its initialization')))
-    conf.register_cli_opt(
-        cfg.StrOpt('group', help=_('Group (gid or name) running this process '
-                                   'after its initialization')))
-    conf.register_opt(
-        cfg.StrOpt('metadata_proxy_socket',
-                   default='$state_path/metadata_proxy',
-                   help=_('Location of Metadata Proxy UNIX domain '
-                          'socket')))
-
-
-def configure(conf):
-    config.init(sys.argv[1:])
-    conf.set_override('log_dir', cfg.CONF.conf_dir)
-    conf.set_override('debug', True)
-    conf.set_override('verbose', True)
-    config.setup_logging()
-
-
-def main():
-    register_opts(cfg.CONF)
-    configure(cfg.CONF)
-    MonitorDaemon(cfg.CONF.pid_file,
-                  cfg.CONF.router_id,
-                  cfg.CONF.user,
-                  cfg.CONF.group,
-                  cfg.CONF.namespace,
-                  cfg.CONF.conf_dir,
-                  cfg.CONF.monitor_interface,
-                  cfg.CONF.monitor_cidr).start()
diff --git a/neutron/agent/l3/legacy_router.py b/neutron/agent/l3/legacy_router.py
deleted file mode 100644 (file)
index 2b8ccdb..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.l3 import router_info as router
-from neutron.agent.linux import ip_lib
-from neutron.common import constants as l3_constants
-
-
-class LegacyRouter(router.RouterInfo):
-    def add_floating_ip(self, fip, interface_name, device):
-        if not self._add_fip_addr_to_device(fip, device):
-            return l3_constants.FLOATINGIP_STATUS_ERROR
-
-        # As GARP is processed in a distinct thread, the call below
-        # won't raise an exception to be handled.
-        ip_lib.send_ip_addr_adv_notif(self.ns_name,
-                                      interface_name,
-                                      fip['floating_ip_address'],
-                                      self.agent_conf)
-        return l3_constants.FLOATINGIP_STATUS_ACTIVE
diff --git a/neutron/agent/l3/link_local_allocator.py b/neutron/agent/l3/link_local_allocator.py
deleted file mode 100644 (file)
index f7fc84b..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-
-from neutron.agent.l3.item_allocator import ItemAllocator
-
-
-class LinkLocalAddressPair(netaddr.IPNetwork):
-    def __init__(self, addr):
-        super(LinkLocalAddressPair, self).__init__(addr)
-
-    def get_pair(self):
-        """Builds an address pair from the first and last addresses. """
-        # TODO(kevinbenton): the callers of this seem only interested in an IP,
-        # so we should just return two IPAddresses.
-        return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)),
-                netaddr.IPNetwork("%s/%s" % (self[-1], self.prefixlen)))
-
-
-class LinkLocalAllocator(ItemAllocator):
-    """Manages allocation of link local IP addresses.
-
-    These link local addresses are used for routing inside the fip namespaces.
-    The associations need to persist across agent restarts to maintain
-    consistency.  Without this, there is disruption in network connectivity
-    as the agent rewires the connections with the new IP address associations.
-
-    Persisting these in the database is unnecessary and would degrade
-    performance.
-    """
-    def __init__(self, data_store_path, subnet):
-        """Create the necessary pool and item allocator
-            using ',' as the delimiter and LinkLocalAllocator as the
-            class type
-        """
-        subnet = netaddr.IPNetwork(subnet)
-        pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31))
-        super(LinkLocalAllocator, self).__init__(data_store_path,
-                                                 LinkLocalAddressPair,
-                                                 pool)
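-
-# e.g. (illustrative): every pool item is a /31 pair, so
-#
-#     LinkLocalAddressPair('169.254.31.28/31').get_pair()
-#
-# returns (IPNetwork('169.254.31.28/31'), IPNetwork('169.254.31.29/31')),
-# the two endpoints of one point-to-point link inside a fip namespace.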
diff --git a/neutron/agent/l3/namespace_manager.py b/neutron/agent/l3/namespace_manager.py
deleted file mode 100644 (file)
index 7d3acec..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LE
-from neutron.agent.l3 import dvr_fip_ns
-from neutron.agent.l3 import dvr_snat_ns
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import external_process
-from neutron.agent.linux import ip_lib
-
-LOG = logging.getLogger(__name__)
-
-
-class NamespaceManager(object):
-
-    """Keeps track of namespaces that need to be cleaned up.
-
-    This is a context manager that looks to clean up stale namespaces that
-    have not been touched by the end of the "with" statement it is called
-    in.  This formalizes the pattern used in the L3 agent which enumerated
-    all of the namespaces known to the system before a full sync.  Then,
-    after the full sync completed, it cleaned up any that were not touched
-    during the sync. The agent and this context manager communicate through
-    the keep_router method. In the "with" statement, the agent calls
-    keep_router to record the IDs of the routers whose namespaces should be
-    preserved. Any other router or SNAT namespace present on the system will
-    be deleted by the __exit__ method of this context manager.
-
-    This pattern can be more generally applicable to other resources
-    besides namespaces in the future because it is idempotent and, as such,
-    does not rely on state recorded at runtime in the agent so it handles
-    agent restarts gracefully.
-    """
-
-    ns_prefix_to_class_map = {
-        namespaces.NS_PREFIX: namespaces.RouterNamespace,
-        dvr_snat_ns.SNAT_NS_PREFIX: dvr_snat_ns.SnatNamespace,
-        dvr_fip_ns.FIP_NS_PREFIX: dvr_fip_ns.FipNamespace,
-    }
-
-    def __init__(self, agent_conf, driver, metadata_driver=None):
-        """Initialize the NamespaceManager.
-
-        :param agent_conf: configuration from l3 agent
-        :param driver: to perform operations on devices
-        :param metadata_driver: used to cleanup stale metadata proxy processes
-        """
-        self.agent_conf = agent_conf
-        self.driver = driver
-        self._clean_stale = True
-        self.metadata_driver = metadata_driver
-        if metadata_driver:
-            self.process_monitor = external_process.ProcessMonitor(
-                config=agent_conf,
-                resource_type='router')
-
-    def __enter__(self):
-        self._all_namespaces = set()
-        self._ids_to_keep = set()
-        if self._clean_stale:
-            self._all_namespaces = self.list_all()
-        return self
-
-    def __exit__(self, exc_type, value, traceback):
-        # TODO(carl) Preserves old behavior of L3 agent where cleaning
-        # namespaces was only done once after restart.  Still a good idea?
-        if exc_type:
-            # An exception occurred in the caller's with statement
-            return False
-        if not self._clean_stale:
-            # No need to cleanup
-            return True
-        self._clean_stale = False
-
-        for ns in self._all_namespaces:
-            _ns_prefix, ns_id = self.get_prefix_and_id(ns)
-            if ns_id in self._ids_to_keep:
-                continue
-            self._cleanup(_ns_prefix, ns_id)
-
-        return True
-
-    def keep_router(self, router_id):
-        self._ids_to_keep.add(router_id)
-
-    def keep_ext_net(self, ext_net_id):
-        self._ids_to_keep.add(ext_net_id)
-
-    def get_prefix_and_id(self, ns_name):
-        """Get the prefix and id from the namespace name.
-
-        :param ns_name: The name of the namespace
-        :returns: tuple with prefix and id or None if no prefix matches
-        """
-        prefix = namespaces.get_prefix_from_ns_name(ns_name)
-        if prefix in self.ns_prefix_to_class_map:
-            identifier = namespaces.get_id_from_ns_name(ns_name)
-            return (prefix, identifier)
-
-    def is_managed(self, ns_name):
-        """Return True if the namespace name passed belongs to this manager."""
-        return self.get_prefix_and_id(ns_name) is not None
-
-    def list_all(self):
-        """Get a set of all namespaces on host managed by this manager."""
-        try:
-            root_ip = ip_lib.IPWrapper()
-            namespaces = root_ip.get_namespaces()
-            return set(ns for ns in namespaces if self.is_managed(ns))
-        except RuntimeError:
-            LOG.exception(_LE('RuntimeError in obtaining namespace list for '
-                              'namespace cleanup.'))
-            return set()
-
-    def ensure_router_cleanup(self, router_id):
-        """Performs cleanup for a router"""
-        for ns in self.list_all():
-            if ns.endswith(router_id):
-                ns_prefix, ns_id = self.get_prefix_and_id(ns)
-                self._cleanup(ns_prefix, ns_id)
-
-    def _cleanup(self, ns_prefix, ns_id):
-        ns_class = self.ns_prefix_to_class_map[ns_prefix]
-        ns = ns_class(ns_id, self.agent_conf, self.driver, use_ipv6=False)
-        try:
-            if self.metadata_driver:
-                # cleanup stale metadata proxy processes first
-                self.metadata_driver.destroy_monitored_metadata_proxy(
-                    self.process_monitor, ns_id, self.agent_conf)
-            ns.delete()
-        except RuntimeError:
-            LOG.exception(_LE('Failed to destroy stale namespace %s'), ns)
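-
-# A minimal usage sketch of the pattern described in the class docstring
-# (illustrative; conf, driver and the router id are hypothetical):
-#
-#     with NamespaceManager(conf, driver) as ns_manager:
-#         ns_manager.keep_router('a1b2c3')
-#     # on exit, any unclaimed router/snat/fip namespace is cleaned up once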
diff --git a/neutron/agent/l3/namespaces.py b/neutron/agent/l3/namespaces.py
deleted file mode 100644 (file)
index ccb0bf7..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LE
-from neutron.agent.linux import ip_lib
-
-LOG = logging.getLogger(__name__)
-
-NS_PREFIX = 'qrouter-'
-INTERNAL_DEV_PREFIX = 'qr-'
-EXTERNAL_DEV_PREFIX = 'qg-'
-# TODO(Carl) It is odd that this file needs this.  It is a dvr detail.
-ROUTER_2_FIP_DEV_PREFIX = 'rfp-'
-
-
-def build_ns_name(prefix, identifier):
-    """Builds a namespace name from the given prefix and identifier
-
-    :param prefix: The prefix which must end with '-' for legacy reasons
-    :param identifier: The id associated with the namespace
-    """
-    return prefix + identifier
-
-
-def get_prefix_from_ns_name(ns_name):
-    """Parses prefix from prefix-identifier
-
-    :param ns_name: The name of a namespace
-    :returns: The prefix ending with a '-' or None if there is no '-'
-    """
-    dash_index = ns_name.find('-')
-    if 0 <= dash_index:
-        return ns_name[:dash_index + 1]
-
-
-def get_id_from_ns_name(ns_name):
-    """Parses identifier from prefix-identifier
-
-    :param ns_name: The name of a namespace
-    :returns: Identifier or None if there is no '-' to end the prefix
-    """
-    dash_index = ns_name.find('-')
-    if 0 <= dash_index:
-        return ns_name[dash_index + 1:]
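-
-# e.g. (illustrative, with a hypothetical id):
-#
-#     build_ns_name('qrouter-', 'a1b2c3')        # -> 'qrouter-a1b2c3'
-#     get_prefix_from_ns_name('qrouter-a1b2c3')  # -> 'qrouter-'
-#     get_id_from_ns_name('qrouter-a1b2c3')      # -> 'a1b2c3'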
-
-
-class Namespace(object):
-
-    def __init__(self, name, agent_conf, driver, use_ipv6):
-        self.name = name
-        self.ip_wrapper_root = ip_lib.IPWrapper()
-        self.agent_conf = agent_conf
-        self.driver = driver
-        self.use_ipv6 = use_ipv6
-
-    def create(self):
-        ip_wrapper = self.ip_wrapper_root.ensure_namespace(self.name)
-        cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1']
-        ip_wrapper.netns.execute(cmd)
-        if self.use_ipv6:
-            cmd = ['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1']
-            ip_wrapper.netns.execute(cmd)
-
-    def delete(self):
-        try:
-            self.ip_wrapper_root.netns.delete(self.name)
-        except RuntimeError:
-            msg = _LE('Failed trying to delete namespace: %s')
-            LOG.exception(msg, self.name)
-
-
-class RouterNamespace(Namespace):
-
-    def __init__(self, router_id, agent_conf, driver, use_ipv6):
-        self.router_id = router_id
-        name = self._get_ns_name(router_id)
-        super(RouterNamespace, self).__init__(
-            name, agent_conf, driver, use_ipv6)
-
-    @classmethod
-    def _get_ns_name(cls, router_id):
-        return build_ns_name(NS_PREFIX, router_id)
-
-    def delete(self):
-        ns_ip = ip_lib.IPWrapper(namespace=self.name)
-        for d in ns_ip.get_devices(exclude_loopback=True):
-            if d.name.startswith(INTERNAL_DEV_PREFIX):
-                # device is on default bridge
-                self.driver.unplug(d.name, namespace=self.name,
-                                   prefix=INTERNAL_DEV_PREFIX)
-            elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX):
-                ns_ip.del_veth(d.name)
-            elif d.name.startswith(EXTERNAL_DEV_PREFIX):
-                self.driver.unplug(
-                    d.name,
-                    bridge=self.agent_conf.external_network_bridge,
-                    namespace=self.name,
-                    prefix=EXTERNAL_DEV_PREFIX)
-
-        super(RouterNamespace, self).delete()
diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py
deleted file mode 100644 (file)
index 5be4609..0000000
+++ /dev/null
@@ -1,747 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LE, _LW
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import iptables_manager
-from neutron.agent.linux import ra
-from neutron.common import constants as l3_constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.common import utils as common_utils
-
-LOG = logging.getLogger(__name__)
-INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
-EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
-
-FLOATINGIP_STATUS_NOCHANGE = object()
-
-
-class RouterInfo(object):
-
-    def __init__(self,
-                 router_id,
-                 router,
-                 agent_conf,
-                 interface_driver,
-                 use_ipv6=False):
-        self.router_id = router_id
-        self.ex_gw_port = None
-        self._snat_enabled = None
-        self.internal_ports = []
-        self.floating_ips = set()
-        # Invoke the setter for establishing initial SNAT action
-        self.router = router
-        self.use_ipv6 = use_ipv6
-        ns = namespaces.RouterNamespace(
-            router_id, agent_conf, interface_driver, use_ipv6)
-        self.router_namespace = ns
-        self.ns_name = ns.name
-        self.iptables_manager = iptables_manager.IptablesManager(
-            use_ipv6=use_ipv6,
-            namespace=self.ns_name)
-        self.routes = []
-        self.agent_conf = agent_conf
-        self.driver = interface_driver
-        # radvd is a neutron.agent.linux.ra.DaemonMonitor
-        self.radvd = None
-
-    def initialize(self, process_monitor):
-        """Initialize the router on the system.
-
-        This differs from __init__ in that this method actually affects the
-        system, creating namespaces, starting processes, etc.  The other
-        merely initializes the Python object.  This separates in-memory
-        object initialization from methods that actually modify the system.
-
-        :param process_monitor: The agent's process monitor instance.
-        """
-        self.process_monitor = process_monitor
-        self.radvd = ra.DaemonMonitor(self.router_id,
-                                      self.ns_name,
-                                      process_monitor,
-                                      self.get_internal_device_name,
-                                      self.agent_conf)
-
-        self.router_namespace.create()
-
-    @property
-    def router(self):
-        return self._router
-
-    @router.setter
-    def router(self, value):
-        self._router = value
-        if not self._router:
-            return
-        # enable_snat by default if it wasn't specified by plugin
-        self._snat_enabled = self._router.get('enable_snat', True)
-
-    def get_internal_device_name(self, port_id):
-        return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
-
-    def get_external_device_name(self, port_id):
-        return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
-
-    def get_external_device_interface_name(self, ex_gw_port):
-        return self.get_external_device_name(ex_gw_port['id'])
-
-    def _update_routing_table(self, operation, route, namespace):
-        cmd = ['ip', 'route', operation, 'to', route['destination'],
-               'via', route['nexthop']]
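-        # e.g. (illustrative): operation='replace' with route
-        # {'destination': '10.20.0.0/24', 'nexthop': '10.0.0.5'} executes
-        #     ip route replace to 10.20.0.0/24 via 10.0.0.5
-        # inside the router's namespace.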
-        ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
-        ip_wrapper.netns.execute(cmd, check_exit_code=False)
-
-    def update_routing_table(self, operation, route):
-        self._update_routing_table(operation, route, self.ns_name)
-
-    def routes_updated(self, old_routes, new_routes):
-        adds, removes = common_utils.diff_list_of_dict(old_routes,
-                                                       new_routes)
-        for route in adds:
-            LOG.debug("Added route entry is '%s'", route)
-            # drop a replaced route from the removal list; iterate over a
-            # copy, since the list is mutated inside the loop
-            for del_route in list(removes):
-                if route['destination'] == del_route['destination']:
-                    removes.remove(del_route)
-            # 'replace' succeeds even if there is no existing route
-            self.update_routing_table('replace', route)
-        for route in removes:
-            LOG.debug("Removed route entry is '%s'", route)
-            self.update_routing_table('delete', route)
-
-    def get_ex_gw_port(self):
-        return self.router.get('gw_port')
-
-    def get_floating_ips(self):
-        """Filter Floating IPs to be hosted on this agent."""
-        return self.router.get(l3_constants.FLOATINGIP_KEY, [])
-
-    def floating_forward_rules(self, floating_ip, fixed_ip):
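-        # e.g. (illustrative): floating_ip 203.0.113.5 with fixed_ip 10.0.0.3
-        # yields DNAT rules in PREROUTING and OUTPUT
-        # ('-d 203.0.113.5 -j DNAT --to 10.0.0.3') plus an SNAT rule in the
-        # float-snat chain ('-s 10.0.0.3 -j SNAT --to 203.0.113.5').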
-        return [('PREROUTING', '-d %s -j DNAT --to %s' %
-                 (floating_ip, fixed_ip)),
-                ('OUTPUT', '-d %s -j DNAT --to %s' %
-                 (floating_ip, fixed_ip)),
-                ('float-snat', '-s %s -j SNAT --to %s' %
-                 (fixed_ip, floating_ip))]
-
-    def process_floating_ip_nat_rules(self):
-        """Configure NAT rules for the router's floating IPs.
-
-        Configures iptables rules for the floating ips of the given router
-        """
-        # Clear out all iptables rules for floating ips
-        self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
-
-        floating_ips = self.get_floating_ips()
-        # Loop once to ensure that floating ips are configured.
-        for fip in floating_ips:
-            # Rebuild iptables rules for the floating ip.
-            fixed = fip['fixed_ip_address']
-            fip_ip = fip['floating_ip_address']
-            for chain, rule in self.floating_forward_rules(fip_ip, fixed):
-                self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
-                                                           tag='floating_ip')
-
-        self.iptables_manager.apply()
-
-    def process_snat_dnat_for_fip(self):
-        try:
-            self.process_floating_ip_nat_rules()
-        except Exception:
-            # TODO(salv-orlando): Less broad catching
-            raise n_exc.FloatingIpSetupException(
-                'L3 agent failure to setup NAT for floating IPs')
-
-    def _add_fip_addr_to_device(self, fip, device):
-        """Configures the floating ip address on the device.
-        """
-        try:
-            ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
-            device.addr.add(ip_cidr)
-            return True
-        except RuntimeError:
-            # any exception occurred here should cause the floating IP
-            # to be set in error state
-            LOG.warn(_LW("Unable to configure IP address for "
-                         "floating IP: %s"), fip['id'])
-
-    def add_floating_ip(self, fip, interface_name, device):
-        raise NotImplementedError()
-
-    def remove_floating_ip(self, device, ip_cidr):
-        device.delete_addr_and_conntrack_state(ip_cidr)
-
-    def remove_external_gateway_ip(self, device, ip_cidr):
-        device.delete_addr_and_conntrack_state(ip_cidr)
-
-    def get_router_cidrs(self, device):
-        return set([addr['cidr'] for addr in device.addr.list()])
-
-    def process_floating_ip_addresses(self, interface_name):
-        """Configure IP addresses on router's external gateway interface.
-
-        Ensures addresses for existing floating IPs and cleans up
-        those that should no longer be configured.
-        """
-
-        fip_statuses = {}
-        if interface_name is None:
-            LOG.debug('No Interface for floating IPs router: %s',
-                      self.router['id'])
-            return fip_statuses
-
-        device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
-        existing_cidrs = self.get_router_cidrs(device)
-        new_cidrs = set()
-
-        floating_ips = self.get_floating_ips()
-        # Loop once to ensure that floating ips are configured.
-        for fip in floating_ips:
-            fip_ip = fip['floating_ip_address']
-            ip_cidr = common_utils.ip_to_cidr(fip_ip)
-            new_cidrs.add(ip_cidr)
-            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
-            if ip_cidr not in existing_cidrs:
-                fip_statuses[fip['id']] = self.add_floating_ip(
-                    fip, interface_name, device)
-                LOG.debug('Floating ip %(id)s added, status %(status)s',
-                          {'id': fip['id'],
-                           'status': fip_statuses.get(fip['id'])})
-            elif fip_statuses[fip['id']] == fip['status']:
-                # mark the status as not changed. We can't drop the entry
-                # because the caller treats a missing entry as a removed
-                # floating IP.
-                fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE
-        fips_to_remove = (
-            ip_cidr for ip_cidr in existing_cidrs - new_cidrs
-            if common_utils.is_cidr_host(ip_cidr))
-        for ip_cidr in fips_to_remove:
-            LOG.debug("Removing floating ip %s from interface %s in "
-                      "namespace %s", ip_cidr, interface_name, self.ns_name)
-            self.remove_floating_ip(device, ip_cidr)
-
-        return fip_statuses
-
-    def configure_fip_addresses(self, interface_name):
-        try:
-            return self.process_floating_ip_addresses(interface_name)
-        except Exception:
-            # TODO(salv-orlando): Less broad catching
-            raise n_exc.FloatingIpSetupException('L3 agent failure to setup '
-                'floating IPs')
-
-    def put_fips_in_error_state(self):
-        fip_statuses = {}
-        for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []):
-            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
-        return fip_statuses
-
-    def delete(self, agent):
-        self.router['gw_port'] = None
-        self.router[l3_constants.INTERFACE_KEY] = []
-        self.router[l3_constants.FLOATINGIP_KEY] = []
-        self.process_delete(agent)
-        self.disable_radvd()
-        self.router_namespace.delete()
-
-    def _internal_network_updated(self, port, subnet_id, prefix, old_prefix,
-                                  updated_cidrs):
-        interface_name = self.get_internal_device_name(port['id'])
-        if prefix != l3_constants.PROVISIONAL_IPV6_PD_PREFIX:
-            fixed_ips = port['fixed_ips']
-            for fixed_ip in fixed_ips:
-                if fixed_ip['subnet_id'] == subnet_id:
-                    v6addr = common_utils.ip_to_cidr(fixed_ip['ip_address'],
-                                                     fixed_ip.get('prefixlen'))
-                    if v6addr not in updated_cidrs:
-                        self.driver.add_ipv6_addr(interface_name, v6addr,
-                                                  self.ns_name)
-        else:
-            self.driver.delete_ipv6_addr_with_prefix(interface_name,
-                                                     old_prefix,
-                                                     self.ns_name)
-
-    def _internal_network_added(self, ns_name, network_id, port_id,
-                                fixed_ips, mac_address,
-                                interface_name, prefix):
-        LOG.debug("adding internal network: prefix(%s), port(%s)",
-                  prefix, port_id)
-        self.driver.plug(network_id, port_id, interface_name, mac_address,
-                         namespace=ns_name,
-                         prefix=prefix)
-
-        ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips)
-        self.driver.init_router_port(
-            interface_name, ip_cidrs, namespace=ns_name)
-        for fixed_ip in fixed_ips:
-            ip_lib.send_ip_addr_adv_notif(ns_name,
-                                          interface_name,
-                                          fixed_ip['ip_address'],
-                                          self.agent_conf)
-
-    def internal_network_added(self, port):
-        network_id = port['network_id']
-        port_id = port['id']
-        fixed_ips = port['fixed_ips']
-        mac_address = port['mac_address']
-
-        interface_name = self.get_internal_device_name(port_id)
-
-        self._internal_network_added(self.ns_name,
-                                     network_id,
-                                     port_id,
-                                     fixed_ips,
-                                     mac_address,
-                                     interface_name,
-                                     INTERNAL_DEV_PREFIX)
-
-    def internal_network_removed(self, port):
-        interface_name = self.get_internal_device_name(port['id'])
-        LOG.debug("removing internal network: port(%s) interface(%s)",
-                  port['id'], interface_name)
-        if ip_lib.device_exists(interface_name, namespace=self.ns_name):
-            self.driver.unplug(interface_name, namespace=self.ns_name,
-                               prefix=INTERNAL_DEV_PREFIX)
-
-    def _get_existing_devices(self):
-        ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
-        ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
-        return [ip_dev.name for ip_dev in ip_devs]
-
-    @staticmethod
-    def _get_updated_ports(existing_ports, current_ports):
-        updated_ports = dict()
-        current_ports_dict = {p['id']: p for p in current_ports}
-        for existing_port in existing_ports:
-            current_port = current_ports_dict.get(existing_port['id'])
-            if current_port:
-                if (sorted(existing_port['fixed_ips'],
-                           key=common_utils.safe_sort_key) !=
-                        sorted(current_port['fixed_ips'],
-                               key=common_utils.safe_sort_key)):
-                    updated_ports[current_port['id']] = current_port
-        return updated_ports
-
-    @staticmethod
-    def _port_has_ipv6_subnet(port):
-        if 'subnets' in port:
-            for subnet in port['subnets']:
-                if (netaddr.IPNetwork(subnet['cidr']).version == 6 and
-                    subnet['cidr'] != l3_constants.PROVISIONAL_IPV6_PD_PREFIX):
-                    return True
-
-    def enable_radvd(self, internal_ports=None):
-        LOG.debug('Spawning radvd daemon in router device: %s', self.router_id)
-        if not internal_ports:
-            internal_ports = self.internal_ports
-        self.radvd.enable(internal_ports)
-
-    def disable_radvd(self):
-        LOG.debug('Terminating radvd daemon in router device: %s',
-                  self.router_id)
-        self.radvd.disable()
-
-    def internal_network_updated(self, interface_name, ip_cidrs):
-        self.driver.init_l3(interface_name, ip_cidrs=ip_cidrs,
-            namespace=self.ns_name)
-
-    def _process_internal_ports(self, pd):
-        existing_port_ids = set(p['id'] for p in self.internal_ports)
-
-        internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
-        current_port_ids = set(p['id'] for p in internal_ports
-                               if p['admin_state_up'])
-
-        new_port_ids = current_port_ids - existing_port_ids
-        new_ports = [p for p in internal_ports if p['id'] in new_port_ids]
-        old_ports = [p for p in self.internal_ports
-                     if p['id'] not in current_port_ids]
-        updated_ports = self._get_updated_ports(self.internal_ports,
-                                                internal_ports)
-
-        enable_ra = False
-        for p in new_ports:
-            self.internal_network_added(p)
-            LOG.debug("appending port %s to internal_ports cache", p)
-            self.internal_ports.append(p)
-            enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
-            for subnet in p['subnets']:
-                if ipv6_utils.is_ipv6_pd_enabled(subnet):
-                    interface_name = self.get_internal_device_name(p['id'])
-                    pd.enable_subnet(self.router_id, subnet['id'],
-                                     subnet['cidr'],
-                                     interface_name, p['mac_address'])
-
-        for p in old_ports:
-            self.internal_network_removed(p)
-            LOG.debug("removing port %s from internal_ports cache", p)
-            self.internal_ports.remove(p)
-            enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
-            for subnet in p['subnets']:
-                if ipv6_utils.is_ipv6_pd_enabled(subnet):
-                    pd.disable_subnet(self.router_id, subnet['id'])
-
-        updated_cidrs = []
-        if updated_ports:
-            for index, p in enumerate(internal_ports):
-                if not updated_ports.get(p['id']):
-                    continue
-                self.internal_ports[index] = updated_ports[p['id']]
-                interface_name = self.get_internal_device_name(p['id'])
-                ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
-                LOG.debug("updating internal network for port %s", p)
-                updated_cidrs += ip_cidrs
-                self.internal_network_updated(interface_name, ip_cidrs)
-                enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
-
-        # Check if there is any pd prefix update
-        for p in internal_ports:
-            if p['id'] in (set(current_port_ids) & set(existing_port_ids)):
-                for subnet in p.get('subnets', []):
-                    if ipv6_utils.is_ipv6_pd_enabled(subnet):
-                        old_prefix = pd.update_subnet(self.router_id,
-                                                      subnet['id'],
-                                                      subnet['cidr'])
-                        if old_prefix:
-                            self._internal_network_updated(p, subnet['id'],
-                                                           subnet['cidr'],
-                                                           old_prefix,
-                                                           updated_cidrs)
-                            enable_ra = True
-
-        # Enable RA
-        if enable_ra:
-            self.enable_radvd(internal_ports)
-
-        existing_devices = self._get_existing_devices()
-        current_internal_devs = set(n for n in existing_devices
-                                    if n.startswith(INTERNAL_DEV_PREFIX))
-        current_port_devs = set(self.get_internal_device_name(port_id)
-                                for port_id in current_port_ids)
-        stale_devs = current_internal_devs - current_port_devs
-        for stale_dev in stale_devs:
-            LOG.debug('Deleting stale internal router device: %s',
-                      stale_dev)
-            pd.remove_stale_ri_ifname(self.router_id, stale_dev)
-            self.driver.unplug(stale_dev,
-                               namespace=self.ns_name,
-                               prefix=INTERNAL_DEV_PREFIX)
-
-    def _list_floating_ip_cidrs(self):
-        # Compute a list of addresses this router is supposed to have.
-        # This avoids unnecessarily removing those addresses and
-        # causing a momentary network outage.
-        floating_ips = self.get_floating_ips()
-        return [common_utils.ip_to_cidr(ip['floating_ip_address'])
-                for ip in floating_ips]
-
-    def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
-        self.driver.plug(ex_gw_port['network_id'],
-                         ex_gw_port['id'],
-                         interface_name,
-                         ex_gw_port['mac_address'],
-                         bridge=self.agent_conf.external_network_bridge,
-                         namespace=ns_name,
-                         prefix=EXTERNAL_DEV_PREFIX)
-
-    def _get_external_gw_ips(self, ex_gw_port):
-        gateway_ips = []
-        if 'subnets' in ex_gw_port:
-            gateway_ips = [subnet['gateway_ip']
-                           for subnet in ex_gw_port['subnets']
-                           if subnet['gateway_ip']]
-        if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
-            # No IPv6 gateway is available, but IPv6 is enabled.
-            if self.agent_conf.ipv6_gateway:
-                # ipv6_gateway configured, use address for default route.
-                gateway_ips.append(self.agent_conf.ipv6_gateway)
-        return gateway_ips
-
-    def _external_gateway_added(self, ex_gw_port, interface_name,
-                                ns_name, preserve_ips):
-        LOG.debug("External gateway added: port(%s), interface(%s), ns(%s)",
-                  ex_gw_port, interface_name, ns_name)
-        self._plug_external_gateway(ex_gw_port, interface_name, ns_name)
-
-        # Build up the interface and gateway IP addresses that
-        # will be added to the interface.
-        ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
-
-        gateway_ips = self._get_external_gw_ips(ex_gw_port)
-        enable_ra_on_gw = False
-        if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
-            # There is no IPv6 gw_ip, use RouterAdvt for default route.
-            enable_ra_on_gw = True
-
-        self.driver.init_router_port(
-            interface_name,
-            ip_cidrs,
-            namespace=ns_name,
-            extra_subnets=ex_gw_port.get('extra_subnets', []),
-            preserve_ips=preserve_ips,
-            clean_connections=True)
-
-        device = ip_lib.IPDevice(interface_name, namespace=ns_name)
-        for ip in gateway_ips or []:
-            device.route.add_gateway(ip)
-
-        if enable_ra_on_gw:
-            self.driver.configure_ipv6_ra(ns_name, interface_name)
-
-        for fixed_ip in ex_gw_port['fixed_ips']:
-            ip_lib.send_ip_addr_adv_notif(ns_name,
-                                          interface_name,
-                                          fixed_ip['ip_address'],
-                                          self.agent_conf)
-
-    def is_v6_gateway_set(self, gateway_ips):
-        """Check to see if list of gateway_ips has an IPv6 gateway.
-        """
-        # Note: no try-except is required here because all
-        # gateway_ips elements are valid addresses, if they exist.
-        return any(netaddr.IPAddress(gw_ip).version == 6
-                   for gw_ip in gateway_ips)
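-
-    # e.g. (hypothetical addresses):
-    #   is_v6_gateway_set(['192.0.2.1'])                 -> False
-    #   is_v6_gateway_set(['192.0.2.1', '2001:db8::1'])  -> True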
-
-    def external_gateway_added(self, ex_gw_port, interface_name):
-        preserve_ips = self._list_floating_ip_cidrs()
-        self._external_gateway_added(
-            ex_gw_port, interface_name, self.ns_name, preserve_ips)
-
-    def external_gateway_updated(self, ex_gw_port, interface_name):
-        preserve_ips = self._list_floating_ip_cidrs()
-        self._external_gateway_added(
-            ex_gw_port, interface_name, self.ns_name, preserve_ips)
-
-    def external_gateway_removed(self, ex_gw_port, interface_name):
-        LOG.debug("External gateway removed: port(%s), interface(%s)",
-                  ex_gw_port, interface_name)
-        device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
-        for ip_addr in ex_gw_port['fixed_ips']:
-            self.remove_external_gateway_ip(device,
-                                            common_utils.ip_to_cidr(
-                                                ip_addr['ip_address'],
-                                                ip_addr['prefixlen']))
-        self.driver.unplug(interface_name,
-                           bridge=self.agent_conf.external_network_bridge,
-                           namespace=self.ns_name,
-                           prefix=EXTERNAL_DEV_PREFIX)
-
-    @staticmethod
-    def _gateway_ports_equal(port1, port2):
-        return port1 == port2
-
-    def _process_external_gateway(self, ex_gw_port, pd):
-        # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
-        ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
-                         self.ex_gw_port and self.ex_gw_port['id'])
-
-        interface_name = None
-        if ex_gw_port_id:
-            interface_name = self.get_external_device_name(ex_gw_port_id)
-        if ex_gw_port:
-            if not self.ex_gw_port:
-                self.external_gateway_added(ex_gw_port, interface_name)
-                pd.add_gw_interface(self.router['id'], interface_name)
-            elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port):
-                self.external_gateway_updated(ex_gw_port, interface_name)
-        elif not ex_gw_port and self.ex_gw_port:
-            self.external_gateway_removed(self.ex_gw_port, interface_name)
-            pd.remove_gw_interface(self.router['id'])
-
-        existing_devices = self._get_existing_devices()
-        stale_devs = [dev for dev in existing_devices
-                      if dev.startswith(EXTERNAL_DEV_PREFIX)
-                      and dev != interface_name]
-        for stale_dev in stale_devs:
-            LOG.debug('Deleting stale external router device: %s', stale_dev)
-            pd.remove_gw_interface(self.router['id'])
-            self.driver.unplug(stale_dev,
-                               bridge=self.agent_conf.external_network_bridge,
-                               namespace=self.ns_name,
-                               prefix=EXTERNAL_DEV_PREFIX)
-
-        # Process SNAT rules for external gateway
-        gw_port = self._router.get('gw_port')
-        self._handle_router_snat_rules(gw_port, interface_name)
-
-    def external_gateway_nat_postroute_rules(self, interface_name):
-        dont_snat_traffic_to_internal_ports_if_not_to_floating_ip = (
-            'POSTROUTING', '! -i %(interface_name)s '
-                           '! -o %(interface_name)s -m conntrack ! '
-                           '--ctstate DNAT -j ACCEPT' %
-                           {'interface_name': interface_name})
-        return [dont_snat_traffic_to_internal_ports_if_not_to_floating_ip]
-
-    def external_gateway_nat_snat_rules(self, ex_gw_ip, interface_name):
-        snat_normal_external_traffic = (
-            'snat', '-o %s -j SNAT --to-source %s' %
-                    (interface_name, ex_gw_ip))
-
-        # Makes replies come back through the router to reverse DNAT
-        ext_in_mark = self.agent_conf.external_ingress_mark
-        snat_internal_traffic_to_floating_ip = (
-            'snat', '-m mark ! --mark %s/%s '
-                    '-m conntrack --ctstate DNAT '
-                    '-j SNAT --to-source %s'
-                    % (ext_in_mark, l3_constants.ROUTER_MARK_MASK, ex_gw_ip))
-        return [snat_normal_external_traffic,
-                snat_internal_traffic_to_floating_ip]
-
-    def external_gateway_mangle_rules(self, interface_name):
-        mark = self.agent_conf.external_ingress_mark
-        mark_packets_entering_external_gateway_port = (
-            'mark', '-i %s -j MARK --set-xmark %s/%s' %
-                    (interface_name, mark, l3_constants.ROUTER_MARK_MASK))
-        return [mark_packets_entering_external_gateway_port]
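-
-    # Illustration (hypothetical values: interface 'qg-1234', gateway IP
-    # '203.0.113.9', ingress mark '0x2'; mask value assumed '0xffff'):
-    # the rule tuples built above render roughly as:
-    #   nat/snat:    -o qg-1234 -j SNAT --to-source 203.0.113.9
-    #   nat/snat:    -m mark ! --mark 0x2/0xffff -m conntrack
-    #                --ctstate DNAT -j SNAT --to-source 203.0.113.9
-    #   mangle/mark: -i qg-1234 -j MARK --set-xmark 0x2/0xffff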
-
-    def _empty_snat_chains(self, iptables_manager):
-        iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
-        iptables_manager.ipv4['nat'].empty_chain('snat')
-        iptables_manager.ipv4['mangle'].empty_chain('mark')
-
-    def _add_snat_rules(self, ex_gw_port, iptables_manager,
-                        interface_name):
-        if ex_gw_port:
-            # ex_gw_port is guaranteed to be set here; NAT rules are
-            # added only if it has an IPv4 address
-            for ip_addr in ex_gw_port['fixed_ips']:
-                ex_gw_ip = ip_addr['ip_address']
-                if netaddr.IPAddress(ex_gw_ip).version == 4:
-                    rules = self.external_gateway_nat_postroute_rules(
-                        interface_name)
-                    for rule in rules:
-                        iptables_manager.ipv4['nat'].add_rule(*rule)
-                    if self._snat_enabled:
-                        rules = self.external_gateway_nat_snat_rules(
-                            ex_gw_ip, interface_name)
-                        for rule in rules:
-                            iptables_manager.ipv4['nat'].add_rule(*rule)
-                        rules = self.external_gateway_mangle_rules(
-                            interface_name)
-                        for rule in rules:
-                            iptables_manager.ipv4['mangle'].add_rule(*rule)
-                    break
-
-    def _handle_router_snat_rules(self, ex_gw_port, interface_name):
-        self._empty_snat_chains(self.iptables_manager)
-
-        self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
-
-        self._add_snat_rules(ex_gw_port,
-                             self.iptables_manager,
-                             interface_name)
-
-    def _process_external_on_delete(self, agent):
-        fip_statuses = {}
-        try:
-            ex_gw_port = self.get_ex_gw_port()
-            self._process_external_gateway(ex_gw_port, agent.pd)
-            if not ex_gw_port:
-                return
-
-            interface_name = self.get_external_device_interface_name(
-                ex_gw_port)
-            fip_statuses = self.configure_fip_addresses(interface_name)
-
-        except n_exc.FloatingIpSetupException:
-            # All floating IPs must be put in error state
-            LOG.exception(_LE("Failed to process floating IPs."))
-            fip_statuses = self.put_fips_in_error_state()
-        finally:
-            self.update_fip_statuses(agent, fip_statuses)
-
-    def process_external(self, agent):
-        fip_statuses = {}
-        try:
-            with self.iptables_manager.defer_apply():
-                ex_gw_port = self.get_ex_gw_port()
-                self._process_external_gateway(ex_gw_port, agent.pd)
-                if not ex_gw_port:
-                    return
-
-                # Process SNAT/DNAT rules and addresses for floating IPs
-                self.process_snat_dnat_for_fip()
-
-            # Once NAT rules for floating IPs are safely in place
-            # configure their addresses on the external gateway port
-            interface_name = self.get_external_device_interface_name(
-                ex_gw_port)
-            fip_statuses = self.configure_fip_addresses(interface_name)
-
-        except (n_exc.FloatingIpSetupException,
-                n_exc.IpTablesApplyException):
-            # All floating IPs must be put in error state
-            LOG.exception(_LE("Failed to process floating IPs."))
-            fip_statuses = self.put_fips_in_error_state()
-        finally:
-            self.update_fip_statuses(agent, fip_statuses)
-
-    def update_fip_statuses(self, agent, fip_statuses):
-        # Identify floating IPs which were disabled
-        existing_floating_ips = self.floating_ips
-        self.floating_ips = set(fip_statuses.keys())
-        for fip_id in existing_floating_ips - self.floating_ips:
-            fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN
-        # filter out statuses that didn't change
-        fip_statuses = {f: stat for f, stat in fip_statuses.items()
-                        if stat != FLOATINGIP_STATUS_NOCHANGE}
-        if not fip_statuses:
-            return
-        LOG.debug('Sending floating ip statuses: %s', fip_statuses)
-        # Update floating IP status on the neutron server
-        agent.plugin_rpc.update_floatingip_statuses(
-            agent.context, self.router_id, fip_statuses)
-
-    @common_utils.exception_logger()
-    def process_delete(self, agent):
-        """Process the delete of this router
-
-        This method is the point where the agent requests that this router
-        be deleted. This is a separate code path from process() in that it
-        avoids any changes to the qrouter namespace, since the namespace
-        will be removed at the end of the operation.
-
-        :param agent: Passes the agent in order to send RPC messages.
-        """
-        LOG.debug("process router delete")
-        self._process_internal_ports(agent.pd)
-        agent.pd.sync_router(self.router['id'])
-        self._process_external_on_delete(agent)
-
-    @common_utils.exception_logger()
-    def process(self, agent):
-        """Process updates to this router
-
-        This method is the point where the agent requests that updates be
-        applied to this router.
-
-        :param agent: Passes the agent in order to send RPC messages.
-        """
-        LOG.debug("process router updates")
-        self._process_internal_ports(agent.pd)
-        agent.pd.sync_router(self.router['id'])
-        self.process_external(agent)
-        # Process static routes for router
-        self.routes_updated(self.routes, self.router['routes'])
-        self.routes = self.router['routes']
-
-        # Update ex_gw_port and enable_snat on the router info cache
-        self.ex_gw_port = self.get_ex_gw_port()
-        # TODO(Carl) FWaaS uses this.  Why is it set after processing is done?
-        self.enable_snat = self.router.get('enable_snat')
diff --git a/neutron/agent/l3/router_processing_queue.py b/neutron/agent/l3/router_processing_queue.py
deleted file mode 100644 (file)
index a0b3fa1..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import datetime
-from six.moves import queue as Queue
-
-from oslo_utils import timeutils
-
-# Lower value is higher priority
-PRIORITY_RPC = 0
-PRIORITY_SYNC_ROUTERS_TASK = 1
-PRIORITY_PD_UPDATE = 2
-DELETE_ROUTER = 1
-PD_UPDATE = 2
-
-
-class RouterUpdate(object):
-    """Encapsulates a router update
-
-    An instance of this object carries the information necessary to prioritize
-    and process a request to update a router.
-    """
-    def __init__(self, router_id, priority,
-                 action=None, router=None, timestamp=None):
-        self.priority = priority
-        self.timestamp = timestamp
-        if not timestamp:
-            self.timestamp = timeutils.utcnow()
-        self.id = router_id
-        self.action = action
-        self.router = router
-
-    def __lt__(self, other):
-        """Implements priority among updates
-
-        Lower numerical priority always gets precedence.  When comparing two
-        updates of the same priority, the one with the earlier timestamp
-        gets precedence.  In the unlikely event that the timestamps are also
-        equal, it falls back to a simple comparison of ids, meaning the
-        precedence is essentially random.
-        """
-        if self.priority != other.priority:
-            return self.priority < other.priority
-        if self.timestamp != other.timestamp:
-            return self.timestamp < other.timestamp
-        return self.id < other.id
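-
-    # Ordering sketch (hypothetical router ids): priority wins first,
-    # then the earlier timestamp, then the id as a final tie-breaker.
-    #
-    #   a = RouterUpdate('r1', PRIORITY_RPC)
-    #   b = RouterUpdate('r2', PRIORITY_SYNC_ROUTERS_TASK)
-    #   assert a < b    # PRIORITY_RPC (0) beats PRIORITY_SYNC_ROUTERS_TASK (1)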
-
-
-class ExclusiveRouterProcessor(object):
-    """Manager for access to a router for processing
-
-    This class controls access to a router in a non-blocking way.  The first
-    instance to be created for a given router_id is granted exclusive access to
-    the router.
-
-    Other instances may be created for the same router_id while the first
-    instance has exclusive access.  If that happens, it doesn't block and
-    wait for access.  Instead, it signals to the master instance that an
-    update with the given timestamp came in.
-
-    This way, a thread will not block to wait for access to a router.  Instead
-    it effectively signals to the thread that is working on the router that
-    something has changed since it started working on it.  That thread will
-    simply finish its current iteration and then repeat.
-
-    This class keeps track of the last time that router data was fetched and
-    processed.  The timestamp it keeps must be no later than the time the
-    data used to process the router was last fetched from the database, but
-    as close to it as possible.  The timestamp should not be recorded,
-    however, until the router has been processed using the fetched data.
-    """
-    _masters = {}
-    _router_timestamps = {}
-
-    def __init__(self, router_id):
-        self._router_id = router_id
-
-        if router_id not in self._masters:
-            self._masters[router_id] = self
-            self._queue = []
-
-        self._master = self._masters[router_id]
-
-    def _i_am_master(self):
-        return self == self._master
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, type, value, traceback):
-        if self._i_am_master():
-            del self._masters[self._router_id]
-
-    def _get_router_data_timestamp(self):
-        return self._router_timestamps.get(self._router_id,
-                                           datetime.datetime.min)
-
-    def fetched_and_processed(self, timestamp):
-        """Records the data timestamp after it is used to update the router"""
-        new_timestamp = max(timestamp, self._get_router_data_timestamp())
-        self._router_timestamps[self._router_id] = new_timestamp
-
-    def queue_update(self, update):
-        """Queues an update from a worker
-
-        This is the queue used to keep new updates that come in while a router
-        is being processed.  These updates have already bubbled to the front of
-        the RouterProcessingQueue.
-        """
-        self._master._queue.append(update)
-
-    def updates(self):
-        """Processes the router until updates stop coming
-
-        Only the master instance will process the router.  However, updates may
-        come in from other workers while it is in progress.  This method loops
-        until they stop coming.
-        """
-        if self._i_am_master():
-            while self._queue:
-                # Remove the update from the queue even if it is old.
-                update = self._queue.pop(0)
-                # Process the update only if it is fresh.
-                if self._get_router_data_timestamp() < update.timestamp:
-                    yield update
-
-
-class RouterProcessingQueue(object):
-    """Manager of the queue of routers to process."""
-    def __init__(self):
-        self._queue = Queue.PriorityQueue()
-
-    def add(self, update):
-        self._queue.put(update)
-
-    def each_update_to_next_router(self):
-        """Grabs the next router from the queue and processes
-
-        This method uses a for loop to process the router repeatedly until
-        updates stop bubbling to the front of the queue.
-        """
-        next_update = self._queue.get()
-
-        with ExclusiveRouterProcessor(next_update.id) as rp:
-            # Queue the update whether this worker is the master or not.
-            rp.queue_update(next_update)
-
-            # Here, if the current worker is not the master, the call to
-            # rp.updates() will not yield and so this will essentially be a
-            # noop.
-            for update in rp.updates():
-                yield (rp, update)
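-
-
-# A minimal consumer sketch (hypothetical handler 'process_update'):
-# workers enqueue RouterUpdate objects and a single loop drains them,
-# recording the data timestamp once each update has been applied.
-#
-#   queue = RouterProcessingQueue()
-#   queue.add(RouterUpdate('router-1', PRIORITY_RPC))
-#   for rp, update in queue.each_update_to_next_router():
-#       process_update(update)
-#       rp.fetched_and_processed(update.timestamp)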
diff --git a/neutron/agent/l3/rt_tables.py b/neutron/agent/l3/rt_tables.py
deleted file mode 100644 (file)
index 66c9f69..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Enterprise Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-import os
-import six
-
-from oslo_log import log as logging
-
-from neutron.agent.common import utils as common_utils
-from neutron.agent.linux import ip_lib
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import utils
-
-LOG = logging.getLogger(__name__)
-
-
-class NamespaceEtcDir(object):
-    """Creates a directory where namespace local /etc/iproute2 files can live
-
-    Directories are created under /etc/netns/<namespace_name>/iproute2 so that
-    when you exec a command inside a namespace, the directory is available as
-    /etc/iproute2 locally to the namespace.
-
-    The directory ownership is changed to the owner of the L3 agent process
-    so that root is no longer required to manage the file.  This limits the
-    scope of where root is needed.  Changing ownership is justified because
-    the directory lives under a namespace specific sub-directory of /etc, it
-    should be considered owned by the L3 agent process, which also manages the
-    namespace itself.
-
-    The directory and its contents should not be considered config.  Nothing
-    needs to be done for upgrade.  The only reason for it to live under /etc
-    within the namespace is that this is the only place from which the ip
-    command will read it.
-    """
-
-    BASE_DIR = "/etc/netns"
-
-    def __init__(self, namespace):
-        self._directory = os.path.join(self.BASE_DIR, namespace)
-
-    def create(self):
-        common_utils.execute(['mkdir', '-p', self._directory],
-                             run_as_root=True)
-
-        user_id = os.geteuid()
-        common_utils.execute(['chown', user_id, self._directory],
-                             run_as_root=True)
-
-    def destroy(self):
-        common_utils.execute(['rm', '-r', '-f', self._directory],
-                             run_as_root=True)
-
-    def get_full_path(self):
-        return self._directory
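-
-    # Sketch: after NamespaceEtcDir('qrouter-X').create() (hypothetical
-    # namespace name), files placed under /etc/netns/qrouter-X are seen
-    # as /etc/... by commands run via 'ip netns exec qrouter-X ...'.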
-
-
-class RoutingTable(object):
-    def __init__(self, namespace, table_id, name):
-        self.name = name
-        self.table_id = table_id
-        self.ip_route = ip_lib.IPRoute(namespace=namespace, table=name)
-        self._keep = set()
-
-    def __eq__(self, other):
-        return self.table_id == other.table_id
-
-    def __hash__(self):
-        return self.table_id
-
-    def add(self, device, cidr):
-        table = device.route.table(self.name)
-        cidr = netaddr.IPNetwork(cidr)
-        # Get the network cidr (e.g. 192.168.5.135/23 -> 192.168.4.0/23)
-        net = utils.ip_to_cidr(cidr.network, cidr.prefixlen)
-        self._keep.add((net, device.name))
-        table.add_onlink_route(net)
-
-    def add_gateway(self, device, gateway_ip):
-        table = device.route.table(self.name)
-        ip_version = ip_lib.get_ip_version(gateway_ip)
-        self._keep.add((constants.IP_ANY[ip_version], device.name))
-        table.add_gateway(gateway_ip)
-
-    def __enter__(self):
-        self._keep = set()
-        return self
-
-    def __exit__(self, exc_type, value, traceback):
-        if exc_type:
-            return False
-
-        keep = self._keep
-        self._keep = None
-
-        ipv4_routes = self.ip_route.route.list_routes(constants.IP_VERSION_4)
-        ipv6_routes = self.ip_route.route.list_routes(constants.IP_VERSION_6)
-        all_routes = {(r['cidr'], r['dev'])
-                      for r in ipv4_routes + ipv6_routes}
-
-        for cidr, dev in all_routes - keep:
-            try:
-                self.ip_route.route.delete_route(cidr, dev=dev)
-            except exceptions.DeviceNotFoundError:
-                pass
-
-        return True
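-
-    # A minimal usage sketch (hypothetical IPDevice 'device'): routes
-    # (re-)added inside the 'with' block are kept; anything else found
-    # in the table is deleted on exit.
-    #
-    #   rt = RoutingTable('qrouter-X', 1024, 'fip-table')
-    #   with rt:
-    #       rt.add(device, '192.168.5.135/23')   # keeps 192.168.4.0/23
-    #       rt.add_gateway(device, '192.168.4.1')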
-
-
-class RoutingTablesManager(object):
-    """Manages mapping from routing table name to routing tables
-
-    The iproute2 package can read a mapping from /etc/iproute2/rt_tables.  When
-    namespaces are used, it is possible to maintain an rt_tables file that is
-    unique to the namespace.
-
-    It is necessary to maintain this mapping on disk somewhere because it must
-    survive agent restarts.  Otherwise, we'd be remapping each time.  It is not
-    necessary to maintain it in the Neutron database because it is an
-    agent-local implementation detail.
-
-    While it could be kept in any local file, it is convenient to keep it in
-    the rt_tables file so that we can simply pass the table name to the
-    ip route commands.  It will also be helpful for debugging to be able to use
-    the table name on the command line manually.
-    """
-
-    FILENAME = 'iproute2/rt_tables'
-    ALL_IDS = set(range(1024, 2048))
-    DEFAULT_TABLES = {"local": 255,
-                      "main": 254,
-                      "default": 253,
-                      "unspec": 0}
-
-    def __init__(self, namespace):
-        self._namespace = namespace
-        self.etc = NamespaceEtcDir(namespace)
-        self._rt_tables_filename = os.path.join(
-            self.etc.get_full_path(), self.FILENAME)
-        self._tables = {}
-        self.initialize_map()
-
-    def initialize_map(self):
-        # Create a default table if one is not already found
-        self.etc.create()
-        utils.ensure_dir(os.path.dirname(self._rt_tables_filename))
-        if not os.path.exists(self._rt_tables_filename):
-            self._write_map(self.DEFAULT_TABLES)
-        self._keep = set()
-
-    def _get_or_create(self, table_id, table_name):
-        table = self._tables.get(table_id)
-        if not table:
-            self._tables[table_id] = table = RoutingTable(
-                 self._namespace, table_id, table_name)
-        return table
-
-    def get(self, table_name):
-        """Returns the table ID for the given table name"""
-        table_id = self._read_map().get(table_name)
-        if table_id is not None:
-            return self._get_or_create(table_id, table_name)
-
-    def get_all(self):
-        return set(self._get_or_create(t_id, name)
-                   for name, t_id in self._read_map().items())
-
-    def add(self, table_name):
-        """Ensures there is a single table id available for the table name"""
-        name_to_id = self._read_map()
-
-        def get_and_keep(table_id, table_name):
-            table = self._get_or_create(table_id, table_name)
-            self._keep.add(table)
-            return table
-
-        # If it is already there, just return it.
-        if table_name in name_to_id:
-            return get_and_keep(name_to_id[table_name], table_name)
-
-        # Otherwise, find an available id and write the new file
-        table_ids = set(name_to_id.values())
-        available_ids = self.ALL_IDS - table_ids
-        name_to_id[table_name] = table_id = available_ids.pop()
-        self._write_map(name_to_id)
-        return get_and_keep(table_id, table_name)
-
-    def delete(self, table_name):
-        """Removes the table from the file"""
-        name_to_id = self._read_map()
-
-        # If it is already there, remove it
-        table_id = name_to_id.pop(table_name, None)
-        self._tables.pop(table_id, None)
-
-        # Write the new file
-        self._write_map(name_to_id)
-
-    def _write_map(self, name_to_id):
-        buf = six.StringIO()
-        for name, table_id in name_to_id.items():
-            buf.write("%s\t%s\n" % (table_id, name))
-        utils.replace_file(self._rt_tables_filename, buf.getvalue())
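-        # The file written above maps one table per line as
-        # "<id>\t<name>", e.g. "254\tmain" or "1024\tmytable"
-        # (hypothetical custom name).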
-
-    def _read_map(self):
-        result = {}
-        with open(self._rt_tables_filename, "r") as rt_file:
-            for line in rt_file:
-                fields = line.split()
-                if len(fields) != 2:
-                    continue
-                table_id_str, name = fields
-                try:
-                    table_id = int(table_id_str)
-                except ValueError:
-                    continue
-                result[name] = table_id
-        return result
-
-    def destroy(self):
-        self.etc.destroy()
-
-    def __enter__(self):
-        for rt in self.get_all():
-            if rt.table_id not in self.DEFAULT_TABLES.values():
-                rt.__enter__()
-        self._keep = set()
-        return self
-
-    def __exit__(self, exc_type, value, traceback):
-        if exc_type:
-            return False
-
-        all_tables = set(rt for rt in self.get_all()
-                         if rt.table_id not in self.DEFAULT_TABLES.values())
-        for rt in all_tables:
-            rt.__exit__(None, None, None)
-
-        for rt in all_tables - self._keep:
-            self.delete(rt.name)
-
-        return True
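-
-
-# A minimal usage sketch (hypothetical namespace and table names):
-# tables registered via add() inside the 'with' block survive; other
-# non-default tables are removed from the rt_tables file on exit.
-#
-#   rtm = RoutingTablesManager('qrouter-X')
-#   with rtm:
-#       table = rtm.add('fip-gw')    # allocates an id from 1024-2047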
diff --git a/neutron/agent/l3_agent.py b/neutron/agent/l3_agent.py
deleted file mode 100644 (file)
index dbfbb65..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_service import service
-
-from neutron.agent.common import config
-from neutron.agent.l3 import config as l3_config
-from neutron.agent.l3 import ha
-from neutron.agent.linux import external_process
-from neutron.agent.linux import interface
-from neutron.agent.linux import pd
-from neutron.agent.linux import ra
-from neutron.agent.metadata import config as metadata_config
-from neutron.common import config as common_config
-from neutron.common import topics
-from neutron import service as neutron_service
-
-
-def register_opts(conf):
-    conf.register_opts(l3_config.OPTS)
-    conf.register_opts(metadata_config.DRIVER_OPTS)
-    conf.register_opts(metadata_config.SHARED_OPTS)
-    conf.register_opts(ha.OPTS)
-    config.register_interface_driver_opts_helper(conf)
-    config.register_agent_state_opts_helper(conf)
-    conf.register_opts(interface.OPTS)
-    conf.register_opts(external_process.OPTS)
-    conf.register_opts(pd.OPTS)
-    conf.register_opts(ra.OPTS)
-    config.register_availability_zone_opts_helper(conf)
-
-
-def main(manager='neutron.agent.l3.agent.L3NATAgentWithStateReport'):
-    register_opts(cfg.CONF)
-    common_config.init(sys.argv[1:])
-    config.setup_logging()
-    server = neutron_service.Service.create(
-        binary='neutron-l3-agent',
-        topic=topics.L3_AGENT,
-        report_interval=cfg.CONF.AGENT.report_interval,
-        manager=manager)
-    service.launch(cfg.CONF, server).wait()
diff --git a/neutron/agent/linux/__init__.py b/neutron/agent/linux/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/linux/async_process.py b/neutron/agent/linux/async_process.py
deleted file mode 100644 (file)
index 3acec0d..0000000
+++ /dev/null
@@ -1,266 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import signal
-
-import eventlet
-import eventlet.event
-import eventlet.queue
-from oslo_log import log as logging
-
-from neutron._i18n import _, _LE
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import utils as common_utils
-
-
-LOG = logging.getLogger(__name__)
-
-
-class AsyncProcessException(Exception):
-    pass
-
-
-class AsyncProcess(object):
-    """Manages an asynchronous process.
-
-    This class spawns a new process via subprocess and uses
-    greenthreads to read stderr and stdout asynchronously into queues
-    that can be read via repeatedly calling iter_stdout() and
-    iter_stderr().
-
-    If respawn_interval is non-zero, any error in communicating with
-    the managed process will result in the process and greenthreads
-    being cleaned up and the process restarted after the specified
-    interval.
-
-    Example usage:
-
-    >>> import time
-    >>> proc = AsyncProcess(['ping'])
-    >>> proc.start()
-    >>> time.sleep(5)
-    >>> proc.stop()
-    >>> for line in proc.iter_stdout():
-    ...     print(line)
-    """
-
-    def __init__(self, cmd, run_as_root=False, respawn_interval=None,
-                 namespace=None, log_output=False, die_on_error=False):
-        """Constructor.
-
-        :param cmd: The list of command arguments to invoke.
-        :param run_as_root: The process should run with elevated privileges.
-        :param respawn_interval: Optional, the interval in seconds to wait
-               to respawn after unexpected process death. Respawn will
-               only be attempted if a value of 0 or greater is provided.
-        :param namespace: Optional, start the command in the specified
-               namespace.
-        :param log_output: Optional, also log received output.
-        :param die_on_error: Optional, kills the process on stderr output.
-        """
-        self.cmd_without_namespace = cmd
-        self._cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
-        self.run_as_root = run_as_root
-        if respawn_interval is not None and respawn_interval < 0:
-            raise ValueError(_('respawn_interval must be >= 0 if provided.'))
-        self.respawn_interval = respawn_interval
-        self._process = None
-        self._is_running = False
-        self._kill_event = None
-        self._reset_queues()
-        self._watchers = []
-        self.log_output = log_output
-        self.die_on_error = die_on_error
-
-    @property
-    def cmd(self):
-        return ' '.join(self._cmd)
-
-    def _reset_queues(self):
-        self._stdout_lines = eventlet.queue.LightQueue()
-        self._stderr_lines = eventlet.queue.LightQueue()
-
-    def is_active(self):
-        # If using sudo rootwrap as a root_helper, we have to wait until sudo
-        # spawns rootwrap and rootwrap spawns the process.
-
-        return utils.pid_invoked_with_cmdline(
-            self.pid, self.cmd_without_namespace)
-
-    def start(self, block=False):
-        """Launch a process and monitor it asynchronously.
-
-        :param block: Block until the process has started.
-        :raises eventlet.timeout.Timeout if blocking is True and the process
-                did not start in time.
-        """
-        LOG.debug('Launching async process [%s].', self.cmd)
-        if self._is_running:
-            raise AsyncProcessException(_('Process is already started'))
-        else:
-            self._spawn()
-
-        if block:
-            utils.wait_until_true(self.is_active)
-
-    def stop(self, block=False, kill_signal=signal.SIGKILL):
-        """Halt the process and watcher threads.
-
-        :param block: Block until the process has stopped.
-        :param kill_signal: Number of signal that will be sent to the process
-                            when terminating the process
-        :raises eventlet.timeout.Timeout if blocking is True and the process
-                did not stop in time.
-        """
-        if self._is_running:
-            LOG.debug('Halting async process [%s].', self.cmd)
-            self._kill(kill_signal)
-        else:
-            raise AsyncProcessException(_('Process is not running.'))
-
-        if block:
-            utils.wait_until_true(lambda: not self.is_active())
-
-    def _spawn(self):
-        """Spawn a process and its watchers."""
-        self._is_running = True
-        self._kill_event = eventlet.event.Event()
-        self._process, cmd = utils.create_process(self._cmd,
-                                                  run_as_root=self.run_as_root)
-        self._watchers = []
-        for reader in (self._read_stdout, self._read_stderr):
-            # Pass the stop event directly to the greenthread to
-            # ensure that assignment of a new event to the instance
-            # attribute does not prevent the greenthread from using
-            # the original event.
-            watcher = eventlet.spawn(self._watch_process,
-                                     reader,
-                                     self._kill_event)
-            self._watchers.append(watcher)
-
-    @property
-    def pid(self):
-        if self._process:
-            return utils.get_root_helper_child_pid(
-                self._process.pid,
-                run_as_root=self.run_as_root)
-
-    def _kill(self, kill_signal):
-        """Kill the process and the associated watcher greenthreads."""
-        pid = self.pid
-        if pid:
-            self._is_running = False
-            self._kill_process(pid, kill_signal)
-
-        # Halt the greenthreads if they weren't already.
-        if self._kill_event:
-            self._kill_event.send()
-            self._kill_event = None
-
-    def _kill_process(self, pid, kill_signal):
-        try:
-            # A process started by a root helper will be running as
-            # root and need to be killed via the same helper.
-            utils.execute(['kill', '-%d' % kill_signal, pid],
-                          run_as_root=self.run_as_root)
-        except Exception as ex:
-            stale_pid = (isinstance(ex, RuntimeError) and
-                         'No such process' in str(ex))
-            if not stale_pid:
-                LOG.exception(_LE('An error occurred while killing [%s].'),
-                              self.cmd)
-                return False
-
-        if self._process:
-            self._process.wait()
-        return True
-
-    def _handle_process_error(self):
-        """Kill the async process and respawn if necessary."""
-        LOG.debug('Halting async process [%s] in response to an error.',
-                  self.cmd)
-        self._kill(signal.SIGKILL)
-        if self.respawn_interval is not None and self.respawn_interval >= 0:
-            eventlet.sleep(self.respawn_interval)
-            LOG.debug('Respawning async process [%s].', self.cmd)
-            try:
-                self.start()
-            except AsyncProcessException:
-                # Process was already respawned by someone else...
-                pass
-
-    def _watch_process(self, callback, kill_event):
-        while not kill_event.ready():
-            try:
-                output = callback()
-                if not output and output != "":
-                    break
-            except Exception:
-                LOG.exception(_LE('An error occurred while communicating '
-                                  'with async process [%s].'), self.cmd)
-                break
-            # Ensure that watching a process with lots of output does
-            # not block execution of other greenthreads.
-            eventlet.sleep()
-        # self._is_running being True indicates that the loop was
-        # broken out of due to an error in the watched process rather
-        # than the loop condition being satisfied.
-        if self._is_running:
-            self._is_running = False
-            self._handle_process_error()
-
-    def _read(self, stream, queue):
-        data = stream.readline()
-        if data:
-            data = common_utils.safe_decode_utf8(data.strip())
-            queue.put(data)
-            return data
-
-    def _read_stdout(self):
-        data = self._read(self._process.stdout, self._stdout_lines)
-        if self.log_output:
-            LOG.debug('Output received from [%(cmd)s]: %(data)s',
-                      {'cmd': self.cmd,
-                       'data': data})
-        return data
-
-    def _read_stderr(self):
-        data = self._read(self._process.stderr, self._stderr_lines)
-        if self.log_output:
-            LOG.error(_LE('Error received from [%(cmd)s]: %(err)s'),
-                      {'cmd': self.cmd,
-                       'err': data})
-        if self.die_on_error:
-            LOG.error(_LE("Process [%(cmd)s] dies due to the error: %(err)s"),
-                      {'cmd': self.cmd,
-                       'err': data})
-            # the callback caller will use None to indicate the need to bail
-            # out of the thread
-            return None
-
-        return data
-
-    def _iter_queue(self, queue, block):
-        while True:
-            try:
-                yield queue.get(block=block)
-            except eventlet.queue.Empty:
-                break
-
-    def iter_stdout(self, block=False):
-        return self._iter_queue(self._stdout_lines, block)
-
-    def iter_stderr(self, block=False):
-        return self._iter_queue(self._stderr_lines, block)
diff --git a/neutron/agent/linux/bridge_lib.py b/neutron/agent/linux/bridge_lib.py
deleted file mode 100644 (file)
index 625ae94..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2015 Intel Corporation.
-# Copyright 2015 Isaku Yamahata <isaku.yamahata at intel com>
-#                               <isaku.yamahata at gmail com>
-# All Rights Reserved.
-#
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-from oslo_log import log as logging
-
-from neutron.agent.linux import ip_lib
-from neutron._i18n import _LE
-
-LOG = logging.getLogger(__name__)
-
-# NOTE(toabctl): Don't use /sys/devices/virtual/net here because not all tap
-# devices are listed here (i.e. when using Xen)
-BRIDGE_FS = "/sys/class/net/"
-BRIDGE_INTERFACE_FS = BRIDGE_FS + "%(bridge)s/brif/%(interface)s"
-BRIDGE_INTERFACES_FS = BRIDGE_FS + "%s/brif/"
-BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + "%s/brport"
-BRIDGE_PATH_FOR_DEVICE = BRIDGE_PORT_FS_FOR_DEVICE + '/bridge'
-
-
-def is_bridged_interface(interface):
-    if not interface:
-        return False
-    else:
-        return os.path.exists(BRIDGE_PORT_FS_FOR_DEVICE % interface)
-
-
-def get_bridge_names():
-    return os.listdir(BRIDGE_FS)
-
-
-class BridgeDevice(ip_lib.IPDevice):
-    def _brctl(self, cmd):
-        cmd = ['brctl'] + cmd
-        ip_wrapper = ip_lib.IPWrapper(self.namespace)
-        return ip_wrapper.netns.execute(cmd, run_as_root=True)
-
-    def _sysctl(self, cmd):
-        """execute() doesn't return the exit status of the command it runs,
-        it returns stdout and stderr. Setting check_exit_code=True will cause
-        it to raise a RuntimeError if the exit status of the command is
-        non-zero, which in sysctl's case is an error. So we're normalizing
-        that into zero (success) and one (failure) here to mimic what
-        "echo $?" in a shell would be.
-
-        This is all because sysctl is too verbose and prints the value you
-        just set on success, unlike most other utilities that print nothing.
-
-        execute() will have dumped a message to the logs with the actual
-        output on failure, so it's not lost, and we don't need to print it
-        here.
-        """
-        cmd = ['sysctl', '-w'] + cmd
-        ip_wrapper = ip_lib.IPWrapper(self.namespace)
-        try:
-            ip_wrapper.netns.execute(cmd, run_as_root=True,
-                                     check_exit_code=True)
-        except RuntimeError:
-            LOG.exception(_LE("Failed running %s"), cmd)
-            return 1
-
-        return 0
-
-    @classmethod
-    def addbr(cls, name, namespace=None):
-        bridge = cls(name, namespace)
-        bridge._brctl(['addbr', bridge.name])
-        return bridge
-
-    @classmethod
-    def get_interface_bridge(cls, interface):
-        try:
-            path = os.readlink(BRIDGE_PATH_FOR_DEVICE % interface)
-        except OSError:
-            return None
-        else:
-            name = path.rpartition('/')[-1]
-            return cls(name)
-
-    def delbr(self):
-        return self._brctl(['delbr', self.name])
-
-    def addif(self, interface):
-        return self._brctl(['addif', self.name, interface])
-
-    def delif(self, interface):
-        return self._brctl(['delif', self.name, interface])
-
-    def setfd(self, fd):
-        return self._brctl(['setfd', self.name, str(fd)])
-
-    def disable_stp(self):
-        return self._brctl(['stp', self.name, 'off'])
-
-    def disable_ipv6(self):
-        cmd = 'net.ipv6.conf.%s.disable_ipv6=1' % self.name
-        return self._sysctl([cmd])
-
-    def owns_interface(self, interface):
-        return os.path.exists(
-            BRIDGE_INTERFACE_FS % {'bridge': self.name,
-                                   'interface': interface})
-
-    def get_interfaces(self):
-        try:
-            return os.listdir(BRIDGE_INTERFACES_FS % self.name)
-        except OSError:
-            return []
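-
-
-# A minimal usage sketch (hypothetical bridge/interface names; brctl and
-# sysctl are invoked with run_as_root=True, so root is required):
-#
-#   br = BridgeDevice.addbr('brq-demo')
-#   br.setfd(0)
-#   br.disable_stp()
-#   br.addif('tap-demo0')
-#   assert br.owns_interface('tap-demo0')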
diff --git a/neutron/agent/linux/daemon.py b/neutron/agent/linux/daemon.py
deleted file mode 100644 (file)
index 7ad4ca7..0000000
+++ /dev/null
@@ -1,258 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import atexit
-import fcntl
-import grp
-import logging as std_logging
-from logging import handlers
-import os
-import pwd
-import signal
-import sys
-
-from oslo_log import log as logging
-
-from neutron._i18n import _, _LE, _LI
-from neutron.common import exceptions
-
-LOG = logging.getLogger(__name__)
-
-DEVNULL = object()
-
-# Note: We can't use sys.std*.fileno() here.  sys.std* objects may be
-# random file-like objects that may not match the true system std* fds
-# - and indeed may not even have a file descriptor at all (eg: test
-# fixtures that monkey patch fixtures.StringStream onto sys.stdout).
-# Below we always want the _real_ well-known 0,1,2 Unix fds during
-# os.dup2 manipulation.
-STDIN_FILENO = 0
-STDOUT_FILENO = 1
-STDERR_FILENO = 2
-
-
-def setuid(user_id_or_name):
-    try:
-        new_uid = int(user_id_or_name)
-    except (TypeError, ValueError):
-        new_uid = pwd.getpwnam(user_id_or_name).pw_uid
-    if new_uid != 0:
-        try:
-            os.setuid(new_uid)
-        except OSError:
-            msg = _('Failed to set uid %s') % new_uid
-            LOG.critical(msg)
-            raise exceptions.FailToDropPrivilegesExit(msg)
-
-
-def setgid(group_id_or_name):
-    try:
-        new_gid = int(group_id_or_name)
-    except (TypeError, ValueError):
-        new_gid = grp.getgrnam(group_id_or_name).gr_gid
-    if new_gid != 0:
-        try:
-            os.setgid(new_gid)
-        except OSError:
-            msg = _('Failed to set gid %s') % new_gid
-            LOG.critical(msg)
-            raise exceptions.FailToDropPrivilegesExit(msg)
-
-
-def unwatch_log():
-    """Replace WatchedFileHandler handlers by FileHandler ones.
-
-    Neutron logging uses WatchedFileHandler handlers, but they do not
-    support privilege drops; this method replaces them with FileHandler
-    handlers that support privilege drops.
-    """
-    log_root = logging.getLogger(None).logger
-    to_replace = [h for h in log_root.handlers
-                  if isinstance(h, handlers.WatchedFileHandler)]
-    for handler in to_replace:
-        # NOTE(cbrandily): we use default delay(=False) to ensure the log file
-        # is opened before privileges drop.
-        new_handler = std_logging.FileHandler(handler.baseFilename,
-                                              mode=handler.mode,
-                                              encoding=handler.encoding)
-        log_root.removeHandler(handler)
-        log_root.addHandler(new_handler)
-
-
-def drop_privileges(user=None, group=None):
-    """Drop privileges to user/group privileges."""
-    if user is None and group is None:
-        return
-
-    if os.geteuid() != 0:
-        msg = _('Root permissions are required to drop privileges.')
-        LOG.critical(msg)
-        raise exceptions.FailToDropPrivilegesExit(msg)
-
-    if group is not None:
-        try:
-            os.setgroups([])
-        except OSError:
-            msg = _('Failed to remove supplemental groups')
-            LOG.critical(msg)
-            raise exceptions.FailToDropPrivilegesExit(msg)
-        setgid(group)
-
-    if user is not None:
-        setuid(user)
-
-    LOG.info(_LI("Process runs with uid/gid: %(uid)s/%(gid)s"),
-             {'uid': os.getuid(), 'gid': os.getgid()})
-
-
-class Pidfile(object):
-    def __init__(self, pidfile, procname, uuid=None):
-        self.pidfile = pidfile
-        self.procname = procname
-        self.uuid = uuid
-        try:
-            self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
-            fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
-        except IOError:
-            LOG.exception(_LE("Error while handling pidfile: %s"), pidfile)
-            sys.exit(1)
-
-    def __str__(self):
-        return self.pidfile
-
-    def unlock(self):
-        fcntl.flock(self.fd, fcntl.LOCK_UN)
-
-    def write(self, pid):
-        os.ftruncate(self.fd, 0)
-        os.write(self.fd, "%d" % pid)
-        os.fsync(self.fd)
-
-    def read(self):
-        try:
-            pid = int(os.read(self.fd, 128))
-            os.lseek(self.fd, 0, os.SEEK_SET)
-            return pid
-        except ValueError:
-            return
-
-    def is_running(self):
-        pid = self.read()
-        if not pid:
-            return False
-
-        cmdline = '/proc/%s/cmdline' % pid
-        try:
-            with open(cmdline, "r") as f:
-                exec_out = f.readline()
-            return self.procname in exec_out and (not self.uuid or
-                                                  self.uuid in exec_out)
-        except IOError:
-            return False
-
-
-class Daemon(object):
-    """A generic daemon class.
-
-    Usage: subclass the Daemon class and override the run() method
-    """
-    def __init__(self, pidfile, stdin=DEVNULL, stdout=DEVNULL,
-                 stderr=DEVNULL, procname='python', uuid=None,
-                 user=None, group=None, watch_log=True):
-        """Note: pidfile may be None."""
-        self.stdin = stdin
-        self.stdout = stdout
-        self.stderr = stderr
-        self.procname = procname
-        self.pidfile = (Pidfile(pidfile, procname, uuid)
-                        if pidfile is not None else None)
-        self.user = user
-        self.group = group
-        self.watch_log = watch_log
-
-    def _fork(self):
-        try:
-            pid = os.fork()
-            if pid > 0:
-                os._exit(0)
-        except OSError:
-            LOG.exception(_LE('Fork failed'))
-            sys.exit(1)
-
-    def daemonize(self):
-        """Daemonize process by doing Stevens double fork."""
-
-        # flush any buffered data before fork/dup2.
-        if self.stdout is not DEVNULL:
-            self.stdout.flush()
-        if self.stderr is not DEVNULL:
-            self.stderr.flush()
-        # sys.std* may not match STD{OUT,ERR}_FILENO.  Tough.
-        for f in (sys.stdout, sys.stderr):
-            f.flush()
-
-        # fork first time
-        self._fork()
-
-        # decouple from parent environment
-        os.chdir("/")
-        os.setsid()
-        os.umask(0)
-
-        # fork second time
-        self._fork()
-
-        # redirect standard file descriptors
-        with open(os.devnull, 'w+') as devnull:
-            stdin = devnull if self.stdin is DEVNULL else self.stdin
-            stdout = devnull if self.stdout is DEVNULL else self.stdout
-            stderr = devnull if self.stderr is DEVNULL else self.stderr
-            os.dup2(stdin.fileno(), STDIN_FILENO)
-            os.dup2(stdout.fileno(), STDOUT_FILENO)
-            os.dup2(stderr.fileno(), STDERR_FILENO)
-
-        if self.pidfile is not None:
-            # write pidfile
-            atexit.register(self.delete_pid)
-            signal.signal(signal.SIGTERM, self.handle_sigterm)
-            self.pidfile.write(os.getpid())
-
-    def delete_pid(self):
-        if self.pidfile is not None:
-            os.remove(str(self.pidfile))
-
-    def handle_sigterm(self, signum, frame):
-        sys.exit(0)
-
-    def start(self):
-        """Start the daemon."""
-
-        if self.pidfile is not None and self.pidfile.is_running():
-            self.pidfile.unlock()
-            LOG.error(_LE('Pidfile %s already exists. Daemon already '
-                          'running?'), self.pidfile)
-            sys.exit(1)
-
-        # Start the daemon
-        self.daemonize()
-        self.run()
-
-    def run(self):
-        """Override this method and call super().run when subclassing Daemon.
-
-        start() will call this method after the process has daemonized.
-        """
-        if not self.watch_log:
-            unwatch_log()
-        drop_privileges(self.user, self.group)
diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py
deleted file mode 100644 (file)
index 77b3c7d..0000000
+++ /dev/null
@@ -1,1264 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import collections
-import os
-import re
-import shutil
-import time
-
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_utils import uuidutils
-import six
-
-from neutron._i18n import _, _LI, _LW, _LE
-from neutron.agent.common import utils as agent_common_utils
-from neutron.agent.linux import external_process
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import iptables_manager
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import ipv6_utils
-from neutron.common import utils as common_utils
-from neutron.extensions import extra_dhcp_opt as edo_ext
-
-LOG = logging.getLogger(__name__)
-
-UDP = 'udp'
-TCP = 'tcp'
-DNS_PORT = 53
-DHCPV4_PORT = 67
-DHCPV6_PORT = 547
-METADATA_DEFAULT_PREFIX = 16
-METADATA_DEFAULT_IP = '169.254.169.254'
-METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
-                                   METADATA_DEFAULT_PREFIX)
-METADATA_PORT = 80
-WIN2k3_STATIC_DNS = 249
-NS_PREFIX = 'qdhcp-'
-DNSMASQ_SERVICE_NAME = 'dnsmasq'
-
-
-class DictModel(dict):
-    """Convert dict into an object that provides attribute access to values."""
-
-    def __init__(self, *args, **kwargs):
-        """Convert dict values to DictModel values."""
-        super(DictModel, self).__init__(*args, **kwargs)
-
-        def needs_upgrade(item):
-            """Check if `item` is a dict and needs to be changed to DictModel.
-            """
-            return isinstance(item, dict) and not isinstance(item, DictModel)
-
-        def upgrade(item):
-            """Upgrade item if it needs to be upgraded."""
-            if needs_upgrade(item):
-                return DictModel(item)
-            else:
-                return item
-
-        for key, value in six.iteritems(self):
-            if isinstance(value, (list, tuple)):
-                # Keep the same type but convert dicts to DictModels
-                self[key] = type(value)(
-                    (upgrade(item) for item in value)
-                )
-            elif needs_upgrade(value):
-                # Change dict instance values to DictModel instance values
-                self[key] = DictModel(value)
-
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError as e:
-            raise AttributeError(e)
-
-    def __setattr__(self, name, value):
-        self[name] = value
-
-    def __delattr__(self, name):
-        del self[name]
-
-    def __str__(self):
-        pairs = ['%s=%s' % (k, v) for k, v in self.items()]
-        return ', '.join(sorted(pairs))
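-
-    # Illustrative usage (not part of the original module):
-    #   m = DictModel({'id': 'net-1', 'subnets': [{'cidr': '10.0.0.0/24'}]})
-    #   m.id               -> 'net-1'
-    #   m.subnets[0].cidr  -> '10.0.0.0/24'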
-
-
-class NetModel(DictModel):
-
-    def __init__(self, d):
-        super(NetModel, self).__init__(d)
-
-        self._ns_name = "%s%s" % (NS_PREFIX, self.id)
-
-    @property
-    def namespace(self):
-        return self._ns_name
-
-
-@six.add_metaclass(abc.ABCMeta)
-class DhcpBase(object):
-
-    def __init__(self, conf, network, process_monitor,
-                 version=None, plugin=None):
-        self.conf = conf
-        self.network = network
-        self.process_monitor = process_monitor
-        self.device_manager = DeviceManager(self.conf, plugin)
-        self.version = version
-
-    @abc.abstractmethod
-    def enable(self):
-        """Enables DHCP for this network."""
-
-    @abc.abstractmethod
-    def disable(self, retain_port=False):
-        """Disable dhcp for this network."""
-
-    def restart(self):
-        """Restart the dhcp service for the network."""
-        self.disable(retain_port=True)
-        self.enable()
-
-    @abc.abstractproperty
-    def active(self):
-        """Boolean representing the running state of the DHCP server."""
-
-    @abc.abstractmethod
-    def reload_allocations(self):
-        """Force the DHCP server to reload the assignment database."""
-
-    @classmethod
-    def existing_dhcp_networks(cls, conf):
-        """Return a list of existing networks ids that we have configs for."""
-
-        raise NotImplementedError()
-
-    @classmethod
-    def check_version(cls):
-        """Execute version checks on DHCP server."""
-
-        raise NotImplementedError()
-
-    @classmethod
-    def get_isolated_subnets(cls, network):
-        """Returns a dict indicating whether or not a subnet is isolated"""
-        raise NotImplementedError()
-
-    @classmethod
-    def should_enable_metadata(cls, conf, network):
-        """True if the metadata-proxy should be enabled for the network."""
-        raise NotImplementedError()
-
-
-class DhcpLocalProcess(DhcpBase):
-    PORTS = []
-
-    def __init__(self, conf, network, process_monitor, version=None,
-                 plugin=None):
-        super(DhcpLocalProcess, self).__init__(conf, network, process_monitor,
-                                               version, plugin)
-        self.confs_dir = self.get_confs_dir(conf)
-        self.network_conf_dir = os.path.join(self.confs_dir, network.id)
-        common_utils.ensure_dir(self.network_conf_dir)
-
-    @staticmethod
-    def get_confs_dir(conf):
-        return os.path.abspath(os.path.normpath(conf.dhcp_confs))
-
-    def get_conf_file_name(self, kind):
-        """Returns the file name for a given kind of config file."""
-        return os.path.join(self.network_conf_dir, kind)
-
-    def _remove_config_files(self):
-        shutil.rmtree(self.network_conf_dir, ignore_errors=True)
-
-    def _enable_dhcp(self):
-        """check if there is a subnet within the network with dhcp enabled."""
-        for subnet in self.network.subnets:
-            if subnet.enable_dhcp:
-                return True
-        return False
-
-    def enable(self):
-        """Enables DHCP for this network by spawning a local process."""
-        if self.active:
-            self.restart()
-        elif self._enable_dhcp():
-            common_utils.ensure_dir(self.network_conf_dir)
-            interface_name = self.device_manager.setup(self.network)
-            self.interface_name = interface_name
-            self.spawn_process()
-
-    def _get_process_manager(self, cmd_callback=None):
-        return external_process.ProcessManager(
-            conf=self.conf,
-            uuid=self.network.id,
-            namespace=self.network.namespace,
-            default_cmd_callback=cmd_callback,
-            pid_file=self.get_conf_file_name('pid'),
-            run_as_root=True)
-
-    def disable(self, retain_port=False):
-        """Disable DHCP for this network by killing the local process."""
-        self.process_monitor.unregister(self.network.id, DNSMASQ_SERVICE_NAME)
-        self._get_process_manager().disable()
-        if not retain_port:
-            self._destroy_namespace_and_port()
-        self._remove_config_files()
-
-    def _destroy_namespace_and_port(self):
-        try:
-            self.device_manager.destroy(self.network, self.interface_name)
-        except RuntimeError:
-            LOG.warning(_LW('Failed trying to delete interface: %s'),
-                        self.interface_name)
-
-        ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
-        try:
-            ns_ip.netns.delete(self.network.namespace)
-        except RuntimeError:
-            LOG.warning(_LW('Failed trying to delete namespace: %s'),
-                        self.network.namespace)
-
-    def _get_value_from_conf_file(self, kind, converter=None):
-        """A helper function to read a value from one of the state files."""
-        file_name = self.get_conf_file_name(kind)
-        msg = _('Error while reading %s')
-
-        try:
-            with open(file_name, 'r') as f:
-                try:
-                    return converter(f.read()) if converter else f.read()
-                except ValueError:
-                    msg = _('Unable to convert value in %s')
-        except IOError:
-            msg = _('Unable to access %s')
-
-        LOG.debug(msg, file_name)
-        return None
-
-    @property
-    def interface_name(self):
-        return self._get_value_from_conf_file('interface')
-
-    @interface_name.setter
-    def interface_name(self, value):
-        interface_file_path = self.get_conf_file_name('interface')
-        common_utils.replace_file(interface_file_path, value)
-
-    @property
-    def active(self):
-        return self._get_process_manager().active
-
-    @abc.abstractmethod
-    def spawn_process(self):
-        pass
-
-
-class Dnsmasq(DhcpLocalProcess):
-    # The ports that need to be opened when security policies are active
-    # on the Neutron port used for DHCP.  These are provided as a convenience
-    # for users of this class.
-    PORTS = {constants.IP_VERSION_4:
-             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
-             constants.IP_VERSION_6:
-             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
-             }
-
-    _TAG_PREFIX = 'tag%d'
-
-    _ID = 'id:'
-
-    @classmethod
-    def check_version(cls):
-        pass
-
-    @classmethod
-    def existing_dhcp_networks(cls, conf):
-        """Return a list of existing networks ids that we have configs for."""
-        confs_dir = cls.get_confs_dir(conf)
-        try:
-            return [
-                c for c in os.listdir(confs_dir)
-                if uuidutils.is_uuid_like(c)
-            ]
-        except OSError:
-            return []
-
-    def _build_cmdline_callback(self, pid_file):
-        cmd = [
-            'dnsmasq',
-            '--no-hosts',
-            '--strict-order',
-            '--except-interface=lo',
-            '--pid-file=%s' % pid_file,
-            '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
-            '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
-            '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
-            '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
-            '--dhcp-match=set:ipxe,175',
-        ]
-        if self.device_manager.driver.bridged:
-            cmd += [
-                '--bind-interfaces',
-                '--interface=%s' % self.interface_name,
-            ]
-        else:
-            cmd += [
-                '--bind-dynamic',
-                '--interface=%s' % self.interface_name,
-                '--interface=tap*',
-                '--bridge-interface=%s,tap*' % self.interface_name,
-            ]
-
-        possible_leases = 0
-        for i, subnet in enumerate(self.network.subnets):
-            mode = None
-            # if a subnet is specified to have dhcp disabled
-            if not subnet.enable_dhcp:
-                continue
-            if subnet.ip_version == 4:
-                mode = 'static'
-            else:
-                # Note(scollins) If the IPv6 attributes are not set, use
-                # static mode to preserve the previous behavior
-                addr_mode = getattr(subnet, 'ipv6_address_mode', None)
-                ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
-                if (addr_mode in [constants.DHCPV6_STATEFUL,
-                                  constants.DHCPV6_STATELESS] or
-                        not addr_mode and not ra_mode):
-                    mode = 'static'
-
-            cidr = netaddr.IPNetwork(subnet.cidr)
-
-            if self.conf.dhcp_lease_duration == -1:
-                lease = 'infinite'
-            else:
-                lease = '%ss' % self.conf.dhcp_lease_duration
-
-            # mode is optional; skip the dhcp-range when it was not set
-            if mode:
-                if subnet.ip_version == 4:
-                    cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
-                               ('set:', self._TAG_PREFIX % i,
-                                cidr.network, mode, lease))
-                else:
-                    cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
-                               ('set:', self._TAG_PREFIX % i,
-                                cidr.network, mode,
-                                cidr.prefixlen, lease))
-                possible_leases += cidr.size
-
-        if cfg.CONF.advertise_mtu:
-            mtu = getattr(self.network, 'mtu', 0)
-            # Do not advertise unknown mtu
-            if mtu > 0:
-                cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)
-
-        # Cap the limit, because creating lots of subnets can inflate
-        # the possible lease count.
-        cmd.append('--dhcp-lease-max=%d' %
-                   min(possible_leases, self.conf.dnsmasq_lease_max))
-
-        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
-        if self.conf.dnsmasq_dns_servers:
-            cmd.extend(
-                '--server=%s' % server
-                for server in self.conf.dnsmasq_dns_servers)
-        else:
-            # We only look at 'dnsmasq_local_resolv' when 'dnsmasq_dns_servers'
-            # is not set, since the latter explicitly overrides the former.
-            if not self.conf.dnsmasq_local_resolv:
-                cmd.append('--no-resolv')
-
-        if self.conf.dhcp_domain:
-            cmd.append('--domain=%s' % self.conf.dhcp_domain)
-
-        if self.conf.dhcp_broadcast_reply:
-            cmd.append('--dhcp-broadcast')
-
-        if self.conf.dnsmasq_base_log_dir:
-            try:
-                if not os.path.exists(self.conf.dnsmasq_base_log_dir):
-                    os.makedirs(self.conf.dnsmasq_base_log_dir)
-                log_filename = os.path.join(
-                    self.conf.dnsmasq_base_log_dir,
-                    self.network.id, 'dhcp_dns_log')
-                cmd.append('--log-queries')
-                cmd.append('--log-dhcp')
-                cmd.append('--log-facility=%s' % log_filename)
-            except OSError:
-                LOG.error(_LE('Error while creating dnsmasq base log dir: %s'),
-                          self.conf.dnsmasq_base_log_dir)
-
-        return cmd
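-
-    # For orientation, a typical command line built above looks roughly like
-    # the following (paths and ids are illustrative, not from this module):
-    #   dnsmasq --no-hosts --strict-order --except-interface=lo
-    #       --pid-file=/var/lib/neutron/dhcp/<network-id>/pid
-    #       --dhcp-hostsfile=/var/lib/neutron/dhcp/<network-id>/host ...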
-
-    def spawn_process(self):
-        """Spawn the process, if it's not spawned already."""
-        # we only need to generate the lease file the first time dnsmasq starts
-        # rather than on every reload since dnsmasq will keep the file current
-        self._output_init_lease_file()
-        self._spawn_or_reload_process(reload_with_HUP=False)
-
-    def _spawn_or_reload_process(self, reload_with_HUP):
-        """Spawns or reloads a Dnsmasq process for the network.
-
-        When reload_with_HUP is True, a running dnsmasq process receives
-        a HUP signal to reload its configuration; if the process is not
-        running, it is spawned instead.
-        """
-
-        self._output_config_files()
-
-        pm = self._get_process_manager(
-            cmd_callback=self._build_cmdline_callback)
-
-        pm.enable(reload_cfg=reload_with_HUP)
-
-        self.process_monitor.register(uuid=self.network.id,
-                                      service_name=DNSMASQ_SERVICE_NAME,
-                                      monitored_process=pm)
-
-    def _release_lease(self, mac_address, ip, client_id):
-        """Release a DHCP lease."""
-        if netaddr.IPAddress(ip).version == constants.IP_VERSION_6:
-            # Note(SridharG) dhcp_release is only supported for IPv4
-            # addresses. For more details, please refer to the man page.
-            return
-
-        cmd = ['dhcp_release', self.interface_name, ip, mac_address]
-        if client_id:
-            cmd.append(client_id)
-        ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace)
-        ip_wrapper.netns.execute(cmd, run_as_root=True)
-
-    def _output_config_files(self):
-        self._output_hosts_file()
-        self._output_addn_hosts_file()
-        self._output_opts_file()
-
-    def reload_allocations(self):
-        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""
-
-        # If all subnets turn off dhcp, kill the process.
-        if not self._enable_dhcp():
-            self.disable()
-            LOG.debug('Killing dnsmasq for network since all subnets have '
-                      'turned off DHCP: %s', self.network.id)
-            return
-
-        self._release_unused_leases()
-        self._spawn_or_reload_process(reload_with_HUP=True)
-        LOG.debug('Reloading allocations for network: %s', self.network.id)
-        self.device_manager.update(self.network, self.interface_name)
-
-    def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
-        """Sort fixed_ips so that stateless IPv6 subnets appear first.
-
-        For example, if a port with v6 extra_dhcp_opts is on a network with
-        both an IPv4 and a stateless IPv6 subnet, the dhcp host file will
-        have the following two entries for the same MAC:
-
-        fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
-        (entry for IPv4 dhcp)
-        fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
-        (entry for stateless IPv6 for v6 options)
-
-        dnsmasq internals for processing host file entries:
-        1) dnsmasq reads the host file from EOF.
-        2) So it first picks up the stateless IPv6 entry,
-           fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
-        3) But dnsmasq doesn't have sufficient checks to skip this entry
-           and pick the next entry to process the DHCP IPv4 request.
-        4) So dnsmasq uses this entry to process the DHCP IPv4 request.
-        5) As there is no IP in this entry, dnsmasq logs "no address
-           available" and fails to send a DHCPOFFER message.
-
-        As we rely on internal details of dnsmasq to understand and fix the
-        issue, Ihar sent a mail to the dnsmasq-discuss mailing list:
-        http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/
-        009650.html
-
-        If we reverse the order in which entries are written to the host
-        file, so that the stateless IPv6 entry comes first, dnsmasq can
-        correctly fetch the IPv4 address.
-        """
-        return sorted(
-            fixed_ips,
-            key=lambda fip: ((fip.subnet_id in v6_nets) and (
-                v6_nets[fip.subnet_id].ipv6_address_mode == (
-                    constants.DHCPV6_STATELESS))),
-            reverse=True)
-
-    def _iter_hosts(self):
-        """Iterate over hosts.
-
-        For each host on the network we yield a tuple containing:
-        (
-            port,  # a DictModel instance representing the port.
-            alloc,  # a DictModel instance of the allocated ip and subnet.
-                    # If alloc is None, there is no need to allocate an IPv6
-                    # address on a stateless DHCPv6 network.
-            hostname,  # Short host name.
-            fqdn,  # Canonical hostname in the format 'hostname[.domain]'.
-            no_dhcp,  # A flag indicating that the address doesn't need a DHCP
-                      # IP address.
-            no_opts,  # A flag indicating that options shouldn't be written.
-        )
-        """
-        v6_nets = dict((subnet.id, subnet) for subnet in
-                       self.network.subnets if subnet.ip_version == 6)
-
-        for port in self.network.ports:
-            fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
-                                                         v6_nets)
-            # Check whether the Neutron server supports the dns_name
-            # attribute in the ports API.
-            dns_assignment = getattr(port, 'dns_assignment', None)
-            if dns_assignment:
-                dns_ip_map = {d.ip_address: d for d in dns_assignment}
-            for alloc in fixed_ips:
-                no_dhcp = False
-                no_opts = False
-                if alloc.subnet_id in v6_nets:
-                    addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
-                    no_dhcp = addr_mode in (constants.IPV6_SLAAC,
-                                            constants.DHCPV6_STATELESS)
-                    # we don't set up anything for SLAAC. It doesn't make
-                    # sense to provide options for a client that won't use DHCP
-                    no_opts = addr_mode == constants.IPV6_SLAAC
-
-                # If dns_name attribute is supported by ports API, return the
-                # dns_assignment generated by the Neutron server. Otherwise,
-                # generate hostname and fqdn locally (previous behaviour)
-                if dns_assignment:
-                    hostname = dns_ip_map[alloc.ip_address].hostname
-                    fqdn = dns_ip_map[alloc.ip_address].fqdn
-                else:
-                    hostname = 'host-%s' % alloc.ip_address.replace(
-                        '.', '-').replace(':', '-')
-                    fqdn = hostname
-                    if self.conf.dhcp_domain:
-                        fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
-                yield (port, alloc, hostname, fqdn, no_dhcp, no_opts)
-
-    def _get_port_extra_dhcp_opts(self, port):
-        return getattr(port, edo_ext.EXTRADHCPOPTS, False)
-
-    def _output_init_lease_file(self):
-        """Write a fake lease file to bootstrap dnsmasq.
-
-        The generated file is passed to the --dhcp-leasefile option of dnsmasq.
-        This is used as a bootstrapping mechanism to avoid NAKing active leases
-        when a dhcp server is scheduled to another agent. Using a leasefile
-        will also prevent dnsmasq from NAKing or ignoring renewals after a
-        restart.
-
-        Format is as follows:
-        epoch-timestamp mac_addr ip_addr hostname client-ID
-        """
-        filename = self.get_conf_file_name('leases')
-        buf = six.StringIO()
-
-        LOG.debug('Building initial lease file: %s', filename)
-        # we make up a lease time for the database entry
-        if self.conf.dhcp_lease_duration == -1:
-            # Even with an infinite lease, a client may choose to renew a
-            # previous lease on reboot or interface bounce so we should have
-            # an entry for it.
-            # Dnsmasq timestamp format for an infinite lease is 0.
-            timestamp = 0
-        else:
-            timestamp = int(time.time()) + self.conf.dhcp_lease_duration
-        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
-                                   if s.enable_dhcp]
-        for host_tuple in self._iter_hosts():
-            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
-            # don't write ip address which belongs to a dhcp disabled subnet
-            # or an IPv6 SLAAC/stateless subnet
-            if no_dhcp or alloc.subnet_id not in dhcp_enabled_subnet_ids:
-                continue
-
-            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
-            # all that matters is the mac address and IP. the hostname and
-            # client ID will be overwritten on the next renewal.
-            buf.write('%s %s %s * *\n' %
-                      (timestamp, port.mac_address, ip_address))
-        contents = buf.getvalue()
-        common_utils.replace_file(filename, contents)
-        LOG.debug('Done building initial lease file %s with contents:\n%s',
-                  filename, contents)
-        return filename
-
-    @staticmethod
-    def _format_address_for_dnsmasq(address):
-        # (dzyu) Check if it is a valid IPv6 address; if so, wrap it
-        # in '[]' so that dnsmasq can distinguish a MAC address from
-        # an IPv6 address.
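-        # e.g. (illustrative): '2001:db8::5' -> '[2001:db8::5]'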
-        if netaddr.valid_ipv6(address):
-            return '[%s]' % address
-        return address
-
-    def _output_hosts_file(self):
-        """Writes a dnsmasq compatible dhcp hosts file.
-
-        The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
-        and lists the hosts on the network which should receive a dhcp lease.
-        Each line in this file is in the form::
-
-            'mac_address,FQDN,ip_address'
-
-        IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
-        this file if it did not give a lease to a host listed in it (e.g.:
-        multiple dnsmasq instances on the same network if this network is on
-        multiple network nodes). This file only defines the hosts which
-        should receive a dhcp lease; host resolution itself is handled by
-        the `_output_addn_hosts_file` method.
-        """
-        buf = six.StringIO()
-        filename = self.get_conf_file_name('host')
-
-        LOG.debug('Building host file: %s', filename)
-        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
-                                   if s.enable_dhcp]
-        # NOTE(ihrachyshka): the loop should not log anything inside it, to
-        # avoid potential performance drop when lots of hosts are dumped
-        for host_tuple in self._iter_hosts():
-            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
-            if no_dhcp:
-                if not no_opts and self._get_port_extra_dhcp_opts(port):
-                    buf.write('%s,%s%s\n' %
-                              (port.mac_address, 'set:', port.id))
-                continue
-
-            # don't write ip address which belongs to a dhcp disabled subnet.
-            if alloc.subnet_id not in dhcp_enabled_subnet_ids:
-                continue
-
-            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
-
-            if self._get_port_extra_dhcp_opts(port):
-                client_id = self._get_client_id(port)
-                if client_id and len(port.extra_dhcp_opts) > 1:
-                    buf.write('%s,%s%s,%s,%s,%s%s\n' %
-                              (port.mac_address, self._ID, client_id, name,
-                               ip_address, 'set:', port.id))
-                elif client_id and len(port.extra_dhcp_opts) == 1:
-                    buf.write('%s,%s%s,%s,%s\n' %
-                              (port.mac_address, self._ID, client_id, name,
-                               ip_address))
-                else:
-                    buf.write('%s,%s,%s,%s%s\n' %
-                              (port.mac_address, name, ip_address,
-                               'set:', port.id))
-            else:
-                buf.write('%s,%s,%s\n' %
-                          (port.mac_address, name, ip_address))
-
-        common_utils.replace_file(filename, buf.getvalue())
-        LOG.debug('Done building host file %s with contents:\n%s', filename,
-                  buf.getvalue())
-        return filename
-
-    def _get_client_id(self, port):
-        if self._get_port_extra_dhcp_opts(port):
-            for opt in port.extra_dhcp_opts:
-                if opt.opt_name == edo_ext.CLIENT_ID:
-                    return opt.opt_value
-
-    def _read_hosts_file_leases(self, filename):
-        leases = set()
-        try:
-            with open(filename) as f:
-                for line in f:
-                    host = line.strip().split(',')
-                    mac = host[0]
-                    client_id = None
-                    if host[1].startswith('set:'):
-                        continue
-                    if host[1].startswith(self._ID):
-                        ip = host[3].strip('[]')
-                        client_id = host[1][len(self._ID):]
-                    else:
-                        ip = host[2].strip('[]')
-                    leases.add((ip, mac, client_id))
-        except (OSError, IOError):
-            LOG.debug('Error while reading hosts file %s', filename)
-        return leases
-
-    def _release_unused_leases(self):
-        filename = self.get_conf_file_name('host')
-        old_leases = self._read_hosts_file_leases(filename)
-
-        new_leases = set()
-        dhcp_port_exists = False
-        dhcp_port_on_this_host = self.device_manager.get_device_id(
-            self.network)
-        for port in self.network.ports:
-            client_id = self._get_client_id(port)
-            for alloc in port.fixed_ips:
-                new_leases.add((alloc.ip_address, port.mac_address, client_id))
-            if port.device_id == dhcp_port_on_this_host:
-                dhcp_port_exists = True
-
-        for ip, mac, client_id in old_leases - new_leases:
-            self._release_lease(mac, ip, client_id)
-
-        if not dhcp_port_exists:
-            self.device_manager.driver.unplug(
-                self.interface_name, namespace=self.network.namespace)
-
-    def _output_addn_hosts_file(self):
-        """Writes a dnsmasq compatible additional hosts file.
-
-        The generated file is sent to the --addn-hosts option of dnsmasq,
-        and lists the hosts on the network which should be resolved even if
-        the dnsmasq instance did not give a lease to the host (see the
-        `_output_hosts_file` method).
-        Each line in this file is in the same form as a standard /etc/hosts
-        file.
-        """
-        buf = six.StringIO()
-        for host_tuple in self._iter_hosts():
-            port, alloc, hostname, fqdn, no_dhcp, no_opts = host_tuple
-            # The `fqdn` must be written before the `hostname` so that
-            # PTR responses return the fqdn.
-            if alloc:
-                buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
-        addn_hosts = self.get_conf_file_name('addn_hosts')
-        common_utils.replace_file(addn_hosts, buf.getvalue())
-        return addn_hosts
-
-    def _output_opts_file(self):
-        """Write a dnsmasq compatible options file."""
-        options, subnet_index_map = self._generate_opts_per_subnet()
-        options += self._generate_opts_per_port(subnet_index_map)
-
-        name = self.get_conf_file_name('opts')
-        common_utils.replace_file(name, '\n'.join(options))
-        return name
-
-    def _generate_opts_per_subnet(self):
-        options = []
-        subnet_index_map = {}
-        if self.conf.enable_isolated_metadata or self.conf.force_metadata:
-            subnet_to_interface_ip = self._make_subnet_interface_ip_map()
-        isolated_subnets = self.get_isolated_subnets(self.network)
-        for i, subnet in enumerate(self.network.subnets):
-            addr_mode = getattr(subnet, 'ipv6_address_mode', None)
-            if (not subnet.enable_dhcp or
-                (subnet.ip_version == 6 and
-                 addr_mode == constants.IPV6_SLAAC)):
-                continue
-            if subnet.dns_nameservers:
-                options.append(
-                    self._format_option(
-                        subnet.ip_version, i, 'dns-server',
-                        ','.join(
-                            Dnsmasq._convert_to_literal_addrs(
-                                subnet.ip_version, subnet.dns_nameservers))))
-            else:
-                # use the dnsmasq ip as the nameserver only if no
-                # dns-server was submitted by the server
-                subnet_index_map[subnet.id] = i
-
-            if self.conf.dhcp_domain and subnet.ip_version == 6:
-                options.append('tag:tag%s,option6:domain-search,%s' %
-                               (i, ''.join(self.conf.dhcp_domain)))
-
-            gateway = subnet.gateway_ip
-            host_routes = []
-            for hr in subnet.host_routes:
-                if hr.destination == constants.IPv4_ANY:
-                    if not gateway:
-                        gateway = hr.nexthop
-                else:
-                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))
-
-            # Add host routes for isolated network segments
-
-            if (self.conf.force_metadata or
-                (isolated_subnets[subnet.id] and
-                    self.conf.enable_isolated_metadata and
-                    subnet.ip_version == 4)):
-                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
-                host_routes.append(
-                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
-                )
-            elif not isolated_subnets[subnet.id] and gateway:
-                host_routes.append(
-                    '%s/32,%s' % (METADATA_DEFAULT_IP, gateway)
-                )
-
-            if subnet.ip_version == 4:
-                host_routes.extend(["%s,0.0.0.0" % (s.cidr) for s in
-                                    self.network.subnets
-                                    if (s.ip_version == 4 and
-                                        s.cidr != subnet.cidr)])
-
-                if host_routes:
-                    if gateway:
-                        host_routes.append("%s,%s" % (constants.IPv4_ANY,
-                                                      gateway))
-                    options.append(
-                        self._format_option(subnet.ip_version, i,
-                                            'classless-static-route',
-                                            ','.join(host_routes)))
-                    options.append(
-                        self._format_option(subnet.ip_version, i,
-                                            WIN2k3_STATIC_DNS,
-                                            ','.join(host_routes)))
-
-                if gateway:
-                    options.append(self._format_option(subnet.ip_version,
-                                                       i, 'router',
-                                                       gateway))
-                else:
-                    options.append(self._format_option(subnet.ip_version,
-                                                       i, 'router'))
-        return options, subnet_index_map
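-
-    # An option line appended above might look like (illustrative values,
-    # written as a single line in the opts file):
-    #   tag:tag0,option:classless-static-route,
-    #       169.254.169.254/32,10.0.0.2,0.0.0.0/0,10.0.0.1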
-
-    def _generate_opts_per_port(self, subnet_index_map):
-        options = []
-        dhcp_ips = collections.defaultdict(list)
-        for port in self.network.ports:
-            if self._get_port_extra_dhcp_opts(port):
-                port_ip_versions = set(
-                    [netaddr.IPAddress(ip.ip_address).version
-                     for ip in port.fixed_ips])
-                for opt in port.extra_dhcp_opts:
-                    if opt.opt_name == edo_ext.CLIENT_ID:
-                        continue
-                    opt_ip_version = opt.ip_version
-                    if opt_ip_version in port_ip_versions:
-                        options.append(
-                            self._format_option(opt_ip_version, port.id,
-                                                opt.opt_name, opt.opt_value))
-                    else:
-                        LOG.info(_LI("Cannot apply dhcp option %(opt)s "
-                                     "because it's ip_version %(version)d "
-                                     "is not in port's address IP versions"),
-                                 {'opt': opt.opt_name,
-                                  'version': opt_ip_version})
-
-            # provide all dnsmasq ips as dns-servers if there is more than
-            # one dnsmasq for a subnet and no dns-server was submitted by
-            # the server
-            if port.device_owner == constants.DEVICE_OWNER_DHCP:
-                for ip in port.fixed_ips:
-                    i = subnet_index_map.get(ip.subnet_id)
-                    if i is None:
-                        continue
-                    dhcp_ips[i].append(ip.ip_address)
-
-        for i, ips in dhcp_ips.items():
-            for ip_version in (4, 6):
-                vx_ips = [ip for ip in ips
-                          if netaddr.IPAddress(ip).version == ip_version]
-                if vx_ips:
-                    options.append(
-                        self._format_option(
-                            ip_version, i, 'dns-server',
-                            ','.join(
-                                Dnsmasq._convert_to_literal_addrs(ip_version,
-                                                                  vx_ips))))
-        return options
-
-    def _make_subnet_interface_ip_map(self):
-        ip_dev = ip_lib.IPDevice(self.interface_name,
-                                 namespace=self.network.namespace)
-
-        subnet_lookup = dict(
-            (netaddr.IPNetwork(subnet.cidr), subnet.id)
-            for subnet in self.network.subnets
-        )
-
-        retval = {}
-
-        for addr in ip_dev.addr.list():
-            ip_net = netaddr.IPNetwork(addr['cidr'])
-
-            if ip_net in subnet_lookup:
-                retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]
-
-        return retval
-
-    def _format_option(self, ip_version, tag, option, *args):
-        """Format DHCP option by option name or code."""
-        option = str(option)
-        pattern = "(tag:(.*),)?(.*)$"
-        matches = re.match(pattern, option)
-        extra_tag = matches.groups()[0]
-        option = matches.groups()[2]
-
-        if isinstance(tag, int):
-            tag = self._TAG_PREFIX % tag
-
-        if not option.isdigit():
-            if ip_version == 4:
-                option = 'option:%s' % option
-            else:
-                option = 'option6:%s' % option
-        if extra_tag:
-            tags = ('tag:' + tag, extra_tag[:-1], '%s' % option)
-        else:
-            tags = ('tag:' + tag, '%s' % option)
-        return ','.join(tags + args)
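-
-    # Illustrative results (inputs assumed, not from this module):
-    #   _format_option(4, 0, 'dns-server', '8.8.8.8')
-    #       -> 'tag:tag0,option:dns-server,8.8.8.8'
-    #   _format_option(6, 1, 'dns-server', '[2001:db8::1]')
-    #       -> 'tag:tag1,option6:dns-server,[2001:db8::1]'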
-
-    @staticmethod
-    def _convert_to_literal_addrs(ip_version, ips):
-        if ip_version == 4:
-            return ips
-        return ['[' + ip + ']' for ip in ips]
-
-    @classmethod
-    def get_isolated_subnets(cls, network):
-        """Returns a dict indicating whether or not a subnet is isolated
-
-        A subnet is considered non-isolated if there is a port connected to
-        the subnet, and the port's ip address matches that of the subnet's
-        gateway. The port must be owned by a neutron router.
-        """
-        isolated_subnets = collections.defaultdict(lambda: True)
-        subnets = dict((subnet.id, subnet) for subnet in network.subnets)
-
-        for port in network.ports:
-            if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS:
-                continue
-            for alloc in port.fixed_ips:
-                if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address:
-                    isolated_subnets[alloc.subnet_id] = False
-
-        return isolated_subnets
-
-    @classmethod
-    def should_enable_metadata(cls, conf, network):
-        """Determine whether the metadata proxy is needed for a network
-
-        This method returns True for truly isolated networks (i.e. not
-        attached to a router) when enable_isolated_metadata is True, or for
-        all networks when the force_metadata flag is True.
-
-        This method also returns True when enable_metadata_network and
-        enable_isolated_metadata are both True, and the network passed as a
-        parameter has a subnet in the link-local CIDR, thus characterizing
-        it as a "metadata" network. The metadata network is used by
-        solutions which do not leverage the l3 agent for providing access
-        to the metadata service via logical routers built with 3rd party
-        backends.
-        """
-        if conf.force_metadata:
-            return True
-
-        if conf.enable_metadata_network and conf.enable_isolated_metadata:
-            # check if the network has a metadata subnet
-            meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR)
-            if any(netaddr.IPNetwork(s.cidr) in meta_cidr
-                   for s in network.subnets):
-                return True
-
-        if not conf.enable_isolated_metadata:
-            return False
-
-        isolated_subnets = cls.get_isolated_subnets(network)
-        return any(isolated_subnets[subnet.id] for subnet in network.subnets)
-
-
-class DeviceManager(object):
-
-    def __init__(self, conf, plugin):
-        self.conf = conf
-        self.plugin = plugin
-        self.driver = agent_common_utils.load_interface_driver(conf)
-
-    def get_interface_name(self, network, port):
-        """Return interface(device) name for use by the DHCP process."""
-        return self.driver.get_device_name(port)
-
-    def get_device_id(self, network):
-        """Return a unique DHCP device ID for this host on the network."""
-        # There could be more than one dhcp server per network, so create
-        # a device id that combines host and network ids
-        return common_utils.get_dhcp_agent_device_id(network.id,
-                                                     self.conf.host)
-
-    def _set_default_route(self, network, device_name):
-        """Sets the default gateway for this dhcp namespace.
-
-        This method is idempotent: it only adjusts the route when doing so
-        would actually change it.  This makes it safe to call repeatedly
-        and avoids unnecessary perturbation of the system.
-        """
-        device = ip_lib.IPDevice(device_name, namespace=network.namespace)
-        gateway = device.route.get_gateway()
-        if gateway:
-            gateway = gateway.get('gateway')
-
-        for subnet in network.subnets:
-            skip_subnet = (
-                subnet.ip_version != 4
-                or not subnet.enable_dhcp
-                or subnet.gateway_ip is None)
-
-            if skip_subnet:
-                continue
-
-            if gateway != subnet.gateway_ip:
-                LOG.debug('Setting gateway for dhcp netns on net %(n)s to '
-                          '%(ip)s',
-                          {'n': network.id, 'ip': subnet.gateway_ip})
-
-                device.route.add_gateway(subnet.gateway_ip)
-
-            return
-
-        # No subnets on the network have a valid gateway.  Clean it up to avoid
-        # confusion from seeing an invalid gateway here.
-        if gateway is not None:
-            LOG.debug('Removing gateway for dhcp netns on net %s', network.id)
-
-            device.route.delete_gateway(gateway)
-
-    def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets):
-        """Set up the existing DHCP port, if there is one."""
-
-        # To avoid pylint thinking that port might be undefined after
-        # the following loop...
-        port = None
-
-        # Look for an existing DHCP port for this network.
-        for port in network.ports:
-            port_device_id = getattr(port, 'device_id', None)
-            if port_device_id == device_id:
-                # If using gateway IPs on this port, we can skip the
-                # following code, whose purpose is just to review and
-                # update the Neutron-allocated IP addresses for the
-                # port.
-                if self.driver.use_gateway_ips:
-                    return port
-                # Otherwise break out, as we now have the DHCP port
-                # whose subnets and addresses we need to review.
-                break
-        else:
-            return None
-
-        # Compare what the subnets should be against what is already
-        # on the port.
-        dhcp_enabled_subnet_ids = set(dhcp_subnets)
-        port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)
-
-        # If those differ, we need to call update.
-        if dhcp_enabled_subnet_ids != port_subnet_ids:
-            # Collect the subnets and fixed IPs that the port already
-            # has, for subnets that are still in the DHCP-enabled set.
-            wanted_fixed_ips = []
-            for fixed_ip in port.fixed_ips:
-                if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
-                    wanted_fixed_ips.append(
-                        {'subnet_id': fixed_ip.subnet_id,
-                         'ip_address': fixed_ip.ip_address})
-
-            # Add subnet IDs for new DHCP-enabled subnets.
-            wanted_fixed_ips.extend(
-                dict(subnet_id=s)
-                for s in dhcp_enabled_subnet_ids - port_subnet_ids)
-
-            # Update the port to have the calculated subnets and fixed
-            # IPs.  The Neutron server will allocate a fresh IP for
-            # each subnet that doesn't already have one.
-            port = self.plugin.update_dhcp_port(
-                port.id,
-                {'port': {'network_id': network.id,
-                          'fixed_ips': wanted_fixed_ips}})
-            if not port:
-                raise exceptions.Conflict()
-
-        return port
-
-    def _setup_reserved_dhcp_port(self, network, device_id, dhcp_subnets):
-        """Setup the reserved DHCP port, if there is one."""
-        LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
-                  ' does not yet exist. Checking for a reserved port.',
-                  {'device_id': device_id, 'network_id': network.id})
-        for port in network.ports:
-            port_device_id = getattr(port, 'device_id', None)
-            if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
-                try:
-                    port = self.plugin.update_dhcp_port(
-                        port.id, {'port': {'network_id': network.id,
-                                           'device_id': device_id}})
-                except oslo_messaging.RemoteError as e:
-                    if e.exc_type == exceptions.DhcpPortInUse:
-                        LOG.info(_LI("Skipping DHCP port %s as it is "
-                                     "already in use"), port.id)
-                        continue
-                    raise
-                if port:
-                    return port
-
-    def _setup_new_dhcp_port(self, network, device_id, dhcp_subnets):
-        """Create and set up new DHCP port for the specified network."""
-        LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
-                  ' does not yet exist. Creating new one.',
-                  {'device_id': device_id, 'network_id': network.id})
-
-        # Make a list of the subnets that need a unique IP address for
-        # this DHCP port.
-        if self.driver.use_gateway_ips:
-            unique_ip_subnets = []
-        else:
-            unique_ip_subnets = [dict(subnet_id=s) for s in dhcp_subnets]
-
-        port_dict = dict(
-            name='',
-            admin_state_up=True,
-            device_id=device_id,
-            network_id=network.id,
-            tenant_id=network.tenant_id,
-            fixed_ips=unique_ip_subnets)
-        return self.plugin.create_dhcp_port({'port': port_dict})
-
-    def setup_dhcp_port(self, network):
-        """Create/update DHCP port for the host if needed and return port."""
-
-        # The ID that the DHCP port will have (or already has).
-        device_id = self.get_device_id(network)
-
-        # Get the set of DHCP-enabled subnets on this network.
-        dhcp_subnets = {subnet.id: subnet for subnet in network.subnets
-                        if subnet.enable_dhcp}
-
-        # There are 3 cases: either the DHCP port already exists (but
-        # might need to be updated for a changed set of subnets); or
-        # some other code has already prepared a 'reserved' DHCP port,
-        # and we just need to adopt that; or we need to create a new
-        # DHCP port.  Try each of those in turn until we have a DHCP
-        # port.
-        for setup_method in (self._setup_existing_dhcp_port,
-                             self._setup_reserved_dhcp_port,
-                             self._setup_new_dhcp_port):
-            dhcp_port = setup_method(network, device_id, dhcp_subnets)
-            if dhcp_port:
-                break
-        else:
-            raise exceptions.Conflict()
-
-        # Convert subnet_id to subnet dict
-        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
-                          ip_address=fixed_ip.ip_address,
-                          subnet=dhcp_subnets[fixed_ip.subnet_id])
-                     for fixed_ip in dhcp_port.fixed_ips]
-
-        ips = [DictModel(item) if isinstance(item, dict) else item
-               for item in fixed_ips]
-        dhcp_port.fixed_ips = ips
-
-        return dhcp_port
-
-    def _update_dhcp_port(self, network, port):
-        for index in range(len(network.ports)):
-            if network.ports[index].id == port.id:
-                network.ports[index] = port
-                break
-        else:
-            network.ports.append(port)
-
-    def _cleanup_stale_devices(self, network, dhcp_port):
-        LOG.debug("Cleaning stale devices for network %s", network.id)
-        dev_name = self.driver.get_device_name(dhcp_port)
-        ns_ip = ip_lib.IPWrapper(namespace=network.namespace)
-        for d in ns_ip.get_devices(exclude_loopback=True):
-            # delete all devices except current active DHCP port device
-            if d.name != dev_name:
-                LOG.debug("Found stale device %s, deleting", d.name)
-                self.driver.unplug(d.name, namespace=network.namespace)
-
-    def setup(self, network):
-        """Create and initialize a device for network's DHCP on this host."""
-        port = self.setup_dhcp_port(network)
-        self._update_dhcp_port(network, port)
-        interface_name = self.get_interface_name(network, port)
-
-        if ip_lib.ensure_device_is_ready(interface_name,
-                                         namespace=network.namespace):
-            LOG.debug('Reusing existing device: %s.', interface_name)
-        else:
-            self.driver.plug(network.id,
-                             port.id,
-                             interface_name,
-                             port.mac_address,
-                             namespace=network.namespace)
-            self.fill_dhcp_udp_checksums(namespace=network.namespace)
-        ip_cidrs = []
-        for fixed_ip in port.fixed_ips:
-            subnet = fixed_ip.subnet
-            if not ipv6_utils.is_auto_address_subnet(subnet):
-                net = netaddr.IPNetwork(subnet.cidr)
-                ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
-                ip_cidrs.append(ip_cidr)
-
-        if self.driver.use_gateway_ips:
-            # For each DHCP-enabled subnet, add that subnet's gateway
-            # IP address to the Linux device for the DHCP port.
-            for subnet in network.subnets:
-                if not subnet.enable_dhcp:
-                    continue
-                gateway = subnet.gateway_ip
-                if gateway:
-                    net = netaddr.IPNetwork(subnet.cidr)
-                    ip_cidrs.append('%s/%s' % (gateway, net.prefixlen))
-
-        if self.conf.enable_isolated_metadata:
-            ip_cidrs.append(METADATA_DEFAULT_CIDR)
-
-        self.driver.init_l3(interface_name, ip_cidrs,
-                            namespace=network.namespace)
-
-        self._set_default_route(network, interface_name)
-        try:
-            self._cleanup_stale_devices(network, port)
-        except Exception:
-            # catch everything, as we don't want to fail because of the
-            # cleanup step
-            LOG.error(_LE("Exception during stale dhcp device cleanup"))
-
-        return interface_name
-
-    def update(self, network, device_name):
-        """Update device settings for the network's DHCP on this host."""
-        self._set_default_route(network, device_name)
-
-    def destroy(self, network, device_name):
-        """Destroy the device used for the network's DHCP on this host."""
-        if device_name:
-            self.driver.unplug(device_name, namespace=network.namespace)
-        else:
-            LOG.debug('No interface exists for network %s', network.id)
-
-        self.plugin.release_dhcp_port(network.id,
-                                      self.get_device_id(network))
-
-    def fill_dhcp_udp_checksums(self, namespace):
-        """Ensure DHCP reply packets always have correct UDP checksums."""
-        iptables_mgr = iptables_manager.IptablesManager(use_ipv6=False,
-                                                        namespace=namespace)
-        ipv4_rule = ('-p udp --dport %d -j CHECKSUM --checksum-fill'
-                     % constants.DHCP_RESPONSE_PORT)
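-        # Assuming DHCP_RESPONSE_PORT is 68 (the DHCP client port), the
-        # rendered rule reads:
-        #   -p udp --dport 68 -j CHECKSUM --checksum-fill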
-        iptables_mgr.ipv4['mangle'].add_rule('POSTROUTING', ipv4_rule)
-        iptables_mgr.apply()
diff --git a/neutron/agent/linux/dibbler.py b/neutron/agent/linux/dibbler.py
deleted file mode 100644 (file)
index 14fc6b6..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2015 Cisco Systems
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import shutil
-
-import jinja2
-from oslo_config import cfg
-from oslo_log import log as logging
-import six
-
-from neutron.agent.linux import external_process
-from neutron.agent.linux import pd
-from neutron.agent.linux import pd_driver
-from neutron.agent.linux import utils
-from neutron.common import constants
-from neutron.common import utils as common_utils
-
-LOG = logging.getLogger(__name__)
-
-PD_SERVICE_NAME = 'dibbler'
-CONFIG_TEMPLATE = jinja2.Template("""
-# Config for dibbler-client.
-
-# Use enterprise number based duid
-duid-type duid-en {{ enterprise_number }} {{ va_id }}
-
-# 8 (Debug) is most verbose. 7 (Info) is usually the best option
-log-level 8
-
-# No automatic downlink address assignment
-downlink-prefix-ifaces "none"
-
-# Use script to notify l3_agent of assigned prefix
-script {{ script_path }}
-
-# Ask for prefix over the external gateway interface
-iface {{ interface_name }} {
-# Bind to generated LLA
-bind-to-address {{ bind_address }}
-# ask for address
-    pd 1
-}
-""")
-
-# The first line must be #!/usr/bin/env bash
-SCRIPT_TEMPLATE = jinja2.Template("""#!/usr/bin/env bash
-
-exec neutron-pd-notify $1 {{ prefix_path }} {{ l3_agent_pid }}
-""")
-
-
-class PDDibbler(pd_driver.PDDriverBase):
-    def __init__(self, router_id, subnet_id, ri_ifname):
-        super(PDDibbler, self).__init__(router_id, subnet_id, ri_ifname)
-        self.requestor_id = "%s:%s:%s" % (self.router_id,
-                                          self.subnet_id,
-                                          self.ri_ifname)
-        self.dibbler_client_working_area = "%s/%s" % (cfg.CONF.pd_confs,
-                                                      self.requestor_id)
-        self.prefix_path = "%s/prefix" % self.dibbler_client_working_area
-        self.pid_path = "%s/client.pid" % self.dibbler_client_working_area
-        self.converted_subnet_id = self.subnet_id.replace('-', '')
-
-    def _is_dibbler_client_running(self):
-        return utils.get_value_from_file(self.pid_path)
-
-    def _generate_dibbler_conf(self, ex_gw_ifname, lla):
-        dcwa = self.dibbler_client_working_area
-        script_path = utils.get_conf_file_name(dcwa, 'notify', 'sh', True)
-        buf = six.StringIO()
-        buf.write('%s' % SCRIPT_TEMPLATE.render(
-                             prefix_path=self.prefix_path,
-                             l3_agent_pid=os.getpid()))
-        common_utils.replace_file(script_path, buf.getvalue())
-        os.chmod(script_path, 0o744)
-
-        dibbler_conf = utils.get_conf_file_name(dcwa, 'client', 'conf', False)
-        buf = six.StringIO()
-        buf.write('%s' % CONFIG_TEMPLATE.render(
-                             enterprise_number=cfg.CONF.vendor_pen,
-                             va_id='0x%s' % self.converted_subnet_id,
-                             script_path='"%s/notify.sh"' % dcwa,
-                             interface_name='"%s"' % ex_gw_ifname,
-                             bind_address='%s' % lla))
-
-        common_utils.replace_file(dibbler_conf, buf.getvalue())
-        return dcwa
-
-    def _spawn_dibbler(self, pmon, router_ns, dibbler_conf):
-        def callback(pid_file):
-            dibbler_cmd = ['dibbler-client',
-                           'start',
-                           '-w', '%s' % dibbler_conf]
-            return dibbler_cmd
-
-        pm = external_process.ProcessManager(
-            uuid=self.requestor_id,
-            default_cmd_callback=callback,
-            namespace=router_ns,
-            service=PD_SERVICE_NAME,
-            conf=cfg.CONF,
-            pid_file=self.pid_path)
-        pm.enable(reload_cfg=False)
-        pmon.register(uuid=self.requestor_id,
-                      service_name=PD_SERVICE_NAME,
-                      monitored_process=pm)
-
-    def enable(self, pmon, router_ns, ex_gw_ifname, lla):
-        LOG.debug("Enable IPv6 PD for router %s subnet %s ri_ifname %s",
-                  self.router_id, self.subnet_id, self.ri_ifname)
-        if not self._is_dibbler_client_running():
-            dibbler_conf = self._generate_dibbler_conf(ex_gw_ifname, lla)
-            self._spawn_dibbler(pmon, router_ns, dibbler_conf)
-            LOG.debug("dibbler client enabled for router %s subnet %s"
-                      " ri_ifname %s",
-                      self.router_id, self.subnet_id, self.ri_ifname)
-
-    def disable(self, pmon, router_ns):
-        LOG.debug("Disable IPv6 PD for router %s subnet %s ri_ifname %s",
-                  self.router_id, self.subnet_id, self.ri_ifname)
-        dcwa = self.dibbler_client_working_area
-
-        def callback(pid_file):
-            dibbler_cmd = ['dibbler-client',
-                           'stop',
-                           '-w', '%s' % dcwa]
-            return dibbler_cmd
-
-        pmon.unregister(uuid=self.requestor_id,
-                        service_name=PD_SERVICE_NAME)
-        pm = external_process.ProcessManager(
-                uuid=self.requestor_id,
-                namespace=router_ns,
-                service=PD_SERVICE_NAME,
-                conf=cfg.CONF,
-                pid_file=self.pid_path)
-        pm.disable(get_stop_command=callback)
-        shutil.rmtree(dcwa, ignore_errors=True)
-        LOG.debug("dibbler client disabled for router %s subnet %s "
-                  "ri_ifname %s",
-                  self.router_id, self.subnet_id, self.ri_ifname)
-
-    def get_prefix(self):
-        prefix = utils.get_value_from_file(self.prefix_path)
-        if not prefix:
-            prefix = constants.PROVISIONAL_IPV6_PD_PREFIX
-        return prefix
-
-    @staticmethod
-    def get_sync_data():
-        try:
-            requestor_ids = os.listdir(cfg.CONF.pd_confs)
-        except OSError:
-            return []
-
-        sync_data = []
-        requestors = (r.split(':') for r in requestor_ids if r.count(':') == 2)
-        for router_id, subnet_id, ri_ifname in requestors:
-            pd_info = pd.PDInfo()
-            pd_info.router_id = router_id
-            pd_info.subnet_id = subnet_id
-            pd_info.ri_ifname = ri_ifname
-            pd_info.driver = PDDibbler(router_id, subnet_id, ri_ifname)
-            pd_info.client_started = (
-                pd_info.driver._is_dibbler_client_running())
-            pd_info.prefix = pd_info.driver.get_prefix()
-            sync_data.append(pd_info)
-
-        return sync_data
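
As a rough illustration of the on-disk state get_sync_data() relies on: each
requestor is encoded as a "router:subnet:ifname" directory under pd_confs,
with the delegated prefix and the dibbler-client pid stored inside it. A
hedged sketch with placeholder ids and an assumed pd_confs value:

import os

pd_confs = '/var/run/neutron/pd'  # assumption: example configuration value
router_id = '8d168d56-1111-2222-3333-444444444444'
subnet_id = '1b4e7d9c-5555-6666-7777-888888888888'
ri_ifname = 'qr-1b4e7d9c'

requestor_id = '%s:%s:%s' % (router_id, subnet_id, ri_ifname)
working_area = os.path.join(pd_confs, requestor_id)
prefix_path = os.path.join(working_area, 'prefix')    # written by notify.sh
pid_path = os.path.join(working_area, 'client.pid')   # dibbler-client pid

# get_sync_data() simply reverses this encoding on agent restart:
router, subnet, ifname = requestor_id.split(':')
assert (router, subnet, ifname) == (router_id, subnet_id, ri_ifname)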
diff --git a/neutron/agent/linux/external_process.py b/neutron/agent/linux/external_process.py
deleted file mode 100644 (file)
index fc969e5..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import collections
-import os.path
-import six
-
-import eventlet
-from oslo_concurrency import lockutils
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import fileutils
-
-from neutron._i18n import _, _LW, _LE
-from neutron.agent.common import config as agent_cfg
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import utils as common_utils
-
-LOG = logging.getLogger(__name__)
-
-
-OPTS = [
-    cfg.StrOpt('external_pids',
-               default='$state_path/external/pids',
-               help=_('Location to store child pid files')),
-]
-
-
-cfg.CONF.register_opts(OPTS)
-agent_cfg.register_process_monitor_opts(cfg.CONF)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class MonitoredProcess(object):
-    @abc.abstractproperty
-    def active(self):
-        """Boolean representing the running state of the process."""
-
-    @abc.abstractmethod
-    def enable(self):
-        """Enable the service, or respawn the process."""
-
-
-class ProcessManager(MonitoredProcess):
-    """An external process manager for Neutron spawned processes.
-
-    Note: The manager expects uuid to be in cmdline.
-    """
-    def __init__(self, conf, uuid, namespace=None, service=None,
-                 pids_path=None, default_cmd_callback=None,
-                 cmd_addl_env=None, pid_file=None, run_as_root=False):
-
-        self.conf = conf
-        self.uuid = uuid
-        self.namespace = namespace
-        self.default_cmd_callback = default_cmd_callback
-        self.cmd_addl_env = cmd_addl_env
-        self.pids_path = pids_path or self.conf.external_pids
-        self.pid_file = pid_file
-        self.run_as_root = run_as_root
-
-        if service:
-            self.service_pid_fname = 'pid.' + service
-            self.service = service
-        else:
-            self.service_pid_fname = 'pid'
-            self.service = 'default-service'
-
-        common_utils.ensure_dir(os.path.dirname(self.get_pid_file_name()))
-
-    def enable(self, cmd_callback=None, reload_cfg=False):
-        if not self.active:
-            if not cmd_callback:
-                cmd_callback = self.default_cmd_callback
-            cmd = cmd_callback(self.get_pid_file_name())
-
-            ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
-            ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
-                                     run_as_root=self.run_as_root)
-        elif reload_cfg:
-            self.reload_cfg()
-
-    def reload_cfg(self):
-        self.disable('HUP')
-
-    def disable(self, sig='9', get_stop_command=None):
-        pid = self.pid
-
-        if self.active:
-            if get_stop_command:
-                cmd = get_stop_command(self.get_pid_file_name())
-                ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
-                ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
-            else:
-                cmd = ['kill', '-%s' % (sig), pid]
-                utils.execute(cmd, run_as_root=True)
-                # In the case of shutting down, remove the pid file
-                if sig == '9':
-                    fileutils.delete_if_exists(self.get_pid_file_name())
-        elif pid:
-            LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
-                      'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,
-                                            'signal': sig})
-        else:
-            LOG.debug('No process started for %s', self.uuid)
-
-    def get_pid_file_name(self):
-        """Returns the file name for a given kind of config file."""
-        if self.pid_file:
-            return self.pid_file
-        else:
-            return utils.get_conf_file_name(self.pids_path,
-                                            self.uuid,
-                                            self.service_pid_fname)
-
-    @property
-    def pid(self):
-        """Last known pid for this external process spawned for this uuid."""
-        return utils.get_value_from_file(self.get_pid_file_name(), int)
-
-    @property
-    def active(self):
-        pid = self.pid
-        if pid is None:
-            return False
-
-        cmdline = '/proc/%s/cmdline' % pid
-        try:
-            with open(cmdline, "r") as f:
-                return self.uuid in f.readline()
-        except IOError:
-            return False
-
-
-ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service'])
-
-
-class ProcessMonitor(object):
-
-    def __init__(self, config, resource_type):
-        """Handle multiple process managers and watch over all of them.
-
-        :param config: oslo config object with the agent configuration.
-        :type config: oslo_config.ConfigOpts
-        :param resource_type: can be dhcp, router, load_balancer, etc.
-        :type resource_type: str
-        """
-        self._config = config
-        self._resource_type = resource_type
-
-        self._monitored_processes = {}
-
-        if self._config.AGENT.check_child_processes_interval:
-            self._spawn_checking_thread()
-
-    def register(self, uuid, service_name, monitored_process):
-        """Start monitoring a process.
-
-        The given monitored_process will be tied to its uuid+service_name,
-        replacing the old one if it already existed.
-
-        The monitored_process should be enabled before registration,
-        otherwise ProcessMonitor could try to enable the process itself,
-        which could lead to a double enable and, with bad luck, two
-        processes running as well as errors in the logs.
-
-        :param uuid: An ID of the resource for which the process is running.
-        :param service_name: A logical service name for this process monitor,
-                             so the same uuid provided via process manager
-                             can reference several different services.
-        :param monitored_process: MonitoredProcess we want to monitor.
-        """
-
-        service_id = ServiceId(uuid, service_name)
-        self._monitored_processes[service_id] = monitored_process
-
-    def unregister(self, uuid, service_name):
-        """Stop monitoring a process.
-
-        The uuid+service_name will be removed from the monitored processes.
-
-        The service must be disabled **after** unregistering, otherwise if
-        the process monitor checks after you disable the process and before
-        you unregister it, the process will be respawned and left orphaned
-        on the system.
-
-        :param uuid: An ID of the resource for which the process is running.
-        :param service_name: A logical service name for this process monitor,
-                             so the same uuid provided via process manager
-                             can reference several different services.
-        """
-
-        service_id = ServiceId(uuid, service_name)
-        self._monitored_processes.pop(service_id, None)
-
-    def stop(self):
-        """Stop the process monitoring.
-
-        This method will stop the monitoring thread, but no monitored
-        process will be stopped.
-        """
-        self._monitor_processes = False
-
-    def _spawn_checking_thread(self):
-        self._monitor_processes = True
-        eventlet.spawn(self._periodic_checking_thread)
-
-    @lockutils.synchronized("_check_child_processes")
-    def _check_child_processes(self):
-        # we build the list of keys before iterating in the loop to cover
-        # the case where other threads add or remove items from the
-        # dictionary which otherwise will cause a RuntimeError
-        for service_id in list(self._monitored_processes):
-            pm = self._monitored_processes.get(service_id)
-
-            if pm and not pm.active:
-                LOG.error(_LE("%(service)s for %(resource_type)s "
-                              "with uuid %(uuid)s not found. "
-                              "The process should not have died"),
-                          {'service': pm.service,
-                           'resource_type': self._resource_type,
-                           'uuid': service_id.uuid})
-                self._execute_action(service_id)
-            eventlet.sleep(0)
-
-    def _periodic_checking_thread(self):
-        while self._monitor_processes:
-            eventlet.sleep(self._config.AGENT.check_child_processes_interval)
-            eventlet.spawn(self._check_child_processes)
-
-    def _execute_action(self, service_id):
-        action = self._config.AGENT.check_child_processes_action
-        action_function = getattr(self, "_%s_action" % action)
-        action_function(service_id)
-
-    def _respawn_action(self, service_id):
-        LOG.warning(_LW("Respawning %(service)s for uuid %(uuid)s"),
-                    {'service': service_id.service,
-                     'uuid': service_id.uuid})
-        self._monitored_processes[service_id].enable()
-
-    def _exit_action(self, service_id):
-        LOG.error(_LE("Exiting agent as programmed in check_child_processes_"
-                      "actions"))
-        self._exit_handler(service_id.uuid, service_id.service)
-
-    def _exit_handler(self, uuid, service):
-        """This is an exit handler for the ProcessMonitor.
-
-        It will be called if the administrator configured the exit action in
-        check_child_processes_action, and one of our external processes dies
-        unexpectedly.
-        """
-        LOG.error(_LE("Exiting agent because of a malfunction with the "
-                      "%(service)s process identified by uuid %(uuid)s"),
-                  {'service': service, 'uuid': uuid})
-        raise SystemExit(1)
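
The register()/unregister() docstrings above imply a strict ordering between
enabling a process and monitoring it. A hedged usage sketch making that
ordering concrete; the daemon command, service label, and uuid are
placeholders, not real Neutron services:

from oslo_config import cfg

from neutron.agent.linux import external_process


def run_and_watch(pmon, uuid, router_ns):
    def cmd_callback(pid_file):
        # assumption: any daemon that honors --pid-file would work here
        return ['my-daemon', '--pid-file', pid_file]

    pm = external_process.ProcessManager(
        conf=cfg.CONF, uuid=uuid, namespace=router_ns,
        service='my-service', default_cmd_callback=cmd_callback)
    pm.enable()                            # spawn first ...
    pmon.register(uuid, 'my-service', pm)  # ... then start watching
    return pm


def stop_and_forget(pmon, uuid, pm):
    pmon.unregister(uuid, 'my-service')    # stop watching first ...
    pm.disable()                           # ... then SIGKILL and remove pid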
diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py
deleted file mode 100644 (file)
index 551074f..0000000
+++ /dev/null
@@ -1,453 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-import six
-
-from neutron._i18n import _, _LE, _LI
-from neutron.agent.common import ovs_lib
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import constants as n_const
-from neutron.common import exceptions
-from neutron.common import ipv6_utils
-
-
-LOG = logging.getLogger(__name__)
-
-OPTS = [
-    cfg.StrOpt('ovs_integration_bridge',
-               default='br-int',
-               help=_('Name of Open vSwitch bridge to use')),
-    cfg.BoolOpt('ovs_use_veth',
-                default=False,
-                help=_('Use veth for an OVS interface or not. '
-                       'Supports kernels with limited namespace support '
-                       '(e.g. RHEL 6.5) as long as ovs_use_veth is set to '
-                       'True.')),
-    cfg.IntOpt('network_device_mtu',
-               help=_('MTU setting for device.')),
-]
-
-
-@six.add_metaclass(abc.ABCMeta)
-class LinuxInterfaceDriver(object):
-
-    # from linux IF_NAMESIZE
-    DEV_NAME_LEN = 14
-    DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
-
-    def __init__(self, conf):
-        self.conf = conf
-        if self.conf.network_device_mtu:
-            self._validate_network_device_mtu()
-
-    def _validate_network_device_mtu(self):
-        if (ipv6_utils.is_enabled() and
-            self.conf.network_device_mtu < n_const.IPV6_MIN_MTU):
-            LOG.error(_LE("IPv6 protocol requires a minimum MTU of "
-                          "%(min_mtu)s, while the configured value is "
-                          "%(current_mtu)s"), {'min_mtu': n_const.IPV6_MIN_MTU,
-                          'current_mtu': self.conf.network_device_mtu})
-            raise SystemExit(1)
-
-    @property
-    def use_gateway_ips(self):
-        """Whether to use gateway IPs instead of unique IP allocations.
-
-        In each place where the DHCP agent runs, and for each subnet for
-        which DHCP is handing out IP addresses, the DHCP port needs -
-        at the Linux level - to have an IP address within that subnet.
-        Generally this needs to be a unique Neutron-allocated IP
-        address, because the subnet's underlying L2 domain is bridged
-        across multiple compute hosts and network nodes, and for HA
-        there may be multiple DHCP agents running on that same bridged
-        L2 domain.
-
-        However, if the DHCP ports - on multiple compute/network nodes
-        but for the same network - are _not_ bridged to each other,
-        they do not need each to have a unique IP address.  Instead
-        they can all share the same address from the relevant subnet.
-        This works, without creating any ambiguity, because those
-        ports are not all present on the same L2 domain, and because
-        no data within the network is ever sent to that address.
-        (DHCP requests are broadcast, and it is the network's job to
-        ensure that such a broadcast will reach at least one of the
-        available DHCP servers.  DHCP responses will be sent _from_
-        the DHCP port address.)
-
-        Specifically, for networking backends where it makes sense,
-        the DHCP agent allows all DHCP ports to use the subnet's
-        gateway IP address, and thereby to completely avoid any unique
-        IP address allocation.  This behaviour is selected by running
-        the DHCP agent with a configured interface driver whose
-        'use_gateway_ips' property is True.
-
-        When an operator deploys Neutron with an interface driver that
-        makes use_gateway_ips True, they should also ensure that a
-        gateway IP address is defined for each DHCP-enabled subnet,
-        and that the gateway IP address doesn't change during the
-        subnet's lifetime.
-        """
-        return False
-
-    def init_l3(self, device_name, ip_cidrs, namespace=None,
-                preserve_ips=None, clean_connections=False):
-        """Set the L3 settings for the interface using data from the port.
-
-        ip_cidrs: list of 'X.X.X.X/YY' strings
-        preserve_ips: list of ip cidrs that should not be removed from device
-        clean_connections: Boolean to indicate if we should clean up
-          connections associated with removed ips
-        """
-        preserve_ips = preserve_ips or []
-        device = ip_lib.IPDevice(device_name, namespace=namespace)
-
-        # The LLA generated by the operating system is not known to
-        # Neutron, so it would be deleted if we added it to the 'previous'
-        # list here
-        default_ipv6_lla = ip_lib.get_ipv6_lladdr(device.link.address)
-        previous = {addr['cidr'] for addr in device.addr.list(
-            filters=['permanent'])} - {default_ipv6_lla}
-
-        # add new addresses
-        for ip_cidr in ip_cidrs:
-
-            net = netaddr.IPNetwork(ip_cidr)
-            # Convert to compact IPv6 address because the return values of
-            # "ip addr list" are compact.
-            if net.version == 6:
-                ip_cidr = str(net)
-            if ip_cidr in previous:
-                previous.remove(ip_cidr)
-                continue
-
-            device.addr.add(ip_cidr)
-
-        # clean up any old addresses
-        for ip_cidr in previous:
-            if ip_cidr not in preserve_ips:
-                if clean_connections:
-                    device.delete_addr_and_conntrack_state(ip_cidr)
-                else:
-                    device.addr.delete(ip_cidr)
-
-    def init_router_port(self,
-                         device_name,
-                         ip_cidrs,
-                         namespace,
-                         preserve_ips=None,
-                         extra_subnets=None,
-                         clean_connections=False):
-        """Set the L3 settings for a router interface using data from the port.
-
-        ip_cidrs: list of 'X.X.X.X/YY' strings
-        preserve_ips: list of ip cidrs that should not be removed from device
-        clean_connections: Boolean to indicate if we should clean up
-          connections associated with removed ips
-        extra_subnets: An iterable of cidrs to add as routes without address
-        """
-        LOG.debug("init_router_port: device_name(%s), namespace(%s)",
-                  device_name, namespace)
-        self.init_l3(device_name=device_name,
-                     ip_cidrs=ip_cidrs,
-                     namespace=namespace,
-                     preserve_ips=preserve_ips or [],
-                     clean_connections=clean_connections)
-
-        device = ip_lib.IPDevice(device_name, namespace=namespace)
-
-        # Manage on-link routes (routes without an associated address)
-        new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or [])
-
-        v4_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_4)
-        v6_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_6)
-        existing_onlink_cidrs = set(r['cidr'] for r in v4_onlink + v6_onlink)
-
-        for route in new_onlink_cidrs - existing_onlink_cidrs:
-            LOG.debug("adding onlink route(%s)", route)
-            device.route.add_onlink_route(route)
-        for route in existing_onlink_cidrs - new_onlink_cidrs:
-            LOG.debug("deleting onlink route(%s)", route)
-            device.route.delete_onlink_route(route)
-
-    def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'):
-        device = ip_lib.IPDevice(device_name,
-                                 namespace=namespace)
-        net = netaddr.IPNetwork(v6addr)
-        device.addr.add(str(net), scope)
-
-    def delete_ipv6_addr(self, device_name, v6addr, namespace):
-        device = ip_lib.IPDevice(device_name,
-                                 namespace=namespace)
-        device.delete_addr_and_conntrack_state(v6addr)
-
-    def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace):
-        """Delete the first listed IPv6 address that falls within a given
-        prefix.
-        """
-        device = ip_lib.IPDevice(device_name, namespace=namespace)
-        net = netaddr.IPNetwork(prefix)
-        for address in device.addr.list(scope='global', filters=['permanent']):
-            ip_address = netaddr.IPNetwork(address['cidr'])
-            if ip_address in net:
-                device.delete_addr_and_conntrack_state(address['cidr'])
-                break
-
-    def get_ipv6_llas(self, device_name, namespace):
-        device = ip_lib.IPDevice(device_name,
-                                 namespace=namespace)
-
-        return device.addr.list(scope='link', ip_version=6)
-
-    def check_bridge_exists(self, bridge):
-        if not ip_lib.device_exists(bridge):
-            raise exceptions.BridgeDoesNotExist(bridge=bridge)
-
-    def get_device_name(self, port):
-        return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]
-
-    @staticmethod
-    def configure_ipv6_ra(namespace, dev_name):
-        """Configure acceptance of IPv6 route advertisements on an intf."""
-        # Learn the default router's IP address via RAs
-        ip_lib.IPWrapper(namespace=namespace).netns.execute(
-            ['sysctl', '-w', 'net.ipv6.conf.%s.accept_ra=2' % dev_name])
-
-    @abc.abstractmethod
-    def plug_new(self, network_id, port_id, device_name, mac_address,
-                 bridge=None, namespace=None, prefix=None):
-        """Plug in the interface only for new devices that don't exist yet."""
-
-    def plug(self, network_id, port_id, device_name, mac_address,
-             bridge=None, namespace=None, prefix=None):
-        if not ip_lib.device_exists(device_name,
-                                    namespace=namespace):
-            self.plug_new(network_id, port_id, device_name, mac_address,
-                          bridge, namespace, prefix)
-        else:
-            LOG.info(_LI("Device %s already exists"), device_name)
-
-    @abc.abstractmethod
-    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
-        """Unplug the interface."""
-
-    @property
-    def bridged(self):
-        """Whether the DHCP port is bridged to the VM TAP interfaces.
-
-        When the DHCP port is bridged to the TAP interfaces for the
-        VMs for which it is providing DHCP service - as is the case
-        for most Neutron network implementations - the DHCP server
-        only needs to listen on the DHCP port, and will still receive
-        DHCP requests from all the relevant VMs.
-
-        If the DHCP port is not bridged to the relevant VM TAP
-        interfaces, the DHCP server needs to listen explicitly on
-        those TAP interfaces, and to treat those as aliases of the
-        DHCP port where the IP subnet is defined.
-        """
-        return True
-
-
-class NullDriver(LinuxInterfaceDriver):
-    def plug_new(self, network_id, port_id, device_name, mac_address,
-                 bridge=None, namespace=None, prefix=None):
-        pass
-
-    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
-        pass
-
-
-class OVSInterfaceDriver(LinuxInterfaceDriver):
-    """Driver for creating an internal interface on an OVS bridge."""
-
-    DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
-
-    def __init__(self, conf):
-        super(OVSInterfaceDriver, self).__init__(conf)
-        if self.conf.ovs_use_veth:
-            self.DEV_NAME_PREFIX = 'ns-'
-
-    def _get_tap_name(self, dev_name, prefix=None):
-        if self.conf.ovs_use_veth:
-            dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
-                                        n_const.TAP_DEVICE_PREFIX)
-        return dev_name
-
-    def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
-                      internal=True):
-        attrs = [('external_ids', {'iface-id': port_id,
-                                   'iface-status': 'active',
-                                   'attached-mac': mac_address})]
-        if internal:
-            attrs.insert(0, ('type', 'internal'))
-
-        ovs = ovs_lib.OVSBridge(bridge)
-        ovs.replace_port(device_name, *attrs)
-
-    def plug_new(self, network_id, port_id, device_name, mac_address,
-                 bridge=None, namespace=None, prefix=None):
-        """Plug in the interface."""
-        if not bridge:
-            bridge = self.conf.ovs_integration_bridge
-
-        self.check_bridge_exists(bridge)
-
-        ip = ip_lib.IPWrapper()
-        tap_name = self._get_tap_name(device_name, prefix)
-
-        if self.conf.ovs_use_veth:
-            # Create ns_dev in a namespace if one is configured.
-            root_dev, ns_dev = ip.add_veth(tap_name,
-                                           device_name,
-                                           namespace2=namespace)
-        else:
-            ns_dev = ip.device(device_name)
-
-        internal = not self.conf.ovs_use_veth
-        self._ovs_add_port(bridge, tap_name, port_id, mac_address,
-                           internal=internal)
-
-        ns_dev.link.set_address(mac_address)
-
-        if self.conf.network_device_mtu:
-            ns_dev.link.set_mtu(self.conf.network_device_mtu)
-            if self.conf.ovs_use_veth:
-                root_dev.link.set_mtu(self.conf.network_device_mtu)
-
-        # Add an interface created by ovs to the namespace.
-        if not self.conf.ovs_use_veth and namespace:
-            namespace_obj = ip.ensure_namespace(namespace)
-            namespace_obj.add_device_to_namespace(ns_dev)
-
-        ns_dev.link.set_up()
-        if self.conf.ovs_use_veth:
-            root_dev.link.set_up()
-
-    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
-        """Unplug the interface."""
-        if not bridge:
-            bridge = self.conf.ovs_integration_bridge
-
-        tap_name = self._get_tap_name(device_name, prefix)
-        self.check_bridge_exists(bridge)
-        ovs = ovs_lib.OVSBridge(bridge)
-
-        try:
-            ovs.delete_port(tap_name)
-            if self.conf.ovs_use_veth:
-                device = ip_lib.IPDevice(device_name, namespace=namespace)
-                device.link.delete()
-                LOG.debug("Unplugged interface '%s'", device_name)
-        except RuntimeError:
-            LOG.error(_LE("Failed unplugging interface '%s'"),
-                      device_name)
-
-
-class IVSInterfaceDriver(LinuxInterfaceDriver):
-    """Driver for creating an internal interface on an IVS bridge."""
-
-    DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
-
-    def __init__(self, conf):
-        super(IVSInterfaceDriver, self).__init__(conf)
-        self.DEV_NAME_PREFIX = 'ns-'
-
-    def _get_tap_name(self, dev_name, prefix=None):
-        dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
-                                    n_const.TAP_DEVICE_PREFIX)
-        return dev_name
-
-    def _ivs_add_port(self, device_name, port_id, mac_address):
-        cmd = ['ivs-ctl', 'add-port', device_name]
-        utils.execute(cmd, run_as_root=True)
-
-    def plug_new(self, network_id, port_id, device_name, mac_address,
-                 bridge=None, namespace=None, prefix=None):
-        """Plug in the interface."""
-        ip = ip_lib.IPWrapper()
-        tap_name = self._get_tap_name(device_name, prefix)
-
-        root_dev, ns_dev = ip.add_veth(tap_name, device_name)
-
-        self._ivs_add_port(tap_name, port_id, mac_address)
-
-        ns_dev = ip.device(device_name)
-        ns_dev.link.set_address(mac_address)
-
-        if self.conf.network_device_mtu:
-            ns_dev.link.set_mtu(self.conf.network_device_mtu)
-            root_dev.link.set_mtu(self.conf.network_device_mtu)
-
-        if namespace:
-            namespace_obj = ip.ensure_namespace(namespace)
-            namespace_obj.add_device_to_namespace(ns_dev)
-
-        ns_dev.link.set_up()
-        root_dev.link.set_up()
-
-    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
-        """Unplug the interface."""
-        tap_name = self._get_tap_name(device_name, prefix)
-        try:
-            cmd = ['ivs-ctl', 'del-port', tap_name]
-            utils.execute(cmd, run_as_root=True)
-            device = ip_lib.IPDevice(device_name, namespace=namespace)
-            device.link.delete()
-            LOG.debug("Unplugged interface '%s'", device_name)
-        except RuntimeError:
-            LOG.error(_LE("Failed unplugging interface '%s'"),
-                      device_name)
-
-
-class BridgeInterfaceDriver(LinuxInterfaceDriver):
-    """Driver for creating bridge interfaces."""
-
-    DEV_NAME_PREFIX = 'ns-'
-
-    def plug_new(self, network_id, port_id, device_name, mac_address,
-                 bridge=None, namespace=None, prefix=None):
-        """Plugin the interface."""
-        ip = ip_lib.IPWrapper()
-
-        # Enable agent to define the prefix
-        tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX,
-                                       n_const.TAP_DEVICE_PREFIX)
-        # Create ns_veth in a namespace if one is configured.
-        root_veth, ns_veth = ip.add_veth(tap_name, device_name,
-                                         namespace2=namespace)
-        ns_veth.link.set_address(mac_address)
-
-        if self.conf.network_device_mtu:
-            root_veth.link.set_mtu(self.conf.network_device_mtu)
-            ns_veth.link.set_mtu(self.conf.network_device_mtu)
-
-        root_veth.link.set_up()
-        ns_veth.link.set_up()
-
-    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
-        """Unplug the interface."""
-        device = ip_lib.IPDevice(device_name, namespace=namespace)
-        try:
-            device.link.delete()
-            LOG.debug("Unplugged interface '%s'", device_name)
-        except RuntimeError:
-            LOG.error(_LE("Failed unplugging interface '%s'"),
-                      device_name)
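
Most of init_l3() above is set arithmetic over CIDR strings; a pure-Python
sketch of that reconciliation with literal sample data (documentation
addresses, no ip(8) calls):

existing = {'192.0.2.1/24', '198.51.100.1/24', 'fe80::f816:3eff:fe00:1/64'}
default_lla = 'fe80::f816:3eff:fe00:1/64'  # OS-generated LLA, never removed
wanted = ['192.0.2.1/24', '203.0.113.1/24']
preserve_ips = {'198.51.100.1/24'}         # e.g. a VIP owned by keepalived

previous = existing - {default_lla}
to_add = [cidr for cidr in wanted if cidr not in previous]
to_delete = [cidr for cidr in previous - set(wanted)
             if cidr not in preserve_ips]

print(to_add)     # ['203.0.113.1/24'] -> would be plugged via device.addr.add
print(to_delete)  # []                 -> the preserved VIP survives cleanup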
diff --git a/neutron/agent/linux/ip_conntrack.py b/neutron/agent/linux/ip_conntrack.py
deleted file mode 100644 (file)
index 4bbd835..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-import netaddr
-from oslo_log import log as logging
-
-from neutron._i18n import _LE
-from neutron.agent.linux import utils as linux_utils
-
-LOG = logging.getLogger(__name__)
-
-
-class IpConntrackManager(object):
-    """Smart wrapper for ip conntrack."""
-
-    def __init__(self, zone_lookup_func, execute=None, namespace=None):
-        self.get_device_zone = zone_lookup_func
-        self.execute = execute or linux_utils.execute
-        self.namespace = namespace
-
-    @staticmethod
-    def _generate_conntrack_cmd_by_rule(rule, namespace):
-        ethertype = rule.get('ethertype')
-        protocol = rule.get('protocol')
-        direction = rule.get('direction')
-        cmd = ['conntrack', '-D']
-        if protocol:
-            cmd.extend(['-p', str(protocol)])
-        cmd.extend(['-f', str(ethertype).lower()])
-        cmd.append('-d' if direction == 'ingress' else '-s')
-        cmd_ns = []
-        if namespace:
-            cmd_ns.extend(['ip', 'netns', 'exec', namespace])
-        cmd_ns.extend(cmd)
-        return cmd_ns
-
-    def _get_conntrack_cmds(self, device_info_list, rule, remote_ip=None):
-        conntrack_cmds = []
-        cmd = self._generate_conntrack_cmd_by_rule(rule, self.namespace)
-        ethertype = rule.get('ethertype')
-        for device_info in device_info_list:
-            zone_id = self.get_device_zone(device_info['device'])
-            ips = device_info.get('fixed_ips', [])
-            for ip in ips:
-                net = netaddr.IPNetwork(ip)
-                if str(net.version) not in ethertype:
-                    continue
-                ip_cmd = [str(net.ip), '-w', zone_id]
-                if remote_ip and str(
-                        netaddr.IPNetwork(remote_ip).version) in ethertype:
-                    ip_cmd.extend(['-s', str(remote_ip)])
-                conntrack_cmds.append(cmd + ip_cmd)
-        return conntrack_cmds
-
-    def _delete_conntrack_state(self, device_info_list, rule, remote_ip=None):
-        conntrack_cmds = self._get_conntrack_cmds(device_info_list,
-                                                  rule, remote_ip)
-        for cmd in conntrack_cmds:
-            try:
-                self.execute(cmd, run_as_root=True,
-                             check_exit_code=True,
-                             extra_ok_codes=[1])
-            except RuntimeError:
-                LOG.exception(
-                    _LE("Failed execute conntrack command %s"), str(cmd))
-
-    def delete_conntrack_state_by_rule(self, device_info_list, rule):
-        self._delete_conntrack_state(device_info_list, rule)
-
-    def delete_conntrack_state_by_remote_ips(self, device_info_list,
-                                             ethertype, remote_ips):
-        rule = {'ethertype': str(ethertype).lower(), 'direction': 'ingress'}
-        if remote_ips:
-            for remote_ip in remote_ips:
-                self._delete_conntrack_state(
-                    device_info_list, rule, remote_ip)
-        else:
-            self._delete_conntrack_state(device_info_list, rule)
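
To make the command construction above concrete, this is roughly what
_get_conntrack_cmds() emits for a single ingress IPv4 TCP rule; the
namespace, zone id, and address are placeholders:

rule = {'ethertype': 'IPv4', 'protocol': 'tcp', 'direction': 'ingress'}
namespace = 'qrouter-example'
zone_id = '4172'
fixed_ip = '192.0.2.10'

cmd = ['conntrack', '-D', '-p', rule['protocol'],
       '-f', rule['ethertype'].lower(),
       '-d' if rule['direction'] == 'ingress' else '-s']
full = ['ip', 'netns', 'exec', namespace] + cmd + [fixed_ip, '-w', zone_id]
print(' '.join(full))
# -> ip netns exec qrouter-example conntrack -D -p tcp -f ipv4 -d 192.0.2.10 -w 4172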
diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py
deleted file mode 100644 (file)
index 336c3f4..0000000
+++ /dev/null
@@ -1,1031 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import debtcollector
-import eventlet
-import netaddr
-import os
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import excutils
-import re
-import six
-
-from neutron._i18n import _, _LE
-from neutron.agent.common import utils
-from neutron.common import constants
-from neutron.common import exceptions
-
-LOG = logging.getLogger(__name__)
-
-OPTS = [
-    cfg.BoolOpt('ip_lib_force_root',
-                default=False,
-                help=_('Force ip_lib calls to use the root helper')),
-]
-
-
-LOOPBACK_DEVNAME = 'lo'
-
-IP_NETNS_PATH = '/var/run/netns'
-SYS_NET_PATH = '/sys/class/net'
-DEFAULT_GW_PATTERN = re.compile(r"via (\S+)")
-METRIC_PATTERN = re.compile(r"metric (\S+)")
-DEVICE_NAME_PATTERN = re.compile(r"(\d+?): (\S+?):.*")
-
-
-def remove_interface_suffix(interface):
-    """Remove a possible "<if>@<endpoint>" suffix from an interface' name.
-
-    This suffix can appear in some kernel versions, and intends on specifying,
-    for example, a veth's pair. However, this interface name is useless to us
-    as further 'ip' commands require that the suffix be removed.
-    """
-    # If '@' is not present, this will do nothing.
-    return interface.partition("@")[0]
-
-
-class AddressNotReady(exceptions.NeutronException):
-    message = _("Failure waiting for address %(address)s to "
-                "become ready: %(reason)s")
-
-
-class SubProcessBase(object):
-    def __init__(self, namespace=None,
-                 log_fail_as_error=True):
-        self.namespace = namespace
-        self.log_fail_as_error = log_fail_as_error
-        try:
-            self.force_root = cfg.CONF.ip_lib_force_root
-        except cfg.NoSuchOptError:
-            # Only callers that need to force use of the root helper
-            # need to register the option.
-            self.force_root = False
-
-    def _run(self, options, command, args):
-        if self.namespace:
-            return self._as_root(options, command, args)
-        elif self.force_root:
-            # Force use of the root helper to ensure that commands
-            # will execute in dom0 when running under XenServer/XCP.
-            return self._execute(options, command, args, run_as_root=True,
-                                 log_fail_as_error=self.log_fail_as_error)
-        else:
-            return self._execute(options, command, args,
-                                 log_fail_as_error=self.log_fail_as_error)
-
-    def _as_root(self, options, command, args, use_root_namespace=False):
-        namespace = self.namespace if not use_root_namespace else None
-
-        return self._execute(options, command, args, run_as_root=True,
-                             namespace=namespace,
-                             log_fail_as_error=self.log_fail_as_error)
-
-    @classmethod
-    def _execute(cls, options, command, args, run_as_root=False,
-                 namespace=None, log_fail_as_error=True):
-        opt_list = ['-%s' % o for o in options]
-        ip_cmd = add_namespace_to_cmd(['ip'], namespace)
-        cmd = ip_cmd + opt_list + [command] + list(args)
-        return utils.execute(cmd, run_as_root=run_as_root,
-                             log_fail_as_error=log_fail_as_error)
-
-    def set_log_fail_as_error(self, fail_with_error):
-        self.log_fail_as_error = fail_with_error
-
-    def get_log_fail_as_error(self):
-        return self.log_fail_as_error
-
-
-class IPWrapper(SubProcessBase):
-    def __init__(self, namespace=None):
-        super(IPWrapper, self).__init__(namespace=namespace)
-        self.netns = IpNetnsCommand(self)
-
-    def device(self, name):
-        return IPDevice(name, namespace=self.namespace)
-
-    def get_devices(self, exclude_loopback=False):
-        retval = []
-        if self.namespace:
-            # We call out to 'find' manually: to avoid screen-scraping
-            # iproute2 output, we list the sysfs directory instead, as
-            # suggested by Stephen Hemminger (iproute2 dev).
-            output = utils.execute(['ip', 'netns', 'exec', self.namespace,
-                                    'find', SYS_NET_PATH, '-maxdepth', '1',
-                                    '-type', 'l', '-printf', '%f '],
-                                   run_as_root=True,
-                                   log_fail_as_error=self.log_fail_as_error
-                                   ).split()
-        else:
-            output = (
-                i for i in os.listdir(SYS_NET_PATH)
-                if os.path.islink(os.path.join(SYS_NET_PATH, i))
-            )
-
-        for name in output:
-            if exclude_loopback and name == LOOPBACK_DEVNAME:
-                continue
-            retval.append(IPDevice(name, namespace=self.namespace))
-
-        return retval
-
-    def get_device_by_ip(self, ip):
-        """Get the IPDevice from system which has ip configured.
-
-        @param ip: look for the device holding this ip. If this is None,
-                   None is returned.
-        @type ip: str.
-        """
-        if not ip:
-            return None
-
-        addr = IpAddrCommand(self)
-        devices = addr.get_devices_with_ip(to=ip)
-        if devices:
-            return IPDevice(devices[0]['name'], namespace=self.namespace)
-
-    def add_tuntap(self, name, mode='tap'):
-        self._as_root([], 'tuntap', ('add', name, 'mode', mode))
-        return IPDevice(name, namespace=self.namespace)
-
-    def add_veth(self, name1, name2, namespace2=None):
-        args = ['add', name1, 'type', 'veth', 'peer', 'name', name2]
-
-        if namespace2 is None:
-            namespace2 = self.namespace
-        else:
-            self.ensure_namespace(namespace2)
-            args += ['netns', namespace2]
-
-        self._as_root([], 'link', tuple(args))
-
-        return (IPDevice(name1, namespace=self.namespace),
-                IPDevice(name2, namespace=namespace2))
-
-    def del_veth(self, name):
-        """Delete a virtual interface between two namespaces."""
-        self._as_root([], 'link', ('del', name))
-
-    def add_dummy(self, name):
-        """Create a Linux dummy interface with the given name."""
-        self._as_root([], 'link', ('add', name, 'type', 'dummy'))
-        return IPDevice(name, namespace=self.namespace)
-
-    def ensure_namespace(self, name):
-        if not self.netns.exists(name):
-            ip = self.netns.add(name)
-            lo = ip.device(LOOPBACK_DEVNAME)
-            lo.link.set_up()
-        else:
-            ip = IPWrapper(namespace=name)
-        return ip
-
-    def namespace_is_empty(self):
-        return not self.get_devices(exclude_loopback=True)
-
-    def garbage_collect_namespace(self):
-        """Conditionally destroy the namespace if it is empty."""
-        if self.namespace and self.netns.exists(self.namespace):
-            if self.namespace_is_empty():
-                self.netns.delete(self.namespace)
-                return True
-        return False
-
-    def add_device_to_namespace(self, device):
-        if self.namespace:
-            device.link.set_netns(self.namespace)
-
-    def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None,
-                  local=None, port=None, proxy=False):
-        cmd = ['add', name, 'type', 'vxlan', 'id', vni]
-        if group:
-            cmd.extend(['group', group])
-        if dev:
-            cmd.extend(['dev', dev])
-        if ttl:
-            cmd.extend(['ttl', ttl])
-        if tos:
-            cmd.extend(['tos', tos])
-        if local:
-            cmd.extend(['local', local])
-        if proxy:
-            cmd.append('proxy')
-        # tuple: min,max
-        if port and len(port) == 2:
-            cmd.extend(['port', port[0], port[1]])
-        elif port:
-            raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port)
-        self._as_root([], 'link', cmd)
-        return (IPDevice(name, namespace=self.namespace))
-
-    @classmethod
-    def get_namespaces(cls):
-        if not cfg.CONF.AGENT.use_helper_for_ns_read:
-            return os.listdir(IP_NETNS_PATH)
-
-        output = cls._execute([], 'netns', ['list'], run_as_root=True)
-        return [l.split()[0] for l in output.splitlines()]
-
-
-class IPDevice(SubProcessBase):
-    def __init__(self, name, namespace=None):
-        super(IPDevice, self).__init__(namespace=namespace)
-        self.name = name
-        self.link = IpLinkCommand(self)
-        self.addr = IpAddrCommand(self)
-        self.route = IpRouteCommand(self)
-        self.neigh = IpNeighCommand(self)
-
-    def __eq__(self, other):
-        return (other is not None and self.name == other.name
-                and self.namespace == other.namespace)
-
-    def __str__(self):
-        return self.name
-
-    def exists(self):
-        """Return True if the device exists in the namespace."""
-        # we must save and restore this before returning
-        orig_log_fail_as_error = self.get_log_fail_as_error()
-        self.set_log_fail_as_error(False)
-        try:
-            return bool(self.link.address)
-        except RuntimeError:
-            return False
-        finally:
-            self.set_log_fail_as_error(orig_log_fail_as_error)
-
-    def delete_addr_and_conntrack_state(self, cidr):
-        """Delete an address along with its conntrack state
-
-        This terminates any active connections through an IP.
-
-        :param cidr: the IP address for which state should be removed.
-            This can be passed as a string with or without /NN.
-            A netaddr.IPAddress or netaddr.Network representing the IP address
-            can also be passed.
-        """
-        self.addr.delete(cidr)
-
-        ip_str = str(netaddr.IPNetwork(cidr).ip)
-        ip_wrapper = IPWrapper(namespace=self.namespace)
-
-        # Delete conntrack state for ingress traffic
-        # If 0 flow entries have been deleted
-        # conntrack -D will return 1
-        try:
-            ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str],
-                                     check_exit_code=True,
-                                     extra_ok_codes=[1])
-
-        except RuntimeError:
-            LOG.exception(_LE("Failed deleting ingress connection state of"
-                              " floatingip %s"), ip_str)
-
-        # Delete conntrack state for egress traffic
-        try:
-            ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str],
-                                     check_exit_code=True,
-                                     extra_ok_codes=[1])
-        except RuntimeError:
-            LOG.exception(_LE("Failed deleting egress connection state of"
-                              " floatingip %s"), ip_str)
-
-
-class IpCommandBase(object):
-    COMMAND = ''
-
-    def __init__(self, parent):
-        self._parent = parent
-
-    def _run(self, options, args):
-        return self._parent._run(options, self.COMMAND, args)
-
-    def _as_root(self, options, args, use_root_namespace=False):
-        return self._parent._as_root(options,
-                                     self.COMMAND,
-                                     args,
-                                     use_root_namespace=use_root_namespace)
-
-
-class IPRule(SubProcessBase):
-    def __init__(self, namespace=None):
-        super(IPRule, self).__init__(namespace=namespace)
-        self.rule = IpRuleCommand(self)
-
-
-class IpRuleCommand(IpCommandBase):
-    COMMAND = 'rule'
-
-    @staticmethod
-    def _make_canonical(ip_version, settings):
-        """Converts settings to a canonical representation to compare easily"""
-        def canonicalize_fwmark_string(fwmark_mask):
-            """Reformats fwmark/mask in to a canonical form
-
-            Examples, these are all equivalent:
-                "0x1"
-                0x1
-                "0x1/0xfffffffff"
-                (0x1, 0xfffffffff)
-
-            :param fwmark_mask: The firewall and mask (default 0xffffffff)
-            :type fwmark_mask: A string with / as delimiter, an iterable, or a
-                single value.
-            """
-            # Turn the value we were passed in to an iterable: fwmark[, mask]
-            if isinstance(fwmark_mask, six.string_types):
-                # A / separates the optional mask in a string
-                iterable = fwmark_mask.split('/')
-            else:
-                try:
-                    iterable = iter(fwmark_mask)
-                except TypeError:
-                    # At this point, it must be a single integer
-                    iterable = [fwmark_mask]
-
-            def to_i(s):
-                if isinstance(s, six.string_types):
-                    # Passing 0 as "base" arg to "int" causes it to determine
-                    # the base automatically.
-                    return int(s, 0)
-                # s isn't a string, can't specify base argument
-                return int(s)
-
-            integers = [to_i(x) for x in iterable]
-
-            # The default mask is all ones; the mask is 32 bits.
-            if len(integers) == 1:
-                integers.append(0xffffffff)
-
-            # We now have two integers in a list.  Convert to canonical string.
-            return '{0:#x}/{1:#x}'.format(*integers)
-
-        def canonicalize(item):
-            k, v = item
-            # ip rule shows these as 'any'
-            if k == 'from' and v == 'all':
-                return k, constants.IP_ANY[ip_version]
-            # lookup and table are interchangeable.  Use table every time.
-            if k == 'lookup':
-                return 'table', v
-            if k == 'fwmark':
-                return k, canonicalize_fwmark_string(v)
-            return k, v
-
-        if 'type' not in settings:
-            settings['type'] = 'unicast'
-
-        return {k: str(v) for k, v in map(canonicalize, settings.items())}
-
-    def _parse_line(self, ip_version, line):
-        # Typical rules from 'ip rule show':
-        # 4030201:  from 1.2.3.4/24 lookup 10203040
-        # 1024:     from all iif qg-c43b1928-48 lookup noscope
-
-        parts = line.split()
-        if not parts:
-            return {}
-
-        # Format of line is: "priority: <key> <value> ... [<type>]"
-        settings = {k: v for k, v in zip(parts[1::2], parts[2::2])}
-        settings['priority'] = parts[0][:-1]
-        if len(parts) % 2 == 0:
-            # When line has an even number of columns, last one is the type.
-            settings['type'] = parts[-1]
-
-        return self._make_canonical(ip_version, settings)
-
-    def list_rules(self, ip_version):
-        lines = self._as_root([ip_version], ['show']).splitlines()
-        return [self._parse_line(ip_version, line) for line in lines]
-
-    def _exists(self, ip_version, **kwargs):
-        return kwargs in self.list_rules(ip_version)
-
-    def _make__flat_args_tuple(self, *args, **kwargs):
-        for kwargs_item in sorted(kwargs.items(), key=lambda i: i[0]):
-            args += kwargs_item
-        return tuple(args)
-
-    def add(self, ip, **kwargs):
-        ip_version = get_ip_version(ip)
-
-        kwargs.update({'from': ip})
-        canonical_kwargs = self._make_canonical(ip_version, kwargs)
-
-        if not self._exists(ip_version, **canonical_kwargs):
-            args_tuple = self._make__flat_args_tuple('add', **canonical_kwargs)
-            self._as_root([ip_version], args_tuple)
-
-    def delete(self, ip, **kwargs):
-        ip_version = get_ip_version(ip)
-
-        # TODO(Carl) ip ignored in delete, okay in general?
-
-        canonical_kwargs = self._make_canonical(ip_version, kwargs)
-
-        args_tuple = self._make__flat_args_tuple('del', **canonical_kwargs)
-        self._as_root([ip_version], args_tuple)
-
-
-class IpDeviceCommandBase(IpCommandBase):
-    @property
-    def name(self):
-        return self._parent.name
-
-
-class IpLinkCommand(IpDeviceCommandBase):
-    COMMAND = 'link'
-
-    def set_address(self, mac_address):
-        self._as_root([], ('set', self.name, 'address', mac_address))
-
-    def set_mtu(self, mtu_size):
-        self._as_root([], ('set', self.name, 'mtu', mtu_size))
-
-    def set_up(self):
-        return self._as_root([], ('set', self.name, 'up'))
-
-    def set_down(self):
-        return self._as_root([], ('set', self.name, 'down'))
-
-    def set_netns(self, namespace):
-        self._as_root([], ('set', self.name, 'netns', namespace))
-        self._parent.namespace = namespace
-
-    def set_name(self, name):
-        self._as_root([], ('set', self.name, 'name', name))
-        self._parent.name = name
-
-    def set_alias(self, alias_name):
-        self._as_root([], ('set', self.name, 'alias', alias_name))
-
-    def delete(self):
-        self._as_root([], ('delete', self.name))
-
-    @property
-    def address(self):
-        return self.attributes.get('link/ether')
-
-    @property
-    def state(self):
-        return self.attributes.get('state')
-
-    @property
-    def mtu(self):
-        return self.attributes.get('mtu')
-
-    @property
-    def qdisc(self):
-        return self.attributes.get('qdisc')
-
-    @property
-    def qlen(self):
-        return self.attributes.get('qlen')
-
-    @property
-    def alias(self):
-        return self.attributes.get('alias')
-
-    @property
-    def attributes(self):
-        return self._parse_line(self._run(['o'], ('show', self.name)))
-
-    def _parse_line(self, value):
-        if not value:
-            return {}
-
-        device_name, settings = value.replace("\\", '').split('>', 1)
-        tokens = settings.split()
-        keys = tokens[::2]
-        values = [int(v) if v.isdigit() else v for v in tokens[1::2]]
-
-        retval = dict(zip(keys, values))
-        return retval
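-
-    # NOTE(editor): illustrative only; for `ip -o link show eth0` output
-    # such as
-    #     2: eth0: <BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast \
-    #     state UP mode DEFAULT qlen 1000\    link/ether fa:16:3e:aa:bb:cc ...
-    # everything after the '>' is split into key/value pairs, yielding e.g.
-    #     {'mtu': 1500, 'qdisc': 'pfifo_fast', 'state': 'UP', 'qlen': 1000,
-    #      'link/ether': 'fa:16:3e:aa:bb:cc', ...}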
-
-
-class IpAddrCommand(IpDeviceCommandBase):
-    COMMAND = 'addr'
-
-    def add(self, cidr, scope='global'):
-        net = netaddr.IPNetwork(cidr)
-        args = ['add', cidr,
-                'scope', scope,
-                'dev', self.name]
-        if net.version == 4:
-            args += ['brd', str(net[-1])]
-        self._as_root([net.version], tuple(args))
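-
-    # NOTE(editor): illustrative only; add('192.168.1.10/24') runs roughly
-    #     ip -4 addr add 192.168.1.10/24 scope global dev <name> brd 192.168.1.255
-    # where the broadcast address is net[-1], the last address in the CIDR
-    # (IPv4 only; IPv6 has no broadcast address).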
-
-    def delete(self, cidr):
-        ip_version = get_ip_version(cidr)
-        self._as_root([ip_version],
-                      ('del', cidr,
-                       'dev', self.name))
-
-    def flush(self, ip_version):
-        self._as_root([ip_version], ('flush', self.name))
-
-    def get_devices_with_ip(self, name=None, scope=None, to=None,
-                            filters=None, ip_version=None):
-        """Get a list of all the devices with an IP attached in the namespace.
-
-        @param name: if not None, only entries for the device with that
-                     name will be returned.
-        """
-        options = [ip_version] if ip_version else []
-
-        args = ['show']
-        if name:
-            args += [name]
-        if filters:
-            args += filters
-        if scope:
-            args += ['scope', scope]
-        if to:
-            args += ['to', to]
-
-        retval = []
-
-        for line in self._run(options, tuple(args)).split('\n'):
-            line = line.strip()
-
-            match = DEVICE_NAME_PATTERN.search(line)
-            if match:
-                # Found a match for a device name, but its addresses will
-                # only appear in the following lines, so continue on.
-                device_name = remove_interface_suffix(match.group(2))
-                continue
-            elif not line.startswith('inet'):
-                continue
-
-            parts = line.split(" ")
-            if parts[0] == 'inet6':
-                scope = parts[3]
-            else:
-                if parts[2] == 'brd':
-                    scope = parts[5]
-                else:
-                    scope = parts[3]
-
-            retval.append(dict(name=device_name,
-                               cidr=parts[1],
-                               scope=scope,
-                               dynamic=('dynamic' == parts[-1]),
-                               tentative=('tentative' in line),
-                               dadfailed=('dadfailed' == parts[-1])))
-        return retval
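-
-    # NOTE(editor): illustrative only; an output line such as
-    #     inet 10.0.0.3/24 brd 10.0.0.255 scope global eth0
-    # parses to cidr='10.0.0.3/24' and scope='global' (parts[5], because of
-    # the 'brd' pair), while an IPv6 line like
-    #     inet6 fe80::f816:3eff:fe7a:1/64 scope link
-    # takes its scope from parts[3]; the device name itself comes from the
-    # preceding DEVICE_NAME_PATTERN header line.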
-
-    def list(self, scope=None, to=None, filters=None, ip_version=None):
-        """Get device details of a device named <self.name>."""
-        return self.get_devices_with_ip(
-            self.name, scope, to, filters, ip_version)
-
-    def wait_until_address_ready(self, address, wait_time=30):
-        """Wait until an address is no longer marked 'tentative'
-
-        raises AddressNotReady if times out or address not present on interface
-        """
-        def is_address_ready():
-            try:
-                addr_info = self.list(to=address)[0]
-            except IndexError:
-                raise AddressNotReady(
-                    address=address,
-                    reason=_('Address not present on interface'))
-            if not addr_info['tentative']:
-                return True
-            if addr_info['dadfailed']:
-                raise AddressNotReady(
-                    address=address, reason=_('Duplicate address detected'))
-        errmsg = _("Exceeded %s second limit waiting for "
-                   "address to leave the tentative state.") % wait_time
-        utils.utils.wait_until_true(
-            is_address_ready, timeout=wait_time, sleep=0.20,
-            exception=AddressNotReady(address=address, reason=errmsg))
-
-
-class IpRouteCommand(IpDeviceCommandBase):
-    COMMAND = 'route'
-
-    def __init__(self, parent, table=None):
-        super(IpRouteCommand, self).__init__(parent)
-        self._table = table
-
-    def table(self, table):
-        """Return an instance of IpRouteCommand which works on given table"""
-        return IpRouteCommand(self._parent, table)
-
-    def _table_args(self, override=None):
-        if override:
-            return ['table', override]
-        return ['table', self._table] if self._table else []
-
-    def _dev_args(self):
-        return ['dev', self.name] if self.name else []
-
-    def add_gateway(self, gateway, metric=None, table=None):
-        ip_version = get_ip_version(gateway)
-        args = ['replace', 'default', 'via', gateway]
-        if metric:
-            args += ['metric', metric]
-        args += self._dev_args()
-        args += self._table_args(table)
-        self._as_root([ip_version], tuple(args))
-
-    def _run_as_root_detect_device_not_found(self, *args, **kwargs):
-        try:
-            return self._as_root(*args, **kwargs)
-        except RuntimeError as rte:
-            with excutils.save_and_reraise_exception() as ctx:
-                if "Cannot find device" in str(rte):
-                    ctx.reraise = False
-                    raise exceptions.DeviceNotFoundError(device_name=self.name)
-
-    def delete_gateway(self, gateway, table=None):
-        ip_version = get_ip_version(gateway)
-        args = ['del', 'default',
-                'via', gateway]
-        args += self._dev_args()
-        args += self._table_args(table)
-        self._run_as_root_detect_device_not_found([ip_version], tuple(args))
-
-    def _parse_routes(self, ip_version, output, **kwargs):
-        for line in output.splitlines():
-            parts = line.split()
-
-            # Format of line is: "<cidr>|default [<key> <value>] ..."
-            route = {k: v for k, v in zip(parts[1::2], parts[2::2])}
-            route['cidr'] = parts[0]
-            # Avoids having to explicitly pass around the IP version
-            if route['cidr'] == 'default':
-                route['cidr'] = constants.IP_ANY[ip_version]
-
-            # ip route drops fields like scope and dev from the output when
-            # they were specified as a filter.  This adds them back.
-            if self.name:
-                route['dev'] = self.name
-            if self._table:
-                route['table'] = self._table
-            # Callers add any filters they use as kwargs
-            route.update(kwargs)
-
-            yield route
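-
-    # NOTE(editor): illustrative only; a line such as
-    #     10.0.0.0/24 via 10.0.0.1 dev eth0
-    # yields {'cidr': '10.0.0.0/24', 'via': '10.0.0.1', 'dev': 'eth0'},
-    # while a 'default ...' line has its cidr rewritten to
-    # constants.IP_ANY[ip_version], i.e. '0.0.0.0/0' or '::/0'.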
-
-    def list_routes(self, ip_version, **kwargs):
-        args = ['list']
-        args += self._dev_args()
-        args += self._table_args()
-        for k, v in kwargs.items():
-            args += [k, v]
-
-        output = self._run([ip_version], tuple(args))
-        return [r for r in self._parse_routes(ip_version, output, **kwargs)]
-
-    def list_onlink_routes(self, ip_version):
-        routes = self.list_routes(ip_version, scope='link')
-        return [r for r in routes if 'src' not in r]
-
-    def add_onlink_route(self, cidr):
-        self.add_route(cidr, scope='link')
-
-    def delete_onlink_route(self, cidr):
-        self.delete_route(cidr, scope='link')
-
-    def get_gateway(self, scope=None, filters=None, ip_version=None):
-        options = [ip_version] if ip_version else []
-
-        args = ['list']
-        args += self._dev_args()
-        args += self._table_args()
-        if filters:
-            args += filters
-
-        retval = None
-
-        if scope:
-            args += ['scope', scope]
-
-        route_list_lines = self._run(options, tuple(args)).split('\n')
-        default_route_line = next((x.strip() for x in
-                                   route_list_lines if
-                                   x.strip().startswith('default')), None)
-        if default_route_line:
-            retval = dict()
-            gateway = DEFAULT_GW_PATTERN.search(default_route_line)
-            if gateway:
-                retval.update(gateway=gateway.group(1))
-            metric = METRIC_PATTERN.search(default_route_line)
-            if metric:
-                retval.update(metric=int(metric.group(1)))
-
-        return retval
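-
-    # NOTE(editor): illustrative only; for a default route line such as
-    #     default via 172.24.4.1 dev qg-abc metric 100
-    # this returns {'gateway': '172.24.4.1', 'metric': 100}, assuming the
-    # DEFAULT_GW_PATTERN and METRIC_PATTERN regexes (defined elsewhere in
-    # this module) capture the 'via' and 'metric' values respectively.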
-
-    @debtcollector.removals.remove(message="Will be removed in the N cycle.")
-    def pullup_route(self, interface_name, ip_version):
-        """Ensures that the route entry for the interface is before all
-        others on the same subnet.
-        """
-        options = [ip_version]
-        device_list = []
-        device_route_list_lines = self._run(options,
-                                            ('list',
-                                             'proto', 'kernel',
-                                             'dev', interface_name)
-                                            ).split('\n')
-        for device_route_line in device_route_list_lines:
-            try:
-                subnet = device_route_line.split()[0]
-            except Exception:
-                continue
-            subnet_route_list_lines = self._run(options,
-                                                ('list',
-                                                 'proto', 'kernel',
-                                                 'match', subnet)
-                                                ).split('\n')
-            for subnet_route_line in subnet_route_list_lines:
-                i = iter(subnet_route_line.split())
-                while next(i) != 'dev':
-                    pass
-                device = next(i)
-                try:
-                    while next(i) != 'src':
-                        pass
-                    src = next(i)
-                except Exception:
-                    src = ''
-                if device != interface_name:
-                    device_list.append((device, src))
-                else:
-                    break
-
-            for (device, src) in device_list:
-                self._as_root(options, ('del', subnet, 'dev', device))
-                if src != '':
-                    self._as_root(options,
-                                  ('append', subnet,
-                                   'proto', 'kernel',
-                                   'src', src,
-                                   'dev', device))
-                else:
-                    self._as_root(options,
-                                  ('append', subnet,
-                                   'proto', 'kernel',
-                                   'dev', device))
-
-    def add_route(self, cidr, via=None, table=None, **kwargs):
-        ip_version = get_ip_version(cidr)
-        args = ['replace', cidr]
-        if via:
-            args += ['via', via]
-        args += self._dev_args()
-        args += self._table_args(table)
-        for k, v in kwargs.items():
-            args += [k, v]
-        self._run_as_root_detect_device_not_found([ip_version], tuple(args))
-
-    def delete_route(self, cidr, via=None, table=None, **kwargs):
-        ip_version = get_ip_version(cidr)
-        args = ['del', cidr]
-        if via:
-            args += ['via', via]
-        args += self._dev_args()
-        args += self._table_args(table)
-        for k, v in kwargs.items():
-            args += [k, v]
-        self._run_as_root_detect_device_not_found([ip_version], tuple(args))
-
-
-class IPRoute(SubProcessBase):
-    def __init__(self, namespace=None, table=None):
-        super(IPRoute, self).__init__(namespace=namespace)
-        self.name = None
-        self.route = IpRouteCommand(self, table=table)
-
-
-class IpNeighCommand(IpDeviceCommandBase):
-    COMMAND = 'neigh'
-
-    def add(self, ip_address, mac_address):
-        ip_version = get_ip_version(ip_address)
-        self._as_root([ip_version],
-                      ('replace', ip_address,
-                       'lladdr', mac_address,
-                       'nud', 'permanent',
-                       'dev', self.name))
-
-    def delete(self, ip_address, mac_address):
-        ip_version = get_ip_version(ip_address)
-        self._as_root([ip_version],
-                      ('del', ip_address,
-                       'lladdr', mac_address,
-                       'dev', self.name))
-
-    def show(self, ip_version):
-        options = [ip_version]
-        return self._as_root(options,
-                             ('show',
-                              'dev', self.name))
-
-    def flush(self, ip_version, ip_address):
-        """Flush neighbour entries
-
-        Given address entry is removed from neighbour cache (ARP or NDP). To
-        flush all entries pass string 'all' as an address.
-
-        :param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively
-        :param ip_address: The prefix selecting the neighbours to flush
-        """
-        self._as_root([ip_version], ('flush', 'to', ip_address))
-
-
-class IpNetnsCommand(IpCommandBase):
-    COMMAND = 'netns'
-
-    def add(self, name):
-        self._as_root([], ('add', name), use_root_namespace=True)
-        wrapper = IPWrapper(namespace=name)
-        wrapper.netns.execute(['sysctl', '-w',
-                               'net.ipv4.conf.all.promote_secondaries=1'])
-        return wrapper
-
-    def delete(self, name):
-        self._as_root([], ('delete', name), use_root_namespace=True)
-
-    def execute(self, cmds, addl_env=None, check_exit_code=True,
-                log_fail_as_error=True, extra_ok_codes=None,
-                run_as_root=False):
-        ns_params = []
-        kwargs = {'run_as_root': run_as_root}
-        if self._parent.namespace:
-            kwargs['run_as_root'] = True
-            ns_params = ['ip', 'netns', 'exec', self._parent.namespace]
-
-        env_params = []
-        if addl_env:
-            env_params = (['env'] +
-                          ['%s=%s' % pair for pair in addl_env.items()])
-        cmd = ns_params + env_params + list(cmds)
-        return utils.execute(cmd, check_exit_code=check_exit_code,
-                             extra_ok_codes=extra_ok_codes,
-                             log_fail_as_error=log_fail_as_error, **kwargs)
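-
-    # NOTE(editor): illustrative only; the namespace and command are made-up
-    # examples. With a parent namespace of 'qrouter-x' and
-    # addl_env={'LC_ALL': 'C'}, execute(['ping', '-c', '1', '10.0.0.1'])
-    # runs, as root:
-    #     ip netns exec qrouter-x env LC_ALL=C ping -c 1 10.0.0.1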
-
-    def exists(self, name):
-        if not cfg.CONF.AGENT.use_helper_for_ns_read:
-            return name in os.listdir(IP_NETNS_PATH)
-
-        output = self._parent._execute(
-            ['o'], 'netns', ['list'], run_as_root=True)
-        for line in [l.split()[0] for l in output.splitlines()]:
-            if name == line:
-                return True
-        return False
-
-
-def vxlan_in_use(segmentation_id, namespace=None):
-    """Return True if VXLAN VNID is in use by an interface, else False."""
-    ip_wrapper = IPWrapper(namespace=namespace)
-    interfaces = ip_wrapper.netns.execute(["ip", "-d", "link", "list"],
-                                          check_exit_code=True)
-    return 'vxlan id %s ' % segmentation_id in interfaces
-
-
-def device_exists(device_name, namespace=None):
-    """Return True if the device exists in the namespace."""
-    return IPDevice(device_name, namespace=namespace).exists()
-
-
-def device_exists_with_ips_and_mac(device_name, ip_cidrs, mac, namespace=None):
-    """Return True if the device with the given IP addresses and MAC address
-    exists in the namespace.
-    """
-    try:
-        device = IPDevice(device_name, namespace=namespace)
-        if mac != device.link.address:
-            return False
-        device_ip_cidrs = [ip['cidr'] for ip in device.addr.list()]
-        for ip_cidr in ip_cidrs:
-            if ip_cidr not in device_ip_cidrs:
-                return False
-    except RuntimeError:
-        return False
-    else:
-        return True
-
-
-def get_routing_table(ip_version, namespace=None):
-    """Return a list of dictionaries, each representing a route.
-
-    @param ip_version: the IP version of the routes to return, for example 4
-    @param namespace: the namespace from which to read the routing table
-    @return: a list of dictionaries, each representing a route.
-    The dictionary format is: {'destination': cidr,
-                               'nexthop': ip,
-                               'device': device_name,
-                               'scope': scope}
-    """
-
-    ip_wrapper = IPWrapper(namespace=namespace)
-    table = ip_wrapper.netns.execute(
-        ['ip', '-%s' % ip_version, 'route'],
-        check_exit_code=True)
-
-    routes = []
-    # Example for route_lines:
-    # default via 192.168.3.120 dev wlp3s0  proto static  metric 1024
-    # 10.0.0.0/8 dev tun0  proto static  scope link  metric 1024
-    # The first column is the destination, followed by key/value pairs.
-    # The generator splits the routing table by newline, then strips and splits
-    # each individual line.
-    route_lines = (line.split() for line in table.split('\n') if line.strip())
-    for route in route_lines:
-        network = route[0]
-        # Create a dict of key/value pairs (For example - 'dev': 'tun0')
-        # excluding the first column.
-        data = dict(route[i:i + 2] for i in range(1, len(route), 2))
-        routes.append({'destination': network,
-                       'nexthop': data.get('via'),
-                       'device': data.get('dev'),
-                       'scope': data.get('scope')})
-    return routes
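-
-# NOTE(editor): for the two example lines above, this returns:
-#     [{'destination': 'default', 'nexthop': '192.168.3.120',
-#       'device': 'wlp3s0', 'scope': None},
-#      {'destination': '10.0.0.0/8', 'nexthop': None,
-#       'device': 'tun0', 'scope': 'link'}]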
-
-
-def ensure_device_is_ready(device_name, namespace=None):
-    dev = IPDevice(device_name, namespace=namespace)
-    dev.set_log_fail_as_error(False)
-    try:
-        # Ensure the device is up, even if it is already up. If the device
-        # doesn't exist, a RuntimeError will be raised.
-        dev.link.set_up()
-    except RuntimeError:
-        return False
-    return True
-
-
-def iproute_arg_supported(command, arg):
-    command += ['help']
-    stdout, stderr = utils.execute(command, check_exit_code=False,
-                                   return_stderr=True, log_fail_as_error=False)
-    return any(arg in line for line in stderr.split('\n'))
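-
-# NOTE(editor): illustrative usage; the command and argument are made-up
-# examples, not taken from this module:
-#     iproute_arg_supported(['ip', 'link', 'add', 'type', 'vxlan'], 'proxy')
-# runs `ip link add type vxlan help` and returns True if 'proxy' appears
-# anywhere in the help text printed on stderr.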
-
-
-def _arping(ns_name, iface_name, address, count):
-    # Pass -w to set a timeout so arping exits even if the interface is
-    # removed while it is running.
-    arping_cmd = ['arping', '-A', '-I', iface_name, '-c', count,
-                  '-w', 1.5 * count, address]
-    try:
-        ip_wrapper = IPWrapper(namespace=ns_name)
-        ip_wrapper.netns.execute(arping_cmd, check_exit_code=True)
-    except Exception:
-        msg = _LE("Failed sending gratuitous ARP "
-                  "to %(addr)s on %(iface)s in namespace %(ns)s")
-        LOG.exception(msg, {'addr': address,
-                            'iface': iface_name,
-                            'ns': ns_name})
-
-
-def send_ip_addr_adv_notif(ns_name, iface_name, address, config):
-    """Send advance notification of an IP address assignment.
-
-    If the address is in the IPv4 family, send gratuitous ARP.
-
-    If the address is in the IPv6 family, no advance notification is
-    necessary, since the Neighbor Discovery Protocol (NDP), Duplicate
-    Address Detection (DAD), and (for stateless addresses) router
-    advertisements (RAs) are sufficient for address resolution and
-    duplicate address detection.
-    """
-    count = config.send_arp_for_ha
-
-    def arping():
-        _arping(ns_name, iface_name, address, count)
-
-    if count > 0 and netaddr.IPAddress(address).version == 4:
-        eventlet.spawn_n(arping)
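-
-# NOTE(editor): illustrative only; with config.send_arp_for_ha = 3 and an
-# IPv4 address, a background greenthread runs, inside the namespace:
-#     arping -A -I <iface_name> -c 3 -w 4.5 <address>
-# i.e. three gratuitous ARP replies with a 4.5-second (1.5 * count) timeout.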
-
-
-def add_namespace_to_cmd(cmd, namespace=None):
-    """Add an optional namespace to the command."""
-
-    return ['ip', 'netns', 'exec', namespace] + cmd if namespace else cmd
-
-
-def get_ip_version(ip_or_cidr):
-    return netaddr.IPNetwork(ip_or_cidr).version
-
-
-def get_ipv6_lladdr(mac_addr):
-    return '%s/64' % netaddr.EUI(mac_addr).ipv6_link_local()
diff --git a/neutron/agent/linux/ip_link_support.py b/neutron/agent/linux/ip_link_support.py
deleted file mode 100644 (file)
index 1a49947..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-
-from oslo_log import log as logging
-
-from neutron._i18n import _, _LE
-from neutron.agent.linux import utils
-from neutron.common import exceptions as n_exc
-
-
-LOG = logging.getLogger(__name__)
-
-
-class IpLinkSupportError(n_exc.NeutronException):
-    pass
-
-
-class UnsupportedIpLinkCommand(IpLinkSupportError):
-    message = _("ip link command is not supported: %(reason)s")
-
-
-class InvalidIpLinkCapability(IpLinkSupportError):
-    message = _("ip link capability %(capability)s is not supported")
-
-
-class IpLinkConstants(object):
-    IP_LINK_CAPABILITY_STATE = "state"
-    IP_LINK_CAPABILITY_VLAN = "vlan"
-    IP_LINK_CAPABILITY_RATE = "rate"
-    IP_LINK_CAPABILITY_SPOOFCHK = "spoofchk"
-    IP_LINK_SUB_CAPABILITY_QOS = "qos"
-
-
-class IpLinkSupport(object):
-    VF_BLOCK_REGEX = r"\[ vf NUM(?P<vf_block>.*) \] \]"
-
-    CAPABILITY_REGEX = r"\[ %s (.*)"
-    SUB_CAPABILITY_REGEX = r"\[ %(cap)s (.*) \[ %(subcap)s (.*)"
-
-    @classmethod
-    def get_vf_mgmt_section(cls):
-        """Parses ip link help output, and gets vf block"""
-
-        output = cls._get_ip_link_output()
-        vf_block_pattern = re.search(cls.VF_BLOCK_REGEX,
-                                     output,
-                                     re.DOTALL | re.MULTILINE)
-        if vf_block_pattern:
-            return vf_block_pattern.group("vf_block")
-
-    @classmethod
-    def vf_mgmt_capability_supported(cls, vf_section, capability,
-                                     subcapability=None):
-        """Validate vf capability support
-
-        Checks if given vf capability (and sub capability
-        if given) supported
-        :param vf_section: vf Num block content
-        :param capability: for example: vlan, rate, spoofchk, state
-        :param subcapability: for example: qos
-        """
-        if not vf_section:
-            return False
-        if subcapability:
-            regex = cls.SUB_CAPABILITY_REGEX % {"cap": capability,
-                                                "subcap": subcapability}
-        else:
-            regex = cls.CAPABILITY_REGEX % capability
-        pattern_match = re.search(regex, vf_section,
-                                  re.DOTALL | re.MULTILINE)
-        return pattern_match is not None
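-
-    # NOTE(editor): illustrative usage of the two classmethods above:
-    #
-    #     vf_section = IpLinkSupport.get_vf_mgmt_section()
-    #     IpLinkSupport.vf_mgmt_capability_supported(
-    #         vf_section, IpLinkConstants.IP_LINK_CAPABILITY_RATE)
-    #
-    # returns True only if the host's `ip link` help text advertises
-    # setting a rate on a VF.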
-
-    @classmethod
-    def _get_ip_link_output(cls):
-        """Gets the output of the ip link help command
-
-        Runs ip link help command and stores its output
-        Note: ip link help return error and writes its output to stderr
-                so we get the output from there. however, if this issue
-                will be solved and the command will write to stdout, we
-                will get the output from there too.
-        """
-        try:
-            ip_cmd = ['ip', 'link', 'help']
-            _stdout, _stderr = utils.execute(
-                ip_cmd,
-                check_exit_code=False,
-                return_stderr=True,
-                log_fail_as_error=False)
-        except Exception as e:
-            LOG.exception(_LE("Failed executing ip command"))
-            raise UnsupportedIpLinkCommand(reason=e)
-        return _stdout or _stderr
diff --git a/neutron/agent/linux/ip_monitor.py b/neutron/agent/linux/ip_monitor.py
deleted file mode 100644 (file)
index 4e36c81..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-from oslo_utils import excutils
-
-from neutron._i18n import _LE
-from neutron.agent.linux import async_process
-from neutron.agent.linux import ip_lib
-
-LOG = logging.getLogger(__name__)
-
-
-class IPMonitorEvent(object):
-    def __init__(self, line, added, interface, cidr):
-        self.line = line
-        self.added = added
-        self.interface = interface
-        self.cidr = cidr
-
-    def __str__(self):
-        return self.line
-
-    @classmethod
-    def from_text(cls, line):
-        route = line.split()
-
-        try:
-            first_word = route[0]
-        except IndexError:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Unable to parse route "%s"'), line)
-
-        added = (first_word != 'Deleted')
-        if not added:
-            route = route[1:]
-
-        try:
-            interface = ip_lib.remove_interface_suffix(route[1])
-            cidr = route[3]
-        except IndexError:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Unable to parse route "%s"'), line)
-
-        return cls(line, added, interface, cidr)
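-
-    # NOTE(editor): illustrative only; a line such as
-    #     3: wlp3s0    inet 192.168.3.11/24 brd 192.168.3.255 scope global wlp3s0
-    # yields added=True, interface='wlp3s0', cidr='192.168.3.11/24', while a
-    # leading "Deleted " flips added to False.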
-
-
-class IPMonitor(async_process.AsyncProcess):
-    """Wrapper over `ip monitor address`.
-
-    To monitor and react indefinitely:
-        m = IPMonitor(namespace='tmp', run_as_root=True)
-        m.start()
-        for iterable in m:
-            event = IPMonitorEvent.from_text(iterable)
-            print(event, event.added, event.interface, event.cidr)
-    """
-
-    def __init__(self,
-                 namespace=None,
-                 run_as_root=True,
-                 respawn_interval=None):
-        super(IPMonitor, self).__init__(['ip', '-o', 'monitor', 'address'],
-                                        run_as_root=run_as_root,
-                                        respawn_interval=respawn_interval,
-                                        namespace=namespace)
-
-    def __iter__(self):
-        return self.iter_stdout(block=True)
-
-    def start(self):
-        super(IPMonitor, self).start(block=True)
-
-    def stop(self):
-        super(IPMonitor, self).stop(block=True)
diff --git a/neutron/agent/linux/ipset_manager.py b/neutron/agent/linux/ipset_manager.py
deleted file mode 100644 (file)
index ca11720..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-import copy
-
-import netaddr
-
-from neutron.agent.linux import utils as linux_utils
-from neutron.common import utils
-
-IPSET_ADD_BULK_THRESHOLD = 5
-NET_PREFIX = 'N'
-SWAP_SUFFIX = '-n'
-IPSET_NAME_MAX_LENGTH = 31 - len(SWAP_SUFFIX)
-
-
-class IpsetManager(object):
-    """Smart wrapper for ipset.
-
-       Keeps track of ip addresses per set, using a full set refresh for
-       bulk changes and single ip add/remove for smaller ones.
-    """
-
-    def __init__(self, execute=None, namespace=None):
-        self.execute = execute or linux_utils.execute
-        self.namespace = namespace
-        self.ipset_sets = {}
-
-    def _sanitize_addresses(self, addresses):
-        """This method converts any address to ipset format.
-
-        If an address has a mask of /0 we need to convert it to a mask of
-        /1, as ipset does not support /0-length addresses. Instead we use
-        two /1's to represent the /0.
-        """
-        sanitized_addresses = []
-        for ip in addresses:
-            ip = netaddr.IPNetwork(ip)
-            if ip.prefixlen == 0:
-                if ip.version == 4:
-                    sanitized_addresses.append('0.0.0.0/1')
-                    sanitized_addresses.append('128.0.0.0/1')
-                elif ip.version == 6:
-                    sanitized_addresses.append('::/1')
-                    sanitized_addresses.append('8000::/1')
-            else:
-                sanitized_addresses.append(str(ip))
-        return sanitized_addresses
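-
-    # NOTE(editor): illustrative only; passing ['10.0.0.1/32', '0.0.0.0/0']
-    # returns ['10.0.0.1/32', '0.0.0.0/1', '128.0.0.0/1'], the /0 having
-    # been replaced by the two /1 halves of the address space.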
-
-    @staticmethod
-    def get_name(id, ethertype):
-        """Returns the given ipset name for an id+ethertype pair.
-        This reference can be used from iptables.
-        """
-        name = NET_PREFIX + ethertype + id
-        return name[:IPSET_NAME_MAX_LENGTH]
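-
-    # NOTE(editor): illustrative only, with a made-up security group id:
-    #     get_name('ff1dec9a-2c72-4f3f-9b8f-2f31e5c4b7a1', 'IPv4')
-    # returns 'NIPv4ff1dec9a-2c72-4f3f-9b8f-', truncated to the 29-character
-    # limit that leaves room for the '-n' swap suffix.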
-
-    def set_name_exists(self, set_name):
-        """Returns true if the set name is known to the manager."""
-        return set_name in self.ipset_sets
-
-    def set_members(self, id, ethertype, member_ips):
-        """Create or update a specific set by name and ethertype.
-        It will make sure that a set is created, updated to
-        add / remove new members, or swapped atomically if
-        that's faster.
-        """
-        member_ips = self._sanitize_addresses(member_ips)
-        set_name = self.get_name(id, ethertype)
-        add_ips = self._get_new_set_ips(set_name, member_ips)
-        del_ips = self._get_deleted_set_ips(set_name, member_ips)
-        if not add_ips and not del_ips and self.set_name_exists(set_name):
-            # nothing to do because no membership changes and the ipset exists
-            return
-        self.set_members_mutate(set_name, ethertype, member_ips)
-
-    @utils.synchronized('ipset', external=True)
-    def set_members_mutate(self, set_name, ethertype, member_ips):
-        if not self.set_name_exists(set_name):
-            # The initial creation is handled with create/refresh to
-            # avoid any downtime for existing sets (i.e. avoiding
-            # a flush/restore), as the restore operation of ipset is
-            # additive to the existing set.
-            self._create_set(set_name, ethertype)
-            self._refresh_set(set_name, member_ips, ethertype)
-            # TODO(majopela,shihanzhang,haleyb): Optimize this by
-            # gathering the system ipsets at start. So we can determine
-            # if a normal restore is enough for initial creation.
-            # That should speed up agent boot up time.
-        else:
-            add_ips = self._get_new_set_ips(set_name, member_ips)
-            del_ips = self._get_deleted_set_ips(set_name, member_ips)
-            if len(add_ips) + len(del_ips) < IPSET_ADD_BULK_THRESHOLD:
-                self._add_members_to_set(set_name, add_ips)
-                self._del_members_from_set(set_name, del_ips)
-            else:
-                self._refresh_set(set_name, member_ips, ethertype)
-
-    @utils.synchronized('ipset', external=True)
-    def destroy(self, id, ethertype, forced=False):
-        set_name = self.get_name(id, ethertype)
-        self._destroy(set_name, forced)
-
-    def _add_member_to_set(self, set_name, member_ip):
-        cmd = ['ipset', 'add', '-exist', set_name, member_ip]
-        self._apply(cmd)
-        self.ipset_sets[set_name].append(member_ip)
-
-    def _refresh_set(self, set_name, member_ips, ethertype):
-        new_set_name = set_name + SWAP_SUFFIX
-        set_type = self._get_ipset_set_type(ethertype)
-        process_input = ["create %s hash:net family %s" % (new_set_name,
-                                                          set_type)]
-        for ip in member_ips:
-            process_input.append("add %s %s" % (new_set_name, ip))
-
-        self._restore_sets(process_input)
-        self._swap_sets(new_set_name, set_name)
-        self._destroy(new_set_name, True)
-        self.ipset_sets[set_name] = copy.copy(member_ips)
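-
-    # NOTE(editor): illustrative only; refreshing set 'NIPv4foo' with member
-    # list ['10.0.0.5/32'] feeds `ipset restore -exist` the lines
-    #     create NIPv4foo-n hash:net family inet
-    #     add NIPv4foo-n 10.0.0.5/32
-    # then runs `ipset swap NIPv4foo-n NIPv4foo` and destroys the temporary
-    # set, which after the swap holds the old members.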
-
-    def _del_member_from_set(self, set_name, member_ip):
-        cmd = ['ipset', 'del', set_name, member_ip]
-        self._apply(cmd, fail_on_errors=False)
-        self.ipset_sets[set_name].remove(member_ip)
-
-    def _create_set(self, set_name, ethertype):
-        cmd = ['ipset', 'create', '-exist', set_name, 'hash:net', 'family',
-               self._get_ipset_set_type(ethertype)]
-        self._apply(cmd)
-        self.ipset_sets[set_name] = []
-
-    def _apply(self, cmd, input=None, fail_on_errors=True):
-        input = '\n'.join(input) if input else None
-        cmd_ns = []
-        if self.namespace:
-            cmd_ns.extend(['ip', 'netns', 'exec', self.namespace])
-        cmd_ns.extend(cmd)
-        self.execute(cmd_ns, run_as_root=True, process_input=input,
-                     check_exit_code=fail_on_errors)
-
-    def _get_new_set_ips(self, set_name, expected_ips):
-        new_member_ips = (set(expected_ips) -
-                          set(self.ipset_sets.get(set_name, [])))
-        return list(new_member_ips)
-
-    def _get_deleted_set_ips(self, set_name, expected_ips):
-        deleted_member_ips = (set(self.ipset_sets.get(set_name, [])) -
-                              set(expected_ips))
-        return list(deleted_member_ips)
-
-    def _add_members_to_set(self, set_name, add_ips):
-        for ip in add_ips:
-            if ip not in self.ipset_sets[set_name]:
-                self._add_member_to_set(set_name, ip)
-
-    def _del_members_from_set(self, set_name, del_ips):
-        for ip in del_ips:
-            if ip in self.ipset_sets[set_name]:
-                self._del_member_from_set(set_name, ip)
-
-    def _get_ipset_set_type(self, ethertype):
-        return 'inet6' if ethertype == 'IPv6' else 'inet'
-
-    def _restore_sets(self, process_input):
-        cmd = ['ipset', 'restore', '-exist']
-        self._apply(cmd, process_input)
-
-    def _swap_sets(self, src_set, dest_set):
-        cmd = ['ipset', 'swap', src_set, dest_set]
-        self._apply(cmd)
-
-    def _destroy(self, set_name, forced=False):
-        if set_name in self.ipset_sets or forced:
-            cmd = ['ipset', 'destroy', set_name]
-            self._apply(cmd, fail_on_errors=False)
-            self.ipset_sets.pop(set_name, None)
diff --git a/neutron/agent/linux/iptables_comments.py b/neutron/agent/linux/iptables_comments.py
deleted file mode 100644 (file)
index b8883e0..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#    Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""iptables comments"""
-
-# Do not translate these comments. These comments cannot contain a quote or
-# an escape character because they will end up in a call to iptables and
-# could interfere with other parameters.
-
-SNAT_OUT = 'Perform source NAT on outgoing traffic.'
-UNMATCH_DROP = 'Default drop rule for unmatched traffic.'
-VM_INT_SG = 'Direct traffic from the VM interface to the security group chain.'
-SG_TO_VM_SG = 'Jump to the VM specific chain.'
-INPUT_TO_SG = 'Direct incoming traffic from VM to the security group chain.'
-PAIR_ALLOW = 'Allow traffic from defined IP/MAC pairs.'
-PAIR_DROP = 'Drop traffic without an IP/MAC allow rule.'
-DHCP_CLIENT = 'Allow DHCP client traffic.'
-DHCP_SPOOF = 'Prevent DHCP Spoofing by VM.'
-UNMATCHED = 'Send unmatched traffic to the fallback chain.'
-INVALID_DROP = ("Drop packets that appear related to an existing connection "
-                "(e.g. TCP ACK/FIN) but do not have an entry in conntrack.")
-ALLOW_ASSOC = ('Direct packets associated with a known session to the RETURN '
-               'chain.')
-PORT_SEC_ACCEPT = 'Accept all packets when port security is disabled.'
-IPV6_RA_DROP = 'Drop IPv6 Router Advts from VM Instance.'
-IPV6_ICMP_ALLOW = 'Allow IPv6 ICMP traffic.'
diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py
deleted file mode 100644 (file)
index f34c83b..0000000
+++ /dev/null
@@ -1,936 +0,0 @@
-# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import re
-
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-import six
-
-from neutron._i18n import _LI
-from neutron.agent import firewall
-from neutron.agent.linux import ip_conntrack
-from neutron.agent.linux import ipset_manager
-from neutron.agent.linux import iptables_comments as ic
-from neutron.agent.linux import iptables_manager
-from neutron.agent.linux import utils
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.common import utils as c_utils
-from neutron.extensions import portsecurity as psec
-
-
-LOG = logging.getLogger(__name__)
-SG_CHAIN = 'sg-chain'
-SPOOF_FILTER = 'spoof-filter'
-CHAIN_NAME_PREFIX = {firewall.INGRESS_DIRECTION: 'i',
-                     firewall.EGRESS_DIRECTION: 'o',
-                     SPOOF_FILTER: 's'}
-DIRECTION_IP_PREFIX = {firewall.INGRESS_DIRECTION: 'source_ip_prefix',
-                       firewall.EGRESS_DIRECTION: 'dest_ip_prefix'}
-IPSET_DIRECTION = {firewall.INGRESS_DIRECTION: 'src',
-                   firewall.EGRESS_DIRECTION: 'dst'}
-# length of all device prefixes (e.g. qvo, tap, qvb)
-LINUX_DEV_PREFIX_LEN = 3
-LINUX_DEV_LEN = 14
-MAX_CONNTRACK_ZONES = 65535
-comment_rule = iptables_manager.comment_rule
-
-
-class mac_iptables(netaddr.mac_eui48):
-    """mac format class for netaddr to match iptables representation."""
-    word_sep = ':'
-
-
-class IptablesFirewallDriver(firewall.FirewallDriver):
-    """Driver which enforces security groups through iptables rules."""
-    IPTABLES_DIRECTION = {firewall.INGRESS_DIRECTION: 'physdev-out',
-                          firewall.EGRESS_DIRECTION: 'physdev-in'}
-
-    def __init__(self, namespace=None):
-        self.iptables = iptables_manager.IptablesManager(
-            use_ipv6=ipv6_utils.is_enabled(),
-            namespace=namespace)
-        # TODO(majopela, shihanzhang): refactor out ipset to a separate
-        # driver composed over this one
-        self.ipset = ipset_manager.IpsetManager(namespace=namespace)
-        self.ipconntrack = ip_conntrack.IpConntrackManager(
-            self.get_device_zone, namespace=namespace)
-        self._populate_initial_zone_map()
-        # list of port which has security group
-        self.filtered_ports = {}
-        self.unfiltered_ports = {}
-        self._add_fallback_chain_v4v6()
-        self._defer_apply = False
-        self._pre_defer_filtered_ports = None
-        self._pre_defer_unfiltered_ports = None
-        # List of security group rules for ports residing on this host
-        self.sg_rules = {}
-        self.pre_sg_rules = None
-        # List of security group member ips for ports residing on this host
-        self.sg_members = collections.defaultdict(
-            lambda: collections.defaultdict(list))
-        self.pre_sg_members = None
-        self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset
-        self._enabled_netfilter_for_bridges = False
-        self.updated_rule_sg_ids = set()
-        self.updated_sg_members = set()
-        self.devices_with_updated_sg_members = collections.defaultdict(list)
-
-    def _enable_netfilter_for_bridges(self):
-        # we only need to set these values once, but it has to be when
-        # we create a bridge; before that the bridge module might not
-        # be loaded and the proc values aren't there.
-        if self._enabled_netfilter_for_bridges:
-            return
-        else:
-            self._enabled_netfilter_for_bridges = True
-
-        # These proc values ensure that netfilter is enabled on
-        # bridges; essential for enforcing security groups rules with
-        # OVS Hybrid.  Distributions can differ on whether this is
-        # enabled by default or not (Ubuntu - yes, Redhat - no, for
-        # example).
-        LOG.debug("Enabling netfilter for bridges")
-        utils.execute(['sysctl', '-w',
-                       'net.bridge.bridge-nf-call-arptables=1'],
-                      run_as_root=True)
-        utils.execute(['sysctl', '-w',
-                       'net.bridge.bridge-nf-call-ip6tables=1'],
-                      run_as_root=True)
-        utils.execute(['sysctl', '-w',
-                       'net.bridge.bridge-nf-call-iptables=1'],
-                      run_as_root=True)
-
-    @property
-    def ports(self):
-        return dict(self.filtered_ports, **self.unfiltered_ports)
-
-    def _update_remote_security_group_members(self, sec_group_ids):
-        for sg_id in sec_group_ids:
-            for device in self.filtered_ports.values():
-                if sg_id in device.get('security_group_source_groups', []):
-                    self.devices_with_updated_sg_members[sg_id].append(device)
-
-    def security_group_updated(self, action_type, sec_group_ids,
-                               device_ids=None):
-        device_ids = device_ids or []
-        if action_type == 'sg_rule':
-            self.updated_rule_sg_ids.update(sec_group_ids)
-        elif action_type == 'sg_member':
-            if device_ids:
-                self.updated_sg_members.update(device_ids)
-            else:
-                self._update_remote_security_group_members(sec_group_ids)
-
-    def update_security_group_rules(self, sg_id, sg_rules):
-        LOG.debug("Update rules of security group (%s)", sg_id)
-        self.sg_rules[sg_id] = sg_rules
-
-    def update_security_group_members(self, sg_id, sg_members):
-        LOG.debug("Update members of security group (%s)", sg_id)
-        self.sg_members[sg_id] = collections.defaultdict(list, sg_members)
-
-    def _ps_enabled(self, port):
-        return port.get(psec.PORTSECURITY, True)
-
-    def _set_ports(self, port):
-        if not self._ps_enabled(port):
-            self.unfiltered_ports[port['device']] = port
-            self.filtered_ports.pop(port['device'], None)
-        else:
-            self.filtered_ports[port['device']] = port
-            self.unfiltered_ports.pop(port['device'], None)
-
-    def _unset_ports(self, port):
-        self.unfiltered_ports.pop(port['device'], None)
-        self.filtered_ports.pop(port['device'], None)
-
-    def prepare_port_filter(self, port):
-        LOG.debug("Preparing device (%s) filter", port['device'])
-        self._remove_chains()
-        self._set_ports(port)
-        self._enable_netfilter_for_bridges()
-        # each security group has its own chains
-        self._setup_chains()
-        return self.iptables.apply()
-
-    def update_port_filter(self, port):
-        LOG.debug("Updating device (%s) filter", port['device'])
-        if port['device'] not in self.ports:
-            LOG.info(_LI('Attempted to update port filter which is not '
-                         'filtered %s'), port['device'])
-            return
-        self._remove_chains()
-        self._set_ports(port)
-        self._setup_chains()
-        return self.iptables.apply()
-
-    def remove_port_filter(self, port):
-        LOG.debug("Removing device (%s) filter", port['device'])
-        if port['device'] not in self.ports:
-            LOG.info(_LI('Attempted to remove port filter which is not '
-                         'filtered %r'), port)
-            return
-        self._remove_chains()
-        self._unset_ports(port)
-        self._setup_chains()
-        return self.iptables.apply()
-
-    def _add_accept_rule_port_sec(self, port, direction):
-        self._update_port_sec_rules(port, direction, add=True)
-
-    def _remove_rule_port_sec(self, port, direction):
-        self._update_port_sec_rules(port, direction, add=False)
-
-    def _remove_rule_from_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules):
-        for rule in ipv4_rules:
-            self.iptables.ipv4['filter'].remove_rule(chain_name, rule)
-
-        for rule in ipv6_rules:
-            self.iptables.ipv6['filter'].remove_rule(chain_name, rule)
-
-    def _setup_chains(self):
-        """Setup ingress and egress chain for a port."""
-        if not self._defer_apply:
-            self._setup_chains_apply(self.filtered_ports,
-                                     self.unfiltered_ports)
-
-    def _setup_chains_apply(self, ports, unfiltered_ports):
-        self._add_chain_by_name_v4v6(SG_CHAIN)
-        # sort by port so we always do this deterministically between
-        # agent restarts and don't cause unnecessary rule differences
-        for pname in sorted(ports):
-            port = ports[pname]
-            self._setup_chain(port, firewall.INGRESS_DIRECTION)
-            self._setup_chain(port, firewall.EGRESS_DIRECTION)
-        self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
-        self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
-
-        for port in unfiltered_ports.values():
-            self._add_accept_rule_port_sec(port, firewall.INGRESS_DIRECTION)
-            self._add_accept_rule_port_sec(port, firewall.EGRESS_DIRECTION)
-
-    def _remove_chains(self):
-        """Remove ingress and egress chain for a port."""
-        if not self._defer_apply:
-            self._remove_chains_apply(self.filtered_ports,
-                                      self.unfiltered_ports)
-
-    def _remove_chains_apply(self, ports, unfiltered_ports):
-        for port in ports.values():
-            self._remove_chain(port, firewall.INGRESS_DIRECTION)
-            self._remove_chain(port, firewall.EGRESS_DIRECTION)
-            self._remove_chain(port, SPOOF_FILTER)
-        for port in unfiltered_ports.values():
-            self._remove_rule_port_sec(port, firewall.INGRESS_DIRECTION)
-            self._remove_rule_port_sec(port, firewall.EGRESS_DIRECTION)
-        self._remove_chain_by_name_v4v6(SG_CHAIN)
-
-    def _setup_chain(self, port, DIRECTION):
-        self._add_chain(port, DIRECTION)
-        self._add_rules_by_security_group(port, DIRECTION)
-
-    def _remove_chain(self, port, DIRECTION):
-        chain_name = self._port_chain_name(port, DIRECTION)
-        self._remove_chain_by_name_v4v6(chain_name)
-
-    def _add_fallback_chain_v4v6(self):
-        self.iptables.ipv4['filter'].add_chain('sg-fallback')
-        self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP',
-                                              comment=ic.UNMATCH_DROP)
-        self.iptables.ipv6['filter'].add_chain('sg-fallback')
-        self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP',
-                                              comment=ic.UNMATCH_DROP)
-
-    def _add_raw_chain(self, chain_name):
-        self.iptables.ipv4['raw'].add_chain(chain_name)
-        self.iptables.ipv6['raw'].add_chain(chain_name)
-
-    def _add_chain_by_name_v4v6(self, chain_name):
-        self.iptables.ipv4['filter'].add_chain(chain_name)
-        self.iptables.ipv6['filter'].add_chain(chain_name)
-
-    def _remove_raw_chain(self, chain_name):
-        self.iptables.ipv4['raw'].remove_chain(chain_name)
-        self.iptables.ipv6['raw'].remove_chain(chain_name)
-
-    def _remove_chain_by_name_v4v6(self, chain_name):
-        self.iptables.ipv4['filter'].remove_chain(chain_name)
-        self.iptables.ipv6['filter'].remove_chain(chain_name)
-
-    def _add_rules_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules,
-                                 comment=None):
-        for rule in ipv4_rules:
-            self.iptables.ipv4['filter'].add_rule(chain_name, rule,
-                                                  comment=comment)
-
-        for rule in ipv6_rules:
-            self.iptables.ipv6['filter'].add_rule(chain_name, rule,
-                                                  comment=comment)
-
-    def _get_device_name(self, port):
-        return port['device']
-
-    def _update_port_sec_rules(self, port, direction, add=False):
-        # add/remove rules in FORWARD and INPUT chain
-        device = self._get_device_name(port)
-
-        jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
-                     '-j ACCEPT' % (self.IPTABLES_DIRECTION[direction],
-                                    device)]
-        if add:
-            self._add_rules_to_chain_v4v6(
-                'FORWARD', jump_rule, jump_rule, comment=ic.PORT_SEC_ACCEPT)
-        else:
-            self._remove_rule_from_chain_v4v6('FORWARD', jump_rule, jump_rule)
-
-        if direction == firewall.EGRESS_DIRECTION:
-            jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
-                         '-j ACCEPT' % (self.IPTABLES_DIRECTION[direction],
-                                        device)]
-            if add:
-                self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule,
-                                              comment=ic.PORT_SEC_ACCEPT)
-            else:
-                self._remove_rule_from_chain_v4v6(
-                    'INPUT', jump_rule, jump_rule)
-
-    def _add_chain(self, port, direction):
-        chain_name = self._port_chain_name(port, direction)
-        self._add_chain_by_name_v4v6(chain_name)
-
-        # NOTE(nati): jump to the security group chain (SG_CHAIN).
-        # This is needed because a packet may match rules for two ports
-        # when both ports reside on the same host.
-        # We accept the packet at the end of SG_CHAIN.
-
-        # jump to the security group chain
-        device = self._get_device_name(port)
-        jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
-                     '-j $%s' % (self.IPTABLES_DIRECTION[direction],
-                                 device,
-                                 SG_CHAIN)]
-        self._add_rules_to_chain_v4v6('FORWARD', jump_rule, jump_rule,
-                                      comment=ic.VM_INT_SG)
-
-        # jump to the chain based on the device
-        jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
-                     '-j $%s' % (self.IPTABLES_DIRECTION[direction],
-                                 device,
-                                 chain_name)]
-        self._add_rules_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule,
-                                      comment=ic.SG_TO_VM_SG)
-
-        if direction == firewall.EGRESS_DIRECTION:
-            self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule,
-                                          comment=ic.INPUT_TO_SG)
-
-    def _split_sgr_by_ethertype(self, security_group_rules):
-        ipv4_sg_rules = []
-        ipv6_sg_rules = []
-        for rule in security_group_rules:
-            if rule.get('ethertype') == constants.IPv4:
-                ipv4_sg_rules.append(rule)
-            elif rule.get('ethertype') == constants.IPv6:
-                if rule.get('protocol') == 'icmp':
-                    rule['protocol'] = 'ipv6-icmp'
-                ipv6_sg_rules.append(rule)
-        return ipv4_sg_rules, ipv6_sg_rules
-
-    def _select_sgr_by_direction(self, port, direction):
-        return [rule
-                for rule in port.get('security_group_rules', [])
-                if rule['direction'] == direction]
-
-    def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules):
-        if mac_ip_pairs:
-            chain_name = self._port_chain_name(port, SPOOF_FILTER)
-            table.add_chain(chain_name)
-            for mac, ip in mac_ip_pairs:
-                if ip is None:
-                    # If fixed_ips is [] this rule will be added to the end
-                    # of the list after the allowed_address_pair rules.
-                    table.add_rule(chain_name,
-                                   '-m mac --mac-source %s -j RETURN'
-                                   % mac.upper(), comment=ic.PAIR_ALLOW)
-                else:
-                    # we need to convert it into a prefix to match iptables
-                    ip = c_utils.ip_to_cidr(ip)
-                    table.add_rule(chain_name,
-                                   '-s %s -m mac --mac-source %s -j RETURN'
-                                   % (ip, mac.upper()), comment=ic.PAIR_ALLOW)
-            table.add_rule(chain_name, '-j DROP', comment=ic.PAIR_DROP)
-            rules.append('-j $%s' % chain_name)
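-
-    # NOTE(editor): illustrative only, with made-up values; for a pair
-    # ('fa:16:3e:aa:bb:cc', '10.0.0.3') the spoof chain ends up containing
-    #     -s 10.0.0.3/32 -m mac --mac-source FA:16:3E:AA:BB:CC -j RETURN
-    #     -j DROP
-    # and a '-j $<spoof chain>' jump is appended to the caller's rules.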
-
-    def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs,
-                                  mac_ipv6_pairs):
-        mac = str(netaddr.EUI(mac, dialect=mac_iptables))
-        if netaddr.IPNetwork(ip_address).version == 4:
-            mac_ipv4_pairs.append((mac, ip_address))
-        else:
-            mac_ipv6_pairs.append((mac, ip_address))
-
-    def _spoofing_rule(self, port, ipv4_rules, ipv6_rules):
-        # Allow dhcp client packets
-        ipv4_rules += [comment_rule('-p udp -m udp --sport 68 '
-                                    '-m udp --dport 67 '
-                                    '-j RETURN', comment=ic.DHCP_CLIENT)]
-        # Drop Router Advts from the port.
-        ipv6_rules += [comment_rule('-p ipv6-icmp -m icmp6 --icmpv6-type %s '
-                                    '-j DROP' % constants.ICMPV6_TYPE_RA,
-                                    comment=ic.IPV6_RA_DROP)]
-        ipv6_rules += [comment_rule('-p ipv6-icmp -j RETURN',
-                                    comment=ic.IPV6_ICMP_ALLOW)]
-        ipv6_rules += [comment_rule('-p udp -m udp --sport 546 '
-                                    '-m udp --dport 547 '
-                                    '-j RETURN', comment=ic.DHCP_CLIENT)]
-        mac_ipv4_pairs = []
-        mac_ipv6_pairs = []
-
-        if isinstance(port.get('allowed_address_pairs'), list):
-            for address_pair in port['allowed_address_pairs']:
-                self._build_ipv4v6_mac_ip_list(address_pair['mac_address'],
-                                               address_pair['ip_address'],
-                                               mac_ipv4_pairs,
-                                               mac_ipv6_pairs)
-
-        for ip in port['fixed_ips']:
-            self._build_ipv4v6_mac_ip_list(port['mac_address'], ip,
-                                           mac_ipv4_pairs, mac_ipv6_pairs)
-        if not port['fixed_ips']:
-            mac_ipv4_pairs.append((port['mac_address'], None))
-            mac_ipv6_pairs.append((port['mac_address'], None))
-
-        self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'],
-                                       mac_ipv4_pairs, ipv4_rules)
-        self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'],
-                                       mac_ipv6_pairs, ipv6_rules)
-
-    def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules):
-        # NOTE(nati): Drop DHCP packets from the VM.
-        ipv4_rules += [comment_rule('-p udp -m udp --sport 67 '
-                                    '-m udp --dport 68 '
-                                    '-j DROP', comment=ic.DHCP_SPOOF)]
-        ipv6_rules += [comment_rule('-p udp -m udp --sport 547 '
-                                    '-m udp --dport 546 '
-                                    '-j DROP', comment=ic.DHCP_SPOOF)]
-
-    def _accept_inbound_icmpv6(self):
-        # Allow multicast listener, neighbor solicitation and
-        # neighbor advertisement into the instance
-        icmpv6_rules = []
-        for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
-            icmpv6_rules += ['-p ipv6-icmp -m icmp6 --icmpv6-type %s '
-                             '-j RETURN' % icmp6_type]
-        return icmpv6_rules
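-    # Illustrative sketch, assuming type 135 (neighbor solicitation) is
-    # among ICMPV6_ALLOWED_TYPES: the loop above then yields the rule
-    #   '-p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN'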
-
-    def _select_sg_rules_for_port(self, port, direction):
-        """Select rules from the security groups the port is member of."""
-        port_sg_ids = port.get('security_groups', [])
-        port_rules = []
-
-        for sg_id in port_sg_ids:
-            for rule in self.sg_rules.get(sg_id, []):
-                if rule['direction'] == direction:
-                    if self.enable_ipset:
-                        port_rules.append(rule)
-                    else:
-                        port_rules.extend(
-                            self._expand_sg_rule_with_remote_ips(
-                                rule, port, direction))
-        return port_rules
-
-    def _expand_sg_rule_with_remote_ips(self, rule, port, direction):
-        """Expand a remote group rule to rule per remote group IP."""
-        remote_group_id = rule.get('remote_group_id')
-        if remote_group_id:
-            ethertype = rule['ethertype']
-            port_ips = port.get('fixed_ips', [])
-
-            for ip in self.sg_members[remote_group_id][ethertype]:
-                if ip not in port_ips:
-                    ip_rule = rule.copy()
-                    direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
-                    ip_prefix = str(netaddr.IPNetwork(ip).cidr)
-                    ip_rule[direction_ip_prefix] = ip_prefix
-                    yield ip_rule
-        else:
-            yield rule
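-    # Illustrative sketch (hypothetical values): an ingress rule carrying
-    # remote_group_id G, with sg_members[G]['IPv4'] == ['10.0.0.5'], expands
-    # to one copy of the rule whose direction prefix ('source_ip_prefix'
-    # for ingress) is set to '10.0.0.5/32'; the port's own IPs are skipped.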
-
-    def _get_remote_sg_ids(self, port, direction=None):
-        sg_ids = port.get('security_groups', [])
-        remote_sg_ids = {constants.IPv4: set(), constants.IPv6: set()}
-        for sg_id in sg_ids:
-            for rule in self.sg_rules.get(sg_id, []):
-                if not direction or rule['direction'] == direction:
-                    remote_sg_id = rule.get('remote_group_id')
-                    ether_type = rule.get('ethertype')
-                    if remote_sg_id and ether_type:
-                        remote_sg_ids[ether_type].add(remote_sg_id)
-        return remote_sg_ids
-
-    def _add_rules_by_security_group(self, port, direction):
-        # select rules for current port and direction
-        security_group_rules = self._select_sgr_by_direction(port, direction)
-        security_group_rules += self._select_sg_rules_for_port(port, direction)
-        # make sure ipset members are updated for remote security groups
-        if self.enable_ipset:
-            remote_sg_ids = self._get_remote_sg_ids(port, direction)
-            self._update_ipset_members(remote_sg_ids)
-        # split groups by ip version
-        # for ipv4, the iptables command is used
-        # for ipv6, the ip6tables command is used
-        ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype(
-            security_group_rules)
-        ipv4_iptables_rules = []
-        ipv6_iptables_rules = []
-        # include fixed egress/ingress rules
-        if direction == firewall.EGRESS_DIRECTION:
-            self._add_fixed_egress_rules(port,
-                                         ipv4_iptables_rules,
-                                         ipv6_iptables_rules)
-        elif direction == firewall.INGRESS_DIRECTION:
-            ipv6_iptables_rules += self._accept_inbound_icmpv6()
-        # include IPv4 and IPv6 iptable rules from security group
-        ipv4_iptables_rules += self._convert_sgr_to_iptables_rules(
-            ipv4_sg_rules)
-        ipv6_iptables_rules += self._convert_sgr_to_iptables_rules(
-            ipv6_sg_rules)
-        # finally add the rules to the port chain for a given direction
-        self._add_rules_to_chain_v4v6(self._port_chain_name(port, direction),
-                                      ipv4_iptables_rules,
-                                      ipv6_iptables_rules)
-
-    def _add_fixed_egress_rules(self, port, ipv4_iptables_rules,
-                                ipv6_iptables_rules):
-        self._spoofing_rule(port,
-                            ipv4_iptables_rules,
-                            ipv6_iptables_rules)
-        self._drop_dhcp_rule(ipv4_iptables_rules, ipv6_iptables_rules)
-
-    def _update_ipset_members(self, security_group_ids):
-        for ip_version, sg_ids in security_group_ids.items():
-            for sg_id in sg_ids:
-                current_ips = self.sg_members[sg_id][ip_version]
-                self.ipset.set_members(sg_id, ip_version, current_ips)
-
-    def _generate_ipset_rule_args(self, sg_rule, remote_gid):
-        ethertype = sg_rule.get('ethertype')
-        ipset_name = self.ipset.get_name(remote_gid, ethertype)
-        if not self.ipset.set_name_exists(ipset_name):
-            # NOTE(mangelajo): ipsets for empty groups are not created,
-            #                  so we can't reference them here.
-            return None
-        ipset_direction = IPSET_DIRECTION[sg_rule.get('direction')]
-        args = self._generate_protocol_and_port_args(sg_rule)
-        args += ['-m set', '--match-set', ipset_name, ipset_direction]
-        args += ['-j RETURN']
-        return args
-
-    def _generate_protocol_and_port_args(self, sg_rule):
-        args = self._protocol_arg(sg_rule.get('protocol'))
-        args += self._port_arg('sport',
-                               sg_rule.get('protocol'),
-                               sg_rule.get('source_port_range_min'),
-                               sg_rule.get('source_port_range_max'))
-        args += self._port_arg('dport',
-                               sg_rule.get('protocol'),
-                               sg_rule.get('port_range_min'),
-                               sg_rule.get('port_range_max'))
-        return args
-
-    def _generate_plain_rule_args(self, sg_rule):
-        # These arguments MUST be in the format iptables-save will
-        # display them: source/dest, protocol, sport, dport, target.
-        # Otherwise the iptables_manager code won't be able to find
-        # them to preserve their [packet:byte] counts.
-        args = self._ip_prefix_arg('s', sg_rule.get('source_ip_prefix'))
-        args += self._ip_prefix_arg('d', sg_rule.get('dest_ip_prefix'))
-        args += self._generate_protocol_and_port_args(sg_rule)
-        args += ['-j RETURN']
-        return args
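-    # Illustrative sketch (hypothetical rule): an ingress TCP rule with
-    # source_ip_prefix '10.0.0.0/24' and port 22 renders, once joined, as
-    #   '-s 10.0.0.0/24 -p tcp -m tcp --dport 22 -j RETURN'
-    # which matches the ordering iptables-save emits.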
-
-    def _convert_sg_rule_to_iptables_args(self, sg_rule):
-        remote_gid = sg_rule.get('remote_group_id')
-        if self.enable_ipset and remote_gid:
-            return self._generate_ipset_rule_args(sg_rule, remote_gid)
-        else:
-            return self._generate_plain_rule_args(sg_rule)
-
-    def _convert_sgr_to_iptables_rules(self, security_group_rules):
-        iptables_rules = []
-        self._allow_established(iptables_rules)
-        for rule in security_group_rules:
-            args = self._convert_sg_rule_to_iptables_args(rule)
-            if args:
-                iptables_rules += [' '.join(args)]
-
-        self._drop_invalid_packets(iptables_rules)
-        iptables_rules += [comment_rule('-j $sg-fallback',
-                                        comment=ic.UNMATCHED)]
-        return iptables_rules
-
-    def _drop_invalid_packets(self, iptables_rules):
-        # Always drop invalid packets
-        iptables_rules += [comment_rule('-m state --state INVALID -j DROP',
-                                        comment=ic.INVALID_DROP)]
-        return iptables_rules
-
-    def _allow_established(self, iptables_rules):
-        # Allow established connections
-        iptables_rules += [comment_rule(
-            '-m state --state RELATED,ESTABLISHED -j RETURN',
-            comment=ic.ALLOW_ASSOC)]
-        return iptables_rules
-
-    def _protocol_arg(self, protocol):
-        if not protocol:
-            return []
-        if protocol == 'icmpv6':
-            protocol = 'ipv6-icmp'
-        iptables_rule = ['-p', protocol]
-        return iptables_rule
-
-    def _port_arg(self, direction, protocol, port_range_min, port_range_max):
-        if (protocol not in ['udp', 'tcp', 'icmp', 'ipv6-icmp']
-            or port_range_min is None):
-            return []
-
-        protocol_modules = {'udp': 'udp', 'tcp': 'tcp',
-                            'icmp': 'icmp', 'ipv6-icmp': 'icmp6'}
-        # iptables adds '-m protocol' when the port number is specified
-        args = ['-m', protocol_modules[protocol]]
-
-        if protocol in ['icmp', 'ipv6-icmp']:
-            protocol_type = 'icmpv6' if protocol == 'ipv6-icmp' else 'icmp'
-            # Note(xuhanp): port_range_min/port_range_max represent
-            # icmp type/code when protocol is icmp or icmpv6
-            args += ['--%s-type' % protocol_type, '%s' % port_range_min]
-            # icmp code can be 0 so we cannot use "if port_range_max" here
-            if port_range_max is not None:
-                args[-1] += '/%s' % port_range_max
-        elif port_range_min == port_range_max:
-            args += ['--%s' % direction, '%s' % (port_range_min,)]
-        else:
-            args += ['-m', 'multiport', '--%ss' % direction,
-                     '%s:%s' % (port_range_min, port_range_max)]
-        return args
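-    # Illustrative sketch (hypothetical arguments):
-    #   _port_arg('dport', 'tcp', 80, 443) ->
-    #       ['-m', 'tcp', '-m', 'multiport', '--dports', '80:443']
-    #   _port_arg('dport', 'icmp', 8, 0) ->
-    #       ['-m', 'icmp', '--icmp-type', '8/0']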
-
-    def _ip_prefix_arg(self, direction, ip_prefix):
-        # NOTE(nati): source_group_id is converted to a list of
-        # source_ip_prefix values on the server side.
-        if ip_prefix:
-            if '/' not in ip_prefix:
-                # we need to convert it into a prefix to match iptables
-                ip_prefix = c_utils.ip_to_cidr(ip_prefix)
-            elif ip_prefix.endswith('/0'):
-                # an allow for every address is not a constraint so
-                # iptables drops it
-                return []
-            return ['-%s' % direction, ip_prefix]
-        return []
-
-    def _port_chain_name(self, port, direction):
-        return iptables_manager.get_chain_name(
-            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))
-
-    def filter_defer_apply_on(self):
-        if not self._defer_apply:
-            self.iptables.defer_apply_on()
-            self._pre_defer_filtered_ports = dict(self.filtered_ports)
-            self._pre_defer_unfiltered_ports = dict(self.unfiltered_ports)
-            self.pre_sg_members = dict(self.sg_members)
-            self.pre_sg_rules = dict(self.sg_rules)
-            self._defer_apply = True
-
-    def _remove_unused_security_group_info(self):
-        """Remove any unnecessary local security group info or unused ipsets.
-
-        This function has to be called after applying the last iptables
-        rules, so that we are at a point where no iptables rule depends
-        on an ipset we are about to delete.
-        """
-        filtered_ports = self.filtered_ports.values()
-
-        remote_sgs_to_remove = self._determine_remote_sgs_to_remove(
-            filtered_ports)
-
-        for ip_version, remote_sg_ids in six.iteritems(remote_sgs_to_remove):
-            if self.enable_ipset:
-                self._remove_ipsets_for_remote_sgs(ip_version, remote_sg_ids)
-
-        self._remove_sg_members(remote_sgs_to_remove)
-
-        # Remove unused security group rules
-        for remove_group_id in self._determine_sg_rules_to_remove(
-                filtered_ports):
-            self.sg_rules.pop(remove_group_id, None)
-
-    def _determine_remote_sgs_to_remove(self, filtered_ports):
-        """Calculate which remote security groups we don't need anymore.
-
-        We do the calculation for each ip_version.
-        """
-        sgs_to_remove_per_ipversion = {constants.IPv4: set(),
-                                       constants.IPv6: set()}
-        remote_group_id_sets = self._get_remote_sg_ids_sets_by_ipversion(
-            filtered_ports)
-        for ip_version, remote_group_id_set in (
-                six.iteritems(remote_group_id_sets)):
-            sgs_to_remove_per_ipversion[ip_version].update(
-                set(self.pre_sg_members) - remote_group_id_set)
-        return sgs_to_remove_per_ipversion
-
-    def _get_remote_sg_ids_sets_by_ipversion(self, filtered_ports):
-        """Given a port, calculates the remote sg references by ip_version."""
-        remote_group_id_sets = {constants.IPv4: set(),
-                                constants.IPv6: set()}
-        for port in filtered_ports:
-            remote_sg_ids = self._get_remote_sg_ids(port)
-            for ip_version in (constants.IPv4, constants.IPv6):
-                remote_group_id_sets[ip_version] |= remote_sg_ids[ip_version]
-        return remote_group_id_sets
-
-    def _determine_sg_rules_to_remove(self, filtered_ports):
-        """Calculate which security groups need to be removed.
-
-        We find out by subtracting the security groups associated
-        with the given ports from our previous sg group ids.
-        """
-        port_group_ids = self._get_sg_ids_set_for_ports(filtered_ports)
-        return set(self.pre_sg_rules) - port_group_ids
-
-    def _get_sg_ids_set_for_ports(self, filtered_ports):
-        """Get the port security group ids as a set."""
-        port_group_ids = set()
-        for port in filtered_ports:
-            port_group_ids.update(port.get('security_groups', []))
-        return port_group_ids
-
-    def _remove_ipsets_for_remote_sgs(self, ip_version, remote_sg_ids):
-        """Remove system ipsets matching the provided parameters."""
-        for remote_sg_id in remote_sg_ids:
-            self.ipset.destroy(remote_sg_id, ip_version)
-
-    def _remove_sg_members(self, remote_sgs_to_remove):
-        """Remove sg_member entries."""
-        ipv4_sec_group_set = remote_sgs_to_remove.get(constants.IPv4)
-        ipv6_sec_group_set = remote_sgs_to_remove.get(constants.IPv6)
-        for sg_id in (ipv4_sec_group_set & ipv6_sec_group_set):
-            if sg_id in self.sg_members:
-                del self.sg_members[sg_id]
-
-    def _find_deleted_sg_rules(self, sg_id):
-        del_rules = list()
-        for pre_rule in self.pre_sg_rules.get(sg_id, []):
-            if pre_rule not in self.sg_rules.get(sg_id, []):
-                del_rules.append(pre_rule)
-        return del_rules
-
-    def _find_devices_on_security_group(self, sg_id):
-        device_list = list()
-        for device in self.filtered_ports.values():
-            if sg_id in device.get('security_groups', []):
-                device_list.append(device)
-        return device_list
-
-    def _clean_deleted_sg_rule_conntrack_entries(self):
-        deleted_sg_ids = set()
-        for sg_id in self.updated_rule_sg_ids:
-            del_rules = self._find_deleted_sg_rules(sg_id)
-            if not del_rules:
-                continue
-            device_list = self._find_devices_on_security_group(sg_id)
-            for rule in del_rules:
-                self.ipconntrack.delete_conntrack_state_by_rule(
-                    device_list, rule)
-            deleted_sg_ids.add(sg_id)
-        for id in deleted_sg_ids:
-            self.updated_rule_sg_ids.remove(id)
-
-    def _clean_updated_sg_member_conntrack_entries(self):
-        updated_device_ids = set()
-        for device in self.updated_sg_members:
-            sec_group_change = False
-            device_info = self.filtered_ports.get(device)
-            pre_device_info = self._pre_defer_filtered_ports.get(device)
-            if not device_info or not pre_device_info:
-                continue
-            for sg_id in pre_device_info.get('security_groups', []):
-                if sg_id not in device_info.get('security_groups', []):
-                    sec_group_change = True
-                    break
-            if not sec_group_change:
-                continue
-            for ethertype in [constants.IPv4, constants.IPv6]:
-                self.ipconntrack.delete_conntrack_state_by_remote_ips(
-                    [device_info], ethertype, set())
-            updated_device_ids.add(device)
-        for id in updated_device_ids:
-            self.updated_sg_members.remove(id)
-
-    def _clean_deleted_remote_sg_members_conntrack_entries(self):
-        deleted_sg_ids = set()
-        for sg_id, devices in self.devices_with_updated_sg_members.items():
-            for ethertype in [constants.IPv4, constants.IPv6]:
-                pre_ips = self._get_sg_members(
-                    self.pre_sg_members, sg_id, ethertype)
-                cur_ips = self._get_sg_members(
-                    self.sg_members, sg_id, ethertype)
-                ips = (pre_ips - cur_ips)
-                if devices and ips:
-                    self.ipconntrack.delete_conntrack_state_by_remote_ips(
-                        devices, ethertype, ips)
-            deleted_sg_ids.add(sg_id)
-        for id in deleted_sg_ids:
-            self.devices_with_updated_sg_members.pop(id, None)
-
-    def _remove_conntrack_entries_from_sg_updates(self):
-        self._clean_deleted_sg_rule_conntrack_entries()
-        self._clean_updated_sg_member_conntrack_entries()
-        self._clean_deleted_remote_sg_members_conntrack_entries()
-
-    def _get_sg_members(self, sg_info, sg_id, ethertype):
-        return set(sg_info.get(sg_id, {}).get(ethertype, []))
-
-    def filter_defer_apply_off(self):
-        if self._defer_apply:
-            self._defer_apply = False
-            self._remove_chains_apply(self._pre_defer_filtered_ports,
-                                      self._pre_defer_unfiltered_ports)
-            self._setup_chains_apply(self.filtered_ports,
-                                     self.unfiltered_ports)
-            self.iptables.defer_apply_off()
-            self._remove_conntrack_entries_from_sg_updates()
-            self._remove_unused_security_group_info()
-            self._pre_defer_filtered_ports = None
-            self._pre_defer_unfiltered_ports = None
-
-    def _populate_initial_zone_map(self):
-        """Setup the map between devices and zones based on current rules."""
-        self._device_zone_map = {}
-        rules = self.iptables.get_rules_for_table('raw')
-        for rule in rules:
-            match = re.match(r'.* --physdev-in (?P<dev>[a-zA-Z0-9\-]+)'
-                             r'.* -j CT --zone (?P<zone>\d+).*', rule)
-            if match:
-                # strip off any prefix that the interface is using
-                short_port_id = match.group('dev')[LINUX_DEV_PREFIX_LEN:]
-                self._device_zone_map[short_port_id] = int(match.group('zone'))
-        LOG.debug("Populated conntrack zone map: %s", self._device_zone_map)
-
-    def get_device_zone(self, port_id):
-        # we have to key the device_zone_map based on the fragment of the port
-        # UUID that shows up in the interface name. This is because the initial
-        # map is populated strictly based on interface names that we don't know
-        # the full UUID of.
-        short_port_id = port_id[:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
-        try:
-            return self._device_zone_map[short_port_id]
-        except KeyError:
-            return self._generate_device_zone(short_port_id)
-
-    def _free_zones_from_removed_ports(self):
-        """Clears any entries from the zone map of removed ports."""
-        existing_ports = [
-            port['device'][:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
-            for port in (list(self.filtered_ports.values()) +
-                         list(self.unfiltered_ports.values()))
-        ]
-        removed = set(self._device_zone_map) - set(existing_ports)
-        for dev in removed:
-            self._device_zone_map.pop(dev, None)
-
-    def _generate_device_zone(self, short_port_id):
-        """Generates a unique conntrack zone for the passed in ID."""
-        try:
-            zone = self._find_open_zone()
-        except n_exc.CTZoneExhaustedError:
-            # Free some zones and retry; a repeated failure will not be caught
-            self._free_zones_from_removed_ports()
-            zone = self._find_open_zone()
-
-        self._device_zone_map[short_port_id] = zone
-        LOG.debug("Assigned CT zone %(z)s to port %(dev)s.",
-                  {'z': zone, 'dev': short_port_id})
-        return self._device_zone_map[short_port_id]
-
-    def _find_open_zone(self):
-        # use set() to dedup; old ports may be mapped to the same zone.
-        zones_in_use = sorted(set(self._device_zone_map.values()))
-        if not zones_in_use:
-            return 1
-        # attempt to increment onto the highest used zone first. if we hit the
-        # end, go back and look for any gaps left by removed devices.
-        last = zones_in_use[-1]
-        if last < MAX_CONNTRACK_ZONES:
-            return last + 1
-        for index, used in enumerate(zones_in_use):
-            if used - index != 1:
-                # gap found, let's use it!
-                return index + 1
-        # conntrack zones exhausted :( :(
-        raise n_exc.CTZoneExhaustedError()
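-    # Illustrative sketch of the scan above (hypothetical zone map): with
-    # zones_in_use == [1, 2, 4] the next zone is 5; once the highest zone
-    # equals MAX_CONNTRACK_ZONES, the gap scan returns 3, the first point
-    # where used - index != 1.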
-
-
-class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
-    OVS_HYBRID_TAP_PREFIX = constants.TAP_DEVICE_PREFIX
-
-    def _port_chain_name(self, port, direction):
-        return iptables_manager.get_chain_name(
-            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device']))
-
-    def _get_device_name(self, port):
-        return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN]
-
-    def _get_br_device_name(self, port):
-        return ('qvb' + port['device'])[:LINUX_DEV_LEN]
-
-    def _get_jump_rule(self, port, direction):
-        if direction == firewall.INGRESS_DIRECTION:
-            device = self._get_br_device_name(port)
-        else:
-            device = self._get_device_name(port)
-        jump_rule = '-m physdev --physdev-in %s -j CT --zone %s' % (
-            device, self.get_device_zone(port['device']))
-        return jump_rule
-
-    def _add_raw_chain_rules(self, port, direction):
-        jump_rule = self._get_jump_rule(port, direction)
-        self.iptables.ipv4['raw'].add_rule('PREROUTING', jump_rule)
-        self.iptables.ipv6['raw'].add_rule('PREROUTING', jump_rule)
-
-    def _remove_raw_chain_rules(self, port, direction):
-        jump_rule = self._get_jump_rule(port, direction)
-        self.iptables.ipv4['raw'].remove_rule('PREROUTING', jump_rule)
-        self.iptables.ipv6['raw'].remove_rule('PREROUTING', jump_rule)
-
-    def _add_chain(self, port, direction):
-        super(OVSHybridIptablesFirewallDriver, self)._add_chain(port,
-                                                                direction)
-        if direction in [firewall.INGRESS_DIRECTION,
-                         firewall.EGRESS_DIRECTION]:
-            self._add_raw_chain_rules(port, direction)
-
-    def _remove_chain(self, port, direction):
-        super(OVSHybridIptablesFirewallDriver, self)._remove_chain(port,
-                                                                   direction)
-        if direction in [firewall.INGRESS_DIRECTION,
-                         firewall.EGRESS_DIRECTION]:
-            self._remove_raw_chain_rules(port, direction)
diff --git a/neutron/agent/linux/iptables_manager.py b/neutron/agent/linux/iptables_manager.py
deleted file mode 100644 (file)
index 40340ca..0000000
+++ /dev/null
@@ -1,748 +0,0 @@
-# Copyright 2012 Locaweb.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-#
-# based on
-# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
-
-"""Implements iptables rules using linux utilities."""
-
-import collections
-import contextlib
-import difflib
-import os
-import re
-import sys
-
-from oslo_concurrency import lockutils
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import excutils
-import six
-
-from neutron._i18n import _, _LE, _LW
-from neutron.agent.common import config
-from neutron.agent.linux import iptables_comments as ic
-from neutron.agent.linux import utils as linux_utils
-from neutron.common import exceptions as n_exc
-from neutron.common import utils
-
-LOG = logging.getLogger(__name__)
-
-config.register_iptables_opts(cfg.CONF)
-
-
-# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
-#             add up to 12 characters to binary_name which is used as a prefix,
-#             so we limit it to 16 characters.
-#             (max_chain_name_length - len('-POSTROUTING') == 16)
-def get_binary_name():
-    """Grab the name of the binary we're running in."""
-    return os.path.basename(sys.argv[0])[:16].replace(' ', '_')
-
-binary_name = get_binary_name()
-
-# The length of a chain name must be less than or equal to 11 characters.
-# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
-MAX_CHAIN_LEN_WRAP = 11
-MAX_CHAIN_LEN_NOWRAP = 28
-
-# Number of iptables rules to print before and after a rule that causes
-# a failure during iptables-restore
-IPTABLES_ERROR_LINES_OF_CONTEXT = 5
-
-
-def comment_rule(rule, comment):
-    if not cfg.CONF.AGENT.comment_iptables_rules or not comment:
-        return rule
-    # iptables-save outputs the comment before the jump so we need to match
-    # that order so _find_last_entry works
-    comment = '-m comment --comment "%s"' % comment
-    if rule.startswith('-j'):
-        # this is a jump only rule so we just put the comment first
-        return '%s %s' % (comment, rule)
-    try:
-        jpos = rule.index(' -j ')
-        return ' '.join((rule[:jpos], comment, rule[jpos + 1:]))
-    except ValueError:
-        return '%s %s' % (rule, comment)
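-# Illustrative sketch, assuming comment_iptables_rules is enabled:
-#   comment_rule('-j RETURN', 'c')
-#       -> '-m comment --comment "c" -j RETURN'
-#   comment_rule('-p tcp -j RETURN', 'c')
-#       -> '-p tcp -m comment --comment "c" -j RETURN'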
-
-
-def get_chain_name(chain_name, wrap=True):
-    if wrap:
-        return chain_name[:MAX_CHAIN_LEN_WRAP]
-    else:
-        return chain_name[:MAX_CHAIN_LEN_NOWRAP]
-
-
-class IptablesRule(object):
-    """An iptables rule.
-
-    You shouldn't need to use this class directly, it's only used by
-    IptablesManager.
-
-    """
-
-    def __init__(self, chain, rule, wrap=True, top=False,
-                 binary_name=binary_name, tag=None, comment=None):
-        self.chain = get_chain_name(chain, wrap)
-        self.rule = rule
-        self.wrap = wrap
-        self.top = top
-        self.wrap_name = binary_name[:16]
-        self.tag = tag
-        self.comment = comment
-
-    def __eq__(self, other):
-        return ((self.chain == other.chain) and
-                (self.rule == other.rule) and
-                (self.top == other.top) and
-                (self.wrap == other.wrap))
-
-    def __ne__(self, other):
-        return not self == other
-
-    def __str__(self):
-        if self.wrap:
-            chain = '%s-%s' % (self.wrap_name, self.chain)
-        else:
-            chain = self.chain
-        return comment_rule('-A %s %s' % (chain, self.rule), self.comment)
-
-
-class IptablesTable(object):
-    """An iptables table."""
-
-    def __init__(self, binary_name=binary_name):
-        self.rules = []
-        self.remove_rules = []
-        self.chains = set()
-        self.unwrapped_chains = set()
-        self.remove_chains = set()
-        self.wrap_name = binary_name[:16]
-
-    def add_chain(self, name, wrap=True):
-        """Adds a named chain to the table.
-
-        The chain name is wrapped to be unique for the component creating
-        it, so different components of Nova can safely create identically
-        named chains without interfering with one another.
-
-        At the moment, its wrapped name is <binary name>-<chain name>,
-        so if neutron-openvswitch-agent creates a chain named 'OUTPUT',
-        it'll actually end up being named 'neutron-openvswi-OUTPUT'.
-
-        """
-        name = get_chain_name(name, wrap)
-        if wrap:
-            self.chains.add(name)
-        else:
-            self.unwrapped_chains.add(name)
-
-    def _select_chain_set(self, wrap):
-        if wrap:
-            return self.chains
-        else:
-            return self.unwrapped_chains
-
-    def remove_chain(self, name, wrap=True):
-        """Remove named chain.
-
-        This removal "cascades". All rule in the chain are removed, as are
-        all rules in other chains that jump to it.
-
-        If the chain is not found, this is merely logged.
-
-        """
-        name = get_chain_name(name, wrap)
-        chain_set = self._select_chain_set(wrap)
-
-        if name not in chain_set:
-            LOG.debug('Attempted to remove chain %s which does not exist',
-                      name)
-            return
-
-        chain_set.remove(name)
-
-        if not wrap:
-            # non-wrapped chains and rules need to be dealt with specially,
-            # so we keep a list of them to be iterated over in apply()
-            self.remove_chains.add(name)
-
-            # first, add rules to remove that have a matching chain name
-            self.remove_rules += [str(r) for r in self.rules
-                                  if r.chain == name]
-
-        # next, remove rules from list that have a matching chain name
-        self.rules = [r for r in self.rules if r.chain != name]
-
-        if not wrap:
-            jump_snippet = '-j %s' % name
-            # next, add rules to remove that have a matching jump chain
-            self.remove_rules += [str(r) for r in self.rules
-                                  if jump_snippet in r.rule]
-        else:
-            jump_snippet = '-j %s-%s' % (self.wrap_name, name)
-
-        # finally, remove rules from list that have a matching jump chain
-        self.rules = [r for r in self.rules
-                      if jump_snippet not in r.rule]
-
-    def add_rule(self, chain, rule, wrap=True, top=False, tag=None,
-                 comment=None):
-        """Add a rule to the table.
-
-        This is just like what you'd feed to iptables, just without
-        the '-A <chain name>' bit at the start.
-
-        However, if you need to jump to one of your wrapped chains,
-        prepend its name with a '$' which will ensure the wrapping
-        is applied correctly.
-
-        """
-        chain = get_chain_name(chain, wrap)
-        if wrap and chain not in self.chains:
-            raise LookupError(_('Unknown chain: %r') % chain)
-
-        if '$' in rule:
-            rule = ' '.join(
-                self._wrap_target_chain(e, wrap) for e in rule.split(' '))
-
-        self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
-                                       tag, comment))
-
-    def _wrap_target_chain(self, s, wrap):
-        if s.startswith('$'):
-            s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
-
-        return s
-
-    def remove_rule(self, chain, rule, wrap=True, top=False, comment=None):
-        """Remove a rule from a chain.
-
-        Note: The rule must be exactly identical to the one that was added.
-        You cannot switch arguments around like you can with the iptables
-        CLI tool.
-
-        """
-        chain = get_chain_name(chain, wrap)
-        try:
-            if '$' in rule:
-                rule = ' '.join(
-                    self._wrap_target_chain(e, wrap) for e in rule.split(' '))
-
-            self.rules.remove(IptablesRule(chain, rule, wrap, top,
-                                           self.wrap_name,
-                                           comment=comment))
-            if not wrap:
-                self.remove_rules.append(str(IptablesRule(chain, rule, wrap,
-                                                          top, self.wrap_name,
-                                                          comment=comment)))
-        except ValueError:
-            LOG.warn(_LW('Tried to remove rule that was not there:'
-                         ' %(chain)r %(rule)r %(wrap)r %(top)r'),
-                     {'chain': chain, 'rule': rule,
-                      'top': top, 'wrap': wrap})
-
-    def _get_chain_rules(self, chain, wrap):
-        chain = get_chain_name(chain, wrap)
-        return [rule for rule in self.rules
-                if rule.chain == chain and rule.wrap == wrap]
-
-    def empty_chain(self, chain, wrap=True):
-        """Remove all rules from a chain."""
-        chained_rules = self._get_chain_rules(chain, wrap)
-        for rule in chained_rules:
-            self.rules.remove(rule)
-
-    def clear_rules_by_tag(self, tag):
-        if not tag:
-            return
-        rules = [rule for rule in self.rules if rule.tag == tag]
-        for rule in rules:
-            self.rules.remove(rule)
-
-
-class IptablesManager(object):
-    """Wrapper for iptables.
-
-    See IptablesTable for some usage docs
-
-    A number of chains are set up to begin with.
-
-    First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT.
-    Its name is not wrapped, so it's shared between the various neutron
-    workers. It's intended for rules that need to live at the top of the
-    FORWARD and OUTPUT chains. It's in both the ipv4 and ipv6 set of tables.
-
-    For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
-    are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
-    the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
-    "local" which is jumped to from neutron-filter-top.
-
-    For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
-    wrapped in the same way as the built-in filter chains. Additionally,
-    there's a snat chain that is applied after the POSTROUTING chain.
-
-    """
-
-    def __init__(self, _execute=None, state_less=False, use_ipv6=False,
-                 namespace=None, binary_name=binary_name):
-        if _execute:
-            self.execute = _execute
-        else:
-            self.execute = linux_utils.execute
-
-        self.use_ipv6 = use_ipv6
-        self.namespace = namespace
-        self.iptables_apply_deferred = False
-        self.wrap_name = binary_name[:16]
-
-        self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
-        self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
-
-        # Add a neutron-filter-top chain. It's intended to be shared
-        # among the various neutron components. It sits at the very top
-        # of FORWARD and OUTPUT.
-        for tables in [self.ipv4, self.ipv6]:
-            tables['filter'].add_chain('neutron-filter-top', wrap=False)
-            tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
-                                      wrap=False, top=True)
-            tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
-                                      wrap=False, top=True)
-
-            tables['filter'].add_chain('local')
-            tables['filter'].add_rule('neutron-filter-top', '-j $local',
-                                      wrap=False)
-
-        # Wrap the built-in chains
-        builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
-                          6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
-
-        if not state_less:
-            self.ipv4.update(
-                {'mangle': IptablesTable(binary_name=self.wrap_name)})
-            builtin_chains[4].update(
-                {'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT',
-                            'POSTROUTING']})
-            self.ipv4.update(
-                {'nat': IptablesTable(binary_name=self.wrap_name)})
-            builtin_chains[4].update({'nat': ['PREROUTING',
-                                      'OUTPUT', 'POSTROUTING']})
-
-        self.ipv4.update({'raw': IptablesTable(binary_name=self.wrap_name)})
-        builtin_chains[4].update({'raw': ['PREROUTING', 'OUTPUT']})
-        self.ipv6.update({'raw': IptablesTable(binary_name=self.wrap_name)})
-        builtin_chains[6].update({'raw': ['PREROUTING', 'OUTPUT']})
-
-        for ip_version in builtin_chains:
-            if ip_version == 4:
-                tables = self.ipv4
-            elif ip_version == 6:
-                tables = self.ipv6
-
-            for table, chains in six.iteritems(builtin_chains[ip_version]):
-                for chain in chains:
-                    tables[table].add_chain(chain)
-                    tables[table].add_rule(chain, '-j $%s' %
-                                           (chain), wrap=False)
-
-        if not state_less:
-            # Add a neutron-postrouting-bottom chain. It's intended to be
-            # shared among the various neutron components. We set it as the
-            # last chain of POSTROUTING chain.
-            self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
-                                       wrap=False)
-            self.ipv4['nat'].add_rule('POSTROUTING',
-                                      '-j neutron-postrouting-bottom',
-                                      wrap=False)
-
-            # We add a snat chain to the shared neutron-postrouting-bottom
-            # chain so that it's applied last.
-            self.ipv4['nat'].add_chain('snat')
-            self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
-                                      '-j $snat', wrap=False,
-                                      comment=ic.SNAT_OUT)
-
-            # And then we add a float-snat chain and jump to it as the
-            # first rule of the snat chain.
-            self.ipv4['nat'].add_chain('float-snat')
-            self.ipv4['nat'].add_rule('snat', '-j $float-snat')
-
-            # Add a mark chain to mangle PREROUTING chain. It is used to
-            # identify ingress packets from a certain interface.
-            self.ipv4['mangle'].add_chain('mark')
-            self.ipv4['mangle'].add_rule('PREROUTING', '-j $mark')
-
-    def get_chain(self, table, chain, ip_version=4, wrap=True):
-        try:
-            requested_table = {4: self.ipv4, 6: self.ipv6}[ip_version][table]
-        except KeyError:
-            return []
-        return requested_table._get_chain_rules(chain, wrap)
-
-    def is_chain_empty(self, table, chain, ip_version=4, wrap=True):
-        return not self.get_chain(table, chain, ip_version, wrap)
-
-    @contextlib.contextmanager
-    def defer_apply(self):
-        """Defer apply context."""
-        self.defer_apply_on()
-        try:
-            yield
-        finally:
-            try:
-                self.defer_apply_off()
-            except Exception:
-                msg = _('Failure applying iptables rules')
-                LOG.exception(msg)
-                raise n_exc.IpTablesApplyException(msg)
-
-    def defer_apply_on(self):
-        self.iptables_apply_deferred = True
-
-    def defer_apply_off(self):
-        self.iptables_apply_deferred = False
-        self._apply()
-
-    def apply(self):
-        if self.iptables_apply_deferred:
-            return
-
-        return self._apply()
-
-    def _apply(self):
-        lock_name = 'iptables'
-        if self.namespace:
-            lock_name += '-' + self.namespace
-
-        with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
-            return self._apply_synchronized()
-
-    def get_rules_for_table(self, table):
-        """Runs iptables-save on a table and returns the results."""
-        args = ['iptables-save', '-t', table]
-        if self.namespace:
-            args = ['ip', 'netns', 'exec', self.namespace] + args
-        return self.execute(args, run_as_root=True).split('\n')
-
-    def _apply_synchronized(self):
-        """Apply the current in-memory set of iptables rules.
-
-        This will create a diff between the rules from the previous runs
-        and replace them with the current set of rules.
-        This happens atomically, thanks to iptables-restore.
-
-        Returns a list of the changes that were sent to iptables-restore.
-        """
-        s = [('iptables', self.ipv4)]
-        if self.use_ipv6:
-            s += [('ip6tables', self.ipv6)]
-        all_commands = []  # variable to keep track all commands for return val
-        for cmd, tables in s:
-            args = ['%s-save' % (cmd,)]
-            if self.namespace:
-                args = ['ip', 'netns', 'exec', self.namespace] + args
-            save_output = self.execute(args, run_as_root=True)
-            all_lines = save_output.split('\n')
-            commands = []
-            # Traverse tables in sorted order for predictable dump output
-            for table_name in sorted(tables):
-                table = tables[table_name]
-                # isolate the lines of the table we are modifying
-                start, end = self._find_table(all_lines, table_name)
-                old_rules = all_lines[start:end]
-                # generate the new table state we want
-                new_rules = self._modify_rules(old_rules, table, table_name)
-                # generate the iptables commands to get between the old state
-                # and the new state
-                changes = _generate_path_between_rules(old_rules, new_rules)
-                if changes:
-                    # if there are changes to the table, we put on the header
-                    # and footer that iptables-restore needs
-                    commands += (['# Generated by iptables_manager'] +
-                                 ['*%s' % table_name] + changes +
-                                 ['COMMIT', '# Completed by iptables_manager'])
-            if not commands:
-                continue
-            all_commands += commands
-            args = ['%s-restore' % (cmd,), '-n']
-            if self.namespace:
-                args = ['ip', 'netns', 'exec', self.namespace] + args
-            try:
-                # always end with a new line
-                commands.append('')
-                self.execute(args, process_input='\n'.join(commands),
-                             run_as_root=True)
-            except RuntimeError as r_error:
-                with excutils.save_and_reraise_exception():
-                    try:
-                        line_no = int(re.search(
-                            'iptables-restore: line ([0-9]+?) failed',
-                            str(r_error)).group(1))
-                        context = IPTABLES_ERROR_LINES_OF_CONTEXT
-                        log_start = max(0, line_no - context)
-                        log_end = line_no + context
-                    except AttributeError:
-                        # line error wasn't found, print all lines instead
-                        log_start = 0
-                        log_end = len(commands)
-                    log_lines = ('%7d. %s' % (idx, l)
-                                 for idx, l in enumerate(
-                                     commands[log_start:log_end],
-                                     log_start + 1)
-                                 )
-                    LOG.error(_LE("IPTablesManager.apply failed to apply the "
-                                  "following set of iptables rules:\n%s"),
-                              '\n'.join(log_lines))
-        LOG.debug("IPTablesManager.apply completed with success. %d iptables "
-                  "commands were issued", len(all_commands))
-        return all_commands
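-    # Illustrative sketch of a restore payload built above for one changed
-    # filter table (chain name and positions are hypothetical):
-    #   # Generated by iptables_manager
-    #   *filter
-    #   -D neutron-openvswi-sg-chain 2
-    #   -I neutron-openvswi-sg-chain 2 -j RETURN
-    #   COMMIT
-    #   # Completed by iptables_manager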
-
-    def _find_table(self, lines, table_name):
-        if len(lines) < 3:
-            # only a fake iptables (e.g. in tests) dumps fewer than 3 lines
-            return (0, 0)
-        try:
-            start = lines.index('*%s' % table_name)
-        except ValueError:
-            # Couldn't find table_name
-            LOG.debug('Unable to find table %s', table_name)
-            return (0, 0)
-        end = lines[start:].index('COMMIT') + start + 1
-        return (start, end)
-
-    def _find_rules_index(self, lines):
-        seen_chains = False
-        rules_index = 0
-        for rules_index, rule in enumerate(lines):
-            if not seen_chains:
-                if rule.startswith(':'):
-                    seen_chains = True
-            else:
-                if not rule.startswith(':'):
-                    break
-
-        if not seen_chains:
-            rules_index = 2
-
-        return rules_index
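-    # Illustrative sketch (hypothetical dump): for
-    #   ['*filter', ':INPUT - [0:0]', ':FORWARD - [0:0]',
-    #    '-A INPUT -j ACCEPT']
-    # the first rule follows the chain declarations, so 3 is returned.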
-
-    def _modify_rules(self, current_lines, table, table_name):
-        # Chains are stored as sets to avoid duplicates.
-        # Sort the output chains here to make their order predictable.
-        unwrapped_chains = sorted(table.unwrapped_chains)
-        chains = sorted(table.chains)
-
-        # we don't want to change any rules that don't belong to us so we start
-        # the new_filter with these rules
-        new_filter = [line.strip() for line in current_lines
-                      if self.wrap_name not in line]
-
-        # generate our list of chain names
-        our_chains = [':%s-%s' % (self.wrap_name, name) for name in chains]
-
-        # the unwrapped chains (e.g. neutron-filter-top) may already exist in
-        # the new_filter since they aren't marked by the wrap_name, so we
-        # only want to add them if they aren't already there
-        our_chains += [':%s' % name for name in unwrapped_chains
-                       if not any(':%s' % name in s for s in new_filter)]
-
-        our_top_rules = []
-        our_bottom_rules = []
-        for rule in table.rules:
-            rule_str = str(rule)
-            # similar to the unwrapped chains, there are some rules that belong
-            # to us but they don't have the wrap name. we want to remove them
-            # from the new_filter and then add them in the right location in
-            # case our new rules changed the order.
-            # (e.g. '-A FORWARD -j neutron-filter-top')
-            new_filter = [s for s in new_filter if rule_str not in s]
-
-            if rule.top:
-                # rule.top == True means we want this rule to be at the top.
-                our_top_rules += [rule_str]
-            else:
-                our_bottom_rules += [rule_str]
-
-        our_chains_and_rules = our_chains + our_top_rules + our_bottom_rules
-
-        # locate the position immediately after the existing chains to insert
-        # our chains and rules
-        rules_index = self._find_rules_index(new_filter)
-        new_filter[rules_index:rules_index] = our_chains_and_rules
-
-        def _weed_out_removes(line):
-            # remove any rules or chains from the filter that were slated
-            # for removal
-            if line.startswith(':'):
-                chain = line[1:]
-                if chain in table.remove_chains:
-                    table.remove_chains.remove(chain)
-                    return False
-            else:
-                if line in table.remove_rules:
-                    table.remove_rules.remove(line)
-                    return False
-            # Leave it alone
-            return True
-
-        seen_lines = set()
-
-        # TODO(kevinbenton): remove this function and the next one. They are
-        # just oversized brooms to sweep bugs under the rug!!! We generate the
-        # rules and we shouldn't be generating duplicates.
-        def _weed_out_duplicates(line):
-            if line in seen_lines:
-                thing = 'chain' if line.startswith(':') else 'rule'
-                LOG.warning(_LW("Duplicate iptables %(thing)s detected. This "
-                                "may indicate a bug in the the iptables "
-                                "%(thing)s generation code. Line: %(line)s"),
-                            {'thing': thing, 'line': line})
-                return False
-            seen_lines.add(line)
-            # Leave it alone
-            return True
-
-        new_filter.reverse()
-        new_filter = [line for line in new_filter
-                      if _weed_out_duplicates(line) and
-                      _weed_out_removes(line)]
-        new_filter.reverse()
-
-        # flush lists, just in case a rule or chain marked for removal
-        # was already gone. (chains is a set, rules is a list)
-        table.remove_chains.clear()
-        table.remove_rules = []
-
-        return new_filter
-
-    def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
-        name = get_chain_name(chain, wrap)
-
-        cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
-                      if name in table._select_chain_set(wrap)]
-
-        if self.use_ipv6:
-            cmd_tables += [('ip6tables', key)
-                           for key, table in self.ipv6.items()
-                           if name in table._select_chain_set(wrap)]
-
-        return cmd_tables
-
-    def get_traffic_counters(self, chain, wrap=True, zero=False):
-        """Return the sum of the traffic counters of all rules of a chain."""
-        cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
-        if not cmd_tables:
-            LOG.warn(_LW('Attempted to get traffic counters of chain %s which '
-                         'does not exist'), chain)
-            return
-
-        name = get_chain_name(chain, wrap)
-        acc = {'pkts': 0, 'bytes': 0}
-
-        for cmd, table in cmd_tables:
-            args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
-            if zero:
-                args.append('-Z')
-            if self.namespace:
-                args = ['ip', 'netns', 'exec', self.namespace] + args
-            current_table = self.execute(args, run_as_root=True)
-            current_lines = current_table.split('\n')
-
-            for line in current_lines[2:]:
-                if not line:
-                    break
-                data = line.split()
-                if (len(data) < 2 or
-                        not data[0].isdigit() or
-                        not data[1].isdigit()):
-                    break
-
-                acc['pkts'] += int(data[0])
-                acc['bytes'] += int(data[1])
-
-        return acc
-
-
-def _generate_path_between_rules(old_rules, new_rules):
-    """Generates iptables commands to get from old_rules to new_rules.
-
-    This function diffs the two rule sets and then calculates the iptables
-    commands necessary to get from the old rules to the new rules using
-    insert and delete commands.
-    """
-    old_by_chain = _get_rules_by_chain(old_rules)
-    new_by_chain = _get_rules_by_chain(new_rules)
-    old_chains, new_chains = set(old_by_chain.keys()), set(new_by_chain.keys())
-    # all referenced chains should be declared at the top before rules.
-
-    # NOTE(kevinbenton): sorting and grouping chains is for determinism in
-    # tests. iptables doesn't care about the order here
-    statements = [':%s - [0:0]' % c for c in sorted(new_chains - old_chains)]
-    sg_chains = []
-    other_chains = []
-    for chain in sorted(old_chains | new_chains):
-        if '-sg-' in chain:
-            sg_chains.append(chain)
-        else:
-            other_chains.append(chain)
-
-    for chain in other_chains + sg_chains:
-        statements += _generate_chain_diff_iptables_commands(
-            chain, old_by_chain[chain], new_by_chain[chain])
-    # unreferenced chains get the axe
-    for chain in sorted(old_chains - new_chains):
-        statements += ['-X %s' % chain]
-    return statements
-
-
-def _get_rules_by_chain(rules):
-    by_chain = collections.defaultdict(list)
-    for line in rules:
-        if line.startswith(':'):
-            chain = line[1:].split(' ', 1)[0]
-            # even though this is a default dict, we need to manually add
-            # chains to ensure that ones without rules are included because
-            # they might be a jump reference
-            if chain not in by_chain:
-                by_chain[chain] = []
-        elif line.startswith('-A'):
-            chain = line[3:].split(' ', 1)[0]
-            by_chain[chain].append(line)
-    return by_chain
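-# Illustrative sketch (hypothetical lines):
-#   _get_rules_by_chain([':c1 - [0:0]', '-A c1 -j DROP'])
-# returns {'c1': ['-A c1 -j DROP']}; a chain line without rules still gets
-# an empty list so that jump references to it survive the diff.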
-
-
-def _generate_chain_diff_iptables_commands(chain, old_chain_rules,
-                                          new_chain_rules):
-    # keep track of the old index because we have to insert rules
-    # in the right position
-    old_index = 1
-    statements = []
-    for line in difflib.ndiff(old_chain_rules, new_chain_rules):
-        if line.startswith('?'):
-            # skip ? because that's a guide string for intraline differences
-            continue
-        elif line.startswith('-'):  # line deleted
-            statements.append('-D %s %d' % (chain, old_index))
-            # since we are removing a line from the old rules, we
-            # back up the index by 1
-            old_index -= 1
-        elif line.startswith('+'):  # line added
-            # strip the chain name since we have to add it before the index
-            rule = line[5:].split(' ', 1)[-1]
-            # rule inserted at this position
-            statements.append('-I %s %d %s' % (chain, old_index, rule))
-        old_index += 1
-    return statements
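-# Illustrative sketch (hypothetical chain 'c'): diffing
-#   old = ['-A c -j DROP']  against  new = ['-A c -j ACCEPT']
-# yields ['-D c 1', '-I c 1 -j ACCEPT']: delete the old rule at position 1,
-# then insert the replacement at the same position.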
diff --git a/neutron/agent/linux/keepalived.py b/neutron/agent/linux/keepalived.py
deleted file mode 100644 (file)
index 5950b1b..0000000
+++ /dev/null
@@ -1,437 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import errno
-import itertools
-import os
-
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _
-from neutron.agent.linux import external_process
-from neutron.common import exceptions
-from neutron.common import utils as common_utils
-
-VALID_STATES = ['MASTER', 'BACKUP']
-VALID_AUTH_TYPES = ['AH', 'PASS']
-HA_DEFAULT_PRIORITY = 50
-PRIMARY_VIP_RANGE_SIZE = 24
-# TODO(amuller): Use L3 agent constant when new constants module is introduced.
-FIP_LL_SUBNET = '169.254.30.0/23'
-KEEPALIVED_SERVICE_NAME = 'keepalived'
-GARP_MASTER_REPEAT = 5
-GARP_MASTER_REFRESH = 10
-
-LOG = logging.getLogger(__name__)
-
-
-def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE):
-    """Get a free IP range, from parent_range, of the specified size.
-
-    :param parent_range: String representing an IP range, e.g. '169.254.0.0/16'
-    :param excluded_ranges: A list of strings to be excluded from parent_range
-    :param size: The prefix length of the range to return
-    :return: A string representing an IP range
-    """
-    free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges)
-    for cidr in free_cidrs.iter_cidrs():
-        if cidr.prefixlen <= size:
-            return '%s/%s' % (cidr.network, size)
-
-    raise ValueError(_('Network of size %(size)s, from IP range '
-                       '%(parent_range)s excluding IP ranges '
-                       '%(excluded_ranges)s was not found.') %
-                     {'size': size,
-                      'parent_range': parent_range,
-                      'excluded_ranges': excluded_ranges})
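-# Illustrative sketch (hypothetical ranges):
-#   get_free_range('169.254.0.0/16', ['169.254.0.0/24'], size=24)
-# returns '169.254.1.0/24', the first free CIDR of the requested size.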
-
-
-class InvalidInstanceStateException(exceptions.NeutronException):
-    message = _('Invalid instance state: %(state)s, valid states are: '
-                '%(valid_states)s')
-
-    def __init__(self, **kwargs):
-        if 'valid_states' not in kwargs:
-            kwargs['valid_states'] = ', '.join(VALID_STATES)
-        super(InvalidInstanceStateException, self).__init__(**kwargs)
-
-
-class InvalidAuthenticationTypeException(exceptions.NeutronException):
-    message = _('Invalid authentication type: %(auth_type)s, '
-                'valid types are: %(valid_auth_types)s')
-
-    def __init__(self, **kwargs):
-        if 'valid_auth_types' not in kwargs:
-            kwargs['valid_auth_types'] = ', '.join(VALID_AUTH_TYPES)
-        super(InvalidAuthenticationTypeException, self).__init__(**kwargs)
-
-
-class KeepalivedVipAddress(object):
-    """A virtual address entry of a keepalived configuration."""
-
-    def __init__(self, ip_address, interface_name, scope=None):
-        self.ip_address = ip_address
-        self.interface_name = interface_name
-        self.scope = scope
-
-    def __eq__(self, other):
-        return (isinstance(other, KeepalivedVipAddress) and
-                self.ip_address == other.ip_address)
-
-    def __str__(self):
-        return '[%s, %s, %s]' % (self.ip_address,
-                                 self.interface_name,
-                                 self.scope)
-
-    def build_config(self):
-        result = '%s dev %s' % (self.ip_address, self.interface_name)
-        if self.scope:
-            result += ' scope %s' % self.scope
-        return result
-
-
-class KeepalivedVirtualRoute(object):
-    """A virtual route entry of a keepalived configuration."""
-
-    def __init__(self, destination, nexthop, interface_name=None,
-                 scope=None):
-        self.destination = destination
-        self.nexthop = nexthop
-        self.interface_name = interface_name
-        self.scope = scope
-
-    def build_config(self):
-        output = self.destination
-        if self.nexthop:
-            output += ' via %s' % self.nexthop
-        if self.interface_name:
-            output += ' dev %s' % self.interface_name
-        if self.scope:
-            output += ' scope %s' % self.scope
-        return output
-
-
-class KeepalivedInstanceRoutes(object):
-    def __init__(self):
-        self.gateway_routes = []
-        self.extra_routes = []
-        self.extra_subnets = []
-
-    def remove_routes_on_interface(self, interface_name):
-        self.gateway_routes = [gw_rt for gw_rt in self.gateway_routes
-                               if gw_rt.interface_name != interface_name]
-        # NOTE(amuller): extra_routes are initialized from the router's
-        # 'routes' attribute. These routes do not have an interface
-        # parameter and so cannot be removed via an interface_name lookup.
-        self.extra_subnets = [route for route in self.extra_subnets if
-                              route.interface_name != interface_name]
-
-    @property
-    def routes(self):
-        return self.gateway_routes + self.extra_routes + self.extra_subnets
-
-    def __len__(self):
-        return len(self.routes)
-
-    def build_config(self):
-        return itertools.chain(['    virtual_routes {'],
-                               ('        %s' % route.build_config()
-                                for route in self.routes),
-                               ['    }'])
-
-
-class KeepalivedInstance(object):
-    """Instance section of a keepalived configuration."""
-
-    def __init__(self, state, interface, vrouter_id, ha_cidrs,
-                 priority=HA_DEFAULT_PRIORITY, advert_int=None,
-                 mcast_src_ip=None, nopreempt=False,
-                 garp_master_repeat=GARP_MASTER_REPEAT,
-                 garp_master_refresh=GARP_MASTER_REFRESH):
-        self.name = 'VR_%s' % vrouter_id
-
-        if state not in VALID_STATES:
-            raise InvalidInstanceStateException(state=state)
-
-        self.state = state
-        self.interface = interface
-        self.vrouter_id = vrouter_id
-        self.priority = priority
-        self.nopreempt = nopreempt
-        self.advert_int = advert_int
-        self.mcast_src_ip = mcast_src_ip
-        self.garp_master_repeat = garp_master_repeat
-        self.garp_master_refresh = garp_master_refresh
-        self.track_interfaces = []
-        self.vips = []
-        self.virtual_routes = KeepalivedInstanceRoutes()
-        self.authentication = None
-        metadata_cidr = '169.254.169.254/32'
-        self.primary_vip_range = get_free_range(
-            parent_range='169.254.0.0/16',
-            excluded_ranges=[metadata_cidr, FIP_LL_SUBNET] + ha_cidrs,
-            size=PRIMARY_VIP_RANGE_SIZE)
-
-    def set_authentication(self, auth_type, password):
-        if auth_type not in VALID_AUTH_TYPES:
-            raise InvalidAuthenticationTypeException(auth_type=auth_type)
-
-        self.authentication = (auth_type, password)
-
-    def add_vip(self, ip_cidr, interface_name, scope):
-        vip = KeepalivedVipAddress(ip_cidr, interface_name, scope)
-        if vip not in self.vips:
-            self.vips.append(vip)
-        else:
-            LOG.debug('VIP %s already present in %s', vip, self.vips)
-
-    def remove_vips_vroutes_by_interface(self, interface_name):
-        self.vips = [vip for vip in self.vips
-                     if vip.interface_name != interface_name]
-
-        self.virtual_routes.remove_routes_on_interface(interface_name)
-
-    def remove_vip_by_ip_address(self, ip_address):
-        self.vips = [vip for vip in self.vips
-                     if vip.ip_address != ip_address]
-
-    def get_existing_vip_ip_addresses(self, interface_name):
-        return [vip.ip_address for vip in self.vips
-                if vip.interface_name == interface_name]
-
-    def _build_track_interface_config(self):
-        return itertools.chain(
-            ['    track_interface {'],
-            ('        %s' % i for i in self.track_interfaces),
-            ['    }'])
-
-    def get_primary_vip(self):
-        """Return an address in the primary_vip_range CIDR, with the router's
-        VRID in the host section.
-
-        For example, if primary_vip_range is 169.254.0.0/24, and this router's
-        VRID is 5, the result is 169.254.0.5. Using the VRID ensures that
-        the primary VIP is consistent amongst HA router instances on different
-        nodes.
-        """
-
-        ip = (netaddr.IPNetwork(self.primary_vip_range).network +
-              self.vrouter_id)
-        return str(netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE)))
-
-    def _build_vips_config(self):
-        # NOTE(amuller): The primary VIP must be consistent in order to avoid
-        # keepalived bugs. Changing the VIP in the 'virtual_ipaddress' and
-        # SIGHUP'ing keepalived can remove virtual routers, including the
-        # router's default gateway.
-        # We solve this by never changing the VIP in the virtual_ipaddress
-        # section, herein known as the primary VIP.
-        # The only interface known to exist for HA routers is the HA interface
-        # (self.interface). We generate an IP on that device and use it as the
-        # primary VIP. The other VIPs (Internal interfaces IPs, the external
-        # interface IP and floating IPs) are placed in the
-        # virtual_ipaddress_excluded section.
-
-        primary = KeepalivedVipAddress(self.get_primary_vip(), self.interface)
-        vips_result = ['    virtual_ipaddress {',
-                       '        %s' % primary.build_config(),
-                       '    }']
-
-        if self.vips:
-            vips_result.extend(
-                itertools.chain(['    virtual_ipaddress_excluded {'],
-                                ('        %s' % vip.build_config()
-                                 for vip in
-                                 sorted(self.vips,
-                                        key=lambda vip: vip.ip_address)),
-                                ['    }']))
-
-        return vips_result
-
-    def _build_virtual_routes_config(self):
-        return itertools.chain(['    virtual_routes {'],
-                               ('        %s' % route.build_config()
-                                for route in self.virtual_routes),
-                               ['    }'])
-
-    def build_config(self):
-        config = ['vrrp_instance %s {' % self.name,
-                  '    state %s' % self.state,
-                  '    interface %s' % self.interface,
-                  '    virtual_router_id %s' % self.vrouter_id,
-                  '    priority %s' % self.priority,
-                  '    garp_master_repeat %s' % self.garp_master_repeat,
-                  '    garp_master_refresh %s' % self.garp_master_refresh]
-
-        if self.nopreempt:
-            config.append('    nopreempt')
-
-        if self.advert_int:
-            config.append('    advert_int %s' % self.advert_int)
-
-        if self.authentication:
-            auth_type, password = self.authentication
-            authentication = ['    authentication {',
-                              '        auth_type %s' % auth_type,
-                              '        auth_pass %s' % password,
-                              '    }']
-            config.extend(authentication)
-
-        if self.mcast_src_ip:
-            config.append('    mcast_src_ip %s' % self.mcast_src_ip)
-
-        if self.track_interfaces:
-            config.extend(self._build_track_interface_config())
-
-        config.extend(self._build_vips_config())
-
-        if len(self.virtual_routes):
-            config.extend(self.virtual_routes.build_config())
-
-        config.append('}')
-
-        return config
-
-
-class KeepalivedConf(object):
-    """A keepalived configuration."""
-
-    def __init__(self):
-        self.reset()
-
-    def reset(self):
-        self.instances = {}
-
-    def add_instance(self, instance):
-        self.instances[instance.vrouter_id] = instance
-
-    def get_instance(self, vrouter_id):
-        return self.instances.get(vrouter_id)
-
-    def build_config(self):
-        config = []
-
-        for instance in self.instances.values():
-            config.extend(instance.build_config())
-
-        return config
-
-    def get_config_str(self):
-        """Generates and returns the keepalived configuration.
-
-        :return: Keepalived configuration string.
-        """
-        return '\n'.join(self.build_config())
-
-
-class KeepalivedManager(object):
-    """Wrapper for keepalived.
-
-    This wrapper writes keepalived config files and starts/restarts the
-    keepalived process.
-
-    """
-
-    def __init__(self, resource_id, config, process_monitor, conf_path='/tmp',
-                 namespace=None):
-        self.resource_id = resource_id
-        self.config = config
-        self.namespace = namespace
-        self.process_monitor = process_monitor
-        self.conf_path = conf_path
-
-    def get_conf_dir(self):
-        confs_dir = os.path.abspath(os.path.normpath(self.conf_path))
-        conf_dir = os.path.join(confs_dir, self.resource_id)
-        return conf_dir
-
-    def get_full_config_file_path(self, filename, ensure_conf_dir=True):
-        conf_dir = self.get_conf_dir()
-        if ensure_conf_dir:
-            common_utils.ensure_dir(conf_dir)
-        return os.path.join(conf_dir, filename)
-
-    def _output_config_file(self):
-        config_str = self.config.get_config_str()
-        config_path = self.get_full_config_file_path('keepalived.conf')
-        common_utils.replace_file(config_path, config_str)
-
-        return config_path
-
-    def get_conf_on_disk(self):
-        config_path = self.get_full_config_file_path('keepalived.conf')
-        try:
-            with open(config_path) as conf:
-                return conf.read()
-        except (OSError, IOError) as e:
-            if e.errno != errno.ENOENT:
-                raise
-
-    def spawn(self):
-        config_path = self._output_config_file()
-
-        keepalived_pm = self.get_process()
-        vrrp_pm = self._get_vrrp_process(
-            '%s-vrrp' % keepalived_pm.get_pid_file_name())
-
-        keepalived_pm.default_cmd_callback = (
-            self._get_keepalived_process_callback(vrrp_pm, config_path))
-
-        keepalived_pm.enable(reload_cfg=True)
-
-        self.process_monitor.register(uuid=self.resource_id,
-                                      service_name=KEEPALIVED_SERVICE_NAME,
-                                      monitored_process=keepalived_pm)
-
-        LOG.debug('Keepalived spawned with config %s', config_path)
-
-    def disable(self):
-        self.process_monitor.unregister(uuid=self.resource_id,
-                                        service_name=KEEPALIVED_SERVICE_NAME)
-
-        pm = self.get_process()
-        pm.disable(sig='15')
-
-    def get_process(self):
-        return external_process.ProcessManager(
-            cfg.CONF,
-            self.resource_id,
-            self.namespace,
-            pids_path=self.conf_path)
-
-    def _get_vrrp_process(self, pid_file):
-        return external_process.ProcessManager(
-            cfg.CONF,
-            self.resource_id,
-            self.namespace,
-            pid_file=pid_file)
-
-    def _get_keepalived_process_callback(self, vrrp_pm, config_path):
-
-        def callback(pid_file):
-            # If the keepalived process crashed unexpectedly, the vrrp child
-            # process is orphaned and prevents a new keepalived process from
-            # being spawned. This check lets the l3-agent kill the orphaned
-            # process and spawn keepalived successfully.
-            if vrrp_pm.active:
-                vrrp_pm.disable()
-            cmd = ['keepalived', '-P',
-                   '-f', config_path,
-                   '-p', pid_file,
-                   '-r', '%s-vrrp' % pid_file]
-            return cmd
-
-        return callback
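Taken together, these classes compose a keepalived configuration
declaratively. A minimal sketch of the flow (assuming the module above is
importable as neutron.agent.linux.keepalived; the interface names and the
password are made up):

    from neutron.agent.linux import keepalived

    conf = keepalived.KeepalivedConf()
    # VRID 5 on a hypothetical HA interface; ha_cidrs lists ranges already
    # in use, so the primary VIP range picked will not collide with them.
    instance = keepalived.KeepalivedInstance(
        'MASTER', 'ha-403f1c2d', 5, ha_cidrs=['169.254.192.0/18'])
    instance.set_authentication('PASS', 'secret')
    instance.add_vip('10.0.0.1/24', 'qr-52ab7e90', None)
    conf.add_instance(instance)
    print(conf.get_config_str())  # emits a 'vrrp_instance VR_5 { ... }' block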
diff --git a/neutron/agent/linux/ovsdb_monitor.py b/neutron/agent/linux/ovsdb_monitor.py
deleted file mode 100644 (file)
index bf1edf3..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import eventlet
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-
-from neutron._i18n import _LE
-from neutron.agent.linux import async_process
-from neutron.agent.ovsdb import api as ovsdb
-
-
-LOG = logging.getLogger(__name__)
-
-OVSDB_ACTION_INITIAL = 'initial'
-OVSDB_ACTION_INSERT = 'insert'
-OVSDB_ACTION_DELETE = 'delete'
-
-
-class OvsdbMonitor(async_process.AsyncProcess):
-    """Manages an invocation of 'ovsdb-client monitor'."""
-
-    def __init__(self, table_name, columns=None, format=None,
-                 respawn_interval=None):
-
-        cmd = ['ovsdb-client', 'monitor', table_name]
-        if columns:
-            cmd.append(','.join(columns))
-        if format:
-            cmd.append('--format=%s' % format)
-        super(OvsdbMonitor, self).__init__(cmd, run_as_root=True,
-                                           respawn_interval=respawn_interval,
-                                           log_output=True,
-                                           die_on_error=True)
-
-
-class SimpleInterfaceMonitor(OvsdbMonitor):
-    """Monitors the Interface table of the local host's ovsdb for changes.
-
-    The has_updates() method indicates whether changes to the ovsdb
-    Interface table have been detected since the monitor started or
-    since the previous access.
-    """
-
-    def __init__(self, respawn_interval=None):
-        super(SimpleInterfaceMonitor, self).__init__(
-            'Interface',
-            columns=['name', 'ofport', 'external_ids'],
-            format='json',
-            respawn_interval=respawn_interval,
-        )
-        self.new_events = {'added': [], 'removed': []}
-
-    @property
-    def has_updates(self):
-        """Indicate whether the ovsdb Interface table has been updated.
-
-        If the monitor process is not active an error will be logged since
-        it won't be able to communicate any update. This situation should be
-        temporary if respawn_interval is set.
-        """
-        if not self.is_active():
-            LOG.error(_LE("Interface monitor is not active"))
-        else:
-            self.process_events()
-        return bool(self.new_events['added'] or self.new_events['removed'])
-
-    def get_events(self):
-        self.process_events()
-        events = self.new_events
-        self.new_events = {'added': [], 'removed': []}
-        return events
-
-    def process_events(self):
-        devices_added = []
-        devices_removed = []
-        for row in self.iter_stdout():
-            json = jsonutils.loads(row).get('data')
-            for ovs_id, action, name, ofport, external_ids in json:
-                if external_ids:
-                    external_ids = ovsdb.val_to_py(external_ids)
-                if ofport:
-                    ofport = ovsdb.val_to_py(ofport)
-                device = {'name': name,
-                          'ofport': ofport,
-                          'external_ids': external_ids}
-                if action in (OVSDB_ACTION_INITIAL, OVSDB_ACTION_INSERT):
-                    devices_added.append(device)
-                elif action == OVSDB_ACTION_DELETE:
-                    devices_removed.append(device)
-        self.new_events['added'].extend(devices_added)
-        self.new_events['removed'].extend(devices_removed)
-
-    def start(self, block=False, timeout=5):
-        super(SimpleInterfaceMonitor, self).start()
-        if block:
-            with eventlet.timeout.Timeout(timeout):
-                while not self.is_active():
-                    eventlet.sleep()
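A usage sketch for the monitor above (assumptions: ovsdb-client is installed
and the agent is configured with a root helper that may run it):

    from neutron.agent.linux import ovsdb_monitor

    monitor = ovsdb_monitor.SimpleInterfaceMonitor(respawn_interval=30)
    monitor.start(block=True)  # waits up to 5 seconds for the child process
    if monitor.has_updates:
        # Each list holds dicts with 'name', 'ofport' and 'external_ids'.
        events = monitor.get_events()
        print(events['added'], events['removed'])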
diff --git a/neutron/agent/linux/pd.py b/neutron/agent/linux/pd.py
deleted file mode 100644 (file)
index 350b530..0000000
+++ /dev/null
@@ -1,358 +0,0 @@
-# Copyright 2015 Cisco Systems
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import eventlet
-import functools
-import signal
-import six
-
-from stevedore import driver
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _
-from neutron.agent.linux import utils as linux_utils
-from neutron.callbacks import events
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants as l3_constants
-from neutron.common import ipv6_utils
-from neutron.common import utils
-
-LOG = logging.getLogger(__name__)
-
-OPTS = [
-    cfg.StrOpt('pd_dhcp_driver',
-               default='dibbler',
-               help=_('Service to handle DHCPv6 Prefix delegation.')),
-]
-
-
-class PrefixDelegation(object):
-    def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb,
-                 agent_conf):
-        self.context = context
-        self.pmon = pmon
-        self.intf_driver = intf_driver
-        self.notifier = notifier
-        self.routers = {}
-        self.pd_update_cb = pd_update_cb
-        self.agent_conf = agent_conf
-        self.pd_dhcp_driver = driver.DriverManager(
-            namespace='neutron.agent.linux.pd_drivers',
-            name=agent_conf.prefix_delegation_driver,
-        ).driver
-        registry.subscribe(add_router,
-                           resources.ROUTER,
-                           events.BEFORE_CREATE)
-        registry.subscribe(remove_router,
-                           resources.ROUTER,
-                           events.AFTER_DELETE)
-        self._get_sync_data()
-
-    @utils.synchronized("l3-agent-pd")
-    def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac):
-        router = self.routers.get(router_id)
-        if router is None:
-            return
-
-        pd_info = router['subnets'].get(subnet_id)
-        if not pd_info:
-            pd_info = PDInfo(ri_ifname=ri_ifname, mac=mac)
-            router['subnets'][subnet_id] = pd_info
-
-        pd_info.bind_lla = self._get_lla(mac)
-        if pd_info.sync:
-            pd_info.mac = mac
-            pd_info.old_prefix = prefix
-        else:
-            self._add_lla(router, pd_info.get_bind_lla_with_mask())
-
-    def _delete_pd(self, router, pd_info):
-        self._delete_lla(router, pd_info.get_bind_lla_with_mask())
-        if pd_info.client_started:
-            pd_info.driver.disable(self.pmon, router['ns_name'])
-
-    @utils.synchronized("l3-agent-pd")
-    def disable_subnet(self, router_id, subnet_id):
-        prefix_update = {}
-        router = self.routers.get(router_id)
-        if not router:
-            return
-        pd_info = router['subnets'].get(subnet_id)
-        if not pd_info:
-            return
-        self._delete_pd(router, pd_info)
-        prefix_update[subnet_id] = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
-        del router['subnets'][subnet_id]
-        LOG.debug("Update server with prefixes: %s", prefix_update)
-        self.notifier(self.context, prefix_update)
-
-    @utils.synchronized("l3-agent-pd")
-    def update_subnet(self, router_id, subnet_id, prefix):
-        router = self.routers.get(router_id)
-        if router is not None:
-            pd_info = router['subnets'].get(subnet_id)
-            if pd_info and pd_info.old_prefix != prefix:
-                old_prefix = pd_info.old_prefix
-                pd_info.old_prefix = prefix
-                return old_prefix
-
-    @utils.synchronized("l3-agent-pd")
-    def add_gw_interface(self, router_id, gw_ifname):
-        router = self.routers.get(router_id)
-        prefix_update = {}
-        if not router:
-            return
-        router['gw_interface'] = gw_ifname
-        for subnet_id, pd_info in six.iteritems(router['subnets']):
-            # The gateway is added after the internal router ports.
-            # If a PD is being synced and its prefix is available, send an
-            # update if the prefix is out of sync; if it is not available,
-            # start the PD client.
-            bind_lla_with_mask = pd_info.get_bind_lla_with_mask()
-            if pd_info.sync:
-                pd_info.sync = False
-                if pd_info.client_started:
-                    if pd_info.prefix != pd_info.old_prefix:
-                        prefix_update[subnet_id] = pd_info.prefix
-                else:
-                    self._delete_lla(router, bind_lla_with_mask)
-                    self._add_lla(router, bind_lla_with_mask)
-            else:
-                self._add_lla(router, bind_lla_with_mask)
-        if prefix_update:
-            LOG.debug("Update server with prefixes: %s", prefix_update)
-            self.notifier(self.context, prefix_update)
-
-    def delete_router_pd(self, router):
-        prefix_update = {}
-        for subnet_id, pd_info in six.iteritems(router['subnets']):
-            self._delete_lla(router, pd_info.get_bind_lla_with_mask())
-            if pd_info.client_started:
-                pd_info.driver.disable(self.pmon, router['ns_name'])
-                pd_info.prefix = None
-                pd_info.client_started = False
-                prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
-                prefix_update[subnet_id] = prefix
-        if prefix_update:
-            LOG.debug("Update server with prefixes: %s", prefix_update)
-            self.notifier(self.context, prefix_update)
-
-    @utils.synchronized("l3-agent-pd")
-    def remove_gw_interface(self, router_id):
-        router = self.routers.get(router_id)
-        if router is not None:
-            router['gw_interface'] = None
-            self.delete_router_pd(router)
-
-    @utils.synchronized("l3-agent-pd")
-    def sync_router(self, router_id):
-        router = self.routers.get(router_id)
-        if router is not None and router['gw_interface'] is None:
-            self.delete_router_pd(router)
-
-    @utils.synchronized("l3-agent-pd")
-    def remove_stale_ri_ifname(self, router_id, stale_ifname):
-        router = self.routers.get(router_id)
-        if router is not None:
-            for subnet_id, pd_info in router['subnets'].items():
-                if pd_info.ri_ifname == stale_ifname:
-                    self._delete_pd(router, pd_info)
-                    del router['subnets'][subnet_id]
-
-    @staticmethod
-    def _get_lla(mac):
-        lla = ipv6_utils.get_ipv6_addr_by_EUI64(l3_constants.IPV6_LLA_PREFIX,
-                                                mac)
-        return lla
-
-    def _get_llas(self, gw_ifname, ns_name):
-        try:
-            return self.intf_driver.get_ipv6_llas(gw_ifname, ns_name)
-        except RuntimeError:
-            # The error message was already logged as part of the driver call.
-            # This can happen if the gw_ifname was removed;
-            # simply return and let the thread exit
-            return
-
-    def _add_lla(self, router, lla_with_mask):
-        if router['gw_interface']:
-            self.intf_driver.add_ipv6_addr(router['gw_interface'],
-                                           lla_with_mask,
-                                           router['ns_name'],
-                                           'link')
-            # There is a delay before the LLA becomes active because the
-            # kernel runs DAD to ensure the LLA is unique. Spawn a thread
-            # to wait for the interface to be ready.
-            self._spawn_lla_thread(router['gw_interface'],
-                                   router['ns_name'],
-                                   lla_with_mask)
-
-    def _spawn_lla_thread(self, gw_ifname, ns_name, lla_with_mask):
-        eventlet.spawn_n(self._ensure_lla_task,
-                         gw_ifname,
-                         ns_name,
-                         lla_with_mask)
-
-    def _delete_lla(self, router, lla_with_mask):
-        if lla_with_mask and router['gw_interface']:
-            try:
-                self.intf_driver.delete_ipv6_addr(router['gw_interface'],
-                                                  lla_with_mask,
-                                                  router['ns_name'])
-            except RuntimeError:
-                # Ignore error if the lla doesn't exist
-                pass
-
-    def _ensure_lla_task(self, gw_ifname, ns_name, lla_with_mask):
-        # Waiting this long would be unreasonable unless the DAD test failed;
-        # in that case, the subnet would never be assigned a prefix.
-        linux_utils.wait_until_true(functools.partial(self._lla_available,
-                                                      gw_ifname,
-                                                      ns_name,
-                                                      lla_with_mask),
-                                    timeout=l3_constants.LLA_TASK_TIMEOUT,
-                                    sleep=2)
-
-    def _lla_available(self, gw_ifname, ns_name, lla_with_mask):
-        llas = self._get_llas(gw_ifname, ns_name)
-        if self._is_lla_active(lla_with_mask, llas):
-            LOG.debug("LLA %s is active now" % lla_with_mask)
-            self.pd_update_cb()
-            return True
-
-    @staticmethod
-    def _is_lla_active(lla_with_mask, llas):
-        for lla in llas:
-            if lla_with_mask == lla['cidr']:
-                return not lla['tentative']
-        return False
-
-    @utils.synchronized("l3-agent-pd")
-    def process_prefix_update(self):
-        LOG.debug("Processing IPv6 PD Prefix Update")
-
-        prefix_update = {}
-        for router_id, router in six.iteritems(self.routers):
-            if not router['gw_interface']:
-                continue
-
-            llas = None
-            for subnet_id, pd_info in six.iteritems(router['subnets']):
-                if pd_info.client_started:
-                    prefix = pd_info.driver.get_prefix()
-                    if prefix != pd_info.prefix:
-                        pd_info.prefix = prefix
-                        prefix_update[subnet_id] = prefix
-                else:
-                    if not llas:
-                        llas = self._get_llas(router['gw_interface'],
-                                              router['ns_name'])
-
-                    if self._is_lla_active(pd_info.get_bind_lla_with_mask(),
-                                           llas):
-                        if not pd_info.driver:
-                            pd_info.driver = self.pd_dhcp_driver(
-                                router_id, subnet_id, pd_info.ri_ifname)
-                        pd_info.driver.enable(self.pmon, router['ns_name'],
-                                              router['gw_interface'],
-                                              pd_info.bind_lla)
-                        pd_info.client_started = True
-
-        if prefix_update:
-            LOG.debug("Update server with prefixes: %s", prefix_update)
-            self.notifier(self.context, prefix_update)
-
-    def after_start(self):
-        LOG.debug('SIGUSR1 signal handler set')
-        signal.signal(signal.SIGUSR1, self._handle_sigusr1)
-
-    def _handle_sigusr1(self, signum, frame):
-        """Update PD on receiving SIGUSR1.
-
-        The external DHCPv6 client uses SIGUSR1 to notify agent
-        of prefix changes.
-        """
-        self.pd_update_cb()
-
-    def _get_sync_data(self):
-        sync_data = self.pd_dhcp_driver.get_sync_data()
-        for pd_info in sync_data:
-            router_id = pd_info.router_id
-            if not self.routers.get(router_id):
-                self.routers[router_id] = {'gw_interface': None,
-                                           'ns_name': None,
-                                           'subnets': {}}
-            new_pd_info = PDInfo(pd_info=pd_info)
-            subnets = self.routers[router_id]['subnets']
-            subnets[pd_info.subnet_id] = new_pd_info
-
-
-@utils.synchronized("l3-agent-pd")
-def remove_router(resource, event, l3_agent, **kwargs):
-    router_id = kwargs['router'].router_id
-    router = l3_agent.pd.routers.get(router_id)
-    l3_agent.pd.delete_router_pd(router)
-    del l3_agent.pd.routers[router_id]['subnets']
-    del l3_agent.pd.routers[router_id]
-
-
-def get_router_entry(ns_name):
-    return {'gw_interface': None,
-            'ns_name': ns_name,
-            'subnets': {}}
-
-
-@utils.synchronized("l3-agent-pd")
-def add_router(resource, event, l3_agent, **kwargs):
-    added_router = kwargs['router']
-    router = l3_agent.pd.routers.get(added_router.router_id)
-    if not router:
-        l3_agent.pd.routers[added_router.router_id] = (
-            get_router_entry(added_router.ns_name))
-    else:
-        # This will happen during l3 agent restart
-        router['ns_name'] = added_router.ns_name
-
-
-class PDInfo(object):
-    """A class to simplify storing and passing of information relevant to
-    Prefix Delegation operations for a given subnet.
-    """
-    def __init__(self, pd_info=None, ri_ifname=None, mac=None):
-        if pd_info is None:
-            self.prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
-            self.old_prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
-            self.ri_ifname = ri_ifname
-            self.mac = mac
-            self.bind_lla = None
-            self.sync = False
-            self.driver = None
-            self.client_started = False
-        else:
-            self.prefix = pd_info.prefix
-            self.old_prefix = None
-            self.ri_ifname = pd_info.ri_ifname
-            self.mac = None
-            self.bind_lla = None
-            self.sync = True
-            self.driver = pd_info.driver
-            self.client_started = pd_info.client_started
-
-    def get_bind_lla_with_mask(self):
-        bind_lla_with_mask = '%s/64' % self.bind_lla
-        return bind_lla_with_mask
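The bind_lla used above is derived from the router port's MAC address via
EUI-64, as in the _get_lla() helper. A sketch of that derivation (the MAC is
made up, and 'fe80::/64' is assumed to be the value of
l3_constants.IPV6_LLA_PREFIX):

    from neutron.common import ipv6_utils

    # EUI-64 expansion: fa:16:3e:aa:bb:cc -> fe80::f816:3eff:feaa:bbcc
    lla = ipv6_utils.get_ipv6_addr_by_EUI64('fe80::/64', 'fa:16:3e:aa:bb:cc')
    bind_lla_with_mask = '%s/64' % lla  # cf. PDInfo.get_bind_lla_with_mask()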
diff --git a/neutron/agent/linux/pd_driver.py b/neutron/agent/linux/pd_driver.py
deleted file mode 100644 (file)
index 3cdb36a..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2015 Cisco Systems
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import six
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-
-OPTS = [
-    cfg.StrOpt('pd_confs',
-               default='$state_path/pd',
-               help=_('Location to store IPv6 PD files.')),
-    cfg.StrOpt('vendor_pen',
-               default='8888',
-               help=_("A decimal value as Vendor's Registered Private "
-                      "Enterprise Number as required by RFC3315 DUID-EN.")),
-]
-
-cfg.CONF.register_opts(OPTS)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class PDDriverBase(object):
-
-    def __init__(self, router_id, subnet_id, ri_ifname):
-        self.router_id = router_id
-        self.subnet_id = subnet_id
-        self.ri_ifname = ri_ifname
-
-    @abc.abstractmethod
-    def enable(self, pmon, router_ns, ex_gw_ifname, lla):
-        """Enable IPv6 Prefix Delegation for this PDDriver on the given
-        external interface, with the given link local address
-        """
-
-    @abc.abstractmethod
-    def disable(self, pmon, router_ns):
-        """Disable IPv6 Prefix Delegation for this PDDriver
-        """
-
-    @abc.abstractmethod
-    def get_prefix(self):
-        """Get the current assigned prefix for this PDDriver from the PD agent.
-        If no prefix is currently assigned, return
-        constants.PROVISIONAL_IPV6_PD_PREFIX
-        """
-
-    @staticmethod
-    @abc.abstractmethod
-    def get_sync_data():
-        """Get the latest router_id, subnet_id, and ri_ifname from the PD agent
-        so that the PDDriver can be kept up to date
-        """
diff --git a/neutron/agent/linux/polling.py b/neutron/agent/linux/polling.py
deleted file mode 100644 (file)
index 29d5bd6..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import eventlet
-from oslo_log import log as logging
-
-from neutron.agent.common import base_polling
-from neutron.agent.linux import async_process
-from neutron.agent.linux import ovsdb_monitor
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-
-LOG = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def get_polling_manager(minimize_polling=False,
-                        ovsdb_monitor_respawn_interval=(
-                            constants.DEFAULT_OVSDBMON_RESPAWN)):
-    if minimize_polling:
-        pm = InterfacePollingMinimizer(
-            ovsdb_monitor_respawn_interval=ovsdb_monitor_respawn_interval)
-        pm.start()
-    else:
-        pm = base_polling.AlwaysPoll()
-    try:
-        yield pm
-    finally:
-        if minimize_polling:
-            pm.stop()
-
-
-class InterfacePollingMinimizer(base_polling.BasePollingManager):
-    """Monitors ovsdb to determine when polling is required."""
-
-    def __init__(
-            self,
-            ovsdb_monitor_respawn_interval=constants.DEFAULT_OVSDBMON_RESPAWN):
-
-        super(InterfacePollingMinimizer, self).__init__()
-        self._monitor = ovsdb_monitor.SimpleInterfaceMonitor(
-            respawn_interval=ovsdb_monitor_respawn_interval)
-
-    def start(self):
-        self._monitor.start()
-
-    def stop(self):
-        try:
-            self._monitor.stop()
-        except async_process.AsyncProcessException:
-            LOG.debug("InterfacePollingMinimizer was not running when stopped")
-
-    def _is_polling_required(self):
-        # Yield so that update detection has a chance to collect output.
-        eventlet.sleep()
-        return self._monitor.has_updates
-
-    def get_events(self):
-        return self._monitor.get_events()
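An agent loop consumes the manager roughly like this (a sketch; the fixed
iteration count stands in for an agent's run-until-shutdown loop, and the
is_polling_required property is assumed to come from the base_polling
classes):

    from neutron.agent.linux import polling

    with polling.get_polling_manager(minimize_polling=True) as pm:
        for _ in range(10):  # a real agent loops until shutdown
            if pm.is_polling_required:
                print(pm.get_events())  # {'added': [...], 'removed': [...]}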
diff --git a/neutron/agent/linux/ra.py b/neutron/agent/linux/ra.py
deleted file mode 100644 (file)
index 9cc0a25..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from itertools import chain as iter_chain
-import jinja2
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-import six
-
-from neutron._i18n import _
-from neutron.agent.linux import external_process
-from neutron.agent.linux import utils
-from neutron.common import constants
-from neutron.common import utils as common_utils
-
-
-RADVD_SERVICE_NAME = 'radvd'
-RADVD_SERVICE_CMD = 'radvd'
-# We can configure a maximum of 3 DNS servers in the radvd RDNSS section.
-MAX_RDNSS_ENTRIES = 3
-
-LOG = logging.getLogger(__name__)
-
-OPTS = [
-    cfg.StrOpt('ra_confs',
-               default='$state_path/ra',
-               help=_('Location to store IPv6 RA config files')),
-]
-
-CONFIG_TEMPLATE = jinja2.Template("""interface {{ interface_name }}
-{
-   AdvSendAdvert on;
-   MinRtrAdvInterval 3;
-   MaxRtrAdvInterval 10;
-
-   {% if constants.DHCPV6_STATELESS in ra_modes %}
-   AdvOtherConfigFlag on;
-   {% endif %}
-
-   {% if constants.DHCPV6_STATEFUL in ra_modes %}
-   AdvManagedFlag on;
-   {% endif %}
-
-   {% if dns_servers %}
-   RDNSS {% for dns in dns_servers %} {{ dns }} {% endfor %} {};
-   {% endif %}
-
-   {% for prefix in prefixes %}
-   prefix {{ prefix }}
-   {
-        AdvOnLink on;
-        AdvAutonomous on;
-   };
-   {% endfor %}
-};
-""")
-
-
-class DaemonMonitor(object):
-    """Manage the data and state of an radvd process."""
-
-    def __init__(self, router_id, router_ns, process_monitor, dev_name_helper,
-                 agent_conf):
-        self._router_id = router_id
-        self._router_ns = router_ns
-        self._process_monitor = process_monitor
-        self._dev_name_helper = dev_name_helper
-        self._agent_conf = agent_conf
-
-    def _generate_radvd_conf(self, router_ports):
-        radvd_conf = utils.get_conf_file_name(self._agent_conf.ra_confs,
-                                              self._router_id,
-                                              'radvd.conf',
-                                              True)
-        buf = six.StringIO()
-        for p in router_ports:
-            subnets = p.get('subnets', [])
-            v6_subnets = [subnet for subnet in subnets if
-                    netaddr.IPNetwork(subnet['cidr']).version == 6]
-            if not v6_subnets:
-                continue
-            ra_modes = {subnet['ipv6_ra_mode'] for subnet in v6_subnets}
-            auto_config_prefixes = [subnet['cidr'] for subnet in v6_subnets if
-                    subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC or
-                    subnet['ipv6_ra_mode'] == constants.DHCPV6_STATELESS]
-            interface_name = self._dev_name_helper(p['id'])
-            slaac_subnets = [subnet for subnet in v6_subnets if
-                subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC]
-            dns_servers = list(iter_chain(*[subnet['dns_nameservers'] for
-                subnet in slaac_subnets if subnet.get('dns_nameservers')]))
-            buf.write('%s' % CONFIG_TEMPLATE.render(
-                ra_modes=list(ra_modes),
-                interface_name=interface_name,
-                prefixes=auto_config_prefixes,
-                dns_servers=dns_servers[0:MAX_RDNSS_ENTRIES],
-                constants=constants))
-
-        common_utils.replace_file(radvd_conf, buf.getvalue())
-        return radvd_conf
-
-    def _get_radvd_process_manager(self, callback=None):
-        return external_process.ProcessManager(
-            uuid=self._router_id,
-            default_cmd_callback=callback,
-            namespace=self._router_ns,
-            service=RADVD_SERVICE_NAME,
-            conf=self._agent_conf,
-            run_as_root=True)
-
-    def _spawn_radvd(self, radvd_conf):
-        def callback(pid_file):
-            # We need to use -m syslog, and e.g. not -m stderr (the default)
-            # or -m stderr_syslog, so that radvd 2.0+ will close stderr and
-            # exit after daemonization; otherwise, the current thread will
-            # be blocked waiting for a result from radvd that will never
-            # come until the process dies
-            radvd_cmd = [RADVD_SERVICE_CMD,
-                         '-C', '%s' % radvd_conf,
-                         '-p', '%s' % pid_file,
-                         '-m', 'syslog']
-            return radvd_cmd
-
-        pm = self._get_radvd_process_manager(callback)
-        pm.enable(reload_cfg=True)
-        self._process_monitor.register(uuid=self._router_id,
-                                       service_name=RADVD_SERVICE_NAME,
-                                       monitored_process=pm)
-        LOG.debug("radvd enabled for router %s", self._router_id)
-
-    def enable(self, router_ports):
-        for p in router_ports:
-            for subnet in p['subnets']:
-                if netaddr.IPNetwork(subnet['cidr']).version == 6:
-                    LOG.debug("Enable IPv6 RA for router %s", self._router_id)
-                    radvd_conf = self._generate_radvd_conf(router_ports)
-                    self._spawn_radvd(radvd_conf)
-                    return
-
-        # Kill the daemon if it's running
-        self.disable()
-
-    def disable(self):
-        self._process_monitor.unregister(uuid=self._router_id,
-                                         service_name=RADVD_SERVICE_NAME)
-        pm = self._get_radvd_process_manager()
-        pm.disable()
-        utils.remove_conf_files(self._agent_conf.ra_confs, self._router_id)
-        LOG.debug("radvd disabled for router %s", self._router_id)
-
-    @property
-    def enabled(self):
-        return self._get_radvd_process_manager().active
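Rendering the template directly shows the shape of the generated radvd
config. A sketch for a single SLAAC subnet (the interface name and the
addresses are made up):

    from neutron.agent.linux import ra
    from neutron.common import constants

    text = ra.CONFIG_TEMPLATE.render(
        ra_modes=[constants.IPV6_SLAAC],
        interface_name='qr-1a2b3c4d',
        prefixes=['2001:db8::/64'],
        dns_servers=['2001:db8::53'],
        constants=constants)
    # 'text' is an "interface qr-1a2b3c4d { ... }" block with AdvSendAdvert
    # on, one RDNSS line, and a prefix stanza with AdvOnLink/AdvAutonomous.
    print(text)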
diff --git a/neutron/agent/linux/utils.py b/neutron/agent/linux/utils.py
deleted file mode 100644 (file)
index 67aa711..0000000
+++ /dev/null
@@ -1,407 +0,0 @@
-# Copyright 2012 Locaweb.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import fcntl
-import glob
-import grp
-import os
-import pwd
-import shlex
-import socket
-import struct
-import tempfile
-import threading
-
-import debtcollector
-import eventlet
-from eventlet.green import subprocess
-from eventlet import greenthread
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_rootwrap import client
-from oslo_utils import excutils
-import six
-from six.moves import http_client as httplib
-
-from neutron._i18n import _, _LE
-from neutron.agent.common import config
-from neutron.common import constants
-from neutron.common import utils
-from neutron import wsgi
-
-
-LOG = logging.getLogger(__name__)
-
-
-class RootwrapDaemonHelper(object):
-    __client = None
-    __lock = threading.Lock()
-
-    def __new__(cls):
-        """There is no reason to instantiate this class"""
-        raise NotImplementedError()
-
-    @classmethod
-    def get_client(cls):
-        with cls.__lock:
-            if cls.__client is None:
-                cls.__client = client.Client(
-                    shlex.split(cfg.CONF.AGENT.root_helper_daemon))
-            return cls.__client
-
-
-def addl_env_args(addl_env):
-    """Build arugments for adding additional environment vars with env"""
-
-    # NOTE (twilson) If using rootwrap, an EnvFilter should be set up for the
-    # command instead of a CommandFilter.
-    if addl_env is None:
-        return []
-    return ['env'] + ['%s=%s' % pair for pair in addl_env.items()]
-
-
-def create_process(cmd, run_as_root=False, addl_env=None):
-    """Create a process object for the given command.
-
-    The return value will be a tuple of the process object and the
-    list of command arguments used to create it.
-    """
-    cmd = list(map(str, addl_env_args(addl_env) + cmd))
-    if run_as_root:
-        cmd = shlex.split(config.get_root_helper(cfg.CONF)) + cmd
-    LOG.debug("Running command: %s", cmd)
-    obj = utils.subprocess_popen(cmd, shell=False,
-                                 stdin=subprocess.PIPE,
-                                 stdout=subprocess.PIPE,
-                                 stderr=subprocess.PIPE)
-
-    return obj, cmd
-
-
-def execute_rootwrap_daemon(cmd, process_input, addl_env):
-    cmd = list(map(str, addl_env_args(addl_env) + cmd))
-    # NOTE(twilson) oslo_rootwrap.daemon will raise on filter match
-    # errors, whereas oslo_rootwrap.cmd converts them to return codes.
-    # In practice, no neutron code should be trying to execute something that
-    # would throw those errors, and if it does it should be fixed as opposed to
-    # just logging the execution error.
-    LOG.debug("Running command (rootwrap daemon): %s", cmd)
-    client = RootwrapDaemonHelper.get_client()
-    return client.execute(cmd, process_input)
-
-
-def execute(cmd, process_input=None, addl_env=None,
-            check_exit_code=True, return_stderr=False, log_fail_as_error=True,
-            extra_ok_codes=None, run_as_root=False):
-    try:
-        if (process_input is None or
-            isinstance(process_input, six.binary_type)):
-            _process_input = process_input
-        else:
-            _process_input = process_input.encode('utf-8')
-        if run_as_root and cfg.CONF.AGENT.root_helper_daemon:
-            returncode, _stdout, _stderr = (
-                execute_rootwrap_daemon(cmd, process_input, addl_env))
-        else:
-            obj, cmd = create_process(cmd, run_as_root=run_as_root,
-                                      addl_env=addl_env)
-            _stdout, _stderr = obj.communicate(_process_input)
-            returncode = obj.returncode
-            obj.stdin.close()
-        _stdout = utils.safe_decode_utf8(_stdout)
-        _stderr = utils.safe_decode_utf8(_stderr)
-
-        extra_ok_codes = extra_ok_codes or []
-        if returncode and returncode not in extra_ok_codes:
-            msg = _("Exit code: %(returncode)d; "
-                    "Stdin: %(stdin)s; "
-                    "Stdout: %(stdout)s; "
-                    "Stderr: %(stderr)s") % {
-                        'returncode': returncode,
-                        'stdin': process_input or '',
-                        'stdout': _stdout,
-                        'stderr': _stderr}
-
-            if log_fail_as_error:
-                LOG.error(msg)
-            if check_exit_code:
-                raise RuntimeError(msg)
-        else:
-            LOG.debug("Exit code: %d", returncode)
-
-    finally:
-        # NOTE(termie): this appears to be necessary to let the subprocess
-        #               call clean something up in between calls; without
-        #               it, two execute calls in a row hang on the second one
-        greenthread.sleep(0)
-
-    return (_stdout, _stderr) if return_stderr else _stdout
-
-
-def get_interface_mac(interface):
-    MAC_START = 18
-    MAC_END = 24
-    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    dev = interface[:constants.DEVICE_NAME_MAX_LEN]
-    if isinstance(dev, six.text_type):
-        dev = dev.encode('utf-8')
-    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', dev))
-    return ''.join(['%02x:' % ord(char)
-                    for char in info[MAC_START:MAC_END]])[:-1]
-
-
-@debtcollector.removals.remove(message="Redundant in Mitaka release.")
-def replace_file(file_name, data, file_mode=0o644):
-    """Replaces the contents of file_name with data in a safe manner.
-
-    First write to a temp file and then rename. Since POSIX renames are
-    atomic, the file is unlikely to be corrupted by competing writes.
-
-    We create the tempfile on the same device to ensure that it can be renamed.
-    """
-
-    base_dir = os.path.dirname(os.path.abspath(file_name))
-    tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False)
-    tmp_file.write(data)
-    tmp_file.close()
-    os.chmod(tmp_file.name, file_mode)
-    os.rename(tmp_file.name, file_name)
-
-
-def find_child_pids(pid):
-    """Retrieve a list of the pids of child processes of the given pid."""
-
-    try:
-        raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='],
-                           log_fail_as_error=False)
-    except RuntimeError as e:
-        # Unexpected errors are the responsibility of the caller
-        with excutils.save_and_reraise_exception() as ctxt:
-            # Exception has already been logged by execute
-            no_children_found = 'Exit code: 1' in str(e)
-            if no_children_found:
-                ctxt.reraise = False
-                return []
-    return [x.strip() for x in raw_pids.split('\n') if x.strip()]
-
-
-def _get_conf_base(cfg_root, uuid, ensure_conf_dir):
-    #TODO(mangelajo): separate responsibilities here, ensure_conf_dir
-    #                 should be a separate function
-    conf_dir = os.path.abspath(os.path.normpath(cfg_root))
-    conf_base = os.path.join(conf_dir, uuid)
-    if ensure_conf_dir:
-        utils.ensure_dir(conf_dir)
-    return conf_base
-
-
-def get_conf_file_name(cfg_root, uuid, cfg_file, ensure_conf_dir=False):
-    """Returns the file name for a given kind of config file."""
-    conf_base = _get_conf_base(cfg_root, uuid, ensure_conf_dir)
-    return "%s.%s" % (conf_base, cfg_file)
-
-
-def get_value_from_file(filename, converter=None):
-
-    try:
-        with open(filename, 'r') as f:
-            try:
-                return converter(f.read()) if converter else f.read()
-            except ValueError:
-                LOG.error(_LE('Unable to convert value in %s'), filename)
-    except IOError:
-        LOG.debug('Unable to access %s', filename)
-
-
-def get_value_from_conf_file(cfg_root, uuid, cfg_file, converter=None):
-    """A helper function to read a value from one of a config file."""
-    file_name = get_conf_file_name(cfg_root, uuid, cfg_file)
-    return get_value_from_file(file_name, converter)
-
-
-def remove_conf_files(cfg_root, uuid):
-    conf_base = _get_conf_base(cfg_root, uuid, False)
-    for file_path in glob.iglob("%s.*" % conf_base):
-        os.unlink(file_path)
-
-
-def get_root_helper_child_pid(pid, run_as_root=False):
-    """
-    Get the lowest child pid in the process hierarchy
-
-    If root helper was used, two or more processes would be created:
-
-     - a root helper process (e.g. sudo myscript)
-     - possibly a rootwrap script (e.g. neutron-rootwrap)
-     - a child process (e.g. myscript)
-
-    Killing the root helper process will leave the child process
-    running, re-parented to init, so the only way to ensure that both
-    die is to target the child process directly.
-    """
-    pid = str(pid)
-    if run_as_root:
-        try:
-            pid = find_child_pids(pid)[0]
-        except IndexError:
-            # Process is already dead
-            return None
-        while True:
-            try:
-                # We shouldn't have more than one child per process
-                # so keep getting the children of the first one
-                pid = find_child_pids(pid)[0]
-            except IndexError:
-                # Last process in the tree, return it
-                break
-    return pid
-
-
-def remove_abs_path(cmd):
-    """Remove absolute path of executable in cmd
-
-    Note: New instance of list is returned
-
-    :param cmd: parsed shlex command (e.g. ['/bin/foo', 'param1', 'param two'])
-
-    """
-    if cmd and os.path.isabs(cmd[0]):
-        cmd = list(cmd)
-        cmd[0] = os.path.basename(cmd[0])
-
-    return cmd
-
-
-def get_cmdline_from_pid(pid):
-    if pid is None or not os.path.exists('/proc/%s' % pid):
-        return []
-    with open('/proc/%s/cmdline' % pid, 'r') as f:
-        return f.readline().split('\0')[:-1]
-
-
-def cmd_matches_expected(cmd, expected_cmd):
-    abs_cmd = remove_abs_path(cmd)
-    abs_expected_cmd = remove_abs_path(expected_cmd)
-    if abs_cmd != abs_expected_cmd:
-        # Commands executed with #! are prefixed with the script
-        # executable. Check for the expected cmd being a subset of the
-        # actual cmd to cover this possibility.
-        abs_cmd = remove_abs_path(abs_cmd[1:])
-    return abs_cmd == abs_expected_cmd
-
-
-def pid_invoked_with_cmdline(pid, expected_cmd):
-    """Validate process with given pid is running with provided parameters
-
-    """
-    cmd = get_cmdline_from_pid(pid)
-    return cmd_matches_expected(cmd, expected_cmd)
-
-
-def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
-    """
-    Wait until callable predicate is evaluated as True
-
-    :param predicate: Callable deciding whether waiting should continue.
-    Best practice is to instantiate predicate with functools.partial()
-    :param timeout: Timeout in seconds; how long the function should wait.
-    :param sleep: Polling interval for results in seconds.
-    :param exception: Exception class for eventlet.Timeout.
-    (see doc for eventlet.Timeout for more information)
-    """
-    with eventlet.timeout.Timeout(timeout, exception):
-        while not predicate():
-            eventlet.sleep(sleep)
-
-
-def ensure_directory_exists_without_file(path):
-    dirname = os.path.dirname(path)
-    if os.path.isdir(dirname):
-        try:
-            os.unlink(path)
-        except OSError:
-            with excutils.save_and_reraise_exception() as ctxt:
-                if not os.path.exists(path):
-                    ctxt.reraise = False
-    else:
-        utils.ensure_dir(dirname)
-
-
-def is_effective_user(user_id_or_name):
-    """Returns True if user_id_or_name is effective user (id/name)."""
-    euid = os.geteuid()
-    if str(user_id_or_name) == str(euid):
-        return True
-    effective_user_name = pwd.getpwuid(euid).pw_name
-    return user_id_or_name == effective_user_name
-
-
-def is_effective_group(group_id_or_name):
-    """Returns True if group_id_or_name is effective group (id/name)."""
-    egid = os.getegid()
-    if str(group_id_or_name) == str(egid):
-        return True
-    effective_group_name = grp.getgrgid(egid).gr_name
-    return group_id_or_name == effective_group_name
-
-
-class UnixDomainHTTPConnection(httplib.HTTPConnection):
-    """Connection class for HTTP over UNIX domain socket."""
-    def __init__(self, host, port=None, strict=None, timeout=None,
-                 proxy_info=None):
-        httplib.HTTPConnection.__init__(self, host, port, strict)
-        self.timeout = timeout
-        self.socket_path = cfg.CONF.metadata_proxy_socket
-
-    def connect(self):
-        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        if self.timeout:
-            self.sock.settimeout(self.timeout)
-        self.sock.connect(self.socket_path)
-
-
-class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol):
-    def __init__(self, request, client_address, server):
-        if client_address == '':
-            client_address = ('<local>', 0)
-        # base class is old-style, so super does not work properly
-        eventlet.wsgi.HttpProtocol.__init__(self, request, client_address,
-                                            server)
-
-
-class UnixDomainWSGIServer(wsgi.Server):
-    def __init__(self, name):
-        self._socket = None
-        self._launcher = None
-        self._server = None
-        super(UnixDomainWSGIServer, self).__init__(name)
-
-    def start(self, application, file_socket, workers, backlog, mode=None):
-        self._socket = eventlet.listen(file_socket,
-                                       family=socket.AF_UNIX,
-                                       backlog=backlog)
-        if mode is not None:
-            os.chmod(file_socket, mode)
-
-        self._launch(application, workers=workers)
-
-    def _run(self, application, socket):
-        """Start a WSGI service in a new green thread."""
-        logger = logging.getLogger('eventlet.wsgi.server')
-        eventlet.wsgi.server(socket,
-                             application,
-                             max_size=self.num_threads,
-                             protocol=UnixDomainHttpProtocol,
-                             log=logger)
diff --git a/neutron/agent/metadata/__init__.py b/neutron/agent/metadata/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py
deleted file mode 100644 (file)
index ab5c93f..0000000
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import hashlib
-import hmac
-
-import httplib2
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_service import loopingcall
-import six
-import six.moves.urllib.parse as urlparse
-import webob
-
-from neutron._i18n import _, _LE, _LW
-from neutron.agent.linux import utils as agent_utils
-from neutron.agent.metadata import config
-from neutron.agent import rpc as agent_rpc
-from neutron.common import constants as n_const
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron import context
-from neutron.openstack.common.cache import cache
-
-LOG = logging.getLogger(__name__)
-
-MODE_MAP = {
-    config.USER_MODE: 0o644,
-    config.GROUP_MODE: 0o664,
-    config.ALL_MODE: 0o666,
-}
-
-
-class MetadataPluginAPI(object):
-    """Agent-side RPC for metadata agent-to-plugin interaction.
-
-    This class implements the client side of an rpc interface used by the
-    metadata service to make calls back into the Neutron plugin.  The server
-    side is defined in
-    neutron.api.rpc.handlers.metadata_rpc.MetadataRpcCallback.  For more
-    information about changing rpc interfaces, see
-    doc/source/devref/rpc_api.rst.
-
-    API version history:
-        1.0 - Initial version.
-    """
-
-    def __init__(self, topic):
-        target = oslo_messaging.Target(
-            topic=topic,
-            namespace=n_const.RPC_NAMESPACE_METADATA,
-            version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def get_ports(self, context, filters):
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'get_ports', filters=filters)
-
-
-class MetadataProxyHandler(object):
-
-    def __init__(self, conf):
-        self.conf = conf
-        if self.conf.cache_url:
-            self._cache = cache.get_cache(self.conf.cache_url)
-        else:
-            self._cache = False
-
-        self.plugin_rpc = MetadataPluginAPI(topics.PLUGIN)
-        self.context = context.get_admin_context_without_session()
-
-    @webob.dec.wsgify(RequestClass=webob.Request)
-    def __call__(self, req):
-        try:
-            LOG.debug("Request: %s", req)
-
-            instance_id, tenant_id = self._get_instance_and_tenant_id(req)
-            if instance_id:
-                return self._proxy_request(instance_id, tenant_id, req)
-            else:
-                return webob.exc.HTTPNotFound()
-
-        except Exception:
-            LOG.exception(_LE("Unexpected error."))
-            msg = _('An unknown error has occurred. '
-                    'Please try your request again.')
-            explanation = six.text_type(msg)
-            return webob.exc.HTTPInternalServerError(explanation=explanation)
-
-    def _get_ports_from_server(self, router_id=None, ip_address=None,
-                               networks=None):
-        """Get ports from server."""
-        filters = self._get_port_filters(router_id, ip_address, networks)
-        return self.plugin_rpc.get_ports(self.context, filters)
-
-    def _get_port_filters(self, router_id=None, ip_address=None,
-                          networks=None):
-        filters = {}
-        if router_id:
-            filters['device_id'] = [router_id]
-            filters['device_owner'] = n_const.ROUTER_INTERFACE_OWNERS
-        if ip_address:
-            filters['fixed_ips'] = {'ip_address': [ip_address]}
-        if networks:
-            filters['network_id'] = networks
-
-        return filters
-
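-    # For example (hypothetical values), the remote-address lookup builds
-    # filters such as:
-    #     {'fixed_ips': {'ip_address': ['10.0.0.4']},
-    #      'network_id': ('<network-uuid>',)}
-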
-    @utils.cache_method_results
-    def _get_router_networks(self, router_id):
-        """Find all networks connected to given router."""
-        internal_ports = self._get_ports_from_server(router_id=router_id)
-        return tuple(p['network_id'] for p in internal_ports)
-
-    @utils.cache_method_results
-    def _get_ports_for_remote_address(self, remote_address, networks):
-        """Get list of ports that has given ip address and are part of
-        given networks.
-
-        :param networks: list of networks in which the ip address will be
-                         searched for
-
-        """
-        return self._get_ports_from_server(networks=networks,
-                                           ip_address=remote_address)
-
-    def _get_ports(self, remote_address, network_id=None, router_id=None):
-        """Search for all ports that contain passed ip address and belongs to
-        given network.
-
-        If no network is passed ports are searched on all networks connected to
-        given router. Either one of network_id or router_id must be passed.
-
-        """
-        if network_id:
-            networks = (network_id,)
-        elif router_id:
-            networks = self._get_router_networks(router_id)
-        else:
-            raise TypeError(_("Either network_id or router_id must be "
-                              "passed to the _get_ports method."))
-
-        return self._get_ports_for_remote_address(remote_address, networks)
-
-    def _get_instance_and_tenant_id(self, req):
-        remote_address = req.headers.get('X-Forwarded-For')
-        network_id = req.headers.get('X-Neutron-Network-ID')
-        router_id = req.headers.get('X-Neutron-Router-ID')
-
-        ports = self._get_ports(remote_address, network_id, router_id)
-
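-        # Metadata can only be served when the request maps to exactly
-        # one port; an empty or ambiguous match yields (None, None),
-        # which the caller turns into a 404.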
-        if len(ports) == 1:
-            return ports[0]['device_id'], ports[0]['tenant_id']
-        return None, None
-
-    def _proxy_request(self, instance_id, tenant_id, req):
-        headers = {
-            'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
-            'X-Instance-ID': instance_id,
-            'X-Tenant-ID': tenant_id,
-            'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
-        }
-
-        nova_ip_port = '%s:%s' % (self.conf.nova_metadata_ip,
-                                  self.conf.nova_metadata_port)
-        url = urlparse.urlunsplit((
-            self.conf.nova_metadata_protocol,
-            nova_ip_port,
-            req.path_info,
-            req.query_string,
-            ''))
-
-        h = httplib2.Http(
-            ca_certs=self.conf.auth_ca_cert,
-            disable_ssl_certificate_validation=self.conf.nova_metadata_insecure
-        )
-        if self.conf.nova_client_cert and self.conf.nova_client_priv_key:
-            h.add_certificate(self.conf.nova_client_priv_key,
-                              self.conf.nova_client_cert,
-                              nova_ip_port)
-        resp, content = h.request(url, method=req.method, headers=headers,
-                                  body=req.body)
-
-        if resp.status == 200:
-            LOG.debug(str(resp))
-            req.response.content_type = resp['content-type']
-            req.response.body = content
-            return req.response
-        elif resp.status == 403:
-            LOG.warn(_LW(
-                'The remote metadata server responded with Forbidden. This '
-                'response usually occurs when shared secrets do not match.'
-            ))
-            return webob.exc.HTTPForbidden()
-        elif resp.status == 400:
-            return webob.exc.HTTPBadRequest()
-        elif resp.status == 404:
-            return webob.exc.HTTPNotFound()
-        elif resp.status == 409:
-            return webob.exc.HTTPConflict()
-        elif resp.status == 500:
-            msg = _(
-                'Remote metadata server experienced an internal server error.'
-            )
-            LOG.warn(msg)
-            explanation = six.text_type(msg)
-            return webob.exc.HTTPInternalServerError(explanation=explanation)
-        else:
-            raise Exception(_('Unexpected response code: %s') % resp.status)
-
-    def _sign_instance_id(self, instance_id):
-        secret = self.conf.metadata_proxy_shared_secret
-        if isinstance(secret, six.text_type):
-            secret = secret.encode('utf-8')
-        if isinstance(instance_id, six.text_type):
-            instance_id = instance_id.encode('utf-8')
-        return hmac.new(secret, instance_id, hashlib.sha256).hexdigest()
-
-
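-# A hedged sketch of the verifying side (variable names hypothetical):
-# nova recomputes the same HMAC-SHA256 over the instance id with the
-# shared secret and compares it to the X-Instance-ID-Signature header:
-#
-#     expected = hmac.new(secret, instance_id, hashlib.sha256).hexdigest()
-#     valid = hmac.compare_digest(expected, signature)
-
-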
-class UnixDomainMetadataProxy(object):
-
-    def __init__(self, conf):
-        self.conf = conf
-        agent_utils.ensure_directory_exists_without_file(
-            cfg.CONF.metadata_proxy_socket)
-        self._init_state_reporting()
-
-    def _init_state_reporting(self):
-        self.context = context.get_admin_context_without_session()
-        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
-        self.agent_state = {
-            'binary': 'neutron-metadata-agent',
-            'host': cfg.CONF.host,
-            'topic': 'N/A',
-            'configurations': {
-                'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket,
-                'nova_metadata_ip': cfg.CONF.nova_metadata_ip,
-                'nova_metadata_port': cfg.CONF.nova_metadata_port,
-                'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats,
-            },
-            'start_flag': True,
-            'agent_type': n_const.AGENT_TYPE_METADATA}
-        report_interval = cfg.CONF.AGENT.report_interval
-        if report_interval:
-            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
-                self._report_state)
-            self.heartbeat.start(interval=report_interval)
-
-    def _report_state(self):
-        try:
-            self.state_rpc.report_state(
-                self.context,
-                self.agent_state,
-                use_call=self.agent_state.get('start_flag'))
-        except AttributeError:
-            # This means the server does not support report_state
-            LOG.warn(_LW('Neutron server does not support state report.'
-                         ' State report for this agent will be disabled.'))
-            self.heartbeat.stop()
-            return
-        except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
-            return
-        self.agent_state.pop('start_flag', None)
-
-    def _get_socket_mode(self):
-        mode = self.conf.metadata_proxy_socket_mode
-        if mode == config.DEDUCE_MODE:
-            user = self.conf.metadata_proxy_user
-            if (not user or user == '0' or user == 'root'
-                    or agent_utils.is_effective_user(user)):
-                # user is agent effective user or root => USER_MODE
-                mode = config.USER_MODE
-            else:
-                group = self.conf.metadata_proxy_group
-                if not group or agent_utils.is_effective_group(group):
-                    # group is agent effective group => GROUP_MODE
-                    mode = config.GROUP_MODE
-                else:
-                    # otherwise => ALL_MODE
-                    mode = config.ALL_MODE
-        return MODE_MAP[mode]
-
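-    # Deduction summary (mirrors the branches above):
-    #     proxy user is root or the agent's user  -> 0o644 (USER_MODE)
-    #     else proxy group is the agent's group   -> 0o664 (GROUP_MODE)
-    #     otherwise                               -> 0o666 (ALL_MODE)
-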
-    def run(self):
-        server = agent_utils.UnixDomainWSGIServer('neutron-metadata-agent')
-        server.start(MetadataProxyHandler(self.conf),
-                     self.conf.metadata_proxy_socket,
-                     workers=self.conf.metadata_workers,
-                     backlog=self.conf.metadata_backlog,
-                     mode=self._get_socket_mode())
-        server.wait()
diff --git a/neutron/agent/metadata/config.py b/neutron/agent/metadata/config.py
deleted file mode 100644 (file)
index a5a79a1..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright 2015 OpenStack Foundation.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.common import utils
-
-
-SHARED_OPTS = [
-    cfg.StrOpt('metadata_proxy_socket',
-               default='$state_path/metadata_proxy',
-               help=_('Location for Metadata Proxy UNIX domain socket.')),
-    cfg.StrOpt('metadata_proxy_user',
-               default='',
-               help=_("User (uid or name) running metadata proxy after "
-                      "its initialization (if empty: agent effective "
-                      "user).")),
-    cfg.StrOpt('metadata_proxy_group',
-               default='',
-               help=_("Group (gid or name) running metadata proxy after "
-                      "its initialization (if empty: agent effective "
-                      "group)."))
-]
-
-
-DRIVER_OPTS = [
-    cfg.BoolOpt('metadata_proxy_watch_log',
-                help=_("Enable/Disable log watch by metadata proxy. It "
-                       "should be disabled when metadata_proxy_user/group "
-                       "is not allowed to read/write its log file and "
-                       "copytruncate logrotate option must be used if "
-                       "logrotate is enabled on metadata proxy log "
-                       "files. Option default value is deduced from "
-                       "metadata_proxy_user: watch log is enabled if "
-                       "metadata_proxy_user is agent effective user "
-                       "id/name.")),
-]
-
-
-METADATA_PROXY_HANDLER_OPTS = [
-     cfg.StrOpt('auth_ca_cert',
-                help=_("Certificate Authority public key (CA cert) "
-                       "file for ssl")),
-     cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
-                help=_("IP address used by Nova metadata server.")),
-     cfg.PortOpt('nova_metadata_port',
-                 default=8775,
-                 help=_("TCP Port used by Nova metadata server.")),
-     cfg.StrOpt('metadata_proxy_shared_secret',
-                default='',
-                help=_('When proxying metadata requests, Neutron signs the '
-                       'Instance-ID header with a shared secret to prevent '
-                       'spoofing. You may select any string for a secret, '
-                       'but it must match here and in the configuration used '
-                       'by the Nova Metadata Server. NOTE: Nova uses the same '
-                       'config key, but in [neutron] section.'),
-                secret=True),
-     cfg.StrOpt('nova_metadata_protocol',
-                default='http',
-                choices=['http', 'https'],
-                help=_("Protocol to access nova metadata, http or https")),
-     cfg.BoolOpt('nova_metadata_insecure', default=False,
-                 help=_("Allow to perform insecure SSL (https) requests to "
-                        "nova metadata")),
-     cfg.StrOpt('nova_client_cert',
-                default='',
-                help=_("Client certificate for nova metadata api server.")),
-     cfg.StrOpt('nova_client_priv_key',
-                default='',
-                help=_("Private key of client certificate."))
-]
-
-DEDUCE_MODE = 'deduce'
-USER_MODE = 'user'
-GROUP_MODE = 'group'
-ALL_MODE = 'all'
-SOCKET_MODES = (DEDUCE_MODE, USER_MODE, GROUP_MODE, ALL_MODE)
-
-
-UNIX_DOMAIN_METADATA_PROXY_OPTS = [
-    cfg.StrOpt('metadata_proxy_socket_mode',
-               default=DEDUCE_MODE,
-               choices=SOCKET_MODES,
-               help=_("Metadata Proxy UNIX domain socket mode, 4 values "
-                      "allowed: "
-                      "'deduce': deduce mode from metadata_proxy_user/group "
-                      "values, "
-                      "'user': set metadata proxy socket mode to 0o644, to "
-                      "use when metadata_proxy_user is agent effective user "
-                      "or root, "
-                      "'group': set metadata proxy socket mode to 0o664, to "
-                      "use when metadata_proxy_group is agent effective "
-                      "group or root, "
-                      "'all': set metadata proxy socket mode to 0o666, to use "
-                      "otherwise.")),
-    cfg.IntOpt('metadata_workers',
-               default=utils.cpu_count() // 2,
-               help=_('Number of separate worker processes for metadata '
-                      'server (defaults to half of the number of CPUs)')),
-    cfg.IntOpt('metadata_backlog',
-               default=4096,
-               help=_('Number of backlog requests to configure the '
-                      'metadata server socket with'))
-]
diff --git a/neutron/agent/metadata/driver.py b/neutron/agent/metadata/driver.py
deleted file mode 100644 (file)
index cee81c0..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-# Copyright 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from neutron.agent.common import config
-from neutron.agent.l3 import ha_router
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import external_process
-from neutron.agent.linux import utils
-from neutron.callbacks import events
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants
-from neutron.common import exceptions
-
-
-# Name under which metadata proxies are registered with the process monitor
-METADATA_SERVICE_NAME = 'metadata-proxy'
-
-
-class MetadataDriver(object):
-
-    def __init__(self, l3_agent):
-        self.metadata_port = l3_agent.conf.metadata_port
-        self.metadata_access_mark = l3_agent.conf.metadata_access_mark
-        registry.subscribe(
-            after_router_added, resources.ROUTER, events.AFTER_CREATE)
-        registry.subscribe(
-            before_router_removed, resources.ROUTER, events.BEFORE_DELETE)
-
-    @classmethod
-    def metadata_filter_rules(cls, port, mark):
-        return [('INPUT', '-m mark --mark %s/%s -j ACCEPT' %
-                 (mark, constants.ROUTER_MARK_MASK)),
-                ('INPUT', '-p tcp -m tcp --dport %s '
-                 '-j DROP' % port)]
-
-    @classmethod
-    def metadata_mangle_rules(cls, mark):
-        return [('PREROUTING', '-d 169.254.169.254/32 '
-                 '-i %(interface_name)s '
-                 '-p tcp -m tcp --dport 80 '
-                 '-j MARK --set-xmark %(value)s/%(mask)s' %
-                 {'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+',
-                  'value': mark,
-                  'mask': constants.ROUTER_MARK_MASK})]
-
-    @classmethod
-    def metadata_nat_rules(cls, port):
-        return [('PREROUTING', '-d 169.254.169.254/32 '
-                 '-i %(interface_name)s '
-                 '-p tcp -m tcp --dport 80 -j REDIRECT '
-                 '--to-port %(port)s' %
-                 {'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+',
-                  'port': port})]
-
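-    # Rendered examples of the rules above (values assume the default
-    # mark 0x1, metadata port 9697, and the 'qr-' internal device
-    # prefix; the mask is constants.ROUTER_MARK_MASK):
-    #   mangle: -d 169.254.169.254/32 -i qr-+ -p tcp -m tcp --dport 80
-    #           -j MARK --set-xmark 0x1/<mask>
-    #   nat:    -d 169.254.169.254/32 -i qr-+ -p tcp -m tcp --dport 80
-    #           -j REDIRECT --to-port 9697
-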
-    @classmethod
-    def _get_metadata_proxy_user_group_watchlog(cls, conf):
-        user = conf.metadata_proxy_user or str(os.geteuid())
-        group = conf.metadata_proxy_group or str(os.getegid())
-
-        watch_log = conf.metadata_proxy_watch_log
-        if watch_log is None:
-            # NOTE(cbrandily): Commonly, log watching can be enabled only
-            # when metadata proxy user is agent effective user (id/name).
-            watch_log = utils.is_effective_user(user)
-
-        return user, group, watch_log
-
-    @classmethod
-    def _get_metadata_proxy_callback(cls, port, conf, network_id=None,
-                                     router_id=None):
-        uuid = network_id or router_id
-        if uuid is None:
-            raise exceptions.NetworkIdOrRouterIdRequiredError()
-
-        if network_id:
-            lookup_param = '--network_id=%s' % network_id
-        else:
-            lookup_param = '--router_id=%s' % router_id
-
-        def callback(pid_file):
-            metadata_proxy_socket = conf.metadata_proxy_socket
-            user, group, watch_log = (
-                cls._get_metadata_proxy_user_group_watchlog(conf))
-            proxy_cmd = ['neutron-ns-metadata-proxy',
-                         '--pid_file=%s' % pid_file,
-                         '--metadata_proxy_socket=%s' % metadata_proxy_socket,
-                         lookup_param,
-                         '--state_path=%s' % conf.state_path,
-                         '--metadata_port=%s' % port,
-                         '--metadata_proxy_user=%s' % user,
-                         '--metadata_proxy_group=%s' % group]
-            proxy_cmd.extend(config.get_log_args(
-                conf, 'neutron-ns-metadata-proxy-%s.log' % uuid,
-                metadata_proxy_watch_log=watch_log))
-            return proxy_cmd
-
-        return callback
-
-    @classmethod
-    def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
-                                       network_id=None, router_id=None):
-        uuid = network_id or router_id
-        callback = cls._get_metadata_proxy_callback(
-            port, conf, network_id=network_id, router_id=router_id)
-        pm = cls._get_metadata_proxy_process_manager(uuid, conf,
-                                                     ns_name=ns_name,
-                                                     callback=callback)
-        pm.enable()
-        monitor.register(uuid, METADATA_SERVICE_NAME, pm)
-
-    @classmethod
-    def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf):
-        monitor.unregister(uuid, METADATA_SERVICE_NAME)
-        # The namespace name is not needed by disable(), so skip it here
-        pm = cls._get_metadata_proxy_process_manager(uuid, conf)
-        pm.disable()
-
-    @classmethod
-    def _get_metadata_proxy_process_manager(cls, router_id, conf, ns_name=None,
-                                            callback=None):
-        return external_process.ProcessManager(
-            conf=conf,
-            uuid=router_id,
-            namespace=ns_name,
-            default_cmd_callback=callback)
-
-
-def after_router_added(resource, event, l3_agent, **kwargs):
-    router = kwargs['router']
-    proxy = l3_agent.metadata_driver
-    for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
-                                           proxy.metadata_access_mark):
-        router.iptables_manager.ipv4['filter'].add_rule(c, r)
-    for c, r in proxy.metadata_mangle_rules(proxy.metadata_access_mark):
-        router.iptables_manager.ipv4['mangle'].add_rule(c, r)
-    for c, r in proxy.metadata_nat_rules(proxy.metadata_port):
-        router.iptables_manager.ipv4['nat'].add_rule(c, r)
-    router.iptables_manager.apply()
-
-    if not isinstance(router, ha_router.HaRouter):
-        proxy.spawn_monitored_metadata_proxy(
-            l3_agent.process_monitor,
-            router.ns_name,
-            proxy.metadata_port,
-            l3_agent.conf,
-            router_id=router.router_id)
-
-
-def before_router_removed(resource, event, l3_agent, **kwargs):
-    router = kwargs['router']
-    proxy = l3_agent.metadata_driver
-
-    proxy.destroy_monitored_metadata_proxy(l3_agent.process_monitor,
-                                          router.router['id'],
-                                          l3_agent.conf)
diff --git a/neutron/agent/metadata/namespace_proxy.py b/neutron/agent/metadata/namespace_proxy.py
deleted file mode 100644 (file)
index 8a226a6..0000000
+++ /dev/null
@@ -1,190 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import httplib2
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_service import wsgi as base_wsgi
-from oslo_utils import encodeutils
-import six
-import six.moves.urllib.parse as urlparse
-import webob
-
-from neutron._i18n import _, _LE
-from neutron.agent.linux import daemon
-from neutron.agent.linux import utils as agent_utils
-from neutron.common import config
-from neutron.common import exceptions
-from neutron.common import utils
-from neutron import wsgi
-
-LOG = logging.getLogger(__name__)
-
-
-class NetworkMetadataProxyHandler(object):
-    """Proxy AF_INET metadata request through Unix Domain socket.
-
-    The Unix domain socket allows the proxy to access resources that are
-    not accessible within the isolated tenant context.
-    """
-
-    def __init__(self, network_id=None, router_id=None):
-        self.network_id = network_id
-        self.router_id = router_id
-
-        if network_id is None and router_id is None:
-            raise exceptions.NetworkIdOrRouterIdRequiredError()
-
-    @webob.dec.wsgify(RequestClass=base_wsgi.Request)
-    def __call__(self, req):
-        LOG.debug("Request: %s", req)
-        try:
-            return self._proxy_request(req.remote_addr,
-                                       req.method,
-                                       req.path_info,
-                                       req.query_string,
-                                       req.body)
-        except Exception:
-            LOG.exception(_LE("Unexpected error."))
-            msg = _('An unknown error has occurred. '
-                    'Please try your request again.')
-            explanation = six.text_type(msg)
-            return webob.exc.HTTPInternalServerError(explanation=explanation)
-
-    def _proxy_request(self, remote_address, method, path_info,
-                       query_string, body):
-        headers = {
-            'X-Forwarded-For': remote_address,
-        }
-
-        if self.router_id:
-            headers['X-Neutron-Router-ID'] = self.router_id
-        else:
-            headers['X-Neutron-Network-ID'] = self.network_id
-
-        url = urlparse.urlunsplit((
-            'http',
-            '169.254.169.254',  # a dummy value to make the request proper
-            path_info,
-            query_string,
-            ''))
-
-        h = httplib2.Http()
-        resp, content = h.request(
-            url,
-            method=method,
-            headers=headers,
-            body=body,
-            connection_type=agent_utils.UnixDomainHTTPConnection)
-
-        if resp.status == 200:
-            LOG.debug(resp)
-            LOG.debug(encodeutils.safe_decode(content, errors='replace'))
-            response = webob.Response()
-            response.status = resp.status
-            response.headers['Content-Type'] = resp['content-type']
-            response.body = wsgi.encode_body(content)
-            return response
-        elif resp.status == 400:
-            return webob.exc.HTTPBadRequest()
-        elif resp.status == 404:
-            return webob.exc.HTTPNotFound()
-        elif resp.status == 409:
-            return webob.exc.HTTPConflict()
-        elif resp.status == 500:
-            msg = _(
-                'Remote metadata server experienced an internal server error.'
-            )
-            LOG.debug(msg)
-            explanation = six.text_type(msg)
-            return webob.exc.HTTPInternalServerError(explanation=explanation)
-        else:
-            raise Exception(_('Unexpected response code: %s') % resp.status)
-
-
-class ProxyDaemon(daemon.Daemon):
-    def __init__(self, pidfile, port, network_id=None, router_id=None,
-                 user=None, group=None, watch_log=True):
-        uuid = network_id or router_id
-        super(ProxyDaemon, self).__init__(pidfile, uuid=uuid, user=user,
-                                         group=group, watch_log=watch_log)
-        self.network_id = network_id
-        self.router_id = router_id
-        self.port = port
-
-    def run(self):
-        handler = NetworkMetadataProxyHandler(
-            self.network_id,
-            self.router_id)
-        proxy = wsgi.Server('neutron-network-metadata-proxy')
-        proxy.start(handler, self.port)
-
-        # Drop privileges after port bind
-        super(ProxyDaemon, self).run()
-
-        proxy.wait()
-
-
-def main():
-    opts = [
-        cfg.StrOpt('network_id',
-                   help=_('Network that will have instance metadata '
-                          'proxied.')),
-        cfg.StrOpt('router_id',
-                   help=_('Router that will have connected instances\' '
-                          'metadata proxied.')),
-        cfg.StrOpt('pid_file',
-                   help=_('Location of pid file of this process.')),
-        cfg.BoolOpt('daemonize',
-                    default=True,
-                    help=_('Run as daemon.')),
-        cfg.PortOpt('metadata_port',
-                    default=9697,
-                    help=_("TCP Port to listen for metadata server "
-                           "requests.")),
-        cfg.StrOpt('metadata_proxy_socket',
-                   default='$state_path/metadata_proxy',
-                   help=_('Location of Metadata Proxy UNIX domain '
-                          'socket')),
-        cfg.StrOpt('metadata_proxy_user',
-                   help=_("User (uid or name) running metadata proxy after "
-                          "its initialization")),
-        cfg.StrOpt('metadata_proxy_group',
-                   help=_("Group (gid or name) running metadata proxy after "
-                          "its initialization")),
-        cfg.BoolOpt('metadata_proxy_watch_log',
-                    default=True,
-                    help=_("Watch file log. Log watch should be disabled when "
-                           "metadata_proxy_user/group has no read/write "
-                           "permissions on metadata proxy log file.")),
-    ]
-
-    cfg.CONF.register_cli_opts(opts)
-    # Don't get the default configuration file
-    cfg.CONF(project='neutron', default_config_files=[])
-    config.setup_logging()
-    utils.log_opt_values(LOG)
-
-    proxy = ProxyDaemon(cfg.CONF.pid_file,
-                        cfg.CONF.metadata_port,
-                        network_id=cfg.CONF.network_id,
-                        router_id=cfg.CONF.router_id,
-                        user=cfg.CONF.metadata_proxy_user,
-                        group=cfg.CONF.metadata_proxy_group,
-                        watch_log=cfg.CONF.metadata_proxy_watch_log)
-
-    if cfg.CONF.daemonize:
-        proxy.start()
-    else:
-        proxy.run()
diff --git a/neutron/agent/metadata_agent.py b/neutron/agent/metadata_agent.py
deleted file mode 100644 (file)
index b392ed1..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2015 OpenStack Foundation.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron.agent.common import config as agent_conf
-from neutron.agent.metadata import agent
-from neutron.agent.metadata import config as metadata_conf
-from neutron.common import config
-from neutron.common import utils
-from neutron.openstack.common.cache import cache
-
-LOG = logging.getLogger(__name__)
-
-
-def main():
-    cfg.CONF.register_opts(metadata_conf.SHARED_OPTS)
-    cfg.CONF.register_opts(metadata_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS)
-    cfg.CONF.register_opts(metadata_conf.METADATA_PROXY_HANDLER_OPTS)
-    cache.register_oslo_configs(cfg.CONF)
-    cfg.CONF.set_default(name='cache_url', default='memory://?default_ttl=5')
-    agent_conf.register_agent_state_opts_helper(cfg.CONF)
-    config.init(sys.argv[1:])
-    config.setup_logging()
-    utils.log_opt_values(LOG)
-    proxy = agent.UnixDomainMetadataProxy(cfg.CONF)
-    proxy.run()
diff --git a/neutron/agent/ovsdb/__init__.py b/neutron/agent/ovsdb/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/ovsdb/api.py b/neutron/agent/ovsdb/api.py
deleted file mode 100644 (file)
index 5a21782..0000000
+++ /dev/null
@@ -1,378 +0,0 @@
-# Copyright (c) 2014 Openstack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import collections
-import uuid
-
-from oslo_config import cfg
-from oslo_utils import importutils
-import six
-
-from neutron._i18n import _
-
-interface_map = {
-    'vsctl': 'neutron.agent.ovsdb.impl_vsctl.OvsdbVsctl',
-    'native': 'neutron.agent.ovsdb.impl_idl.OvsdbIdl',
-}
-
-OPTS = [
-    cfg.StrOpt('ovsdb_interface',
-               choices=interface_map.keys(),
-               default='vsctl',
-               help=_('The interface for interacting with the OVSDB')),
-    cfg.StrOpt('ovsdb_connection',
-               default='tcp:127.0.0.1:6640',
-               help=_('The connection string for the native OVSDB backend. '
-                      'Requires the native ovsdb_interface to be enabled.'))
-]
-cfg.CONF.register_opts(OPTS, 'OVS')
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Command(object):
-    """An OVSDB command that can be executed in a transaction
-
-    :attr result: The result of executing the command in a transaction
-    """
-
-    @abc.abstractmethod
-    def execute(self, **transaction_options):
-        """Immediately execute an OVSDB command
-
-        This implicitly creates a transaction with the passed options and then
-        executes it, returning the value of the executed transaction
-
-        :param transaction_options: Options to pass to the transaction
-        """
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Transaction(object):
-    @abc.abstractmethod
-    def commit(self):
-        """Commit the transaction to OVSDB"""
-
-    @abc.abstractmethod
-    def add(self, command):
-        """Append an OVSDB operation to the transaction"""
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, tb):
-        if exc_type is None:
-            self.result = self.commit()
-
-
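-# A hedged usage sketch ('ovsdb_api' stands for any concrete API
-# implementation): commands queue on the transaction, and each command's
-# .result is populated by the implementation once the 'with' block
-# commits.
-#
-#     with ovsdb_api.transaction(check_error=True) as txn:
-#         txn.add(ovsdb_api.add_br('br-int'))
-#         txn.add(ovsdb_api.add_port('br-int', 'tap0'))
-
-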
-@six.add_metaclass(abc.ABCMeta)
-class API(object):
-    def __init__(self, context):
-        self.context = context
-
-    @staticmethod
-    def get(context, iface_name=None):
-        """Return the configured OVSDB API implementation"""
-        iface = importutils.import_class(
-            interface_map[iface_name or cfg.CONF.OVS.ovsdb_interface])
-        return iface(context)
-
-    @abc.abstractmethod
-    def transaction(self, check_error=False, log_errors=True, **kwargs):
-        """Create a transaction
-
-        :param check_error: Allow the transaction to raise an exception?
-        :type check_error:  bool
-        :param log_errors:  Log an error if the transaction fails?
-        :type log_errors:   bool
-        :returns: A new transaction
-        :rtype: :class:`Transaction`
-        """
-
-    @abc.abstractmethod
-    def add_br(self, name, may_exist=True, datapath_type=None):
-        """Create a command to add an OVS bridge
-
-        :param name:            The name of the bridge
-        :type name:             string
-        :param may_exist:       Do not fail if bridge already exists
-        :type may_exist:        bool
-        :param datapath_type:   The datapath_type of the bridge
-        :type datapath_type:    string
-        :returns:               :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def del_br(self, name, if_exists=True):
-        """Create a command to delete an OVS bridge
-
-        :param name:      The name of the bridge
-        :type name:       string
-        :param if_exists: Do not fail if the bridge does not exist
-        :type if_exists:  bool
-        :returns:        :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def br_exists(self, name):
-        """Create a command to check if an OVS bridge exists
-
-        :param name: The name of the bridge
-        :type name:  string
-        :returns:    :class:`Command` with bool result
-        """
-
-    @abc.abstractmethod
-    def port_to_br(self, name):
-        """Create a command to return the name of the bridge with the port
-
-        :param name: The name of the OVS port
-        :type name:  string
-        :returns:    :class:`Command` with bridge name result
-        """
-
-    @abc.abstractmethod
-    def iface_to_br(self, name):
-        """Create a command to return the name of the bridge with the interface
-
-        :param name: The name of the OVS interface
-        :type name:  string
-        :returns:    :class:`Command` with bridge name result
-        """
-
-    @abc.abstractmethod
-    def list_br(self):
-        """Create a command to return the current list of OVS bridge names
-
-        :returns: :class:`Command` with list of bridge names result
-        """
-
-    @abc.abstractmethod
-    def br_get_external_id(self, name, field):
-        """Create a command to return a field from the Bridge's external_ids
-
-        :param name:  The name of the OVS Bridge
-        :type name:   string
-        :param field: The external_ids field to return
-        :type field:  string
-        :returns:     :class:`Command` with field value result
-        """
-
-    @abc.abstractmethod
-    def db_create(self, table, **col_values):
-        """Create a command to create new record
-
-        :param table:      The OVS table containing the record to be created
-        :type table:       string
-        :param col_values: The columns and their associated values
-                           to be set after create
-        :type col_values:  Dictionary of columns id's and values
-        :returns:          :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def db_destroy(self, table, record):
-        """Create a command to destroy a record
-
-        :param table:      The OVS table containing the record to be destroyed
-        :type table:       string
-        :param record:     The record id (name/uuid) to be destroyed
-        :type record:      uuid/string
-        :returns:          :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def db_set(self, table, record, *col_values):
-        """Create a command to set fields in a record
-
-        :param table:      The OVS table containing the record to be modified
-        :type table:       string
-        :param record:     The record id (name/uuid) to be modified
-        :type table:       string
-        :param col_values: The columns and their associated values
-        :type col_values:  Tuples of (column, value). Values may be atomic
-                           values or unnested sequences/mappings
-        :returns:          :class:`Command` with no result
-        """
-        # TODO(twilson) Consider handling kwargs for arguments where order
-        # doesn't matter. Though that would break the assert_called_once_with
-        # unit tests
-
-    @abc.abstractmethod
-    def db_clear(self, table, record, column):
-        """Create a command to clear a field's value in a record
-
-        :param table:  The OVS table containing the record to be modified
-        :type table:   string
-        :param record: The record id (name/uuid) to be modified
-        :type record:  string
-        :param column: The column whose value should be cleared
-        :type column:  string
-        :returns:      :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def db_get(self, table, record, column):
-        """Create a command to return a field's value in a record
-
-        :param table:  The OVS table containing the record to be queried
-        :type table:   string
-        :param record: The record id (name/uuid) to be queried
-        :type record:  string
-        :param column: The column whose value should be returned
-        :type column:  string
-        :returns:      :class:`Command` with the field's value result
-        """
-
-    @abc.abstractmethod
-    def db_list(self, table, records=None, columns=None, if_exists=False):
-        """Create a command to return a list of OVSDB records
-
-        :param table:     The OVS table to query
-        :type table:      string
-        :param records:   The records to return values from
-        :type records:    list of record ids (names/uuids)
-        :param columns:   Limit results to only columns, None means all columns
-        :type columns:    list of column names or None
-        :param if_exists: Do not fail if the record does not exist
-        :type if_exists:  bool
-        :returns:         :class:`Command` with [{'column', value}, ...] result
-        """
-
-    @abc.abstractmethod
-    def db_find(self, table, *conditions, **kwargs):
-        """Create a command to return find OVSDB records matching conditions
-
-        :param table:     The OVS table to query
-        :type table:      string
-        :param conditions: The conditions to satisfy the query
-        :type conditions: 3-tuples containing (column, operation, match)
-                          Examples:
-                              atomic: ('tag', '=', 7)
-                              map: ('external_ids', '=', {'iface-id': 'xxx'})
-                              field exists?:
-                                  ('external_ids', '!=', {'iface-id': ''})
-                              set contains?:
-                                  ('protocols', '{>=}', 'OpenFlow13')
-                          See the ovs-vsctl man page for more operations
-        :param columns:   Limit results to only columns, None means all columns
-        :type columns:    list of column names or None
-        :returns:         :class:`Command` with [{'column', value}, ...] result
-        """
-
-    @abc.abstractmethod
-    def set_controller(self, bridge, controllers):
-        """Create a command to set an OVS bridge's OpenFlow controllers
-
-        :param bridge:      The name of the bridge
-        :type bridge:       string
-        :param controllers: The controller strings
-        :type controllers:  list of strings, see ovs-vsctl manpage for format
-        :returns:           :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def del_controller(self, bridge):
-        """Create a command to clear an OVS bridge's OpenFlow controllers
-
-        :param bridge: The name of the bridge
-        :type bridge:  string
-        :returns:      :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def get_controller(self, bridge):
-        """Create a command to return an OVS bridge's OpenFlow controllers
-
-        :param bridge: The name of the bridge
-        :type bridge:  string
-        :returns:      :class:`Command` with list of controller strings result
-        """
-
-    @abc.abstractmethod
-    def set_fail_mode(self, bridge, mode):
-        """Create a command to set an OVS bridge's failure mode
-
-        :param bridge: The name of the bridge
-        :type bridge:  string
-        :param mode:   The failure mode
-        :type mode:    "secure" or "standalone"
-        :returns:      :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def add_port(self, bridge, port, may_exist=True):
-        """Create a command to add a port to an OVS bridge
-
-        :param bridge:    The name of the bridge
-        :type bridge:     string
-        :param port:      The name of the port
-        :type port:       string
-        :param may_exist: Do not fail if the port already exists
-        :type may_exist:  bool
-        :returns:         :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def del_port(self, port, bridge=None, if_exists=True):
-        """Create a command to delete a port an OVS port
-
-        :param port:      The name of the port
-        :type port:       string
-        :param bridge:    Only delete port if it is attached to this bridge
-        :type bridge:     string
-        :param if_exists: Do not fail if the port does not exist
-        :type if_exists:  bool
-        :returns:         :class:`Command` with no result
-        """
-
-    @abc.abstractmethod
-    def list_ports(self, bridge):
-        """Create a command to list the names of ports on a bridge
-
-        :param bridge: The name of the bridge
-        :type bridge:  string
-        :returns:      :class:`Command` with list of port names result
-        """
-
-    @abc.abstractmethod
-    def list_ifaces(self, bridge):
-        """Create a command to list the names of interfaces on a bridge
-
-        :param bridge: The name of the bridge
-        :type bridge:  string
-        :returns:      :class:`Command` with list of interface names result
-        """
-
-
-def val_to_py(val):
-    """Convert a json ovsdb return value to native python object"""
-    if isinstance(val, collections.Sequence) and len(val) == 2:
-        if val[0] == "uuid":
-            return uuid.UUID(val[1])
-        elif val[0] == "set":
-            return [val_to_py(x) for x in val[1]]
-        elif val[0] == "map":
-            return {val_to_py(x): val_to_py(y) for x, y in val[1]}
-    return val
-
-
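-# Examples of the conversion above (inputs follow the OVSDB JSON wire
-# format, RFC 7047):
-#     val_to_py(['set', [1, 2]])       -> [1, 2]
-#     val_to_py(['map', [['k', 'v']]]) -> {'k': 'v'}
-#     val_to_py(['uuid', u])           -> uuid.UUID(u)
-
-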
-def py_to_val(pyval):
-    """Convert python value to ovs-vsctl value argument"""
-    if isinstance(pyval, bool):
-        return 'true' if pyval is True else 'false'
-    elif pyval == '':
-        return '""'
-    else:
-        return pyval
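-
-
-# And the inverse direction, used when building ovs-vsctl arguments:
-#     py_to_val(True)  -> 'true'
-#     py_to_val('')    -> '""'
-#     py_to_val(7)     -> 7  (non-string atoms pass through unchanged)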
diff --git a/neutron/agent/ovsdb/impl_idl.py b/neutron/agent/ovsdb/impl_idl.py
deleted file mode 100644 (file)
index 4ce44c0..0000000
+++ /dev/null
@@ -1,201 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from six.moves import queue as Queue
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import excutils
-from ovs.db import idl
-
-from neutron._i18n import _
-from neutron.agent.ovsdb import api
-from neutron.agent.ovsdb.native import commands as cmd
-from neutron.agent.ovsdb.native import connection
-from neutron.agent.ovsdb.native import idlutils
-
-
-cfg.CONF.import_opt('ovs_vsctl_timeout', 'neutron.agent.common.ovs_lib')
-
-LOG = logging.getLogger(__name__)
-
-
-class Transaction(api.Transaction):
-    def __init__(self, api, ovsdb_connection, timeout,
-                 check_error=False, log_errors=False):
-        self.api = api
-        self.check_error = check_error
-        self.log_errors = log_errors
-        self.commands = []
-        self.results = Queue.Queue(1)
-        self.ovsdb_connection = ovsdb_connection
-        self.timeout = timeout
-
-    def add(self, command):
-        """Add a command to the transaction
-
-        returns The command passed as a convenience
-        """
-
-        self.commands.append(command)
-        return command
-
-    def commit(self):
-        self.ovsdb_connection.queue_txn(self)
-        result = self.results.get()
-        if self.check_error:
-            if isinstance(result, idlutils.ExceptionResult):
-                if self.log_errors:
-                    LOG.error(result.tb)
-                raise result.ex
-        return result
-
-    def do_commit(self):
-        start_time = time.time()
-        attempts = 0
-        while True:
-            elapsed_time = time.time() - start_time
-            if attempts > 0 and elapsed_time > self.timeout:
-                raise RuntimeError("OVS transaction timed out")
-            attempts += 1
-            # TODO(twilson) Make sure we don't loop longer than vsctl_timeout
-            txn = idl.Transaction(self.api.idl)
-            for i, command in enumerate(self.commands):
-                LOG.debug("Running txn command(idx=%(idx)s): %(cmd)s",
-                          {'idx': i, 'cmd': command})
-                try:
-                    command.run_idl(txn)
-                except Exception:
-                    with excutils.save_and_reraise_exception() as ctx:
-                        txn.abort()
-                        if not self.check_error:
-                            ctx.reraise = False
-            seqno = self.api.idl.change_seqno
-            status = txn.commit_block()
-            if status == txn.TRY_AGAIN:
-                LOG.debug("OVSDB transaction returned TRY_AGAIN, retrying")
-                idlutils.wait_for_change(
-                    self.api.idl, self.timeout - elapsed_time,
-                    seqno)
-                continue
-            elif status == txn.ERROR:
-                msg = _("OVSDB Error: %s") % txn.get_error()
-                if self.log_errors:
-                    LOG.error(msg)
-                if self.check_error:
-                    # For now, raise similar error to vsctl/utils.execute()
-                    raise RuntimeError(msg)
-                return
-            elif status == txn.ABORTED:
-                LOG.debug("Transaction aborted")
-                return
-            elif status == txn.UNCHANGED:
-                LOG.debug("Transaction caused no change")
-
-            return [cmd.result for cmd in self.commands]
-
-
-class OvsdbIdl(api.API):
-
-    ovsdb_connection = connection.Connection(cfg.CONF.OVS.ovsdb_connection,
-                                             cfg.CONF.ovs_vsctl_timeout,
-                                             'Open_vSwitch')
-
-    def __init__(self, context):
-        super(OvsdbIdl, self).__init__(context)
-        OvsdbIdl.ovsdb_connection.start()
-        self.idl = OvsdbIdl.ovsdb_connection.idl
-
-    @property
-    def _tables(self):
-        return self.idl.tables
-
-    @property
-    def _ovs(self):
-        return list(self._tables['Open_vSwitch'].rows.values())[0]
-
-    def transaction(self, check_error=False, log_errors=True, **kwargs):
-        return Transaction(self, OvsdbIdl.ovsdb_connection,
-                           self.context.vsctl_timeout,
-                           check_error, log_errors)
-
-    def add_br(self, name, may_exist=True, datapath_type=None):
-        return cmd.AddBridgeCommand(self, name, may_exist, datapath_type)
-
-    def del_br(self, name, if_exists=True):
-        return cmd.DelBridgeCommand(self, name, if_exists)
-
-    def br_exists(self, name):
-        return cmd.BridgeExistsCommand(self, name)
-
-    def port_to_br(self, name):
-        return cmd.PortToBridgeCommand(self, name)
-
-    def iface_to_br(self, name):
-        return cmd.InterfaceToBridgeCommand(self, name)
-
-    def list_br(self):
-        return cmd.ListBridgesCommand(self)
-
-    def br_get_external_id(self, name, field):
-        return cmd.BrGetExternalIdCommand(self, name, field)
-
-    def br_set_external_id(self, name, field, value):
-        return cmd.BrSetExternalIdCommand(self, name, field, value)
-
-    def db_create(self, table, **col_values):
-        return cmd.DbCreateCommand(self, table, **col_values)
-
-    def db_destroy(self, table, record):
-        return cmd.DbDestroyCommand(self, table, record)
-
-    def db_set(self, table, record, *col_values):
-        return cmd.DbSetCommand(self, table, record, *col_values)
-
-    def db_clear(self, table, record, column):
-        return cmd.DbClearCommand(self, table, record, column)
-
-    def db_get(self, table, record, column):
-        return cmd.DbGetCommand(self, table, record, column)
-
-    def db_list(self, table, records=None, columns=None, if_exists=False):
-        return cmd.DbListCommand(self, table, records, columns, if_exists)
-
-    def db_find(self, table, *conditions, **kwargs):
-        return cmd.DbFindCommand(self, table, *conditions, **kwargs)
-
-    def set_controller(self, bridge, controllers):
-        return cmd.SetControllerCommand(self, bridge, controllers)
-
-    def del_controller(self, bridge):
-        return cmd.DelControllerCommand(self, bridge)
-
-    def get_controller(self, bridge):
-        return cmd.GetControllerCommand(self, bridge)
-
-    def set_fail_mode(self, bridge, mode):
-        return cmd.SetFailModeCommand(self, bridge, mode)
-
-    def add_port(self, bridge, port, may_exist=True):
-        return cmd.AddPortCommand(self, bridge, port, may_exist)
-
-    def del_port(self, port, bridge=None, if_exists=True):
-        return cmd.DelPortCommand(self, port, bridge, if_exists)
-
-    def list_ports(self, bridge):
-        return cmd.ListPortsCommand(self, bridge)
-
-    def list_ifaces(self, bridge):
-        return cmd.ListIfacesCommand(self, bridge)
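Worth noting for readers of this file: every method above returns a Command object rather than executing anything; callers either invoke .execute() on a single command or batch several into one OVSDB transaction, as the command classes themselves do. A minimal usage sketch ('ctx' is a stand-in for an agent context carrying vsctl_timeout, which this API expects; it is not defined in this module):

    from neutron.agent.ovsdb import impl_idl

    ovsdb = impl_idl.OvsdbIdl(ctx)  # ctx is assumed, not defined here
    # One-off command, executed immediately:
    bridges = ovsdb.list_br().execute(check_error=True)
    # Several commands committed atomically in a single transaction:
    with ovsdb.transaction(check_error=True) as txn:
        txn.add(ovsdb.add_br('br-test'))
        txn.add(ovsdb.add_port('br-test', 'p0'))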
diff --git a/neutron/agent/ovsdb/impl_vsctl.py b/neutron/agent/ovsdb/impl_vsctl.py
deleted file mode 100644 (file)
index e108a78..0000000
+++ /dev/null
@@ -1,285 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import itertools
-
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-import six
-
-from neutron._i18n import _LE
-from neutron.agent.common import utils
-from neutron.agent.ovsdb import api as ovsdb
-
-LOG = logging.getLogger(__name__)
-
-
-class Transaction(ovsdb.Transaction):
-    def __init__(self, context, check_error=False, log_errors=True, opts=None):
-        self.context = context
-        self.check_error = check_error
-        self.log_errors = log_errors
-        self.opts = ["--timeout=%d" % self.context.vsctl_timeout,
-                     '--oneline', '--format=json']
-        if opts:
-            self.opts += opts
-        self.commands = []
-
-    def add(self, command):
-        self.commands.append(command)
-        return command
-
-    def commit(self):
-        args = []
-        for cmd in self.commands:
-            cmd.result = None
-            args += cmd.vsctl_args()
-        res = self.run_vsctl(args)
-        if res is None:
-            return
-        res = res.replace(r'\\', '\\').splitlines()
-        for i, record in enumerate(res):
-            self.commands[i].result = record
-        return [cmd.result for cmd in self.commands]
-
-    def run_vsctl(self, args):
-        full_args = ["ovs-vsctl"] + self.opts + args
-        try:
-            # We log our own errors, so never have utils.execute do it
-            return utils.execute(full_args, run_as_root=True,
-                                 log_fail_as_error=False).rstrip()
-        except Exception as e:
-            with excutils.save_and_reraise_exception() as ctxt:
-                if self.log_errors:
-                    LOG.error(_LE("Unable to execute %(cmd)s. "
-                                  "Exception: %(exception)s"),
-                              {'cmd': full_args, 'exception': e})
-                if not self.check_error:
-                    ctxt.reraise = False
-
-
-class BaseCommand(ovsdb.Command):
-    def __init__(self, context, cmd, opts=None, args=None):
-        self.context = context
-        self.cmd = cmd
-        self.opts = [] if opts is None else opts
-        self.args = [] if args is None else args
-
-    def execute(self, check_error=False, log_errors=True):
-        with Transaction(self.context, check_error=check_error,
-                         log_errors=log_errors) as txn:
-            txn.add(self)
-        return self.result
-
-    def vsctl_args(self):
-        return itertools.chain(('--',), self.opts, (self.cmd,), self.args)
-
-
-class MultiLineCommand(BaseCommand):
-    """Command for ovs-vsctl commands that return multiple lines"""
-    @property
-    def result(self):
-        return self._result
-
-    @result.setter
-    def result(self, raw_result):
-        self._result = raw_result.split(r'\n') if raw_result else []
-
-
-class DbCommand(BaseCommand):
-    def __init__(self, context, cmd, opts=None, args=None, columns=None):
-        if opts is None:
-            opts = []
-        if columns:
-            opts += ['--columns=%s' % ",".join(columns)]
-        super(DbCommand, self).__init__(context, cmd, opts, args)
-
-    @property
-    def result(self):
-        return self._result
-
-    @result.setter
-    def result(self, raw_result):
-        # If check_error=False, run_vsctl can return None
-        if not raw_result:
-            self._result = None
-            return
-
-        try:
-            json = jsonutils.loads(raw_result)
-        except (ValueError, TypeError) as e:
-            # This shouldn't happen, but if it does, log the bad output
-            # and re-raise.
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Could not parse: %(raw_result)s. "
-                              "Exception: %(exception)s"),
-                          {'raw_result': raw_result, 'exception': e})
-
-        headings = json['headings']
-        data = json['data']
-        results = []
-        for record in data:
-            obj = {}
-            for pos, heading in enumerate(headings):
-                obj[heading] = ovsdb.val_to_py(record[pos])
-            results.append(obj)
-        self._result = results
-
-
-class DbGetCommand(DbCommand):
-    @DbCommand.result.setter
-    def result(self, val):
-        # super()'s never worked for setters http://bugs.python.org/issue14965
-        DbCommand.result.fset(self, val)
-        # DbCommand will return [{'column': value}] and we just want value.
-        if self._result:
-            self._result = list(self._result[0].values())[0]
-
-
-class BrExistsCommand(DbCommand):
-    @DbCommand.result.setter
-    def result(self, val):
-        self._result = val is not None
-
-    def execute(self):
-        return super(BrExistsCommand, self).execute(check_error=False,
-                                                    log_errors=False)
-
-
-class OvsdbVsctl(ovsdb.API):
-    def transaction(self, check_error=False, log_errors=True, **kwargs):
-        return Transaction(self.context, check_error, log_errors, **kwargs)
-
-    def add_br(self, name, may_exist=True, datapath_type=None):
-        opts = ['--may-exist'] if may_exist else None
-        params = [name]
-        if datapath_type:
-            params += ['--', 'set', 'Bridge', name,
-                       'datapath_type=%s' % datapath_type]
-        return BaseCommand(self.context, 'add-br', opts, params)
-
-    def del_br(self, name, if_exists=True):
-        opts = ['--if-exists'] if if_exists else None
-        return BaseCommand(self.context, 'del-br', opts, [name])
-
-    def br_exists(self, name):
-        return BrExistsCommand(self.context, 'list', args=['Bridge', name])
-
-    def port_to_br(self, name):
-        return BaseCommand(self.context, 'port-to-br', args=[name])
-
-    def iface_to_br(self, name):
-        return BaseCommand(self.context, 'iface-to-br', args=[name])
-
-    def list_br(self):
-        return MultiLineCommand(self.context, 'list-br')
-
-    def br_get_external_id(self, name, field):
-        return BaseCommand(self.context, 'br-get-external-id',
-                           args=[name, field])
-
-    def db_create(self, table, **col_values):
-        args = [table]
-        args += _set_colval_args(*col_values.items())
-        return BaseCommand(self.context, 'create', args=args)
-
-    def db_destroy(self, table, record):
-        args = [table, record]
-        return BaseCommand(self.context, 'destroy', args=args)
-
-    def db_set(self, table, record, *col_values):
-        args = [table, record]
-        args += _set_colval_args(*col_values)
-        return BaseCommand(self.context, 'set', args=args)
-
-    def db_clear(self, table, record, column):
-        return BaseCommand(self.context, 'clear', args=[table, record,
-                                                        column])
-
-    def db_get(self, table, record, column):
-        # Use the 'list' command, since it can return JSON where 'get'
-        # cannot, so that we get real return types instead of treating
-        # everything as a string.
-        # NOTE: openvswitch can return a single atomic value for fields that
-        # are sets, but only have one value. This makes directly iterating over
-        # the result of a db_get() call unsafe.
-        return DbGetCommand(self.context, 'list', args=[table, record],
-                            columns=[column])
-
-    def db_list(self, table, records=None, columns=None, if_exists=False):
-        opts = ['--if-exists'] if if_exists else None
-        args = [table]
-        if records:
-            args += records
-        return DbCommand(self.context, 'list', opts=opts, args=args,
-                         columns=columns)
-
-    def db_find(self, table, *conditions, **kwargs):
-        columns = kwargs.pop('columns', None)
-        args = itertools.chain([table],
-                               *[_set_colval_args(c) for c in conditions])
-        return DbCommand(self.context, 'find', args=args, columns=columns)
-
-    def set_controller(self, bridge, controllers):
-        return BaseCommand(self.context, 'set-controller',
-                           args=[bridge] + list(controllers))
-
-    def del_controller(self, bridge):
-        return BaseCommand(self.context, 'del-controller', args=[bridge])
-
-    def get_controller(self, bridge):
-        return MultiLineCommand(self.context, 'get-controller', args=[bridge])
-
-    def set_fail_mode(self, bridge, mode):
-        return BaseCommand(self.context, 'set-fail-mode', args=[bridge, mode])
-
-    def add_port(self, bridge, port, may_exist=True):
-        opts = ['--may-exist'] if may_exist else None
-        return BaseCommand(self.context, 'add-port', opts, [bridge, port])
-
-    def del_port(self, port, bridge=None, if_exists=True):
-        opts = ['--if-exists'] if if_exists else None
-        args = filter(None, [bridge, port])
-        return BaseCommand(self.context, 'del-port', opts, args)
-
-    def list_ports(self, bridge):
-        return MultiLineCommand(self.context, 'list-ports', args=[bridge])
-
-    def list_ifaces(self, bridge):
-        return MultiLineCommand(self.context, 'list-ifaces', args=[bridge])
-
-
-def _set_colval_args(*col_values):
-    args = []
-    # TODO(twilson) This is ugly, but set/find args are very similar except for
-    # op. Will try to find a better way to default this op to '='
-    for entry in col_values:
-        if len(entry) == 2:
-            col, op, val = entry[0], '=', entry[1]
-        else:
-            col, op, val = entry
-        if isinstance(val, collections.Mapping):
-            args += ["%s:%s%s%s" % (
-                col, k, op, ovsdb.py_to_val(v)) for k, v in val.items()]
-        elif (isinstance(val, collections.Sequence)
-                and not isinstance(val, six.string_types)):
-            if len(val) == 0:
-                args.append("%s%s%s" % (col, op, "[]"))
-            else:
-                args.append(
-                    "%s%s%s" % (col, op, ",".join(map(ovsdb.py_to_val, val))))
-        else:
-            args.append("%s%s%s" % (col, op, ovsdb.py_to_val(val)))
-    return args
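To make the encoding above concrete, _set_colval_args produces arguments like these (a sketch; it assumes ovsdb.py_to_val passes simple strings through unchanged):

    _set_colval_args(('datapath_type', 'netdev'))
    # -> ['datapath_type=netdev']          (2-tuples default op to '=')
    _set_colval_args(('external_ids', {'iface-id': 'port-1'}))
    # -> ['external_ids:iface-id=port-1']  (one arg per mapping entry)
    _set_colval_args(('name', '!=', 'br-int'))
    # -> ['name!=br-int']                  (3-tuples carry the operator)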
diff --git a/neutron/agent/ovsdb/native/__init__.py b/neutron/agent/ovsdb/native/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/ovsdb/native/commands.py b/neutron/agent/ovsdb/native/commands.py
deleted file mode 100644 (file)
index 23c2eae..0000000
+++ /dev/null
@@ -1,465 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-
-from oslo_log import log as logging
-from oslo_utils import excutils
-
-from neutron._i18n import _, _LE
-from neutron.agent.ovsdb import api
-from neutron.agent.ovsdb.native import idlutils
-
-LOG = logging.getLogger(__name__)
-
-
-class BaseCommand(api.Command):
-    def __init__(self, api):
-        self.api = api
-        self.result = None
-
-    def execute(self, check_error=False, log_errors=True):
-        try:
-            with self.api.transaction(check_error, log_errors) as txn:
-                txn.add(self)
-            return self.result
-        except Exception:
-            with excutils.save_and_reraise_exception() as ctx:
-                if log_errors:
-                    LOG.exception(_LE("Error executing command"))
-                if not check_error:
-                    ctx.reraise = False
-
-    def __str__(self):
-        command_info = self.__dict__
-        return "%s(%s)" % (
-            self.__class__.__name__,
-            ", ".join("%s=%s" % (k, v) for k, v in command_info.items()
-                      if k not in ['api', 'result']))
-
-
-class AddBridgeCommand(BaseCommand):
-    def __init__(self, api, name, may_exist, datapath_type):
-        super(AddBridgeCommand, self).__init__(api)
-        self.name = name
-        self.may_exist = may_exist
-        self.datapath_type = datapath_type
-
-    def run_idl(self, txn):
-        if self.may_exist:
-            br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
-                                       self.name, None)
-            if br:
-                if self.datapath_type:
-                    br.datapath_type = self.datapath_type
-                return
-        row = txn.insert(self.api._tables['Bridge'])
-        row.name = self.name
-        if self.datapath_type:
-            row.datapath_type = self.datapath_type
-        self.api._ovs.verify('bridges')
-        self.api._ovs.bridges = self.api._ovs.bridges + [row]
-
-        # Add the internal bridge port
-        cmd = AddPortCommand(self.api, self.name, self.name, self.may_exist)
-        cmd.run_idl(txn)
-
-        cmd = DbSetCommand(self.api, 'Interface', self.name,
-                           ('type', 'internal'))
-        cmd.run_idl(txn)
-
-
-class DelBridgeCommand(BaseCommand):
-    def __init__(self, api, name, if_exists):
-        super(DelBridgeCommand, self).__init__(api)
-        self.name = name
-        self.if_exists = if_exists
-
-    def run_idl(self, txn):
-        try:
-            br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
-                                       self.name)
-        except idlutils.RowNotFound:
-            if self.if_exists:
-                return
-            else:
-                msg = _("Bridge %s does not exist") % self.name
-                LOG.error(msg)
-                raise RuntimeError(msg)
-        self.api._ovs.verify('bridges')
-        for port in br.ports:
-            cmd = DelPortCommand(self.api, port.name, self.name,
-                                 if_exists=True)
-            cmd.run_idl(txn)
-        bridges = self.api._ovs.bridges
-        bridges.remove(br)
-        self.api._ovs.bridges = bridges
-        self.api._tables['Bridge'].rows[br.uuid].delete()
-
-
-class BridgeExistsCommand(BaseCommand):
-    def __init__(self, api, name):
-        super(BridgeExistsCommand, self).__init__(api)
-        self.name = name
-
-    def run_idl(self, txn):
-        self.result = bool(idlutils.row_by_value(self.api.idl, 'Bridge',
-                                                 'name', self.name, None))
-
-
-class ListBridgesCommand(BaseCommand):
-    def __init__(self, api):
-        super(ListBridgesCommand, self).__init__(api)
-
-    def run_idl(self, txn):
-        # NOTE (twilson) [x.name for x in rows.values()] if no index
-        self.result = [x.name for x in
-                       self.api._tables['Bridge'].rows.values()]
-
-
-class BrGetExternalIdCommand(BaseCommand):
-    def __init__(self, api, name, field):
-        super(BrGetExternalIdCommand, self).__init__(api)
-        self.name = name
-        self.field = field
-
-    def run_idl(self, txn):
-        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.name)
-        self.result = br.external_ids[self.field]
-
-
-class BrSetExternalIdCommand(BaseCommand):
-    def __init__(self, api, name, field, value):
-        super(BrSetExternalIdCommand, self).__init__(api)
-        self.name = name
-        self.field = field
-        self.value = value
-
-    def run_idl(self, txn):
-        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.name)
-        external_ids = getattr(br, 'external_ids', {})
-        external_ids[self.field] = self.value
-        br.external_ids = external_ids
-
-
-class DbCreateCommand(BaseCommand):
-    def __init__(self, api, table, **columns):
-        super(DbCreateCommand, self).__init__(api)
-        self.table = table
-        self.columns = columns
-
-    def run_idl(self, txn):
-        row = txn.insert(self.api._tables[self.table])
-        for col, val in self.columns.items():
-            setattr(row, col, val)
-        self.result = row
-
-
-class DbDestroyCommand(BaseCommand):
-    def __init__(self, api, table, record):
-        super(DbDestroyCommand, self).__init__(api)
-        self.table = table
-        self.record = record
-
-    def run_idl(self, txn):
-        record = idlutils.row_by_record(self.api.idl, self.table, self.record)
-        record.delete()
-
-
-class DbSetCommand(BaseCommand):
-    def __init__(self, api, table, record, *col_values):
-        super(DbSetCommand, self).__init__(api)
-        self.table = table
-        self.record = record
-        self.col_values = col_values
-
-    def run_idl(self, txn):
-        record = idlutils.row_by_record(self.api.idl, self.table, self.record)
-        for col, val in self.col_values:
-            # TODO(twilson) Ugh, the OVS library doesn't like OrderedDict
-            # We're only using it to make a unit test work, so we should fix
-            # this soon.
-            if isinstance(val, collections.OrderedDict):
-                val = dict(val)
-            setattr(record, col, val)
-
-
-class DbClearCommand(BaseCommand):
-    def __init__(self, api, table, record, column):
-        super(DbClearCommand, self).__init__(api)
-        self.table = table
-        self.record = record
-        self.column = column
-
-    def run_idl(self, txn):
-        record = idlutils.row_by_record(self.api.idl, self.table, self.record)
-        # Create an empty value of the column type
-        value = type(getattr(record, self.column))()
-        setattr(record, self.column, value)
-
-
-class DbGetCommand(BaseCommand):
-    def __init__(self, api, table, record, column):
-        super(DbGetCommand, self).__init__(api)
-        self.table = table
-        self.record = record
-        self.column = column
-
-    def run_idl(self, txn):
-        record = idlutils.row_by_record(self.api.idl, self.table, self.record)
-        # TODO(twilson) This feels wrong, but ovs-vsctl returns single results
-        # on set types without the list. The IDL is returning them as lists,
-        # even if the set has the maximum number of items set to 1. Might be
-        # able to inspect the Schema and just do this conversion for that case.
-        result = idlutils.get_column_value(record, self.column)
-        if isinstance(result, list) and len(result) == 1:
-            self.result = result[0]
-        else:
-            self.result = result
-
-
-class SetControllerCommand(BaseCommand):
-    def __init__(self, api, bridge, targets):
-        super(SetControllerCommand, self).__init__(api)
-        self.bridge = bridge
-        self.targets = targets
-
-    def run_idl(self, txn):
-        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
-        controllers = []
-        for target in self.targets:
-            controller = txn.insert(self.api._tables['Controller'])
-            controller.target = target
-            controllers.append(controller)
-        br.verify('controller')
-        br.controller = controllers
-
-
-class DelControllerCommand(BaseCommand):
-    def __init__(self, api, bridge):
-        super(DelControllerCommand, self).__init__(api)
-        self.bridge = bridge
-
-    def run_idl(self, txn):
-        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
-        br.controller = []
-
-
-class GetControllerCommand(BaseCommand):
-    def __init__(self, api, bridge):
-        super(GetControllerCommand, self).__init__(api)
-        self.bridge = bridge
-
-    def run_idl(self, txn):
-        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
-        br.verify('controller')
-        self.result = [c.target for c in br.controller]
-
-
-class SetFailModeCommand(BaseCommand):
-    def __init__(self, api, bridge, mode):
-        super(SetFailModeCommand, self).__init__(api)
-        self.bridge = bridge
-        self.mode = mode
-
-    def run_idl(self, txn):
-        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
-        br.verify('fail_mode')
-        br.fail_mode = self.mode
-
-
-class AddPortCommand(BaseCommand):
-    def __init__(self, api, bridge, port, may_exist):
-        super(AddPortCommand, self).__init__(api)
-        self.bridge = bridge
-        self.port = port
-        self.may_exist = may_exist
-
-    def run_idl(self, txn):
-        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
-        if self.may_exist:
-            port = idlutils.row_by_value(self.api.idl, 'Port', 'name',
-                                         self.port, None)
-            if port:
-                return
-        port = txn.insert(self.api._tables['Port'])
-        port.name = self.port
-        br.verify('ports')
-        ports = getattr(br, 'ports', [])
-        ports.append(port)
-        br.ports = ports
-
-        iface = txn.insert(self.api._tables['Interface'])
-        iface.name = self.port
-        port.verify('interfaces')
-        ifaces = getattr(port, 'interfaces', [])
-        ifaces.append(iface)
-        port.interfaces = ifaces
-
-
-class DelPortCommand(BaseCommand):
-    def __init__(self, api, port, bridge, if_exists):
-        super(DelPortCommand, self).__init__(api)
-        self.port = port
-        self.bridge = bridge
-        self.if_exists = if_exists
-
-    def run_idl(self, txn):
-        try:
-            port = idlutils.row_by_value(self.api.idl, 'Port', 'name',
-                                         self.port)
-        except idlutils.RowNotFound:
-            if self.if_exists:
-                return
-            msg = _("Port %s does not exist") % self.port
-            raise RuntimeError(msg)
-        if self.bridge:
-            br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
-                                       self.bridge)
-        else:
-            br = next(b for b in self.api._tables['Bridge'].rows.values()
-                      if port in b.ports)
-
-        if port.uuid not in br.ports and not self.if_exists:
-            # TODO(twilson) Make real errors across both implementations
-            msg = _("Port %(port)s does not exist on %(bridge)s!") % {
-                'port': self.port, 'bridge': self.bridge
-            }
-            LOG.error(msg)
-            raise RuntimeError(msg)
-
-        br.verify('ports')
-        ports = br.ports
-        ports.remove(port)
-        br.ports = ports
-
-        # Also remove port/interface directly for indexing?
-        port.verify('interfaces')
-        for iface in port.interfaces:
-            self.api._tables['Interface'].rows[iface.uuid].delete()
-        self.api._tables['Port'].rows[port.uuid].delete()
-
-
-class ListPortsCommand(BaseCommand):
-    def __init__(self, api, bridge):
-        super(ListPortsCommand, self).__init__(api)
-        self.bridge = bridge
-
-    def run_idl(self, txn):
-        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
-        self.result = [p.name for p in br.ports if p.name != self.bridge]
-
-
-class ListIfacesCommand(BaseCommand):
-    def __init__(self, api, bridge):
-        super(ListIfacesCommand, self).__init__(api)
-        self.bridge = bridge
-
-    def run_idl(self, txn):
-        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
-        self.result = [i.name for p in br.ports if p.name != self.bridge
-                       for i in p.interfaces]
-
-
-class PortToBridgeCommand(BaseCommand):
-    def __init__(self, api, name):
-        super(PortToBridgeCommand, self).__init__(api)
-        self.name = name
-
-    def run_idl(self, txn):
-        # TODO(twilson) This is expensive!
-        # This traversal of all ports could be eliminated by caching the bridge
-        # name on the Port's external_id field
-        # In fact, if we did that, the only place that uses to_br functions
-        # could just add the external_id field to the conditions passed to find
-        port = idlutils.row_by_value(self.api.idl, 'Port', 'name', self.name)
-        bridges = self.api._tables['Bridge'].rows.values()
-        self.result = next(br.name for br in bridges if port in br.ports)
-
-
-class InterfaceToBridgeCommand(BaseCommand):
-    def __init__(self, api, name):
-        super(InterfaceToBridgeCommand, self).__init__(api)
-        self.name = name
-
-    def run_idl(self, txn):
-        interface = idlutils.row_by_value(self.api.idl, 'Interface', 'name',
-                                          self.name)
-        ports = self.api._tables['Port'].rows.values()
-        port = next(
-            p for p in ports if interface in p.interfaces)
-
-        bridges = self.api._tables['Bridge'].rows.values()
-        self.result = next(br.name for br in bridges if port in br.ports)
-
-
-class DbListCommand(BaseCommand):
-    def __init__(self, api, table, records, columns, if_exists):
-        super(DbListCommand, self).__init__(api)
-        self.table = table
-        self.columns = columns
-        self.if_exists = if_exists
-        self.records = records
-
-    def run_idl(self, txn):
-        table_schema = self.api._tables[self.table]
-        columns = (self.columns or
-                   list(table_schema.columns.keys()) + ['_uuid'])
-        if self.records:
-            row_uuids = []
-            for record in self.records:
-                try:
-                    row_uuids.append(idlutils.row_by_record(
-                                     self.api.idl, self.table, record).uuid)
-                except idlutils.RowNotFound:
-                    if self.if_exists:
-                        continue
-                    # NOTE(kevinbenton): this is converted to a RuntimeError
-                    # for compat with the vsctl version. It might make more
-                    # sense to change this to a RowNotFoundError in the future.
-                    raise RuntimeError(_(
-                          "Row doesn't exist in the DB. Request info: "
-                          "Table=%(table)s. Columns=%(columns)s. "
-                          "Records=%(records)s.") % {
-                              "table": self.table,
-                              "columns": self.columns,
-                              "records": self.records,
-                          })
-        else:
-            row_uuids = table_schema.rows.keys()
-        self.result = [
-            {
-                c: idlutils.get_column_value(table_schema.rows[uuid], c)
-                for c in columns
-            }
-            for uuid in row_uuids
-        ]
-
-
-class DbFindCommand(BaseCommand):
-    def __init__(self, api, table, *conditions, **kwargs):
-        super(DbFindCommand, self).__init__(api)
-        self.table = self.api._tables[table]
-        self.conditions = conditions
-        self.columns = (kwargs.get('columns') or
-                        list(self.table.columns.keys()) + ['_uuid'])
-
-    def run_idl(self, txn):
-        self.result = [
-            {
-                c: idlutils.get_column_value(r, c)
-                for c in self.columns
-            }
-            for r in self.table.rows.values()
-            if idlutils.row_match(r, self.conditions)
-        ]
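All of the native commands follow the same shape: a constructor that captures arguments and a run_idl() that works directly on the in-memory IDL tables. A hypothetical extra command, shown only to illustrate the pattern (GetBridgeDatapathIdCommand does not exist in this module):

    class GetBridgeDatapathIdCommand(BaseCommand):
        def __init__(self, api, bridge):
            super(GetBridgeDatapathIdCommand, self).__init__(api)
            self.bridge = bridge

        def run_idl(self, txn):
            br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                       self.bridge)
            # get_column_value collapses one-element set columns to a
            # scalar, matching ovs-vsctl behavior.
            self.result = idlutils.get_column_value(br, 'datapath_id')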
diff --git a/neutron/agent/ovsdb/native/connection.py b/neutron/agent/ovsdb/native/connection.py
deleted file mode 100644 (file)
index 56cd7cc..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-from six.moves import queue as Queue
-import threading
-import traceback
-
-from ovs.db import idl
-from ovs import poller
-import retrying
-
-from neutron.agent.ovsdb.native import helpers
-from neutron.agent.ovsdb.native import idlutils
-
-
-class TransactionQueue(Queue.Queue, object):
-    def __init__(self, *args, **kwargs):
-        super(TransactionQueue, self).__init__(*args, **kwargs)
-        alertpipe = os.pipe()
-        # Use binary mode: unbuffered text I/O is rejected on Python 3.
-        self.alertin = os.fdopen(alertpipe[0], 'rb', 0)
-        self.alertout = os.fdopen(alertpipe[1], 'wb', 0)
-
-    def get_nowait(self, *args, **kwargs):
-        try:
-            result = super(TransactionQueue, self).get_nowait(*args, **kwargs)
-        except Queue.Empty:
-            return None
-        self.alertin.read(1)
-        return result
-
-    def put(self, *args, **kwargs):
-        super(TransactionQueue, self).put(*args, **kwargs)
-        self.alertout.write(b'X')
-        self.alertout.flush()
-
-    @property
-    def alert_fileno(self):
-        return self.alertin.fileno()
-
-
-class Connection(object):
-    def __init__(self, connection, timeout, schema_name):
-        self.idl = None
-        self.connection = connection
-        self.timeout = timeout
-        self.txns = TransactionQueue(1)
-        self.lock = threading.Lock()
-        self.schema_name = schema_name
-
-    def start(self):
-        with self.lock:
-            if self.idl is not None:
-                return
-
-            try:
-                helper = idlutils.get_schema_helper(self.connection,
-                                                    self.schema_name)
-            except Exception:
-                # We may have failed due to set-manager not being called
-                helpers.enable_connection_uri(self.connection)
-
-                # There is a small window for a race, so retry up to a second
-                @retrying.retry(wait_exponential_multiplier=10,
-                                stop_max_delay=1000)
-                def do_get_schema_helper():
-                    return idlutils.get_schema_helper(self.connection,
-                                                      self.schema_name)
-                helper = do_get_schema_helper()
-
-            helper.register_all()
-            self.idl = idl.Idl(self.connection, helper)
-            idlutils.wait_for_change(self.idl, self.timeout)
-            self.poller = poller.Poller()
-            self.thread = threading.Thread(target=self.run)
-            self.thread.setDaemon(True)
-            self.thread.start()
-
-    def run(self):
-        while True:
-            self.idl.wait(self.poller)
-            self.poller.fd_wait(self.txns.alert_fileno, poller.POLLIN)
-            self.poller.block()
-            self.idl.run()
-            txn = self.txns.get_nowait()
-            if txn is not None:
-                try:
-                    txn.results.put(txn.do_commit())
-                except Exception as ex:
-                    er = idlutils.ExceptionResult(ex=ex,
-                                                  tb=traceback.format_exc())
-                    txn.results.put(er)
-                self.txns.task_done()
-
-    def queue_txn(self, txn):
-        self.txns.put(txn)
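The pipe bolted onto TransactionQueue is what lets queue_txn() wake the run() loop, which otherwise blocks in poller.block() waiting only on OVSDB traffic. The wakeup pattern in isolation (a standalone sketch; names are illustrative, not from this module):

    import os
    import select
    from six.moves import queue as Queue

    txns = Queue.Queue()
    rfd, wfd = os.pipe()

    # Producer: enqueue, then write one byte to wake the poller.
    txns.put('a-transaction')
    os.write(wfd, b'X')

    # Poller: select() returns as soon as the byte arrives, even with
    # no OVSDB activity; drain the byte, then drain the queue.
    select.select([rfd], [], [])
    os.read(rfd, 1)
    assert txns.get_nowait() == 'a-transaction'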
diff --git a/neutron/agent/ovsdb/native/helpers.py b/neutron/agent/ovsdb/native/helpers.py
deleted file mode 100644 (file)
index 0a4e426..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.common import utils
-
-
-def _connection_to_manager_uri(conn_uri):
-    proto, addr = conn_uri.split(':', 1)
-    if ':' in addr:
-        ip, port = addr.split(':', 1)
-        return 'p%s:%s:%s' % (proto, port, ip)
-    else:
-        return 'p%s:%s' % (proto, addr)
-
-
-def enable_connection_uri(conn_uri):
-    manager_uri = _connection_to_manager_uri(conn_uri)
-    utils.execute(['ovs-vsctl', 'set-manager', manager_uri], run_as_root=True)
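For reference, the rewrite turns an active client target into the passive listen form that ovs-vsctl set-manager expects, e.g.:

    _connection_to_manager_uri('tcp:127.0.0.1:6640')
    # -> 'ptcp:6640:127.0.0.1'
    _connection_to_manager_uri('unix:/var/run/openvswitch/db.sock')
    # -> 'punix:/var/run/openvswitch/db.sock'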
diff --git a/neutron/agent/ovsdb/native/idlutils.py b/neutron/agent/ovsdb/native/idlutils.py
deleted file mode 100644 (file)
index ccc8599..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import os
-import time
-import uuid
-
-from ovs.db import idl
-from ovs import jsonrpc
-from ovs import poller
-from ovs import stream
-
-from neutron._i18n import _
-from neutron.common import exceptions
-
-
-RowLookup = collections.namedtuple('RowLookup',
-                                   ['table', 'column', 'uuid_column'])
-
-# Tables with no index in OVSDB and special record lookup rules
-_LOOKUP_TABLE = {
-    'Controller': RowLookup('Bridge', 'name', 'controller'),
-    'Flow_Table': RowLookup('Flow_Table', 'name', None),
-    'IPFIX': RowLookup('Bridge', 'name', 'ipfix'),
-    'Mirror': RowLookup('Mirror', 'name', None),
-    'NetFlow': RowLookup('Bridge', 'name', 'netflow'),
-    'QoS': RowLookup('Port', 'name', 'qos'),
-    'Queue': RowLookup(None, None, None),
-    'sFlow': RowLookup('Bridge', 'name', 'sflow'),
-    'SSL': RowLookup('Open_vSwitch', None, 'ssl'),
-}
-
-_NO_DEFAULT = object()
-
-
-class RowNotFound(exceptions.NeutronException):
-    message = _("Cannot find %(table)s with %(col)s=%(match)s")
-
-
-def row_by_value(idl_, table, column, match, default=_NO_DEFAULT):
-    """Lookup an IDL row in a table by column/value"""
-    tab = idl_.tables[table]
-    for r in tab.rows.values():
-        if getattr(r, column) == match:
-            return r
-    if default is not _NO_DEFAULT:
-        return default
-    raise RowNotFound(table=table, col=column, match=match)
-
-
-def row_by_record(idl_, table, record):
-    t = idl_.tables[table]
-    try:
-        if isinstance(record, uuid.UUID):
-            return t.rows[record]
-        uuid_ = uuid.UUID(record)
-        return t.rows[uuid_]
-    except ValueError:
-        # Not a UUID string, continue lookup by other means
-        pass
-    except KeyError:
-        raise RowNotFound(table=table, col='uuid', match=record)
-
-    rl = _LOOKUP_TABLE.get(table, RowLookup(table, get_index_column(t), None))
-    # A None table means the row can only be looked up by UUID; a None
-    # column (currently just SSL) is a lookup we don't need.
-    if rl.table is None:
-        raise ValueError(_("Table %s can only be queried by UUID") % table)
-    if rl.column is None:
-        raise NotImplementedError(_("'.' searches are not implemented"))
-    row = row_by_value(idl_, rl.table, rl.column, record)
-    if rl.uuid_column:
-        rows = getattr(row, rl.uuid_column)
-        if len(rows) != 1:
-            raise RowNotFound(table=table, col=_('record'), match=record)
-        row = rows[0]
-    return row
-
-
-class ExceptionResult(object):
-    def __init__(self, ex, tb):
-        self.ex = ex
-        self.tb = tb
-
-
-def get_schema_helper(connection, schema_name):
-    err, strm = stream.Stream.open_block(
-        stream.Stream.open(connection))
-    if err:
-        raise Exception("Could not connect to %s" % (
-            connection,))
-    rpc = jsonrpc.Connection(strm)
-    req = jsonrpc.Message.create_request('get_schema', [schema_name])
-    err, resp = rpc.transact_block(req)
-    rpc.close()
-    if err:
-        raise Exception("Could not retrieve schema from %s: %s" % (
-            connection, os.strerror(err)))
-    elif resp.error:
-        raise Exception(resp.error)
-    return idl.SchemaHelper(None, resp.result)
-
-
-def wait_for_change(_idl, timeout, seqno=None):
-    if seqno is None:
-        seqno = _idl.change_seqno
-    stop = time.time() + timeout
-    while _idl.change_seqno == seqno and not _idl.run():
-        ovs_poller = poller.Poller()
-        _idl.wait(ovs_poller)
-        ovs_poller.timer_wait(timeout * 1000)
-        ovs_poller.block()
-        if time.time() > stop:
-            raise Exception("Timeout")
-
-
-def get_column_value(row, col):
-    if col == '_uuid':
-        val = row.uuid
-    else:
-        val = getattr(row, col)
-
-    # Idl returns lists of Rows where ovs-vsctl returns lists of UUIDs
-    if isinstance(val, list) and len(val):
-        if isinstance(val[0], idl.Row):
-            val = [v.uuid for v in val]
-        # ovs-vsctl treats lists of 1 as single results
-        if len(val) == 1:
-            val = val[0]
-    return val
-
-
-def condition_match(row, condition):
-    """Return whether a condition matches a row
-
-    :param row:       An OVSDB Row
-    :param condition: A 3-tuple containing (column, operation, match)
-    """
-
-    col, op, match = condition
-    val = get_column_value(row, col)
-    matched = True
-
-    # TODO(twilson) Implement other operators and type comparisons
-    # ovs_lib only uses dict '=' and '!=' searches for now
-    if isinstance(match, dict):
-        for key in match:
-            if op == '=':
-                if (key not in val or match[key] != val[key]):
-                    matched = False
-                    break
-            elif op == '!=':
-                if key not in val or match[key] == val[key]:
-                    matched = False
-                    break
-            else:
-                raise NotImplementedError()
-    elif isinstance(match, list):
-        raise NotImplementedError()
-    else:
-        if op == '=':
-            if val != match:
-                matched = False
-        elif op == '!=':
-            if val == match:
-                matched = False
-        else:
-            raise NotImplementedError()
-    return matched
-
-
-def row_match(row, conditions):
-    """Return whether the row matches the list of conditions"""
-    return all(condition_match(row, cond) for cond in conditions)
-
-
-def get_index_column(table):
-    if len(table.indexes) == 1:
-        idx = table.indexes[0]
-        if len(idx) == 1:
-            return idx[0].name
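A small illustration of the matching rules above (the stub row is hypothetical; any object with matching attributes works, since get_column_value falls back to getattr):

    class FakeRow(object):
        name = 'br-int'
        external_ids = {'iface-id': 'port-1'}

    row = FakeRow()
    assert condition_match(row, ('name', '=', 'br-int'))
    # Dict conditions compare only the keys named in the match dict:
    assert condition_match(row, ('external_ids', '=',
                                 {'iface-id': 'port-1'}))
    assert not condition_match(row, ('external_ids', '!=',
                                     {'iface-id': 'port-1'}))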
diff --git a/neutron/agent/rpc.py b/neutron/agent/rpc.py
deleted file mode 100644 (file)
index 64a30e1..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from datetime import datetime
-import itertools
-
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_utils import uuidutils
-
-from neutron._i18n import _LW
-from neutron.common import constants
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-
-
-LOG = logging.getLogger(__name__)
-
-
-def create_consumers(endpoints, prefix, topic_details, start_listening=True):
-    """Create agent RPC consumers.
-
-    :param endpoints: The list of endpoints to process the incoming messages.
-    :param prefix: Common prefix for the plugin/agent message queues.
-    :param topic_details: A list of topics. Each topic has a name, an
-                          operation, and an optional host param keying the
-                          subscription to topic.host for plugin calls.
-    :param start_listening: if True, it starts the processing loop
-
-    :returns: A common Connection.
-    """
-
-    connection = n_rpc.create_connection()
-    for details in topic_details:
-        topic, operation, node_name = itertools.islice(
-            itertools.chain(details, [None]), 3)
-
-        topic_name = topics.get_topic_name(prefix, topic, operation)
-        connection.create_consumer(topic_name, endpoints, fanout=True)
-        if node_name:
-            node_topic_name = '%s.%s' % (topic_name, node_name)
-            connection.create_consumer(node_topic_name,
-                                       endpoints,
-                                       fanout=False)
-    if start_listening:
-        connection.consume_in_threads()
-    return connection
-
-
-class PluginReportStateAPI(object):
-    """RPC client used to report state back to plugin.
-
-    This class implements the client side of an rpc interface.  The server side
-    can be found in neutron.db.agents_db.AgentExtRpcCallback.  For more
-    information on changing rpc interfaces, see doc/source/devref/rpc_api.rst.
-    """
-    def __init__(self, topic):
-        target = oslo_messaging.Target(topic=topic, version='1.0',
-                                       namespace=constants.RPC_NAMESPACE_STATE)
-        self.client = n_rpc.get_client(target)
-
-    def report_state(self, context, agent_state, use_call=False):
-        cctxt = self.client.prepare()
-        # Add a unique identifier to the report so it can be logged on
-        # the server side. This creates a visible correspondence between
-        # events on the agent and on the server.
-        agent_state['uuid'] = uuidutils.generate_uuid()
-        kwargs = {
-            'agent_state': {'agent_state': agent_state},
-            'time': datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT),
-        }
-        method = cctxt.call if use_call else cctxt.cast
-        return method(context, 'report_state', **kwargs)
-
-
-class PluginApi(object):
-    '''Agent side of the rpc API.
-
-    API version history:
-        1.0 - Initial version.
-        1.3 - get_device_details rpc signature upgrade to obtain 'host' and
-              return value to include fixed_ips and device_owner for
-              the device port
-        1.4 - tunnel_sync rpc signature upgrade to obtain 'host'
-        1.5 - Support update_device_list and
-              get_devices_details_list_and_failed_devices
-    '''
-
-    def __init__(self, topic):
-        target = oslo_messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def get_device_details(self, context, device, agent_id, host=None):
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'get_device_details', device=device,
-                          agent_id=agent_id, host=host)
-
-    def get_devices_details_list(self, context, devices, agent_id, host=None):
-        try:
-            cctxt = self.client.prepare(version='1.3')
-            res = cctxt.call(context, 'get_devices_details_list',
-                             devices=devices, agent_id=agent_id, host=host)
-        except oslo_messaging.UnsupportedVersion:
-            # If the server has not been upgraded yet, a DVR-enabled agent
-            # may not work correctly, but it can run in a 'degraded' mode:
-            # DVR routers may not be in the system yet, and retrieving info
-            # about the host may not be necessary.
-            LOG.warn(_LW('DVR functionality requires a server upgrade.'))
-            res = [
-                self.get_device_details(context, device, agent_id, host)
-                for device in devices
-            ]
-        return res
-
-    def get_devices_details_list_and_failed_devices(self, context, devices,
-                                                    agent_id, host=None):
-        """Get devices details and the list of devices that failed.
-
-        This method returns the devices details. If an error is thrown when
-        retrieving the devices details, the device is put in a list of
-        failed devices.
-        """
-        try:
-            cctxt = self.client.prepare(version='1.5')
-            res = cctxt.call(
-                context,
-                'get_devices_details_list_and_failed_devices',
-                devices=devices, agent_id=agent_id, host=host)
-        except oslo_messaging.UnsupportedVersion:
-            # TODO(rossella_s): Remove this fallback logic in M
-            res = self._device_list_rpc_call_with_failed_dev(
-                self.get_device_details, context, agent_id, host, devices)
-        return res
-
-    def update_device_down(self, context, device, agent_id, host=None):
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'update_device_down', device=device,
-                          agent_id=agent_id, host=host)
-
-    def update_device_up(self, context, device, agent_id, host=None):
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'update_device_up', device=device,
-                          agent_id=agent_id, host=host)
-
-    def _device_list_rpc_call_with_failed_dev(self, rpc_call, context,
-                                              agent_id, host, devices):
-        succeeded_devices = []
-        failed_devices = []
-        for device in devices:
-            try:
-                rpc_device = rpc_call(context, device, agent_id, host)
-            except Exception:
-                failed_devices.append(device)
-            else:
-                # update_device_up doesn't return the device
-                succeeded_dev = rpc_device or device
-                succeeded_devices.append(succeeded_dev)
-        return {'devices': succeeded_devices, 'failed_devices': failed_devices}
-
-    def update_device_list(self, context, devices_up, devices_down,
-                           agent_id, host):
-        try:
-            cctxt = self.client.prepare(version='1.5')
-            res = cctxt.call(context, 'update_device_list',
-                             devices_up=devices_up, devices_down=devices_down,
-                             agent_id=agent_id, host=host)
-        except oslo_messaging.UnsupportedVersion:
-            # TODO(rossella_s): Remove this fallback logic in M
-            dev_up = self._device_list_rpc_call_with_failed_dev(
-                self.update_device_up, context, agent_id, host, devices_up)
-            dev_down = self._device_list_rpc_call_with_failed_dev(
-                self.update_device_down, context, agent_id, host, devices_down)
-
-            res = {'devices_up': dev_up.get('devices'),
-                   'failed_devices_up': dev_up.get('failed_devices'),
-                   'devices_down': dev_down.get('devices'),
-                   'failed_devices_down': dev_down.get('failed_devices')}
-        return res
-
-    def tunnel_sync(self, context, tunnel_ip, tunnel_type=None, host=None):
-        try:
-            cctxt = self.client.prepare(version='1.4')
-            res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
-                             tunnel_type=tunnel_type, host=host)
-        except oslo_messaging.UnsupportedVersion:
-            LOG.warn(_LW('Tunnel synchronization requires a server upgrade.'))
-            cctxt = self.client.prepare()
-            res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
-                             tunnel_type=tunnel_type)
-        return res
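The try-new-version/fall-back shape used by get_devices_details_list, update_device_list, and tunnel_sync generalizes to any versioned RPC. A sketch of the pattern on its own (method names and version numbers are illustrative):

    import oslo_messaging

    def call_with_fallback(client, context, **kwargs):
        try:
            cctxt = client.prepare(version='1.5')
            return cctxt.call(context, 'new_style_method', **kwargs)
        except oslo_messaging.UnsupportedVersion:
            # Server too old: fall back to the base-version method and
            # adapt the result as needed.
            cctxt = client.prepare()
            return cctxt.call(context, 'old_style_method', **kwargs)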
diff --git a/neutron/agent/securitygroups_rpc.py b/neutron/agent/securitygroups_rpc.py
deleted file mode 100644 (file)
index a087367..0000000
+++ /dev/null
@@ -1,322 +0,0 @@
-# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import functools
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_utils import importutils
-
-from neutron._i18n import _, _LI, _LW
-from neutron.agent import firewall
-from neutron.api.rpc.handlers import securitygroups_rpc
-
-LOG = logging.getLogger(__name__)
-
-
-security_group_opts = [
-    cfg.StrOpt(
-        'firewall_driver',
-        help=_('Driver for security groups firewall in the L2 agent')),
-    cfg.BoolOpt(
-        'enable_security_group',
-        default=True,
-        help=_(
-            'Controls whether the neutron security group API is enabled '
-            'in the server. It should be false when using no security '
-            'groups or using the nova security group API.')),
-    cfg.BoolOpt(
-        'enable_ipset',
-        default=True,
-        help=_('Use ipset to speed-up the iptables based security groups. '
-               'Enabling ipset support requires that ipset is installed on L2 '
-               'agent node.'))
-]
-cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP')
-
-
-# This is a backward-compatibility check for Havana.
-def _is_valid_driver_combination():
-    return ((cfg.CONF.SECURITYGROUP.enable_security_group and
-             (cfg.CONF.SECURITYGROUP.firewall_driver and
-              cfg.CONF.SECURITYGROUP.firewall_driver !=
-             'neutron.agent.firewall.NoopFirewallDriver')) or
-            (not cfg.CONF.SECURITYGROUP.enable_security_group and
-             (cfg.CONF.SECURITYGROUP.firewall_driver ==
-             'neutron.agent.firewall.NoopFirewallDriver' or
-              cfg.CONF.SECURITYGROUP.firewall_driver is None)
-             ))
-
-
-def is_firewall_enabled():
-    if not _is_valid_driver_combination():
-        LOG.warn(_LW("Driver configuration doesn't match with "
-                     "enable_security_group"))
-
-    return cfg.CONF.SECURITYGROUP.enable_security_group
-
-
-def _disable_extension(extension, aliases):
-    if extension in aliases:
-        aliases.remove(extension)
-
-
-def disable_security_group_extension_by_config(aliases):
-    if not is_firewall_enabled():
-        LOG.info(_LI('Disabled security-group extension.'))
-        _disable_extension('security-group', aliases)
-        LOG.info(_LI('Disabled allowed-address-pairs extension.'))
-        _disable_extension('allowed-address-pairs', aliases)
-
-
-class SecurityGroupAgentRpc(object):
-    """Enables SecurityGroup agent support in agent implementations."""
-
-    def __init__(self, context, plugin_rpc, local_vlan_map=None,
-                 defer_refresh_firewall=False,):
-        self.context = context
-        self.plugin_rpc = plugin_rpc
-        self.init_firewall(defer_refresh_firewall)
-        self.local_vlan_map = local_vlan_map
-
-    def init_firewall(self, defer_refresh_firewall=False):
-        firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver
-        LOG.debug("Init firewall settings (driver=%s)", firewall_driver)
-        if not _is_valid_driver_combination():
-            LOG.warn(_LW("Driver configuration doesn't match "
-                         "with enable_security_group"))
-        if not firewall_driver:
-            firewall_driver = 'neutron.agent.firewall.NoopFirewallDriver'
-        self.firewall = importutils.import_object(firewall_driver)
-        # The following flag will be set to true if port filter must not be
-        # applied as soon as a rule or membership notification is received
-        self.defer_refresh_firewall = defer_refresh_firewall
-        # Stores devices for which firewall should be refreshed when
-        # deferred refresh is enabled.
-        self.devices_to_refilter = set()
-        # Flag raised when a global refresh is needed
-        self.global_refresh_firewall = False
-        self._use_enhanced_rpc = None
-
-    @property
-    def use_enhanced_rpc(self):
-        if self._use_enhanced_rpc is None:
-            self._use_enhanced_rpc = (
-                self._check_enhanced_rpc_is_supported_by_server())
-        return self._use_enhanced_rpc
-
-    def _check_enhanced_rpc_is_supported_by_server(self):
-        try:
-            self.plugin_rpc.security_group_info_for_devices(
-                self.context, devices=[])
-        except oslo_messaging.UnsupportedVersion:
-            LOG.warning(_LW('security_group_info_for_devices rpc call not '
-                            'supported by the server, falling back to old '
-                            'security_group_rules_for_devices which scales '
-                            'worse.'))
-            return False
-        return True
-
-    def skip_if_noopfirewall_or_firewall_disabled(func):
-        @functools.wraps(func)
-        def decorated_function(self, *args, **kwargs):
-            if (isinstance(self.firewall, firewall.NoopFirewallDriver) or
-                not is_firewall_enabled()):
-                LOG.info(_LI("Skipping method %s as firewall is disabled "
-                         "or configured as NoopFirewallDriver."),
-                         func.__name__)
-            else:
-                return func(self,  # pylint: disable=not-callable
-                            *args, **kwargs)
-        return decorated_function
-
-    @skip_if_noopfirewall_or_firewall_disabled
-    def prepare_devices_filter(self, device_ids):
-        if not device_ids:
-            return
-        LOG.info(_LI("Preparing filters for devices %s"), device_ids)
-        if self.use_enhanced_rpc:
-            devices_info = self.plugin_rpc.security_group_info_for_devices(
-                self.context, list(device_ids))
-            devices = devices_info['devices']
-            security_groups = devices_info['security_groups']
-            security_group_member_ips = devices_info['sg_member_ips']
-        else:
-            devices = self.plugin_rpc.security_group_rules_for_devices(
-                self.context, list(device_ids))
-
-        with self.firewall.defer_apply():
-            for device in devices.values():
-                self.firewall.prepare_port_filter(device)
-            if self.use_enhanced_rpc:
-                LOG.debug("Update security group information for ports %s",
-                          devices.keys())
-                self._update_security_group_info(
-                    security_groups, security_group_member_ips)
-
-    def _update_security_group_info(self, security_groups,
-                                    security_group_member_ips):
-        LOG.debug("Update security group information")
-        for sg_id, sg_rules in security_groups.items():
-            self.firewall.update_security_group_rules(sg_id, sg_rules)
-        for remote_sg_id, member_ips in security_group_member_ips.items():
-            self.firewall.update_security_group_members(
-                remote_sg_id, member_ips)
-
-    def security_groups_rule_updated(self, security_groups):
-        LOG.info(_LI("Security group "
-                 "rule updated %r"), security_groups)
-        self._security_group_updated(
-            security_groups,
-            'security_groups',
-            'sg_rule')
-
-    def security_groups_member_updated(self, security_groups):
-        LOG.info(_LI("Security group "
-                 "member updated %r"), security_groups)
-        self._security_group_updated(
-            security_groups,
-            'security_group_source_groups',
-            'sg_member')
-
-    def _security_group_updated(self, security_groups, attribute, action_type):
-        devices = []
-        sec_grp_set = set(security_groups)
-        for device in self.firewall.ports.values():
-            if sec_grp_set & set(device.get(attribute, [])):
-                devices.append(device['device'])
-        if devices:
-            if self.use_enhanced_rpc:
-                self.firewall.security_group_updated(action_type, sec_grp_set)
-            if self.defer_refresh_firewall:
-                LOG.debug("Adding %s devices to the list of devices "
-                          "for which firewall needs to be refreshed",
-                          devices)
-                self.devices_to_refilter |= set(devices)
-            else:
-                self.refresh_firewall(devices)
-
-    def security_groups_provider_updated(self, devices_to_update):
-        LOG.info(_LI("Provider rule updated"))
-        if self.defer_refresh_firewall:
-            if devices_to_update is None:
-                self.global_refresh_firewall = True
-            else:
-                self.devices_to_refilter |= set(devices_to_update)
-        else:
-            self.refresh_firewall(devices_to_update)
-
-    def remove_devices_filter(self, device_ids):
-        if not device_ids:
-            return
-        LOG.info(_LI("Remove device filter for %r"), device_ids)
-        with self.firewall.defer_apply():
-            for device_id in device_ids:
-                device = self.firewall.ports.get(device_id)
-                if not device:
-                    continue
-                self.firewall.remove_port_filter(device)
-
-    @skip_if_noopfirewall_or_firewall_disabled
-    def refresh_firewall(self, device_ids=None):
-        LOG.info(_LI("Refresh firewall rules"))
-        if not device_ids:
-            device_ids = self.firewall.ports.keys()
-            if not device_ids:
-                LOG.info(_LI("No ports here to refresh firewall"))
-                return
-        if self.use_enhanced_rpc:
-            devices_info = self.plugin_rpc.security_group_info_for_devices(
-                self.context, device_ids)
-            devices = devices_info['devices']
-            security_groups = devices_info['security_groups']
-            security_group_member_ips = devices_info['sg_member_ips']
-        else:
-            devices = self.plugin_rpc.security_group_rules_for_devices(
-                self.context, device_ids)
-
-        with self.firewall.defer_apply():
-            for device in devices.values():
-                LOG.debug("Update port filter for %s", device['device'])
-                self.firewall.update_port_filter(device)
-            if self.use_enhanced_rpc:
-                LOG.debug("Update security group information for ports %s",
-                          devices.keys())
-                self._update_security_group_info(
-                    security_groups, security_group_member_ips)
-
-    def firewall_refresh_needed(self):
-        return self.global_refresh_firewall or self.devices_to_refilter
-
-    def setup_port_filters(self, new_devices, updated_devices):
-        """Configure port filters for devices.
-
-        This routine applies filters for new devices and refreshes firewall
-        rules when devices have been updated, or when there are changes in
-        security group membership or rules.
-
-        :param new_devices: set containing identifiers for new devices
-        :param updated_devices: set containing identifiers for
-        updated devices
-        """
-        # These data structures are cleared here in order to avoid
-        # losing updates occurring during firewall refresh
-        devices_to_refilter = self.devices_to_refilter
-        global_refresh_firewall = self.global_refresh_firewall
-        self.devices_to_refilter = set()
-        self.global_refresh_firewall = False
-        # We must call prepare_devices_filter() after we've grabbed
-        # self.devices_to_refilter since an update for a new port
-        # could arrive while we're processing, and we need to make
-        # sure we don't skip it.  It will get handled the next time.
-        if new_devices:
-            LOG.debug("Preparing device filters for %d new devices",
-                      len(new_devices))
-            self.prepare_devices_filter(new_devices)
-        # TODO(salv-orlando): If possible, avoid ever performing the global
-        # refresh by providing a precise list of devices for which the
-        # firewall should be refreshed
-        if global_refresh_firewall:
-            LOG.debug("Refreshing firewall for all filtered devices")
-            self.refresh_firewall()
-        else:
-            if self.use_enhanced_rpc:
-                self.firewall.security_group_updated('sg_member', [],
-                                                     updated_devices)
-            # If a device is both in new and updated devices
-            # avoid reprocessing it
-            updated_devices = ((updated_devices | devices_to_refilter) -
-                               new_devices)
-            if updated_devices:
-                LOG.debug("Refreshing firewall for %d devices",
-                          len(updated_devices))
-                self.refresh_firewall(updated_devices)
-
-
-# TODO(armax): for bw compat with external dependencies; to be dropped in M.
-SG_RPC_VERSION = (
-    securitygroups_rpc.SecurityGroupAgentRpcApiMixin.SG_RPC_VERSION
-)
-SecurityGroupServerRpcApi = (
-    securitygroups_rpc.SecurityGroupServerRpcApi
-)
-SecurityGroupAgentRpcApiMixin = (
-    securitygroups_rpc.SecurityGroupAgentRpcApiMixin
-)
-SecurityGroupAgentRpcCallbackMixin = (
-    securitygroups_rpc.SecurityGroupAgentRpcCallbackMixin
-)
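
The subtlest piece of the agent above is the deferred-refresh bookkeeping in
setup_port_filters(). The following standalone sketch isolates just that
bookkeeping; FakeAgent, the port names, and the stubbed refresh_firewall()
are illustrative only, not neutron API:

    # Sketch of the deferred-refresh logic; no neutron dependencies.
    class FakeAgent(object):
        def __init__(self):
            self.devices_to_refilter = set()
            self.global_refresh_firewall = False
            self.refreshed = []

        def refresh_firewall(self, devices=None):
            self.refreshed.append(devices)

        def setup_port_filters(self, new_devices, updated_devices):
            # Grab and clear the pending state first, as the real method
            # does, so updates arriving mid-refresh are not lost.
            pending = self.devices_to_refilter
            do_global = self.global_refresh_firewall
            self.devices_to_refilter = set()
            self.global_refresh_firewall = False
            if do_global:
                self.refresh_firewall()
            else:
                updated = (updated_devices | pending) - new_devices
                if updated:
                    self.refresh_firewall(updated)

    agent = FakeAgent()
    agent.devices_to_refilter = {'port-a', 'port-b'}
    agent.setup_port_filters(set(), {'port-b'})
    assert agent.refreshed == [{'port-a', 'port-b'}]
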
diff --git a/neutron/agent/windows/__init__.py b/neutron/agent/windows/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/agent/windows/polling.py b/neutron/agent/windows/polling.py
deleted file mode 100644 (file)
index f80a598..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2015 Cloudbase Solutions.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-from neutron.agent.common import base_polling
-
-
-@contextlib.contextmanager
-def get_polling_manager(minimize_polling, ovsdb_monitor_respawn_interval):
-    pm = base_polling.AlwaysPoll()
-    yield pm
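
On Windows, get_polling_manager() above accepts both arguments only for
signature parity and always yields a base_polling.AlwaysPoll instance, whose
is_polling_required property always returns True. A hedged usage sketch
(agent_loop_should_run() and scan_and_process_ports() are hypothetical
helpers, not neutron code):

    with get_polling_manager(minimize_polling=True,
                             ovsdb_monitor_respawn_interval=30) as pm:
        while agent_loop_should_run():
            if pm.is_polling_required:  # always True for AlwaysPoll
                scan_and_process_ports()
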
diff --git a/neutron/agent/windows/utils.py b/neutron/agent/windows/utils.py
deleted file mode 100644 (file)
index 115593e..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2015 Cloudbase Solutions.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from eventlet.green import subprocess
-from eventlet import greenthread
-from oslo_log import log as logging
-import six
-
-from neutron._i18n import _
-from neutron.common import utils
-
-LOG = logging.getLogger(__name__)
-
-
-def create_process(cmd, addl_env=None):
-    cmd = list(map(str, cmd))
-
-    LOG.debug("Running command: %s", cmd)
-    env = os.environ.copy()
-    if addl_env:
-        env.update(addl_env)
-
-    obj = utils.subprocess_popen(cmd, shell=False,
-                                 stdin=subprocess.PIPE,
-                                 stdout=subprocess.PIPE,
-                                 stderr=subprocess.PIPE,
-                                 env=env,
-                                 preexec_fn=None,
-                                 close_fds=False)
-
-    return obj, cmd
-
-
-def execute(cmd, process_input=None, addl_env=None,
-            check_exit_code=True, return_stderr=False, log_fail_as_error=True,
-            extra_ok_codes=None, run_as_root=False, do_decode=True):
-
-    try:
-        if (process_input is None or
-            isinstance(process_input, six.binary_type)):
-            _process_input = process_input
-        else:
-            _process_input = process_input.encode('utf-8')
-        obj, cmd = create_process(cmd, addl_env=addl_env)
-        _stdout, _stderr = obj.communicate(_process_input)
-        obj.stdin.close()
-        _stdout = utils.safe_decode_utf8(_stdout)
-        _stderr = utils.safe_decode_utf8(_stderr)
-
-        m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdin: %(stdin)s\n"
-              "Stdout: %(stdout)s\nStderr: %(stderr)s") % \
-            {'cmd': cmd,
-             'code': obj.returncode,
-             'stdin': process_input or '',
-             'stdout': _stdout,
-             'stderr': _stderr}
-
-        extra_ok_codes = extra_ok_codes or []
-        if obj.returncode and obj.returncode in extra_ok_codes:
-            obj.returncode = None
-
-        log_msg = m.strip().replace('\n', '; ')
-        if obj.returncode and log_fail_as_error:
-            LOG.error(log_msg)
-        else:
-            LOG.debug(log_msg)
-
-        if obj.returncode and check_exit_code:
-            raise RuntimeError(m)
-    finally:
-        # NOTE(termie): this appears to be necessary to let the subprocess
-        #               call clean something up in between calls; without
-        #               it, the second of two execute calls in a row hangs
-        greenthread.sleep(0)
-
-    return (_stdout, _stderr) if return_stderr else _stdout
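
Note that execute() above accepts run_as_root and do_decode only for
signature compatibility with the POSIX helper; neither is used in the body.
A hedged usage sketch (the commands are illustrative):

    # Raises RuntimeError on failure (check_exit_code defaults to True).
    output = execute(['ipconfig', '/all'])
    # Capture stderr too, and log failures at debug rather than error.
    out, err = execute(['ping', '-n', '1', '127.0.0.1'],
                       return_stderr=True, log_fail_as_error=False)
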
diff --git a/neutron/api/__init__.py b/neutron/api/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/api/api_common.py b/neutron/api/api_common.py
deleted file mode 100644 (file)
index b31cde7..0000000
+++ /dev/null
@@ -1,341 +0,0 @@
-# Copyright 2011 Citrix System.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import six
-from six.moves.urllib import parse
-from webob import exc
-
-from neutron._i18n import _, _LW
-from neutron.common import constants
-from neutron.common import exceptions
-
-
-LOG = logging.getLogger(__name__)
-
-
-def get_filters(request, attr_info, skips=None):
-    """Extracts the filters from the request string.
-
-    Returns a dict of lists for the filters:
-    check=a&check=b&name=Bob&
-    becomes:
-    {'check': [u'a', u'b'], 'name': [u'Bob']}
-    """
-    skips = skips or []
-    res = {}
-    for key, values in six.iteritems(request.GET.dict_of_lists()):
-        if key in skips:
-            continue
-        values = [v for v in values if v]
-        key_attr_info = attr_info.get(key, {})
-        if 'convert_list_to' in key_attr_info:
-            values = key_attr_info['convert_list_to'](values)
-        elif 'convert_to' in key_attr_info:
-            convert_to = key_attr_info['convert_to']
-            values = [convert_to(v) for v in values]
-        if values:
-            res[key] = values
-    return res
-
-
-def get_previous_link(request, items, id_key):
-    params = request.GET.copy()
-    params.pop('marker', None)
-    if items:
-        marker = items[0][id_key]
-        params['marker'] = marker
-    params['page_reverse'] = True
-    return "%s?%s" % (request.path_url, parse.urlencode(params))
-
-
-def get_next_link(request, items, id_key):
-    params = request.GET.copy()
-    params.pop('marker', None)
-    if items:
-        marker = items[-1][id_key]
-        params['marker'] = marker
-    params.pop('page_reverse', None)
-    return "%s?%s" % (request.path_url, parse.urlencode(params))
-
-
-def get_limit_and_marker(request):
-    """Return marker, limit tuple from request.
-
-    :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
-                    GET variables. 'marker' is the id of the last element
-                    the client has seen, and 'limit' is the maximum number
-                    of items to return. If limit == 0, it means we needn't
-                    pagination, then return None.
-    """
-    max_limit = _get_pagination_max_limit()
-    limit = _get_limit_param(request)
-    if max_limit > 0:
-        limit = min(max_limit, limit) or max_limit
-    if not limit:
-        return None, None
-    marker = request.GET.get('marker', None)
-    return limit, marker
-
-
-def _get_pagination_max_limit():
-    max_limit = -1
-    if (cfg.CONF.pagination_max_limit.lower() !=
-        constants.PAGINATION_INFINITE):
-        try:
-            max_limit = int(cfg.CONF.pagination_max_limit)
-            if max_limit == 0:
-                raise ValueError()
-        except ValueError:
-            LOG.warn(_LW("Invalid value for pagination_max_limit: %s. It "
-                         "should be an integer greater to 0"),
-                     cfg.CONF.pagination_max_limit)
-    return max_limit
-
-
-def _get_limit_param(request):
-    """Extract integer limit from request or fail."""
-    try:
-        limit = int(request.GET.get('limit', 0))
-        if limit >= 0:
-            return limit
-    except ValueError:
-        pass
-    msg = _("Limit must be an integer 0 or greater and not '%d'")
-    raise exceptions.BadRequest(resource='limit', msg=msg)
-
-
-def list_args(request, arg):
-    """Extracts the list of arg from request."""
-    return [v for v in request.GET.getall(arg) if v]
-
-
-def get_sorts(request, attr_info):
-    """Extract sort_key and sort_dir from request.
-
-    Return as: [(key1, value1), (key2, value2)]
-    """
-    sort_keys = list_args(request, "sort_key")
-    sort_dirs = list_args(request, "sort_dir")
-    if len(sort_keys) != len(sort_dirs):
-        msg = _("The number of sort_keys and sort_dirs must be same")
-        raise exc.HTTPBadRequest(explanation=msg)
-    valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC]
-    absent_keys = [x for x in sort_keys if x not in attr_info]
-    if absent_keys:
-        msg = _("%s is invalid attribute for sort_keys") % absent_keys
-        raise exc.HTTPBadRequest(explanation=msg)
-    invalid_dirs = [x for x in sort_dirs if x not in valid_dirs]
-    if invalid_dirs:
-        msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, "
-                 "valid value is '%(asc)s' and '%(desc)s'") %
-               {'invalid_dirs': invalid_dirs,
-                'asc': constants.SORT_DIRECTION_ASC,
-                'desc': constants.SORT_DIRECTION_DESC})
-        raise exc.HTTPBadRequest(explanation=msg)
-    return list(zip(sort_keys,
-                    [x == constants.SORT_DIRECTION_ASC for x in sort_dirs]))
-
-
-def get_page_reverse(request):
-    data = request.GET.get('page_reverse', 'False')
-    return data.lower() == "true"
-
-
-def get_pagination_links(request, items, limit,
-                         marker, page_reverse, key="id"):
-    key = key if key else 'id'
-    links = []
-    if not limit:
-        return links
-    if not (len(items) < limit and not page_reverse):
-        links.append({"rel": "next",
-                      "href": get_next_link(request, items,
-                                            key)})
-    if not (len(items) < limit and page_reverse):
-        links.append({"rel": "previous",
-                      "href": get_previous_link(request, items,
-                                                key)})
-    return links
-
-
-class PaginationHelper(object):
-
-    def __init__(self, request, primary_key='id'):
-        self.request = request
-        self.primary_key = primary_key
-
-    def update_fields(self, original_fields, fields_to_add):
-        pass
-
-    def update_args(self, args):
-        pass
-
-    def paginate(self, items):
-        return items
-
-    def get_links(self, items):
-        return {}
-
-
-class PaginationEmulatedHelper(PaginationHelper):
-
-    def __init__(self, request, primary_key='id'):
-        super(PaginationEmulatedHelper, self).__init__(request, primary_key)
-        self.limit, self.marker = get_limit_and_marker(request)
-        self.page_reverse = get_page_reverse(request)
-
-    def update_fields(self, original_fields, fields_to_add):
-        if not original_fields:
-            return
-        if self.primary_key not in original_fields:
-            original_fields.append(self.primary_key)
-            fields_to_add.append(self.primary_key)
-
-    def paginate(self, items):
-        if not self.limit:
-            return items
-        i = -1
-        if self.marker:
-            for item in items:
-                i = i + 1
-                if item[self.primary_key] == self.marker:
-                    break
-        if self.page_reverse:
-            return items[i - self.limit:i]
-        return items[i + 1:i + self.limit + 1]
-
-    def get_links(self, items):
-        return get_pagination_links(
-            self.request, items, self.limit, self.marker,
-            self.page_reverse, self.primary_key)
-
-
-class PaginationNativeHelper(PaginationEmulatedHelper):
-
-    def update_args(self, args):
-        if self.primary_key not in dict(args.get('sorts', [])).keys():
-            args.setdefault('sorts', []).append((self.primary_key, True))
-        args.update({'limit': self.limit, 'marker': self.marker,
-                     'page_reverse': self.page_reverse})
-
-    def paginate(self, items):
-        return items
-
-
-class NoPaginationHelper(PaginationHelper):
-    pass
-
-
-class SortingHelper(object):
-
-    def __init__(self, request, attr_info):
-        pass
-
-    def update_args(self, args):
-        pass
-
-    def update_fields(self, original_fields, fields_to_add):
-        pass
-
-    def sort(self, items):
-        return items
-
-
-class SortingEmulatedHelper(SortingHelper):
-
-    def __init__(self, request, attr_info):
-        super(SortingEmulatedHelper, self).__init__(request, attr_info)
-        self.sort_dict = get_sorts(request, attr_info)
-
-    def update_fields(self, original_fields, fields_to_add):
-        if not original_fields:
-            return
-        for key in dict(self.sort_dict).keys():
-            if key not in original_fields:
-                original_fields.append(key)
-                fields_to_add.append(key)
-
-    def sort(self, items):
-        def cmp_func(obj1, obj2):
-            for key, direction in self.sort_dict:
-                o1 = obj1[key]
-                o2 = obj2[key]
-
-                if o1 is None and o2 is None:
-                    ret = 0
-                elif o1 is None and o2 is not None:
-                    ret = -1
-                elif o1 is not None and o2 is None:
-                    ret = 1
-                else:
-                    ret = (o1 > o2) - (o1 < o2)
-                if ret:
-                    return ret * (1 if direction else -1)
-            return 0
-        return sorted(items, key=functools.cmp_to_key(cmp_func))
-
-
-class SortingNativeHelper(SortingHelper):
-
-    def __init__(self, request, attr_info):
-        self.sort_dict = get_sorts(request, attr_info)
-
-    def update_args(self, args):
-        args['sorts'] = self.sort_dict
-
-
-class NoSortingHelper(SortingHelper):
-    pass
-
-
-class NeutronController(object):
-    """Base controller class for Neutron API."""
-    # _resource_name will be redefined in concrete controller subclasses
-    _resource_name = None
-
-    def __init__(self, plugin):
-        self._plugin = plugin
-        super(NeutronController, self).__init__()
-
-    def _prepare_request_body(self, body, params):
-        """Verifies required parameters are in request body.
-
-        Sets default value for missing optional parameters.
-        Body argument must be the deserialized body.
-        """
-        try:
-            if body is None:
-                # Initialize empty resource for setting default value
-                body = {self._resource_name: {}}
-            data = body[self._resource_name]
-        except KeyError:
-            # raise if _resource_name is not in req body.
-            raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") %
-                                     self._resource_name)
-        for param in params:
-            param_name = param['param-name']
-            param_value = data.get(param_name)
-            # If the parameter wasn't found and it was required, return 400
-            if param_value is None and param['required']:
-                msg = (_("Failed to parse request. "
-                         "Parameter '%s' not specified") % param_name)
-                LOG.error(msg)
-                raise exc.HTTPBadRequest(msg)
-            data[param_name] = param_value or param.get('default-value')
-        return body
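
The subtlest helper above is PaginationEmulatedHelper.paginate(), which
slices the full result list around the marker element. A standalone sketch
of that slicing with the request plumbing stripped out (the items, limit,
and marker values are made up):

    # Forward pagination: limit=2, starting after marker 'b'.
    items = [{'id': c} for c in 'abcdef']
    limit, marker, page_reverse = 2, 'b', False

    i = -1
    if marker:
        for item in items:
            i += 1
            if item['id'] == marker:
                break
    if page_reverse:
        page = items[i - limit:i]
    else:
        page = items[i + 1:i + limit + 1]
    assert [x['id'] for x in page] == ['c', 'd']
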
diff --git a/neutron/api/extensions.py b/neutron/api/extensions.py
deleted file mode 100644 (file)
index e18c75f..0000000
+++ /dev/null
@@ -1,666 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import collections
-import imp
-import os
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_middleware import base
-import routes
-import six
-import webob.dec
-import webob.exc
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.common import exceptions
-import neutron.extensions
-from neutron import manager
-from neutron.services import provider_configuration
-from neutron import wsgi
-
-
-LOG = logging.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class PluginInterface(object):
-
-    @classmethod
-    def __subclasshook__(cls, klass):
-        """Checking plugin class.
-
-        The __subclasshook__ method is a class method
-        that will be called every time a class is tested
-        using issubclass(klass, PluginInterface).
-        In that case, it will check that every method
-        marked with the abstractmethod decorator is
-        provided by the plugin class.
-        """
-
-        if not cls.__abstractmethods__:
-            return NotImplemented
-
-        for method in cls.__abstractmethods__:
-            if any(method in base.__dict__ for base in klass.__mro__):
-                continue
-            return NotImplemented
-        return True
-
-
-@six.add_metaclass(abc.ABCMeta)
-class ExtensionDescriptor(object):
-    """Base class that defines the contract for extensions."""
-
-    def get_name(self):
-        """The name of the extension.
-
-        e.g. 'Fox In Socks'
-        """
-        raise NotImplementedError()
-
-    def get_alias(self):
-        """The alias for the extension.
-
-        e.g. 'FOXNSOX'
-        """
-        raise NotImplementedError()
-
-    def get_description(self):
-        """Friendly description for the extension.
-
-        e.g. 'The Fox In Socks Extension'
-        """
-        raise NotImplementedError()
-
-    def get_updated(self):
-        """The timestamp when the extension was last updated.
-
-        e.g. '2011-01-22T13:25:27-06:00'
-        """
-        # NOTE(justinsb): Not sure what the purpose of this is, vs the XML NS
-        raise NotImplementedError()
-
-    def get_resources(self):
-        """List of extensions.ResourceExtension extension objects.
-
-        Resources define new nouns, and are accessible through URLs.
-        """
-        resources = []
-        return resources
-
-    def get_actions(self):
-        """List of extensions.ActionExtension extension objects.
-
-        Actions are verbs callable from the API.
-        """
-        actions = []
-        return actions
-
-    def get_request_extensions(self):
-        """List of extensions.RequestException extension objects.
-
-        Request extensions are used to handle custom request data.
-        """
-        request_exts = []
-        return request_exts
-
-    def get_extended_resources(self, version):
-        """Retrieve extended resources or attributes for core resources.
-
-        Extended attributes are implemented by a core plugin similarly
-        to the attributes defined in the core, and can appear in
-        request and response messages. Their names are scoped with the
-        extension's prefix. The core API version is passed to this
-        function, which must return a
-        map[<resource_name>][<attribute_name>][<attribute_property>]
-        specifying the extended resource attribute properties required
-        by that API version.
-
-        An extension can also add new resources and their attr definitions.
-        The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP.
-        """
-        return {}
-
-    def get_plugin_interface(self):
-        """Returns an abstract class which defines contract for the plugin.
-
-        The abstract class should inherit from extensions.PluginInterface.
-        Methods in this abstract class should be decorated as abstractmethod.
-        """
-        return None
-
-    def get_required_extensions(self):
-        """Returns a list of extensions to be processed before this one."""
-        return []
-
-    def update_attributes_map(self, extended_attributes,
-                              extension_attrs_map=None):
-        """Update attributes map for this extension.
-
-        This is the default method for extending an extension's attribute map.
-        An extension can call this method, supplying its own resource
-        attribute map as the extension_attrs_map argument, to extend all of
-        the attributes that need to be extended.
-
-        If an extension does not override update_attributes_map, this method
-        does nothing and simply returns.
-        """
-        if not extension_attrs_map:
-            return
-
-        for resource, attrs in six.iteritems(extension_attrs_map):
-            extended_attrs = extended_attributes.get(resource)
-            if extended_attrs:
-                attrs.update(extended_attrs)
-
-
-class ActionExtensionController(wsgi.Controller):
-
-    def __init__(self, application):
-        self.application = application
-        self.action_handlers = {}
-
-    def add_action(self, action_name, handler):
-        self.action_handlers[action_name] = handler
-
-    def action(self, request, id):
-        input_dict = self._deserialize(request.body,
-                                       request.get_content_type())
-        for action_name, handler in six.iteritems(self.action_handlers):
-            if action_name in input_dict:
-                return handler(input_dict, request, id)
-        # no action handler found (bump to downstream application)
-        response = self.application
-        return response
-
-
-class RequestExtensionController(wsgi.Controller):
-
-    def __init__(self, application):
-        self.application = application
-        self.handlers = []
-
-    def add_handler(self, handler):
-        self.handlers.append(handler)
-
-    def process(self, request, *args, **kwargs):
-        res = request.get_response(self.application)
-        # currently request handlers are unordered
-        for handler in self.handlers:
-            response = handler(request, res)
-        return response
-
-
-class ExtensionController(wsgi.Controller):
-
-    def __init__(self, extension_manager):
-        self.extension_manager = extension_manager
-
-    @staticmethod
-    def _translate(ext):
-        ext_data = {}
-        ext_data['name'] = ext.get_name()
-        ext_data['alias'] = ext.get_alias()
-        ext_data['description'] = ext.get_description()
-        ext_data['updated'] = ext.get_updated()
-        ext_data['links'] = []  # TODO(dprince): implement extension links
-        return ext_data
-
-    def index(self, request):
-        extensions = []
-        for _alias, ext in six.iteritems(self.extension_manager.extensions):
-            extensions.append(self._translate(ext))
-        return dict(extensions=extensions)
-
-    def show(self, request, id):
-        # NOTE(dprince): the extensions alias is used as the 'id' for show
-        ext = self.extension_manager.extensions.get(id, None)
-        if not ext:
-            raise webob.exc.HTTPNotFound(
-                _("Extension with alias %s does not exist") % id)
-        return dict(extension=self._translate(ext))
-
-    def delete(self, request, id):
-        msg = _('Resource not found.')
-        raise webob.exc.HTTPNotFound(msg)
-
-    def create(self, request):
-        msg = _('Resource not found.')
-        raise webob.exc.HTTPNotFound(msg)
-
-
-class ExtensionMiddleware(base.ConfigurableMiddleware):
-    """Extensions middleware for WSGI."""
-
-    def __init__(self, application,
-                 ext_mgr=None):
-        self.ext_mgr = (ext_mgr
-                        or ExtensionManager(get_extensions_path()))
-        mapper = routes.Mapper()
-
-        # extended resources
-        for resource in self.ext_mgr.get_resources():
-            path_prefix = resource.path_prefix
-            if resource.parent:
-                path_prefix = (resource.path_prefix +
-                               "/%s/{%s_id}" %
-                               (resource.parent["collection_name"],
-                                resource.parent["member_name"]))
-
-            LOG.debug('Extended resource: %s',
-                      resource.collection)
-            for action, method in six.iteritems(resource.collection_actions):
-                conditions = dict(method=[method])
-                path = "/%s/%s" % (resource.collection, action)
-                with mapper.submapper(controller=resource.controller,
-                                      action=action,
-                                      path_prefix=path_prefix,
-                                      conditions=conditions) as submap:
-                    submap.connect(path)
-                    submap.connect("%s.:(format)" % path)
-
-            mapper.resource(resource.collection, resource.collection,
-                            controller=resource.controller,
-                            member=resource.member_actions,
-                            parent_resource=resource.parent,
-                            path_prefix=path_prefix)
-
-        # extended actions
-        action_controllers = self._action_ext_controllers(application,
-                                                          self.ext_mgr, mapper)
-        for action in self.ext_mgr.get_actions():
-            LOG.debug('Extended action: %s', action.action_name)
-            controller = action_controllers[action.collection]
-            controller.add_action(action.action_name, action.handler)
-
-        # extended requests
-        req_controllers = self._request_ext_controllers(application,
-                                                        self.ext_mgr, mapper)
-        for request_ext in self.ext_mgr.get_request_extensions():
-            LOG.debug('Extended request: %s', request_ext.key)
-            controller = req_controllers[request_ext.key]
-            controller.add_handler(request_ext.handler)
-
-        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
-                                                          mapper)
-        super(ExtensionMiddleware, self).__init__(application)
-
-    @classmethod
-    def factory(cls, global_config, **local_config):
-        """Paste factory."""
-        def _factory(app):
-            return cls(app, global_config, **local_config)
-        return _factory
-
-    def _action_ext_controllers(self, application, ext_mgr, mapper):
-        """Return a dict of ActionExtensionController-s by collection."""
-        action_controllers = {}
-        for action in ext_mgr.get_actions():
-            if action.collection not in action_controllers.keys():
-                controller = ActionExtensionController(application)
-                mapper.connect("/%s/:(id)/action.:(format)" %
-                               action.collection,
-                               action='action',
-                               controller=controller,
-                               conditions=dict(method=['POST']))
-                mapper.connect("/%s/:(id)/action" % action.collection,
-                               action='action',
-                               controller=controller,
-                               conditions=dict(method=['POST']))
-                action_controllers[action.collection] = controller
-
-        return action_controllers
-
-    def _request_ext_controllers(self, application, ext_mgr, mapper):
-        """Returns a dict of RequestExtensionController-s by collection."""
-        request_ext_controllers = {}
-        for req_ext in ext_mgr.get_request_extensions():
-            if req_ext.key not in request_ext_controllers.keys():
-                controller = RequestExtensionController(application)
-                mapper.connect(req_ext.url_route + '.:(format)',
-                               action='process',
-                               controller=controller,
-                               conditions=req_ext.conditions)
-
-                mapper.connect(req_ext.url_route,
-                               action='process',
-                               controller=controller,
-                               conditions=req_ext.conditions)
-                request_ext_controllers[req_ext.key] = controller
-
-        return request_ext_controllers
-
-    @webob.dec.wsgify(RequestClass=wsgi.Request)
-    def __call__(self, req):
-        """Route the incoming request with router."""
-        req.environ['extended.app'] = self.application
-        return self._router
-
-    @staticmethod
-    @webob.dec.wsgify(RequestClass=wsgi.Request)
-    def _dispatch(req):
-        """Dispatch the request.
-
-        Returns the routed WSGI app's response or defers to the extended
-        application.
-        """
-        match = req.environ['wsgiorg.routing_args'][1]
-        if not match:
-            return req.environ['extended.app']
-        app = match['controller']
-        return app
-
-
-def plugin_aware_extension_middleware_factory(global_config, **local_config):
-    """Paste factory."""
-    def _factory(app):
-        ext_mgr = PluginAwareExtensionManager.get_instance()
-        return ExtensionMiddleware(app, ext_mgr=ext_mgr)
-    return _factory
-
-
-class ExtensionManager(object):
-    """Load extensions from the configured extension path.
-
-    See tests/unit/extensions/foxinsocks.py for an
-    example extension implementation.
-    """
-
-    def __init__(self, path):
-        LOG.info(_LI('Initializing extension manager.'))
-        self.path = path
-        self.extensions = {}
-        self._load_all_extensions()
-
-    def get_resources(self):
-        """Returns a list of ResourceExtension objects."""
-        resources = []
-        resources.append(ResourceExtension('extensions',
-                                           ExtensionController(self)))
-        for ext in self.extensions.values():
-            resources.extend(ext.get_resources())
-        return resources
-
-    def get_actions(self):
-        """Returns a list of ActionExtension objects."""
-        actions = []
-        for ext in self.extensions.values():
-            actions.extend(ext.get_actions())
-        return actions
-
-    def get_request_extensions(self):
-        """Returns a list of RequestExtension objects."""
-        request_exts = []
-        for ext in self.extensions.values():
-            request_exts.extend(ext.get_request_extensions())
-        return request_exts
-
-    def extend_resources(self, version, attr_map):
-        """Extend resources with additional resources or attributes.
-
-        :param attr_map: the existing mapping from resource name to
-            attrs definition.
-
-        After this call, attr_map is extended with any additional
-        resources and attributes the loaded extensions define.
-        """
-        processed_exts = {}
-        exts_to_process = self.extensions.copy()
-        # Iterate while there are unprocessed extensions, stopping as soon
-        # as a whole iteration makes no progress
-        while exts_to_process:
-            processed_ext_count = len(processed_exts)
-            for ext_name, ext in list(exts_to_process.items()):
-                # Process extension only if all required extensions
-                # have been processed already
-                required_exts_set = set(ext.get_required_extensions())
-                if required_exts_set - set(processed_exts):
-                    continue
-                extended_attrs = ext.get_extended_resources(version)
-                for res, resource_attrs in six.iteritems(extended_attrs):
-                    attr_map.setdefault(res, {}).update(resource_attrs)
-                processed_exts[ext_name] = ext
-                del exts_to_process[ext_name]
-            if len(processed_exts) == processed_ext_count:
-                # Exit loop as no progress was made
-                break
-        if exts_to_process:
-            # NOTE(salv-orlando): Consider whether this error should be fatal
-            LOG.error(_LE("It was impossible to process the following "
-                          "extensions: %s because of missing requirements."),
-                      ','.join(exts_to_process.keys()))
-
-        # Extending extensions' attributes map.
-        for ext in processed_exts.values():
-            ext.update_attributes_map(attr_map)
-
-    def _check_extension(self, extension):
-        """Checks for required methods in extension objects."""
-        try:
-            LOG.debug('Ext name: %s', extension.get_name())
-            LOG.debug('Ext alias: %s', extension.get_alias())
-            LOG.debug('Ext description: %s', extension.get_description())
-            LOG.debug('Ext updated: %s', extension.get_updated())
-        except AttributeError:
-            LOG.exception(_LE("Exception loading extension"))
-            return False
-        return isinstance(extension, ExtensionDescriptor)
-
-    def _load_all_extensions(self):
-        """Load extensions from the configured path.
-
-        The extension name is constructed from the module_name. If your
-        extension module is named widgets.py, the extension class within that
-        module should be 'Widgets'.
-
-        See tests/unit/extensions/foxinsocks.py for an example extension
-        implementation.
-        """
-
-        for path in self.path.split(':'):
-            if os.path.exists(path):
-                self._load_all_extensions_from_path(path)
-            else:
-                LOG.error(_LE("Extension path '%s' doesn't exist!"), path)
-
-    def _load_all_extensions_from_path(self, path):
-        # Sorting the extension list makes the order in which they
-        # are loaded predictable across a cluster of load-balanced
-        # Neutron Servers
-        for f in sorted(os.listdir(path)):
-            try:
-                LOG.debug('Loading extension file: %s', f)
-                mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
-                ext_path = os.path.join(path, f)
-                if file_ext.lower() == '.py' and not mod_name.startswith('_'):
-                    mod = imp.load_source(mod_name, ext_path)
-                    ext_name = mod_name[0].upper() + mod_name[1:]
-                    new_ext_class = getattr(mod, ext_name, None)
-                    if not new_ext_class:
-                        LOG.warning(_LW('Did not find expected name '
-                                        '"%(ext_name)s" in %(file)s'),
-                                    {'ext_name': ext_name,
-                                     'file': ext_path})
-                        continue
-                    new_ext = new_ext_class()
-                    self.add_extension(new_ext)
-            except Exception as exception:
-                LOG.warn(_LW("Extension file %(f)s wasn't loaded due to "
-                             "%(exception)s"),
-                         {'f': f, 'exception': exception})
-
-    def add_extension(self, ext):
-        # Do nothing if the extension doesn't check out
-        if not self._check_extension(ext):
-            return
-
-        alias = ext.get_alias()
-        LOG.info(_LI('Loaded extension: %s'), alias)
-
-        if alias in self.extensions:
-            raise exceptions.DuplicatedExtension(alias=alias)
-        self.extensions[alias] = ext
-
-
-class PluginAwareExtensionManager(ExtensionManager):
-
-    _instance = None
-
-    def __init__(self, path, plugins):
-        self.plugins = plugins
-        super(PluginAwareExtensionManager, self).__init__(path)
-        self.check_if_plugin_extensions_loaded()
-
-    def _check_extension(self, extension):
-        """Check if an extension is supported by any plugin."""
-        extension_is_valid = super(PluginAwareExtensionManager,
-                                   self)._check_extension(extension)
-        return (extension_is_valid and
-                self._plugins_support(extension) and
-                self._plugins_implement_interface(extension))
-
-    def _plugins_support(self, extension):
-        alias = extension.get_alias()
-        supports_extension = alias in self.get_supported_extension_aliases()
-        if not supports_extension:
-            LOG.warn(_LW("Extension %s not supported by any of loaded "
-                         "plugins"),
-                     alias)
-        return supports_extension
-
-    def _plugins_implement_interface(self, extension):
-        if extension.get_plugin_interface() is None:
-            return True
-        for plugin in self.plugins.values():
-            if isinstance(plugin, extension.get_plugin_interface()):
-                return True
-        LOG.warn(_LW("Loaded plugins do not implement extension %s interface"),
-                 extension.get_alias())
-        return False
-
-    @classmethod
-    def get_instance(cls):
-        if cls._instance is None:
-            service_plugins = manager.NeutronManager.get_service_plugins()
-            cls._instance = cls(get_extensions_path(service_plugins),
-                                service_plugins)
-        return cls._instance
-
-    def get_supported_extension_aliases(self):
-        """Gets extension aliases supported by all plugins."""
-        aliases = set()
-        for plugin in self.plugins.values():
-            # we also check all classes that the plugins inherit to see if they
-            # directly provide support for an extension
-            for item in [plugin] + plugin.__class__.mro():
-                try:
-                    aliases |= set(
-                        getattr(item, "supported_extension_aliases", []))
-                except TypeError:
-                    # we land here if a class has an @property decorator for
-                    # supported extension aliases. They only work on objects.
-                    pass
-        return aliases
-
-    @classmethod
-    def clear_instance(cls):
-        cls._instance = None
-
-    def check_if_plugin_extensions_loaded(self):
-        """Check if an extension supported by a plugin has been loaded."""
-        plugin_extensions = self.get_supported_extension_aliases()
-        missing_aliases = plugin_extensions - set(self.extensions)
-        if missing_aliases:
-            raise exceptions.ExtensionsNotFound(
-                extensions=list(missing_aliases))
-
-
-class RequestExtension(object):
-    """Extend requests and responses of core Neutron OpenStack API controllers.
-
-    Provide a way to add data to responses and handle custom request data
-    that is sent to core Neutron OpenStack API controllers.
-    """
-
-    def __init__(self, method, url_route, handler):
-        self.url_route = url_route
-        self.handler = handler
-        self.conditions = dict(method=[method])
-        self.key = "%s-%s" % (method, url_route)
-
-
-class ActionExtension(object):
-    """Add custom actions to core Neutron OpenStack API controllers."""
-
-    def __init__(self, collection, action_name, handler):
-        self.collection = collection
-        self.action_name = action_name
-        self.handler = handler
-
-
-class ResourceExtension(object):
-    """Add top level resources to the OpenStack API in Neutron."""
-
-    def __init__(self, collection, controller, parent=None, path_prefix="",
-                 collection_actions=None, member_actions=None, attr_map=None):
-        collection_actions = collection_actions or {}
-        member_actions = member_actions or {}
-        attr_map = attr_map or {}
-        self.collection = collection
-        self.controller = controller
-        self.parent = parent
-        self.collection_actions = collection_actions
-        self.member_actions = member_actions
-        self.path_prefix = path_prefix
-        self.attr_map = attr_map
-
-
-# Returns the extension paths from a config entry and the __path__
-# of neutron.extensions
-def get_extensions_path(service_plugins=None):
-    paths = collections.OrderedDict()
-
-    # Add Neutron core extensions
-    paths[neutron.extensions.__path__[0]] = 1
-    if service_plugins:
-        # Add Neutron *-aas extensions
-        for plugin in service_plugins.values():
-            neutron_mod = provider_configuration.NeutronModule(
-                plugin.__module__.split('.')[0])
-            try:
-                paths[neutron_mod.module().extensions.__path__[0]] = 1
-            except AttributeError:
-                # Occurs normally if module has no extensions sub-module
-                pass
-
-    # Add external/other plugins extensions
-    if cfg.CONF.api_extensions_path:
-        for path in cfg.CONF.api_extensions_path.split(":"):
-            paths[path] = 1
-
-    LOG.debug("get_extension_paths = %s", paths)
-
-    # Re-build the extension string
-    path = ':'.join(paths)
-    return path
-
-
-def append_api_extensions_path(paths):
-    paths = list(set([cfg.CONF.api_extensions_path] + paths))
-    cfg.CONF.set_override('api_extensions_path',
-                          ':'.join([p for p in paths if p]))
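
A minimal descriptor satisfying the ExtensionDescriptor contract above could
look like the sketch below. Per _load_all_extensions(), a module named
widgets.py must expose a class named Widgets; the resource name and
WidgetController are hypothetical:

    # Hypothetical contents of an extension file 'widgets.py'.
    class Widgets(ExtensionDescriptor):
        def get_name(self):
            return "Widgets"

        def get_alias(self):
            return "widgets"

        def get_description(self):
            return "Adds a top-level widgets resource."

        def get_updated(self):
            return "2016-02-01T00:00:00-00:00"

        def get_resources(self):
            # Any wsgi controller works here; WidgetController is made up.
            return [ResourceExtension('widgets', WidgetController())]
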
diff --git a/neutron/api/rpc/__init__.py b/neutron/api/rpc/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/api/rpc/agentnotifiers/__init__.py b/neutron/api/rpc/agentnotifiers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py
deleted file mode 100644 (file)
index 06374b1..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-
-from neutron._i18n import _LE, _LW
-from neutron.common import constants
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron import manager
-
-
-LOG = logging.getLogger(__name__)
-
-
-class DhcpAgentNotifyAPI(object):
-    """API for plugin to notify DHCP agent.
-
-    This class implements the client side of an rpc interface.  The server side
-    is neutron.agent.dhcp_agent.DhcpAgent.  For more information about changing
-    rpc interfaces, please see doc/source/devref/rpc_api.rst.
-    """
-    # It seems the DHCP agent does not support bulk operations
-    VALID_RESOURCES = ['network', 'subnet', 'port']
-    VALID_METHOD_NAMES = ['network.create.end',
-                          'network.update.end',
-                          'network.delete.end',
-                          'subnet.create.end',
-                          'subnet.update.end',
-                          'subnet.delete.end',
-                          'port.create.end',
-                          'port.update.end',
-                          'port.delete.end']
-
-    def __init__(self, topic=topics.DHCP_AGENT, plugin=None):
-        self._plugin = plugin
-        target = oslo_messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    @property
-    def plugin(self):
-        if self._plugin is None:
-            self._plugin = manager.NeutronManager.get_plugin()
-        return self._plugin
-
-    def _schedule_network(self, context, network, existing_agents):
-        """Schedule the network to new agents
-
-        :return: all agents associated with the network
-        """
-        new_agents = self.plugin.schedule_network(context, network) or []
-        if new_agents:
-            for agent in new_agents:
-                self._cast_message(
-                    context, 'network_create_end',
-                    {'network': {'id': network['id']}}, agent['host'])
-        elif not existing_agents:
-            LOG.warning(_LW('Unable to schedule network %s: no agents '
-                            'available; will retry on subsequent port and '
-                            'subnet creation events.'), network['id'])
-        return new_agents + existing_agents
-
-    def _get_enabled_agents(self, context, network, agents, method, payload):
-        """Get the list of agents who can provide services."""
-        if not agents:
-            return []
-        network_id = network['id']
-        enabled_agents = agents
-        if not cfg.CONF.enable_services_on_agents_with_admin_state_down:
-            enabled_agents = [x for x in agents if x.admin_state_up]
-        active_agents = [x for x in agents if x.is_active]
-        len_enabled_agents = len(enabled_agents)
-        len_active_agents = len(active_agents)
-        if len_active_agents < len_enabled_agents:
-            LOG.warn(_LW("Only %(active)d of %(total)d DHCP agents associated "
-                         "with network '%(net_id)s' are marked as active, so "
-                         "notifications may be sent to inactive agents."),
-                     {'active': len_active_agents,
-                      'total': len_enabled_agents,
-                      'net_id': network_id})
-        if not enabled_agents:
-            num_ports = self.plugin.get_ports_count(
-                context, {'network_id': [network_id]})
-            notification_required = (
-                num_ports > 0 and len(network['subnets']) >= 1)
-            if notification_required:
-                LOG.error(_LE("Will not send event %(method)s for network "
-                              "%(net_id)s: no agent available. Payload: "
-                              "%(payload)s"),
-                          {'method': method,
-                           'net_id': network_id,
-                           'payload': payload})
-        return enabled_agents
-
-    def _is_reserved_dhcp_port(self, port):
-        return port.get('device_id') == constants.DEVICE_ID_RESERVED_DHCP_PORT
-
-    def _notify_agents(self, context, method, payload, network_id):
-        """Notify all the agents that are hosting the network."""
-        # fanout is required as we do not know who is "listening"
-        no_agents = not utils.is_extension_supported(
-            self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS)
-        fanout_required = method == 'network_delete_end' or no_agents
-
-        # we do nothing on network creation because we want to give the
-        # admin the chance to associate an agent to the network manually
-        cast_required = method != 'network_create_end'
-
-        if fanout_required:
-            self._fanout_message(context, method, payload)
-        elif cast_required:
-            admin_ctx = (context if context.is_admin else context.elevated())
-            network = self.plugin.get_network(admin_ctx, network_id)
-            agents = self.plugin.get_dhcp_agents_hosting_networks(
-                context, [network_id])
-
-            # schedule the network first, if needed
-            schedule_required = (
-                method == 'subnet_create_end' or
-                method == 'port_create_end' and
-                not self._is_reserved_dhcp_port(payload['port']))
-            if schedule_required:
-                agents = self._schedule_network(admin_ctx, network, agents)
-            if not agents:
-                LOG.debug("Network %s is not hosted by any dhcp agent",
-                          network_id)
-                return
-            enabled_agents = self._get_enabled_agents(
-                context, network, agents, method, payload)
-            for agent in enabled_agents:
-                self._cast_message(
-                    context, method, payload, agent.host, agent.topic)
-
-    def _cast_message(self, context, method, payload, host,
-                      topic=topics.DHCP_AGENT):
-        """Cast the payload to the dhcp agent running on the host."""
-        cctxt = self.client.prepare(topic=topic, server=host)
-        cctxt.cast(context, method, payload=payload)
-
-    def _fanout_message(self, context, method, payload):
-        """Fanout the payload to all dhcp agents."""
-        cctxt = self.client.prepare(fanout=True)
-        cctxt.cast(context, method, payload=payload)
-
-    def network_removed_from_agent(self, context, network_id, host):
-        self._cast_message(context, 'network_delete_end',
-                           {'network_id': network_id}, host)
-
-    def network_added_to_agent(self, context, network_id, host):
-        self._cast_message(context, 'network_create_end',
-                           {'network': {'id': network_id}}, host)
-
-    def agent_updated(self, context, admin_state_up, host):
-        self._cast_message(context, 'agent_updated',
-                           {'admin_state_up': admin_state_up}, host)
-
-    def notify(self, context, data, method_name):
-    # data is {'key': 'value'} with only one key
-        if method_name not in self.VALID_METHOD_NAMES:
-            return
-        obj_type = list(data.keys())[0]
-        if obj_type not in self.VALID_RESOURCES:
-            return
-        obj_value = data[obj_type]
-        network_id = None
-        if obj_type == 'network' and 'id' in obj_value:
-            network_id = obj_value['id']
-        elif obj_type in ['port', 'subnet'] and 'network_id' in obj_value:
-            network_id = obj_value['network_id']
-        if not network_id:
-            return
-        method_name = method_name.replace(".", "_")
-        if method_name.endswith("_delete_end"):
-            if 'id' in obj_value:
-                self._notify_agents(context, method_name,
-                                    {obj_type + '_id': obj_value['id']},
-                                    network_id)
-        else:
-            self._notify_agents(context, method_name, data, network_id)
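
The notify() entry point above normalizes every resource event before handing it to _notify_agents(): the single-key payload identifies the resource type, 'subnet.create.end' becomes 'subnet_create_end', and delete events are reduced to an id-only payload. A minimal standalone sketch of that dispatch, with a print stub standing in for the real RPC cast:

    # Standalone sketch of the dispatch in notify() above; _notify_agents()
    # is an illustrative stub, not the real RPC cast.
    def _notify_agents(context, method, payload, network_id):
        print("would cast %s for network %s: %s"
              % (method, network_id, payload))

    def notify(context, data, method_name):
        obj_type = list(data.keys())[0]        # data has exactly one key
        obj_value = data[obj_type]
        if obj_type == 'network':
            network_id = obj_value.get('id')
        else:
            network_id = obj_value.get('network_id')
        if not network_id:
            return
        method_name = method_name.replace(".", "_")
        if method_name.endswith("_delete_end"):
            # delete events only need the id, e.g. {'subnet_id': ...}
            payload = {obj_type + '_id': obj_value['id']}
        else:
            payload = data
        _notify_agents(context, method_name, payload, network_id)

    notify(None, {'subnet': {'id': 'sub-1', 'network_id': 'net-1'}},
           'subnet.create.end')
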
diff --git a/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py
deleted file mode 100644 (file)
index f239ed6..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import random
-
-from oslo_log import log as logging
-import oslo_messaging
-
-from neutron._i18n import _LE
-from neutron.common import constants
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-
-
-LOG = logging.getLogger(__name__)
-
-
-class L3AgentNotifyAPI(object):
-    """API for plugin to notify L3 agent."""
-
-    def __init__(self, topic=topics.L3_AGENT):
-        target = oslo_messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def _notification_host(self, context, method, host, use_call=False,
-                           **kwargs):
-        """Notify the agent that is hosting the router."""
-        LOG.debug('Notify agent at %(host)s the message '
-                  '%(method)s', {'host': host,
-                                 'method': method})
-        cctxt = self.client.prepare(server=host)
-        rpc_method = cctxt.call if use_call else cctxt.cast
-        rpc_method(context, method, **kwargs)
-
-    def _agent_notification(self, context, method, router_ids, operation,
-                            shuffle_agents):
-        """Notify changed routers to hosting l3 agents."""
-        adminContext = context if context.is_admin else context.elevated()
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        for router_id in router_ids:
-            hosts = plugin.get_hosts_to_notify(adminContext, router_id)
-            if shuffle_agents:
-                random.shuffle(hosts)
-            for host in hosts:
-                LOG.debug('Notify agent at %(topic)s.%(host)s the message '
-                          '%(method)s',
-                          {'topic': topics.L3_AGENT,
-                           'host': host,
-                           'method': method})
-                cctxt = self.client.prepare(topic=topics.L3_AGENT,
-                                            server=host,
-                                            version='1.1')
-                cctxt.cast(context, method, routers=[router_id])
-
-    def _agent_notification_arp(self, context, method, router_id,
-                                operation, data):
-        """Notify arp details to l3 agents hosting router."""
-        if not router_id:
-            return
-        adminContext = (context if context.is_admin
-                        else context.elevated())
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        hosts = plugin.get_hosts_to_notify(adminContext, router_id)
-        # TODO(murali): replace cast with fanout to avoid performance
-        # issues at greater scale.
-        for host in hosts:
-            log_topic = '%s.%s' % (topics.L3_AGENT, host)
-            LOG.debug('Casting message %(method)s with topic %(topic)s',
-                      {'topic': log_topic, 'method': method})
-            dvr_arptable = {'router_id': router_id,
-                            'arp_table': data}
-            cctxt = self.client.prepare(topic=topics.L3_AGENT,
-                                        server=host,
-                                        version='1.2')
-            cctxt.cast(context, method, payload=dvr_arptable)
-
-    def _notification(self, context, method, router_ids, operation,
-                      shuffle_agents, schedule_routers=True):
-        """Notify all the agents that are hosting the routers."""
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        if not plugin:
-            LOG.error(_LE('No plugin for L3 routing registered. Cannot notify '
-                          'agents with the message %s'), method)
-            return
-        if utils.is_extension_supported(
-                plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
-            adminContext = (context if context.is_admin
-                            else context.elevated())
-            if schedule_routers:
-                plugin.schedule_routers(adminContext, router_ids)
-            self._agent_notification(
-                context, method, router_ids, operation, shuffle_agents)
-        else:
-            cctxt = self.client.prepare(fanout=True)
-            cctxt.cast(context, method, routers=router_ids)
-
-    def _notification_fanout(self, context, method, router_id=None, **kwargs):
-        """Fanout the information to all L3 agents.
-
-        This function will fanout the router_id or ext_net_id
-        to the L3 Agents.
-        """
-        ext_net_id = kwargs.get('ext_net_id')
-        if router_id:
-            kwargs['router_id'] = router_id
-            LOG.debug('Fanout notify agent at %(topic)s the message '
-                      '%(method)s on router %(router_id)s',
-                      {'topic': topics.L3_AGENT,
-                       'method': method,
-                       'router_id': router_id})
-        if ext_net_id:
-            LOG.debug('Fanout notify agent at %(topic)s the message '
-                      '%(method)s for external_network %(ext_net_id)s',
-                      {'topic': topics.L3_AGENT,
-                       'method': method,
-                       'ext_net_id': ext_net_id})
-        cctxt = self.client.prepare(fanout=True)
-        cctxt.cast(context, method, **kwargs)
-
-    def agent_updated(self, context, admin_state_up, host):
-        self._notification_host(context, 'agent_updated', host,
-                                payload={'admin_state_up': admin_state_up})
-
-    def router_deleted(self, context, router_id):
-        self._notification_fanout(context, 'router_deleted', router_id)
-
-    def routers_updated(self, context, router_ids, operation=None, data=None,
-                        shuffle_agents=False, schedule_routers=True):
-        if router_ids:
-            self._notification(context, 'routers_updated', router_ids,
-                               operation, shuffle_agents, schedule_routers)
-
-    def add_arp_entry(self, context, router_id, arp_table, operation=None):
-        self._agent_notification_arp(context, 'add_arp_entry', router_id,
-                                     operation, arp_table)
-
-    def del_arp_entry(self, context, router_id, arp_table, operation=None):
-        self._agent_notification_arp(context, 'del_arp_entry', router_id,
-                                     operation, arp_table)
-
-    def delete_fipnamespace_for_ext_net(self, context, ext_net_id):
-        self._notification_fanout(
-            context, 'fipnamespace_delete_on_ext_net',
-            ext_net_id=ext_net_id)
-
-    def router_removed_from_agent(self, context, router_id, host):
-        self._notification_host(context, 'router_removed_from_agent', host,
-                                payload={'router_id': router_id})
-
-    def router_added_to_agent(self, context, router_ids, host):
-        # We need to use call here to be sure the agent received the
-        # notification and the router will not be "lost". However, using
-        # call() itself is not a guarantee; calling code should handle
-        # exceptions and retry.
-        self._notification_host(context, 'router_added_to_agent', host,
-                                use_call=True, payload=router_ids)
-
-    def routers_updated_on_host(self, context, router_ids, host):
-        self._notification_host(context, 'routers_updated', host,
-                                routers=router_ids)
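
_notification_host() above is the one place this notifier can trade fire-and-forget for a blocking call; router_added_to_agent() opts in with use_call=True and still expects callers to handle exceptions and retry. A sketch of the toggle, assuming a dummy client whose prepare()/call()/cast() merely mirror the oslo.messaging method names:

    class DummyClient(object):
        # Stand-in for the oslo.messaging RPC client, illustration only.
        def prepare(self, **kwargs):
            return self

        def call(self, context, method, **kwargs):
            print("call (blocks for a reply): %s %s" % (method, kwargs))

        def cast(self, context, method, **kwargs):
            print("cast (fire-and-forget): %s %s" % (method, kwargs))

    client = DummyClient()

    def _notification_host(context, method, host, use_call=False, **kwargs):
        cctxt = client.prepare(server=host)
        rpc_method = cctxt.call if use_call else cctxt.cast
        rpc_method(context, method, **kwargs)

    _notification_host(None, 'routers_updated', 'host-1', routers=['r1'])
    _notification_host(None, 'router_added_to_agent', 'host-1',
                       use_call=True, payload=['r1'])
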
diff --git a/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py
deleted file mode 100644 (file)
index 6787781..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-import oslo_messaging
-import six
-
-from neutron.common import constants
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron.db import agentschedulers_db
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-
-LOG = logging.getLogger(__name__)
-
-
-class MeteringAgentNotifyAPI(object):
-    """API for plugin to notify L3 metering agent."""
-
-    def __init__(self, topic=topics.METERING_AGENT):
-        self.topic = topic
-        target = oslo_messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def _agent_notification(self, context, method, routers):
-        """Notify l3 metering agents hosted by l3 agent hosts."""
-        adminContext = context if context.is_admin else context.elevated()
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-
-        l3_routers = {}
-        state = agentschedulers_db.get_admin_state_up_filter()
-        for router in routers:
-            l3_agents = plugin.get_l3_agents_hosting_routers(
-                adminContext, [router['id']],
-                admin_state_up=state,
-                active=True)
-            for l3_agent in l3_agents:
-                LOG.debug('Notify metering agent at %(topic)s.%(host)s '
-                          'the message %(method)s',
-                          {'topic': self.topic,
-                           'host': l3_agent.host,
-                           'method': method})
-
-                l3_router = l3_routers.get(l3_agent.host, [])
-                l3_router.append(router)
-                l3_routers[l3_agent.host] = l3_router
-
-        for host, host_routers in six.iteritems(l3_routers):
-            cctxt = self.client.prepare(server=host)
-            cctxt.cast(context, method, routers=host_routers)
-
-    def _notification_fanout(self, context, method, router_id):
-        LOG.debug('Fanout notify metering agent at %(topic)s the message '
-                  '%(method)s on router %(router_id)s',
-                  {'topic': self.topic,
-                   'method': method,
-                   'router_id': router_id})
-        cctxt = self.client.prepare(fanout=True)
-        cctxt.cast(context, method, router_id=router_id)
-
-    def _notification(self, context, method, routers):
-        """Notify all the agents that are hosting the routers."""
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        if utils.is_extension_supported(
-            plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
-            self._agent_notification(context, method, routers)
-        else:
-            cctxt = self.client.prepare(fanout=True)
-            cctxt.cast(context, method, routers=routers)
-
-    def router_deleted(self, context, router_id):
-        self._notification_fanout(context, 'router_deleted', router_id)
-
-    def routers_updated(self, context, routers):
-        if routers:
-            self._notification(context, 'routers_updated', routers)
-
-    def update_metering_label_rules(self, context, routers):
-        self._notification(context, 'update_metering_label_rules', routers)
-
-    def add_metering_label_rule(self, context, routers):
-        self._notification(context, 'add_metering_label_rule', routers)
-
-    def remove_metering_label_rule(self, context, routers):
-        self._notification(context, 'remove_metering_label_rule', routers)
-
-    def add_metering_label(self, context, routers):
-        self._notification(context, 'add_metering_label', routers)
-
-    def remove_metering_label(self, context, routers):
-        self._notification(context, 'remove_metering_label', routers)
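
_agent_notification() above inverts the router-to-agents relation into a host-to-routers map so each metering agent receives one cast covering all of its routers. The same grouping in isolation, on invented sample data:

    routers = [{'id': 'r1'}, {'id': 'r2'}]
    # invented hosting relation: which l3 agent hosts serve each router
    hosting = {'r1': ['host-a', 'host-b'], 'r2': ['host-a']}

    l3_routers = {}
    for router in routers:
        for host in hosting[router['id']]:
            l3_routers.setdefault(host, []).append(router)

    for host, hosted in sorted(l3_routers.items()):
        print(host, [r['id'] for r in hosted])
    # host-a ['r1', 'r2'], host-b ['r1']: one cast per host, not per router
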
diff --git a/neutron/api/rpc/callbacks/__init__.py b/neutron/api/rpc/callbacks/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/api/rpc/callbacks/consumer/__init__.py b/neutron/api/rpc/callbacks/consumer/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/api/rpc/callbacks/consumer/registry.py b/neutron/api/rpc/callbacks/consumer/registry.py
deleted file mode 100644 (file)
index a59b5db..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.rpc.callbacks import resource_manager
-
-
-# TODO(ajo): consider adding locking to _get_manager; it's
-#            safe for eventlet, but not for normal threading.
-def _get_manager():
-    return resource_manager.ConsumerResourceCallbacksManager()
-
-
-def subscribe(callback, resource_type):
-    _get_manager().register(callback, resource_type)
-
-
-def unsubscribe(callback, resource_type):
-    _get_manager().unregister(callback, resource_type)
-
-
-def push(resource_type, resource, event_type):
-    """Push resource events into all registered callbacks for the type."""
-
-    callbacks = _get_manager().get_callbacks(resource_type)
-    for callback in callbacks:
-        callback(resource, event_type)
-
-
-def clear():
-    _get_manager().clear()
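
subscribe()/unsubscribe()/push() are thin wrappers over the singleton ConsumerResourceCallbacksManager defined in resource_manager.py. A self-contained sketch of the same fan-out, with a plain dict of sets standing in for the manager so it runs without neutron:

    _callbacks = {}

    def subscribe(callback, resource_type):
        _callbacks.setdefault(resource_type, set()).add(callback)

    def push(resource_type, resource, event_type):
        # fan the event out to every callback registered for the type
        for callback in _callbacks.get(resource_type, ()):
            callback(resource, event_type)

    def on_qos_policy(resource, event_type):
        print("qos policy %s %s" % (resource['id'], event_type))

    subscribe(on_qos_policy, 'QosPolicy')
    push('QosPolicy', {'id': 'p1'}, 'updated')
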
diff --git a/neutron/api/rpc/callbacks/events.py b/neutron/api/rpc/callbacks/events.py
deleted file mode 100644 (file)
index 485a1bc..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-CREATED = 'created'
-UPDATED = 'updated'
-DELETED = 'deleted'
-
-VALID = (
-    CREATED,
-    UPDATED,
-    DELETED
-)
diff --git a/neutron/api/rpc/callbacks/exceptions.py b/neutron/api/rpc/callbacks/exceptions.py
deleted file mode 100644 (file)
index c45615b..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.common import exceptions
-
-
-class CallbackWrongResourceType(exceptions.NeutronException):
-    message = _('Callback for %(resource_type)s returned wrong resource type')
-
-
-class CallbackNotFound(exceptions.NeutronException):
-    message = _('Callback for %(resource_type)s not found')
-
-
-class CallbacksMaxLimitReached(exceptions.NeutronException):
-    message = _("Cannot add multiple callbacks for %(resource_type)s")
diff --git a/neutron/api/rpc/callbacks/producer/__init__.py b/neutron/api/rpc/callbacks/producer/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/api/rpc/callbacks/producer/registry.py b/neutron/api/rpc/callbacks/producer/registry.py
deleted file mode 100644 (file)
index 92124c5..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.rpc.callbacks import exceptions
-from neutron.api.rpc.callbacks import resource_manager
-from neutron.objects import base
-
-
-# TODO(ajo): consider adding locking: it's safe for eventlet but not
-#            for other types of threading.
-def _get_manager():
-    return resource_manager.ProducerResourceCallbacksManager()
-
-
-def provide(callback, resource_type):
-    """Register a callback as a producer for the resource type.
-
-    This callback will be used to produce resources of the corresponding
-    type for interested parties.
-    """
-    _get_manager().register(callback, resource_type)
-
-
-def unprovide(callback, resource_type):
-    """Unregister a callback for corresponding resource type."""
-    _get_manager().unregister(callback, resource_type)
-
-
-def clear():
-    """Clear all callbacks."""
-    _get_manager().clear()
-
-
-def pull(resource_type, resource_id, **kwargs):
-    """Get resource object that corresponds to resource id.
-
-    The function will return an object that is provided by resource producer.
-
-    :returns: NeutronObject
-    """
-    callback = _get_manager().get_callback(resource_type)
-    obj = callback(resource_type, resource_id, **kwargs)
-    if obj:
-        if (not isinstance(obj, base.NeutronObject) or
-            resource_type != obj.obj_name()):
-            raise exceptions.CallbackWrongResourceType(
-                resource_type=resource_type)
-    return obj
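
pull() above resolves the single producer registered for a resource type, invokes it, and rejects results of the wrong type. A sketch of that contract, with a stand-in QosPolicy class (not neutron.objects.qos.policy.QosPolicy) and a plain dict replacing the producer manager:

    class QosPolicy(object):
        def __init__(self, id):
            self.id = id

    _producers = {}

    def provide(callback, resource_type):
        # at most one producer per resource type, as in the manager above
        if resource_type in _producers:
            raise RuntimeError('only one producer per resource type')
        _producers[resource_type] = callback

    def pull(resource_type, resource_id):
        obj = _producers[resource_type](resource_type, resource_id)
        if obj is not None and not isinstance(obj, QosPolicy):
            raise TypeError('producer returned wrong resource type')
        return obj

    provide(lambda rtype, rid: QosPolicy(rid), 'QosPolicy')
    print(pull('QosPolicy', 'p1').id)   # 'p1'
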
diff --git a/neutron/api/rpc/callbacks/resource_manager.py b/neutron/api/rpc/callbacks/resource_manager.py
deleted file mode 100644 (file)
index 63f8980..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import collections
-
-from oslo_log import log as logging
-import six
-
-from neutron.api.rpc.callbacks import exceptions as rpc_exc
-from neutron.api.rpc.callbacks import resources
-from neutron.callbacks import exceptions
-
-LOG = logging.getLogger(__name__)
-
-# TODO(QoS): split the registry/resources_rpc modules into two separate things:
-# one for pull and one for push APIs
-
-
-def _validate_resource_type(resource_type):
-    if not resources.is_valid_resource_type(resource_type):
-        raise exceptions.Invalid(element='resource', value=resource_type)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class ResourceCallbacksManager(object):
-    """A callback system that allows information providers in a loose manner.
-    """
-
-    # This hook is to allow tests to get new objects for the class
-    _singleton = True
-
-    def __new__(cls, *args, **kwargs):
-        if not cls._singleton:
-            return super(ResourceCallbacksManager, cls).__new__(cls)
-
-        if not hasattr(cls, '_instance'):
-            cls._instance = super(ResourceCallbacksManager, cls).__new__(cls)
-        return cls._instance
-
-    @abc.abstractmethod
-    def _add_callback(self, callback, resource_type):
-        pass
-
-    @abc.abstractmethod
-    def _delete_callback(self, callback, resource_type):
-        pass
-
-    def register(self, callback, resource_type):
-        """Register a callback for a resource type.
-
-        :param callback: the callback. It must raise or return NeutronObject.
-        :param resource_type: must be a valid resource type.
-        """
-        LOG.debug("Registering callback for %s", resource_type)
-        _validate_resource_type(resource_type)
-        self._add_callback(callback, resource_type)
-
-    def unregister(self, callback, resource_type):
-        """Unregister callback from the registry.
-
-        :param callback: the callback.
-        :param resource_type: must be a valid resource type.
-        """
-        LOG.debug("Unregistering callback for %s", resource_type)
-        _validate_resource_type(resource_type)
-        self._delete_callback(callback, resource_type)
-
-    @abc.abstractmethod
-    def clear(self):
-        """Brings the manager to a clean state."""
-
-    def get_subscribed_types(self):
-        return list(self._callbacks.keys())
-
-
-class ProducerResourceCallbacksManager(ResourceCallbacksManager):
-
-    _callbacks = dict()
-
-    def _add_callback(self, callback, resource_type):
-        if resource_type in self._callbacks:
-            raise rpc_exc.CallbacksMaxLimitReached(resource_type=resource_type)
-        self._callbacks[resource_type] = callback
-
-    def _delete_callback(self, callback, resource_type):
-        try:
-            del self._callbacks[resource_type]
-        except KeyError:
-            raise rpc_exc.CallbackNotFound(resource_type=resource_type)
-
-    def clear(self):
-        self._callbacks = dict()
-
-    def get_callback(self, resource_type):
-        _validate_resource_type(resource_type)
-        try:
-            return self._callbacks[resource_type]
-        except KeyError:
-            raise rpc_exc.CallbackNotFound(resource_type=resource_type)
-
-
-class ConsumerResourceCallbacksManager(ResourceCallbacksManager):
-
-    _callbacks = collections.defaultdict(set)
-
-    def _add_callback(self, callback, resource_type):
-        self._callbacks[resource_type].add(callback)
-
-    def _delete_callback(self, callback, resource_type):
-        try:
-            self._callbacks[resource_type].remove(callback)
-            if not self._callbacks[resource_type]:
-                del self._callbacks[resource_type]
-        except KeyError:
-            raise rpc_exc.CallbackNotFound(resource_type=resource_type)
-
-    def clear(self):
-        self._callbacks = collections.defaultdict(set)
-
-    def get_callbacks(self, resource_type):
-        """Return the callback if found, None otherwise.
-
-        :param resource_type: must be a valid resource type.
-        """
-        _validate_resource_type(resource_type)
-        callbacks = self._callbacks[resource_type]
-        if not callbacks:
-            raise rpc_exc.CallbackNotFound(resource_type=resource_type)
-        return callbacks
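
The _singleton/__new__ combination above makes every instantiation of a concrete manager class hand back one shared instance, with a class attribute tests can flip to get fresh objects. The pattern in isolation:

    class Manager(object):
        _singleton = True          # tests flip this to get fresh instances

        def __new__(cls, *args, **kwargs):
            if not cls._singleton:
                return super(Manager, cls).__new__(cls)
            if not hasattr(cls, '_instance'):
                cls._instance = super(Manager, cls).__new__(cls)
            return cls._instance

    assert Manager() is Manager()          # one shared instance
    Manager._singleton = False
    assert Manager() is not Manager()      # per-call instances for tests
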
diff --git a/neutron/api/rpc/callbacks/resources.py b/neutron/api/rpc/callbacks/resources.py
deleted file mode 100644 (file)
index bde7aed..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.objects.qos import policy
-
-
-_QOS_POLICY_CLS = policy.QosPolicy
-
-_VALID_CLS = (
-    _QOS_POLICY_CLS,
-)
-
-_VALID_TYPES = [cls.obj_name() for cls in _VALID_CLS]
-
-
-# Supported types
-QOS_POLICY = _QOS_POLICY_CLS.obj_name()
-
-
-_TYPE_TO_CLS_MAP = {
-    QOS_POLICY: _QOS_POLICY_CLS,
-}
-
-
-def get_resource_type(resource_cls):
-    if not resource_cls:
-        return None
-
-    if not hasattr(resource_cls, 'obj_name'):
-        return None
-
-    return resource_cls.obj_name()
-
-
-def is_valid_resource_type(resource_type):
-    return resource_type in _VALID_TYPES
-
-
-def get_resource_cls(resource_type):
-    return _TYPE_TO_CLS_MAP.get(resource_type)
diff --git a/neutron/api/rpc/handlers/__init__.py b/neutron/api/rpc/handlers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/api/rpc/handlers/dhcp_rpc.py b/neutron/api/rpc/handlers/dhcp_rpc.py
deleted file mode 100644 (file)
index 82878fd..0000000
+++ /dev/null
@@ -1,231 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-import itertools
-import operator
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_utils import excutils
-
-from neutron._i18n import _, _LW
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import utils
-from neutron.db import api as db_api
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.common import utils as p_utils
-from neutron.quota import resource_registry
-
-
-LOG = logging.getLogger(__name__)
-
-
-class DhcpRpcCallback(object):
-    """DHCP agent RPC callback in plugin implementations.
-
-    This class implements the server side of an rpc interface.  The client
-    side of this interface can be found in
-    neutron.agent.dhcp.agent.DhcpPluginApi.  For more information about
-    changing rpc interfaces, see doc/source/devref/rpc_api.rst.
-    """
-
-    # API version history:
-    #     1.0 - Initial version.
-    #     1.1 - Added get_active_networks_info, create_dhcp_port,
-    #           and update_dhcp_port methods.
-    #     1.2 - Removed get_dhcp_port. When removing a method (making a
-    #           backwards-incompatible change) you would normally bump the
-    #           major version. However, since the method was unused in the
-    #           RPC client for many releases, it should be OK to bump the
-    #           minor release instead and claim RPC compatibility with the
-    #           last few client versions.
-    #     1.3 - Removed release_port_fixed_ip. It's not used by reference DHCP
-    #           agent since Juno, so similar rationale for not bumping the
-    #           major version as above applies here too.
-    target = oslo_messaging.Target(
-        namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN,
-        version='1.3')
-
-    def _get_active_networks(self, context, **kwargs):
-        """Retrieve and return a list of the active networks."""
-        host = kwargs.get('host')
-        plugin = manager.NeutronManager.get_plugin()
-        if utils.is_extension_supported(
-            plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
-            if cfg.CONF.network_auto_schedule:
-                plugin.auto_schedule_networks(context, host)
-            nets = plugin.list_active_networks_on_active_dhcp_agent(
-                context, host)
-        else:
-            filters = dict(admin_state_up=[True])
-            nets = plugin.get_networks(context, filters=filters)
-        return nets
-
-    def _port_action(self, plugin, context, port, action):
-        """Perform port operations taking care of concurrency issues."""
-        try:
-            if action == 'create_port':
-                return p_utils.create_port(plugin, context, port)
-            elif action == 'update_port':
-                return plugin.update_port(context, port['id'], port)
-            else:
-                msg = _('Unrecognized action')
-                raise n_exc.Invalid(message=msg)
-        except (db_exc.DBError, n_exc.NetworkNotFound,
-                n_exc.SubnetNotFound, n_exc.IpAddressGenerationFailure) as e:
-            with excutils.save_and_reraise_exception(reraise=False) as ctxt:
-                if isinstance(e, n_exc.IpAddressGenerationFailure):
-                    # Check if the subnet still exists and if it does not,
-                    # this is the reason why the ip address generation failed.
-                    # In any other unlikely event, re-raise.
-                    try:
-                        subnet_id = port['port']['fixed_ips'][0]['subnet_id']
-                        plugin.get_subnet(context, subnet_id)
-                    except n_exc.SubnetNotFound:
-                        pass
-                    else:
-                        ctxt.reraise = True
-                net_id = port['port']['network_id']
-                LOG.warn(_LW("Action %(action)s for network %(net_id)s "
-                             "could not complete successfully: %(reason)s"),
-                         {"action": action, "net_id": net_id, 'reason': e})
-
-    def get_active_networks(self, context, **kwargs):
-        """Retrieve and return a list of the active network ids."""
-        # NOTE(arosen): This method is no longer used by the DHCP agent but is
-        # left so that neutron-dhcp-agents will still continue to work if
-        # neutron-server is upgraded and not the agent.
-        host = kwargs.get('host')
-        LOG.debug('get_active_networks requested from %s', host)
-        nets = self._get_active_networks(context, **kwargs)
-        return [net['id'] for net in nets]
-
-    def _group_by_network_id(self, res):
-        grouped = {}
-        keyfunc = operator.itemgetter('network_id')
-        for net_id, values in itertools.groupby(sorted(res, key=keyfunc),
-                                                keyfunc):
-            grouped[net_id] = list(values)
-        return grouped
-
-    def get_active_networks_info(self, context, **kwargs):
-        """Returns all the networks/subnets/ports in system."""
-        host = kwargs.get('host')
-        LOG.debug('get_active_networks_info from %s', host)
-        networks = self._get_active_networks(context, **kwargs)
-        plugin = manager.NeutronManager.get_plugin()
-        filters = {'network_id': [network['id'] for network in networks]}
-        ports = plugin.get_ports(context, filters=filters)
-        filters['enable_dhcp'] = [True]
-        subnets = plugin.get_subnets(context, filters=filters)
-
-        grouped_subnets = self._group_by_network_id(subnets)
-        grouped_ports = self._group_by_network_id(ports)
-        for network in networks:
-            network['subnets'] = grouped_subnets.get(network['id'], [])
-            network['ports'] = grouped_ports.get(network['id'], [])
-
-        return networks
-
-    def get_network_info(self, context, **kwargs):
-        """Retrieve and return extended information about a network."""
-        network_id = kwargs.get('network_id')
-        host = kwargs.get('host')
-        LOG.debug('Network %(network_id)s requested from '
-                  '%(host)s', {'network_id': network_id,
-                               'host': host})
-        plugin = manager.NeutronManager.get_plugin()
-        try:
-            network = plugin.get_network(context, network_id)
-        except n_exc.NetworkNotFound:
-            LOG.warn(_LW("Network %s could not be found, it might have "
-                         "been deleted concurrently."), network_id)
-            return
-        filters = dict(network_id=[network_id])
-        network['subnets'] = plugin.get_subnets(context, filters=filters)
-        network['ports'] = plugin.get_ports(context, filters=filters)
-        return network
-
-    @db_api.retry_db_errors
-    def release_dhcp_port(self, context, **kwargs):
-        """Release the port currently being used by a DHCP agent."""
-        host = kwargs.get('host')
-        network_id = kwargs.get('network_id')
-        device_id = kwargs.get('device_id')
-
-        LOG.debug('DHCP port deletion for %(network_id)s request from '
-                  '%(host)s',
-                  {'network_id': network_id, 'host': host})
-        plugin = manager.NeutronManager.get_plugin()
-        plugin.delete_ports_by_device_id(context, device_id, network_id)
-
-    def update_lease_expiration(self, context, **kwargs):
-        """Release the fixed_ip associated the subnet on a port."""
-        # NOTE(arosen): This method is no longer used by the DHCP agent but is
-        # left so that neutron-dhcp-agents will still continue to work if
-        # neutron-server is upgraded and not the agent.
-        host = kwargs.get('host')
-
-        LOG.warning(_LW('Updating lease expiration is now deprecated. '
-                        'Issued from host %s.'), host)
-
-    @db_api.retry_db_errors
-    @resource_registry.mark_resources_dirty
-    def create_dhcp_port(self, context, **kwargs):
-        """Create and return dhcp port information.
-
-        If an expected failure occurs, a None port is returned.
-
-        """
-        host = kwargs.get('host')
-        # NOTE(pbondar): Create a deep copy of the port to prevent operating
-        # on a changed dict if RetryRequest is raised
-        port = copy.deepcopy(kwargs.get('port'))
-        LOG.debug('Create dhcp port %(port)s '
-                  'from %(host)s.',
-                  {'port': port,
-                   'host': host})
-
-        port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
-        port['port'][portbindings.HOST_ID] = host
-        if 'mac_address' not in port['port']:
-            port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED
-        plugin = manager.NeutronManager.get_plugin()
-        return self._port_action(plugin, context, port, 'create_port')
-
-    @db_api.retry_db_errors
-    def update_dhcp_port(self, context, **kwargs):
-        """Update the dhcp port."""
-        host = kwargs.get('host')
-        port = kwargs.get('port')
-        port['id'] = kwargs.get('port_id')
-        port['port'][portbindings.HOST_ID] = host
-        plugin = manager.NeutronManager.get_plugin()
-        old_port = plugin.get_port(context, port['id'])
-        if (old_port['device_id'] != constants.DEVICE_ID_RESERVED_DHCP_PORT
-            and old_port['device_id'] !=
-            utils.get_dhcp_agent_device_id(port['port']['network_id'], host)):
-            raise n_exc.DhcpPortInUse(port_id=port['id'])
-        LOG.debug('Update dhcp port %(port)s '
-                  'from %(host)s.',
-                  {'port': port,
-                   'host': host})
-        return self._port_action(plugin, context, port, 'update_port')
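
get_active_networks_info() above depends on _group_by_network_id(), which must sort its input before itertools.groupby(), since groupby only merges adjacent items with equal keys. The helper extracted and run on invented sample ports:

    import itertools
    import operator

    def group_by_network_id(res):
        grouped = {}
        keyfunc = operator.itemgetter('network_id')
        # sorting first is essential: groupby merges adjacent keys only
        for net_id, values in itertools.groupby(sorted(res, key=keyfunc),
                                                keyfunc):
            grouped[net_id] = list(values)
        return grouped

    ports = [{'id': 'p1', 'network_id': 'n2'},
             {'id': 'p2', 'network_id': 'n1'},
             {'id': 'p3', 'network_id': 'n2'}]
    print(group_by_network_id(ports))
    # {'n1': [ports[1]], 'n2': [ports[0], ports[2]]}
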
diff --git a/neutron/api/rpc/handlers/dvr_rpc.py b/neutron/api/rpc/handlers/dvr_rpc.py
deleted file mode 100644 (file)
index 73a5918..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright 2014, Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-import oslo_messaging
-
-from neutron.common import constants
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron import manager
-
-LOG = logging.getLogger(__name__)
-
-
-class DVRServerRpcApi(object):
-    """Agent-side RPC (stub) for agent-to-plugin interaction.
-
-    This class implements the client side of an rpc interface.  The server side
-    can be found below: DVRServerRpcCallback.  For more information on changing
-    rpc interfaces, see doc/source/devref/rpc_api.rst.
-    """
-    # 1.0 Initial Version
-    # 1.1 Support for passing 'fixed_ips' in get_subnet_for_dvr function.
-    #     Passing 'subnet" will be deprecated in the next release.
-
-    def __init__(self, topic):
-        target = oslo_messaging.Target(topic=topic, version='1.0',
-                                       namespace=constants.RPC_NAMESPACE_DVR)
-        self.client = n_rpc.get_client(target)
-
-    @log_helpers.log_method_call
-    def get_dvr_mac_address_by_host(self, context, host):
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'get_dvr_mac_address_by_host', host=host)
-
-    @log_helpers.log_method_call
-    def get_dvr_mac_address_list(self, context):
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'get_dvr_mac_address_list')
-
-    @log_helpers.log_method_call
-    def get_ports_on_host_by_subnet(self, context, host, subnet):
-        """Get DVR serviced ports on given host and subnet."""
-
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'get_ports_on_host_by_subnet',
-                          host=host, subnet=subnet)
-
-    @log_helpers.log_method_call
-    def get_subnet_for_dvr(self, context, subnet, fixed_ips):
-        cctxt = self.client.prepare()
-        return cctxt.call(
-            context, 'get_subnet_for_dvr', subnet=subnet, fixed_ips=fixed_ips)
-
-
-class DVRServerRpcCallback(object):
-    """Plugin-side RPC (implementation) for agent-to-plugin interaction.
-
-    This class implements the server side of an rpc interface.  The client side
-    can be found above: DVRServerRpcApi.  For more information on changing rpc
-    interfaces, see doc/source/devref/rpc_api.rst.
-    """
-
-    # History
-    #   1.0 Initial version
-    #   1.1 Support for passing the 'fixed_ips" in get_subnet_for_dvr.
-    #       Passing subnet will be deprecated in the next release.
-
-    target = oslo_messaging.Target(version='1.1',
-                                   namespace=constants.RPC_NAMESPACE_DVR)
-
-    @property
-    def plugin(self):
-        if not getattr(self, '_plugin', None):
-            self._plugin = manager.NeutronManager.get_plugin()
-        return self._plugin
-
-    def get_dvr_mac_address_list(self, context):
-        return self.plugin.get_dvr_mac_address_list(context)
-
-    def get_dvr_mac_address_by_host(self, context, **kwargs):
-        host = kwargs.get('host')
-        LOG.debug("DVR Agent requests mac_address for host %s", host)
-        return self.plugin.get_dvr_mac_address_by_host(context, host)
-
-    def get_ports_on_host_by_subnet(self, context, **kwargs):
-        """Get DVR serviced ports for given host and subnet."""
-
-        host = kwargs.get('host')
-        subnet = kwargs.get('subnet')
-        LOG.debug("DVR Agent requests list of VM ports on host %s", host)
-        return self.plugin.get_ports_on_host_by_subnet(context,
-            host, subnet)
-
-    def get_subnet_for_dvr(self, context, **kwargs):
-        fixed_ips = kwargs.get('fixed_ips')
-        subnet = kwargs.get('subnet')
-        return self.plugin.get_subnet_for_dvr(
-            context, subnet, fixed_ips=fixed_ips)
-
-
-class DVRAgentRpcApiMixin(object):
-    """Plugin-side RPC (stub) for plugin-to-agent interaction."""
-
-    DVR_RPC_VERSION = "1.0"
-
-    def _get_dvr_update_topic(self):
-        return topics.get_topic_name(self.topic,
-                                     topics.DVR,
-                                     topics.UPDATE)
-
-    def dvr_mac_address_update(self, context, dvr_macs):
-        """Notify dvr mac address updates."""
-        if not dvr_macs:
-            return
-        cctxt = self.client.prepare(topic=self._get_dvr_update_topic(),
-                                    version=self.DVR_RPC_VERSION, fanout=True)
-        cctxt.cast(context, 'dvr_mac_address_update', dvr_macs=dvr_macs)
-
-
-class DVRAgentRpcCallbackMixin(object):
-    """Agent-side RPC (implementation) for plugin-to-agent interaction."""
-
-    def dvr_mac_address_update(self, context, **kwargs):
-        """Callback for dvr_mac_addresses update.
-
-        :param dvr_macs: list of updated dvr_macs
-        """
-        dvr_macs = kwargs.get('dvr_macs', [])
-        LOG.debug("dvr_macs updated on remote: %s", dvr_macs)
-        self.dvr_agent.dvr_mac_address_update(dvr_macs)
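
DVRServerRpcApi and DVRServerRpcCallback above are the two halves of one interface: the client stub packs arguments into keyword arguments, and the callback unpacks them with kwargs.get(). A sketch of that round trip, with a direct method call standing in for the message bus and an invented MAC value:

    class Server(object):
        # stands in for DVRServerRpcCallback on the plugin side
        def get_dvr_mac_address_by_host(self, context, **kwargs):
            host = kwargs.get('host')
            return 'fa:16:3f:00:00:01' if host == 'compute-1' else None

    class Client(object):
        # stands in for DVRServerRpcApi on the agent side
        def __init__(self, server):
            self._server = server

        def get_dvr_mac_address_by_host(self, context, host):
            # the real stub would go through cctxt.call() here
            return self._server.get_dvr_mac_address_by_host(None, host=host)

    print(Client(Server()).get_dvr_mac_address_by_host(None, 'compute-1'))
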
diff --git a/neutron/api/rpc/handlers/l3_rpc.py b/neutron/api/rpc/handlers/l3_rpc.py
deleted file mode 100644 (file)
index b16eea8..0000000
+++ /dev/null
@@ -1,293 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_serialization import jsonutils
-import six
-
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import utils
-from neutron import context as neutron_context
-from neutron.db import api as db_api
-from neutron.extensions import l3
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.common import constants as plugin_constants
-
-
-LOG = logging.getLogger(__name__)
-
-
-class L3RpcCallback(object):
-    """L3 agent RPC callback in plugin implementations."""
-
-    # 1.0 L3PluginApi BASE_RPC_API_VERSION
-    # 1.1 Support update_floatingip_statuses
-    # 1.2 Added methods for DVR support
-    # 1.3 Added a method that returns the list of activated services
-    # 1.4 Added L3 HA update_router_state. This method was later removed,
-    #     since it was unused. The RPC version was not changed
-    # 1.5 Added update_ha_routers_states
-    # 1.6 Added process_prefix_update to support IPv6 Prefix Delegation
-    # 1.7 Added method delete_agent_gateway_port for DVR Routers
-    # 1.8 Added address scope information
-    # 1.9 Added get_router_ids
-    target = oslo_messaging.Target(version='1.9')
-
-    @property
-    def plugin(self):
-        if not hasattr(self, '_plugin'):
-            self._plugin = manager.NeutronManager.get_plugin()
-        return self._plugin
-
-    @property
-    def l3plugin(self):
-        if not hasattr(self, '_l3plugin'):
-            self._l3plugin = manager.NeutronManager.get_service_plugins()[
-                plugin_constants.L3_ROUTER_NAT]
-        return self._l3plugin
-
-    def get_router_ids(self, context, host):
-        """Returns IDs of routers scheduled to l3 agent on <host>"""
-        return self.l3plugin.list_router_ids_on_host(context, host)
-
-    @db_api.retry_db_errors
-    def sync_routers(self, context, **kwargs):
-        """Sync routers according to filters to a specific agent.
-
-        @param context: contains user information
-        @param kwargs: host, router_ids
-        @return: a list of routers
-                 with their interfaces and floating_ips
-        """
-        router_ids = kwargs.get('router_ids')
-        host = kwargs.get('host')
-        context = neutron_context.get_admin_context()
-        if utils.is_extension_supported(
-            self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
-            if cfg.CONF.router_auto_schedule:
-                self.l3plugin.auto_schedule_routers(context, host, router_ids)
-            routers = (
-                self.l3plugin.list_active_sync_routers_on_active_l3_agent(
-                    context, host, router_ids))
-        else:
-            routers = self.l3plugin.get_sync_data(context, router_ids)
-        if utils.is_extension_supported(
-            self.plugin, constants.PORT_BINDING_EXT_ALIAS):
-            self._ensure_host_set_on_ports(context, host, routers)
-        LOG.debug("Routers returned to l3 agent:\n %s",
-                  utils.DelayedStringRenderer(jsonutils.dumps,
-                                              routers, indent=5))
-        return routers
-
-    def _ensure_host_set_on_ports(self, context, host, routers):
-        for router in routers:
-            LOG.debug("Checking router: %(id)s for host: %(host)s",
-                      {'id': router['id'], 'host': host})
-            if router.get('gw_port') and router.get('distributed'):
-                # '' is used to effectively clear binding of a gw port if not
-                # bound (snat is not hosted on any l3 agent)
-                gw_port_host = router.get('gw_port_host') or ''
-                self._ensure_host_set_on_port(context,
-                                              gw_port_host,
-                                              router.get('gw_port'),
-                                              router['id'])
-                for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []):
-                    self._ensure_host_set_on_port(context,
-                                                  gw_port_host,
-                                                  p, router['id'])
-            else:
-                self._ensure_host_set_on_port(
-                    context, host,
-                    router.get('gw_port'),
-                    router['id'],
-                    ha_router_port=router.get('ha'))
-            for interface in router.get(constants.INTERFACE_KEY, []):
-                self._ensure_host_set_on_port(
-                    context,
-                    host,
-                    interface,
-                    router['id'],
-                    ha_router_port=router.get('ha'))
-            interface = router.get(constants.HA_INTERFACE_KEY)
-            if interface:
-                self._ensure_host_set_on_port(context, host, interface,
-                                              router['id'])
-
-    def _ensure_host_set_on_port(self, context, host, port, router_id=None,
-                                 ha_router_port=False):
-        if (port and host is not None and
-            (port.get('device_owner') !=
-             constants.DEVICE_OWNER_DVR_INTERFACE and
-             port.get(portbindings.HOST_ID) != host or
-             port.get(portbindings.VIF_TYPE) ==
-             portbindings.VIF_TYPE_BINDING_FAILED)):
-
-            # Ports owned by non-HA routers are bound again if they're
-            # already bound but the router moved to another host.
-            if not ha_router_port:
-                # All ports, including ports created for SNAT'ing for
-                # DVR are handled here
-                try:
-                    self.plugin.update_port(
-                        context,
-                        port['id'],
-                        {'port': {portbindings.HOST_ID: host}})
-                    # updating port's host to pass actual info to l3 agent
-                    port[portbindings.HOST_ID] = host
-                except exceptions.PortNotFound:
-                    LOG.debug("Port %(port)s not found while updating "
-                              "agent binding for router %(router)s.",
-                              {"port": port['id'], "router": router_id})
-            # Ports owned by HA routers should only be bound once, if
-            # they are unbound. These ports are moved when an agent reports
-            # that one of its routers moved to the active state.
-            else:
-                if not port.get(portbindings.HOST_ID):
-                    active_host = (
-                        self.l3plugin.get_active_host_for_ha_router(
-                            context, router_id))
-                    if active_host:
-                        host = active_host
-                    # If there is currently no active router instance (for
-                    # example, it's a new router), the host that requested
-                    # the routers (essentially a random host) will do. The
-                    # port binding will be corrected when an active instance
-                    # is elected.
-                    try:
-                        self.plugin.update_port(
-                            context,
-                            port['id'],
-                            {'port': {portbindings.HOST_ID: host}})
-                    except exceptions.PortNotFound:
-                        LOG.debug("Port %(port)s not found while updating "
-                                  "agent binding for router %(router)s.",
-                                  {"port": port['id'], "router": router_id})
-        elif (port and
-              port.get('device_owner') ==
-              constants.DEVICE_OWNER_DVR_INTERFACE):
-            # Ports that are DVR interfaces have multiple bindings (based
-            # on the hosts on which DVR router interfaces are spawned). Such
-            # bindings are created/updated here by invoking
-            # update_dvr_port_binding
-            self.plugin.update_dvr_port_binding(context, port['id'],
-                                                {'port':
-                                                 {portbindings.HOST_ID: host,
-                                                  'device_id': router_id}
-                                                 })
-
-    def get_external_network_id(self, context, **kwargs):
-        """Get one external network id for l3 agent.
-
-        l3 agent expects only one external network when it performs
-        this query.
-        """
-        context = neutron_context.get_admin_context()
-        net_id = self.plugin.get_external_network_id(context)
-        LOG.debug("External network ID returned to l3 agent: %s",
-                  net_id)
-        return net_id
-
-    def get_service_plugin_list(self, context, **kwargs):
-        plugins = manager.NeutronManager.get_service_plugins()
-        return plugins.keys()
-
-    def update_floatingip_statuses(self, context, router_id, fip_statuses):
-        """Update operational status for a floating IP."""
-        with context.session.begin(subtransactions=True):
-            for (floatingip_id, status) in six.iteritems(fip_statuses):
-                LOG.debug("New status for floating IP %(floatingip_id)s: "
-                          "%(status)s", {'floatingip_id': floatingip_id,
-                                         'status': status})
-                try:
-                    self.l3plugin.update_floatingip_status(context,
-                                                           floatingip_id,
-                                                           status)
-                except l3.FloatingIPNotFound:
-                    LOG.debug("Floating IP: %s no longer present.",
-                              floatingip_id)
-            # Find all floating IPs known to have been on the given router
-            # for which an update was not received, and set them DOWN
-            # mercilessly. This situation might occur for some asynchronous
-            # backends if notifications were missed.
-            known_router_fips = self.l3plugin.get_floatingips(
-                context, {'last_known_router_id': [router_id]})
-            # Consider only floating ips which were disassociated in the API
-            # FIXME(salv-orlando): Filtering in code should be avoided.
-            # the plugin should offer a way to specify a null filter
-            fips_to_disable = (fip['id'] for fip in known_router_fips
-                               if not fip['router_id'])
-            for fip_id in fips_to_disable:
-                self.l3plugin.update_floatingip_status(
-                    context, fip_id, constants.FLOATINGIP_STATUS_DOWN)
-
-    def get_ports_by_subnet(self, context, **kwargs):
-        """DVR: RPC called by dvr-agent to get all ports for subnet."""
-        subnet_id = kwargs.get('subnet_id')
-        LOG.debug("DVR: subnet_id: %s", subnet_id)
-        filters = {'fixed_ips': {'subnet_id': [subnet_id]}}
-        return self.plugin.get_ports(context, filters=filters)
-
-    @db_api.retry_db_errors
-    def get_agent_gateway_port(self, context, **kwargs):
-        """Get Agent Gateway port for FIP.
-
-        l3 agent expects an Agent Gateway Port to be returned
-        for this query.
-        """
-        network_id = kwargs.get('network_id')
-        host = kwargs.get('host')
-        admin_ctx = neutron_context.get_admin_context()
-        agent_port = self.l3plugin.create_fip_agent_gw_port_if_not_exists(
-            admin_ctx, network_id, host)
-        self._ensure_host_set_on_port(admin_ctx, host, agent_port)
-        LOG.debug('Agent Gateway port returned: %(agent_port)s with '
-                  'host %(host)s', {'agent_port': agent_port,
-                  'host': host})
-        return agent_port
-
-    def update_ha_routers_states(self, context, **kwargs):
-        """Update states for HA routers.
-
-        Get a map of router_id to its HA state on a host and update the DB.
-        State must be in: ('active', 'standby').
-        """
-        states = kwargs.get('states')
-        host = kwargs.get('host')
-
-        LOG.debug('Updating HA routers states on host %s: %s', host, states)
-        self.l3plugin.update_routers_states(context, states, host)
-
-    def process_prefix_update(self, context, **kwargs):
-        subnets = kwargs.get('subnets')
-
-        updated_subnets = []
-        for subnet_id, prefix in subnets.items():
-            updated_subnets.append(self.plugin.update_subnet(
-                                        context,
-                                        subnet_id,
-                                        {'subnet': {'cidr': prefix}}))
-        return updated_subnets
-
-    def delete_agent_gateway_port(self, context, **kwargs):
-        """Delete Floatingip agent gateway port."""
-        network_id = kwargs.get('network_id')
-        host = kwargs.get('host')
-        admin_ctx = neutron_context.get_admin_context()
-        self.l3plugin.delete_floatingip_agent_gateway_port(
-            admin_ctx, host, network_id)
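As context for the handler above: the L3 agent side of this interface is a thin oslo.messaging stub. A minimal sketch of how an agent could report floating IP statuses back to update_floatingip_statuses(), assuming the conventional 'q-l3-plugin' topic (the real topic constant and version pinning live in the agent code, not in this file):

    # Hypothetical agent-side caller; topic and version are assumptions.
    import oslo_messaging

    from neutron.common import rpc as n_rpc

    def report_fip_statuses(context, router_id, fip_statuses):
        # fip_statuses maps floating IP UUIDs to 'ACTIVE'/'DOWN'/'ERROR'.
        target = oslo_messaging.Target(topic='q-l3-plugin', version='1.0')
        client = n_rpc.get_client(target)
        cctxt = client.prepare()
        return cctxt.call(context, 'update_floatingip_statuses',
                          router_id=router_id, fip_statuses=fip_statuses)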
diff --git a/neutron/api/rpc/handlers/metadata_rpc.py b/neutron/api/rpc/handlers/metadata_rpc.py
deleted file mode 100644 (file)
index 3903439..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import oslo_messaging
-
-from neutron.common import constants
-from neutron import manager
-
-
-class MetadataRpcCallback(object):
-    """Metadata agent RPC callback in plugin implementations.
-
-    This class implements the server side of an rpc interface used by the
-    metadata service to make calls back into the Neutron plugin.  The client
-    side is defined in neutron.agent.metadata.agent.MetadataPluginAPI.  For
-    more information about changing rpc interfaces, see
-    doc/source/devref/rpc_api.rst.
-    """
-
-    # 1.0  MetadataPluginAPI BASE_RPC_API_VERSION
-    target = oslo_messaging.Target(version='1.0',
-                                   namespace=constants.RPC_NAMESPACE_METADATA)
-
-    @property
-    def plugin(self):
-        if not hasattr(self, '_plugin'):
-            self._plugin = manager.NeutronManager.get_plugin()
-        return self._plugin
-
-    def get_ports(self, context, filters):
-        return self.plugin.get_ports(context, filters=filters)
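The client half referenced in the docstring lives in neutron.agent.metadata.agent.MetadataPluginAPI. A minimal sketch of what that side looks like, assuming it targets the plugin topic under the metadata namespace (a sketch, not the agent's actual code):

    # Sketch of the client side matching MetadataRpcCallback.get_ports().
    import oslo_messaging

    from neutron.common import constants
    from neutron.common import rpc as n_rpc
    from neutron.common import topics

    class MetadataPluginClientSketch(object):
        def __init__(self):
            target = oslo_messaging.Target(
                topic=topics.PLUGIN, version='1.0',
                namespace=constants.RPC_NAMESPACE_METADATA)
            self.client = n_rpc.get_client(target)

        def get_ports(self, context, filters):
            # Blocking call; answered server-side by plugin.get_ports().
            cctxt = self.client.prepare()
            return cctxt.call(context, 'get_ports', filters=filters)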
diff --git a/neutron/api/rpc/handlers/resources_rpc.py b/neutron/api/rpc/handlers/resources_rpc.py
deleted file mode 100755 (executable)
index f493511..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-# Copyright (c) 2015 Mellanox Technologies, Ltd
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-import oslo_messaging
-
-from neutron._i18n import _
-from neutron.api.rpc.callbacks.consumer import registry as cons_registry
-from neutron.api.rpc.callbacks.producer import registry as prod_registry
-from neutron.api.rpc.callbacks import resources
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.objects import base as obj_base
-
-
-LOG = logging.getLogger(__name__)
-
-
-class ResourcesRpcError(exceptions.NeutronException):
-    pass
-
-
-class InvalidResourceTypeClass(ResourcesRpcError):
-    message = _("Invalid resource type %(resource_type)s")
-
-
-class ResourceNotFound(ResourcesRpcError):
-    message = _("Resource %(resource_id)s of type %(resource_type)s "
-                "not found")
-
-
-def _validate_resource_type(resource_type):
-    if not resources.is_valid_resource_type(resource_type):
-        raise InvalidResourceTypeClass(resource_type=resource_type)
-
-
-def resource_type_versioned_topic(resource_type):
-    _validate_resource_type(resource_type)
-    cls = resources.get_resource_cls(resource_type)
-    return topics.RESOURCE_TOPIC_PATTERN % {'resource_type': resource_type,
-                                            'version': cls.VERSION}
-
-
-class ResourcesPullRpcApi(object):
-    """Agent-side RPC (stub) for agent-to-plugin interaction.
-
-    This class implements the client side of an rpc interface.  The server side
-    can be found below: ResourcesPullRpcCallback.  For more information on
-    this RPC interface, see doc/source/devref/rpc_callbacks.rst.
-    """
-
-    def __new__(cls):
-        # make it a singleton
-        if not hasattr(cls, '_instance'):
-            cls._instance = super(ResourcesPullRpcApi, cls).__new__(cls)
-            target = oslo_messaging.Target(
-                topic=topics.PLUGIN, version='1.0',
-                namespace=constants.RPC_NAMESPACE_RESOURCES)
-            cls._instance.client = n_rpc.get_client(target)
-        return cls._instance
-
-    @log_helpers.log_method_call
-    def pull(self, context, resource_type, resource_id):
-        _validate_resource_type(resource_type)
-
-        # we've already validated the resource type, so we are pretty sure the
-        # class is there => no need to validate it specifically
-        resource_type_cls = resources.get_resource_cls(resource_type)
-
-        cctxt = self.client.prepare()
-        primitive = cctxt.call(context, 'pull',
-            resource_type=resource_type,
-            version=resource_type_cls.VERSION, resource_id=resource_id)
-
-        if primitive is None:
-            raise ResourceNotFound(resource_type=resource_type,
-                                   resource_id=resource_id)
-
-        return resource_type_cls.clean_obj_from_primitive(primitive)
-
-
-class ResourcesPullRpcCallback(object):
-    """Plugin-side RPC (implementation) for agent-to-plugin interaction.
-
-    This class implements the server side of an rpc interface.  The client side
-    can be found above: ResourcesPullRpcApi.  For more information on
-    this RPC interface, see doc/source/devref/rpc_callbacks.rst.
-    """
-
-    # History
-    #   1.0 Initial version
-
-    target = oslo_messaging.Target(
-        version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES)
-
-    def pull(self, context, resource_type, version, resource_id):
-        obj = prod_registry.pull(resource_type, resource_id, context=context)
-        if obj:
-            return obj.obj_to_primitive(target_version=version)
-
-
-class ResourcesPushRpcApi(object):
-    """Plugin-side RPC for plugin-to-agents interaction.
-
-    This interface is designed to push versioned object updates to interested
-    agents using fanout topics.
-
-    This class implements the caller side of an rpc interface.  The receiver
-    side can be found below: ResourcesPushRpcCallback.
-    """
-
-    def __init__(self):
-        target = oslo_messaging.Target(
-            version='1.0',
-            namespace=constants.RPC_NAMESPACE_RESOURCES)
-        self.client = n_rpc.get_client(target)
-
-    def _prepare_object_fanout_context(self, obj):
-        """Prepare fanout context, one topic per object type."""
-        obj_topic = resource_type_versioned_topic(obj.obj_name())
-        return self.client.prepare(fanout=True, topic=obj_topic)
-
-    @log_helpers.log_method_call
-    def push(self, context, resource, event_type):
-        resource_type = resources.get_resource_type(resource)
-        _validate_resource_type(resource_type)
-        cctxt = self._prepare_object_fanout_context(resource)
-        #TODO(QoS): Push notifications for every known version once we have
-        #           multiple of those
-        dehydrated_resource = resource.obj_to_primitive()
-        cctxt.cast(context, 'push',
-                   resource=dehydrated_resource,
-                   event_type=event_type)
-
-
-class ResourcesPushRpcCallback(object):
-    """Agent-side RPC for plugin-to-agents interaction.
-
-    This class implements the receiver side of notifications about versioned
-    object resource updates used by neutron.api.rpc.callbacks. You can find
-    the caller side in ResourcesPushRpcApi.
-    """
-    # History
-    #   1.0 Initial version
-
-    target = oslo_messaging.Target(version='1.0',
-                                   namespace=constants.RPC_NAMESPACE_RESOURCES)
-
-    def push(self, context, resource, event_type):
-        resource_obj = obj_base.NeutronObject.clean_obj_from_primitive(
-            resource)
-        LOG.debug("Resources notification (%(event_type)s): %(resource)s",
-                  {'event_type': event_type, 'resource': repr(resource_obj)})
-        resource_type = resources.get_resource_type(resource_obj)
-        cons_registry.push(resource_type, resource_obj, event_type)
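Putting the two halves together: an agent pulls a versioned object on demand, while the plugin pushes updates over a per-type fanout topic. A usage sketch, assuming the QoS policy resource type is registered with the callbacks registry and that context/policy_id are in scope:

    # Illustrative usage only; resource type and IDs are assumptions.
    from neutron.api.rpc.callbacks import resources
    from neutron.api.rpc.handlers import resources_rpc

    puller = resources_rpc.ResourcesPullRpcApi()   # singleton, see __new__
    policy = puller.pull(context, resources.QOS_POLICY, policy_id)

    pusher = resources_rpc.ResourcesPushRpcApi()
    pusher.push(context, policy, event_type='updated')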
diff --git a/neutron/api/rpc/handlers/securitygroups_rpc.py b/neutron/api/rpc/handlers/securitygroups_rpc.py
deleted file mode 100644 (file)
index c6f28e5..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import oslo_messaging
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LW
-from neutron.common import constants
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron import manager
-
-LOG = logging.getLogger(__name__)
-
-
-class SecurityGroupServerRpcApi(object):
-    """RPC client for security group methods in the plugin.
-
-    This class implements the client side of an rpc interface.  This interface
-    is used by agents to call security group related methods implemented on the
-    plugin side.  The other side of this interface is defined in
-    SecurityGroupServerRpcCallback.  For more information about changing rpc
-    interfaces, see doc/source/devref/rpc_api.rst.
-    """
-    def __init__(self, topic):
-        target = oslo_messaging.Target(
-            topic=topic, version='1.0',
-            namespace=constants.RPC_NAMESPACE_SECGROUP)
-        self.client = n_rpc.get_client(target)
-
-    def security_group_rules_for_devices(self, context, devices):
-        LOG.debug("Get security group rules "
-                  "for devices via rpc %r", devices)
-        cctxt = self.client.prepare(version='1.1')
-        return cctxt.call(context, 'security_group_rules_for_devices',
-                          devices=devices)
-
-    def security_group_info_for_devices(self, context, devices):
-        LOG.debug("Get security group information for devices via rpc %r",
-                  devices)
-        cctxt = self.client.prepare(version='1.2')
-        return cctxt.call(context, 'security_group_info_for_devices',
-                          devices=devices)
-
-
-class SecurityGroupServerRpcCallback(object):
-    """Callback for SecurityGroup agent RPC in plugin implementations.
-
-    This class implements the server side of an rpc interface.  The client side
-    can be found in SecurityGroupServerRpcApi. For more information on changing
-    rpc interfaces, see doc/source/devref/rpc_api.rst.
-    """
-
-    # API version history:
-    #   1.1 - Initial version
-    #   1.2 - security_group_info_for_devices introduced as an optimization
-
-    # NOTE: target must not be overridden in subclasses
-    # to keep RPC API version consistent across plugins.
-    target = oslo_messaging.Target(version='1.2',
-                                   namespace=constants.RPC_NAMESPACE_SECGROUP)
-
-    @property
-    def plugin(self):
-        return manager.NeutronManager.get_plugin()
-
-    def _get_devices_info(self, context, devices):
-        return dict(
-            (port['id'], port)
-            for port in self.plugin.get_ports_from_devices(context, devices)
-            if port and not utils.is_port_trusted(port)
-        )
-
-    def security_group_rules_for_devices(self, context, **kwargs):
-        """Callback method to return security group rules for each port.
-
-        Also converts any remote_group_id rule into the equivalent
-        source_ip_prefix and dest_ip_prefix rules.
-
-        :param devices: list of devices
-        :returns: ports corresponding to the devices, with their security
-            group rules
-        """
-        devices_info = kwargs.get('devices')
-        ports = self._get_devices_info(context, devices_info)
-        return self.plugin.security_group_rules_for_ports(context, ports)
-
-    def security_group_info_for_devices(self, context, **kwargs):
-        """Return security group information for requested devices.
-
-        :param devices: list of devices
-        :returns:
-        sg_info{
-          'security_groups': {sg_id: [rule1, rule2]}
-          'sg_member_ips': {sg_id: {'IPv4': set(), 'IPv6': set()}}
-          'devices': {device_id: {device_info}}
-        }
-
-        Note that sets are serialized into lists by rpc code.
-        """
-        devices_info = kwargs.get('devices')
-        ports = self._get_devices_info(context, devices_info)
-        return self.plugin.security_group_info_for_ports(context, ports)
-
-
-class SecurityGroupAgentRpcApiMixin(object):
-    """RPC client for security group methods to the agent.
-
-    This class implements the client side of an rpc interface.  This interface
-    is used by plugins to call security group methods implemented on the
-    agent side.  The other side of this interface can be found in
-    SecurityGroupAgentRpcCallbackMixin.  For more information about changing
-    rpc interfaces, see doc/source/devref/rpc_api.rst.
-    """
-
-    # history
-    #   1.1 Support Security Group RPC
-    SG_RPC_VERSION = "1.1"
-
-    def _get_security_group_topic(self):
-        return topics.get_topic_name(self.topic,
-                                     topics.SECURITY_GROUP,
-                                     topics.UPDATE)
-
-    def security_groups_rule_updated(self, context, security_groups):
-        """Notify rule updated security groups."""
-        if not security_groups:
-            return
-        cctxt = self.client.prepare(version=self.SG_RPC_VERSION,
-                                    topic=self._get_security_group_topic(),
-                                    fanout=True)
-        cctxt.cast(context, 'security_groups_rule_updated',
-                   security_groups=security_groups)
-
-    def security_groups_member_updated(self, context, security_groups):
-        """Notify member updated security groups."""
-        if not security_groups:
-            return
-        cctxt = self.client.prepare(version=self.SG_RPC_VERSION,
-                                    topic=self._get_security_group_topic(),
-                                    fanout=True)
-        cctxt.cast(context, 'security_groups_member_updated',
-                   security_groups=security_groups)
-
-    def security_groups_provider_updated(self, context,
-                                         devices_to_update=None):
-        """Notify provider updated security groups."""
-        # NOTE(ihrachys) the version here should really be 1.3, but since we
-        # don't support proper version pinning yet, we leave it intact to allow
-        # to work with older agents. The reason why we should not require the
-        # version here is that in rolling upgrade scenarios we always upgrade
-        # server first, and since the notification is directed from the newer
-        # server to older agents, and those agents don't have their RPC entry
-        # point bumped to 1.3 yet, we cannot safely enforce the minimal
-        # version. Newer payload works for older agents because agent handlers
-        # are written so that we silently ignore unknown parameters.
-        cctxt = self.client.prepare(version=self.SG_RPC_VERSION,
-                                    topic=self._get_security_group_topic(),
-                                    fanout=True)
-        cctxt.cast(context, 'security_groups_provider_updated',
-                   devices_to_update=devices_to_update)
-
-
-class SecurityGroupAgentRpcCallbackMixin(object):
-    """A mix-in that enable SecurityGroup support in agent implementations.
-
-    This class implements the server side of an rpc interface.  The client
-    side can be found in SecurityGroupAgentRpcApiMixin.  For more information
-    on changing rpc interfaces, see doc/source/devref/rpc_api.rst.
-
-    The sg_agent reference implementation is available in neutron/agent.
-    """
-    # The mixing-in object should have an sg_agent attribute set.
-    sg_agent = None
-
-    def _security_groups_agent_not_set(self):
-        LOG.warning(_LW("Security group agent binding currently not set. "
-                        "This should be set by the end of the init "
-                        "process."))
-
-    def security_groups_rule_updated(self, context, **kwargs):
-        """Callback for security group rule update.
-
-        :param security_groups: list of updated security_groups
-        """
-        security_groups = kwargs.get('security_groups', [])
-        LOG.debug("Security group rule updated on remote: %s",
-                  security_groups)
-        if not self.sg_agent:
-            return self._security_groups_agent_not_set()
-        self.sg_agent.security_groups_rule_updated(security_groups)
-
-    def security_groups_member_updated(self, context, **kwargs):
-        """Callback for security group member update.
-
-        :param security_groups: list of updated security_groups
-        """
-        security_groups = kwargs.get('security_groups', [])
-        LOG.debug("Security group member updated on remote: %s",
-                  security_groups)
-        if not self.sg_agent:
-            return self._security_groups_agent_not_set()
-        self.sg_agent.security_groups_member_updated(security_groups)
-
-    def security_groups_provider_updated(self, context, **kwargs):
-        """Callback for security group provider update."""
-        LOG.debug("Provider rule updated")
-        devices_to_update = kwargs.get('devices_to_update')
-        if not self.sg_agent:
-            return self._security_groups_agent_not_set()
-        self.sg_agent.security_groups_provider_updated(devices_to_update)
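The payload documented in security_group_info_for_devices() is easiest to read as a concrete value. An illustrative (entirely made-up) response covering one port in one security group:

    # Example shape only; IDs and addresses are invented.
    sg_info = {
        'security_groups': {
            'sg-1111': [{'direction': 'ingress', 'ethertype': 'IPv4',
                         'remote_group_id': 'sg-1111'}],
        },
        'sg_member_ips': {
            'sg-1111': {'IPv4': ['10.0.0.3'], 'IPv6': []},
        },
        'devices': {
            'device-aaaa': {'device': 'device-aaaa',
                            'security_groups': ['sg-1111']},
        },
    }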
diff --git a/neutron/api/v2/__init__.py b/neutron/api/v2/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/api/v2/attributes.py b/neutron/api/v2/attributes.py
deleted file mode 100644 (file)
index 7a75797..0000000
+++ /dev/null
@@ -1,992 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-
-import functools
-import netaddr
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-import six
-import webob.exc
-
-from neutron._i18n import _
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-
-
-LOG = logging.getLogger(__name__)
-
-ATTR_NOT_SPECIFIED = object()
-# Defining a constant to avoid repeating string literal in several modules
-SHARED = 'shared'
-
-# Used by range check to indicate no limit for a bound.
-UNLIMITED = None
-
-NAME_MAX_LEN = 255
-TENANT_ID_MAX_LEN = 255
-DESCRIPTION_MAX_LEN = 255
-LONG_DESCRIPTION_MAX_LEN = 1024
-DEVICE_ID_MAX_LEN = 255
-DEVICE_OWNER_MAX_LEN = 255
-
-
-def _verify_dict_keys(expected_keys, target_dict, strict=True):
-    """Allows to verify keys in a dictionary.
-
-    :param expected_keys: A list of keys expected to be present.
-    :param target_dict: The dictionary which should be verified.
-    :param strict: Specifies whether additional keys are allowed to be present.
-    :return: True, if keys in the dictionary correspond to the specification.
-    """
-    if not isinstance(target_dict, dict):
-        msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
-                 "with keys: %(expected_keys)s") %
-               {'target_dict': target_dict, 'expected_keys': expected_keys})
-        LOG.debug(msg)
-        return msg
-
-    expected_keys = set(expected_keys)
-    provided_keys = set(target_dict.keys())
-
-    predicate = expected_keys.__eq__ if strict else expected_keys.issubset
-
-    if not predicate(provided_keys):
-        msg = (_("Validation of dictionary's keys failed. "
-                 "Expected keys: %(expected_keys)s "
-                 "Provided keys: %(provided_keys)s") %
-               {'expected_keys': expected_keys,
-                'provided_keys': provided_keys})
-        LOG.debug(msg)
-        return msg
-
-
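To make the strict/non-strict distinction concrete, a short sketch of the helper's behaviour (None means the check passed, a string is the failure message):

    # Illustrative calls to _verify_dict_keys().
    _verify_dict_keys(['start', 'end'],
                      {'start': '10.0.0.2', 'end': '10.0.0.254'})  # None
    _verify_dict_keys(['start', 'end'], {'start': '10.0.0.2'})     # message
    _verify_dict_keys(['start'], {'start': '1', 'extra': '2'},
                      strict=False)                                # None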
-def is_attr_set(attribute):
-    return not (attribute is None or attribute is ATTR_NOT_SPECIFIED)
-
-
-def _validate_list_of_items(item_validator, data, *args, **kwargs):
-    if not isinstance(data, list):
-        msg = _("'%s' is not a list") % data
-        return msg
-
-    if len(set(data)) != len(data):
-        msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
-        return msg
-
-    for item in data:
-        msg = item_validator(item, *args, **kwargs)
-        if msg:
-            return msg
-
-
-def _validate_values(data, valid_values=None):
-    if data not in valid_values:
-        msg = (_("'%(data)s' is not in %(valid_values)s") %
-               {'data': data, 'valid_values': valid_values})
-        LOG.debug(msg)
-        return msg
-
-
-def _validate_not_empty_string_or_none(data, max_len=None):
-    if data is not None:
-        return _validate_not_empty_string(data, max_len=max_len)
-
-
-def _validate_not_empty_string(data, max_len=None):
-    msg = _validate_string(data, max_len=max_len)
-    if msg:
-        return msg
-    if not data.strip():
-        msg = _("'%s' Blank strings are not permitted") % data
-        LOG.debug(msg)
-        return msg
-
-
-def _validate_string_or_none(data, max_len=None):
-    if data is not None:
-        return _validate_string(data, max_len=max_len)
-
-
-def _validate_string(data, max_len=None):
-    if not isinstance(data, six.string_types):
-        msg = _("'%s' is not a valid string") % data
-        LOG.debug(msg)
-        return msg
-
-    if max_len is not None and len(data) > max_len:
-        msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
-               {'data': data, 'max_len': max_len})
-        LOG.debug(msg)
-        return msg
-
-
-validate_list_of_unique_strings = functools.partial(_validate_list_of_items,
-                                                    _validate_string)
-
-
-def _validate_boolean(data, valid_values=None):
-    try:
-        convert_to_boolean(data)
-    except n_exc.InvalidInput:
-        msg = _("'%s' is not a valid boolean value") % data
-        LOG.debug(msg)
-        return msg
-
-
-def _validate_range(data, valid_values=None):
-    """Check that integer value is within a range provided.
-
-    Test is inclusive. Allows either limit to be ignored, to allow
-    checking ranges where only the lower or upper limit matter.
-    It is expected that the limits provided are valid integers or
-    the value None.
-    """
-
-    min_value = valid_values[0]
-    max_value = valid_values[1]
-    try:
-        data = int(data)
-    except (ValueError, TypeError):
-        msg = _("'%s' is not an integer") % data
-        LOG.debug(msg)
-        return msg
-    if min_value is not UNLIMITED and data < min_value:
-        msg = _("'%(data)s' is too small - must be at least "
-                "'%(limit)d'") % {'data': data, 'limit': min_value}
-        LOG.debug(msg)
-        return msg
-    if max_value is not UNLIMITED and data > max_value:
-        msg = _("'%(data)s' is too large - must be no larger than "
-                "'%(limit)d'") % {'data': data, 'limit': max_value}
-        LOG.debug(msg)
-        return msg
-
-
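For example, with UNLIMITED standing in for an ignored bound:

    # Illustrative calls; None means the value passed validation.
    _validate_range(5, [0, 10])           # None: 5 is within [0, 10]
    _validate_range(0, [0, 10])           # None: the test is inclusive
    _validate_range(99, [0, UNLIMITED])   # None: no upper bound
    _validate_range(-1, [0, UNLIMITED])   # message: too small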
-def _validate_no_whitespace(data):
-    """Validates that input has no whitespace."""
-    if re.search(r'\s', data):
-        msg = _("'%s' contains whitespace") % data
-        LOG.debug(msg)
-        raise n_exc.InvalidInput(error_message=msg)
-    return data
-
-
-def _validate_mac_address(data, valid_values=None):
-    try:
-        valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
-    except Exception:
-        valid_mac = False
-
-    if valid_mac:
-        valid_mac = netaddr.EUI(data) not in map(
-            netaddr.EUI, constants.INVALID_MAC_ADDRESSES)
-    # TODO(arosen): The code in this file should be refactored
-    # so it catches the correct exceptions. _validate_no_whitespace
-    # raises AttributeError if data is None.
-    if not valid_mac:
-        msg = _("'%s' is not a valid MAC address") % data
-        LOG.debug(msg)
-        return msg
-
-
-def _validate_mac_address_or_none(data, valid_values=None):
-    if data is not None:
-        return _validate_mac_address(data, valid_values)
-
-
-def _validate_ip_address(data, valid_values=None):
-    msg = None
-    try:
-        # netaddr.core.ZEROFILL is only applicable to IPv4.
-        # it will remove leading zeros from IPv4 address octets.
-        ip = netaddr.IPAddress(_validate_no_whitespace(data),
-                               flags=netaddr.core.ZEROFILL)
-        # The following are quick checks for IPv6 (has ':') and
-        # IPv4 (has 3 periods like 'xx.xx.xx.xx').
-        # NOTE(yamamoto): netaddr uses libraries provided by the underlying
-        # platform to convert addresses.  For example, inet_aton(3).
-        # Some platforms, including NetBSD and OS X, have inet_aton
-        # implementation which accepts more varying forms of addresses than
-        # we want to accept here.  The following check is to reject such
-        # addresses.  For Example:
-        #   >>> netaddr.IPAddress('1' * 59)
-        #   IPAddress('199.28.113.199')
-        #   >>> netaddr.IPAddress(str(int('1' * 59) & 0xffffffff))
-        #   IPAddress('199.28.113.199')
-        #   >>>
-        if ':' not in data and data.count('.') != 3:
-            msg = _("'%s' is not a valid IP address") % data
-        # A leading '0' in an IPv4 address may be interpreted as an octal
-        # number, e.g. 011 octal is 9 decimal. Since there is no standard
-        # saying whether IP addresses with leading '0's should be interpreted
-        # as octal or decimal, we reject leading '0's to avoid ambiguity.
-        if ip.version == 4 and str(ip) != data:
-            msg = _("'%(data)s' is not an accepted IP address, "
-                    "'%(ip)s' is recommended") % {"data": data, "ip": ip}
-    except Exception:
-        msg = _("'%s' is not a valid IP address") % data
-    if msg:
-        LOG.debug(msg)
-    return msg
-
-
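Concretely, the extra guards reject inputs that netaddr alone would accept:

    # Illustrative behaviour of _validate_ip_address().
    _validate_ip_address('192.168.1.1')    # None: accepted
    _validate_ip_address('192.168.01.1')   # message: leading-zero octet
    _validate_ip_address('1' * 59)         # message: inet_aton oddity guard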
-def _validate_ip_pools(data, valid_values=None):
-    """Validate that start and end IP addresses are present.
-
-    In addition to this, the IP addresses themselves are also validated.
-    """
-    if not isinstance(data, list):
-        msg = _("Invalid data format for IP pool: '%s'") % data
-        LOG.debug(msg)
-        return msg
-
-    expected_keys = ['start', 'end']
-    for ip_pool in data:
-        msg = _verify_dict_keys(expected_keys, ip_pool)
-        if msg:
-            return msg
-        for k in expected_keys:
-            msg = _validate_ip_address(ip_pool[k])
-            if msg:
-                return msg
-
-
-def _validate_fixed_ips(data, valid_values=None):
-    if not isinstance(data, list):
-        msg = _("Invalid data format for fixed IP: '%s'") % data
-        LOG.debug(msg)
-        return msg
-
-    ips = []
-    for fixed_ip in data:
-        if not isinstance(fixed_ip, dict):
-            msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
-            LOG.debug(msg)
-            return msg
-        if 'ip_address' in fixed_ip:
-            # Ensure that duplicate entries are not set - just checking the
-            # IP suffices. Duplicate subnet_ids are legitimate.
-            fixed_ip_address = fixed_ip['ip_address']
-            if fixed_ip_address in ips:
-                msg = _("Duplicate IP address '%s'") % fixed_ip_address
-                LOG.debug(msg)
-            else:
-                msg = _validate_ip_address(fixed_ip_address)
-            if msg:
-                return msg
-            ips.append(fixed_ip_address)
-        if 'subnet_id' in fixed_ip:
-            msg = _validate_uuid(fixed_ip['subnet_id'])
-            if msg:
-                return msg
-
-
-def _validate_nameservers(data, valid_values=None):
-    if not hasattr(data, '__iter__'):
-        msg = _("Invalid data format for nameserver: '%s'") % data
-        LOG.debug(msg)
-        return msg
-
-    hosts = []
-    for host in data:
-        # This must be an IP address only
-        msg = _validate_ip_address(host)
-        if msg:
-            msg = _("'%(host)s' is not a valid nameserver. %(msg)s") % {
-                'host': host, 'msg': msg}
-            LOG.debug(msg)
-            return msg
-        if host in hosts:
-            msg = _("Duplicate nameserver '%s'") % host
-            LOG.debug(msg)
-            return msg
-        hosts.append(host)
-
-
-def _validate_hostroutes(data, valid_values=None):
-    if not isinstance(data, list):
-        msg = _("Invalid data format for hostroute: '%s'") % data
-        LOG.debug(msg)
-        return msg
-
-    expected_keys = ['destination', 'nexthop']
-    hostroutes = []
-    for hostroute in data:
-        msg = _verify_dict_keys(expected_keys, hostroute)
-        if msg:
-            return msg
-        msg = _validate_subnet(hostroute['destination'])
-        if msg:
-            return msg
-        msg = _validate_ip_address(hostroute['nexthop'])
-        if msg:
-            return msg
-        if hostroute in hostroutes:
-            msg = _("Duplicate hostroute '%s'") % hostroute
-            LOG.debug(msg)
-            return msg
-        hostroutes.append(hostroute)
-
-
-def _validate_ip_address_or_none(data, valid_values=None):
-    if data is not None:
-        return _validate_ip_address(data, valid_values)
-
-
-def _validate_subnet(data, valid_values=None):
-    msg = None
-    try:
-        net = netaddr.IPNetwork(_validate_no_whitespace(data))
-        if '/' not in data or (net.version == 4 and str(net) != data):
-            msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
-                    " '%(cidr)s' is recommended") % {"data": data,
-                                                     "cidr": net.cidr}
-        else:
-            return
-    except Exception:
-        msg = _("'%s' is not a valid IP subnet") % data
-    if msg:
-        LOG.debug(msg)
-    return msg
-
-
-def _validate_subnet_or_none(data, valid_values=None):
-    if data is not None:
-        return _validate_subnet(data, valid_values)
-
-
-_validate_subnet_list = functools.partial(_validate_list_of_items,
-                                          _validate_subnet)
-
-
-def _validate_regex(data, valid_values=None):
-    try:
-        if re.match(valid_values, data):
-            return
-    except TypeError:
-        pass
-
-    msg = _("'%s' is not a valid input") % data
-    LOG.debug(msg)
-    return msg
-
-
-def _validate_regex_or_none(data, valid_values=None):
-    if data is not None:
-        return _validate_regex(data, valid_values)
-
-
-def _validate_subnetpool_id(data, valid_values=None):
-    if data != constants.IPV6_PD_POOL_ID:
-        return _validate_uuid_or_none(data, valid_values)
-
-
-def _validate_subnetpool_id_or_none(data, valid_values=None):
-    if data is not None:
-        return _validate_subnetpool_id(data, valid_values)
-
-
-def _validate_uuid(data, valid_values=None):
-    if not uuidutils.is_uuid_like(data):
-        msg = _("'%s' is not a valid UUID") % data
-        LOG.debug(msg)
-        return msg
-
-
-def _validate_uuid_or_none(data, valid_values=None):
-    if data is not None:
-        return _validate_uuid(data)
-
-
-_validate_uuid_list = functools.partial(_validate_list_of_items,
-                                        _validate_uuid)
-
-
-def _validate_dict_item(key, key_validator, data):
-    # Find conversion function, if any, and apply it
-    conv_func = key_validator.get('convert_to')
-    if conv_func:
-        data[key] = conv_func(data.get(key))
-    # Find validator function
-    # TODO(salv-orlando): Structure of dict attributes should be improved
-    # to avoid iterating over items
-    val_func = val_params = None
-    for (k, v) in six.iteritems(key_validator):
-        if k.startswith('type:'):
-            # ask forgiveness, not permission
-            try:
-                val_func = validators[k]
-            except KeyError:
-                msg = _("Validator '%s' does not exist.") % k
-                LOG.debug(msg)
-                return msg
-            val_params = v
-            break
-    # Process validation
-    if val_func:
-        return val_func(data.get(key), val_params)
-
-
-def _validate_dict(data, key_specs=None):
-    if not isinstance(data, dict):
-        msg = _("'%s' is not a dictionary") % data
-        LOG.debug(msg)
-        return msg
-    # Do not perform any further validation, if no constraints are supplied
-    if not key_specs:
-        return
-
-    # Check whether all required keys are present
-    required_keys = [key for key, spec in six.iteritems(key_specs)
-                     if spec.get('required')]
-
-    if required_keys:
-        msg = _verify_dict_keys(required_keys, data, False)
-        if msg:
-            return msg
-
-    # Perform validation and conversion of all values
-    # according to the specifications.
-    for key, key_validator in [(k, v) for k, v in six.iteritems(key_specs)
-                               if k in data]:
-        msg = _validate_dict_item(key, key_validator, data)
-        if msg:
-            return msg
-
-
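A sketch of how key_specs drive _validate_dict(), using the 'type:' rules and 'required'/'convert_to' keys as they are used throughout this module:

    # Illustrative key_specs; None means validation passed.
    specs = {'name': {'type:string': NAME_MAX_LEN, 'required': True},
             'count': {'type:non_negative': None,
                       'convert_to': convert_to_int}}
    _validate_dict({'name': 'net1', 'count': '3'}, specs)  # None; count -> 3
    _validate_dict({'count': 1}, specs)  # message: required 'name' missing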
-def _validate_dict_or_none(data, key_specs=None):
-    if data is not None:
-        return _validate_dict(data, key_specs)
-
-
-def _validate_dict_or_empty(data, key_specs=None):
-    if data != {}:
-        return _validate_dict(data, key_specs)
-
-
-def _validate_dict_or_nodata(data, key_specs=None):
-    if data:
-        return _validate_dict(data, key_specs)
-
-
-def _validate_non_negative(data, valid_values=None):
-    try:
-        data = int(data)
-    except (ValueError, TypeError):
-        msg = _("'%s' is not an integer") % data
-        LOG.debug(msg)
-        return msg
-
-    if data < 0:
-        msg = _("'%s' should be non-negative") % data
-        LOG.debug(msg)
-        return msg
-
-
-def convert_to_boolean(data):
-    if isinstance(data, six.string_types):
-        val = data.lower()
-        if val == "true" or val == "1":
-            return True
-        if val == "false" or val == "0":
-            return False
-    elif isinstance(data, bool):
-        return data
-    elif isinstance(data, int):
-        if data == 0:
-            return False
-        elif data == 1:
-            return True
-    msg = _("'%s' cannot be converted to boolean") % data
-    raise n_exc.InvalidInput(error_message=msg)
-
-
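The accepted spellings, for reference:

    # Illustrative conversions.
    convert_to_boolean('TRUE')   # True (string matching is case-insensitive)
    convert_to_boolean('0')      # False
    convert_to_boolean(1)        # True
    convert_to_boolean('yes')    # raises n_exc.InvalidInput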
-def convert_to_boolean_if_not_none(data):
-    if data is not None:
-        return convert_to_boolean(data)
-
-
-def convert_to_int(data):
-    try:
-        return int(data)
-    except (ValueError, TypeError):
-        msg = _("'%s' is not an integer") % data
-        raise n_exc.InvalidInput(error_message=msg)
-
-
-def convert_to_int_if_not_none(data):
-    if data is not None:
-        return convert_to_int(data)
-    return data
-
-
-def convert_to_positive_float_or_none(val):
-    # NOTE(salv-orlando): This conversion function is currently used only
-    # by a vendor-specific extension, for the port's RXTX factor in
-    # neutron.plugins.vmware.extensions.qos. It is deemed, however, generic
-    # enough to live in this module as it might be used in the future for
-    # other API attributes.
-    if val is None:
-        return
-    try:
-        val = float(val)
-        if val < 0:
-            raise ValueError()
-    except (ValueError, TypeError):
-        msg = _("'%s' must be a non negative decimal.") % val
-        raise n_exc.InvalidInput(error_message=msg)
-    return val
-
-
-def convert_kvp_str_to_list(data):
-    """Convert a value of the form 'key=value' to ['key', 'value'].
-
-    :raises: n_exc.InvalidInput if any of the strings are malformed
-                                (e.g. do not contain a key).
-    """
-    kvp = [x.strip() for x in data.split('=', 1)]
-    if len(kvp) == 2 and kvp[0]:
-        return kvp
-    msg = _("'%s' is not of the form <key>=[value]") % data
-    raise n_exc.InvalidInput(error_message=msg)
-
-
-def convert_kvp_list_to_dict(kvp_list):
-    """Convert a list of 'key=value' strings to a dict.
-
-    :raises: n_exc.InvalidInput if any of the strings are malformed
-                                (e.g. do not contain a key) or if any
-                                of the keys appear more than once.
-    """
-    if kvp_list == ['True']:
-        # No values were provided (i.e. '--flag-name')
-        return {}
-    kvp_map = {}
-    for kvp_str in kvp_list:
-        key, value = convert_kvp_str_to_list(kvp_str)
-        kvp_map.setdefault(key, set())
-        kvp_map[key].add(value)
-    return dict((x, list(y)) for x, y in six.iteritems(kvp_map))
-
-
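For example, duplicate keys aggregate rather than raise:

    # Illustrative conversions; list order within a key is not guaranteed
    # because values pass through a set.
    convert_kvp_str_to_list('a=1')                   # ['a', '1']
    convert_kvp_list_to_dict(['a=1', 'a=2', 'b=3'])
    # -> {'a': ['1', '2'], 'b': ['3']}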
-def convert_none_to_empty_list(value):
-    return [] if value is None else value
-
-
-def convert_none_to_empty_dict(value):
-    return {} if value is None else value
-
-
-def convert_to_list(data):
-    if data is None:
-        return []
-    elif hasattr(data, '__iter__') and not isinstance(data, six.string_types):
-        return list(data)
-    else:
-        return [data]
-
-
-HEX_ELEM = '[0-9A-Fa-f]'
-UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
-                         HEX_ELEM + '{4}', HEX_ELEM + '{4}',
-                         HEX_ELEM + '{12}'])
-# Note: In order to ensure that the MAC address is unicast, the first byte
-# must be even (its least significant bit must be 0).
-MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
-
-# Dictionary that maintains a list of validation functions
-validators = {'type:dict': _validate_dict,
-              'type:dict_or_none': _validate_dict_or_none,
-              'type:dict_or_empty': _validate_dict_or_empty,
-              'type:dict_or_nodata': _validate_dict_or_nodata,
-              'type:fixed_ips': _validate_fixed_ips,
-              'type:hostroutes': _validate_hostroutes,
-              'type:ip_address': _validate_ip_address,
-              'type:ip_address_or_none': _validate_ip_address_or_none,
-              'type:ip_pools': _validate_ip_pools,
-              'type:mac_address': _validate_mac_address,
-              'type:mac_address_or_none': _validate_mac_address_or_none,
-              'type:nameservers': _validate_nameservers,
-              'type:non_negative': _validate_non_negative,
-              'type:range': _validate_range,
-              'type:regex': _validate_regex,
-              'type:regex_or_none': _validate_regex_or_none,
-              'type:string': _validate_string,
-              'type:string_or_none': _validate_string_or_none,
-              'type:not_empty_string': _validate_not_empty_string,
-              'type:not_empty_string_or_none':
-              _validate_not_empty_string_or_none,
-              'type:subnet': _validate_subnet,
-              'type:subnet_list': _validate_subnet_list,
-              'type:subnet_or_none': _validate_subnet_or_none,
-              'type:subnetpool_id': _validate_subnetpool_id,
-              'type:subnetpool_id_or_none': _validate_subnetpool_id_or_none,
-              'type:uuid': _validate_uuid,
-              'type:uuid_or_none': _validate_uuid_or_none,
-              'type:uuid_list': _validate_uuid_list,
-              'type:values': _validate_values,
-              'type:boolean': _validate_boolean,
-              'type:list_of_unique_strings': validate_list_of_unique_strings}
-
-# Define constants for base resource name
-NETWORK = 'network'
-NETWORKS = '%ss' % NETWORK
-PORT = 'port'
-PORTS = '%ss' % PORT
-SUBNET = 'subnet'
-SUBNETS = '%ss' % SUBNET
-SUBNETPOOL = 'subnetpool'
-SUBNETPOOLS = '%ss' % SUBNETPOOL
-# Note: a default of ATTR_NOT_SPECIFIED indicates that an
-# attribute is not required, but will be generated by the plugin
-# if it is not specified.  Particularly, a value of ATTR_NOT_SPECIFIED
-# is different from an attribute that has been specified with a value of
-# None.  For example, if 'gateway_ip' is omitted in a request to
-# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
-# and the default gateway_ip will be generated.
-# However, if gateway_ip is specified as None, this means that
-# the subnet does not have a gateway IP.
-# The following is a short reference for understanding attribute info:
-# default: default value of the attribute (if missing, the attribute
-# becomes mandatory).
-# allow_post: the attribute can be used on POST requests.
-# allow_put: the attribute can be used on PUT requests.
-# validate: specifies rules for validating data in the attribute.
-# convert_to: transformation to apply to the value before it is validated.
-# is_visible: the attribute is returned in GET responses.
-# required_by_policy: the attribute is required by the policy engine and
-# should therefore be filled by the API layer even if not present in
-# request body.
-# enforce_policy: the attribute is actively part of the policy enforcing
-# mechanism, i.e. there might be rules which refer to this attribute.
-
-RESOURCE_ATTRIBUTE_MAP = {
-    NETWORKS: {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'name': {'allow_post': True, 'allow_put': True,
-                 'validate': {'type:string': NAME_MAX_LEN},
-                 'default': '', 'is_visible': True},
-        'subnets': {'allow_post': False, 'allow_put': False,
-                    'default': [],
-                    'is_visible': True},
-        'admin_state_up': {'allow_post': True, 'allow_put': True,
-                           'default': True,
-                           'convert_to': convert_to_boolean,
-                           'is_visible': True},
-        'status': {'allow_post': False, 'allow_put': False,
-                   'is_visible': True},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'validate': {'type:string': TENANT_ID_MAX_LEN},
-                      'required_by_policy': True,
-                      'is_visible': True},
-        SHARED: {'allow_post': True,
-                 'allow_put': True,
-                 'default': False,
-                 'convert_to': convert_to_boolean,
-                 'is_visible': True,
-                 'required_by_policy': True,
-                 'enforce_policy': True},
-    },
-    PORTS: {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'name': {'allow_post': True, 'allow_put': True, 'default': '',
-                 'validate': {'type:string': NAME_MAX_LEN},
-                 'is_visible': True},
-        'network_id': {'allow_post': True, 'allow_put': False,
-                       'required_by_policy': True,
-                       'validate': {'type:uuid': None},
-                       'is_visible': True},
-        'admin_state_up': {'allow_post': True, 'allow_put': True,
-                           'default': True,
-                           'convert_to': convert_to_boolean,
-                           'is_visible': True},
-        'mac_address': {'allow_post': True, 'allow_put': True,
-                        'default': ATTR_NOT_SPECIFIED,
-                        'validate': {'type:mac_address': None},
-                        'enforce_policy': True,
-                        'is_visible': True},
-        'fixed_ips': {'allow_post': True, 'allow_put': True,
-                      'default': ATTR_NOT_SPECIFIED,
-                      'convert_list_to': convert_kvp_list_to_dict,
-                      'validate': {'type:fixed_ips': None},
-                      'enforce_policy': True,
-                      'is_visible': True},
-        'device_id': {'allow_post': True, 'allow_put': True,
-                      'validate': {'type:string': DEVICE_ID_MAX_LEN},
-                      'default': '',
-                      'is_visible': True},
-        'device_owner': {'allow_post': True, 'allow_put': True,
-                         'validate': {'type:string': DEVICE_OWNER_MAX_LEN},
-                         'default': '', 'enforce_policy': True,
-                         'is_visible': True},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'validate': {'type:string': TENANT_ID_MAX_LEN},
-                      'required_by_policy': True,
-                      'is_visible': True},
-        'status': {'allow_post': False, 'allow_put': False,
-                   'is_visible': True},
-    },
-    SUBNETS: {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'name': {'allow_post': True, 'allow_put': True, 'default': '',
-                 'validate': {'type:string': NAME_MAX_LEN},
-                 'is_visible': True},
-        'ip_version': {'allow_post': True, 'allow_put': False,
-                       'convert_to': convert_to_int,
-                       'validate': {'type:values': [4, 6]},
-                       'is_visible': True},
-        'network_id': {'allow_post': True, 'allow_put': False,
-                       'required_by_policy': True,
-                       'validate': {'type:uuid': None},
-                       'is_visible': True},
-        'subnetpool_id': {'allow_post': True,
-                          'allow_put': False,
-                          'default': ATTR_NOT_SPECIFIED,
-                          'required_by_policy': False,
-                          'validate': {'type:subnetpool_id_or_none': None},
-                          'is_visible': True},
-        'prefixlen': {'allow_post': True,
-                      'allow_put': False,
-                      'validate': {'type:non_negative': None},
-                      'convert_to': convert_to_int,
-                      'default': ATTR_NOT_SPECIFIED,
-                      'required_by_policy': False,
-                      'is_visible': False},
-        'cidr': {'allow_post': True,
-                 'allow_put': False,
-                 'default': ATTR_NOT_SPECIFIED,
-                 'validate': {'type:subnet_or_none': None},
-                 'required_by_policy': False,
-                 'is_visible': True},
-        'gateway_ip': {'allow_post': True, 'allow_put': True,
-                       'default': ATTR_NOT_SPECIFIED,
-                       'validate': {'type:ip_address_or_none': None},
-                       'is_visible': True},
-        'allocation_pools': {'allow_post': True, 'allow_put': True,
-                             'default': ATTR_NOT_SPECIFIED,
-                             'validate': {'type:ip_pools': None},
-                             'is_visible': True},
-        'dns_nameservers': {'allow_post': True, 'allow_put': True,
-                            'convert_to': convert_none_to_empty_list,
-                            'default': ATTR_NOT_SPECIFIED,
-                            'validate': {'type:nameservers': None},
-                            'is_visible': True},
-        'host_routes': {'allow_post': True, 'allow_put': True,
-                        'convert_to': convert_none_to_empty_list,
-                        'default': ATTR_NOT_SPECIFIED,
-                        'validate': {'type:hostroutes': None},
-                        'is_visible': True},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'validate': {'type:string': TENANT_ID_MAX_LEN},
-                      'required_by_policy': True,
-                      'is_visible': True},
-        'enable_dhcp': {'allow_post': True, 'allow_put': True,
-                        'default': True,
-                        'convert_to': convert_to_boolean,
-                        'is_visible': True},
-        'ipv6_ra_mode': {'allow_post': True, 'allow_put': False,
-                         'default': ATTR_NOT_SPECIFIED,
-                         'validate': {'type:values': constants.IPV6_MODES},
-                         'is_visible': True},
-        'ipv6_address_mode': {'allow_post': True, 'allow_put': False,
-                              'default': ATTR_NOT_SPECIFIED,
-                              'validate': {'type:values':
-                                           constants.IPV6_MODES},
-                              'is_visible': True},
-        SHARED: {'allow_post': False,
-                 'allow_put': False,
-                 'default': False,
-                 'convert_to': convert_to_boolean,
-                 'is_visible': False,
-                 'required_by_policy': True,
-                 'enforce_policy': True},
-    },
-    SUBNETPOOLS: {
-        'id': {'allow_post': False,
-               'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'name': {'allow_post': True,
-                 'allow_put': True,
-                 'validate': {'type:not_empty_string': None},
-                 'is_visible': True},
-        'tenant_id': {'allow_post': True,
-                      'allow_put': False,
-                      'validate': {'type:string': TENANT_ID_MAX_LEN},
-                      'required_by_policy': True,
-                      'is_visible': True},
-        'prefixes': {'allow_post': True,
-                     'allow_put': True,
-                     'validate': {'type:subnet_list': None},
-                     'is_visible': True},
-        'default_quota': {'allow_post': True,
-                          'allow_put': True,
-                          'validate': {'type:non_negative': None},
-                          'convert_to': convert_to_int,
-                          'default': ATTR_NOT_SPECIFIED,
-                          'is_visible': True},
-        'ip_version': {'allow_post': False,
-                       'allow_put': False,
-                       'is_visible': True},
-        'default_prefixlen': {'allow_post': True,
-                              'allow_put': True,
-                              'validate': {'type:non_negative': None},
-                              'convert_to': convert_to_int,
-                              'default': ATTR_NOT_SPECIFIED,
-                              'is_visible': True},
-        'min_prefixlen': {'allow_post': True,
-                          'allow_put': True,
-                          'default': ATTR_NOT_SPECIFIED,
-                          'validate': {'type:non_negative': None},
-                          'convert_to': convert_to_int,
-                          'is_visible': True},
-        'max_prefixlen': {'allow_post': True,
-                          'allow_put': True,
-                          'default': ATTR_NOT_SPECIFIED,
-                          'validate': {'type:non_negative': None},
-                          'convert_to': convert_to_int,
-                          'is_visible': True},
-        'is_default': {'allow_post': True,
-                       'allow_put': True,
-                       'default': False,
-                       'convert_to': convert_to_boolean,
-                       'is_visible': True,
-                       'required_by_policy': True,
-                       'enforce_policy': True},
-        SHARED: {'allow_post': True,
-                 'allow_put': False,
-                 'default': False,
-                 'convert_to': convert_to_boolean,
-                 'is_visible': True,
-                 'required_by_policy': True,
-                 'enforce_policy': True},
-    }
-}
-
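Extensions declare their own resources with the same attribute-info conventions. A hypothetical entry (resource and field names invented) showing the common keys together:

    # Hypothetical extension attribute map; 'widgets'/'color' are invented.
    EXAMPLE_RESOURCE_ATTRIBUTE_MAP = {
        'widgets': {
            'id': {'allow_post': False, 'allow_put': False,
                   'validate': {'type:uuid': None},
                   'is_visible': True, 'primary_key': True},
            'color': {'allow_post': True, 'allow_put': True,
                      'default': ATTR_NOT_SPECIFIED,
                      'validate': {'type:values': ['red', 'blue']},
                      'is_visible': True},
        },
    }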
-# Identify the attribute used by a resource to reference another resource
-
-RESOURCE_FOREIGN_KEYS = {
-    NETWORKS: 'network_id'
-}
-
-# Store plural/singular mappings
-PLURALS = {NETWORKS: NETWORK,
-           PORTS: PORT,
-           SUBNETS: SUBNET,
-           SUBNETPOOLS: SUBNETPOOL,
-           'dns_nameservers': 'dns_nameserver',
-           'host_routes': 'host_route',
-           'allocation_pools': 'allocation_pool',
-           'fixed_ips': 'fixed_ip',
-           'extensions': 'extension'}
-# Store singular/plural mappings. This dictionary is populated by
-# get_resource_info
-REVERSED_PLURALS = {}
-
-
-def get_collection_info(collection):
-    """Helper function to retrieve attribute info.
-
-    :param collection: Collection or plural name of the resource
-    """
-    return RESOURCE_ATTRIBUTE_MAP.get(collection)
-
-
-def get_resource_info(resource):
-    """Helper function to retrive attribute info
-
-    :param resource: resource name
-    """
-    plural_name = REVERSED_PLURALS.get(resource)
-    if not plural_name:
-        for (plural, singular) in PLURALS.items():
-            if singular == resource:
-                plural_name = plural
-                REVERSED_PLURALS[resource] = plural_name
-    return RESOURCE_ATTRIBUTE_MAP.get(plural_name)
-
-
-def fill_default_value(attr_info, res_dict,
-                       exc_cls=ValueError,
-                       check_allow_post=True):
-    for attr, attr_vals in six.iteritems(attr_info):
-        if attr_vals['allow_post']:
-            if ('default' not in attr_vals and
-                attr not in res_dict):
-                msg = _("Failed to parse request. Required "
-                        "attribute '%s' not specified") % attr
-                raise exc_cls(msg)
-            res_dict[attr] = res_dict.get(attr,
-                                          attr_vals.get('default'))
-        elif check_allow_post:
-            if attr in res_dict:
-                msg = _("Attribute '%s' not allowed in POST") % attr
-                raise exc_cls(msg)
-
-
-def convert_value(attr_info, res_dict, exc_cls=ValueError):
-    for attr, attr_vals in six.iteritems(attr_info):
-        if (attr not in res_dict or
-            res_dict[attr] is ATTR_NOT_SPECIFIED):
-            continue
-        # Convert values if necessary
-        if 'convert_to' in attr_vals:
-            res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
-        # Check that configured values are correct
-        if 'validate' not in attr_vals:
-            continue
-        for rule in attr_vals['validate']:
-            res = validators[rule](res_dict[attr], attr_vals['validate'][rule])
-            if res:
-                msg_dict = dict(attr=attr, reason=res)
-                msg = _("Invalid input for %(attr)s. "
-                        "Reason: %(reason)s.") % msg_dict
-                raise exc_cls(msg)
-
-
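A sketch of the request-body pipeline these helpers implement: defaults are filled first, then values are converted and validated (the attribute info below is a pared-down assumption, not a real resource):

    # Illustrative end-to-end use of fill_default_value()/convert_value().
    info = {'count': {'allow_post': True, 'allow_put': True,
                      'default': '0',
                      'convert_to': convert_to_int,
                      'validate': {'type:non_negative': None},
                      'is_visible': True}}
    body = {}
    fill_default_value(info, body)   # body == {'count': '0'}
    convert_value(info, body)        # body == {'count': 0}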
-def populate_tenant_id(context, res_dict, attr_info, is_create):
-    if (('tenant_id' in res_dict and
-         res_dict['tenant_id'] != context.tenant_id and
-         not context.is_admin)):
-        msg = _("Specifying 'tenant_id' other than authenticated "
-                "tenant in request requires admin privileges")
-        raise webob.exc.HTTPBadRequest(msg)
-
-    if is_create and 'tenant_id' not in res_dict:
-        if context.tenant_id:
-            res_dict['tenant_id'] = context.tenant_id
-        elif 'tenant_id' in attr_info:
-            msg = _("Running without keystone AuthN requires "
-                    "that tenant_id is specified")
-            raise webob.exc.HTTPBadRequest(msg)
-
-
-def verify_attributes(res_dict, attr_info):
-    extra_keys = set(res_dict.keys()) - set(attr_info.keys())
-    if extra_keys:
-        msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
-        raise webob.exc.HTTPBadRequest(msg)
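verify_attributes() is the strict counterpart: any key outside the declared attribute map is rejected outright. A small sketch, again with an illustrative map:

try:
    verify_attributes({'name': 'net1', 'bogus': 1}, {'name': {}})
except webob.exc.HTTPBadRequest as err:
    print(err)  # Unrecognized attribute(s) 'bogus'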
diff --git a/neutron/api/v2/base.py b/neutron/api/v2/base.py
deleted file mode 100644 (file)
index 80a51db..0000000
+++ /dev/null
@@ -1,724 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import copy
-
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_policy import policy as oslo_policy
-from oslo_utils import excutils
-import six
-import webob.exc
-
-from neutron._i18n import _, _LE, _LI
-from neutron.api import api_common
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.v2 import attributes
-from neutron.api.v2 import resource as wsgi_resource
-from neutron.common import constants as const
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.db import api as db_api
-from neutron import policy
-from neutron import quota
-from neutron.quota import resource_registry
-
-
-LOG = logging.getLogger(__name__)
-
-FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
-             exceptions.Conflict: webob.exc.HTTPConflict,
-             exceptions.InUse: webob.exc.HTTPConflict,
-             exceptions.BadRequest: webob.exc.HTTPBadRequest,
-             exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
-             exceptions.NotAuthorized: webob.exc.HTTPForbidden,
-             netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
-             oslo_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden
-             }
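FAULT_MAP is consumed by the WSGI Resource wrapper in neutron/api/v2/resource.py (below), which scans it with isinstance to pick the HTTP error class for a raised exception. A sketch of that scan, assuming the FAULT_MAP above; map_fault is a hypothetical helper name:

def map_fault(exc, faults=FAULT_MAP):
    # The first mapping whose key the exception is an instance of wins;
    # anything unmapped becomes a 500.
    for fault_cls, http_cls in faults.items():
        if isinstance(exc, fault_cls):
            return http_cls
    return webob.exc.HTTPInternalServerError

assert map_fault(exceptions.NotFound()) is webob.exc.HTTPNotFound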
-
-
-class Controller(object):
-    LIST = 'list'
-    SHOW = 'show'
-    CREATE = 'create'
-    UPDATE = 'update'
-    DELETE = 'delete'
-
-    def __init__(self, plugin, collection, resource, attr_info,
-                 allow_bulk=False, member_actions=None, parent=None,
-                 allow_pagination=False, allow_sorting=False):
-        if member_actions is None:
-            member_actions = []
-        self._plugin = plugin
-        self._collection = collection.replace('-', '_')
-        self._resource = resource.replace('-', '_')
-        self._attr_info = attr_info
-        self._allow_bulk = allow_bulk
-        self._allow_pagination = allow_pagination
-        self._allow_sorting = allow_sorting
-        self._native_bulk = self._is_native_bulk_supported()
-        self._native_pagination = self._is_native_pagination_supported()
-        self._native_sorting = self._is_native_sorting_supported()
-        self._policy_attrs = [name for (name, info) in self._attr_info.items()
-                              if info.get('required_by_policy')]
-        self._notifier = n_rpc.get_notifier('network')
-        # use plugin's dhcp notifier, if this is already instantiated
-        agent_notifiers = getattr(plugin, 'agent_notifiers', {})
-        self._dhcp_agent_notifier = (
-            agent_notifiers.get(const.AGENT_TYPE_DHCP) or
-            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
-        )
-        if cfg.CONF.notify_nova_on_port_data_changes:
-            from neutron.notifiers import nova
-            self._nova_notifier = nova.Notifier()
-        self._member_actions = member_actions
-        self._primary_key = self._get_primary_key()
-        if self._allow_pagination and self._native_pagination:
-            # Native pagination needs native sorting support
-            if not self._native_sorting:
-                raise exceptions.Invalid(
-                    _("Native pagination depends on native sorting")
-                )
-            if not self._allow_sorting:
-                LOG.info(_LI("Allow sorting is enabled because native "
-                             "pagination requires native sorting"))
-                self._allow_sorting = True
-
-        if parent:
-            self._parent_id_name = '%s_id' % parent['member_name']
-            parent_part = '_%s' % parent['member_name']
-        else:
-            self._parent_id_name = None
-            parent_part = ''
-        self._plugin_handlers = {
-            self.LIST: 'get%s_%s' % (parent_part, self._collection),
-            self.SHOW: 'get%s_%s' % (parent_part, self._resource)
-        }
-        for action in [self.CREATE, self.UPDATE, self.DELETE]:
-            self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
-                                                         self._resource)
-
-    def _get_primary_key(self, default_primary_key='id'):
-        for key, value in six.iteritems(self._attr_info):
-            if value.get('primary_key', False):
-                return key
-        return default_primary_key
-
-    def _is_native_bulk_supported(self):
-        native_bulk_attr_name = ("_%s__native_bulk_support"
-                                 % self._plugin.__class__.__name__)
-        return getattr(self._plugin, native_bulk_attr_name, False)
-
-    def _is_native_pagination_supported(self):
-        native_pagination_attr_name = ("_%s__native_pagination_support"
-                                       % self._plugin.__class__.__name__)
-        return getattr(self._plugin, native_pagination_attr_name, False)
-
-    def _is_native_sorting_supported(self):
-        native_sorting_attr_name = ("_%s__native_sorting_support"
-                                    % self._plugin.__class__.__name__)
-        return getattr(self._plugin, native_sorting_attr_name, False)
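These three probes rely on Python's private-name mangling: a plugin that sets __native_bulk_support = True in its class body actually stores the attribute as _<ClassName>__native_bulk_support, which is exactly the name reconstructed above. A minimal sketch:

class MyPlugin(object):
    __native_bulk_support = True  # mangled to _MyPlugin__native_bulk_support

plugin = MyPlugin()
attr_name = '_%s__native_bulk_support' % plugin.__class__.__name__
assert getattr(plugin, attr_name, False) is True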
-
-    def _exclude_attributes_by_policy(self, context, data):
-        """Identifies attributes to exclude according to authZ policies.
-
-        Return a list of attribute names which should be stripped from the
-        response returned to the user because the user is not authorized
-        to see them.
-        """
-        attributes_to_exclude = []
-        for attr_name in data.keys():
-            attr_data = self._attr_info.get(attr_name)
-            if attr_data and attr_data['is_visible']:
-                if policy.check(
-                    context,
-                    '%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
-                    data,
-                    might_not_exist=True,
-                    pluralized=self._collection):
-                    # this attribute is visible, check next one
-                    continue
-            # if the code reaches this point then either the policy check
-            # failed or the attribute was not visible in the first place
-            attributes_to_exclude.append(attr_name)
-        return attributes_to_exclude
-
-    def _view(self, context, data, fields_to_strip=None):
-        """Build a view of an API resource.
-
-        :param context: the neutron context
-        :param data: the object for which a view is being created
-        :param fields_to_strip: attributes to remove from the view
-
-        :returns: a view of the object which includes only attributes
-        visible according to API resource declaration and authZ policies.
-        """
-        fields_to_strip = ((fields_to_strip or []) +
-                           self._exclude_attributes_by_policy(context, data))
-        return self._filter_attributes(context, data, fields_to_strip)
-
-    def _filter_attributes(self, context, data, fields_to_strip=None):
-        if not fields_to_strip:
-            return data
-        return dict(item for item in six.iteritems(data)
-                    if (item[0] not in fields_to_strip))
-
-    def _do_field_list(self, original_fields):
-        fields_to_add = None
-        # don't do anything if fields were not specified in the request
-        if original_fields:
-            fields_to_add = [attr for attr in self._policy_attrs
-                             if attr not in original_fields]
-            original_fields.extend(self._policy_attrs)
-        return original_fields, fields_to_add
-
-    def __getattr__(self, name):
-        if name in self._member_actions:
-            @db_api.retry_db_errors
-            def _handle_action(request, id, **kwargs):
-                arg_list = [request.context, id]
-                # Ensure policy engine is initialized
-                policy.init()
-                # Fetch the resource and verify if the user can access it
-                try:
-                    parent_id = kwargs.get(self._parent_id_name)
-                    resource = self._item(request,
-                                          id,
-                                          do_authz=True,
-                                          field_list=None,
-                                          parent_id=parent_id)
-                except oslo_policy.PolicyNotAuthorized:
-                    msg = _('The resource could not be found.')
-                    raise webob.exc.HTTPNotFound(msg)
-                body = copy.deepcopy(kwargs.pop('body', None))
-                # Explicit comparison with None to distinguish from {}
-                if body is not None:
-                    arg_list.append(body)
-                # It is ok to raise a 403 because accessibility to the
-                # object was checked earlier in this method
-                policy.enforce(request.context,
-                               name,
-                               resource,
-                               pluralized=self._collection)
-                ret_value = getattr(self._plugin, name)(*arg_list, **kwargs)
-                # It is simply impossible to predict whether one of these
-                # actions alters resource usage. For instance, a tenant port
-                # is created when a router interface is added. It is therefore
-                # important to mark as dirty any resources whose counters have
-                # been altered by this operation.
-                resource_registry.set_resources_dirty(request.context)
-                return ret_value
-
-            return _handle_action
-        else:
-            raise AttributeError()
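Stripped to its skeleton, the member-action handling above is a __getattr__ factory: names listed in member_actions resolve to a freshly built handler, while everything else keeps normal attribute semantics. A self-contained sketch (class and handler names are hypothetical):

class ActionDispatcher(object):
    def __init__(self, plugin, member_actions):
        self._plugin = plugin
        self._member_actions = member_actions

    def __getattr__(self, name):
        if name in self._member_actions:
            def _handle_action(request, id, **kwargs):
                # the real handler also runs authz checks and marks
                # quota trackers dirty before returning
                return getattr(self._plugin, name)(request, id, **kwargs)
            return _handle_action
        raise AttributeError(name)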
-
-    def _get_pagination_helper(self, request):
-        if self._allow_pagination and self._native_pagination:
-            return api_common.PaginationNativeHelper(request,
-                                                     self._primary_key)
-        elif self._allow_pagination:
-            return api_common.PaginationEmulatedHelper(request,
-                                                       self._primary_key)
-        return api_common.NoPaginationHelper(request, self._primary_key)
-
-    def _get_sorting_helper(self, request):
-        if self._allow_sorting and self._native_sorting:
-            return api_common.SortingNativeHelper(request, self._attr_info)
-        elif self._allow_sorting:
-            return api_common.SortingEmulatedHelper(request, self._attr_info)
-        return api_common.NoSortingHelper(request, self._attr_info)
-
-    def _items(self, request, do_authz=False, parent_id=None):
-        """Retrieves and formats a list of elements of the requested entity."""
-        # NOTE(salvatore-orlando): The following ensures that fields which
-        # are needed for authZ policy validation are not stripped away by the
-        # plugin before returning.
-        original_fields, fields_to_add = self._do_field_list(
-            api_common.list_args(request, 'fields'))
-        filters = api_common.get_filters(request, self._attr_info,
-                                         ['fields', 'sort_key', 'sort_dir',
-                                          'limit', 'marker', 'page_reverse'])
-        kwargs = {'filters': filters,
-                  'fields': original_fields}
-        sorting_helper = self._get_sorting_helper(request)
-        pagination_helper = self._get_pagination_helper(request)
-        sorting_helper.update_args(kwargs)
-        sorting_helper.update_fields(original_fields, fields_to_add)
-        pagination_helper.update_args(kwargs)
-        pagination_helper.update_fields(original_fields, fields_to_add)
-        if parent_id:
-            kwargs[self._parent_id_name] = parent_id
-        obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
-        obj_list = obj_getter(request.context, **kwargs)
-        obj_list = sorting_helper.sort(obj_list)
-        obj_list = pagination_helper.paginate(obj_list)
-        # Check authz
-        if do_authz:
-            # FIXME(salvatore-orlando): obj_getter might return references to
-            # other resources. Must check authZ on them too.
-            # Omit items from list that should not be visible
-            obj_list = [obj for obj in obj_list
-                        if policy.check(request.context,
-                                        self._plugin_handlers[self.SHOW],
-                                        obj,
-                                        plugin=self._plugin,
-                                        pluralized=self._collection)]
-        # Use the first element in the list for discriminating which attributes
-        # should be filtered out because of authZ policies
-        # fields_to_add contains a list of attributes added for request policy
-        # checks but not requested by the user; they should therefore be
-        # stripped
-        fields_to_strip = fields_to_add or []
-        if obj_list:
-            fields_to_strip += self._exclude_attributes_by_policy(
-                request.context, obj_list[0])
-        collection = {self._collection:
-                      [self._filter_attributes(
-                          request.context, obj,
-                          fields_to_strip=fields_to_strip)
-                       for obj in obj_list]}
-        pagination_links = pagination_helper.get_links(obj_list)
-        if pagination_links:
-            collection[self._collection + "_links"] = pagination_links
-        # Synchronize usage trackers, if needed
-        resource_registry.resync_resource(
-            request.context, self._resource, request.context.tenant_id)
-        return collection
-
-    def _item(self, request, id, do_authz=False, field_list=None,
-              parent_id=None):
-        """Retrieves and formats a single element of the requested entity."""
-        kwargs = {'fields': field_list}
-        action = self._plugin_handlers[self.SHOW]
-        if parent_id:
-            kwargs[self._parent_id_name] = parent_id
-        obj_getter = getattr(self._plugin, action)
-        obj = obj_getter(request.context, id, **kwargs)
-        # Check authz
-        # FIXME(salvatore-orlando): obj_getter might return references to
-        # other resources. Must check authZ on them too.
-        if do_authz:
-            policy.enforce(request.context,
-                           action,
-                           obj,
-                           pluralized=self._collection)
-        return obj
-
-    def _send_dhcp_notification(self, context, data, methodname):
-        if cfg.CONF.dhcp_agent_notification:
-            if self._collection in data:
-                for body in data[self._collection]:
-                    item = {self._resource: body}
-                    self._dhcp_agent_notifier.notify(context, item, methodname)
-            else:
-                self._dhcp_agent_notifier.notify(context, data, methodname)
-
-    def _send_nova_notification(self, action, orig, returned):
-        if hasattr(self, '_nova_notifier'):
-            self._nova_notifier.send_network_change(action, orig, returned)
-
-    def index(self, request, **kwargs):
-        """Returns a list of the requested entity."""
-        parent_id = kwargs.get(self._parent_id_name)
-        # Ensure policy engine is initialized
-        policy.init()
-        return self._items(request, True, parent_id)
-
-    def show(self, request, id, **kwargs):
-        """Returns detailed information about the requested entity."""
-        try:
-            # NOTE(salvatore-orlando): The following ensures that fields
-            # which are needed for authZ policy validation are not stripped
-            # away by the plugin before returning.
-            field_list, added_fields = self._do_field_list(
-                api_common.list_args(request, "fields"))
-            parent_id = kwargs.get(self._parent_id_name)
-            # Ensure policy engine is initialized
-            policy.init()
-            return {self._resource:
-                    self._view(request.context,
-                               self._item(request,
-                                          id,
-                                          do_authz=True,
-                                          field_list=field_list,
-                                          parent_id=parent_id),
-                               fields_to_strip=added_fields)}
-        except oslo_policy.PolicyNotAuthorized:
-            # To avoid giving away information, pretend that it
-            # doesn't exist
-            msg = _('The resource could not be found.')
-            raise webob.exc.HTTPNotFound(msg)
-
-    def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
-        objs = []
-        try:
-            for item in body[self._collection]:
-                kwargs = {self._resource: item}
-                if parent_id:
-                    kwargs[self._parent_id_name] = parent_id
-                fields_to_strip = self._exclude_attributes_by_policy(
-                    request.context, item)
-                objs.append(self._filter_attributes(
-                    request.context,
-                    obj_creator(request.context, **kwargs),
-                    fields_to_strip=fields_to_strip))
-            return objs
-        # Note(salvatore-orlando): broad catch as in theory a plugin
-        # could raise any kind of exception
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                for obj in objs:
-                    obj_deleter = getattr(self._plugin,
-                                          self._plugin_handlers[self.DELETE])
-                    try:
-                        kwargs = ({self._parent_id_name: parent_id}
-                                  if parent_id else {})
-                        obj_deleter(request.context, obj['id'], **kwargs)
-                    except Exception:
-                        # broad catch as our only purpose is to log the
-                        # exception
-                        LOG.exception(_LE("Unable to undo add for "
-                                          "%(resource)s %(id)s"),
-                                      {'resource': self._resource,
-                                       'id': obj['id']})
-                # TODO(salvatore-orlando): The object being processed when the
-                # plugin raised may or may not have been created in the db.
-                # We need a way of ensuring that, if it has been created,
-                # it is then deleted.
-
-    def create(self, request, body=None, **kwargs):
-        self._notifier.info(request.context,
-                            self._resource + '.create.start',
-                            body)
-        return self._create(request, body, **kwargs)
-
-    @db_api.retry_db_errors
-    def _create(self, request, body, **kwargs):
-        """Creates a new instance of the requested entity."""
-        parent_id = kwargs.get(self._parent_id_name)
-        body = Controller.prepare_request_body(request.context,
-                                               copy.deepcopy(body), True,
-                                               self._resource, self._attr_info,
-                                               allow_bulk=self._allow_bulk)
-        action = self._plugin_handlers[self.CREATE]
-        # Check authz
-        if self._collection in body:
-            # Have to account for bulk create
-            items = body[self._collection]
-        else:
-            items = [body]
-        # Ensure policy engine is initialized
-        policy.init()
-        # Store requested resource amounts, grouping them by tenant.
-        # This won't work with multiple resources. However, because of the
-        # current structure of this controller, there will hardly ever be
-        # more than one resource for which reservations are being made.
-        request_deltas = collections.defaultdict(int)
-        for item in items:
-            self._validate_network_tenant_ownership(request,
-                                                    item[self._resource])
-            policy.enforce(request.context,
-                           action,
-                           item[self._resource],
-                           pluralized=self._collection)
-            if 'tenant_id' not in item[self._resource]:
-                # no tenant_id - no quota check
-                continue
-            tenant_id = item[self._resource]['tenant_id']
-            request_deltas[tenant_id] += 1
-        # Quota enforcement
-        reservations = []
-        try:
-            for (tenant, delta) in request_deltas.items():
-                reservation = quota.QUOTAS.make_reservation(
-                    request.context,
-                    tenant,
-                    {self._resource: delta},
-                    self._plugin)
-                reservations.append(reservation)
-        except exceptions.QuotaResourceUnknown as e:
-            # We don't want to quota this resource
-            LOG.debug(e)
-
-        def notify(create_result):
-            # Ensure usage trackers for all resources affected by this API
-            # operation are marked as dirty
-            with request.context.session.begin():
-                # Commit the reservation(s)
-                for reservation in reservations:
-                    quota.QUOTAS.commit_reservation(
-                        request.context, reservation.reservation_id)
-                resource_registry.set_resources_dirty(request.context)
-
-            notifier_method = self._resource + '.create.end'
-            self._notifier.info(request.context,
-                                notifier_method,
-                                create_result)
-            self._send_dhcp_notification(request.context,
-                                         create_result,
-                                         notifier_method)
-            return create_result
-
-        def do_create(body, bulk=False, emulated=False):
-            kwargs = {self._parent_id_name: parent_id} if parent_id else {}
-            if bulk and not emulated:
-                obj_creator = getattr(self._plugin, "%s_bulk" % action)
-            else:
-                obj_creator = getattr(self._plugin, action)
-            try:
-                if emulated:
-                    return self._emulate_bulk_create(obj_creator, request,
-                                                     body, parent_id)
-                else:
-                    if self._collection in body:
-                        # This is weird but fixing it requires changes to the
-                        # plugin interface
-                        kwargs.update({self._collection: body})
-                    else:
-                        kwargs.update({self._resource: body})
-                    return obj_creator(request.context, **kwargs)
-            except Exception:
-                # In case of failure, the plugin will always raise an
-                # exception; cancel the reservation(s).
-                with excutils.save_and_reraise_exception():
-                    for reservation in reservations:
-                        quota.QUOTAS.cancel_reservation(
-                            request.context, reservation.reservation_id)
-
-        if self._collection in body and self._native_bulk:
-            # plugin does atomic bulk create operations
-            objs = do_create(body, bulk=True)
-            # Use first element of list to discriminate attributes which
-            # should be removed because of authZ policies
-            fields_to_strip = self._exclude_attributes_by_policy(
-                request.context, objs[0])
-            return notify({self._collection: [self._filter_attributes(
-                request.context, obj, fields_to_strip=fields_to_strip)
-                for obj in objs]})
-        else:
-            if self._collection in body:
-                # Emulate atomic bulk behavior
-                objs = do_create(body, bulk=True, emulated=True)
-                return notify({self._collection: objs})
-            else:
-                obj = do_create(body)
-                self._send_nova_notification(action, {},
-                                             {self._resource: obj})
-                return notify({self._resource: self._view(request.context,
-                                                          obj)})
-
-    def delete(self, request, id, **kwargs):
-        """Deletes the specified entity."""
-        self._notifier.info(request.context,
-                            self._resource + '.delete.start',
-                            {self._resource + '_id': id})
-        return self._delete(request, id, **kwargs)
-
-    @db_api.retry_db_errors
-    def _delete(self, request, id, **kwargs):
-        action = self._plugin_handlers[self.DELETE]
-
-        # Check authz
-        policy.init()
-        parent_id = kwargs.get(self._parent_id_name)
-        obj = self._item(request, id, parent_id=parent_id)
-        try:
-            policy.enforce(request.context,
-                           action,
-                           obj,
-                           pluralized=self._collection)
-        except oslo_policy.PolicyNotAuthorized:
-            # To avoid giving away information, pretend that it
-            # doesn't exist
-            msg = _('The resource could not be found.')
-            raise webob.exc.HTTPNotFound(msg)
-
-        obj_deleter = getattr(self._plugin, action)
-        obj_deleter(request.context, id, **kwargs)
-        # A delete operation usually alters resource usage, so mark affected
-        # usage trackers as dirty
-        resource_registry.set_resources_dirty(request.context)
-        notifier_method = self._resource + '.delete.end'
-        self._notifier.info(request.context,
-                            notifier_method,
-                            {self._resource + '_id': id})
-        result = {self._resource: self._view(request.context, obj)}
-        self._send_nova_notification(action, {}, result)
-        self._send_dhcp_notification(request.context,
-                                     result,
-                                     notifier_method)
-
-    def update(self, request, id, body=None, **kwargs):
-        """Updates the specified entity's attributes."""
-        try:
-            payload = body.copy()
-        except AttributeError:
-            msg = _("Invalid format: %s") % request.body
-            raise exceptions.BadRequest(resource='body', msg=msg)
-        payload['id'] = id
-        self._notifier.info(request.context,
-                            self._resource + '.update.start',
-                            payload)
-        return self._update(request, id, body, **kwargs)
-
-    @db_api.retry_db_errors
-    def _update(self, request, id, body, **kwargs):
-        body = Controller.prepare_request_body(request.context, body, False,
-                                               self._resource, self._attr_info,
-                                               allow_bulk=self._allow_bulk)
-        action = self._plugin_handlers[self.UPDATE]
-        # Load object to check authz
-        # but pass only attributes in the original body and required
-        # by the policy engine to the policy 'brain'
-        field_list = [name for (name, value) in six.iteritems(self._attr_info)
-                      if (value.get('required_by_policy') or
-                          value.get('primary_key') or
-                          'default' not in value)]
-        # Ensure policy engine is initialized
-        policy.init()
-        parent_id = kwargs.get(self._parent_id_name)
-        orig_obj = self._item(request, id, field_list=field_list,
-                              parent_id=parent_id)
-        orig_object_copy = copy.copy(orig_obj)
-        orig_obj.update(body[self._resource])
-        # Make a list of attributes to be updated to inform the policy engine
-        # which attributes are set explicitly so that it can distinguish them
-        # from the ones that are set to their default values.
-        orig_obj[const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
-        try:
-            policy.enforce(request.context,
-                           action,
-                           orig_obj,
-                           pluralized=self._collection)
-        except oslo_policy.PolicyNotAuthorized:
-            with excutils.save_and_reraise_exception() as ctxt:
-                # If a tenant is modifying its own object, it's safe to return
-                # a 403. Otherwise, pretend that it doesn't exist to avoid
-                # giving away information.
-                if request.context.tenant_id != orig_obj['tenant_id']:
-                    ctxt.reraise = False
-            msg = _('The resource could not be found.')
-            raise webob.exc.HTTPNotFound(msg)
-
-        obj_updater = getattr(self._plugin, action)
-        kwargs = {self._resource: body}
-        if parent_id:
-            kwargs[self._parent_id_name] = parent_id
-        obj = obj_updater(request.context, id, **kwargs)
-        # Usually an update operation does not alter resource usage, but as
-        # there might be side effects it might be worth checking for changes
-        # in resource usage here as well (e.g: a tenant port is created when a
-        # router interface is added)
-        resource_registry.set_resources_dirty(request.context)
-
-        result = {self._resource: self._view(request.context, obj)}
-        notifier_method = self._resource + '.update.end'
-        self._notifier.info(request.context, notifier_method, result)
-        self._send_dhcp_notification(request.context,
-                                     result,
-                                     notifier_method)
-        self._send_nova_notification(action, orig_object_copy, result)
-        return result
-
-    @staticmethod
-    def prepare_request_body(context, body, is_create, resource, attr_info,
-                             allow_bulk=False):
-        """Verifies required attributes are in request body.
-
-        Also checking that an attribute is only specified if it is allowed
-        for the given operation (create/update).
-
-        Attribute with default values are considered to be optional.
-
-        body argument must be the deserialized body.
-        """
-        collection = resource + "s"
-        if not body:
-            raise webob.exc.HTTPBadRequest(_("Resource body required"))
-
-        LOG.debug("Request body: %(body)s", {'body': body})
-        try:
-            if collection in body:
-                if not allow_bulk:
-                    raise webob.exc.HTTPBadRequest(_("Bulk operation "
-                                                     "not supported"))
-                if not body[collection]:
-                    raise webob.exc.HTTPBadRequest(_("Resources required"))
-                bulk_body = [
-                    Controller.prepare_request_body(
-                        context, item if resource in item
-                        else {resource: item}, is_create, resource, attr_info,
-                        allow_bulk) for item in body[collection]
-                ]
-                return {collection: bulk_body}
-            res_dict = body.get(resource)
-        except (AttributeError, TypeError):
-            msg = _("Body contains invalid data")
-            raise webob.exc.HTTPBadRequest(msg)
-        if res_dict is None:
-            msg = _("Unable to find '%s' in request body") % resource
-            raise webob.exc.HTTPBadRequest(msg)
-
-        attributes.populate_tenant_id(context, res_dict, attr_info, is_create)
-        attributes.verify_attributes(res_dict, attr_info)
-
-        if is_create:  # POST
-            attributes.fill_default_value(attr_info, res_dict,
-                                          webob.exc.HTTPBadRequest)
-        else:  # PUT
-            for attr, attr_vals in six.iteritems(attr_info):
-                if attr in res_dict and not attr_vals['allow_put']:
-                    msg = _("Cannot update read-only attribute %s") % attr
-                    raise webob.exc.HTTPBadRequest(msg)
-
-        attributes.convert_value(attr_info, res_dict, webob.exc.HTTPBadRequest)
-        return body
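Note the bulk normalization above: each item of body[collection] is re-wrapped as a singular resource before the recursive call. An illustrative trace with a made-up body:

body = {'ports': [{'name': 'p1'}, {'port': {'name': 'p2'}}]}
bulk = [item if 'port' in item else {'port': item}
        for item in body['ports']]
assert bulk == [{'port': {'name': 'p1'}}, {'port': {'name': 'p2'}}]
# prepare_request_body() then validates each wrapped item and returns
# {'ports': [<prepared p1>, <prepared p2>]}.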
-
-    def _validate_network_tenant_ownership(self, request, resource_item):
-        # TODO(salvatore-orlando): consider whether this check can be folded
-        # in the policy engine
-        if (request.context.is_admin or request.context.is_advsvc or
-                self._resource not in ('port', 'subnet')):
-            return
-        network = self._plugin.get_network(
-            request.context,
-            resource_item['network_id'])
-        # do not perform the check on shared networks
-        if network.get('shared'):
-            return
-
-        network_owner = network['tenant_id']
-
-        if network_owner != resource_item['tenant_id']:
-            msg = _("Tenant %(tenant_id)s not allowed to "
-                    "create %(resource)s on this network")
-            raise webob.exc.HTTPForbidden(msg % {
-                "tenant_id": resource_item['tenant_id'],
-                "resource": self._resource,
-            })
-
-
-def create_resource(collection, resource, plugin, params, allow_bulk=False,
-                    member_actions=None, parent=None, allow_pagination=False,
-                    allow_sorting=False):
-    controller = Controller(plugin, collection, resource, params, allow_bulk,
-                            member_actions=member_actions, parent=parent,
-                            allow_pagination=allow_pagination,
-                            allow_sorting=allow_sorting)
-
-    return wsgi_resource.Resource(controller, FAULT_MAP)
diff --git a/neutron/api/v2/resource.py b/neutron/api/v2/resource.py
deleted file mode 100644 (file)
index 0687977..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Utility methods for working with WSGI servers redux
-"""
-
-import sys
-
-import netaddr
-import oslo_i18n
-from oslo_log import log as logging
-from oslo_policy import policy as oslo_policy
-import six
-import webob.dec
-import webob.exc
-
-from neutron._i18n import _, _LE, _LI
-from neutron.common import exceptions
-from neutron import wsgi
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Request(wsgi.Request):
-    pass
-
-
-def Resource(controller, faults=None, deserializers=None, serializers=None):
-    """Represents an API entity resource and the associated serialization and
-    deserialization logic
-    """
-    default_deserializers = {'application/json': wsgi.JSONDeserializer()}
-    default_serializers = {'application/json': wsgi.JSONDictSerializer()}
-    format_types = {'json': 'application/json'}
-    action_status = dict(create=201, delete=204)
-
-    default_deserializers.update(deserializers or {})
-    default_serializers.update(serializers or {})
-
-    deserializers = default_deserializers
-    serializers = default_serializers
-    faults = faults or {}
-
-    @webob.dec.wsgify(RequestClass=Request)
-    def resource(request):
-        route_args = request.environ.get('wsgiorg.routing_args')
-        if route_args:
-            args = route_args[1].copy()
-        else:
-            args = {}
-
-        # NOTE(jkoelker) by now the controller is already found, remove
-        #                it from the args if it is in the matchdict
-        args.pop('controller', None)
-        fmt = args.pop('format', None)
-        action = args.pop('action', None)
-        content_type = format_types.get(fmt,
-                                        request.best_match_content_type())
-        language = request.best_match_language()
-        deserializer = deserializers.get(content_type)
-        serializer = serializers.get(content_type)
-
-        try:
-            if request.body:
-                args['body'] = deserializer.deserialize(request.body)['body']
-
-            method = getattr(controller, action)
-
-            result = method(request=request, **args)
-        except (exceptions.NeutronException,
-                netaddr.AddrFormatError,
-                oslo_policy.PolicyNotAuthorized) as e:
-            for fault in faults:
-                if isinstance(e, fault):
-                    mapped_exc = faults[fault]
-                    break
-            else:
-                mapped_exc = webob.exc.HTTPInternalServerError
-            if 400 <= mapped_exc.code < 500:
-                LOG.info(_LI('%(action)s failed (client error): %(exc)s'),
-                         {'action': action, 'exc': e})
-            else:
-                LOG.exception(_LE('%s failed'), action)
-            e = translate(e, language)
-            body = serializer.serialize(
-                {'NeutronError': get_exception_data(e)})
-            kwargs = {'body': body, 'content_type': content_type}
-            raise mapped_exc(**kwargs)
-        except webob.exc.HTTPException as e:
-            type_, value, tb = sys.exc_info()
-            if hasattr(e, 'code') and 400 <= e.code < 500:
-                LOG.info(_LI('%(action)s failed (client error): %(exc)s'),
-                         {'action': action, 'exc': e})
-            else:
-                LOG.exception(_LE('%s failed'), action)
-            translate(e, language)
-            value.body = serializer.serialize(
-                {'NeutronError': get_exception_data(e)})
-            value.content_type = content_type
-            six.reraise(type_, value, tb)
-        except NotImplementedError as e:
-            e = translate(e, language)
-            # NOTE(armando-migliaccio): from a client standpoint
-            # it makes sense to receive these errors, because
-            # extensions may or may not be implemented by
-            # the underlying plugin. So if something goes south,
-            # because a plugin does not implement a feature,
-            # returning 500 is definitely confusing.
-            body = serializer.serialize(
-                {'NotImplementedError': get_exception_data(e)})
-            kwargs = {'body': body, 'content_type': content_type}
-            raise webob.exc.HTTPNotImplemented(**kwargs)
-        except Exception:
-            # NOTE(jkoelker) Everything else is 500
-            LOG.exception(_LE('%s failed'), action)
-            # Do not expose details of 500 error to clients.
-            msg = _('Request Failed: internal server error while '
-                    'processing your request.')
-            msg = translate(msg, language)
-            body = serializer.serialize(
-                {'NeutronError': get_exception_data(
-                    webob.exc.HTTPInternalServerError(msg))})
-            kwargs = {'body': body, 'content_type': content_type}
-            raise webob.exc.HTTPInternalServerError(**kwargs)
-
-        status = action_status.get(action, 200)
-        body = serializer.serialize(result)
-        # NOTE(jkoelker) Comply with RFC2616 section 9.7
-        if status == 204:
-            content_type = ''
-            body = None
-
-        return webob.Response(request=request, status=status,
-                              content_type=content_type,
-                              body=body)
-    return resource
-
-
-def get_exception_data(e):
-    """Extract the information about an exception.
-
-    The Neutron client for the v2 API expects exceptions to have 'type',
-    'message' and 'detail' attributes. This information is extracted and
-    converted into a dictionary.
-
-    :param e: the exception to be reraised
-    :returns: a structured dict with the exception data
-    """
-    err_data = {'type': e.__class__.__name__,
-                'message': e, 'detail': ''}
-    return err_data
-
-
-def translate(translatable, locale):
-    """Translates the object to the given locale.
-
-    If the object is an exception its translatable elements are translated
-    in place, if the object is a translatable string it is translated and
-    returned. Otherwise, the object is returned as-is.
-
-    :param translatable: the object to be translated
-    :param locale: the locale to translate to
-    :returns: the translated object, or the object as-is if it
-              was not translated
-    """
-    localize = oslo_i18n.translate
-    if isinstance(translatable, exceptions.NeutronException):
-        translatable.msg = localize(translatable.msg, locale)
-    elif isinstance(translatable, webob.exc.HTTPError):
-        translatable.detail = localize(translatable.detail, locale)
-    elif isinstance(translatable, Exception):
-        translatable.message = localize(translatable, locale)
-    else:
-        return localize(translatable, locale)
-    return translatable
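A behavior sketch for translate(), assuming the module above: exceptions are localized in place and returned, while plain strings come back localized (or unchanged when no catalog entry matches):

exc = webob.exc.HTTPNotFound('missing resource')
assert translate(exc, 'en') is exc  # detail localized in place
assert translate('plain text', 'en') == 'plain text'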
diff --git a/neutron/api/v2/resource_helper.py b/neutron/api/v2/resource_helper.py
deleted file mode 100644 (file)
index bbdc2a1..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-# (c) Copyright 2014 Cisco Systems Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron.api import extensions
-from neutron.api.v2 import base
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.quota import resource_registry
-
-LOG = logging.getLogger(__name__)
-
-
-def build_plural_mappings(special_mappings, resource_map):
-    """Create plural to singular mapping for all resources.
-
-    Allows for special mappings to be provided for particular cases.
-    Otherwise, strips off the last character for normal mappings, like
-    routers -> router, unless the plural name ends with 'ies', in which
-    case the singular form will end with a 'y' (e.g. policies -> policy).
-    """
-    plural_mappings = {}
-    for plural in resource_map:
-        singular = special_mappings.get(plural)
-        if not singular:
-            if plural.endswith('ies'):
-                singular = "%sy" % plural[:-3]
-            else:
-                singular = plural[:-1]
-        plural_mappings[plural] = singular
-    return plural_mappings
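A quick usage sketch for build_plural_mappings(); the resource names are illustrative:

mappings = build_plural_mappings({}, {'routers': {}, 'policies': {}})
assert mappings == {'routers': 'router', 'policies': 'policy'}

# Irregular plurals are supplied through special_mappings:
mappings = build_plural_mappings({'stuff': 'stuff'}, {'stuff': {}})
assert mappings == {'stuff': 'stuff'}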
-
-
-def build_resource_info(plural_mappings, resource_map, which_service,
-                        action_map=None, register_quota=False,
-                        translate_name=False, allow_bulk=False):
-    """Build resources for advanced services.
-
-    Takes the resource information, and singular/plural mappings, and creates
-    API resource objects for advanced services extensions. Will optionally
-    translate underscores to dashes in resource names, register the resource,
-    and accept action information for resources.
-
-    :param plural_mappings: mappings between singular and plural forms
-    :param resource_map: attribute map for the WSGI resources to create
-    :param which_service: The name of the service for which the WSGI resources
-                          are being created. This name will be used to pass
-                          the appropriate plugin to the WSGI resource.
-                          It can be set to None or "CORE" to create WSGI
-                          resources for the core plugin
-    :param action_map: custom resource actions
-    :param register_quota: it can be set to True to register quotas for the
-                           resource(s) being created
-    :param translate_name: replaces underscores with dashes
-    :param allow_bulk: True if bulk create operations are allowed
-    """
-    resources = []
-    if not which_service:
-        which_service = constants.CORE
-    if action_map is None:
-        action_map = {}
-    if which_service != constants.CORE:
-        plugin = manager.NeutronManager.get_service_plugins()[which_service]
-    else:
-        plugin = manager.NeutronManager.get_plugin()
-    path_prefix = getattr(plugin, "path_prefix", "")
-    LOG.debug('Service %(service)s assigned prefix: %(prefix)s',
-              {'service': which_service, 'prefix': path_prefix})
-    for collection_name in resource_map:
-        resource_name = plural_mappings[collection_name]
-        params = resource_map.get(collection_name, {})
-        if translate_name:
-            collection_name = collection_name.replace('_', '-')
-        if register_quota:
-            resource_registry.register_resource_by_name(resource_name)
-        member_actions = action_map.get(resource_name, {})
-        controller = base.create_resource(
-            collection_name, resource_name, plugin, params,
-            member_actions=member_actions,
-            allow_bulk=allow_bulk,
-            allow_pagination=cfg.CONF.allow_pagination,
-            allow_sorting=cfg.CONF.allow_sorting)
-        resource = extensions.ResourceExtension(
-            collection_name,
-            controller,
-            path_prefix=path_prefix,
-            member_actions=member_actions,
-            attr_map=params)
-        resources.append(resource)
-    return resources
diff --git a/neutron/api/v2/router.py b/neutron/api/v2/router.py
deleted file mode 100644 (file)
index 6389c83..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_config import cfg
-from oslo_service import wsgi as base_wsgi
-import routes as routes_mapper
-import six
-import six.moves.urllib.parse as urlparse
-import webob
-import webob.dec
-import webob.exc
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.api.v2 import base
-from neutron import manager
-from neutron import policy
-from neutron.quota import resource_registry
-from neutron import wsgi
-
-
-RESOURCES = {'network': 'networks',
-             'subnet': 'subnets',
-             'subnetpool': 'subnetpools',
-             'port': 'ports'}
-SUB_RESOURCES = {}
-COLLECTION_ACTIONS = ['index', 'create']
-MEMBER_ACTIONS = ['show', 'update', 'delete']
-REQUIREMENTS = {'id': attributes.UUID_PATTERN, 'format': 'json'}
-
-
-class Index(wsgi.Application):
-    def __init__(self, resources):
-        self.resources = resources
-
-    @webob.dec.wsgify(RequestClass=wsgi.Request)
-    def __call__(self, req):
-        metadata = {}
-
-        layout = []
-        for name, collection in six.iteritems(self.resources):
-            href = urlparse.urljoin(req.path_url, collection)
-            resource = {'name': name,
-                        'collection': collection,
-                        'links': [{'rel': 'self',
-                                   'href': href}]}
-            layout.append(resource)
-        response = dict(resources=layout)
-        content_type = req.best_match_content_type()
-        body = wsgi.Serializer(metadata=metadata).serialize(response,
-                                                            content_type)
-        return webob.Response(body=body, content_type=content_type)
-
-
-class APIRouter(base_wsgi.Router):
-
-    @classmethod
-    def factory(cls, global_config, **local_config):
-        return cls(**local_config)
-
-    def __init__(self, **local_config):
-        mapper = routes_mapper.Mapper()
-        plugin = manager.NeutronManager.get_plugin()
-        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-        ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP)
-
-        col_kwargs = dict(collection_actions=COLLECTION_ACTIONS,
-                          member_actions=MEMBER_ACTIONS)
-
-        def _map_resource(collection, resource, params, parent=None):
-            allow_bulk = cfg.CONF.allow_bulk
-            allow_pagination = cfg.CONF.allow_pagination
-            allow_sorting = cfg.CONF.allow_sorting
-            controller = base.create_resource(
-                collection, resource, plugin, params, allow_bulk=allow_bulk,
-                parent=parent, allow_pagination=allow_pagination,
-                allow_sorting=allow_sorting)
-            path_prefix = None
-            if parent:
-                path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
-                                                  parent['member_name'],
-                                                  collection)
-            mapper_kwargs = dict(controller=controller,
-                                 requirements=REQUIREMENTS,
-                                 path_prefix=path_prefix,
-                                 **col_kwargs)
-            return mapper.collection(collection, resource,
-                                     **mapper_kwargs)
-
-        mapper.connect('index', '/', controller=Index(RESOURCES))
-        for resource in RESOURCES:
-            _map_resource(RESOURCES[resource], resource,
-                          attributes.RESOURCE_ATTRIBUTE_MAP.get(
-                              RESOURCES[resource], dict()))
-            resource_registry.register_resource_by_name(resource)
-
-        for resource in SUB_RESOURCES:
-            _map_resource(SUB_RESOURCES[resource]['collection_name'], resource,
-                          attributes.RESOURCE_ATTRIBUTE_MAP.get(
-                              SUB_RESOURCES[resource]['collection_name'],
-                              dict()),
-                          SUB_RESOURCES[resource]['parent'])
-
-        # Certain policy checks require that the extensions are loaded
-        # and the RESOURCE_ATTRIBUTE_MAP populated before they can be
-        # properly initialized. This can only be claimed with certainty
-        # once this point in the code has been reached. In the event
-        # that the policies have been initialized before this point,
-        # calling reset will cause the next policy check to
-        # re-initialize with all of the required data in place.
-        policy.reset()
-        super(APIRouter, self).__init__(mapper)
diff --git a/neutron/api/versions.py b/neutron/api/versions.py
deleted file mode 100644 (file)
index 8a70773..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2011 Citrix Systems.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import oslo_i18n
-import webob.dec
-
-from neutron._i18n import _
-from neutron.api.views import versions as versions_view
-from neutron import wsgi
-
-
-class Versions(object):
-
-    @classmethod
-    def factory(cls, global_config, **local_config):
-        return cls()
-
-    @webob.dec.wsgify(RequestClass=wsgi.Request)
-    def __call__(self, req):
-        """Respond to a request for all Neutron API versions."""
-        version_objs = [
-            {
-                "id": "v2.0",
-                "status": "CURRENT",
-            },
-        ]
-
-        if req.path != '/':
-            language = req.best_match_language()
-            msg = _('Unknown API version specified')
-            msg = oslo_i18n.translate(msg, language)
-            return webob.exc.HTTPNotFound(explanation=msg)
-
-        builder = versions_view.get_view_builder(req)
-        versions = [builder.build(version) for version in version_objs]
-        response = dict(versions=versions)
-        metadata = {}
-
-        content_type = req.best_match_content_type()
-        body = (wsgi.Serializer(metadata=metadata).
-                serialize(response, content_type))
-
-        response = webob.Response()
-        response.content_type = content_type
-        response.body = wsgi.encode_body(body)
-
-        return response
diff --git a/neutron/api/views/__init__.py b/neutron/api/views/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/api/views/versions.py b/neutron/api/views/versions.py
deleted file mode 100644 (file)
index d097bb7..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2010-2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-
-def get_view_builder(req):
-    base_url = req.application_url
-    return ViewBuilder(base_url)
-
-
-class ViewBuilder(object):
-
-    def __init__(self, base_url):
-        """Object initialization.
-
-        :param base_url: url of the root wsgi application
-        """
-        self.base_url = base_url
-
-    def build(self, version_data):
-        """Generic method used to generate a version entity."""
-        version = {
-            "id": version_data["id"],
-            "status": version_data["status"],
-            "links": self._build_links(version_data),
-        }
-
-        return version
-
-    def _build_links(self, version_data):
-        """Generate a container of links that refer to the provided version."""
-        href = self.generate_href(version_data["id"])
-
-        links = [
-            {
-                "rel": "self",
-                "href": href,
-            },
-        ]
-
-        return links
-
-    def generate_href(self, version_number):
-        """Create an url that refers to a specific version_number."""
-        return os.path.join(self.base_url, version_number)
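A minimal sketch of the builder in isolation, assuming a hypothetical base URL; the output mirrors build() and _build_links() above:

    builder = ViewBuilder('http://controller:9696/')
    print(builder.build({'id': 'v2.0', 'status': 'CURRENT'}))
    # {'id': 'v2.0', 'status': 'CURRENT',
    #  'links': [{'rel': 'self',
    #             'href': 'http://controller:9696/v2.0'}]}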
diff --git a/neutron/auth.py b/neutron/auth.py
deleted file mode 100644 (file)
index 5d972b8..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#    Copyright 2012 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_middleware import base
-from oslo_middleware import request_id
-import webob.dec
-import webob.exc
-
-from neutron import context
-
-LOG = logging.getLogger(__name__)
-
-
-class NeutronKeystoneContext(base.ConfigurableMiddleware):
-    """Make a request context from keystone headers."""
-
-    @webob.dec.wsgify
-    def __call__(self, req):
-        # Determine the user ID
-        user_id = req.headers.get('X_USER_ID')
-        if not user_id:
-            LOG.debug("X_USER_ID is not found in request")
-            return webob.exc.HTTPUnauthorized()
-
-        # Determine the tenant
-        tenant_id = req.headers.get('X_PROJECT_ID')
-
-        # Suck out the roles
-        roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')]
-
-        # Human-friendly names
-        tenant_name = req.headers.get('X_PROJECT_NAME')
-        user_name = req.headers.get('X_USER_NAME')
-
-        # Use request_id if already set
-        req_id = req.environ.get(request_id.ENV_REQUEST_ID)
-
-        # Get the auth token
-        auth_token = req.headers.get('X_AUTH_TOKEN',
-                                     req.headers.get('X_STORAGE_TOKEN'))
-
-        # Create a context with the authentication data
-        ctx = context.Context(user_id, tenant_id, roles=roles,
-                              user_name=user_name, tenant_name=tenant_name,
-                              request_id=req_id, auth_token=auth_token)
-
-        # Inject the context...
-        req.environ['neutron.context'] = ctx
-
-        return self.application
-
-
-def pipeline_factory(loader, global_conf, **local_conf):
-    """Create a paste pipeline based on the 'auth_strategy' config option."""
-    pipeline = local_conf[cfg.CONF.auth_strategy]
-    pipeline = pipeline.split()
-    filters = [loader.get_filter(n) for n in pipeline[:-1]]
-    app = loader.get_app(pipeline[-1])
-    filters.reverse()
-    for filter in filters:
-        app = filter(app)
-    return app
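pipeline_factory looks up the pipeline whose key matches the configured auth_strategy, loads the last name as the app, and wraps the remaining names around it as filters, outermost first. A sketch of the api-paste.ini composite section it consumes (section and filter names mirror the stock neutron layout, but treat them as illustrative):

    [composite:neutronapi_v2_0]
    use = call:neutron.auth:pipeline_factory
    noauth = request_id catch_errors extensions neutronapiapp_v2_0
    keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0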
diff --git a/neutron/callbacks/__init__.py b/neutron/callbacks/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/callbacks/events.py b/neutron/callbacks/events.py
deleted file mode 100644 (file)
index 7dfd83d..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# String literals representing core events.
-BEFORE_CREATE = 'before_create'
-BEFORE_READ = 'before_read'
-BEFORE_UPDATE = 'before_update'
-BEFORE_DELETE = 'before_delete'
-
-AFTER_CREATE = 'after_create'
-AFTER_READ = 'after_read'
-AFTER_UPDATE = 'after_update'
-AFTER_DELETE = 'after_delete'
-
-ABORT_CREATE = 'abort_create'
-ABORT_READ = 'abort_read'
-ABORT_UPDATE = 'abort_update'
-ABORT_DELETE = 'abort_delete'
-
-ABORT = 'abort_'
-BEFORE = 'before_'
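The bare ABORT/BEFORE prefixes exist so the callbacks manager can derive the abort event from a failed BEFORE_* event by simple prefix substitution; for instance:

    assert BEFORE_CREATE.replace(BEFORE, ABORT) == ABORT_CREATE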
diff --git a/neutron/callbacks/exceptions.py b/neutron/callbacks/exceptions.py
deleted file mode 100644 (file)
index be5e6d5..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.common import exceptions
-
-
-class Invalid(exceptions.NeutronException):
-    message = _("The value '%(value)s' for %(element)s is not valid.")
-
-
-class CallbackFailure(Exception):
-
-    def __init__(self, errors):
-        self.errors = errors
-
-    def __str__(self):
-        if isinstance(self.errors, list):
-            return ','.join(str(error) for error in self.errors)
-        else:
-            return str(self.errors)
-
-
-class NotificationError(object):
-
-    def __init__(self, callback_id, error):
-        self.callback_id = callback_id
-        self.error = error
-
-    def __str__(self):
-        return 'Callback %s failed with "%s"' % (self.callback_id, self.error)
diff --git a/neutron/callbacks/manager.py b/neutron/callbacks/manager.py
deleted file mode 100644 (file)
index 625f520..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-
-from oslo_log import log as logging
-from oslo_utils import reflection
-
-from neutron._i18n import _LE
-from neutron.callbacks import events
-from neutron.callbacks import exceptions
-
-LOG = logging.getLogger(__name__)
-
-
-class CallbacksManager(object):
-    """A callback system that allows objects to cooperate in a loose manner."""
-
-    def __init__(self):
-        self.clear()
-
-    def subscribe(self, callback, resource, event):
-        """Subscribe callback for a resource event.
-
-        The same callback may register for more than one event.
-
-        :param callback: the callback. It must raise or return a boolean.
-        :param resource: the resource. It must be a valid resource.
-        :param event: the event. It must be a valid event.
-        """
-        LOG.debug("Subscribe: %(callback)s %(resource)s %(event)s",
-                  {'callback': callback, 'resource': resource, 'event': event})
-
-        callback_id = _get_id(callback)
-        try:
-            self._callbacks[resource][event][callback_id] = callback
-        except KeyError:
-            # Initialize the registry for unknown resources and/or events
-            # prior to enlisting the callback.
-            self._callbacks[resource][event] = {}
-            self._callbacks[resource][event][callback_id] = callback
-        # We keep a copy of callbacks to speed the unsubscribe operation.
-        if callback_id not in self._index:
-            self._index[callback_id] = collections.defaultdict(set)
-        self._index[callback_id][resource].add(event)
-
-    def unsubscribe(self, callback, resource, event):
-        """Unsubscribe callback from the registry.
-
-        :param callback: the callback.
-        :param resource: the resource.
-        :param event: the event.
-        """
-        LOG.debug("Unsubscribe: %(callback)s %(resource)s %(event)s",
-                  {'callback': callback, 'resource': resource, 'event': event})
-
-        callback_id = self._find(callback)
-        if not callback_id:
-            LOG.debug("Callback %s not found", callback_id)
-            return
-        if resource and event:
-            del self._callbacks[resource][event][callback_id]
-            self._index[callback_id][resource].discard(event)
-            if not self._index[callback_id][resource]:
-                del self._index[callback_id][resource]
-                if not self._index[callback_id]:
-                    del self._index[callback_id]
-        else:
-            value = '%s,%s' % (resource, event)
-            raise exceptions.Invalid(element='resource,event', value=value)
-
-    def unsubscribe_by_resource(self, callback, resource):
-        """Unsubscribe callback for any event associated to the resource.
-
-        :param callback: the callback.
-        :param resource: the resource.
-        """
-        callback_id = self._find(callback)
-        if callback_id:
-            if resource in self._index[callback_id]:
-                for event in self._index[callback_id][resource]:
-                    del self._callbacks[resource][event][callback_id]
-                del self._index[callback_id][resource]
-                if not self._index[callback_id]:
-                    del self._index[callback_id]
-
-    def unsubscribe_all(self, callback):
-        """Unsubscribe callback for all events and all resources.
-
-        :param callback: the callback.
-        """
-        callback_id = self._find(callback)
-        if callback_id:
-            for resource, resource_events in self._index[callback_id].items():
-                for event in resource_events:
-                    del self._callbacks[resource][event][callback_id]
-            del self._index[callback_id]
-
-    def notify(self, resource, event, trigger, **kwargs):
-        """Notify all subscribed callback(s).
-
-        Dispatch the resource's event to the subscribed callbacks.
-
-        :param resource: the resource.
-        :param event: the event.
-        :param trigger: the trigger. A reference to the sender of the event.
-        """
-        errors = self._notify_loop(resource, event, trigger, **kwargs)
-        if errors and event.startswith(events.BEFORE):
-            abort_event = event.replace(
-                events.BEFORE, events.ABORT)
-            self._notify_loop(resource, abort_event, trigger, **kwargs)
-            raise exceptions.CallbackFailure(errors=errors)
-
-    def clear(self):
-        """Brings the manager to a clean slate."""
-        self._callbacks = collections.defaultdict(dict)
-        self._index = collections.defaultdict(dict)
-
-    def _notify_loop(self, resource, event, trigger, **kwargs):
-        """The notification loop."""
-        LOG.debug("Notify callbacks for %(resource)s, %(event)s",
-                  {'resource': resource, 'event': event})
-
-        errors = []
-        callbacks = self._callbacks[resource].get(event, {}).items()
-        # TODO(armax): consider using a GreenPile
-        for callback_id, callback in callbacks:
-            try:
-                LOG.debug("Calling callback %s", callback_id)
-                callback(resource, event, trigger, **kwargs)
-            except Exception as e:
-                LOG.exception(_LE("Error during notification for "
-                                  "%(callback)s %(resource)s, %(event)s"),
-                              {'callback': callback_id,
-                               'resource': resource,
-                               'event': event})
-                errors.append(exceptions.NotificationError(callback_id, e))
-        return errors
-
-    def _find(self, callback):
-        """Return the callback_id if found, None otherwise."""
-        callback_id = _get_id(callback)
-        return callback_id if callback_id in self._index else None
-
-
-def _get_id(callback):
-    """Return a unique identifier for the callback."""
-    # TODO(armax): consider using something other than names
-    # https://www.python.org/dev/peps/pep-3155/, but this
-    # might be okay for now.
-    return reflection.get_callable_name(callback)
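A minimal sketch of the BEFORE_*/abort contract that notify() implements above; the resource and event strings match neutron.callbacks.resources and .events:

    from neutron.callbacks import exceptions
    from neutron.callbacks import manager

    def require_name(resource, event, trigger, **kwargs):
        # A BEFORE_* callback vetoes the operation by raising.
        if not kwargs.get('name'):
            raise ValueError('name is required')

    mgr = manager.CallbacksManager()
    mgr.subscribe(require_name, 'port', 'before_create')
    try:
        mgr.notify('port', 'before_create', None, name='')
    except exceptions.CallbackFailure as exc:
        # notify() already ran the matching 'abort_create' pass before
        # raising; exc aggregates the per-callback errors.
        print(exc)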
diff --git a/neutron/callbacks/registry.py b/neutron/callbacks/registry.py
deleted file mode 100644 (file)
index 0e8f0ab..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.callbacks import manager
-
-
-# TODO(armax): consider adding locking
-CALLBACK_MANAGER = None
-
-
-def _get_callback_manager():
-    global CALLBACK_MANAGER
-    if CALLBACK_MANAGER is None:
-        CALLBACK_MANAGER = manager.CallbacksManager()
-    return CALLBACK_MANAGER
-
-
-def subscribe(callback, resource, event):
-    _get_callback_manager().subscribe(callback, resource, event)
-
-
-def unsubscribe(callback, resource, event):
-    _get_callback_manager().unsubscribe(callback, resource, event)
-
-
-def unsubscribe_by_resource(callback, resource):
-    _get_callback_manager().unsubscribe_by_resource(callback, resource)
-
-
-def unsubscribe_all(callback):
-    _get_callback_manager().unsubscribe_all(callback)
-
-
-def notify(resource, event, trigger, **kwargs):
-    _get_callback_manager().notify(resource, event, trigger, **kwargs)
-
-
-def clear():
-    _get_callback_manager().clear()
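A sketch of typical use of this module-level facade, which is how the rest of the tree consumed the callbacks system:

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from neutron.callbacks import resources

    def log_port_create(resource, event, trigger, **kwargs):
        print('%s %s fired by %s' % (resource, event, trigger))

    registry.subscribe(log_port_create, resources.PORT, events.AFTER_CREATE)
    registry.notify(resources.PORT, events.AFTER_CREATE, 'ml2-plugin')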
diff --git a/neutron/callbacks/resources.py b/neutron/callbacks/resources.py
deleted file mode 100644 (file)
index a0fd4c0..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# String literals representing core resources.
-PORT = 'port'
-PROCESS = 'process'
-ROUTER = 'router'
-ROUTER_GATEWAY = 'router_gateway'
-ROUTER_INTERFACE = 'router_interface'
-SECURITY_GROUP = 'security_group'
-SECURITY_GROUP_RULE = 'security_group_rule'
-SUBNET = 'subnet'
-SUBNET_GATEWAY = 'subnet_gateway'
diff --git a/neutron/cmd/__init__.py b/neutron/cmd/__init__.py
deleted file mode 100644 (file)
index 29fcdc5..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_reports import guru_meditation_report as gmr
-
-from neutron import version
-
-_version_string = version.version_info.release_string()
-gmr.TextGuruMeditation.setup_autorun(version=_version_string)
diff --git a/neutron/cmd/eventlet/__init__.py b/neutron/cmd/eventlet/__init__.py
deleted file mode 100644 (file)
index 01f9f69..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import eventlet_utils
-
-eventlet_utils.monkey_patch()
diff --git a/neutron/cmd/eventlet/agents/__init__.py b/neutron/cmd/eventlet/agents/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/cmd/eventlet/agents/dhcp.py b/neutron/cmd/eventlet/agents/dhcp.py
deleted file mode 100644 (file)
index f1c04cf..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent import dhcp_agent
-
-
-def main():
-    dhcp_agent.main()
diff --git a/neutron/cmd/eventlet/agents/l3.py b/neutron/cmd/eventlet/agents/l3.py
deleted file mode 100644 (file)
index e44acd5..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent import l3_agent
-
-
-def main():
-    l3_agent.main()
diff --git a/neutron/cmd/eventlet/agents/metadata.py b/neutron/cmd/eventlet/agents/metadata.py
deleted file mode 100644 (file)
index 17ad50a..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent import metadata_agent
-
-
-def main():
-    metadata_agent.main()
diff --git a/neutron/cmd/eventlet/agents/metadata_proxy.py b/neutron/cmd/eventlet/agents/metadata_proxy.py
deleted file mode 100644 (file)
index dc61c14..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.metadata import namespace_proxy
-
-
-def main():
-    namespace_proxy.main()
diff --git a/neutron/cmd/eventlet/plugins/__init__.py b/neutron/cmd/eventlet/plugins/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py b/neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py
deleted file mode 100644 (file)
index 2ffc230..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import \
-    neutron.plugins.ml2.drivers.linuxbridge.agent.linuxbridge_neutron_agent \
-    as agent_main
-
-
-def main():
-    agent_main.main()
diff --git a/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py b/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py
deleted file mode 100644 (file)
index 1f7d2cd..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2015 Cloudbase Solutions.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import neutron.plugins.ml2.drivers.openvswitch.agent.main as agent_main
-
-
-def main():
-    agent_main.main()
diff --git a/neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py b/neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py
deleted file mode 100644 (file)
index 7695fc4..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import neutron.plugins.ml2.drivers.mech_sriov.agent.sriov_nic_agent \
-        as agent_main
-
-
-def main():
-    agent_main.main()
diff --git a/neutron/cmd/eventlet/server/__init__.py b/neutron/cmd/eventlet/server/__init__.py
deleted file mode 100644 (file)
index 7f1831f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron.server import rpc_eventlet
-from neutron.server import wsgi_eventlet
-from neutron.server import wsgi_pecan
-
-
-def main():
-    if cfg.CONF.web_framework == 'legacy':
-        main_wsgi_eventlet()
-    else:
-        main_wsgi_pecan()
-
-
-def main_wsgi_eventlet():
-    wsgi_eventlet.main()
-
-
-# Eventlet patching is not required for Pecan, but some plugins still spawn
-# eventlet threads
-def main_wsgi_pecan():
-    wsgi_pecan.main()
-
-
-def main_rpc_eventlet():
-    rpc_eventlet.main()
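A sketch of the neutron.conf knob that main() above switches on; 'legacy' selects the eventlet WSGI server, while the alternative value (pecan) selects the Pecan server:

    [DEFAULT]
    web_framework = legacy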
diff --git a/neutron/cmd/eventlet/services/__init__.py b/neutron/cmd/eventlet/services/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/cmd/eventlet/services/metering_agent.py b/neutron/cmd/eventlet/services/metering_agent.py
deleted file mode 100644 (file)
index b5f9b3f..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.services.metering.agents import metering_agent
-
-
-def main():
-    metering_agent.main()
diff --git a/neutron/cmd/eventlet/usage_audit.py b/neutron/cmd/eventlet/usage_audit.py
deleted file mode 100644 (file)
index 402d92c..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) 2012 New Dream Network, LLC (DreamHost)
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""Cron script to generate usage notifications for networks, ports and
-subnets.
-
-"""
-
-import sys
-
-from neutron.common import config
-from neutron.common import rpc as n_rpc
-from neutron import context
-from neutron import manager
-from neutron.plugins.common import constants
-
-
-def main():
-    config.init(sys.argv[1:])
-    config.setup_logging()
-
-    cxt = context.get_admin_context()
-    plugin = manager.NeutronManager.get_plugin()
-    l3_plugin = manager.NeutronManager.get_service_plugins().get(
-            constants.L3_ROUTER_NAT)
-    notifier = n_rpc.get_notifier('network')
-    for network in plugin.get_networks(cxt):
-        notifier.info(cxt, 'network.exists', {'network': network})
-    for subnet in plugin.get_subnets(cxt):
-        notifier.info(cxt, 'subnet.exists', {'subnet': subnet})
-    for port in plugin.get_ports(cxt):
-        notifier.info(cxt, 'port.exists', {'port': port})
-    for router in l3_plugin.get_routers(cxt):
-        notifier.info(cxt, 'router.exists', {'router': router})
-    for floatingip in l3_plugin.get_floatingips(cxt):
-        notifier.info(cxt, 'floatingip.exists', {'floatingip': floatingip})
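A hedged example of scheduling this as the cron script the docstring describes, assuming the usual neutron-usage-audit console script and an illustrative config path:

    # m h dom mon dow   command
    0 * * * * neutron-usage-audit --config-file /etc/neutron/neutron.conf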
diff --git a/neutron/cmd/ipset_cleanup.py b/neutron/cmd/ipset_cleanup.py
deleted file mode 100644 (file)
index 9448b44..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _, _LE, _LI
-from neutron.agent.linux import ipset_manager
-from neutron.agent.linux import utils
-from neutron.common import config
-
-
-LOG = logging.getLogger(__name__)
-
-
-def setup_conf():
-    """Setup the cfg for the clean up utility.
-
-    Use separate setup_conf for the utility because there are many options
-    from the main config that do not apply during clean-up.
-    """
-
-    cli_opts = [
-        cfg.BoolOpt('allsets',
-                    default=False,
-                    help=_('Destroy all IPsets.')),
-        cfg.BoolOpt('force',
-                    default=False,
-                    help=_('Destroy IPsets even if there is an iptables '
-                           'reference.')),
-        cfg.StrOpt('prefix',
-                   default=ipset_manager.NET_PREFIX,
-                   help=_('String prefix used to match IPset names.')),
-    ]
-
-    conf = cfg.CONF
-    conf.register_cli_opts(cli_opts)
-    return conf
-
-
-def remove_iptables_reference(ipset):
-    # Remove any iptables reference to this IPset
-    cmd = ['iptables-save'] if 'IPv4' in ipset else ['ip6tables-save']
-    iptables_save = utils.execute(cmd, run_as_root=True)
-
-    if ipset in iptables_save:
-        cmd = ['iptables'] if 'IPv4' in ipset else ['ip6tables']
-        LOG.info(_LI("Removing iptables rule for IPset: %s"), ipset)
-        for rule in iptables_save.splitlines():
-            if '--match-set %s ' % ipset in rule and rule.startswith('-A'):
-                # change to delete
-                params = rule.split()
-                params[0] = '-D'
-                try:
-                    utils.execute(cmd + params, run_as_root=True)
-                except Exception:
-                    LOG.exception(_LE('Error, unable to remove iptables rule '
-                                      'for IPset: %s'), ipset)
-
-
-def destroy_ipset(conf, ipset):
-    # If there is an iptables reference and we don't remove it, the
-    # IPset removal will fail below
-    if conf.force:
-        remove_iptables_reference(ipset)
-
-    LOG.info(_LI("Destroying IPset: %s"), ipset)
-    cmd = ['ipset', 'destroy', ipset]
-    try:
-        utils.execute(cmd, run_as_root=True)
-    except Exception:
-        LOG.exception(_LE('Error, unable to destroy IPset: %s'), ipset)
-
-
-def cleanup_ipsets(conf):
-    # Identify ipsets for destruction.
-    LOG.info(_LI("Destroying IPsets with prefix: %s"), conf.prefix)
-
-    cmd = ['ipset', '-L', '-n']
-    ipsets = utils.execute(cmd, run_as_root=True)
-    for ipset in ipsets.split('\n'):
-        if conf.allsets or ipset.startswith(conf.prefix):
-            destroy_ipset(conf, ipset)
-
-    LOG.info(_LI("IPset cleanup completed successfully"))
-
-
-def main():
-    """Main method for cleaning up IPsets.
-
-    The utility is designed to clean up after the forced or unexpected
-    termination of Neutron agents.
-
-    The --allsets flag should only be used as part of the cleanup of a devstack
-    installation as it will blindly destroy all IPsets.
-    """
-    conf = setup_conf()
-    conf()
-    config.setup_logging()
-    cleanup_ipsets(conf)
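Hedged invocation examples for the options registered above, assuming the usual neutron-ipset-cleanup console script:

    neutron-ipset-cleanup                    # destroy sets matching --prefix
    neutron-ipset-cleanup --allsets --force  # devstack teardown: destroy all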
diff --git a/neutron/cmd/keepalived_state_change.py b/neutron/cmd/keepalived_state_change.py
deleted file mode 100644 (file)
index eca618a..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.l3 import keepalived_state_change
-
-
-def main():
-    keepalived_state_change.main()
diff --git a/neutron/cmd/linuxbridge_cleanup.py b/neutron/cmd/linuxbridge_cleanup.py
deleted file mode 100644 (file)
index 4fa1976..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _LE, _LI
-from neutron.common import config
-from neutron.common import utils as n_utils
-from neutron.plugins.ml2.drivers.linuxbridge.agent \
-    import linuxbridge_neutron_agent
-
-
-LOG = logging.getLogger(__name__)
-
-
-def remove_empty_bridges():
-    try:
-        interface_mappings = n_utils.parse_mappings(
-            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
-    except ValueError as e:
-        LOG.error(_LE("Parsing physical_interface_mappings failed: %s."), e)
-        sys.exit(1)
-    LOG.info(_LI("Interface mappings: %s."), interface_mappings)
-
-    try:
-        bridge_mappings = n_utils.parse_mappings(
-            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
-    except ValueError as e:
-        LOG.error(_LE("Parsing bridge_mappings failed: %s."), e)
-        sys.exit(1)
-    LOG.info(_LI("Bridge mappings: %s."), bridge_mappings)
-
-    lb_manager = linuxbridge_neutron_agent.LinuxBridgeManager(
-        bridge_mappings, interface_mappings)
-
-    bridge_names = lb_manager.get_deletable_bridges()
-    for bridge_name in bridge_names:
-        if lb_manager.get_tap_devices_count(bridge_name):
-            continue
-
-        try:
-            lb_manager.delete_bridge(bridge_name)
-            LOG.info(_LI("Linux bridge %s deleted"), bridge_name)
-        except RuntimeError:
-            LOG.exception(_LE("Linux bridge %s delete failed"), bridge_name)
-    LOG.info(_LI("Linux bridge cleanup completed successfully"))
-
-
-def main():
-    """Main method for cleaning up empty linux bridges.
-
-    This tool deletes every empty linux bridge managed by the linuxbridge
-    agent (brq.* linux bridges) except the ones defined via the
-    bridge_mappings option in the LINUX_BRIDGE section (created by deployers).
-
-    This tool should not be called during an instance create, migrate, etc. as
-    it can delete a linux bridge about to be used by nova.
-    """
-    cfg.CONF(sys.argv[1:])
-    config.setup_logging()
-    remove_empty_bridges()
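A hedged invocation example, assuming the usual neutron-linuxbridge-cleanup console script and an illustrative agent config path carrying the LINUX_BRIDGE section:

    neutron-linuxbridge-cleanup \
        --config-file /etc/neutron/plugins/ml2/linuxbridge_agent.ini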
diff --git a/neutron/cmd/netns_cleanup.py b/neutron/cmd/netns_cleanup.py
deleted file mode 100644 (file)
index 3d59ef9..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import importutils
-
-from neutron._i18n import _, _LE
-from neutron.agent.common import config as agent_config
-from neutron.agent.common import ovs_lib
-from neutron.agent.dhcp import config as dhcp_config
-from neutron.agent.l3 import agent as l3_agent
-from neutron.agent.l3 import dvr
-from neutron.agent.l3 import dvr_fip_ns
-from neutron.agent.linux import dhcp
-from neutron.agent.linux import external_process
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.api.v2 import attributes
-from neutron.common import config
-
-
-LOG = logging.getLogger(__name__)
-LB_NS_PREFIX = 'qlbaas-'
-NS_MANGLING_PATTERN = ('(%s|%s|%s|%s|%s)' % (dhcp.NS_PREFIX,
-                                             l3_agent.NS_PREFIX,
-                                             dvr.SNAT_NS_PREFIX,
-                                             dvr_fip_ns.FIP_NS_PREFIX,
-                                             LB_NS_PREFIX) +
-                       attributes.UUID_PATTERN)
-
-
-class FakeDhcpPlugin(object):
-    """Fake RPC plugin to bypass any RPC calls."""
-    def __getattribute__(self, name):
-        def fake_method(*args):
-            pass
-        return fake_method
-
-
-def setup_conf():
-    """Setup the cfg for the clean up utility.
-
-    Use separate setup_conf for the utility because there are many options
-    from the main config that do not apply during clean-up.
-    """
-
-    cli_opts = [
-        cfg.BoolOpt('force',
-                    default=False,
-                    help=_('Delete the namespace by removing all devices.')),
-    ]
-
-    conf = cfg.CONF
-    conf.register_cli_opts(cli_opts)
-    agent_config.register_interface_driver_opts_helper(conf)
-    conf.register_opts(dhcp_config.DHCP_AGENT_OPTS)
-    conf.register_opts(dhcp_config.DHCP_OPTS)
-    conf.register_opts(dhcp_config.DNSMASQ_OPTS)
-    conf.register_opts(interface.OPTS)
-    return conf
-
-
-def _get_dhcp_process_monitor(config):
-    return external_process.ProcessMonitor(config=config,
-                                           resource_type='dhcp')
-
-
-def kill_dhcp(conf, namespace):
-    """Disable DHCP for a network if DHCP is still active."""
-    network_id = namespace.replace(dhcp.NS_PREFIX, '')
-
-    dhcp_driver = importutils.import_object(
-        conf.dhcp_driver,
-        conf=conf,
-        process_monitor=_get_dhcp_process_monitor(conf),
-        network=dhcp.NetModel({'id': network_id}),
-        plugin=FakeDhcpPlugin())
-
-    if dhcp_driver.active:
-        dhcp_driver.disable()
-
-
-def eligible_for_deletion(conf, namespace, force=False):
-    """Determine whether a namespace is eligible for deletion.
-
-    A namespace is eligible for deletion when it contains only the loopback
-    device, or unconditionally when force is passed as a parameter.
-    """
-
-    # filter out namespaces without UUID as the name
-    if not re.match(NS_MANGLING_PATTERN, namespace):
-        return False
-
-    ip = ip_lib.IPWrapper(namespace=namespace)
-    return force or ip.namespace_is_empty()
-
-
-def unplug_device(conf, device):
-    orig_log_fail_as_error = device.get_log_fail_as_error()
-    device.set_log_fail_as_error(False)
-    try:
-        device.link.delete()
-    except RuntimeError:
-        device.set_log_fail_as_error(orig_log_fail_as_error)
-        # Maybe the device is OVS port, so try to delete
-        ovs = ovs_lib.BaseOVS()
-        bridge_name = ovs.get_bridge_for_iface(device.name)
-        if bridge_name:
-            bridge = ovs_lib.OVSBridge(bridge_name)
-            bridge.delete_port(device.name)
-        else:
-            LOG.debug('Unable to find bridge for device: %s', device.name)
-    finally:
-        device.set_log_fail_as_error(orig_log_fail_as_error)
-
-
-def destroy_namespace(conf, namespace, force=False):
-    """Destroy a given namespace.
-
-    If force is True, then dhcp (if it exists) will be disabled and all
-    devices will be forcibly removed.
-    """
-
-    try:
-        ip = ip_lib.IPWrapper(namespace=namespace)
-
-        if force:
-            kill_dhcp(conf, namespace)
-            # NOTE: The dhcp driver will remove the namespace if it is empty,
-            # so a second check is required here.
-            if ip.netns.exists(namespace):
-                for device in ip.get_devices(exclude_loopback=True):
-                    unplug_device(conf, device)
-
-        ip.garbage_collect_namespace()
-    except Exception:
-        LOG.exception(_LE('Error, unable to destroy namespace: %s'), namespace)
-
-
-def cleanup_network_namespaces(conf):
-    # Identify namespaces that are candidates for deletion.
-    candidates = [ns for ns in
-                  ip_lib.IPWrapper.get_namespaces()
-                  if eligible_for_deletion(conf, ns, conf.force)]
-
-    if candidates:
-        time.sleep(2)
-
-        for namespace in candidates:
-            destroy_namespace(conf, namespace, conf.force)
-
-
-def main():
-    """Main method for cleaning up network namespaces.
-
-    This method will make two passes checking for namespaces to delete. The
-    process will identify candidates, sleep, and call garbage collection. The
-    garbage collection will re-verify that the namespace meets the criteria
-    for deletion (i.e. it is empty). The sleep and the second pass allow time
-    for the namespace state to settle, so that the check prior to deletion
-    will re-confirm that the namespace is empty.
-
-    The utility is designed to clean up after the forced or unexpected
-    termination of Neutron agents.
-
-    The --force flag should only be used as part of the cleanup of a devstack
-    installation as it will blindly purge namespaces and their devices. This
-    option also kills any lingering DHCP instances.
-    """
-    conf = setup_conf()
-    conf()
-    config.setup_logging()
-    cleanup_network_namespaces(conf)
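A hedged invocation example, assuming the usual neutron-netns-cleanup console script; the agent config file supplies the dhcp_driver and interface options registered above:

    neutron-netns-cleanup --force --config-file /etc/neutron/dhcp_agent.ini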
diff --git a/neutron/cmd/ovs_cleanup.py b/neutron/cmd/ovs_cleanup.py
deleted file mode 100644 (file)
index 4571912..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _, _LI
-from neutron.agent.common import config as agent_config
-from neutron.agent.common import ovs_lib
-from neutron.agent.l3 import config as l3_config
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.common import config
-
-
-LOG = logging.getLogger(__name__)
-
-
-def setup_conf():
-    """Setup the cfg for the clean up utility.
-
-    Use separate setup_conf for the utility because there are many options
-    from the main config that do not apply during clean-up.
-    """
-    opts = [
-        cfg.BoolOpt('ovs_all_ports',
-                    default=False,
-                    help=_('True to delete all ports on all the OpenvSwitch '
-                           'bridges. False to delete ports created by '
-                           'Neutron on integration and external network '
-                           'bridges.'))
-    ]
-
-    conf = cfg.CONF
-    conf.register_cli_opts(opts)
-    conf.register_opts(l3_config.OPTS)
-    conf.register_opts(interface.OPTS)
-    agent_config.register_interface_driver_opts_helper(conf)
-    return conf
-
-
-def collect_neutron_ports(bridges):
-    """Collect ports created by Neutron from OVS."""
-    ports = []
-    for bridge in bridges:
-        ovs = ovs_lib.OVSBridge(bridge)
-        ports += [port.port_name for port in ovs.get_vif_ports()]
-    return ports
-
-
-def delete_neutron_ports(ports):
-    """Delete non-internal ports created by Neutron
-
-    Non-internal OVS ports need to be removed manually.
-    """
-    for port in ports:
-        device = ip_lib.IPDevice(port)
-        if device.exists():
-            device.link.delete()
-            LOG.info(_LI("Deleting port: %s"), port)
-
-
-def main():
-    """Main method for cleaning up OVS bridges.
-
-    The utility cleans up the integration bridges used by Neutron.
-    """
-
-    conf = setup_conf()
-    conf()
-    config.setup_logging()
-
-    configuration_bridges = set([conf.ovs_integration_bridge,
-                                 conf.external_network_bridge])
-    ovs = ovs_lib.BaseOVS()
-    ovs_bridges = set(ovs.get_bridges())
-    available_configuration_bridges = configuration_bridges & ovs_bridges
-
-    if conf.ovs_all_ports:
-        bridges = ovs_bridges
-    else:
-        bridges = available_configuration_bridges
-
-    # Collect existing ports created by Neutron on configuration bridges.
-    # After deleting ports from OVS bridges, we cannot determine which
-    # ports were created by Neutron, so port information is collected now.
-    ports = collect_neutron_ports(available_configuration_bridges)
-
-    for bridge in bridges:
-        LOG.info(_LI("Cleaning bridge: %s"), bridge)
-        ovs = ovs_lib.OVSBridge(bridge)
-        ovs.delete_ports(all_ports=conf.ovs_all_ports)
-
-    # Remove remaining ports created by Neutron (usually veth pair)
-    delete_neutron_ports(ports)
-
-    LOG.info(_LI("OVS cleanup completed successfully"))
diff --git a/neutron/cmd/pd_notify.py b/neutron/cmd/pd_notify.py
deleted file mode 100644 (file)
index 9dff494..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (c) 2015 Cisco Systems.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import signal
-import sys
-
-from neutron.common import utils
-
-
-def main():
-    """Expected arguments:
-    sys.argv[1] - The add/update/delete operation performed by the PD agent
-    sys.argv[2] - The file where the new prefix should be written
-    sys.argv[3] - The process ID of the L3 agent to be notified of this change
-    """
-    operation = sys.argv[1]
-    prefix_fname = sys.argv[2]
-    agent_pid = sys.argv[3]
-    prefix = os.getenv('PREFIX1', "::")
-
-    if operation == "add" or operation == "update":
-        utils.replace_file(prefix_fname, "%s/64" % prefix)
-    elif operation == "delete":
-        utils.replace_file(prefix_fname, "::/64")
-    os.kill(int(agent_pid), signal.SIGUSR1)
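A sketch of how a prefix-delegation client would invoke this hook, assuming the usual neutron-pd-notify console script; the prefix value, state-file path and agent PID are illustrative, and PREFIX1 is exported by the client:

    PREFIX1=2001:db8:1:: neutron-pd-notify add \
        /var/lib/neutron/pd/ri-1.prefix 12345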
diff --git a/neutron/cmd/sanity/__init__.py b/neutron/cmd/sanity/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/cmd/sanity/checks.py b/neutron/cmd/sanity/checks.py
deleted file mode 100644 (file)
index 87e17da..0000000
+++ /dev/null
@@ -1,381 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-import shutil
-import tempfile
-
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-
-from neutron._i18n import _LE
-from neutron.agent.common import ovs_lib
-from neutron.agent.l3 import ha_router
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import external_process
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import ip_link_support
-from neutron.agent.linux import keepalived
-from neutron.agent.linux import utils as agent_utils
-from neutron.common import constants as n_consts
-from neutron.common import utils
-from neutron.plugins.common import constants as const
-from neutron.plugins.ml2.drivers.openvswitch.agent.common \
-    import constants as ovs_const
-
-LOG = logging.getLogger(__name__)
-
-
-MINIMUM_DNSMASQ_VERSION = 2.67
-MINIMUM_DIBBLER_VERSION = '1.0.1'
-
-
-def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'):
-    name = "vxlantest-" + utils.get_random_string(6)
-    with ovs_lib.OVSBridge(name) as br:
-        port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_VXLAN)
-        return port != ovs_lib.INVALID_OFPORT
-
-
-def ovs_geneve_supported(from_ip='192.0.2.3', to_ip='192.0.2.4'):
-    name = "genevetest-" + utils.get_random_string(6)
-    with ovs_lib.OVSBridge(name) as br:
-        port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_GENEVE)
-        return port != ovs_lib.INVALID_OFPORT
-
-
-def iproute2_vxlan_supported():
-    ip = ip_lib.IPWrapper()
-    name = "vxlantest-" + utils.get_random_string(4)
-    port = ip.add_vxlan(name, 3000)
-    ip.del_veth(name)
-    return name == port.name
-
-
-def patch_supported():
-    seed = utils.get_random_string(6)
-    name = "patchtest-" + seed
-    peer_name = "peertest0-" + seed
-    patch_name = "peertest1-" + seed
-    with ovs_lib.OVSBridge(name) as br:
-        port = br.add_patch_port(patch_name, peer_name)
-        return port != ovs_lib.INVALID_OFPORT
-
-
-def nova_notify_supported():
-    try:
-        import neutron.notifiers.nova  # noqa since unused
-        return True
-    except ImportError:
-        return False
-
-
-def ofctl_arg_supported(cmd, **kwargs):
-    """Verify if ovs-ofctl binary supports cmd with **kwargs.
-
-    :param cmd: ovs-ofctl command to use for test.
-    :param **kwargs: arguments to test with the command.
-    :returns: a boolean if the supplied arguments are supported.
-    """
-    br_name = 'br-test-%s' % utils.get_random_string(6)
-    with ovs_lib.OVSBridge(br_name) as test_br:
-        full_args = ["ovs-ofctl", cmd, test_br.br_name,
-                     ovs_lib._build_flow_expr_str(kwargs, cmd.split('-')[0])]
-        try:
-            agent_utils.execute(full_args, run_as_root=True)
-        except RuntimeError as e:
-            LOG.debug("Exception while checking supported feature via "
-                      "command %s. Exception: %s", full_args, e)
-            return False
-        except Exception:
-            LOG.exception(_LE("Unexpected exception while checking supported"
-                              " feature via command: %s"), full_args)
-            return False
-        else:
-            return True
-
-
-def arp_responder_supported():
-    mac = netaddr.EUI('dead:1234:beef', dialect=netaddr.mac_unix)
-    ip = netaddr.IPAddress('240.0.0.1')
-    actions = ovs_const.ARP_RESPONDER_ACTIONS % {'mac': mac, 'ip': ip}
-
-    return ofctl_arg_supported(cmd='add-flow',
-                               table=21,
-                               priority=1,
-                               proto='arp',
-                               dl_vlan=42,
-                               nw_dst='%s' % ip,
-                               actions=actions)
-
-
-def arp_header_match_supported():
-    return ofctl_arg_supported(cmd='add-flow',
-                               table=24,
-                               priority=1,
-                               proto='arp',
-                               arp_op='0x2',
-                               arp_spa='1.1.1.1',
-                               actions="NORMAL")
-
-
-def icmpv6_header_match_supported():
-    return ofctl_arg_supported(cmd='add-flow',
-                               table=ovs_const.ARP_SPOOF_TABLE,
-                               priority=1,
-                               dl_type=n_consts.ETHERTYPE_IPV6,
-                               nw_proto=n_consts.PROTO_NUM_ICMP_V6,
-                               icmp_type=n_consts.ICMPV6_TYPE_NA,
-                               nd_target='fdf8:f53b:82e4::10',
-                               actions="NORMAL")
-
-
-def vf_management_supported():
-    is_supported = True
-    required_caps = (
-        ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
-        ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK,
-        ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE)
-    try:
-        vf_section = ip_link_support.IpLinkSupport.get_vf_mgmt_section()
-        for cap in required_caps:
-            if not ip_link_support.IpLinkSupport.vf_mgmt_capability_supported(
-                   vf_section, cap):
-                is_supported = False
-                LOG.debug("ip link command does not support "
-                          "vf capability '%(cap)s'", cap)
-    except ip_link_support.UnsupportedIpLinkCommand:
-        LOG.exception(_LE("Unexpected exception while checking supported "
-                          "ip link command"))
-        return False
-    return is_supported
-
-
-def netns_read_requires_helper():
-    ipw = ip_lib.IPWrapper()
-    nsname = "netnsreadtest-" + uuidutils.generate_uuid()
-    ipw.netns.add(nsname)
-    try:
-        # read without root_helper. if exists, not required.
-        ipw_nohelp = ip_lib.IPWrapper()
-        exists = ipw_nohelp.netns.exists(nsname)
-    finally:
-        ipw.netns.delete(nsname)
-    return not exists
-
-
-def get_minimal_dnsmasq_version_supported():
-    return MINIMUM_DNSMASQ_VERSION
-
-
-def dnsmasq_version_supported():
-    try:
-        cmd = ['dnsmasq', '--version']
-        env = {'LC_ALL': 'C'}
-        out = agent_utils.execute(cmd, addl_env=env)
-        m = re.search(r"version (\d+\.\d+)", out)
-        ver = float(m.group(1)) if m else 0
-        if ver < MINIMUM_DNSMASQ_VERSION:
-            return False
-    except (OSError, RuntimeError, IndexError, ValueError) as e:
-        LOG.debug("Exception while checking minimal dnsmasq version. "
-                  "Exception: %s", e)
-        return False
-    return True
-
-
-class KeepalivedIPv6Test(object):
-    def __init__(self, ha_port, gw_port, gw_vip, default_gw):
-        self.ha_port = ha_port
-        self.gw_port = gw_port
-        self.gw_vip = gw_vip
-        self.default_gw = default_gw
-        self.manager = None
-        self.config = None
-        self.config_path = None
-        self.nsname = "keepalivedtest-" + uuidutils.generate_uuid()
-        self.pm = external_process.ProcessMonitor(cfg.CONF, 'router')
-        self.orig_interval = cfg.CONF.AGENT.check_child_processes_interval
-
-    def configure(self):
-        config = keepalived.KeepalivedConf()
-        instance1 = keepalived.KeepalivedInstance('MASTER', self.ha_port, 1,
-                                                  ['169.254.192.0/18'],
-                                                  advert_int=5)
-        instance1.track_interfaces.append(self.ha_port)
-
-        # Configure keepalived with an IPv6 address (gw_vip) on gw_port.
-        vip_addr1 = keepalived.KeepalivedVipAddress(self.gw_vip, self.gw_port)
-        instance1.vips.append(vip_addr1)
-
-        # Configure keepalived with an IPv6 default route on gw_port.
-        gateway_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv6_ANY,
-                                                          self.default_gw,
-                                                          self.gw_port)
-        instance1.virtual_routes.gateway_routes = [gateway_route]
-        config.add_instance(instance1)
-        self.config = config
-
-    def start_keepalived_process(self):
-        # Disable process monitoring for Keepalived process.
-        cfg.CONF.set_override('check_child_processes_interval', 0, 'AGENT')
-
-        # Create a temp directory to store keepalived configuration.
-        self.config_path = tempfile.mkdtemp()
-
-        # Instantiate keepalived manager with the IPv6 configuration.
-        self.manager = keepalived.KeepalivedManager('router1', self.config,
-            namespace=self.nsname, process_monitor=self.pm,
-            conf_path=self.config_path)
-        self.manager.spawn()
-
-    def verify_ipv6_address_assignment(self, gw_dev):
-        process = self.manager.get_process()
-        agent_utils.wait_until_true(lambda: process.active)
-
-        def _gw_vip_assigned():
-            iface_ip = gw_dev.addr.list(ip_version=6, scope='global')
-            if iface_ip:
-                return self.gw_vip == iface_ip[0]['cidr']
-
-        agent_utils.wait_until_true(_gw_vip_assigned)
-
-    def __enter__(self):
-        ip_lib.IPWrapper().netns.add(self.nsname)
-        return self
-
-    def __exit__(self, exc_type, exc_value, exc_tb):
-        self.pm.stop()
-        if self.manager:
-            self.manager.disable()
-        if self.config_path:
-            shutil.rmtree(self.config_path, ignore_errors=True)
-        ip_lib.IPWrapper().netns.delete(self.nsname)
-        cfg.CONF.set_override('check_child_processes_interval',
-                              self.orig_interval, 'AGENT')
-
-
-def keepalived_ipv6_supported():
-    """Check if keepalived supports IPv6 functionality.
-
-    Validation is done as follows:
-    1. Create a namespace.
-    2. Create an OVS bridge with two ports (ha_port and gw_port).
-    3. Move the OVS ports to the namespace.
-    4. Spawn a keepalived process inside the namespace with an IPv6
-       configuration.
-    5. Verify that an IPv6 address is assigned to gw_port.
-    6. Verify that an IPv6 default route is configured by keepalived.
-    """
-
-    random_str = utils.get_random_string(6)
-    br_name = "ka-test-" + random_str
-    ha_port = ha_router.HA_DEV_PREFIX + random_str
-    gw_port = namespaces.INTERNAL_DEV_PREFIX + random_str
-    gw_vip = 'fdf8:f53b:82e4::10/64'
-    expected_default_gw = 'fe80:f816::1'
-
-    with ovs_lib.OVSBridge(br_name) as br:
-        with KeepalivedIPv6Test(ha_port, gw_port, gw_vip,
-                                expected_default_gw) as ka:
-            br.add_port(ha_port, ('type', 'internal'))
-            br.add_port(gw_port, ('type', 'internal'))
-
-            ha_dev = ip_lib.IPDevice(ha_port)
-            gw_dev = ip_lib.IPDevice(gw_port)
-
-            ha_dev.link.set_netns(ka.nsname)
-            gw_dev.link.set_netns(ka.nsname)
-
-            ha_dev.link.set_up()
-            gw_dev.link.set_up()
-
-            ka.configure()
-
-            ka.start_keepalived_process()
-
-            ka.verify_ipv6_address_assignment(gw_dev)
-
-            default_gw = gw_dev.route.get_gateway(ip_version=6)
-            if default_gw:
-                default_gw = default_gw['gateway']
-
-    return expected_default_gw == default_gw
-
-
-def ovsdb_native_supported():
-    # Running the test should ensure we are configured for OVSDB native
-    try:
-        ovs = ovs_lib.BaseOVS()
-        ovs.get_bridges()
-        return True
-    except ImportError as ex:
-        LOG.error(_LE("Failed to import required modules. Ensure that the "
-                      "python-openvswitch package is installed. Error: %s"),
-                  ex)
-    except Exception:
-        LOG.exception(_LE("Unexpected exception occurred."))
-
-    return False
-
-
-def ebtables_supported():
-    try:
-        cmd = ['ebtables', '--version']
-        agent_utils.execute(cmd)
-        return True
-    except (OSError, RuntimeError, IndexError, ValueError) as e:
-        LOG.debug("Exception while checking for installed ebtables. "
-                  "Exception: %s", e)
-        return False
-
-
-def ipset_supported():
-    try:
-        cmd = ['ipset', '--version']
-        agent_utils.execute(cmd)
-        return True
-    except (OSError, RuntimeError, IndexError, ValueError) as e:
-        LOG.debug("Exception while checking for installed ipset. "
-                  "Exception: %s", e)
-        return False
-
-
-def ip6tables_supported():
-    try:
-        cmd = ['ip6tables', '--version']
-        agent_utils.execute(cmd)
-        return True
-    except (OSError, RuntimeError, IndexError, ValueError) as e:
-        LOG.debug("Exception while checking for installed ip6tables. "
-                  "Exception: %s", e)
-        return False
-
-
-def get_minimal_dibbler_version_supported():
-    return MINIMUM_DIBBLER_VERSION
-
-
-def dibbler_version_supported():
-    try:
-        cmd = ['dibbler-client',
-               'help']
-        out = agent_utils.execute(cmd)
-        return '-w' in out
-    except (OSError, RuntimeError, IndexError, ValueError) as e:
-        LOG.debug("Exception while checking minimal dibbler version. "
-                  "Exception: %s", e)
-        return False
diff --git a/neutron/cmd/sanity_check.py b/neutron/cmd/sanity_check.py
deleted file mode 100644 (file)
index c5d8b12..0000000
+++ /dev/null
@@ -1,312 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _, _LE, _LW
-from neutron.agent import dhcp_agent
-from neutron.cmd.sanity import checks
-from neutron.common import config
-from neutron.db import l3_hamode_db
-
-
-LOG = logging.getLogger(__name__)
-
-
-def setup_conf():
-    cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
-                          'agent.common.config')
-    cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.'
-                          'agent.common.config')
-    cfg.CONF.import_group('VXLAN', 'neutron.plugins.ml2.drivers.linuxbridge.'
-                          'agent.common.config')
-    cfg.CONF.import_group('ml2', 'neutron.plugins.ml2.config')
-    cfg.CONF.import_group('ml2_sriov',
-                          'neutron.plugins.ml2.drivers.mech_sriov.mech_driver.'
-                          'mech_driver')
-    cfg.CONF.import_group('SECURITYGROUP', 'neutron.agent.securitygroups_rpc')
-    dhcp_agent.register_options(cfg.CONF)
-    cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
-
-
-class BoolOptCallback(cfg.BoolOpt):
-    def __init__(self, name, callback, **kwargs):
-        if 'default' not in kwargs:
-            kwargs['default'] = False
-        self.callback = callback
-        super(BoolOptCallback, self).__init__(name, **kwargs)
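-
-# A BoolOptCallback behaves like a regular cfg.BoolOpt but also carries the
-# check to run when the option is enabled: main() registers the OPTS list
-# below as CLI options, and all_tests_passed() invokes the callback of every
-# option that evaluates true. Illustrative invocation (flag spelling assumes
-# oslo.config's usual underscore-to-dash conversion):
-#
-#     neutron-sanity-check --ovs-vxlan --arp-responder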
-
-
-def check_ovs_vxlan():
-    result = checks.ovs_vxlan_supported()
-    if not result:
-        LOG.error(_LE('Check for Open vSwitch VXLAN support failed. '
-                      'Please ensure that the version of openvswitch '
-                      'being used has VXLAN support.'))
-    return result
-
-
-def check_ovs_geneve():
-    result = checks.ovs_geneve_supported()
-    if not result:
-        LOG.error(_LE('Check for Open vSwitch Geneve support failed. '
-                      'Please ensure that the version of openvswitch '
-                      'and kernel being used has Geneve support.'))
-    return result
-
-
-def check_iproute2_vxlan():
-    result = checks.iproute2_vxlan_supported()
-    if not result:
-        LOG.error(_LE('Check for iproute2 VXLAN support failed. Please ensure '
-                      'that the iproute2 has VXLAN support.'))
-    return result
-
-
-def check_ovs_patch():
-    result = checks.patch_supported()
-    if not result:
-        LOG.error(_LE('Check for Open vSwitch patch port support failed. '
-                      'Please ensure that the version of openvswitch '
-                      'being used has patch port support or disable features '
-                      'requiring patch ports (gre/vxlan, etc.).'))
-    return result
-
-
-def check_read_netns():
-    required = checks.netns_read_requires_helper()
-    if not required and cfg.CONF.AGENT.use_helper_for_ns_read:
-        LOG.warning(_LW("The user that is executing neutron can read the "
-                        "namespaces without using the root_helper. Disable "
-                        "the use_helper_for_ns_read option to avoid a "
-                        "performance impact."))
-        # Don't fail because nothing is actually broken. Just not optimal.
-        result = True
-    elif required and not cfg.CONF.AGENT.use_helper_for_ns_read:
-        LOG.error(_LE("The user that is executing neutron does not have "
-                      "permissions to read the namespaces. Enable the "
-                      "use_helper_for_ns_read configuration option."))
-        result = False
-    else:
-        # everything is configured appropriately
-        result = True
-    return result
-
-
-# NOTE(ihrachyshka): since the minimal version is currently capped due to
-# missing hwaddr matching in dnsmasq < 2.67, a better version of the check
-# would actually start a dnsmasq server and issue a DHCP request using an
-# IPv6 DHCP client.
-def check_dnsmasq_version():
-    result = checks.dnsmasq_version_supported()
-    if not result:
-        LOG.error(_LE('The installed version of dnsmasq is too old. '
-                      'Please update to at least version %s.'),
-                  checks.get_minimal_dnsmasq_version_supported())
-    return result
-
-
-def check_keepalived_ipv6_support():
-    result = checks.keepalived_ipv6_supported()
-    if not result:
-        LOG.error(_LE('The installed version of keepalived does not support '
-                      'IPv6. Please update to at least version 1.2.10 for '
-                      'IPv6 support.'))
-    return result
-
-
-def check_dibbler_version():
-    result = checks.dibbler_version_supported()
-    if not result:
-        LOG.error(_LE('The installed version of dibbler-client is too old. '
-                      'Please update to at least version %s.'),
-                  checks.get_minimal_dibbler_version_supported())
-    return result
-
-
-def check_nova_notify():
-    result = checks.nova_notify_supported()
-    if not result:
-        LOG.error(_LE('Nova notifications are enabled, but novaclient is not '
-                      'installed. Either disable nova notifications or '
-                      'install python-novaclient.'))
-    return result
-
-
-def check_arp_responder():
-    result = checks.arp_responder_supported()
-    if not result:
-        LOG.error(_LE('Check for Open vSwitch ARP responder support failed. '
-                      'Please ensure that the version of openvswitch '
-                      'being used has ARP flows support.'))
-    return result
-
-
-def check_arp_header_match():
-    result = checks.arp_header_match_supported()
-    if not result:
-        LOG.error(_LE('Check for Open vSwitch support of ARP header matching '
-                      'failed. ARP spoofing suppression will not work. A '
-                      'newer version of OVS is required.'))
-    return result
-
-
-def check_icmpv6_header_match():
-    result = checks.icmpv6_header_match_supported()
-    if not result:
-        LOG.error(_LE('Check for Open vSwitch support of ICMPv6 header '
-                      'matching failed. ICMPv6 Neighbor Advt spoofing (part '
-                      'of arp spoofing) suppression will not work. A newer '
-                      'version of OVS is required.'))
-    return result
-
-
-def check_vf_management():
-    result = checks.vf_management_supported()
-    if not result:
-        LOG.error(_LE('Check for VF management support failed. '
-                      'Please ensure that the version of ip link '
-                      'being used has VF support.'))
-    return result
-
-
-def check_ovsdb_native():
-    cfg.CONF.set_override('ovsdb_interface', 'native', group='OVS')
-    result = checks.ovsdb_native_supported()
-    if not result:
-        LOG.error(_LE('Check for native OVSDB support failed.'))
-    return result
-
-
-def check_ebtables():
-    result = checks.ebtables_supported()
-    if not result:
-        LOG.error(_LE('Cannot run ebtables. Please ensure that it '
-                      'is installed.'))
-    return result
-
-
-def check_ipset():
-    result = checks.ipset_supported()
-    if not result:
-        LOG.error(_LE('Cannot run ipset. Please ensure that it '
-                      'is installed.'))
-    return result
-
-
-def check_ip6tables():
-    result = checks.ip6tables_supported()
-    if not result:
-        LOG.error(_LE('Cannot run ip6tables. Please ensure that it '
-                      'is installed.'))
-    return result
-
-# Define CLI opts to test specific features, with a callback for the test
-OPTS = [
-    BoolOptCallback('ovs_vxlan', check_ovs_vxlan, default=False,
-                    help=_('Check for OVS vxlan support')),
-    BoolOptCallback('ovs_geneve', check_ovs_geneve, default=False,
-                    help=_('Check for OVS Geneve support')),
-    BoolOptCallback('iproute2_vxlan', check_iproute2_vxlan, default=False,
-                    help=_('Check for iproute2 vxlan support')),
-    BoolOptCallback('ovs_patch', check_ovs_patch, default=False,
-                    help=_('Check for patch port support')),
-    BoolOptCallback('nova_notify', check_nova_notify,
-                    help=_('Check for nova notification support')),
-    BoolOptCallback('arp_responder', check_arp_responder,
-                    help=_('Check for ARP responder support')),
-    BoolOptCallback('arp_header_match', check_arp_header_match,
-                    help=_('Check for ARP header match support')),
-    BoolOptCallback('icmpv6_header_match', check_icmpv6_header_match,
-                    help=_('Check for ICMPv6 header match support')),
-    BoolOptCallback('vf_management', check_vf_management,
-                    help=_('Check for VF management support')),
-    BoolOptCallback('read_netns', check_read_netns,
-                    help=_('Check netns permission settings')),
-    BoolOptCallback('dnsmasq_version', check_dnsmasq_version,
-                    help=_('Check minimal dnsmasq version')),
-    BoolOptCallback('ovsdb_native', check_ovsdb_native,
-                    help=_('Check ovsdb native interface support')),
-    BoolOptCallback('ebtables_installed', check_ebtables,
-                    help=_('Check ebtables installation')),
-    BoolOptCallback('keepalived_ipv6_support', check_keepalived_ipv6_support,
-                    help=_('Check keepalived IPv6 support')),
-    BoolOptCallback('dibbler_version', check_dibbler_version,
-                    help=_('Check minimal dibbler version')),
-    BoolOptCallback('ipset_installed', check_ipset,
-                    help=_('Check ipset installation')),
-    BoolOptCallback('ip6tables_installed', check_ip6tables,
-                    help=_('Check ip6tables installation')),
-]
-
-
-def enable_tests_from_config():
-    """If a test can depend on configuration, use this function to set the
-    appropriate CLI option to enable that test. It will then be possible to
-    run all necessary tests, just by passing in the appropriate configs.
-    """
-
-    cfg.CONF.set_override('vf_management', True)
-    if 'vxlan' in cfg.CONF.AGENT.tunnel_types:
-        cfg.CONF.set_override('ovs_vxlan', True)
-    if 'geneve' in cfg.CONF.AGENT.tunnel_types:
-        cfg.CONF.set_override('ovs_geneve', True)
-    if ('vxlan' in cfg.CONF.ml2.type_drivers or
-            cfg.CONF.VXLAN.enable_vxlan):
-        cfg.CONF.set_override('iproute2_vxlan', True)
-    if cfg.CONF.AGENT.tunnel_types:
-        cfg.CONF.set_override('ovs_patch', True)
-    if not cfg.CONF.OVS.use_veth_interconnection:
-        cfg.CONF.set_override('ovs_patch', True)
-    if (cfg.CONF.notify_nova_on_port_status_changes or
-            cfg.CONF.notify_nova_on_port_data_changes):
-        cfg.CONF.set_override('nova_notify', True)
-    if cfg.CONF.AGENT.arp_responder:
-        cfg.CONF.set_override('arp_responder', True)
-    if cfg.CONF.AGENT.prevent_arp_spoofing:
-        cfg.CONF.set_override('arp_header_match', True)
-        cfg.CONF.set_override('icmpv6_header_match', True)
-    if not cfg.CONF.AGENT.use_helper_for_ns_read:
-        cfg.CONF.set_override('read_netns', True)
-    if cfg.CONF.dhcp_driver == 'neutron.agent.linux.dhcp.Dnsmasq':
-        cfg.CONF.set_override('dnsmasq_version', True)
-    if cfg.CONF.OVS.ovsdb_interface == 'native':
-        cfg.CONF.set_override('ovsdb_native', True)
-    if cfg.CONF.l3_ha:
-        cfg.CONF.set_override('keepalived_ipv6_support', True)
-    if cfg.CONF.SECURITYGROUP.enable_ipset:
-        cfg.CONF.set_override('ipset_installed', True)
-    if cfg.CONF.SECURITYGROUP.enable_security_group:
-        cfg.CONF.set_override('ip6tables_installed', True)
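-
-# Illustrative usage (the file paths are typical examples, not fixed by this
-# module): when config files are supplied, main() below calls
-# enable_tests_from_config() so the relevant checks run without naming each
-# one on the command line:
-#
-#     neutron-sanity-check --config-file /etc/neutron/neutron.conf \
-#         --config-file /etc/neutron/plugins/ml2/ml2_conf.ini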
-
-
-def all_tests_passed():
-    return all(opt.callback() for opt in OPTS if cfg.CONF.get(opt.name))
-
-
-def main():
-    setup_conf()
-    cfg.CONF.register_cli_opts(OPTS)
-    cfg.CONF.set_override('use_stderr', True)
-    config.setup_logging()
-    config.init(sys.argv[1:], default_config_files=[])
-
-    if cfg.CONF.config_file:
-        enable_tests_from_config()
-
-    return 0 if all_tests_passed() else 1
diff --git a/neutron/common/__init__.py b/neutron/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/common/config.py b/neutron/common/config.py
deleted file mode 100644 (file)
index 64911f1..0000000
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Routines for configuring Neutron
-"""
-
-import sys
-
-from keystoneauth1 import loading as ks_loading
-from oslo_config import cfg
-from oslo_db import options as db_options
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_service import wsgi
-
-from neutron._i18n import _, _LI
-from neutron.api.v2 import attributes
-from neutron.common import utils
-from neutron import policy
-from neutron import version
-
-
-LOG = logging.getLogger(__name__)
-
-core_opts = [
-    cfg.StrOpt('bind_host', default='0.0.0.0',
-               help=_("The host IP to bind to")),
-    cfg.PortOpt('bind_port', default=9696,
-                help=_("The port to bind to")),
-    cfg.StrOpt('api_extensions_path', default="",
-               help=_("The path for API extensions. "
-                      "Note that this can be a colon-separated list of paths. "
-                      "For example: api_extensions_path = "
-                      "extensions:/path/to/more/exts:/even/more/exts. "
-                      "The __path__ of neutron.extensions is appended to "
-                      "this, so if your extensions are in there you don't "
-                      "need to specify them here.")),
-    cfg.StrOpt('auth_strategy', default='keystone',
-               help=_("The type of authentication to use")),
-    cfg.StrOpt('core_plugin',
-               help=_("The core plugin Neutron will use")),
-    cfg.ListOpt('service_plugins', default=[],
-                help=_("The service plugins Neutron will use")),
-    cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
-               help=_("The base MAC address Neutron will use for VIFs. "
-                      "The first 3 octets will remain unchanged. If the 4th "
-                      "octet is not 00, it will also be used. The others "
-                      "will be randomly generated.")),
-    cfg.IntOpt('mac_generation_retries', default=16,
-               help=_("How many times Neutron will retry MAC generation")),
-    cfg.BoolOpt('allow_bulk', default=True,
-                help=_("Allow the usage of the bulk API")),
-    cfg.BoolOpt('allow_pagination', default=False,
-                help=_("Allow the usage of the pagination")),
-    cfg.BoolOpt('allow_sorting', default=False,
-                help=_("Allow the usage of the sorting")),
-    cfg.StrOpt('pagination_max_limit', default="-1",
-               help=_("The maximum number of items returned in a single "
-                      "response, value was 'infinite' or negative integer "
-                      "means no limit")),
-    cfg.ListOpt('default_availability_zones', default=[],
-                help=_("Default value of availability zone hints. The "
-                       "availability zone aware schedulers use this when "
-                       "the resources availability_zone_hints is empty. "
-                       "Multiple availability zones can be specified by a "
-                       "comma separated string. This value can be empty. "
-                       "In this case, even if availability_zone_hints for "
-                       "a resource is empty, availability zone is "
-                       "considered for high availability while scheduling "
-                       "the resource.")),
-    cfg.IntOpt('max_dns_nameservers', default=5,
-               help=_("Maximum number of DNS nameservers per subnet")),
-    cfg.IntOpt('max_subnet_host_routes', default=20,
-               help=_("Maximum number of host routes per subnet")),
-    cfg.IntOpt('max_fixed_ips_per_port', default=5,
-               deprecated_for_removal=True,
-               help=_("Maximum number of fixed ips per port. This option "
-                      "is deprecated and will be removed in the N "
-                      "release.")),
-    cfg.StrOpt('default_ipv4_subnet_pool', deprecated_for_removal=True,
-               help=_("Default IPv4 subnet pool to be used for automatic "
-                      "subnet CIDR allocation. "
-                      "Specifies by UUID the pool to be used in case where "
-                      "creation of a subnet is being called without a "
-                      "subnet pool ID. If not set then no pool "
-                      "will be used unless passed explicitly to the subnet "
-                      "create. If no pool is used, then a CIDR must be passed "
-                      "to create a subnet and that subnet will not be "
-                      "allocated from any pool; it will be considered part of "
-                      "the tenant's private address space. This option is "
-                      "deprecated for removal in the N release.")),
-    cfg.StrOpt('default_ipv6_subnet_pool', deprecated_for_removal=True,
-               help=_("Default IPv6 subnet pool to be used for automatic "
-                      "subnet CIDR allocation. "
-                      "Specifies by UUID the pool to be used in case where "
-                      "creation of a subnet is being called without a "
-                      "subnet pool ID. See the description for "
-                      "default_ipv4_subnet_pool for more information. This "
-                      "option is deprecated for removal in the N release.")),
-    cfg.BoolOpt('ipv6_pd_enabled', default=False,
-                help=_("Enables IPv6 Prefix Delegation for automatic subnet "
-                       "CIDR allocation. "
-                       "Set to True to enable IPv6 Prefix Delegation for "
-                       "subnet allocation in a PD-capable environment. Users "
-                       "making subnet creation requests for IPv6 subnets "
-                       "without providing a CIDR or subnetpool ID will be "
-                       "given a CIDR via the Prefix Delegation mechanism. "
-                       "Note that enabling PD will override the behavior of "
-                       "the default IPv6 subnetpool.")),
-    cfg.IntOpt('dhcp_lease_duration', default=86400,
-               deprecated_name='dhcp_lease_time',
-               help=_("DHCP lease duration (in seconds). Use -1 to tell "
-                      "dnsmasq to use infinite lease times.")),
-    cfg.StrOpt('dns_domain',
-               default='openstacklocal',
-               help=_('Domain to use for building the hostnames')),
-    cfg.BoolOpt('dhcp_agent_notification', default=True,
-                help=_("Allow sending resource operation"
-                       " notification to DHCP agent")),
-    cfg.BoolOpt('allow_overlapping_ips', default=False,
-                help=_("Allow overlapping IP support in Neutron. "
-                       "Attention: the following parameter MUST be set to "
-                       "False if Neutron is being used in conjunction with "
-                       "Nova security groups.")),
-    cfg.StrOpt('host', default=utils.get_hostname(),
-               sample_default='example.domain',
-               help=_("Hostname to be used by the Neutron server, agents and "
-                      "services running on this machine. All the agents and "
-                      "services running on this machine must use the same "
-                      "host value.")),
-    cfg.BoolOpt('force_gateway_on_subnet', default=True,
-                help=_("Ensure that configured gateway is on subnet. "
-                       "For IPv6, validate only if gateway is not a link "
-                       "local address.")),
-    cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
-                help=_("Send notification to nova when port status changes")),
-    cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
-                help=_("Send notification to nova when port data (fixed_ips/"
-                       "floatingip) changes so nova can update its cache.")),
-    cfg.IntOpt('send_events_interval', default=2,
-               help=_('Number of seconds between sending events to nova if '
-                      'there are any events to send.')),
-    cfg.BoolOpt('advertise_mtu', default=False,
-                help=_('If True, effort is made to advertise MTU settings '
-                       'to VMs via network methods (DHCP and RA MTU options) '
-                       'when the network\'s preferred MTU is known.')),
-    cfg.StrOpt('ipam_driver',
-               help=_("Neutron IPAM (IP address management) driver to use. "
-                      "If ipam_driver is not set (default behavior), no IPAM "
-                      "driver is used. In order to use the reference "
-                      "implementation of Neutron IPAM driver, "
-                      "use 'internal'.")),
-    cfg.BoolOpt('vlan_transparent', default=False,
-                help=_('If True, then allow plugins that support it to '
-                       'create VLAN transparent networks.')),
-    cfg.StrOpt('web_framework', default='legacy',
-               choices=('legacy', 'pecan'),
-               help=_("This will choose the web framework in which to run "
-                      "the Neutron API server. 'pecan' is a new experiemental "
-                      "rewrite of the API server."))
-]
-
-core_cli_opts = [
-    cfg.StrOpt('state_path',
-               default='/var/lib/neutron',
-               help=_("Where to store Neutron state files. "
-                      "This directory must be writable by the agent.")),
-]
-
-# Register the configuration options
-cfg.CONF.register_opts(core_opts)
-cfg.CONF.register_cli_opts(core_cli_opts)
-wsgi.register_opts(cfg.CONF)
-
-# Ensure that the control exchange is set correctly
-oslo_messaging.set_transport_defaults(control_exchange='neutron')
-
-
-def set_db_defaults():
-    # Update the default QueuePool parameters. These can be tweaked by the
-    # conf variables - max_pool_size, max_overflow and pool_timeout
-    db_options.set_defaults(
-        cfg.CONF,
-        connection='sqlite://',
-        sqlite_db='', max_pool_size=10,
-        max_overflow=20, pool_timeout=10)
-
-set_db_defaults()
-
-NOVA_CONF_SECTION = 'nova'
-
-ks_loading.register_auth_conf_options(cfg.CONF, NOVA_CONF_SECTION)
-ks_loading.register_session_conf_options(cfg.CONF, NOVA_CONF_SECTION)
-
-nova_opts = [
-    cfg.StrOpt('region_name',
-               help=_('Name of nova region to use. Useful if keystone manages'
-                      ' more than one region.')),
-    cfg.StrOpt('endpoint_type',
-               default='public',
-               choices=['public', 'admin', 'internal'],
-               help=_('Type of the nova endpoint to use.  This endpoint will'
-                      ' be looked up in the keystone catalog and should be'
-                      ' one of public, internal or admin.')),
-]
-cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION)
-
-logging.register_options(cfg.CONF)
-
-
-def init(args, **kwargs):
-    cfg.CONF(args=args, project='neutron',
-             version='%%(prog)s %s' % version.version_info.release_string(),
-             **kwargs)
-
-    # FIXME(ihrachys): if the import is placed at module level, a circular
-    # import failure occurs
-    from neutron.common import rpc as n_rpc
-    n_rpc.init(cfg.CONF)
-
-    # Validate that the base_mac is of the correct format
-    msg = attributes._validate_regex(cfg.CONF.base_mac,
-                                     attributes.MAC_PATTERN)
-    if msg:
-        msg = _("Base MAC: %s") % msg
-        raise Exception(msg)
-
-
-def setup_logging():
-    """Sets up the logging options for a log with supplied name."""
-    product_name = "neutron"
-    logging.setup(cfg.CONF, product_name)
-    LOG.info(_LI("Logging enabled!"))
-    LOG.info(_LI("%(prog)s version %(version)s"),
-             {'prog': sys.argv[0],
-              'version': version.version_info.release_string()})
-    LOG.debug("command line: %s", " ".join(sys.argv))
-
-
-def reset_service():
-    # Reset worker in case SIGHUP is called.
-    # Note that this is called only in case a service is running in
-    # daemon mode.
-    setup_logging()
-    policy.refresh()
-
-
-def load_paste_app(app_name):
-    """Builds and returns a WSGI app from a paste config file.
-
-    :param app_name: Name of the application to load
-    """
-    loader = wsgi.Loader(cfg.CONF)
-    app = loader.load_app(app_name)
-    return app
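-
-# Illustrative usage ('neutron' is the composite app conventionally defined
-# in api-paste.ini; the name is an assumption of this example, not enforced
-# by the helper):
-#
-#     app = load_paste_app('neutron')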
diff --git a/neutron/common/constants.py b/neutron/common/constants.py
deleted file mode 100644 (file)
index b80d5c0..0000000
+++ /dev/null
@@ -1,221 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# TODO(salv-orlando): Verify if a single set of operational
-# status constants is achievable
-NET_STATUS_ACTIVE = 'ACTIVE'
-NET_STATUS_BUILD = 'BUILD'
-NET_STATUS_DOWN = 'DOWN'
-NET_STATUS_ERROR = 'ERROR'
-
-PORT_STATUS_ACTIVE = 'ACTIVE'
-PORT_STATUS_BUILD = 'BUILD'
-PORT_STATUS_DOWN = 'DOWN'
-PORT_STATUS_ERROR = 'ERROR'
-PORT_STATUS_NOTAPPLICABLE = 'N/A'
-
-FLOATINGIP_STATUS_ACTIVE = 'ACTIVE'
-FLOATINGIP_STATUS_DOWN = 'DOWN'
-FLOATINGIP_STATUS_ERROR = 'ERROR'
-
-DEVICE_OWNER_COMPUTE_PREFIX = "compute:"
-DEVICE_OWNER_NETWORK_PREFIX = "network:"
-DEVICE_OWNER_NEUTRON_PREFIX = "neutron:"
-
-DEVICE_OWNER_ROUTER_HA_INTF = (DEVICE_OWNER_NETWORK_PREFIX +
-                               "router_ha_interface")
-DEVICE_OWNER_ROUTER_INTF = DEVICE_OWNER_NETWORK_PREFIX + "router_interface"
-DEVICE_OWNER_ROUTER_GW = DEVICE_OWNER_NETWORK_PREFIX + "router_gateway"
-DEVICE_OWNER_FLOATINGIP = DEVICE_OWNER_NETWORK_PREFIX + "floatingip"
-DEVICE_OWNER_DHCP = DEVICE_OWNER_NETWORK_PREFIX + "dhcp"
-DEVICE_OWNER_DVR_INTERFACE = (DEVICE_OWNER_NETWORK_PREFIX +
-                              "router_interface_distributed")
-DEVICE_OWNER_AGENT_GW = (DEVICE_OWNER_NETWORK_PREFIX +
-                         "floatingip_agent_gateway")
-DEVICE_OWNER_ROUTER_SNAT = (DEVICE_OWNER_NETWORK_PREFIX +
-                            "router_centralized_snat")
-DEVICE_OWNER_LOADBALANCER = DEVICE_OWNER_NEUTRON_PREFIX + "LOADBALANCER"
-DEVICE_OWNER_LOADBALANCERV2 = DEVICE_OWNER_NEUTRON_PREFIX + "LOADBALANCERV2"
-
-DEVICE_OWNER_PREFIXES = (DEVICE_OWNER_NETWORK_PREFIX,
-                         DEVICE_OWNER_NEUTRON_PREFIX)
-
-# Collection used to identify devices owned by router interfaces.
-# DEVICE_OWNER_ROUTER_HA_INTF is a special case and so is not included.
-ROUTER_INTERFACE_OWNERS = (DEVICE_OWNER_ROUTER_INTF,
-                           DEVICE_OWNER_DVR_INTERFACE)
-ROUTER_INTERFACE_OWNERS_SNAT = (DEVICE_OWNER_ROUTER_INTF,
-                                DEVICE_OWNER_DVR_INTERFACE,
-                                DEVICE_OWNER_ROUTER_SNAT)
-L3_AGENT_MODE_DVR = 'dvr'
-L3_AGENT_MODE_DVR_SNAT = 'dvr_snat'
-L3_AGENT_MODE_LEGACY = 'legacy'
-L3_AGENT_MODE = 'agent_mode'
-
-DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port"
-
-FLOATINGIP_KEY = '_floatingips'
-INTERFACE_KEY = '_interfaces'
-HA_INTERFACE_KEY = '_ha_interface'
-HA_ROUTER_STATE_KEY = '_ha_state'
-METERING_LABEL_KEY = '_metering_labels'
-FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
-SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'
-
-HA_NETWORK_NAME = 'HA network tenant %s'
-HA_SUBNET_NAME = 'HA subnet tenant %s'
-HA_PORT_NAME = 'HA port tenant %s'
-MINIMUM_AGENTS_FOR_HA = 2
-HA_ROUTER_STATE_ACTIVE = 'active'
-HA_ROUTER_STATE_STANDBY = 'standby'
-
-IPv4 = 'IPv4'
-IPv6 = 'IPv6'
-IP_VERSION_4 = 4
-IP_VERSION_6 = 6
-IPv4_BITS = 32
-IPv6_BITS = 128
-
-INVALID_MAC_ADDRESSES = ['00:00:00:00:00:00', 'FF:FF:FF:FF:FF:FF']
-
-IPv4_ANY = '0.0.0.0/0'
-IPv6_ANY = '::/0'
-IP_ANY = {IP_VERSION_4: IPv4_ANY, IP_VERSION_6: IPv6_ANY}
-
-DHCP_RESPONSE_PORT = 68
-
-FLOODING_ENTRY = ('00:00:00:00:00:00', '0.0.0.0')
-
-AGENT_TYPE_DHCP = 'DHCP agent'
-AGENT_TYPE_OVS = 'Open vSwitch agent'
-AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent'
-AGENT_TYPE_OFA = 'OFA driver agent'
-AGENT_TYPE_L3 = 'L3 agent'
-AGENT_TYPE_LOADBALANCER = 'Loadbalancer agent'
-AGENT_TYPE_METERING = 'Metering agent'
-AGENT_TYPE_METADATA = 'Metadata agent'
-AGENT_TYPE_NIC_SWITCH = 'NIC Switch agent'
-L2_AGENT_TOPIC = 'N/A'
-
-PAGINATION_INFINITE = 'infinite'
-
-SORT_DIRECTION_ASC = 'asc'
-SORT_DIRECTION_DESC = 'desc'
-
-PORT_BINDING_EXT_ALIAS = 'binding'
-L3_AGENT_SCHEDULER_EXT_ALIAS = 'l3_agent_scheduler'
-DHCP_AGENT_SCHEDULER_EXT_ALIAS = 'dhcp_agent_scheduler'
-LBAAS_AGENT_SCHEDULER_EXT_ALIAS = 'lbaas_agent_scheduler'
-L3_DISTRIBUTED_EXT_ALIAS = 'dvr'
-L3_HA_MODE_EXT_ALIAS = 'l3-ha'
-SUBNET_ALLOCATION_EXT_ALIAS = 'subnet_allocation'
-
-ETHERTYPE_IPV6 = 0x86DD
-
-# Protocol names and numbers for Security Groups/Firewalls
-PROTO_NAME_TCP = 'tcp'
-PROTO_NAME_ICMP = 'icmp'
-PROTO_NAME_ICMP_V6 = 'icmpv6'
-PROTO_NAME_UDP = 'udp'
-PROTO_NUM_TCP = 6
-PROTO_NUM_ICMP = 1
-PROTO_NUM_ICMP_V6 = 58
-PROTO_NUM_UDP = 17
-
-# List of ICMPv6 types that should be allowed by default:
-# Multicast Listener Query (130),
-# Multicast Listener Report (131),
-# Multicast Listener Done (132),
-# Neighbor Solicitation (135),
-# Neighbor Advertisement (136)
-ICMPV6_ALLOWED_TYPES = [130, 131, 132, 135, 136]
-ICMPV6_TYPE_RA = 134
-ICMPV6_TYPE_NA = 136
-
-DHCPV6_STATEFUL = 'dhcpv6-stateful'
-DHCPV6_STATELESS = 'dhcpv6-stateless'
-IPV6_SLAAC = 'slaac'
-IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC]
-
-IPV6_LLA_PREFIX = 'fe80::/64'
-
-# Human-readable ID to which the subnetpool ID is set to indicate that
-# IPv6 Prefix Delegation is enabled for a given subnet
-IPV6_PD_POOL_ID = 'prefix_delegation'
-
-# Special provisional prefix for IPv6 Prefix Delegation
-PROVISIONAL_IPV6_PD_PREFIX = '::/64'
-
-# Timeout in seconds for getting an IPv6 LLA
-LLA_TASK_TIMEOUT = 40
-
-# Linux interface max length
-DEVICE_NAME_MAX_LEN = 15
-
-# vhost-user device names start with "vhu"
-VHOST_USER_DEVICE_PREFIX = 'vhu'
-# Device names start with "tap"
-TAP_DEVICE_PREFIX = 'tap'
-# The vswitch side of a veth pair for a nova iptables filter setup
-VETH_DEVICE_PREFIX = 'qvo'
-# prefix for SNAT interface in DVR
-SNAT_INT_DEV_PREFIX = 'sg-'
-
-# Possible prefixes to partial port IDs in interface names used by the OVS,
-# Linux Bridge, and IVS VIF drivers in Nova and the neutron agents. See the
-# 'get_ovs_interfaceid' method in Nova (nova/virt/libvirt/vif.py) for details.
-INTERFACE_PREFIXES = (TAP_DEVICE_PREFIX, VETH_DEVICE_PREFIX,
-                      SNAT_INT_DEV_PREFIX)
-
-ATTRIBUTES_TO_UPDATE = 'attributes_to_update'
-
-# Maximum value integer can take in MySQL and PostgreSQL
-# In SQLite integer can be stored in 1, 2, 3, 4, 6, or 8 bytes,
-# but here it will be limited by this value for consistency.
-DB_INTEGER_MAX_VALUE = 2 ** 31 - 1
-
-# TODO(amuller): Re-define the RPC namespaces once Oslo messaging supports
-# Targets with multiple namespaces. Neutron will then implement callbacks
-# for its RPC clients in order to support rolling upgrades.
-
-# RPC Interface for agents to call DHCP API implemented on the plugin side
-RPC_NAMESPACE_DHCP_PLUGIN = None
-# RPC interface for the metadata service to get info from the plugin side
-RPC_NAMESPACE_METADATA = None
-# RPC interface for agent to plugin security group API
-RPC_NAMESPACE_SECGROUP = None
-# RPC interface for agent to plugin DVR api
-RPC_NAMESPACE_DVR = None
-# RPC interface for reporting state back to the plugin
-RPC_NAMESPACE_STATE = None
-# RPC interface for agent to plugin resources API
-RPC_NAMESPACE_RESOURCES = None
-
-# Default network MTU value when not configured
-DEFAULT_NETWORK_MTU = 0
-IPV6_MIN_MTU = 1280
-
-ROUTER_MARK_MASK = "0xffff"
-
-# Time format
-ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
-
-# Agent states as detected by server, used to reply on agent's state report
-# agent has just been registered
-AGENT_NEW = 'new'
-# agent is alive
-AGENT_ALIVE = 'alive'
-# agent has just returned to alive after being dead
-AGENT_REVIVED = 'revived'
diff --git a/neutron/common/eventlet_utils.py b/neutron/common/eventlet_utils.py
deleted file mode 100644 (file)
index cf995d5..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) 2015 Cloudbase Solutions.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-import eventlet
-
-
-def monkey_patch():
-    if os.name == 'nt':
-        # eventlet monkey patching the os and thread modules causes
-        # subprocess.Popen to fail on Windows when using pipes due
-        # to missing non-blocking IO support.
-        #
-        # bug report on eventlet:
-        # https://bitbucket.org/eventlet/eventlet/issue/132/
-        #       eventletmonkey_patch-breaks
-        eventlet.monkey_patch(os=False, thread=False)
-    else:
-        eventlet.monkey_patch()
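-
-# Typically called once, as early as possible during process startup and
-# before modules that use sockets or threads are imported (a usage note,
-# not something this helper enforces):
-#
-#     from neutron.common import eventlet_utils
-#     eventlet_utils.monkey_patch()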
diff --git a/neutron/common/exceptions.py b/neutron/common/exceptions.py
deleted file mode 100644 (file)
index bbfbb2f..0000000
+++ /dev/null
@@ -1,550 +0,0 @@
-# Copyright 2011 VMware, Inc
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Neutron base exception handling.
-"""
-
-from oslo_utils import excutils
-import six
-
-from neutron._i18n import _
-
-
-class NeutronException(Exception):
-    """Base Neutron Exception.
-
-    To correctly use this class, inherit from it and define
-    a 'message' property. That message will get printf'd
-    with the keyword arguments provided to the constructor.
-    """
-    message = _("An unknown exception occurred.")
-
-    def __init__(self, **kwargs):
-        try:
-            super(NeutronException, self).__init__(self.message % kwargs)
-            self.msg = self.message % kwargs
-        except Exception:
-            with excutils.save_and_reraise_exception() as ctxt:
-                if not self.use_fatal_exceptions():
-                    ctxt.reraise = False
-                    # at least get the core message out if something happened
-                    super(NeutronException, self).__init__(self.message)
-
-    if six.PY2:
-        def __unicode__(self):
-            return unicode(self.msg)
-
-    def __str__(self):
-        return self.msg
-
-    def use_fatal_exceptions(self):
-        return False
-
-
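-# Illustrative subclassing (WidgetNotFound is a hypothetical exception,
-# shown only to demonstrate the pattern described in NeutronException's
-# docstring):
-#
-#     class WidgetNotFound(NeutronException):
-#         message = _("Widget %(widget_id)s could not be found.")
-#
-#     raise WidgetNotFound(widget_id='abc-123')  # kwargs fill the message
-
-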
-class BadRequest(NeutronException):
-    message = _('Bad %(resource)s request: %(msg)s.')
-
-
-class NotFound(NeutronException):
-    pass
-
-
-class Conflict(NeutronException):
-    pass
-
-
-class NotAuthorized(NeutronException):
-    message = _("Not authorized.")
-
-
-class ServiceUnavailable(NeutronException):
-    message = _("The service is unavailable.")
-
-
-class AdminRequired(NotAuthorized):
-    message = _("User does not have admin privileges: %(reason)s.")
-
-
-class ObjectNotFound(NotFound):
-    message = _("Object %(id)s not found.")
-
-
-class NetworkNotFound(NotFound):
-    message = _("Network %(net_id)s could not be found.")
-
-
-class SubnetNotFound(NotFound):
-    message = _("Subnet %(subnet_id)s could not be found.")
-
-
-class SubnetPoolNotFound(NotFound):
-    message = _("Subnet pool %(subnetpool_id)s could not be found.")
-
-
-class PortNotFound(NotFound):
-    message = _("Port %(port_id)s could not be found.")
-
-
-class QosPolicyNotFound(NotFound):
-    message = _("QoS policy %(policy_id)s could not be found.")
-
-
-class QosRuleNotFound(NotFound):
-    message = _("QoS rule %(rule_id)s for policy %(policy_id)s "
-                "could not be found.")
-
-
-class PortNotFoundOnNetwork(NotFound):
-    message = _("Port %(port_id)s could not be found "
-                "on network %(net_id)s.")
-
-
-class PortQosBindingNotFound(NotFound):
-    message = _("QoS binding for port %(port_id)s and policy %(policy_id)s "
-                "could not be found.")
-
-
-class NetworkQosBindingNotFound(NotFound):
-    message = _("QoS binding for network %(net_id)s and policy %(policy_id)s "
-                "could not be found.")
-
-
-class PolicyFileNotFound(NotFound):
-    message = _("Policy configuration policy.json could not be found.")
-
-
-class PolicyInitError(NeutronException):
-    message = _("Failed to init policy %(policy)s because %(reason)s.")
-
-
-class PolicyCheckError(NeutronException):
-    message = _("Failed to check policy %(policy)s because %(reason)s.")
-
-
-class StateInvalid(BadRequest):
-    message = _("Unsupported port state: %(port_state)s.")
-
-
-class InUse(NeutronException):
-    message = _("The resource is in use.")
-
-
-class QosPolicyInUse(InUse):
-    message = _("QoS Policy %(policy_id)s is used by "
-                "%(object_type)s %(object_id)s.")
-
-
-class NetworkInUse(InUse):
-    message = _("Unable to complete operation on network %(net_id)s. "
-                "There are one or more ports still in use on the network.")
-
-
-class SubnetInUse(InUse):
-    message = _("Unable to complete operation on subnet %(subnet_id)s "
-                "%(reason)s.")
-
-    def __init__(self, **kwargs):
-        if 'reason' not in kwargs:
-            kwargs['reason'] = _("One or more ports have an IP allocation "
-                                 "from this subnet.")
-        super(SubnetInUse, self).__init__(**kwargs)
-
-
-class SubnetPoolInUse(InUse):
-    message = _("Unable to complete operation on subnet pool "
-                "%(subnet_pool_id)s. %(reason)s.")
-
-    def __init__(self, **kwargs):
-        if 'reason' not in kwargs:
-            kwargs['reason'] = _("Two or more concurrent subnets allocated.")
-        super(SubnetPoolInUse, self).__init__(**kwargs)
-
-
-class PortInUse(InUse):
-    message = _("Unable to complete operation on port %(port_id)s "
-                "for network %(net_id)s. Port already has an attached "
-                "device %(device_id)s.")
-
-
-class ServicePortInUse(InUse):
-    message = _("Port %(port_id)s cannot be deleted directly via the "
-                "port API: %(reason)s.")
-
-
-class DhcpPortInUse(InUse):
-    message = _("Port %(port_id)s is already acquired by another DHCP agent")
-
-
-class PortBound(InUse):
-    message = _("Unable to complete operation on port %(port_id)s, "
-                "port is already bound, port type: %(vif_type)s, "
-                "old_mac %(old_mac)s, new_mac %(new_mac)s.")
-
-
-class MacAddressInUse(InUse):
-    message = _("Unable to complete operation for network %(net_id)s. "
-                "The mac address %(mac)s is in use.")
-
-
-class HostRoutesExhausted(BadRequest):
-    # NOTE(xchenum): probably make sense to use quota exceeded exception?
-    message = _("Unable to complete operation for %(subnet_id)s. "
-                "The number of host routes exceeds the limit %(quota)s.")
-
-
-class DNSNameServersExhausted(BadRequest):
-    # NOTE(xchenum): probably make sense to use quota exceeded exception?
-    message = _("Unable to complete operation for %(subnet_id)s. "
-                "The number of DNS nameservers exceeds the limit %(quota)s.")
-
-
-class InvalidIpForNetwork(BadRequest):
-    message = _("IP address %(ip_address)s is not a valid IP "
-                "for any of the subnets on the specified network.")
-
-
-class InvalidIpForSubnet(BadRequest):
-    message = _("IP address %(ip_address)s is not a valid IP "
-                "for the specified subnet.")
-
-
-class IpAddressInUse(InUse):
-    message = _("Unable to complete operation for network %(net_id)s. "
-                "The IP address %(ip_address)s is in use.")
-
-
-class VlanIdInUse(InUse):
-    message = _("Unable to create the network. "
-                "The VLAN %(vlan_id)s on physical network "
-                "%(physical_network)s is in use.")
-
-
-class FlatNetworkInUse(InUse):
-    message = _("Unable to create the flat network. "
-                "Physical network %(physical_network)s is in use.")
-
-
-class TunnelIdInUse(InUse):
-    message = _("Unable to create the network. "
-                "The tunnel ID %(tunnel_id)s is in use.")
-
-
-class TenantNetworksDisabled(ServiceUnavailable):
-    message = _("Tenant network creation is not enabled.")
-
-
-class ResourceExhausted(ServiceUnavailable):
-    pass
-
-
-class NoNetworkAvailable(ResourceExhausted):
-    message = _("Unable to create the network. "
-                "No tenant network is available for allocation.")
-
-
-class NoNetworkFoundInMaximumAllowedAttempts(ServiceUnavailable):
-    message = _("Unable to create the network. "
-                "No available network found in maximum allowed attempts.")
-
-
-class SubnetMismatchForPort(BadRequest):
-    message = _("Subnet on port %(port_id)s does not match "
-                "the requested subnet %(subnet_id)s.")
-
-
-class MalformedRequestBody(BadRequest):
-    message = _("Malformed request body: %(reason)s.")
-
-
-class Invalid(NeutronException):
-    def __init__(self, message=None):
-        self.message = message
-        super(Invalid, self).__init__()
-
-
-class InvalidInput(BadRequest):
-    message = _("Invalid input for operation: %(error_message)s.")
-
-
-class InvalidAllocationPool(BadRequest):
-    message = _("The allocation pool %(pool)s is not valid.")
-
-
-class UnsupportedPortDeviceOwner(Conflict):
-    message = _("Operation %(op)s is not supported for device_owner "
-                "%(device_owner)s on port %(port_id)s.")
-
-
-class OverlappingAllocationPools(Conflict):
-    message = _("Found overlapping allocation pools: "
-                "%(pool_1)s %(pool_2)s for subnet %(subnet_cidr)s.")
-
-
-class OutOfBoundsAllocationPool(BadRequest):
-    message = _("The allocation pool %(pool)s spans "
-                "beyond the subnet cidr %(subnet_cidr)s.")
-
-
-class MacAddressGenerationFailure(ServiceUnavailable):
-    message = _("Unable to generate unique mac on network %(net_id)s.")
-
-
-class IpAddressGenerationFailure(Conflict):
-    message = _("No more IP addresses available on network %(net_id)s.")
-
-
-class BridgeDoesNotExist(NeutronException):
-    message = _("Bridge %(bridge)s does not exist.")
-
-
-class PreexistingDeviceFailure(NeutronException):
-    message = _("Creation failed. %(dev_name)s already exists.")
-
-
-class QuotaResourceUnknown(NotFound):
-    message = _("Unknown quota resources %(unknown)s.")
-
-
-class OverQuota(Conflict):
-    message = _("Quota exceeded for resources: %(overs)s.")
-
-
-class QuotaMissingTenant(BadRequest):
-    message = _("Tenant-id was missing from quota request.")
-
-
-class InvalidQuotaValue(Conflict):
-    message = _("Change would make usage less than 0 for the following "
-                "resources: %(unders)s.")
-
-
-class InvalidSharedSetting(Conflict):
-    message = _("Unable to reconfigure sharing settings for network "
-                "%(network)s. Multiple tenants are using it.")
-
-
-class InvalidExtensionEnv(BadRequest):
-    message = _("Invalid extension environment: %(reason)s.")
-
-
-class ExtensionsNotFound(NotFound):
-    message = _("Extensions not found: %(extensions)s.")
-
-
-class InvalidContentType(NeutronException):
-    message = _("Invalid content type %(content_type)s.")
-
-
-class ExternalIpAddressExhausted(BadRequest):
-    message = _("Unable to find any IP address on external "
-                "network %(net_id)s.")
-
-
-class TooManyExternalNetworks(NeutronException):
-    message = _("More than one external network exists.")
-
-
-class InvalidConfigurationOption(NeutronException):
-    message = _("An invalid value was provided for %(opt_name)s: "
-                "%(opt_value)s.")
-
-
-class GatewayConflictWithAllocationPools(InUse):
-    message = _("Gateway ip %(ip_address)s conflicts with "
-                "allocation pool %(pool)s.")
-
-
-class GatewayIpInUse(InUse):
-    message = _("Current gateway ip %(ip_address)s already in use "
-                "by port %(port_id)s. Unable to update.")
-
-
-class NetworkVlanRangeError(NeutronException):
-    message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'.")
-
-    def __init__(self, **kwargs):
-        # Convert vlan_range tuple to 'start:end' format for display
-        if isinstance(kwargs['vlan_range'], tuple):
-            kwargs['vlan_range'] = "%d:%d" % kwargs['vlan_range']
-        super(NetworkVlanRangeError, self).__init__(**kwargs)
-
-
-class PhysicalNetworkNameError(NeutronException):
-    message = _("Empty physical network name.")
-
-
-class NetworkTunnelRangeError(NeutronException):
-    message = _("Invalid network tunnel range: "
-                "'%(tunnel_range)s' - %(error)s.")
-
-    def __init__(self, **kwargs):
-        # Convert tunnel_range tuple to 'start:end' format for display
-        if isinstance(kwargs['tunnel_range'], tuple):
-            kwargs['tunnel_range'] = "%d:%d" % kwargs['tunnel_range']
-        super(NetworkTunnelRangeError, self).__init__(**kwargs)
-
-
-class NetworkVxlanPortRangeError(NeutronException):
-    message = _("Invalid network VXLAN port range: '%(vxlan_range)s'.")
-
-
-class VxlanNetworkUnsupported(NeutronException):
-    message = _("VXLAN network unsupported.")
-
-
-class DuplicatedExtension(NeutronException):
-    message = _("Found duplicate extension: %(alias)s.")
-
-
-class DeviceIDNotOwnedByTenant(Conflict):
-    message = _("The following device_id %(device_id)s is not owned by your "
-                "tenant or matches another tenants router.")
-
-
-class InvalidCIDR(BadRequest):
-    message = _("Invalid CIDR %(input)s given as IP prefix.")
-
-
-class RouterNotCompatibleWithAgent(NeutronException):
-    message = _("Router '%(router_id)s' is not compatible with this agent.")
-
-
-class DvrHaRouterNotSupported(NeutronException):
-    message = _("Router '%(router_id)s' cannot be both DVR and HA.")
-
-
-class FailToDropPrivilegesExit(SystemExit):
-    """Exit exception raised when a drop privileges action fails."""
-    code = 99
-
-
-class FloatingIpSetupException(NeutronException):
-    def __init__(self, message=None):
-        self.message = message
-        super(FloatingIpSetupException, self).__init__()
-
-
-class IpTablesApplyException(NeutronException):
-    def __init__(self, message=None):
-        self.message = message
-        super(IpTablesApplyException, self).__init__()
-
-
-class NetworkIdOrRouterIdRequiredError(NeutronException):
-    message = _('Both network_id and router_id are None. '
-                'One must be provided.')
-
-
-class AbortSyncRouters(NeutronException):
-    message = _("Aborting periodic_sync_routers_task due to an error.")
-
-
-# Shared *aas exceptions, pending them being refactored out of Neutron
-# proper.
-
-class FirewallInternalDriverError(NeutronException):
-    """Fwaas exception for all driver errors.
-
-    On any failure or exception in the driver, the driver should log it
-    and raise this exception to the agent.
-    """
-    message = _("%(driver)s: Internal driver error.")
-
-
-class MissingMinSubnetPoolPrefix(BadRequest):
-    message = _("Unspecified minimum subnet pool prefix.")
-
-
-class EmptySubnetPoolPrefixList(BadRequest):
-    message = _("Empty subnet pool prefix list.")
-
-
-class PrefixVersionMismatch(BadRequest):
-    message = _("Cannot mix IPv4 and IPv6 prefixes in a subnet pool.")
-
-
-class UnsupportedMinSubnetPoolPrefix(BadRequest):
-    message = _("Prefix '%(prefix)s' not supported in IPv%(version)s pool.")
-
-
-class IllegalSubnetPoolPrefixBounds(BadRequest):
-    message = _("Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, "
-                "%(base_prefix_type)s=%(base_prefixlen)s.")
-
-
-class IllegalSubnetPoolPrefixUpdate(BadRequest):
-    message = _("Illegal update to prefixes: %(msg)s.")
-
-
-class SubnetAllocationError(NeutronException):
-    message = _("Failed to allocate subnet: %(reason)s.")
-
-
-class AddressScopePrefixConflict(Conflict):
-    message = _("Failed to associate address scope: subnetpools "
-                "within an address scope must have unique prefixes.")
-
-
-class IllegalSubnetPoolAssociationToAddressScope(BadRequest):
-    message = _("Illegal subnetpool association: subnetpool %(subnetpool_id)s "
-                "cannot be associated with address scope "
-                "%(address_scope_id)s.")
-
-
-class IllegalSubnetPoolIpVersionAssociationToAddressScope(BadRequest):
-    message = _("Illegal subnetpool association: subnetpool %(subnetpool_id)s "
-                "cannot associate with address scope %(address_scope_id)s "
-                "because subnetpool ip_version is not %(ip_version)s.")
-
-
-class IllegalSubnetPoolUpdate(BadRequest):
-    message = _("Illegal subnetpool update : %(reason)s.")
-
-
-class MinPrefixSubnetAllocationError(BadRequest):
-    message = _("Unable to allocate subnet with prefix length %(prefixlen)s, "
-                "minimum allowed prefix is %(min_prefixlen)s.")
-
-
-class MaxPrefixSubnetAllocationError(BadRequest):
-    message = _("Unable to allocate subnet with prefix length %(prefixlen)s, "
-                "maximum allowed prefix is %(max_prefixlen)s.")
-
-
-class SubnetPoolDeleteError(BadRequest):
-    message = _("Unable to delete subnet pool: %(reason)s.")
-
-
-class SubnetPoolQuotaExceeded(OverQuota):
-    message = _("Per-tenant subnet pool prefix quota exceeded.")
-
-
-class DeviceNotFoundError(NeutronException):
-    message = _("Device '%(device_name)s' does not exist.")
-
-
-class NetworkSubnetPoolAffinityError(BadRequest):
-    message = _("Subnets hosted on the same network must be allocated from "
-                "the same subnet pool.")
-
-
-class ObjectActionError(NeutronException):
-    message = _('Object action %(action)s failed because: %(reason)s.')
-
-
-class CTZoneExhaustedError(NeutronException):
-    message = _("IPtables conntrack zones exhausted, iptables rules cannot "
-                "be applied.")
diff --git a/neutron/common/ipv6_utils.py b/neutron/common/ipv6_utils.py
deleted file mode 100644 (file)
index a0c8f0c..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-IPv6-related utilities and helper functions.
-"""
-import os
-
-import netaddr
-from oslo_log import log
-
-from neutron._i18n import _, _LI
-from neutron.common import constants
-
-
-LOG = log.getLogger(__name__)
-_IS_IPV6_ENABLED = None
-
-
-def get_ipv6_addr_by_EUI64(prefix, mac):
-    # Check whether the prefix is an IPv4 address
-    isIPv4 = netaddr.valid_ipv4(prefix)
-    if isIPv4:
-        msg = _("Unable to generate IP address by EUI64 for IPv4 prefix")
-        raise TypeError(msg)
-    try:
-        eui64 = int(netaddr.EUI(mac).eui64())
-        prefix = netaddr.IPNetwork(prefix)
-        return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57))
-    except (ValueError, netaddr.AddrFormatError):
-        raise TypeError(_('Bad prefix or mac format for generating IPv6 '
-                          'address by EUI-64: %(prefix)s, %(mac)s')
-                        % {'prefix': prefix, 'mac': mac})
-    except TypeError:
-        raise TypeError(_('Bad prefix type for generating IPv6 address by '
-                          'EUI-64: %s') % prefix)
-
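-# Illustrative sketch (not part of the original module): per the RFC 4291
-# EUI-64 expansion above, one would expect, e.g.:
-#     get_ipv6_addr_by_EUI64('2001:db8::/64', '00:16:3e:33:44:55')
-#     # -> IPAddress('2001:db8::216:3eff:fe33:4455')
-# The '^ (1 << 57)' term flips the universal/local bit of the interface id.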
-
-def is_enabled():
-    global _IS_IPV6_ENABLED
-
-    if _IS_IPV6_ENABLED is None:
-        disabled_ipv6_path = "/proc/sys/net/ipv6/conf/default/disable_ipv6"
-        if os.path.exists(disabled_ipv6_path):
-            with open(disabled_ipv6_path, 'r') as f:
-                disabled = f.read().strip()
-            _IS_IPV6_ENABLED = disabled == "0"
-        else:
-            _IS_IPV6_ENABLED = False
-        if not _IS_IPV6_ENABLED:
-            LOG.info(_LI("IPv6 is not enabled on this system."))
-    return _IS_IPV6_ENABLED
-
-
-def is_auto_address_subnet(subnet):
-    """Check if subnet is an auto address subnet."""
-    modes = [constants.IPV6_SLAAC, constants.DHCPV6_STATELESS]
-    return (subnet['ipv6_address_mode'] in modes
-            or subnet['ipv6_ra_mode'] in modes)
-
-
-def is_eui64_address(ip_address):
-    """Check if ip address is EUI64."""
-    ip = netaddr.IPAddress(ip_address)
-    # '0xfffe' addition is used to build EUI-64 from MAC (RFC4291)
-    # Look for it in the middle of the EUI-64 part of address
-    return ip.version == 6 and not ((ip & 0xffff000000) ^ 0xfffe000000)
-
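-# Illustrative sketch (not part of the original module): the mask above
-# looks for the 0xfffe filler in the middle of the interface identifier:
-#     is_eui64_address('2001:db8::216:3eff:fe33:4455')  # -> True
-#     is_eui64_address('2001:db8::1')                   # -> False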
-
-def is_ipv6_pd_enabled(subnet):
-    """Returns True if the subnetpool_id of the given subnet is equal to
-       constants.IPV6_PD_POOL_ID
-    """
-    return subnet.get('subnetpool_id') == constants.IPV6_PD_POOL_ID
diff --git a/neutron/common/rpc.py b/neutron/common/rpc.py
deleted file mode 100644 (file)
index beef015..0000000
+++ /dev/null
@@ -1,233 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from debtcollector import removals
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_messaging import serializer as om_serializer
-from oslo_service import service
-
-from neutron.common import exceptions
-from neutron import context
-
-
-LOG = logging.getLogger(__name__)
-
-
-TRANSPORT = None
-NOTIFIER = None
-
-ALLOWED_EXMODS = [
-    exceptions.__name__,
-]
-EXTRA_EXMODS = []
-
-
-TRANSPORT_ALIASES = {
-    'neutron.openstack.common.rpc.impl_fake': 'fake',
-    'neutron.openstack.common.rpc.impl_qpid': 'qpid',
-    'neutron.openstack.common.rpc.impl_kombu': 'rabbit',
-    'neutron.openstack.common.rpc.impl_zmq': 'zmq',
-    'neutron.rpc.impl_fake': 'fake',
-    'neutron.rpc.impl_qpid': 'qpid',
-    'neutron.rpc.impl_kombu': 'rabbit',
-    'neutron.rpc.impl_zmq': 'zmq',
-}
-
-# NOTE(salv-orlando): I am afraid this is a global variable. While not ideal,
-# globals are however widely used throughout the code base. It should be set
-# to True if the RPC server is not running in the current process space; this
-# will prevent get_connection from creating connections to the AMQP server.
-RPC_DISABLED = False
-
-
-def init(conf):
-    global TRANSPORT, NOTIFIER
-    exmods = get_allowed_exmods()
-    TRANSPORT = oslo_messaging.get_transport(conf,
-                                             allowed_remote_exmods=exmods,
-                                             aliases=TRANSPORT_ALIASES)
-    serializer = RequestContextSerializer()
-    NOTIFIER = oslo_messaging.Notifier(TRANSPORT, serializer=serializer)
-
-
-def cleanup():
-    global TRANSPORT, NOTIFIER
-    assert TRANSPORT is not None
-    assert NOTIFIER is not None
-    TRANSPORT.cleanup()
-    TRANSPORT = NOTIFIER = None
-
-
-def add_extra_exmods(*args):
-    EXTRA_EXMODS.extend(args)
-
-
-def clear_extra_exmods():
-    del EXTRA_EXMODS[:]
-
-
-def get_allowed_exmods():
-    return ALLOWED_EXMODS + EXTRA_EXMODS
-
-
-def get_client(target, version_cap=None, serializer=None):
-    assert TRANSPORT is not None
-    serializer = RequestContextSerializer(serializer)
-    return oslo_messaging.RPCClient(TRANSPORT,
-                                    target,
-                                    version_cap=version_cap,
-                                    serializer=serializer)
-
-
-def get_server(target, endpoints, serializer=None):
-    assert TRANSPORT is not None
-    serializer = RequestContextSerializer(serializer)
-    return oslo_messaging.get_rpc_server(TRANSPORT, target, endpoints,
-                                         'eventlet', serializer)
-
-
-def get_notifier(service=None, host=None, publisher_id=None):
-    assert NOTIFIER is not None
-    if not publisher_id:
-        publisher_id = "%s.%s" % (service, host or cfg.CONF.host)
-    return NOTIFIER.prepare(publisher_id=publisher_id)
-
-
-class RequestContextSerializer(om_serializer.Serializer):
-    """This serializer is used to convert RPC common context into
-    Neutron Context.
-    """
-    def __init__(self, base=None):
-        super(RequestContextSerializer, self).__init__()
-        self._base = base
-
-    def serialize_entity(self, ctxt, entity):
-        if not self._base:
-            return entity
-        return self._base.serialize_entity(ctxt, entity)
-
-    def deserialize_entity(self, ctxt, entity):
-        if not self._base:
-            return entity
-        return self._base.deserialize_entity(ctxt, entity)
-
-    def serialize_context(self, ctxt):
-        return ctxt.to_dict()
-
-    def deserialize_context(self, ctxt):
-        rpc_ctxt_dict = ctxt.copy()
-        user_id = rpc_ctxt_dict.pop('user_id', None)
-        if not user_id:
-            user_id = rpc_ctxt_dict.pop('user', None)
-        tenant_id = rpc_ctxt_dict.pop('tenant_id', None)
-        if not tenant_id:
-            tenant_id = rpc_ctxt_dict.pop('project_id', None)
-        return context.Context(user_id, tenant_id, **rpc_ctxt_dict)
-
-
-class Service(service.Service):
-    """Service object for binaries running on hosts.
-
-    A service enables rpc by listening to queues based on topic and host.
-    """
-    def __init__(self, host, topic, manager=None, serializer=None):
-        super(Service, self).__init__()
-        self.host = host
-        self.topic = topic
-        self.serializer = serializer
-        if manager is None:
-            self.manager = self
-        else:
-            self.manager = manager
-
-    def start(self):
-        super(Service, self).start()
-
-        self.conn = create_connection()
-        LOG.debug("Creating Consumer connection for Service %s",
-                  self.topic)
-
-        endpoints = [self.manager]
-
-        self.conn.create_consumer(self.topic, endpoints)
-
-        # Hook to allow the manager to do other initializations after
-        # the rpc connection is created.
-        if callable(getattr(self.manager, 'initialize_service_hook', None)):
-            self.manager.initialize_service_hook(self)
-
-        # Consume from all consumers in threads
-        self.conn.consume_in_threads()
-
-    def stop(self):
-        # Try to shut the connection down, but if we get any sort of
-        # error, go ahead and ignore it; we're shutting down anyway.
-        try:
-            self.conn.close()
-        except Exception:
-            pass
-        super(Service, self).stop()
-
-
-class Connection(object):
-
-    def __init__(self):
-        super(Connection, self).__init__()
-        self.servers = []
-
-    def create_consumer(self, topic, endpoints, fanout=False):
-        target = oslo_messaging.Target(
-            topic=topic, server=cfg.CONF.host, fanout=fanout)
-        server = get_server(target, endpoints)
-        self.servers.append(server)
-
-    def consume_in_threads(self):
-        for server in self.servers:
-            server.start()
-        return self.servers
-
-    def close(self):
-        for server in self.servers:
-            server.stop()
-        for server in self.servers:
-            server.wait()
-
-
-class VoidConnection(object):
-
-    def create_consumer(self, topic, endpoints, fanout=False):
-        pass
-
-    def consume_in_threads(self):
-        pass
-
-    def close(self):
-        pass
-
-
-# functions
-@removals.removed_kwarg('new')
-def create_connection(new=True):
-    # NOTE(salv-orlando): This is a clever interpretation of the factory
-    # design pattern aimed at preventing plugins from initializing RPC
-    # servers upon initialization when they are running in the REST over
-    # HTTP API server. The educated reader will see that this is a fairly
-    # dirty hack to avoid changing the initialization process of every
-    # plugin.
-    if RPC_DISABLED:
-        return VoidConnection()
-    return Connection()
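-
-
-# Illustrative sketch (not part of the original module): a typical consumer
-# setup, where 'MyEndpoints' is a hypothetical class exposing RPC methods:
-#
-#     conn = create_connection()
-#     conn.create_consumer('q-plugin', [MyEndpoints()])
-#     conn.consume_in_threads()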
diff --git a/neutron/common/test_lib.py b/neutron/common/test_lib.py
deleted file mode 100644 (file)
index 994de30..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-# Colorizer Code is borrowed from Twisted:
-# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
-#
-#    Permission is hereby granted, free of charge, to any person obtaining
-#    a copy of this software and associated documentation files (the
-#    "Software"), to deal in the Software without restriction, including
-#    without limitation the rights to use, copy, modify, merge, publish,
-#    distribute, sublicense, and/or sell copies of the Software, and to
-#    permit persons to whom the Software is furnished to do so, subject to
-#    the following conditions:
-#
-#    The above copyright notice and this permission notice shall be
-#    included in all copies or substantial portions of the Software.
-#
-#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-#    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-#    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-#    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-#    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-# Describes parameters used by different unit/functional tests.
-# A plugin-specific testing mechanism should import this dictionary
-# and override its values if needed (e.g., run_tests.py in
-# neutron/plugins/openvswitch/).
-test_config = {}
diff --git a/neutron/common/topics.py b/neutron/common/topics.py
deleted file mode 100644 (file)
index 15b8d7c..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-NETWORK = 'network'
-SUBNET = 'subnet'
-PORT = 'port'
-SECURITY_GROUP = 'security_group'
-L2POPULATION = 'l2population'
-DVR = 'dvr'
-RESOURCES = 'resources'
-
-CREATE = 'create'
-DELETE = 'delete'
-UPDATE = 'update'
-
-AGENT = 'q-agent-notifier'
-PLUGIN = 'q-plugin'
-L3PLUGIN = 'q-l3-plugin'
-REPORTS = 'q-reports-plugin'
-DHCP = 'q-dhcp-notifer'
-METERING_PLUGIN = 'q-metering-plugin'
-
-L3_AGENT = 'l3_agent'
-DHCP_AGENT = 'dhcp_agent'
-METERING_AGENT = 'metering_agent'
-
-RESOURCE_TOPIC_PATTERN = "neutron-vo-%(resource_type)s-%(version)s"
-
-
-def get_topic_name(prefix, table, operation, host=None):
-    """Create a topic name.
-
-    The topic name needs to be synced between the agent and the
-    plugin. The plugin will send a fanout message to all of the
-    listening agents so that the agents in turn can perform their
-    updates accordingly.
-
-    :param prefix: Common prefix for the plugin/agent message queues.
-    :param table: The table in question (NETWORK, SUBNET, PORT).
-    :param operation: The operation that invokes notification (CREATE,
-                      DELETE, UPDATE)
-    :param host: Add host to the topic
-    :returns: The topic name.
-    """
-    if host:
-        return '%s-%s-%s.%s' % (prefix, table, operation, host)
-    return '%s-%s-%s' % (prefix, table, operation)
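-
-
-# Illustrative sketch (not part of the original module): combining the
-# constants above, one would expect, e.g.:
-#
-#     get_topic_name(AGENT, PORT, UPDATE)
-#     # -> 'q-agent-notifier-port-update'
-#     get_topic_name(AGENT, PORT, UPDATE, host='compute-1')
-#     # -> 'q-agent-notifier-port-update.compute-1'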
diff --git a/neutron/common/utils.py b/neutron/common/utils.py
deleted file mode 100644 (file)
index 98e3bed..0000000
+++ /dev/null
@@ -1,546 +0,0 @@
-# Copyright 2011, VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# Borrowed from the nova code base; more utilities will be added/borrowed
-# as and when needed.
-
-"""Utilities and helper functions."""
-
-import collections
-import datetime
-import decimal
-import errno
-import functools
-import hashlib
-import multiprocessing
-import netaddr
-import os
-import random
-import signal
-import socket
-import sys
-import tempfile
-import uuid
-
-import debtcollector
-from eventlet.green import subprocess
-from oslo_concurrency import lockutils
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import excutils
-from oslo_utils import importutils
-from oslo_utils import reflection
-import six
-from stevedore import driver
-
-from neutron._i18n import _, _LE
-from neutron.common import constants as n_const
-
-TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
-LOG = logging.getLogger(__name__)
-SYNCHRONIZED_PREFIX = 'neutron-'
-
-synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX)
-
-
-class cache_method_results(object):
-    """This decorator is intended for object methods only."""
-
-    def __init__(self, func):
-        self.func = func
-        functools.update_wrapper(self, func)
-        self._first_call = True
-        self._not_cached = object()
-
-    def _get_from_cache(self, target_self, *args, **kwargs):
-        target_self_cls_name = reflection.get_class_name(target_self,
-                                                         fully_qualified=False)
-        func_name = "%(module)s.%(class)s.%(func_name)s" % {
-            'module': target_self.__module__,
-            'class': target_self_cls_name,
-            'func_name': self.func.__name__,
-        }
-        key = (func_name,) + args
-        if kwargs:
-            key += dict2tuple(kwargs)
-        try:
-            item = target_self._cache.get(key, self._not_cached)
-        except TypeError:
-            LOG.debug("Method %(func_name)s cannot be cached due to "
-                      "unhashable parameters: args: %(args)s, kwargs: "
-                      "%(kwargs)s",
-                      {'func_name': func_name,
-                       'args': args,
-                       'kwargs': kwargs})
-            return self.func(target_self, *args, **kwargs)
-
-        if item is self._not_cached:
-            item = self.func(target_self, *args, **kwargs)
-            target_self._cache.set(key, item, None)
-
-        return item
-
-    def __call__(self, target_self, *args, **kwargs):
-        target_self_cls_name = reflection.get_class_name(target_self,
-                                                         fully_qualified=False)
-        if not hasattr(target_self, '_cache'):
-            raise NotImplementedError(
-                _("Instance of class %(module)s.%(class)s must contain _cache "
-                  "attribute") % {
-                    'module': target_self.__module__,
-                    'class': target_self_cls_name})
-        if not target_self._cache:
-            if self._first_call:
-                LOG.debug("Instance of class %(module)s.%(class)s doesn't "
-                          "contain attribute _cache therefore results "
-                          "cannot be cached for %(func_name)s.",
-                          {'module': target_self.__module__,
-                           'class': target_self_cls_name,
-                           'func_name': self.func.__name__})
-                self._first_call = False
-            return self.func(target_self, *args, **kwargs)
-        return self._get_from_cache(target_self, *args, **kwargs)
-
-    def __get__(self, obj, objtype):
-        return functools.partial(self.__call__, obj)
-
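-# Illustrative sketch (not part of the original module): the decorated
-# method's instance must expose a '_cache' attribute offering dict-like
-# get(key, default) and set(key, value, ttl) methods, e.g.:
-#
-#     class NetworkHelper(object):        # hypothetical class
-#         _cache = some_cache_backend     # hypothetical cache object
-#
-#         @cache_method_results
-#         def get_network(self, network_id):
-#             ...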
-
-@debtcollector.removals.remove(message="This will be removed in the N cycle.")
-def read_cached_file(filename, cache_info, reload_func=None):
-    """Read from a file if it has been modified.
-
-    :param cache_info: dictionary to hold opaque cache.
-    :param reload_func: optional function to be called with data when
-                        file is reloaded due to a modification.
-
-    :returns: data from file
-
-    """
-    mtime = os.path.getmtime(filename)
-    if not cache_info or mtime != cache_info.get('mtime'):
-        LOG.debug("Reloading cached file %s", filename)
-        with open(filename) as fap:
-            cache_info['data'] = fap.read()
-        cache_info['mtime'] = mtime
-        if reload_func:
-            reload_func(cache_info['data'])
-    return cache_info['data']
-
-
-@debtcollector.removals.remove(message="This will be removed in the N cycle.")
-def find_config_file(options, config_file):
-    """Return the first config file found.
-
-    We search for the paste config file in the following order:
-    * If the --config-file option is used, use that
-    * Search for the configuration files via common cfg directories
-
-    :returns: full path to the config file, or None if no config file is found
-    """
-    fix_path = lambda p: os.path.abspath(os.path.expanduser(p))
-    if options.get('config_file'):
-        if os.path.exists(options['config_file']):
-            return fix_path(options['config_file'])
-
-    dir_to_common = os.path.dirname(os.path.abspath(__file__))
-    root = os.path.join(dir_to_common, '..', '..', '..', '..')
-    # Handle standard directory search for the config file
-    config_file_dirs = [fix_path(os.path.join(os.getcwd(), 'etc')),
-                        fix_path(os.path.join('~', '.neutron-venv', 'etc',
-                                              'neutron')),
-                        fix_path('~'),
-                        os.path.join(cfg.CONF.state_path, 'etc'),
-                        os.path.join(cfg.CONF.state_path, 'etc', 'neutron'),
-                        fix_path(os.path.join('~', '.local',
-                                              'etc', 'neutron')),
-                        '/usr/etc/neutron',
-                        '/usr/local/etc/neutron',
-                        '/etc/neutron/',
-                        '/etc']
-
-    if 'plugin' in options:
-        config_file_dirs = [
-            os.path.join(x, 'neutron', 'plugins', options['plugin'])
-            for x in config_file_dirs
-        ]
-
-    if os.path.exists(os.path.join(root, 'plugins')):
-        plugins = [fix_path(os.path.join(root, 'plugins', p, 'etc'))
-                   for p in os.listdir(os.path.join(root, 'plugins'))]
-        plugins = [p for p in plugins if os.path.isdir(p)]
-        config_file_dirs.extend(plugins)
-
-    for cfg_dir in config_file_dirs:
-        cfg_file = os.path.join(cfg_dir, config_file)
-        if os.path.exists(cfg_file):
-            return cfg_file
-
-
-def ensure_dir(dir_path):
-    """Ensure a directory with 755 permissions mode."""
-    try:
-        os.makedirs(dir_path, 0o755)
-    except OSError as e:
-        # If the directory already existed, don't raise the error.
-        if e.errno != errno.EEXIST:
-            raise
-
-
-def _subprocess_setup():
-    # Python installs a SIGPIPE handler by default. This is usually not what
-    # non-Python subprocesses expect.
-    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
-
-def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
-                     env=None, preexec_fn=_subprocess_setup, close_fds=True):
-
-    return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
-                            stderr=stderr, preexec_fn=preexec_fn,
-                            close_fds=close_fds, env=env)
-
-
-def parse_mappings(mapping_list, unique_values=True):
-    """Parse a list of mapping strings into a dictionary.
-
-    :param mapping_list: a list of strings of the form '<key>:<value>'
-    :param unique_values: values must be unique if True
-    :returns: a dict mapping keys to values
-    """
-    mappings = {}
-    for mapping in mapping_list:
-        mapping = mapping.strip()
-        if not mapping:
-            continue
-        split_result = mapping.split(':')
-        if len(split_result) != 2:
-            raise ValueError(_("Invalid mapping: '%s'") % mapping)
-        key = split_result[0].strip()
-        if not key:
-            raise ValueError(_("Missing key in mapping: '%s'") % mapping)
-        value = split_result[1].strip()
-        if not value:
-            raise ValueError(_("Missing value in mapping: '%s'") % mapping)
-        if key in mappings:
-            raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not "
-                               "unique") % {'key': key, 'mapping': mapping})
-        if unique_values and value in mappings.values():
-            raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' "
-                               "not unique") % {'value': value,
-                                                'mapping': mapping})
-        mappings[key] = value
-    return mappings
-
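-# Illustrative sketch (not part of the original module): with the parsing
-# rules above, one would expect, e.g.:
-#     parse_mappings(['physnet1:br-eth1', 'physnet2:br-eth2'])
-#     # -> {'physnet1': 'br-eth1', 'physnet2': 'br-eth2'}
-#     parse_mappings(['a:x', 'b:x'])  # raises ValueError (duplicate value)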
-
-def get_hostname():
-    return socket.gethostname()
-
-
-def get_first_host_ip(net, ip_version):
-    return str(netaddr.IPAddress(net.first + 1, ip_version))
-
-
-def compare_elements(a, b):
-    """Compare elements if a and b have same elements.
-
-    This method doesn't consider ordering
-    """
-    if a is None:
-        a = []
-    if b is None:
-        b = []
-    return set(a) == set(b)
-
-
-def safe_sort_key(value):
-    """Return value hash or build one for dictionaries."""
-    if isinstance(value, collections.Mapping):
-        return sorted(value.items())
-    return value
-
-
-def dict2str(dic):
-    return ','.join("%s=%s" % (key, val)
-                    for key, val in sorted(six.iteritems(dic)))
-
-
-def str2dict(string):
-    res_dict = {}
-    for keyvalue in string.split(','):
-        (key, value) = keyvalue.split('=', 1)
-        res_dict[key] = value
-    return res_dict
-
-
-def dict2tuple(d):
-    items = list(d.items())
-    items.sort()
-    return tuple(items)
-
-
-def diff_list_of_dict(old_list, new_list):
-    new_set = set([dict2str(l) for l in new_list])
-    old_set = set([dict2str(l) for l in old_list])
-    added = new_set - old_set
-    removed = old_set - new_set
-    return [str2dict(a) for a in added], [str2dict(r) for r in removed]
-
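-# Illustrative sketch (not part of the original module): one would expect,
-# e.g.:
-#     diff_list_of_dict([{'ip': '10.0.0.1'}], [{'ip': '10.0.0.2'}])
-#     # -> ([{'ip': '10.0.0.2'}], [{'ip': '10.0.0.1'}])  # (added, removed)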
-
-def is_extension_supported(plugin, ext_alias):
-    return ext_alias in getattr(
-        plugin, "supported_extension_aliases", [])
-
-
-def log_opt_values(log):
-    cfg.CONF.log_opt_values(log, logging.DEBUG)
-
-
-def get_random_mac(base_mac):
-    mac = [int(base_mac[0], 16), int(base_mac[1], 16),
-           int(base_mac[2], 16), random.randint(0x00, 0xff),
-           random.randint(0x00, 0xff), random.randint(0x00, 0xff)]
-    if base_mac[3] != '00':
-        mac[3] = int(base_mac[3], 16)
-    return ':'.join(["%02x" % x for x in mac])
-
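-# Illustrative sketch (not part of the original module): with a typical
-# base_mac of ['fa', '16', '3e', '00'], the first three octets are kept
-# and the last three are randomized, e.g. 'fa:16:3e:5d:0c:a1'; a non-'00'
-# fourth element would pin the fourth octet as well.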
-
-def get_random_string(length):
-    """Get a random hex string of the specified length.
-
-    Based on the Cinder library (cinder/transfer/api.py).
-    """
-    rndstr = ""
-    random.seed(datetime.datetime.now().microsecond)
-    while len(rndstr) < length:
-        base_str = str(random.random()).encode('utf-8')
-        rndstr += hashlib.sha224(base_str).hexdigest()
-
-    return rndstr[0:length]
-
-
-def get_dhcp_agent_device_id(network_id, host):
-    # Split host so as to always use only the hostname and
-    # not the domain name. This will guarantee consistency
-    # whether a local hostname or an fqdn is passed in.
-    local_hostname = host.split('.')[0]
-    host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(local_hostname))
-    return 'dhcp%s-%s' % (host_uuid, network_id)
-
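-# Illustrative sketch (not part of the original module): both
-# get_dhcp_agent_device_id(net_id, 'net-node-1') and
-# get_dhcp_agent_device_id(net_id, 'net-node-1.example.org') yield the
-# same 'dhcp<uuid5-of-hostname>-<net_id>' identifier, since only the
-# short hostname is hashed.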
-
-def cpu_count():
-    try:
-        return multiprocessing.cpu_count()
-    except NotImplementedError:
-        return 1
-
-
-class exception_logger(object):
-    """Wrap a function and log raised exception
-
-    :param logger: the logger to log the exception default is LOG.exception
-
-    :returns: origin value if no exception raised; re-raise the exception if
-              any occurred
-
-    """
-    def __init__(self, logger=None):
-        self.logger = logger
-
-    def __call__(self, func):
-        if self.logger is None:
-            LOG = logging.getLogger(func.__module__)
-            self.logger = LOG.exception
-
-        def call(*args, **kwargs):
-            try:
-                return func(*args, **kwargs)
-            except Exception as e:
-                with excutils.save_and_reraise_exception():
-                    self.logger(e)
-        return call
-
-
-def get_other_dvr_serviced_device_owners():
-    """Return device_owner names for ports that should be serviced by DVR
-
-    This doesn't return DEVICE_OWNER_COMPUTE_PREFIX since it is a
-    prefix, not a complete device_owner name, and so must be handled
-    separately (see is_dvr_serviced() below).
-    """
-    return [n_const.DEVICE_OWNER_LOADBALANCER,
-            n_const.DEVICE_OWNER_LOADBALANCERV2,
-            n_const.DEVICE_OWNER_DHCP]
-
-
-def is_dvr_serviced(device_owner):
-    """Check if the port need to be serviced by DVR
-
-    Helper function to check the device owners of the
-    ports in the compute and service node to make sure
-    if they are required for DVR or any service directly or
-    indirectly associated with DVR.
-    """
-    return (device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX) or
-            device_owner in get_other_dvr_serviced_device_owners())
-
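-# Illustrative sketch (not part of the original module): assuming the usual
-# constants ('compute:' prefix, 'network:dhcp' for DHCP ports), one would
-# expect, e.g.:
-#     is_dvr_serviced('compute:nova')            # -> True
-#     is_dvr_serviced('network:dhcp')            # -> True
-#     is_dvr_serviced('network:router_gateway')  # -> False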
-
-@debtcollector.removals.remove(message="This will be removed in the N cycle.")
-def get_keystone_url(conf):
-    if conf.auth_uri:
-        auth_uri = conf.auth_uri.rstrip('/')
-    else:
-        auth_uri = ('%(protocol)s://%(host)s:%(port)s' %
-            {'protocol': conf.auth_protocol,
-             'host': conf.auth_host,
-             'port': conf.auth_port})
-    # NOTE(ihrachys): all existing consumers assume version 2.0
-    return '%s/v2.0/' % auth_uri
-
-
-def ip_to_cidr(ip, prefix=None):
-    """Convert an ip with no prefix to cidr notation
-
-    :param ip: An ipv4 or ipv6 address.  Convertable to netaddr.IPNetwork.
-    :param prefix: Optional prefix.  If None, the default 32 will be used for
-        ipv4 and 128 for ipv6.
-    """
-    net = netaddr.IPNetwork(ip)
-    if prefix is not None:
-        # Can't pass ip and prefix separately.  Must concatenate strings.
-        net = netaddr.IPNetwork(str(net.ip) + '/' + str(prefix))
-    return str(net)
-
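-# Illustrative sketch (not part of the original module): one would expect,
-# e.g.:
-#     ip_to_cidr('192.168.1.5')      # -> '192.168.1.5/32'
-#     ip_to_cidr('192.168.1.5', 24)  # -> '192.168.1.5/24'
-#     ip_to_cidr('2001:db8::1')      # -> '2001:db8::1/128'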
-
-def fixed_ip_cidrs(fixed_ips):
-    """Create a list of a port's fixed IPs in cidr notation.
-
-    :param fixed_ips: A neutron port's list of fixed_ip dicts
-    """
-    return [ip_to_cidr(fixed_ip['ip_address'], fixed_ip.get('prefixlen'))
-            for fixed_ip in fixed_ips]
-
-
-def is_cidr_host(cidr):
-    """Determines if the cidr passed in represents a single host network
-
-    :param cidr: Either an ipv4 or ipv6 cidr.
-    :returns: True if the cidr is /32 for ipv4 or /128 for ipv6.
-    :raises ValueError: raises if cidr does not contain a '/'.  This disallows
-        plain IP addresses specifically to avoid ambiguity.
-    """
-    if '/' not in str(cidr):
-        raise ValueError("cidr doesn't contain a '/'")
-    net = netaddr.IPNetwork(cidr)
-    if net.version == 4:
-        return net.prefixlen == n_const.IPv4_BITS
-    return net.prefixlen == n_const.IPv6_BITS
-
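-# Illustrative sketch (not part of the original module): one would expect,
-# e.g.:
-#     is_cidr_host('10.0.0.21/32')  # -> True
-#     is_cidr_host('10.0.0.0/24')   # -> False
-#     is_cidr_host('10.0.0.21')     # raises ValueError (no '/')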
-
-def ip_version_from_int(ip_version_int):
-    if ip_version_int == 4:
-        return n_const.IPv4
-    if ip_version_int == 6:
-        return n_const.IPv6
-    raise ValueError(_('Illegal IP version number'))
-
-
-def is_port_trusted(port):
-    """Used to determine if port can be trusted not to attack network.
-
-    Trust is currently based on the device_owner field starting with 'network:'
-    since we restrict who can use that in the default policy.json file.
-    """
-    return port['device_owner'].startswith(n_const.DEVICE_OWNER_NETWORK_PREFIX)
-
-
-class DelayedStringRenderer(object):
-    """Takes a callable and its args and calls when __str__ is called
-
-    Useful for when an argument to a logging statement is expensive to
-    create. This will prevent the callable from being called if it's
-    never converted to a string.
-    """
-
-    def __init__(self, function, *args, **kwargs):
-        self.function = function
-        self.args = args
-        self.kwargs = kwargs
-
-    def __str__(self):
-        return str(self.function(*self.args, **self.kwargs))
-
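-# Illustrative sketch (not part of the original module): defer an expensive
-# repr so it is only built if the log line is actually emitted, where
-# 'collect_router_info' is a hypothetical expensive callable:
-#     LOG.debug("router state: %s",
-#               DelayedStringRenderer(collect_router_info, router_id))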
-
-def camelize(s):
-    return ''.join(s.replace('_', ' ').title().split())
-
-
-def round_val(val):
-    # we rely on decimal module since it behaves consistently across Python
-    # versions (2.x vs. 3.x)
-    return int(decimal.Decimal(val).quantize(decimal.Decimal('1'),
-                                             rounding=decimal.ROUND_HALF_UP))
-
-
-def replace_file(file_name, data, file_mode=0o644):
-    """Replaces the contents of file_name with data in a safe manner.
-
-    First write to a temp file and then rename. Since POSIX renames are
-    atomic, the file is unlikely to be corrupted by competing writes.
-
-    We create the tempfile on the same device to ensure that it can be renamed.
-    """
-
-    base_dir = os.path.dirname(os.path.abspath(file_name))
-    with tempfile.NamedTemporaryFile('w+',
-                                     dir=base_dir,
-                                     delete=False) as tmp_file:
-        tmp_file.write(data)
-    os.chmod(tmp_file.name, file_mode)
-    os.rename(tmp_file.name, file_name)
-
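-# Illustrative sketch (not part of the original module): atomically publish
-# a generated config file, where the path is hypothetical:
-#     replace_file('/var/lib/neutron/dhcp/opts', 'option:router,10.0.0.1\n')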
-
-def load_class_by_alias_or_classname(namespace, name):
-    """Load class using stevedore alias or the class name
-    :param namespace: namespace where the alias is defined
-    :param name: alias or class name of the class to be loaded
-    :returns class if calls can be loaded
-    :raises ImportError if class cannot be loaded
-    """
-
-    if not name:
-        LOG.error(_LE("Alias or class name is not set"))
-        raise ImportError(_("Class not found."))
-    try:
-        # Try to resolve class by alias
-        mgr = driver.DriverManager(namespace, name)
-        class_to_load = mgr.driver
-    except RuntimeError:
-        e1_info = sys.exc_info()
-        # Fallback to class name
-        try:
-            class_to_load = importutils.import_class(name)
-        except (ImportError, ValueError):
-            LOG.error(_LE("Error loading class by alias"),
-                      exc_info=e1_info)
-            LOG.error(_LE("Error loading class by class name"),
-                      exc_info=True)
-            raise ImportError(_("Class not found."))
-    return class_to_load
-
-
-def safe_decode_utf8(s):
-    if six.PY3 and isinstance(s, bytes):
-        return s.decode('utf-8', 'surrogateescape')
-    return s
diff --git a/neutron/context.py b/neutron/context.py
deleted file mode 100644 (file)
index 4a37263..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Context: context for security/db session."""
-
-import copy
-import datetime
-
-from oslo_context import context as oslo_context
-
-from neutron.db import api as db_api
-from neutron import policy
-
-
-class ContextBase(oslo_context.RequestContext):
-    """Security context and request information.
-
-    Represents the user taking a given action within the system.
-
-    """
-
-    def __init__(self, user_id, tenant_id, is_admin=None, roles=None,
-                 timestamp=None, request_id=None, tenant_name=None,
-                 user_name=None, overwrite=True, auth_token=None,
-                 is_advsvc=None, **kwargs):
-        """Object initialization.
-
-        :param overwrite: Set to False to ensure that the greenthread local
-            copy of the index is not overwritten.
-
-        :param kwargs: Extra arguments that might be present but are
-            ignored, as they possibly came in from older rpc messages.
-        """
-        super(ContextBase, self).__init__(auth_token=auth_token,
-                                          user=user_id, tenant=tenant_id,
-                                          is_admin=is_admin,
-                                          request_id=request_id,
-                                          overwrite=overwrite)
-        self.user_name = user_name
-        self.tenant_name = tenant_name
-
-        if not timestamp:
-            timestamp = datetime.datetime.utcnow()
-        self.timestamp = timestamp
-        self.roles = roles or []
-        self.is_advsvc = is_advsvc
-        if self.is_advsvc is None:
-            self.is_advsvc = self.is_admin or policy.check_is_advsvc(self)
-        if self.is_admin is None:
-            self.is_admin = policy.check_is_admin(self)
-
-    @property
-    def project_id(self):
-        return self.tenant
-
-    @property
-    def tenant_id(self):
-        return self.tenant
-
-    @tenant_id.setter
-    def tenant_id(self, tenant_id):
-        self.tenant = tenant_id
-
-    @property
-    def user_id(self):
-        return self.user
-
-    @user_id.setter
-    def user_id(self, user_id):
-        self.user = user_id
-
-    def to_dict(self):
-        context = super(ContextBase, self).to_dict()
-        context.update({
-            'user_id': self.user_id,
-            'tenant_id': self.tenant_id,
-            'project_id': self.project_id,
-            'roles': self.roles,
-            'timestamp': str(self.timestamp),
-            'tenant_name': self.tenant_name,
-            'project_name': self.tenant_name,
-            'user_name': self.user_name,
-        })
-        return context
-
-    @classmethod
-    def from_dict(cls, values):
-        return cls(**values)
-
-    def elevated(self):
-        """Return a version of this context with admin flag set."""
-        context = copy.copy(self)
-        context.is_admin = True
-
-        if 'admin' not in [x.lower() for x in context.roles]:
-            context.roles = context.roles + ["admin"]
-
-        return context
-
-
-class Context(ContextBase):
-    def __init__(self, *args, **kwargs):
-        super(Context, self).__init__(*args, **kwargs)
-        self._session = None
-
-    @property
-    def session(self):
-        if self._session is None:
-            self._session = db_api.get_session()
-        return self._session
-
-
-def get_admin_context():
-    return Context(user_id=None,
-                   tenant_id=None,
-                   is_admin=True,
-                   overwrite=False)
-
-
-def get_admin_context_without_session():
-    return ContextBase(user_id=None,
-                       tenant_id=None,
-                       is_admin=True)
diff --git a/neutron/core_extensions/__init__.py b/neutron/core_extensions/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/core_extensions/base.py b/neutron/core_extensions/base.py
deleted file mode 100644 (file)
index 67cbf87..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import six
-
-
-NETWORK = 'network'
-PORT = 'port'
-
-
-CORE_RESOURCES = [NETWORK, PORT]
-
-
-@six.add_metaclass(abc.ABCMeta)
-class CoreResourceExtension(object):
-
-    @abc.abstractmethod
-    def process_fields(self, context, resource_type,
-                       requested_resource, actual_resource):
-        """Process extension fields.
-
-        :param context: neutron api request context
-        :param resource_type: core resource type (one of CORE_RESOURCES)
-        :param requested_resource: resource dict that contains extension fields
-        :param actual_resource: actual resource dict known to plugin
-        """
-
-    @abc.abstractmethod
-    def extract_fields(self, resource_type, resource):
-        """Extract extension fields.
-
-        :param resource_type: core resource type (one of CORE_RESOURCES)
-        :param resource: resource dict that contains extension fields
-        """
diff --git a/neutron/core_extensions/qos.py b/neutron/core_extensions/qos.py
deleted file mode 100644 (file)
index 72fb898..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import exceptions as n_exc
-from neutron.core_extensions import base
-from neutron.db import api as db_api
-from neutron import manager
-from neutron.objects.qos import policy as policy_object
-from neutron.plugins.common import constants as plugin_constants
-from neutron.services.qos import qos_consts
-
-
-class QosCoreResourceExtension(base.CoreResourceExtension):
-
-    @property
-    def plugin_loaded(self):
-        if not hasattr(self, '_plugin_loaded'):
-            service_plugins = manager.NeutronManager.get_service_plugins()
-            self._plugin_loaded = plugin_constants.QOS in service_plugins
-        return self._plugin_loaded
-
-    def _get_policy_obj(self, context, policy_id):
-        obj = policy_object.QosPolicy.get_by_id(context, policy_id)
-        if obj is None:
-            raise n_exc.QosPolicyNotFound(policy_id=policy_id)
-        return obj
-
-    def _update_port_policy(self, context, port, port_changes):
-        old_policy = policy_object.QosPolicy.get_port_policy(
-            context, port['id'])
-        if old_policy:
-            old_policy.detach_port(port['id'])
-
-        qos_policy_id = port_changes.get(qos_consts.QOS_POLICY_ID)
-        if qos_policy_id is not None:
-            policy = self._get_policy_obj(context, qos_policy_id)
-            policy.attach_port(port['id'])
-        port[qos_consts.QOS_POLICY_ID] = qos_policy_id
-
-    def _update_network_policy(self, context, network, network_changes):
-        old_policy = policy_object.QosPolicy.get_network_policy(
-            context, network['id'])
-        if old_policy:
-            old_policy.detach_network(network['id'])
-
-        qos_policy_id = network_changes.get(qos_consts.QOS_POLICY_ID)
-        if qos_policy_id is not None:
-            policy = self._get_policy_obj(context, qos_policy_id)
-            policy.attach_network(network['id'])
-        network[qos_consts.QOS_POLICY_ID] = qos_policy_id
-
-    def _exec(self, method_name, context, kwargs):
-        with db_api.autonested_transaction(context.session):
-            return getattr(self, method_name)(context=context, **kwargs)
-
-    def process_fields(self, context, resource_type,
-                       requested_resource, actual_resource):
-        if (qos_consts.QOS_POLICY_ID in requested_resource and
-            self.plugin_loaded):
-            self._exec('_update_%s_policy' % resource_type, context,
-                       {resource_type: actual_resource,
-                        "%s_changes" % resource_type: requested_resource})
-
-    def extract_fields(self, resource_type, resource):
-        if not self.plugin_loaded:
-            return {}
-
-        binding = resource['qos_policy_binding']
-        qos_policy_id = binding['policy_id'] if binding else None
-        return {qos_consts.QOS_POLICY_ID: qos_policy_id}
diff --git a/neutron/db/__init__.py b/neutron/db/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/db/address_scope_db.py b/neutron/db/address_scope_db.py
deleted file mode 100644 (file)
index 5152f74..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2015 Huawei Technologies Co.,LTD.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_utils import uuidutils
-import sqlalchemy as sa
-from sqlalchemy.orm import exc
-
-from neutron._i18n import _
-from neutron.api.v2 import attributes as attr
-from neutron.db import model_base
-from neutron.extensions import address_scope as ext_address_scope
-
-
-class AddressScope(model_base.BASEV2, model_base.HasId, model_base.HasTenant):
-    """Represents a neutron address scope."""
-
-    __tablename__ = "address_scopes"
-
-    name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False)
-    shared = sa.Column(sa.Boolean, nullable=False)
-    ip_version = sa.Column(sa.Integer(), nullable=False)
-
-
-class AddressScopeDbMixin(ext_address_scope.AddressScopePluginBase):
-    """Mixin class to add address scope to db_base_plugin_v2."""
-
-    __native_bulk_support = True
-
-    def _make_address_scope_dict(self, address_scope, fields=None):
-        res = {'id': address_scope['id'],
-               'name': address_scope['name'],
-               'tenant_id': address_scope['tenant_id'],
-               'shared': address_scope['shared'],
-               'ip_version': address_scope['ip_version']}
-        return self._fields(res, fields)
-
-    def _get_address_scope(self, context, id):
-        try:
-            return self._get_by_id(context, AddressScope, id)
-        except exc.NoResultFound:
-            raise ext_address_scope.AddressScopeNotFound(address_scope_id=id)
-
-    def is_address_scope_owned_by_tenant(self, context, id):
-        """Check if address scope id is owned by the tenant or not.
-
-        AddressScopeNotFound is raised if the
-          - address scope id doesn't exist or
-          - if the (unshared) address scope id is not owned by this tenant.
-
-        @return Returns true if the user is admin or tenant is owner
-                Returns false if the address scope id is shared and not
-                owned by the tenant.
-        """
-        address_scope = self._get_address_scope(context, id)
-        return context.is_admin or (
-            address_scope.tenant_id == context.tenant_id)
-
-    def get_ip_version_for_address_scope(self, context, id):
-        address_scope = self._get_address_scope(context, id)
-        return address_scope.ip_version
-
-    def create_address_scope(self, context, address_scope):
-        """Create an address scope."""
-        a_s = address_scope['address_scope']
-        address_scope_id = a_s.get('id') or uuidutils.generate_uuid()
-        with context.session.begin(subtransactions=True):
-            pool_args = {'tenant_id': a_s['tenant_id'],
-                         'id': address_scope_id,
-                         'name': a_s['name'],
-                         'shared': a_s['shared'],
-                         'ip_version': a_s['ip_version']}
-            address_scope = AddressScope(**pool_args)
-            context.session.add(address_scope)
-
-        return self._make_address_scope_dict(address_scope)
-
-    def update_address_scope(self, context, id, address_scope):
-        a_s = address_scope['address_scope']
-        with context.session.begin(subtransactions=True):
-            address_scope = self._get_address_scope(context, id)
-            if address_scope.shared and not a_s.get('shared', True):
-                reason = _("Shared address scope can't be unshared")
-                raise ext_address_scope.AddressScopeUpdateError(
-                    address_scope_id=id, reason=reason)
-            address_scope.update(a_s)
-
-        return self._make_address_scope_dict(address_scope)
-
-    def get_address_scope(self, context, id, fields=None):
-        address_scope = self._get_address_scope(context, id)
-        return self._make_address_scope_dict(address_scope, fields)
-
-    def get_address_scopes(self, context, filters=None, fields=None,
-                           sorts=None, limit=None, marker=None,
-                           page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'addrscope', limit, marker)
-        collection = self._get_collection(context, AddressScope,
-                                          self._make_address_scope_dict,
-                                          filters=filters, fields=fields,
-                                          sorts=sorts,
-                                          limit=limit,
-                                          marker_obj=marker_obj,
-                                          page_reverse=page_reverse)
-        return collection
-
-    def get_address_scopes_count(self, context, filters=None):
-        return self._get_collection_count(context, AddressScope,
-                                          filters=filters)
-
-    def delete_address_scope(self, context, id):
-        with context.session.begin(subtransactions=True):
-            if self._get_subnetpools_by_address_scope_id(context, id):
-                raise ext_address_scope.AddressScopeInUse(address_scope_id=id)
-            address_scope = self._get_address_scope(context, id)
-            context.session.delete(address_scope)
diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py
deleted file mode 100644 (file)
index 8a99d75..0000000
+++ /dev/null
@@ -1,426 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import datetime
-
-from eventlet import greenthread
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-import six
-import sqlalchemy as sa
-from sqlalchemy.orm import exc
-from sqlalchemy import sql
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron import context
-from neutron.db import model_base
-from neutron.extensions import agent as ext_agent
-from neutron.extensions import availability_zone as az_ext
-from neutron import manager
-
-LOG = logging.getLogger(__name__)
-
-AGENT_OPTS = [
-    cfg.IntOpt('agent_down_time', default=75,
-               help=_("Seconds to regard the agent is down; should be at "
-                      "least twice report_interval, to be sure the "
-                      "agent is down for good.")),
-    cfg.StrOpt('dhcp_load_type', default='networks',
-               choices=['networks', 'subnets', 'ports'],
-               help=_('Representing the resource type whose load is being '
-                      'reported by the agent. This can be "networks", '
-                      '"subnets" or "ports". '
-                      'When specified (Default is networks), the server will '
-                      'extract particular load sent as part of its agent '
-                      'configuration object from the agent report state, '
-                      'which is the number of resources being consumed, at '
-                      'every report_interval. '
-                      'dhcp_load_type can be used in combination with '
-                      'network_scheduler_driver = '
-                      'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler '
-                      'When the network_scheduler_driver is WeightScheduler, '
-                      'dhcp_load_type can be configured to represent the '
-                      'choice for the resource being balanced. '
-                      'Example: dhcp_load_type=networks')),
-    cfg.BoolOpt('enable_new_agents', default=True,
-                help=_("Agent starts with admin_state_up=False when "
-                       "enable_new_agents=False. In the case, user's "
-                       "resources will not be scheduled automatically to the "
-                       "agent until admin changes admin_state_up to True.")),
-]
-cfg.CONF.register_opts(AGENT_OPTS)
-
-
-class Agent(model_base.BASEV2, model_base.HasId):
-    """Represents agents running in neutron deployments."""
-
-    __table_args__ = (
-        sa.UniqueConstraint('agent_type', 'host',
-                            name='uniq_agents0agent_type0host'),
-        model_base.BASEV2.__table_args__
-    )
-
-    # L3 agent, DHCP agent, OVS agent, LinuxBridge
-    agent_type = sa.Column(sa.String(255), nullable=False)
-    binary = sa.Column(sa.String(255), nullable=False)
-    # TOPIC is a fanout exchange topic
-    topic = sa.Column(sa.String(255), nullable=False)
-    # TOPIC.host is a target topic
-    host = sa.Column(sa.String(255), nullable=False)
-    availability_zone = sa.Column(sa.String(255))
-    admin_state_up = sa.Column(sa.Boolean, default=True,
-                               server_default=sql.true(), nullable=False)
-    # the time when first report came from agents
-    created_at = sa.Column(sa.DateTime, nullable=False)
-    # the time when first report came after agents start
-    started_at = sa.Column(sa.DateTime, nullable=False)
-    # updated when agents report
-    heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
-    # description is note for admin user
-    description = sa.Column(sa.String(attributes.DESCRIPTION_MAX_LEN))
-    # configurations: a JSON-encoded dict string; 4095 characters should
-    # be enough
-    configurations = sa.Column(sa.String(4095), nullable=False)
-    # load - number of resources hosted by the agent
-    load = sa.Column(sa.Integer, server_default='0', nullable=False)
-
-    @property
-    def is_active(self):
-        return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)
-
-
-class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase):
-    """Mixin class to add availability_zone extension to AgentDbMixin."""
-
-    def _list_availability_zones(self, context, filters=None):
-        result = {}
-        query = self._get_collection_query(context, Agent, filters=filters)
-        for agent in query.group_by(Agent.admin_state_up,
-                                    Agent.availability_zone,
-                                    Agent.agent_type):
-            if not agent.availability_zone:
-                continue
-            if agent.agent_type == constants.AGENT_TYPE_DHCP:
-                resource = 'network'
-            elif agent.agent_type == constants.AGENT_TYPE_L3:
-                resource = 'router'
-            else:
-                continue
-            key = (agent.availability_zone, resource)
-            result[key] = agent.admin_state_up or result.get(key, False)
-        return result
-
-    def get_availability_zones(self, context, filters=None, fields=None,
-                               sorts=None, limit=None, marker=None,
-                               page_reverse=False):
-        """Return a list of availability zones."""
-        # NOTE(hichihara): 'tenant_id' is a dummy value for the policy
-        # check; it is not visible via the API.
-        return [{'state': 'available' if v else 'unavailable',
-                 'name': k[0], 'resource': k[1],
-                 'tenant_id': context.tenant_id}
-                for k, v in six.iteritems(self._list_availability_zones(
-                                           context, filters))]
-
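
A hypothetical return value from get_availability_zones(), for a
deployment with a single zone "nova" hosting both DHCP and L3 agents
(values are illustrative only):

    [{'name': 'nova', 'resource': 'network', 'state': 'available',
      'tenant_id': '<request tenant>'},
     {'name': 'nova', 'resource': 'router', 'state': 'available',
      'tenant_id': '<request tenant>'}]
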
-    def validate_availability_zones(self, context, resource_type,
-                                    availability_zones):
-        """Verify that the availability zones exist."""
-        if not availability_zones:
-            return
-        if resource_type == 'network':
-            agent_type = constants.AGENT_TYPE_DHCP
-        elif resource_type == 'router':
-            agent_type = constants.AGENT_TYPE_L3
-        else:
-            return
-        query = context.session.query(Agent.availability_zone).filter_by(
-                    agent_type=agent_type).group_by(Agent.availability_zone)
-        query = query.filter(Agent.availability_zone.in_(availability_zones))
-        azs = [item[0] for item in query]
-        diff = set(availability_zones) - set(azs)
-        if diff:
-            raise az_ext.AvailabilityZoneNotFound(availability_zone=diff.pop())
-
-
-class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
-    """Mixin class to add agent extension to db_base_plugin_v2."""
-
-    def _get_agent(self, context, id):
-        try:
-            agent = self._get_by_id(context, Agent, id)
-        except exc.NoResultFound:
-            raise ext_agent.AgentNotFound(id=id)
-        return agent
-
-    def get_enabled_agent_on_host(self, context, agent_type, host):
-        """Return agent of agent_type for the specified host."""
-        query = context.session.query(Agent)
-        query = query.filter(Agent.agent_type == agent_type,
-                             Agent.host == host,
-                             Agent.admin_state_up == sql.true())
-        try:
-            agent = query.one()
-        except exc.NoResultFound:
-            LOG.debug('No enabled %(agent_type)s agent on host '
-                      '%(host)s', {'agent_type': agent_type, 'host': host})
-            return
-        if self.is_agent_down(agent.heartbeat_timestamp):
-            LOG.warn(_LW('%(agent_type)s agent %(agent_id)s is not active'),
-                     {'agent_type': agent_type, 'agent_id': agent.id})
-        return agent
-
-    @classmethod
-    def is_agent_down(cls, heart_beat_time):
-        return timeutils.is_older_than(heart_beat_time,
-                                       cfg.CONF.agent_down_time)
-
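
A minimal self-contained sketch of the liveness rule above, assuming the
default agent_down_time of 75 seconds (names here are illustrative, not
part of the module):

    import datetime

    AGENT_DOWN_TIME = 75  # stands in for cfg.CONF.agent_down_time

    def is_agent_down(heartbeat, now=None):
        # An agent counts as down once its last heartbeat is older
        # than agent_down_time.
        now = now or datetime.datetime.utcnow()
        return (now - heartbeat).total_seconds() > AGENT_DOWN_TIME

    stale = datetime.datetime.utcnow() - datetime.timedelta(seconds=90)
    assert is_agent_down(stale)  # 90s since last heartbeat > 75s threshold
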
-    def get_configuration_dict(self, agent_db):
-        try:
-            conf = jsonutils.loads(agent_db.configurations)
-        except Exception:
-            msg = _LW('Configuration for agent %(agent_type)s on host %(host)s'
-                      ' is invalid.')
-            LOG.warn(msg, {'agent_type': agent_db.agent_type,
-                           'host': agent_db.host})
-            conf = {}
-        return conf
-
-    def _get_agent_load(self, agent):
-        configs = agent.get('configurations', {})
-        load_type = None
-        load = 0
-        if agent['agent_type'] == constants.AGENT_TYPE_DHCP:
-            load_type = cfg.CONF.dhcp_load_type
-        if load_type:
-            load = int(configs.get(load_type, 0))
-        return load
-
-    def _make_agent_dict(self, agent, fields=None):
-        attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
-            ext_agent.RESOURCE_NAME + 's')
-        res = dict((k, agent[k]) for k in attr
-                   if k not in ['alive', 'configurations'])
-        res['alive'] = not AgentDbMixin.is_agent_down(
-            res['heartbeat_timestamp'])
-        res['configurations'] = self.get_configuration_dict(agent)
-        res['availability_zone'] = agent['availability_zone']
-        return self._fields(res, fields)
-
-    def delete_agent(self, context, id):
-        with context.session.begin(subtransactions=True):
-            agent = self._get_agent(context, id)
-            context.session.delete(agent)
-
-    def update_agent(self, context, id, agent):
-        agent_data = agent['agent']
-        with context.session.begin(subtransactions=True):
-            agent = self._get_agent(context, id)
-            agent.update(agent_data)
-        return self._make_agent_dict(agent)
-
-    def get_agents_db(self, context, filters=None):
-        query = self._get_collection_query(context, Agent, filters=filters)
-        return query.all()
-
-    def get_agents(self, context, filters=None, fields=None):
-        agents = self._get_collection(context, Agent,
-                                      self._make_agent_dict,
-                                      filters=filters, fields=fields)
-        alive = filters and filters.get('alive', None)
-        if alive:
-            # alive filter will be a list
-            alive = attributes.convert_to_boolean(alive[0])
-            agents = [agent for agent in agents if agent['alive'] == alive]
-        return agents
-
-    def agent_health_check(self):
-        """Scan agents and log if some are considered dead."""
-        agents = self.get_agents(context.get_admin_context(),
-                                 filters={'admin_state_up': [True]})
-        dead_agents = [agent for agent in agents if not agent['alive']]
-        if dead_agents:
-            data = '%20s %20s %s\n' % ('Type', 'Last heartbeat', "host")
-            data += '\n'.join(['%20s %20s %s' %
-                               (agent['agent_type'],
-                                agent['heartbeat_timestamp'],
-                                agent['host']) for agent in dead_agents])
-            LOG.warn(_LW("Agent healthcheck: found %(count)s dead agents "
-                         "out of %(total)s:\n%(data)s"),
-                     {'count': len(dead_agents),
-                      'total': len(agents),
-                      'data': data})
-        else:
-            LOG.debug("Agent healthcheck: found %s active agents",
-                      len(agents))
-
-    def _get_agent_by_type_and_host(self, context, agent_type, host):
-        query = self._model_query(context, Agent)
-        try:
-            agent_db = query.filter(Agent.agent_type == agent_type,
-                                    Agent.host == host).one()
-            return agent_db
-        except exc.NoResultFound:
-            raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
-                                                    host=host)
-        except exc.MultipleResultsFound:
-            raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
-                                                         host=host)
-
-    def get_agent(self, context, id, fields=None):
-        agent = self._get_agent(context, id)
-        return self._make_agent_dict(agent, fields)
-
-    def _log_heartbeat(self, state, agent_db, agent_conf):
-        if agent_conf.get('log_agent_heartbeats'):
-            delta = timeutils.utcnow() - agent_db.heartbeat_timestamp
-            LOG.info(_LI("Heartbeat received from %(type)s agent on "
-                         "host %(host)s, uuid %(uuid)s after %(delta)s"),
-                     {'type': agent_db.agent_type,
-                      'host': agent_db.host,
-                      'uuid': state.get('uuid'),
-                      'delta': delta})
-
-    def _create_or_update_agent(self, context, agent_state):
-        """Registers new agent in the database or updates existing.
-
-        Returns agent status from server point of view: alive, new or revived.
-        It could be used by agent to do some sync with the server if needed.
-        """
-        status = constants.AGENT_ALIVE
-        with context.session.begin(subtransactions=True):
-            res_keys = ['agent_type', 'binary', 'host', 'topic']
-            res = dict((k, agent_state[k]) for k in res_keys)
-            if 'availability_zone' in agent_state:
-                res['availability_zone'] = agent_state['availability_zone']
-            configurations_dict = agent_state.get('configurations', {})
-            res['configurations'] = jsonutils.dumps(configurations_dict)
-            res['load'] = self._get_agent_load(agent_state)
-            current_time = timeutils.utcnow()
-            try:
-                agent_db = self._get_agent_by_type_and_host(
-                    context, agent_state['agent_type'], agent_state['host'])
-                if not agent_db.is_active:
-                    status = constants.AGENT_REVIVED
-                res['heartbeat_timestamp'] = current_time
-                if agent_state.get('start_flag'):
-                    res['started_at'] = current_time
-                greenthread.sleep(0)
-                self._log_heartbeat(agent_state, agent_db, configurations_dict)
-                agent_db.update(res)
-            except ext_agent.AgentNotFoundByTypeHost:
-                greenthread.sleep(0)
-                res['created_at'] = current_time
-                res['started_at'] = current_time
-                res['heartbeat_timestamp'] = current_time
-                res['admin_state_up'] = cfg.CONF.enable_new_agents
-                agent_db = Agent(**res)
-                greenthread.sleep(0)
-                context.session.add(agent_db)
-                self._log_heartbeat(agent_state, agent_db, configurations_dict)
-                status = constants.AGENT_NEW
-            greenthread.sleep(0)
-        return status
-
-    def create_or_update_agent(self, context, agent):
-        """Create or update agent according to report."""
-
-        try:
-            return self._create_or_update_agent(context, agent)
-        except db_exc.DBDuplicateEntry:
-            # It might happen that two or more concurrent transactions
-            # are trying to insert new rows having the same value of
-            # (agent_type, host) pair at the same time (if there has
-            # been no such entry in the table and multiple agent status
-            # updates are being processed at the moment). In this case
-            # having a unique constraint on (agent_type, host) columns
-            # guarantees that only one transaction will succeed and
-            # insert a new agent entry, others will fail and be rolled
-            # back. That means we must retry them one more time: no
-            # INSERTs will be issued, because
-            # _get_agent_by_type_and_host() will return the existing
-            # agent entry, which will be updated multiple times
-            return self._create_or_update_agent(context, agent)
-
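
A sketch of the retry-on-duplicate pattern the comment above describes;
the unique constraint on (agent_type, host) guarantees that at most one
retry is needed (sketch only, assuming oslo.db is available):

    from oslo_db import exception as db_exc

    def upsert_with_one_retry(do_upsert):
        try:
            return do_upsert()
        except db_exc.DBDuplicateEntry:
            # A concurrent transaction inserted the row first; on retry
            # the lookup finds the existing row, so the INSERT becomes
            # an UPDATE.
            return do_upsert()
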
-
-class AgentExtRpcCallback(object):
-    """Processes the rpc report in plugin implementations.
-
-    This class implements the server side of an rpc interface.  The client side
-    can be found in neutron.agent.rpc.PluginReportStateAPI.  For more
-    information on changing rpc interfaces, see doc/source/devref/rpc_api.rst.
-    """
-
-    target = oslo_messaging.Target(version='1.0',
-                                   namespace=constants.RPC_NAMESPACE_STATE)
-    START_TIME = timeutils.utcnow()
-
-    def __init__(self, plugin=None):
-        super(AgentExtRpcCallback, self).__init__()
-        self.plugin = plugin
-
-    def report_state(self, context, **kwargs):
-        """Report state from agent to server.
-
-        Returns - agent's status: AGENT_NEW, AGENT_REVIVED, AGENT_ALIVE
-        """
-        time = kwargs['time']
-        time = timeutils.parse_strtime(time)
-        agent_state = kwargs['agent_state']['agent_state']
-        self._check_clock_sync_on_agent_start(agent_state, time)
-        if self.START_TIME > time:
-            time_agent = datetime.datetime.isoformat(time)
-            time_server = datetime.datetime.isoformat(self.START_TIME)
-            log_dict = {'agent_time': time_agent, 'server_time': time_server}
-            LOG.debug("Stale message received with timestamp: %(agent_time)s. "
-                      "Skipping processing because it's older than the "
-                      "server start timestamp: %(server_time)s", log_dict)
-            return
-        if not self.plugin:
-            self.plugin = manager.NeutronManager.get_plugin()
-        return self.plugin.create_or_update_agent(context, agent_state)
-
-    def _check_clock_sync_on_agent_start(self, agent_state, agent_time):
-        """Checks if the server and the agent times are in sync.
-
-        Method checks if the agent time is in sync with the server time
-        on start up. Ignores it, on subsequent re-connects.
-        """
-        if agent_state.get('start_flag'):
-            time_server_now = timeutils.utcnow()
-            diff = abs(timeutils.delta_seconds(time_server_now, agent_time))
-            if diff > cfg.CONF.agent_down_time:
-                agent_name = agent_state['agent_type']
-                time_agent = datetime.datetime.isoformat(agent_time)
-
-                host = agent_state['host']
-                log_dict = {'host': host,
-                            'agent_name': agent_name,
-                            'agent_time': time_agent,
-                            'threshold': cfg.CONF.agent_down_time,
-                            'serv_time': (datetime.datetime.isoformat
-                                          (time_server_now)),
-                            'diff': diff}
-                LOG.error(_LE("Message received from the host: %(host)s "
-                              "during the registration of %(agent_name)s has "
-                              "a timestamp: %(agent_time)s. This differs from "
-                              "the current server timestamp: %(serv_time)s by "
-                              "%(diff)s seconds, which is more than the "
-                              "threshold agent down"
-                              "time: %(threshold)s."), log_dict)
diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py
deleted file mode 100644 (file)
index a1b6796..0000000
+++ /dev/null
@@ -1,485 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import datetime
-import random
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_service import loopingcall
-from oslo_utils import timeutils
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.common import constants
-from neutron.common import utils
-from neutron import context as ncontext
-from neutron.db import agents_db
-from neutron.db.availability_zone import network as network_az
-from neutron.db import model_base
-from neutron.extensions import agent as ext_agent
-from neutron.extensions import dhcpagentscheduler
-
-
-LOG = logging.getLogger(__name__)
-
-AGENTS_SCHEDULER_OPTS = [
-    cfg.StrOpt('network_scheduler_driver',
-               default='neutron.scheduler.'
-                       'dhcp_agent_scheduler.WeightScheduler',
-               help=_('Driver to use for scheduling network to DHCP agent')),
-    cfg.BoolOpt('network_auto_schedule', default=True,
-                help=_('Allow auto scheduling networks to DHCP agent.')),
-    cfg.BoolOpt('allow_automatic_dhcp_failover', default=True,
-                help=_('Automatically remove networks from offline DHCP '
-                       'agents.')),
-    cfg.IntOpt('dhcp_agents_per_network', default=1,
-               help=_('Number of DHCP agents scheduled to host a tenant '
-                      'network. If this number is greater than 1, the '
-                      'scheduler automatically assigns multiple DHCP agents '
-                      'for a given tenant network, providing high '
-                      'availability for DHCP service.')),
-    cfg.BoolOpt('enable_services_on_agents_with_admin_state_down',
-                default=False,
-                help=_('Enable services on an agent with admin_state_up '
-                       'False. If this option is False, when admin_state_up '
-                       'of an agent is turned False, services on it will be '
-                       'disabled. Agents with admin_state_up False are not '
-                       'selected for automatic scheduling regardless of this '
-                       'option. But manual scheduling to such agents is '
-                       'available if this option is True.')),
-]
-
-cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS)
-
-
-class NetworkDhcpAgentBinding(model_base.BASEV2):
-    """Represents binding between neutron networks and DHCP agents."""
-
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey("networks.id", ondelete='CASCADE'),
-                           primary_key=True)
-    dhcp_agent = orm.relation(agents_db.Agent)
-    dhcp_agent_id = sa.Column(sa.String(36),
-                              sa.ForeignKey("agents.id",
-                                            ondelete='CASCADE'),
-                              primary_key=True)
-
-
-class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
-    """Common class for agent scheduler mixins."""
-
-    # agent notifiers to handle agent update operations;
-    # should be updated by plugins;
-    agent_notifiers = {
-        constants.AGENT_TYPE_DHCP: None,
-        constants.AGENT_TYPE_L3: None,
-        constants.AGENT_TYPE_LOADBALANCER: None,
-    }
-
-    @staticmethod
-    def is_eligible_agent(active, agent):
-        if active is None:
-            # filtering by activeness is disabled, all agents are eligible
-            return True
-        else:
-            # note(rpodolyaka): original behaviour is saved here: if active
-            #                   filter is set, only agents which are 'up'
-            #                   (i.e. have a recent heartbeat timestamp)
-            #                   are eligible, even if active is False
-            return not agents_db.AgentDbMixin.is_agent_down(
-                agent['heartbeat_timestamp'])
-
-    def update_agent(self, context, id, agent):
-        original_agent = self.get_agent(context, id)
-        result = super(AgentSchedulerDbMixin, self).update_agent(
-            context, id, agent)
-        agent_data = agent['agent']
-        agent_notifier = self.agent_notifiers.get(original_agent['agent_type'])
-        if (agent_notifier and
-            'admin_state_up' in agent_data and
-            original_agent['admin_state_up'] != agent_data['admin_state_up']):
-            agent_notifier.agent_updated(context,
-                                         agent_data['admin_state_up'],
-                                         original_agent['host'])
-        return result
-
-    def add_agent_status_check(self, function):
-        loop = loopingcall.FixedIntervalLoopingCall(function)
-        # TODO(enikanorov): make interval configurable rather than computed
-        interval = max(cfg.CONF.agent_down_time // 2, 1)
-        # add a random initial delay to allow agents to check in after the
-        # neutron server first starts; randomness offsets multiple servers
-        initial_delay = random.randint(interval, interval * 2)
-        loop.start(interval=interval, initial_delay=initial_delay)
-
-        if hasattr(self, 'periodic_agent_loops'):
-            self.periodic_agent_loops.append(loop)
-        else:
-            self.periodic_agent_loops = [loop]
-
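
Illustrative timing for the status-check loop above, assuming the
default agent_down_time of 75 seconds:

    import random

    agent_down_time = 75
    interval = max(agent_down_time // 2, 1)                 # 37 seconds
    initial_delay = random.randint(interval, interval * 2)  # 37..74 seconds
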
-    def agent_dead_limit_seconds(self):
-        return cfg.CONF.agent_down_time * 2
-
-    def wait_down_agents(self, agent_type, agent_dead_limit):
-        """Gives chance for agents to send a heartbeat."""
-        # check for an abrupt clock change since last check. if a change is
-        # detected, sleep for a while to let the agents check in.
-        tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
-                                              timeutils.utcnow())
-        if tdelta.total_seconds() > cfg.CONF.agent_down_time:
-            LOG.warn(_LW("Time since last %s agent reschedule check has "
-                         "exceeded the interval between checks. Waiting "
-                         "before check to allow agents to send a heartbeat "
-                         "in case there was a clock adjustment."), agent_type)
-            time.sleep(agent_dead_limit)
-        self._clock_jump_canary = timeutils.utcnow()
-
-    def get_cutoff_time(self, agent_dead_limit):
-        cutoff = timeutils.utcnow() - datetime.timedelta(
-            seconds=agent_dead_limit)
-        return cutoff
-
-
-class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
-                                .DhcpAgentSchedulerPluginBase,
-                                AgentSchedulerDbMixin):
-    """Mixin class to add DHCP agent scheduler extension to db_base_plugin_v2.
-    """
-
-    network_scheduler = None
-
-    def start_periodic_dhcp_agent_status_check(self):
-        if not cfg.CONF.allow_automatic_dhcp_failover:
-            LOG.info(_LI("Skipping periodic DHCP agent status check because "
-                         "automatic network rescheduling is disabled."))
-            return
-
-        self.add_agent_status_check(self.remove_networks_from_down_agents)
-
-    def is_eligible_agent(self, context, active, agent):
-        # eligible agent is active or starting up
-        return (AgentSchedulerDbMixin.is_eligible_agent(active, agent) or
-                self.agent_starting_up(context, agent))
-
-    def agent_starting_up(self, context, agent):
-        """Check if agent was just started.
-
-        Returns True if the agent is in its 'starting up' period.
-        The return value depends on the number of networks assigned to
-        the agent. It doesn't look at the latest heartbeat timestamp, as
-        it is assumed this method is called for agents considered dead.
-        """
-        agent_dead_limit = datetime.timedelta(
-            seconds=self.agent_dead_limit_seconds())
-        network_count = (context.session.query(NetworkDhcpAgentBinding).
-                         filter_by(dhcp_agent_id=agent['id']).count())
-        # The number of networks assigned to an agent affects how much
-        # time we give it to start up. Tests show that it is more or less
-        # safe to assume that a DHCP agent processes each network in less
-        # than 2 seconds, so give it that much additional time for each
-        # of its networks.
-        additional_time = datetime.timedelta(seconds=2 * network_count)
-        LOG.debug("Checking if agent starts up and giving it additional %s",
-                  additional_time)
-        agent_expected_up = (agent['started_at'] + agent_dead_limit +
-                             additional_time)
-        return agent_expected_up > timeutils.utcnow()
-
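
A worked example of the grace period above: with the default
agent_down_time of 75s (dead limit 150s) and, say, 10 hosted networks,
an agent counts as "starting up" for 170 seconds after started_at:

    import datetime

    dead_limit = datetime.timedelta(seconds=75 * 2)   # agent_dead_limit_seconds()
    per_network = datetime.timedelta(seconds=2 * 10)  # 10 hosted networks
    grace = dead_limit + per_network
    assert grace.total_seconds() == 170
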
-    def _schedule_network(self, context, network_id, dhcp_notifier):
-        LOG.info(_LI("Scheduling unhosted network %s"), network_id)
-        try:
-            # TODO(enikanorov): have to issue redundant db query
-            # to satisfy scheduling interface
-            network = self.get_network(context, network_id)
-            agents = self.schedule_network(context, network)
-            if not agents:
-                LOG.info(_LI("Failed to schedule network %s, "
-                             "no eligible agents or it might be "
-                             "already scheduled by another server"),
-                         network_id)
-                return
-            if not dhcp_notifier:
-                return
-            for agent in agents:
-                LOG.info(_LI("Adding network %(net)s to agent "
-                             "%(agent)s on host %(host)s"),
-                         {'net': network_id,
-                          'agent': agent.id,
-                          'host': agent.host})
-                dhcp_notifier.network_added_to_agent(
-                    context, network_id, agent.host)
-        except Exception:
-            # catch any exception during scheduling so that, when
-            # _schedule_network is invoked in a loop, the loop can
-            # continue in any case
-            LOG.exception(_LE("Failed to schedule network %s"), network_id)
-
-    def _filter_bindings(self, context, bindings):
-        """Skip bindings for which the agent is dead, but starting up."""
-
-        # to save a few DB calls, store already-checked agents in a dict:
-        # id -> is_agent_starting_up
-        checked_agents = {}
-        for binding in bindings:
-            try:
-                agent_id = binding.dhcp_agent['id']
-                if agent_id not in checked_agents:
-                    if self.agent_starting_up(context, binding.dhcp_agent):
-                        # When an agent starts up with many networks to
-                        # process, it may fail to send state reports within
-                        # the defined interval. The server would consider it
-                        # dead and try to remove networks from it.
-                        checked_agents[agent_id] = True
-                        LOG.debug("Agent %s is starting up, skipping",
-                                  agent_id)
-                    else:
-                        checked_agents[agent_id] = False
-                if not checked_agents[agent_id]:
-                    yield binding
-            except exc.ObjectDeletedError:
-                # we're not within a transaction, so the object can be lost
-                # because the underlying row was removed; just ignore this
-                LOG.debug("binding was removed concurrently, skipping it")
-
-    def remove_networks_from_down_agents(self):
-        """Remove networks from down DHCP agents if admin state is up.
-
-        Reschedule them if configured so.
-        """
-
-        agent_dead_limit = self.agent_dead_limit_seconds()
-        self.wait_down_agents('DHCP', agent_dead_limit)
-        cutoff = self.get_cutoff_time(agent_dead_limit)
-
-        context = ncontext.get_admin_context()
-        down_bindings = (
-            context.session.query(NetworkDhcpAgentBinding).
-            join(agents_db.Agent).
-            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
-                   agents_db.Agent.admin_state_up))
-        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
-
-        try:
-            dead_bindings = [b for b in
-                             self._filter_bindings(context, down_bindings)]
-            agents = self.get_agents_db(
-                context, {'agent_type': [constants.AGENT_TYPE_DHCP]})
-            active_agents = [agent for agent in agents if
-                             self.is_eligible_agent(context, True, agent)]
-            if not active_agents:
-                LOG.warn(_LW("No DHCP agents available, "
-                             "skipping rescheduling"))
-                return
-            for binding in dead_bindings:
-                LOG.warn(_LW("Removing network %(network)s from agent "
-                             "%(agent)s because the agent did not report "
-                             "to the server in the last %(dead_time)s "
-                             "seconds."),
-                         {'network': binding.network_id,
-                          'agent': binding.dhcp_agent_id,
-                          'dead_time': agent_dead_limit})
-                # save binding object to avoid ObjectDeletedError
-                # in case binding is concurrently deleted from the DB
-                saved_binding = {'net': binding.network_id,
-                                 'agent': binding.dhcp_agent_id}
-                try:
-                    # do not notify the agent if it is considered dead, so
-                    # that when it restarts it won't see network delete
-                    # notifications on its queue
-                    self.remove_network_from_dhcp_agent(context,
-                                                        binding.dhcp_agent_id,
-                                                        binding.network_id,
-                                                        notify=False)
-                except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
-                    # measures against concurrent operation
-                    LOG.debug("Network %(net)s already removed from DHCP "
-                              "agent %(agent)s",
-                              saved_binding)
-                    # still continue and allow concurrent scheduling attempt
-                except Exception:
-                    LOG.exception(_LE("Unexpected exception occurred while "
-                                      "removing network %(net)s from agent "
-                                      "%(agent)s"),
-                                  saved_binding)
-
-                if cfg.CONF.network_auto_schedule:
-                    self._schedule_network(
-                        context, saved_binding['net'], dhcp_notifier)
-        except Exception:
-            # we want to be thorough and catch whatever is raised
-            # to avoid loop abortion
-            LOG.exception(_LE("Exception encountered during network "
-                              "rescheduling"))
-
-    def get_dhcp_agents_hosting_networks(
-            self, context, network_ids, active=None, admin_state_up=None):
-        if not network_ids:
-            return []
-        query = context.session.query(NetworkDhcpAgentBinding)
-        query = query.options(orm.contains_eager(
-                              NetworkDhcpAgentBinding.dhcp_agent))
-        query = query.join(NetworkDhcpAgentBinding.dhcp_agent)
-        if len(network_ids) == 1:
-            query = query.filter(
-                NetworkDhcpAgentBinding.network_id == network_ids[0])
-        elif network_ids:
-            query = query.filter(
-                NetworkDhcpAgentBinding.network_id.in_(network_ids))
-        if admin_state_up is not None:
-            query = query.filter(agents_db.Agent.admin_state_up ==
-                                 admin_state_up)
-
-        return [binding.dhcp_agent
-                for binding in query
-                if self.is_eligible_agent(context, active,
-                                          binding.dhcp_agent)]
-
-    def add_network_to_dhcp_agent(self, context, id, network_id):
-        self._get_network(context, network_id)
-        with context.session.begin(subtransactions=True):
-            agent_db = self._get_agent(context, id)
-            if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or
-                    not services_available(agent_db['admin_state_up'])):
-                raise dhcpagentscheduler.InvalidDHCPAgent(id=id)
-            dhcp_agents = self.get_dhcp_agents_hosting_networks(
-                context, [network_id])
-            for dhcp_agent in dhcp_agents:
-                if id == dhcp_agent.id:
-                    raise dhcpagentscheduler.NetworkHostedByDHCPAgent(
-                        network_id=network_id, agent_id=id)
-            binding = NetworkDhcpAgentBinding()
-            binding.dhcp_agent_id = id
-            binding.network_id = network_id
-            context.session.add(binding)
-        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
-        if dhcp_notifier:
-            dhcp_notifier.network_added_to_agent(
-                context, network_id, agent_db.host)
-
-    def remove_network_from_dhcp_agent(self, context, id, network_id,
-                                       notify=True):
-        agent = self._get_agent(context, id)
-        with context.session.begin(subtransactions=True):
-            try:
-                query = context.session.query(NetworkDhcpAgentBinding)
-                query = query.filter(
-                    NetworkDhcpAgentBinding.network_id == network_id,
-                    NetworkDhcpAgentBinding.dhcp_agent_id == id)
-                # just ensure the binding exists
-                query.one()
-            except exc.NoResultFound:
-                raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent(
-                    network_id=network_id, agent_id=id)
-
-            # reserve the port, so the IP is reused on a subsequent add
-            device_id = utils.get_dhcp_agent_device_id(network_id,
-                                                       agent['host'])
-            filters = dict(device_id=[device_id])
-            ports = self.get_ports(context, filters=filters)
-            for port in ports:
-                port['device_id'] = constants.DEVICE_ID_RESERVED_DHCP_PORT
-                self.update_port(context, port['id'], dict(port=port))
-            # avoid issues with query.one() object that was
-            # loaded into the session
-            query.delete(synchronize_session=False)
-
-        if not notify:
-            return
-        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
-        if dhcp_notifier:
-            dhcp_notifier.network_removed_from_agent(
-                context, network_id, agent.host)
-
-    def list_networks_on_dhcp_agent(self, context, id):
-        query = context.session.query(NetworkDhcpAgentBinding.network_id)
-        query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id)
-
-        net_ids = [item[0] for item in query]
-        if net_ids:
-            return {'networks':
-                    self.get_networks(context, filters={'id': net_ids})}
-        else:
-            # Exception will be thrown if the requested agent does not exist.
-            self._get_agent(context, id)
-            return {'networks': []}
-
-    def list_active_networks_on_active_dhcp_agent(self, context, host):
-        try:
-            agent = self._get_agent_by_type_and_host(
-                context, constants.AGENT_TYPE_DHCP, host)
-        except ext_agent.AgentNotFoundByTypeHost:
-            LOG.debug("DHCP Agent not found on host %s", host)
-            return []
-
-        if not services_available(agent.admin_state_up):
-            return []
-        query = context.session.query(NetworkDhcpAgentBinding.network_id)
-        query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id)
-
-        net_ids = [item[0] for item in query]
-        if net_ids:
-            return self.get_networks(
-                context,
-                filters={'id': net_ids, 'admin_state_up': [True]}
-            )
-        else:
-            return []
-
-    def list_dhcp_agents_hosting_network(self, context, network_id):
-        dhcp_agents = self.get_dhcp_agents_hosting_networks(
-            context, [network_id])
-        agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents]
-        if agent_ids:
-            return {
-                'agents': self.get_agents(context, filters={'id': agent_ids})}
-        else:
-            return {'agents': []}
-
-    def schedule_network(self, context, created_network):
-        if self.network_scheduler:
-            return self.network_scheduler.schedule(
-                self, context, created_network)
-
-    def auto_schedule_networks(self, context, host):
-        if self.network_scheduler:
-            self.network_scheduler.auto_schedule_networks(self, context, host)
-
-
-class AZDhcpAgentSchedulerDbMixin(DhcpAgentSchedulerDbMixin,
-                                  network_az.NetworkAvailabilityZoneMixin):
-    """Mixin class to add availability_zone supported DHCP agent scheduler."""
-
-    def get_network_availability_zones(self, network):
-        zones = {agent.availability_zone for agent in network.dhcp_agents}
-        return list(zones)
-
-
-# helper functions for readability.
-def services_available(admin_state_up):
-    if cfg.CONF.enable_services_on_agents_with_admin_state_down:
-        # Services are available regardless of admin_state_up
-        return True
-    return admin_state_up
-
-
-def get_admin_state_up_filter():
-    if cfg.CONF.enable_services_on_agents_with_admin_state_down:
-        # Avoid filtering on admin_state_up at all
-        return None
-    # Filter on admin_state_up == True
-    return True
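
A condensed sketch of the helper's behaviour (allow_admin_down stands in
for cfg.CONF.enable_services_on_agents_with_admin_state_down):

    def services_available(admin_state_up, allow_admin_down=False):
        return True if allow_admin_down else admin_state_up

    assert services_available(False, allow_admin_down=True) is True
    assert services_available(False) is False
    assert services_available(True) is True
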
diff --git a/neutron/db/allowedaddresspairs_db.py b/neutron/db/allowedaddresspairs_db.py
deleted file mode 100644 (file)
index a69c237..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import sqlalchemy as sa
-
-from oslo_db import exception as db_exc
-from sqlalchemy import orm
-
-from neutron.api.v2 import attributes as attr
-from neutron.db import db_base_plugin_v2
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import allowedaddresspairs as addr_pair
-
-
-class AllowedAddressPair(model_base.BASEV2):
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                        primary_key=True)
-    mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True)
-    ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
-
-    port = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref("allowed_address_pairs",
-                            lazy="joined", cascade="delete"))
-
-
-class AllowedAddressPairsMixin(object):
-    """Mixin class for allowed address pairs."""
-
-    def _process_create_allowed_address_pairs(self, context, port,
-                                              allowed_address_pairs):
-        if not attr.is_attr_set(allowed_address_pairs):
-            return []
-        try:
-            with context.session.begin(subtransactions=True):
-                for address_pair in allowed_address_pairs:
-                    # use port.mac_address if no mac address in address pair
-                    if 'mac_address' not in address_pair:
-                        address_pair['mac_address'] = port['mac_address']
-                    db_pair = AllowedAddressPair(
-                        port_id=port['id'],
-                        mac_address=address_pair['mac_address'],
-                        ip_address=address_pair['ip_address'])
-                    context.session.add(db_pair)
-        except db_exc.DBDuplicateEntry:
-            raise addr_pair.DuplicateAddressPairInRequest(
-                mac_address=address_pair['mac_address'],
-                ip_address=address_pair['ip_address'])
-
-        return allowed_address_pairs
-
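
A hypothetical allowed_address_pairs value as passed to the method above
(addresses and MAC are illustrative):

    allowed_address_pairs = [
        # mac_address is optional; it defaults to port['mac_address']
        {'ip_address': '10.0.0.5'},
        {'ip_address': '10.0.0.6', 'mac_address': 'fa:16:3e:00:00:01'},
    ]
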
-    def get_allowed_address_pairs(self, context, port_id):
-        pairs = (context.session.query(AllowedAddressPair).
-                 filter_by(port_id=port_id))
-        return [self._make_allowed_address_pairs_dict(pair)
-                for pair in pairs]
-
-    def _extend_port_dict_allowed_address_pairs(self, port_res, port_db):
-        # If port_db is provided, allowed address pairs will be accessed via
-        # sqlalchemy models. As they're loaded together with ports this
-        # will not cause an extra query.
-        allowed_address_pairs = [
-            self._make_allowed_address_pairs_dict(address_pair) for
-            address_pair in port_db.allowed_address_pairs]
-        port_res[addr_pair.ADDRESS_PAIRS] = allowed_address_pairs
-        return port_res
-
-    # Register dict extend functions for ports
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attr.PORTS, ['_extend_port_dict_allowed_address_pairs'])
-
-    def _delete_allowed_address_pairs(self, context, id):
-        query = self._model_query(context, AllowedAddressPair)
-        with context.session.begin(subtransactions=True):
-            query.filter(AllowedAddressPair.port_id == id).delete()
-
-    def _make_allowed_address_pairs_dict(self, allowed_address_pairs,
-                                         fields=None):
-        res = {'mac_address': allowed_address_pairs['mac_address'],
-               'ip_address': allowed_address_pairs['ip_address']}
-        return self._fields(res, fields)
-
-    def _has_address_pairs(self, port):
-        return (attr.is_attr_set(port['port'][addr_pair.ADDRESS_PAIRS])
-                and port['port'][addr_pair.ADDRESS_PAIRS] != [])
-
-    def _check_update_has_allowed_address_pairs(self, port):
-        """Determine if request has an allowed address pair.
-
-        Return True if the port parameter has a non-empty
-        'allowed_address_pairs' attribute. Otherwise returns False.
-        """
-        return (addr_pair.ADDRESS_PAIRS in port['port'] and
-                self._has_address_pairs(port))
-
-    def _check_update_deletes_allowed_address_pairs(self, port):
-        """Determine if request deletes address pair.
-
-        Return True if port has an allowed address pair and its value
-        is either [] or not is_attr_set, otherwise return False
-        """
-        return (addr_pair.ADDRESS_PAIRS in port['port'] and
-                not self._has_address_pairs(port))
-
-    def is_address_pairs_attribute_updated(self, port, update_attrs):
-        """Check if the address pairs attribute is being updated.
-
-        Returns True if there is an update. This can be used to decide
-        if a port update notification should be sent to agents or third
-        party controllers.
-        """
-
-        new_pairs = update_attrs.get(addr_pair.ADDRESS_PAIRS)
-        if new_pairs is None:
-            return False
-        old_pairs = port.get(addr_pair.ADDRESS_PAIRS)
-
-        # Missing or unchanged address pairs in attributes mean no update
-        return new_pairs != old_pairs
-
-    def update_address_pairs_on_port(self, context, port_id, port,
-                                     original_port, updated_port):
-        """Update allowed address pairs on port.
-
-        Returns True if an update notification is required. Notification
-        is not done here because other changes on the port may need
-        notification. This method is expected to be called within
-        a transaction.
-        """
-        new_pairs = port['port'].get(addr_pair.ADDRESS_PAIRS)
-
-        if self.is_address_pairs_attribute_updated(original_port,
-                                                   port['port']):
-            updated_port[addr_pair.ADDRESS_PAIRS] = new_pairs
-            self._delete_allowed_address_pairs(context, port_id)
-            self._process_create_allowed_address_pairs(
-                context, updated_port, new_pairs)
-            return True
-
-        return False
diff --git a/neutron/db/api.py b/neutron/db/api.py
deleted file mode 100644 (file)
index fdaaca1..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-from oslo_config import cfg
-from oslo_db import api as oslo_db_api
-from oslo_db import exception as db_exc
-from oslo_db.sqlalchemy import session
-from oslo_utils import excutils
-from oslo_utils import uuidutils
-from sqlalchemy import exc
-
-from neutron.common import exceptions as n_exc
-from neutron.db import common_db_mixin
-
-
-_FACADE = None
-
-MAX_RETRIES = 10
-is_deadlock = lambda e: isinstance(e, db_exc.DBDeadlock)
-retry_db_errors = oslo_db_api.wrap_db_retry(
-    max_retries=MAX_RETRIES,
-    retry_on_request=True,
-    exception_checker=is_deadlock
-)
-
-
-@contextlib.contextmanager
-def exc_to_retry(exceptions):
-    try:
-        yield
-    except Exception as e:
-        with excutils.save_and_reraise_exception() as ctx:
-            if isinstance(e, exceptions):
-                ctx.reraise = False
-                raise db_exc.RetryRequest(e)
-
-
-def _create_facade_lazily():
-    global _FACADE
-
-    if _FACADE is None:
-        _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)
-
-    return _FACADE
-
-
-def get_engine():
-    """Helper method to grab engine."""
-    facade = _create_facade_lazily()
-    return facade.get_engine()
-
-
-def dispose():
-    # Don't need to do anything if an enginefacade hasn't been created
-    if _FACADE is not None:
-        get_engine().pool.dispose()
-
-
-def get_session(autocommit=True, expire_on_commit=False, use_slave=False):
-    """Helper method to grab session."""
-    facade = _create_facade_lazily()
-    return facade.get_session(autocommit=autocommit,
-                              expire_on_commit=expire_on_commit,
-                              use_slave=use_slave)
-
-
-@contextlib.contextmanager
-def autonested_transaction(sess):
-    """This is a convenience method to not bother with 'nested' parameter."""
-    try:
-        session_context = sess.begin_nested()
-    except exc.InvalidRequestError:
-        session_context = sess.begin(subtransactions=True)
-    finally:
-        with session_context as tx:
-            yield tx
-
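
A usage sketch, assuming a plugin method with a neutron request context
(Thing is a hypothetical model):

    def create_thing(context, values):
        with autonested_transaction(context.session):
            context.session.add(Thing(**values))
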
-
-# Common database operation implementations
-def get_object(context, model, **kwargs):
-    with context.session.begin(subtransactions=True):
-        return (common_db_mixin.model_query(context, model)
-                .filter_by(**kwargs)
-                .first())
-
-
-def get_objects(context, model, **kwargs):
-    with context.session.begin(subtransactions=True):
-        return (common_db_mixin.model_query(context, model)
-                .filter_by(**kwargs)
-                .all())
-
-
-def create_object(context, model, values):
-    with context.session.begin(subtransactions=True):
-        if 'id' not in values:
-            values['id'] = uuidutils.generate_uuid()
-        db_obj = model(**values)
-        context.session.add(db_obj)
-    return db_obj.__dict__
-
-
-def _safe_get_object(context, model, id):
-    db_obj = get_object(context, model, id=id)
-    if db_obj is None:
-        raise n_exc.ObjectNotFound(id=id)
-    return db_obj
-
-
-def update_object(context, model, id, values):
-    with context.session.begin(subtransactions=True):
-        db_obj = _safe_get_object(context, model, id)
-        db_obj.update(values)
-        db_obj.save(session=context.session)
-    return db_obj.__dict__
-
-
-def delete_object(context, model, id):
-    with context.session.begin(subtransactions=True):
-        db_obj = _safe_get_object(context, model, id)
-        context.session.delete(db_obj)
diff --git a/neutron/db/availability_zone/__init__.py b/neutron/db/availability_zone/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/db/availability_zone/network.py b/neutron/db/availability_zone/network.py
deleted file mode 100644 (file)
index 5b4b984..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.v2 import attributes
-from neutron.db import common_db_mixin
-from neutron.extensions import availability_zone as az_ext
-from neutron.extensions import network_availability_zone as net_az
-
-
-class NetworkAvailabilityZoneMixin(net_az.NetworkAvailabilityZonePluginBase):
-    """Mixin class to enable network's availability zone attributes."""
-
-    def _extend_availability_zone(self, net_res, net_db):
-        net_res[az_ext.AZ_HINTS] = az_ext.convert_az_string_to_list(
-            net_db[az_ext.AZ_HINTS])
-        net_res[az_ext.AVAILABILITY_ZONES] = (
-            self.get_network_availability_zones(net_db))
-
-    common_db_mixin.CommonDbMixin.register_dict_extend_funcs(
-        attributes.NETWORKS, ['_extend_availability_zone'])
diff --git a/neutron/db/availability_zone/router.py b/neutron/db/availability_zone/router.py
deleted file mode 100644 (file)
index 1f3aad4..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import utils
-from neutron.db import l3_attrs_db
-from neutron.extensions import availability_zone as az_ext
-
-
-class RouterAvailabilityZoneMixin(l3_attrs_db.ExtraAttributesMixin):
-    """Mixin class to enable router's availability zone attributes."""
-
-    extra_attributes = [{'name': az_ext.AZ_HINTS, 'default': "[]"}]
-
-    def _extend_extra_router_dict(self, router_res, router_db):
-        super(RouterAvailabilityZoneMixin, self)._extend_extra_router_dict(
-            router_res, router_db)
-        if not utils.is_extension_supported(self, 'router_availability_zone'):
-            return
-        router_res[az_ext.AZ_HINTS] = az_ext.convert_az_string_to_list(
-            router_res[az_ext.AZ_HINTS])
-        router_res['availability_zones'] = (
-            self.get_router_availability_zones(router_db))
-
-    def _process_extra_attr_router_create(
-        self, context, router_db, router_req):
-        if az_ext.AZ_HINTS in router_req:
-            self.validate_availability_zones(context, 'router',
-                                             router_req[az_ext.AZ_HINTS])
-            router_req[az_ext.AZ_HINTS] = az_ext.convert_az_list_to_string(
-                router_req[az_ext.AZ_HINTS])
-        super(RouterAvailabilityZoneMixin,
-              self)._process_extra_attr_router_create(context, router_db,
-                                                      router_req)
diff --git a/neutron/db/common_db_mixin.py b/neutron/db/common_db_mixin.py
deleted file mode 100644 (file)
index b504cfb..0000000
+++ /dev/null
@@ -1,315 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import weakref
-
-from debtcollector import removals
-import six
-from sqlalchemy import and_
-from sqlalchemy import or_
-from sqlalchemy import sql
-
-from neutron._i18n import _
-from neutron.common import exceptions as n_exc
-from neutron.db import sqlalchemyutils
-
-
-def model_query_scope(context, model):
-    # Unless a context has 'admin' or 'advanced-service' rights the
-    # query will be scoped to a single tenant_id
-    return ((not context.is_admin and hasattr(model, 'tenant_id')) and
-            (not context.is_advsvc and hasattr(model, 'tenant_id')))
-
-
-def model_query(context, model):
-    query = context.session.query(model)
-    # define basic filter condition for model query
-    query_filter = None
-    if model_query_scope(context, model):
-        query_filter = (model.tenant_id == context.tenant_id)
-
-    if query_filter is not None:
-        query = query.filter(query_filter)
-    return query
-
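
The scoping condition above simplifies to a single conjunction; an
equivalent sketch (ctx and model are illustrative names):

    scoped = (not ctx.is_admin and not ctx.is_advsvc
              and hasattr(model, 'tenant_id'))
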
-
-class CommonDbMixin(object):
-    """Common methods used in core and service plugins."""
-    # Plugins, mixin classes implementing extension will register
-    # hooks into the dict below for "augmenting" the "core way" of
-    # building a query for retrieving objects from a model class.
-    # To this aim, the register_model_query_hook and unregister_query_hook
-    # from this class should be invoked
-    _model_query_hooks = {}
-
-    # This dictionary will store methods for extending attributes of
-    # api resources. Mixins can use this dict for adding their own methods
-    # TODO(salvatore-orlando): Avoid using class-level variables
-    _dict_extend_functions = {}
-
-    @classmethod
-    def register_model_query_hook(cls, model, name, query_hook, filter_hook,
-                                  result_filters=None):
-        """Register a hook to be invoked when a query is executed.
-
-        Add the hooks to the _model_query_hooks dict. Models are the keys
-        of this dict, whereas the value is another dict mapping hook names to
-        callables performing the hook.
-        Each hook has a "query" component, used to build the query expression
-        and a "filter" component, which is used to build the filter expression.
-
-        Query hooks take as input the query being built and return a
-        transformed query expression.
-
-        Filter hooks take as input the filter expression being built and return
-        a transformed filter expression.
-        """
-        cls._model_query_hooks.setdefault(model, {})[name] = {
-            'query': query_hook, 'filter': filter_hook,
-            'result_filters': result_filters}
-
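-    # NOTE(editor): illustrative usage, not part of the original class. A
-    # mixin adding an extension table would typically register its hooks
-    # once at import time; the names below are hypothetical:
-    #
-    #     CommonDbMixin.register_model_query_hook(
-    #         models_v2.Port, 'port_ext', query_hook=None,
-    #         filter_hook='_port_ext_filter_hook',
-    #         result_filters='_port_ext_result_filters')
-    #
-    # String hook names are resolved against the plugin instance with
-    # getattr() when the query is built (see _single_model_query).
-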
-    @classmethod
-    def register_dict_extend_funcs(cls, resource, funcs):
-        cls._dict_extend_functions.setdefault(resource, []).extend(funcs)
-
-    @property
-    def safe_reference(self):
-        """Return a weakref to the instance.
-
-        Minimize the potential for the instance persisting
-        unnecessarily in memory by returning a weakref proxy that
-        won't prevent deallocation.
-        """
-        return weakref.proxy(self)
-
-    def model_query_scope(self, context, model):
-        return model_query_scope(context, model)
-
-    def _model_query(self, context, model):
-        if isinstance(model, UnionModel):
-            return self._union_model_query(context, model)
-        else:
-            return self._single_model_query(context, model)
-
-    def _union_model_query(self, context, model):
-        # A union query is a query that combines multiple sets of data
-        # together and represents them as one. So if a UnionModel was
-        # passed in, we generate the query for each model with the
-        # appropriate filters and then combine them together with the
-        # .union operator. This allows any subsequent users of the query
-        # to handle it like a normal query (e.g. add pagination/sorting/etc)
-        first_query = None
-        remaining_queries = []
-        for name, component_model in model.model_map.items():
-            query = self._single_model_query(context, component_model)
-            if model.column_type_name:
-                query.add_columns(
-                    sql.expression.column('"%s"' % name, is_literal=True).
-                    label(model.column_type_name)
-                )
-            if first_query is None:
-                first_query = query
-            else:
-                remaining_queries.append(query)
-        return first_query.union(*remaining_queries)
-
-    def _single_model_query(self, context, model):
-        query = context.session.query(model)
-        # define basic filter condition for model query
-        query_filter = None
-        if self.model_query_scope(context, model):
-            if hasattr(model, 'rbac_entries'):
-                query = query.outerjoin(model.rbac_entries)
-                rbac_model = model.rbac_entries.property.mapper.class_
-                query_filter = (
-                    (model.tenant_id == context.tenant_id) |
-                    ((rbac_model.action == 'access_as_shared') &
-                     ((rbac_model.target_tenant == context.tenant_id) |
-                      (rbac_model.target_tenant == '*'))))
-            elif hasattr(model, 'shared'):
-                query_filter = ((model.tenant_id == context.tenant_id) |
-                                (model.shared == sql.true()))
-            else:
-                query_filter = (model.tenant_id == context.tenant_id)
-        # Execute query hooks registered from mixins and plugins
-        for _name, hooks in six.iteritems(self._model_query_hooks.get(model,
-                                                                      {})):
-            query_hook = hooks.get('query')
-            if isinstance(query_hook, six.string_types):
-                query_hook = getattr(self, query_hook, None)
-            if query_hook:
-                query = query_hook(context, model, query)
-
-            filter_hook = hooks.get('filter')
-            if isinstance(filter_hook, six.string_types):
-                filter_hook = getattr(self, filter_hook, None)
-            if filter_hook:
-                query_filter = filter_hook(context, model, query_filter)
-
-        # NOTE(salvatore-orlando): 'if query_filter:' would make SQLAlchemy
-        # try to evaluate the clause as a boolean, raising an exception;
-        # hence the explicit comparison with None
-        if query_filter is not None:
-            query = query.filter(query_filter)
-        return query
-
-    def _fields(self, resource, fields):
-        if fields:
-            return {key: item for key, item in resource.items()
-                    if key in fields}
-        return resource
-
-    @removals.remove(message='This method will be removed in N')
-    def _get_tenant_id_for_create(self, context, resource):
-        if context.is_admin and 'tenant_id' in resource:
-            tenant_id = resource['tenant_id']
-        elif ('tenant_id' in resource and
-              resource['tenant_id'] != context.tenant_id):
-            reason = _('Cannot create resource for another tenant')
-            raise n_exc.AdminRequired(reason=reason)
-        else:
-            tenant_id = context.tenant_id
-        return tenant_id
-
-    def _get_by_id(self, context, model, id):
-        query = self._model_query(context, model)
-        return query.filter(model.id == id).one()
-
-    def _apply_filters_to_query(self, query, model, filters, context=None):
-        if isinstance(model, UnionModel):
-            # NOTE(kevinbenton): a unionmodel is made up of multiple tables so
-            # we apply the filter to each table
-            for component_model in model.model_map.values():
-                query = self._apply_filters_to_query(query, component_model,
-                                                     filters, context)
-            return query
-        if filters:
-            for key, value in six.iteritems(filters):
-                column = getattr(model, key, None)
-                # NOTE(kevinbenton): if column is a hybrid property that
-                # references another expression, attempting to convert to
-                # a boolean will fail so we must compare to None.
-                # See "An Important Expression Language Gotcha" in:
-                # docs.sqlalchemy.org/en/rel_0_9/changelog/migration_06.html
-                if column is not None:
-                    if not value:
-                        query = query.filter(sql.false())
-                        return query
-                    query = query.filter(column.in_(value))
-                elif key == 'shared' and hasattr(model, 'rbac_entries'):
-                    # translate a filter on shared into a query against the
-                    # object's rbac entries
-                    query = query.outerjoin(model.rbac_entries)
-                    rbac = model.rbac_entries.property.mapper.class_
-                    matches = [rbac.target_tenant == '*']
-                    if context:
-                        matches.append(rbac.target_tenant == context.tenant_id)
-                    # any 'access_as_shared' records that match the
-                    # wildcard or requesting tenant
-                    is_shared = and_(rbac.action == 'access_as_shared',
-                                     or_(*matches))
-                    if not value[0]:
-                        # NOTE(kevinbenton): we need to find objects that don't
-                        # have an entry that matches the criteria above so
-                        # we use a subquery to exclude them.
-                        # We can't just filter the inverse of the query above
-                        # because that will still give us a network shared to
-                        # our tenant (or wildcard) if it's shared to another
-                        # tenant.
-                        # This is the column joining the table to rbac via
-                        # the object_id. We can't just use model.id because
-                        # subnets join on network.id so we have to inspect the
-                        # relationship.
-                        join_cols = model.rbac_entries.property.local_columns
-                        oid_col = list(join_cols)[0]
-                        is_shared = ~oid_col.in_(
-                            query.session.query(rbac.object_id).
-                            filter(is_shared)
-                        )
-                    query = query.filter(is_shared)
-            for _nam, hooks in six.iteritems(self._model_query_hooks.get(model,
-                                                                         {})):
-                result_filter = hooks.get('result_filters', None)
-                if isinstance(result_filter, six.string_types):
-                    result_filter = getattr(self, result_filter, None)
-
-                if result_filter:
-                    query = result_filter(query, filters)
-        return query
-
-    def _apply_dict_extend_functions(self, resource_type,
-                                     response, db_object):
-        for func in self._dict_extend_functions.get(
-            resource_type, []):
-            args = (response, db_object)
-            if isinstance(func, six.string_types):
-                func = getattr(self, func, None)
-            else:
-                # must call unbound method - use self as 1st argument
-                args = (self,) + args
-            if func:
-                func(*args)
-
-    def _get_collection_query(self, context, model, filters=None,
-                              sorts=None, limit=None, marker_obj=None,
-                              page_reverse=False):
-        collection = self._model_query(context, model)
-        collection = self._apply_filters_to_query(collection, model, filters,
-                                                  context)
-        if limit and page_reverse and sorts:
-            sorts = [(s[0], not s[1]) for s in sorts]
-        collection = sqlalchemyutils.paginate_query(collection, model, limit,
-                                                    sorts,
-                                                    marker_obj=marker_obj)
-        return collection
-
-    def _get_collection(self, context, model, dict_func, filters=None,
-                        fields=None, sorts=None, limit=None, marker_obj=None,
-                        page_reverse=False):
-        query = self._get_collection_query(context, model, filters=filters,
-                                           sorts=sorts,
-                                           limit=limit,
-                                           marker_obj=marker_obj,
-                                           page_reverse=page_reverse)
-        items = [dict_func(c, fields) for c in query]
-        if limit and page_reverse:
-            items.reverse()
-        return items
-
-    def _get_collection_count(self, context, model, filters=None):
-        return self._get_collection_query(context, model, filters).count()
-
-    def _get_marker_obj(self, context, resource, limit, marker):
-        if limit and marker:
-            return getattr(self, '_get_%s' % resource)(context, marker)
-        return None
-
-    def _filter_non_model_columns(self, data, model):
-        """Remove all the attributes from data which are not columns of
-        the model passed as second parameter.
-        """
-        columns = [c.name for c in model.__table__.columns]
-        return dict((k, v) for (k, v) in
-                    six.iteritems(data) if k in columns)
-
-
-class UnionModel(object):
-    """Collection of models that _model_query can query as a single table."""
-
-    def __init__(self, model_map, column_type_name=None):
-        # model_map is a dictionary of models keyed by an arbitrary name.
-        # If column_type_name is specified, the resulting records will have a
-        # column with that name which identifies the source of each record
-        self.model_map = model_map
-        self.column_type_name = column_type_name
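-
-# NOTE(editor): illustrative sketch, not part of the original module. With a
-# UnionModel, _model_query() runs one query per component model and chains
-# them with .union(), so callers can paginate or sort the combined result
-# as if it came from a single table. The model names are hypothetical:
-#
-#     combined = UnionModel({'v1': RuleV1, 'v2': RuleV2},
-#                           column_type_name='source')
-#     rows = plugin._model_query(context, combined).all()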
diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py
deleted file mode 100644 (file)
index d189b6e..0000000
+++ /dev/null
@@ -1,311 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from sqlalchemy.orm import exc
-
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import utils
-from neutron.db import common_db_mixin
-from neutron.db import models_v2
-
-LOG = logging.getLogger(__name__)
-
-
-def convert_result_to_dict(f):
-    @functools.wraps(f)
-    def inner(*args, **kwargs):
-        result = f(*args, **kwargs)
-
-        if result is None:
-            return None
-        elif isinstance(result, list):
-            return [r.to_dict() for r in result]
-        else:
-            return result.to_dict()
-    return inner
-
-
-def filter_fields(f):
-    @functools.wraps(f)
-    def inner_filter(*args, **kwargs):
-        result = f(*args, **kwargs)
-        fields = kwargs.get('fields')
-        if not fields:
-            try:
-                pos = f.__code__.co_varnames.index('fields')
-                fields = args[pos]
-            except (IndexError, ValueError):
-                return result
-
-        do_filter = lambda d: {k: v for k, v in d.items() if k in fields}
-        if isinstance(result, list):
-            return [do_filter(obj) for obj in result]
-        else:
-            return do_filter(result)
-    return inner_filter
-
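-# NOTE(editor): illustrative usage, not part of the original module. Stacked
-# on a getter that returns objects exposing a to_dict() method, the two
-# decorators above first convert results to dicts and then prune them to the
-# requested fields; 'get_widget' and '_get_widget_object' are hypothetical:
-#
-#     @filter_fields
-#     @convert_result_to_dict
-#     def get_widget(self, context, id, fields=None):
-#         return self._get_widget_object(context, id)  # has .to_dict()
-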
-
-class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
-    """Stores getters and helper methods for db_base_plugin_v2
-
-    All private getters and simple helpers like _make_*_dict were moved from
-    db_base_plugin_v2.
-    More complicated logic and public methods left in db_base_plugin_v2.
-    Main purpose of this class is to make getters accessible for Ipam
-    backends.
-    """
-
-    @staticmethod
-    def _generate_mac():
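-        # NOTE(editor): added comment. With the default
-        # base_mac = "fa:16:3e:00:00:00", the first three octets are
-        # preserved and the remaining three are randomized, yielding
-        # addresses such as fa:16:3e:4a:91:07.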
-        return utils.get_random_mac(cfg.CONF.base_mac.split(':'))
-
-    @staticmethod
-    def _delete_ip_allocation(context, network_id, subnet_id, ip_address):
-        # Delete the IP address from the IPAllocation table
-        LOG.debug("Delete allocated IP %(ip_address)s "
-                  "(%(network_id)s/%(subnet_id)s)",
-                  {'ip_address': ip_address,
-                   'network_id': network_id,
-                   'subnet_id': subnet_id})
-        context.session.query(models_v2.IPAllocation).filter_by(
-            network_id=network_id,
-            ip_address=ip_address,
-            subnet_id=subnet_id).delete()
-
-    @staticmethod
-    def _store_ip_allocation(context, ip_address, network_id, subnet_id,
-                             port_id):
-        LOG.debug("Allocated IP %(ip_address)s "
-                  "(%(network_id)s/%(subnet_id)s/%(port_id)s)",
-                  {'ip_address': ip_address,
-                   'network_id': network_id,
-                   'subnet_id': subnet_id,
-                   'port_id': port_id})
-        allocated = models_v2.IPAllocation(
-            network_id=network_id,
-            port_id=port_id,
-            ip_address=ip_address,
-            subnet_id=subnet_id
-        )
-        context.session.add(allocated)
-
-    def _make_subnet_dict(self, subnet, fields=None, context=None):
-        res = {'id': subnet['id'],
-               'name': subnet['name'],
-               'tenant_id': subnet['tenant_id'],
-               'network_id': subnet['network_id'],
-               'ip_version': subnet['ip_version'],
-               'cidr': subnet['cidr'],
-               'subnetpool_id': subnet.get('subnetpool_id'),
-               'allocation_pools': [{'start': pool['first_ip'],
-                                     'end': pool['last_ip']}
-                                    for pool in subnet['allocation_pools']],
-               'gateway_ip': subnet['gateway_ip'],
-               'enable_dhcp': subnet['enable_dhcp'],
-               'ipv6_ra_mode': subnet['ipv6_ra_mode'],
-               'ipv6_address_mode': subnet['ipv6_address_mode'],
-               'dns_nameservers': [dns['address']
-                                   for dns in subnet['dns_nameservers']],
-               'host_routes': [{'destination': route['destination'],
-                                'nexthop': route['nexthop']}
-                               for route in subnet['routes']],
-               }
-        # The shared attribute for a subnet is the same as that of its
-        # parent network
-        res['shared'] = self._is_network_shared(context, subnet.networks)
-        # Call auxiliary extend functions, if any
-        self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet)
-        return self._fields(res, fields)
-
-    def _make_subnetpool_dict(self, subnetpool, fields=None):
-        default_prefixlen = str(subnetpool['default_prefixlen'])
-        min_prefixlen = str(subnetpool['min_prefixlen'])
-        max_prefixlen = str(subnetpool['max_prefixlen'])
-        res = {'id': subnetpool['id'],
-               'name': subnetpool['name'],
-               'tenant_id': subnetpool['tenant_id'],
-               'default_prefixlen': default_prefixlen,
-               'min_prefixlen': min_prefixlen,
-               'max_prefixlen': max_prefixlen,
-               'is_default': subnetpool['is_default'],
-               'shared': subnetpool['shared'],
-               'prefixes': [prefix['cidr']
-                            for prefix in subnetpool['prefixes']],
-               'ip_version': subnetpool['ip_version'],
-               'default_quota': subnetpool['default_quota'],
-               'address_scope_id': subnetpool['address_scope_id']}
-        return self._fields(res, fields)
-
-    def _make_port_dict(self, port, fields=None,
-                        process_extensions=True):
-        res = {"id": port["id"],
-               'name': port['name'],
-               "network_id": port["network_id"],
-               'tenant_id': port['tenant_id'],
-               "mac_address": port["mac_address"],
-               "admin_state_up": port["admin_state_up"],
-               "status": port["status"],
-               "fixed_ips": [{'subnet_id': ip["subnet_id"],
-                              'ip_address': ip["ip_address"]}
-                             for ip in port["fixed_ips"]],
-               "device_id": port["device_id"],
-               "device_owner": port["device_owner"]}
-        if "dns_name" in port:
-            res["dns_name"] = port["dns_name"]
-        if "dns_assignment" in port:
-            res["dns_assignment"] = [{"ip_address": a["ip_address"],
-                                      "hostname": a["hostname"],
-                                      "fqdn": a["fqdn"]}
-                                     for a in port["dns_assignment"]]
-        # Call auxiliary extend functions, if any
-        if process_extensions:
-            self._apply_dict_extend_functions(
-                attributes.PORTS, res, port)
-        return self._fields(res, fields)
-
-    def _get_network(self, context, id):
-        try:
-            network = self._get_by_id(context, models_v2.Network, id)
-        except exc.NoResultFound:
-            raise n_exc.NetworkNotFound(net_id=id)
-        return network
-
-    def _get_subnet(self, context, id):
-        try:
-            subnet = self._get_by_id(context, models_v2.Subnet, id)
-        except exc.NoResultFound:
-            raise n_exc.SubnetNotFound(subnet_id=id)
-        return subnet
-
-    def _get_subnetpool(self, context, id):
-        try:
-            return self._get_by_id(context, models_v2.SubnetPool, id)
-        except exc.NoResultFound:
-            raise n_exc.SubnetPoolNotFound(subnetpool_id=id)
-
-    def _get_all_subnetpools(self, context):
-        # NOTE(tidwellr): see note in _get_all_subnets()
-        return context.session.query(models_v2.SubnetPool).all()
-
-    def _get_subnetpools_by_address_scope_id(self, context, address_scope_id):
-        # NOTE(vikram.choudhary): see note in _get_all_subnets()
-        subnetpool_qry = context.session.query(models_v2.SubnetPool)
-        return subnetpool_qry.filter_by(
-            address_scope_id=address_scope_id).all()
-
-    def _get_port(self, context, id):
-        try:
-            port = self._get_by_id(context, models_v2.Port, id)
-        except exc.NoResultFound:
-            raise n_exc.PortNotFound(port_id=id)
-        return port
-
-    def _get_dns_by_subnet(self, context, subnet_id):
-        dns_qry = context.session.query(models_v2.DNSNameServer)
-        return dns_qry.filter_by(subnet_id=subnet_id).order_by(
-            models_v2.DNSNameServer.order).all()
-
-    def _get_route_by_subnet(self, context, subnet_id):
-        route_qry = context.session.query(models_v2.SubnetRoute)
-        return route_qry.filter_by(subnet_id=subnet_id).all()
-
-    def _get_router_gw_ports_by_network(self, context, network_id):
-        port_qry = context.session.query(models_v2.Port)
-        return port_qry.filter_by(network_id=network_id,
-                device_owner=constants.DEVICE_OWNER_ROUTER_GW).all()
-
-    def _get_subnets_by_network(self, context, network_id):
-        subnet_qry = context.session.query(models_v2.Subnet)
-        return subnet_qry.filter_by(network_id=network_id).all()
-
-    def _get_subnets_by_subnetpool(self, context, subnetpool_id):
-        subnet_qry = context.session.query(models_v2.Subnet)
-        return subnet_qry.filter_by(subnetpool_id=subnetpool_id).all()
-
-    def _get_all_subnets(self, context):
-        # NOTE(salvatore-orlando): This query might end up putting
-        # a lot of stress on the db. Consider adding a cache layer
-        return context.session.query(models_v2.Subnet).all()
-
-    def _get_subnets(self, context, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None,
-                     page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'subnet', limit, marker)
-        make_subnet_dict = functools.partial(self._make_subnet_dict,
-                                             context=context)
-        return self._get_collection(context, models_v2.Subnet,
-                                    make_subnet_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts,
-                                    limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def _make_network_dict(self, network, fields=None,
-                           process_extensions=True, context=None):
-        res = {'id': network['id'],
-               'name': network['name'],
-               'tenant_id': network['tenant_id'],
-               'admin_state_up': network['admin_state_up'],
-               'mtu': network.get('mtu', constants.DEFAULT_NETWORK_MTU),
-               'status': network['status'],
-               'subnets': [subnet['id']
-                           for subnet in network['subnets']]}
-        res['shared'] = self._is_network_shared(context, network)
-        # Call auxiliary extend functions, if any
-        if process_extensions:
-            self._apply_dict_extend_functions(
-                attributes.NETWORKS, res, network)
-        return self._fields(res, fields)
-
-    def _is_network_shared(self, context, network):
-        # The shared attribute for a network now reflects whether the
-        # network is shared with the calling tenant via an RBAC entry.
-        matches = ('*',) + ((context.tenant_id,) if context else ())
-        for entry in network.rbac_entries:
-            if (entry.action == 'access_as_shared' and
-                    entry.target_tenant in matches):
-                return True
-        return False
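-    # NOTE(editor): illustrative example, not in the original file: a
-    # network with an RBAC entry (action='access_as_shared',
-    # target_tenant='*') is reported as shared to every tenant, while
-    # target_tenant='tenant-b' makes it shared only from tenant-b's
-    # point of view.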
-
-    def _make_subnet_args(self, detail, subnet, subnetpool_id):
-        gateway_ip = str(detail.gateway_ip) if detail.gateway_ip else None
-        args = {'tenant_id': detail.tenant_id,
-                'id': detail.subnet_id,
-                'name': subnet['name'],
-                'network_id': subnet['network_id'],
-                'ip_version': subnet['ip_version'],
-                'cidr': str(detail.subnet_cidr),
-                'subnetpool_id': subnetpool_id,
-                'enable_dhcp': subnet['enable_dhcp'],
-                'gateway_ip': gateway_ip}
-        if subnet['ip_version'] == 6 and subnet['enable_dhcp']:
-            if attributes.is_attr_set(subnet['ipv6_ra_mode']):
-                args['ipv6_ra_mode'] = subnet['ipv6_ra_mode']
-            if attributes.is_attr_set(subnet['ipv6_address_mode']):
-                args['ipv6_address_mode'] = subnet['ipv6_address_mode']
-        return args
-
-    def _make_fixed_ip_dict(self, ips):
-        # Excludes from dict all keys except subnet_id and ip_address
-        return [{'subnet_id': ip["subnet_id"],
-                 'ip_address': ip["ip_address"]}
-                for ip in ips]
diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py
deleted file mode 100644 (file)
index aa15504..0000000
+++ /dev/null
@@ -1,1383 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-import netaddr
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from oslo_utils import excutils
-from oslo_utils import uuidutils
-from sqlalchemy import and_
-from sqlalchemy import event
-
-from neutron._i18n import _, _LE, _LI
-from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
-from neutron.api.v2 import attributes
-from neutron.callbacks import events
-from neutron.callbacks import exceptions
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.common import utils
-from neutron import context as ctx
-from neutron.db import api as db_api
-from neutron.db import db_base_plugin_common
-from neutron.db import ipam_non_pluggable_backend
-from neutron.db import ipam_pluggable_backend
-from neutron.db import models_v2
-from neutron.db import rbac_db_mixin as rbac_mixin
-from neutron.db import rbac_db_models as rbac_db
-from neutron.db import sqlalchemyutils
-from neutron.extensions import l3
-from neutron import ipam
-from neutron.ipam import subnet_alloc
-from neutron import manager
-from neutron import neutron_plugin_base_v2
-from neutron.notifiers import nova as nova_notifier
-from neutron.plugins.common import constants as service_constants
-
-
-LOG = logging.getLogger(__name__)
-
-# Ports with the following 'device_owner' values will not prevent
-# network deletion.  If delete_network() finds that all ports on a
-# network have these owners, it will explicitly delete each port
-# and allow network deletion to continue.  Similarly, if delete_subnet()
-# finds that all existing IP allocations are associated with ports with
-# these owners, it will allow subnet deletion to proceed, with the
-# IP allocations being cleaned up by cascade.
-AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP]
-
-DNS_DOMAIN_DEFAULT = 'openstacklocal.'
-FQDN_MAX_LEN = 255
-
-
-def _check_subnet_not_used(context, subnet_id):
-    try:
-        kwargs = {'context': context, 'subnet_id': subnet_id}
-        registry.notify(
-            resources.SUBNET, events.BEFORE_DELETE, None, **kwargs)
-    except exceptions.CallbackFailure as e:
-        raise n_exc.SubnetInUse(subnet_id=subnet_id, reason=e)
-
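-# NOTE(editor): illustrative sketch, not part of the original module. A
-# service plugin can veto subnet deletion by subscribing to the event and
-# raising from its callback; the resulting CallbackFailure is converted to
-# SubnetInUse above. '_on_subnet_delete' is a hypothetical callback:
-#
-#     registry.subscribe(_on_subnet_delete, resources.SUBNET,
-#                        events.BEFORE_DELETE)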
-
-class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
-                        neutron_plugin_base_v2.NeutronPluginBaseV2,
-                        rbac_mixin.RbacPluginMixin):
-    """V2 Neutron plugin interface implementation using SQLAlchemy models.
-
-    Whenever a non-read call happens, the plugin will call an event handler
-    class method (e.g., network_created()).  The result is that this class
-    can be sub-classed by other classes that add custom behaviors on certain
-    events.
-    """
-
-    # These attributes specify whether or not the plugin supports
-    # bulk/pagination/sorting operations. Name mangling is used in
-    # order to ensure they are qualified by class
-    __native_bulk_support = True
-    __native_pagination_support = True
-    __native_sorting_support = True
-
-    def __init__(self):
-        self.set_ipam_backend()
-        if cfg.CONF.notify_nova_on_port_status_changes:
-            # NOTE(arosen) These event listeners are here to hook into when
-            # port status changes and notify nova about their change.
-            self.nova_notifier = nova_notifier.Notifier()
-            event.listen(models_v2.Port, 'after_insert',
-                         self.nova_notifier.send_port_status)
-            event.listen(models_v2.Port, 'after_update',
-                         self.nova_notifier.send_port_status)
-            event.listen(models_v2.Port.status, 'set',
-                         self.nova_notifier.record_port_status_changed)
-        for e in (events.BEFORE_CREATE, events.BEFORE_UPDATE,
-                  events.BEFORE_DELETE):
-            registry.subscribe(self.validate_network_rbac_policy_change,
-                               rbac_mixin.RBAC_POLICY, e)
-
-    def validate_network_rbac_policy_change(self, resource, event, trigger,
-                                            context, object_type, policy,
-                                            **kwargs):
-        """Validates network RBAC policy changes.
-
-        On creation, verify that the creator is an admin or that it owns the
-        network it is sharing.
-
-        On update and delete, make sure the tenant losing access does not have
-        resources that depend on that access.
-        """
-        if object_type != 'network':
-            # we only care about network policies
-            return
-        # The object a policy targets cannot be changed so we can look
-        # at the original network for the update event as well.
-        net = self._get_network(context, policy['object_id'])
-        if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE):
-            # we still have to verify that the caller owns the network because
-            # _get_network will succeed on a shared network
-            if not context.is_admin and net['tenant_id'] != context.tenant_id:
-                msg = _("Only admins can manipulate policies on networks "
-                        "they do not own.")
-                raise n_exc.InvalidInput(error_message=msg)
-
-        tenant_to_check = None
-        if event == events.BEFORE_UPDATE:
-            new_tenant = kwargs['policy_update']['target_tenant']
-            if policy['target_tenant'] != new_tenant:
-                tenant_to_check = policy['target_tenant']
-
-        if event == events.BEFORE_DELETE:
-            tenant_to_check = policy['target_tenant']
-
-        if tenant_to_check:
-            self.ensure_no_tenant_ports_on_network(net['id'], net['tenant_id'],
-                                                   tenant_to_check)
-
-    def ensure_no_tenant_ports_on_network(self, network_id, net_tenant_id,
-                                          tenant_id):
-        ctx_admin = ctx.get_admin_context()
-        rb_model = rbac_db.NetworkRBAC
-        other_rbac_entries = self._model_query(ctx_admin, rb_model).filter(
-            and_(rb_model.object_id == network_id,
-                 rb_model.action == 'access_as_shared'))
-        ports = self._model_query(ctx_admin, models_v2.Port).filter(
-            models_v2.Port.network_id == network_id)
-        if tenant_id == '*':
-            # for the wildcard we need to get all of the rbac entries to
-            # see if any allow the remaining ports on the network.
-            other_rbac_entries = other_rbac_entries.filter(
-                rb_model.target_tenant != tenant_id)
-            # any port with another RBAC entry covering it or one belonging to
-            # the same tenant as the network owner is ok
-            allowed_tenants = [entry['target_tenant']
-                               for entry in other_rbac_entries]
-            allowed_tenants.append(net_tenant_id)
-            ports = ports.filter(
-                ~models_v2.Port.tenant_id.in_(allowed_tenants))
-        else:
-            # if there is a wildcard rule, we can return early because it
-            # allows any ports
-            query = other_rbac_entries.filter(rb_model.target_tenant == '*')
-            if query.count():
-                return
-            ports = ports.filter(models_v2.Port.tenant_id == tenant_id)
-        if ports.count():
-            raise n_exc.InvalidSharedSetting(network=network_id)
-
-    def set_ipam_backend(self):
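-        # NOTE(editor): added comment. 'ipam_driver' is unset by default,
-        # which keeps the non-pluggable backend; configuring e.g.
-        # "ipam_driver = internal" in neutron.conf selects the pluggable
-        # reference driver instead.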
-        if cfg.CONF.ipam_driver:
-            self.ipam = ipam_pluggable_backend.IpamPluggableBackend()
-        else:
-            self.ipam = ipam_non_pluggable_backend.IpamNonPluggableBackend()
-
-    def _validate_host_route(self, route, ip_version):
-        try:
-            netaddr.IPNetwork(route['destination'])
-            netaddr.IPAddress(route['nexthop'])
-        except (netaddr.core.AddrFormatError, ValueError):
-            # netaddr.IPAddress raises ValueError for some malformed inputs
-            err_msg = _("Invalid route: %s") % route
-            raise n_exc.InvalidInput(error_message=err_msg)
-        self._validate_ip_version(ip_version, route['nexthop'], 'nexthop')
-        self._validate_ip_version(ip_version, route['destination'],
-                                  'destination')
-
-    def _validate_shared_update(self, context, id, original, updated):
-        # The only case that needs to be validated is when 'shared'
-        # goes from True to False
-        if updated['shared'] == original.shared or updated['shared']:
-            return
-        ports = self._model_query(
-            context, models_v2.Port).filter(
-                and_(
-                    models_v2.Port.network_id == id,
-                    models_v2.Port.device_owner !=
-                    constants.DEVICE_OWNER_ROUTER_GW,
-                    models_v2.Port.device_owner !=
-                    constants.DEVICE_OWNER_FLOATINGIP))
-        subnets = self._model_query(
-            context, models_v2.Subnet).filter(
-                models_v2.Subnet.network_id == id)
-        tenant_ids = set([port['tenant_id'] for port in ports] +
-                         [subnet['tenant_id'] for subnet in subnets])
-        # raise if multiple tenants found or if the only tenant found
-        # is not the owner of the network
-        if (len(tenant_ids) > 1 or (len(tenant_ids) == 1 and
-                                    tenant_ids.pop() != original.tenant_id)):
-            raise n_exc.InvalidSharedSetting(network=original.name)
-
-    def _validate_ipv6_attributes(self, subnet, cur_subnet):
-        if cur_subnet:
-            self._validate_ipv6_update_dhcp(subnet, cur_subnet)
-            return
-        ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode'))
-        address_mode_set = attributes.is_attr_set(
-            subnet.get('ipv6_address_mode'))
-        self._validate_ipv6_dhcp(ra_mode_set, address_mode_set,
-                                 subnet['enable_dhcp'])
-        if ra_mode_set and address_mode_set:
-            self._validate_ipv6_combination(subnet['ipv6_ra_mode'],
-                                            subnet['ipv6_address_mode'])
-        if address_mode_set or ra_mode_set:
-            self._validate_eui64_applicable(subnet)
-
-    def _validate_eui64_applicable(self, subnet):
-        # Per RFC 4862, section 5.5.3, the prefix length and the interface
-        # id together must add up to 128 bits. Currently neutron supports
-        # only EUI-64 interface ids, which limits the prefix length to
-        # exactly 64.
-        if ipv6_utils.is_auto_address_subnet(subnet):
-            if netaddr.IPNetwork(subnet['cidr']).prefixlen != 64:
-                msg = _('Invalid CIDR %s for IPv6 address mode. '
-                        'OpenStack uses the EUI-64 address format, '
-                        'which requires the prefix to be /64.')
-                raise n_exc.InvalidInput(
-                    error_message=(msg % subnet['cidr']))
-
-    def _validate_ipv6_combination(self, ra_mode, address_mode):
-        if ra_mode != address_mode:
-            msg = _("ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode "
-                    "set to '%(addr_mode)s' is not valid. "
-                    "If both attributes are set, they must be the same value"
-                    ) % {'ra_mode': ra_mode, 'addr_mode': address_mode}
-            raise n_exc.InvalidInput(error_message=msg)
-
-    def _validate_ipv6_dhcp(self, ra_mode_set, address_mode_set, enable_dhcp):
-        if (ra_mode_set or address_mode_set) and not enable_dhcp:
-            msg = _("ipv6_ra_mode or ipv6_address_mode cannot be set when "
-                    "enable_dhcp is set to False.")
-            raise n_exc.InvalidInput(error_message=msg)
-
-    def _validate_ipv6_update_dhcp(self, subnet, cur_subnet):
-        if ('enable_dhcp' in subnet and not subnet['enable_dhcp']):
-            msg = _("Cannot disable enable_dhcp with "
-                    "ipv6 attributes set")
-
-            ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode'))
-            address_mode_set = attributes.is_attr_set(
-                subnet.get('ipv6_address_mode'))
-
-            if ra_mode_set or address_mode_set:
-                raise n_exc.InvalidInput(error_message=msg)
-
-            old_ra_mode_set = attributes.is_attr_set(
-                cur_subnet.get('ipv6_ra_mode'))
-            old_address_mode_set = attributes.is_attr_set(
-                cur_subnet.get('ipv6_address_mode'))
-
-            if old_ra_mode_set or old_address_mode_set:
-                raise n_exc.InvalidInput(error_message=msg)
-
-    def _create_bulk(self, resource, context, request_items):
-        objects = []
-        collection = "%ss" % resource
-        items = request_items[collection]
-        context.session.begin(subtransactions=True)
-        try:
-            for item in items:
-                obj_creator = getattr(self, 'create_%s' % resource)
-                objects.append(obj_creator(context, item))
-            context.session.commit()
-        except Exception:
-            context.session.rollback()
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("An exception occurred while creating "
-                              "the %(resource)s:%(item)s"),
-                          {'resource': resource, 'item': item})
-        return objects
-
-    def create_network_bulk(self, context, networks):
-        return self._create_bulk('network', context, networks)
-
-    def create_network(self, context, network):
-        """Handle creation of a single network."""
-        # single request processing
-        n = network['network']
-        # NOTE(jkoelker) Get the tenant_id outside of the session to avoid
-        #                unneeded db action if the operation raises
-        tenant_id = n['tenant_id']
-        with context.session.begin(subtransactions=True):
-            args = {'tenant_id': tenant_id,
-                    'id': n.get('id') or uuidutils.generate_uuid(),
-                    'name': n['name'],
-                    'admin_state_up': n['admin_state_up'],
-                    'mtu': n.get('mtu', constants.DEFAULT_NETWORK_MTU),
-                    'status': n.get('status', constants.NET_STATUS_ACTIVE)}
-            network = models_v2.Network(**args)
-            if n['shared']:
-                entry = rbac_db.NetworkRBAC(
-                    network=network, action='access_as_shared',
-                    target_tenant='*', tenant_id=network['tenant_id'])
-                context.session.add(entry)
-            context.session.add(network)
-        return self._make_network_dict(network, process_extensions=False,
-                                       context=context)
-
-    def update_network(self, context, id, network):
-        n = network['network']
-        with context.session.begin(subtransactions=True):
-            network = self._get_network(context, id)
-            # validate 'shared' parameter
-            if 'shared' in n:
-                entry = None
-                for item in network.rbac_entries:
-                    if (item.action == 'access_as_shared' and
-                            item.target_tenant == '*'):
-                        entry = item
-                        break
-                setattr(network, 'shared', bool(entry))
-                self._validate_shared_update(context, id, network, n)
-                update_shared = n.pop('shared')
-                if update_shared and not entry:
-                    entry = rbac_db.NetworkRBAC(
-                        network=network, action='access_as_shared',
-                        target_tenant='*', tenant_id=network['tenant_id'])
-                    context.session.add(entry)
-                elif not update_shared and entry:
-                    context.session.delete(entry)
-                    context.session.expire(network, ['rbac_entries'])
-            network.update(n)
-        return self._make_network_dict(network, context=context)
-
-    def delete_network(self, context, id):
-        with context.session.begin(subtransactions=True):
-            network = self._get_network(context, id)
-
-            context.session.query(models_v2.Port).filter_by(
-                network_id=id).filter(
-                models_v2.Port.device_owner.
-                in_(AUTO_DELETE_PORT_OWNERS)).delete(synchronize_session=False)
-
-            port_in_use = context.session.query(models_v2.Port).filter_by(
-                network_id=id).first()
-
-            if port_in_use:
-                raise n_exc.NetworkInUse(net_id=id)
-
-            # clean up subnets
-            subnets = self._get_subnets_by_network(context, id)
-            for subnet in subnets:
-                self.delete_subnet(context, subnet['id'])
-
-            context.session.delete(network)
-
-    def get_network(self, context, id, fields=None):
-        network = self._get_network(context, id)
-        return self._make_network_dict(network, fields, context=context)
-
-    def get_networks(self, context, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None,
-                     page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'network', limit, marker)
-        make_network_dict = functools.partial(self._make_network_dict,
-                                              context=context)
-        return self._get_collection(context, models_v2.Network,
-                                    make_network_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts,
-                                    limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def get_networks_count(self, context, filters=None):
-        return self._get_collection_count(context, models_v2.Network,
-                                          filters=filters)
-
-    def create_subnet_bulk(self, context, subnets):
-        return self._create_bulk('subnet', context, subnets)
-
-    def _validate_ip_version(self, ip_version, addr, name):
-        """Check IP field of a subnet match specified ip version."""
-        ip = netaddr.IPNetwork(addr)
-        if ip.version != ip_version:
-            data = {'name': name,
-                    'addr': addr,
-                    'ip_version': ip_version}
-            msg = _("%(name)s '%(addr)s' does not match "
-                    "the ip_version '%(ip_version)s'") % data
-            raise n_exc.InvalidInput(error_message=msg)
-
-    def _validate_subnet(self, context, s, cur_subnet=None):
-        """Validate a subnet spec."""
-
-        # This method validates attributes which may change during
-        # create_subnet() and update_subnet().
-        # The method requires that the subnet spec 's' has an 'ip_version'
-        # field. If the 's' dict in an API call does not carry one (e.g.,
-        # update_subnet()), the caller must set the 'ip_version' field
-        # before calling this method.
-
-        ip_ver = s['ip_version']
-
-        if attributes.is_attr_set(s.get('cidr')):
-            self._validate_ip_version(ip_ver, s['cidr'], 'cidr')
-
-        # TODO(watanabe.isao): After we found a way to avoid the re-sync
-        # from the agent side, this restriction could be removed.
-        if cur_subnet:
-            dhcp_was_enabled = cur_subnet.enable_dhcp
-        else:
-            dhcp_was_enabled = False
-        if s.get('enable_dhcp') and not dhcp_was_enabled:
-            subnet_prefixlen = netaddr.IPNetwork(s['cidr']).prefixlen
-            error_message = _("Subnet has a prefix length that is "
-                              "incompatible with DHCP service enabled.")
-            if ((ip_ver == 4 and subnet_prefixlen > 30) or
-                (ip_ver == 6 and subnet_prefixlen > 126)):
-                raise n_exc.InvalidInput(error_message=error_message)
-
-            net = netaddr.IPNetwork(s['cidr'])
-            if net.is_multicast():
-                error_message = _("Multicast IP subnet is not supported "
-                                  "if enable_dhcp is True.")
-                raise n_exc.InvalidInput(error_message=error_message)
-            elif net.is_loopback():
-                error_message = _("Loopback IP subnet is not supported "
-                                  "if enable_dhcp is True.")
-                raise n_exc.InvalidInput(error_message=error_message)
-
-        if attributes.is_attr_set(s.get('gateway_ip')):
-            self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip')
-            if (cfg.CONF.force_gateway_on_subnet and
-                not ipam.utils.check_gateway_in_subnet(
-                    s['cidr'], s['gateway_ip'])):
-                error_message = _("Gateway is not valid on subnet")
-                raise n_exc.InvalidInput(error_message=error_message)
-            # Ensure the gateway IP is not assigned to any port
-            # skip this check in case of create (s parameter won't have id)
-            # NOTE(salv-orlando): There is a slight chance of a race when
-            # a subnet-update and a router-interface-add operation are
-            # executed concurrently
-            if cur_subnet and not ipv6_utils.is_ipv6_pd_enabled(s):
-                alloc_qry = context.session.query(models_v2.IPAllocation)
-                allocated = alloc_qry.filter_by(
-                    ip_address=cur_subnet['gateway_ip'],
-                    subnet_id=cur_subnet['id']).first()
-                if allocated and allocated['port_id']:
-                    raise n_exc.GatewayIpInUse(
-                        ip_address=cur_subnet['gateway_ip'],
-                        port_id=allocated['port_id'])
-
-        if attributes.is_attr_set(s.get('dns_nameservers')):
-            if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers:
-                raise n_exc.DNSNameServersExhausted(
-                    subnet_id=s.get('id', _('new subnet')),
-                    quota=cfg.CONF.max_dns_nameservers)
-            for dns in s['dns_nameservers']:
-                try:
-                    netaddr.IPAddress(dns)
-                except Exception:
-                    raise n_exc.InvalidInput(
-                        error_message=(_("Error parsing dns address %s") %
-                                       dns))
-                self._validate_ip_version(ip_ver, dns, 'dns_nameserver')
-
-        if attributes.is_attr_set(s.get('host_routes')):
-            if len(s['host_routes']) > cfg.CONF.max_subnet_host_routes:
-                raise n_exc.HostRoutesExhausted(
-                    subnet_id=s.get('id', _('new subnet')),
-                    quota=cfg.CONF.max_subnet_host_routes)
-            # check if the routes are all valid
-            for rt in s['host_routes']:
-                self._validate_host_route(rt, ip_ver)
-
-        if ip_ver == 4:
-            if attributes.is_attr_set(s.get('ipv6_ra_mode')):
-                raise n_exc.InvalidInput(
-                    error_message=(_("ipv6_ra_mode is not valid when "
-                                     "ip_version is 4")))
-            if attributes.is_attr_set(s.get('ipv6_address_mode')):
-                raise n_exc.InvalidInput(
-                    error_message=(_("ipv6_address_mode is not valid when "
-                                     "ip_version is 4")))
-        if ip_ver == 6:
-            self._validate_ipv6_attributes(s, cur_subnet)
-
-    def _validate_subnet_for_pd(self, subnet):
-        """Validates that subnet parameters are correct for IPv6 PD"""
-        if (subnet.get('ip_version') != constants.IP_VERSION_6):
-            reason = _("Prefix Delegation can only be used with IPv6 "
-                       "subnets.")
-            raise n_exc.BadRequest(resource='subnets', msg=reason)
-
-        mode_list = [constants.IPV6_SLAAC,
-                     constants.DHCPV6_STATELESS,
-                     attributes.ATTR_NOT_SPECIFIED]
-
-        ra_mode = subnet.get('ipv6_ra_mode')
-        if ra_mode not in mode_list:
-            reason = _("IPv6 RA Mode must be SLAAC or Stateless for "
-                       "Prefix Delegation.")
-            raise n_exc.BadRequest(resource='subnets', msg=reason)
-
-        address_mode = subnet.get('ipv6_address_mode')
-        if address_mode not in mode_list:
-            reason = _("IPv6 Address Mode must be SLAAC or Stateless for "
-                       "Prefix Delegation.")
-            raise n_exc.BadRequest(resource='subnets', msg=reason)
-
-    def _update_router_gw_ports(self, context, network, subnet):
-        l3plugin = manager.NeutronManager.get_service_plugins().get(
-                service_constants.L3_ROUTER_NAT)
-        if l3plugin:
-            gw_ports = self._get_router_gw_ports_by_network(context,
-                    network['id'])
-            router_ids = [p['device_id'] for p in gw_ports]
-            ctx_admin = context.elevated()
-            ext_subnets_dict = {s['id']: s for s in network['subnets']}
-            for id in router_ids:
-                router = l3plugin.get_router(ctx_admin, id)
-                external_gateway_info = router['external_gateway_info']
-                # Get all stateful (i.e. non-SLAAC/DHCPv6-stateless) fixed ips
-                fips = [f for f in external_gateway_info['external_fixed_ips']
-                        if not ipv6_utils.is_auto_address_subnet(
-                            ext_subnets_dict[f['subnet_id']])]
-                num_fips = len(fips)
-                # Don't add the fixed IP to the port if it already
-                # has a stateful fixed IP of the same IP version
-                if num_fips > 1:
-                    continue
-                if num_fips == 1 and netaddr.IPAddress(
-                        fips[0]['ip_address']).version == subnet['ip_version']:
-                    continue
-                external_gateway_info['external_fixed_ips'].append(
-                    {'subnet_id': subnet['id']})
-                info = {'router': {'external_gateway_info':
-                                   external_gateway_info}}
-                l3plugin.update_router(context, id, info)
-
-    def _create_subnet(self, context, subnet, subnetpool_id):
-        s = subnet['subnet']
-
-        with context.session.begin(subtransactions=True):
-            network = self._get_network(context, s["network_id"])
-            subnet, ipam_subnet = self.ipam.allocate_subnet(context,
-                                                            network,
-                                                            s,
-                                                            subnetpool_id)
-        if hasattr(network, 'external') and network.external:
-            self._update_router_gw_ports(context,
-                                         network,
-                                         subnet)
-        # If this subnet supports auto-addressing, then update any
-        # internal ports on the network with addresses for this subnet.
-        if ipv6_utils.is_auto_address_subnet(subnet):
-            self.ipam.add_auto_addrs_on_network_ports(context, subnet,
-                                                      ipam_subnet)
-        return self._make_subnet_dict(subnet, context=context)
-
-    def _get_subnetpool_id(self, context, subnet):
-        """Returns the subnetpool id for this request
-
-        If the pool id was explicitly set in the request then that will be
-        returned, even if it is None.
-
-        Otherwise, the default pool for the IP version requested will be
-        returned.  This will either be a pool id or None (the default for each
-        configuration parameter).  This implies that the ip version must
-        be set either implicitly with a specific cidr or explicitly via
-        the ip_version attribute.
-
-        :param subnet: The subnet dict from the request
-        """
-        subnetpool_id = subnet.get('subnetpool_id',
-                                   attributes.ATTR_NOT_SPECIFIED)
-        if subnetpool_id != attributes.ATTR_NOT_SPECIFIED:
-            return subnetpool_id
-
-        cidr = subnet.get('cidr')
-        if attributes.is_attr_set(cidr):
-            ip_version = netaddr.IPNetwork(cidr).version
-        else:
-            ip_version = subnet.get('ip_version')
-            if not attributes.is_attr_set(ip_version):
-                msg = _('ip_version must be specified in the absence of '
-                        'cidr and subnetpool_id')
-                raise n_exc.BadRequest(resource='subnets', msg=msg)
-
-        if ip_version == 6 and cfg.CONF.ipv6_pd_enabled:
-            return constants.IPV6_PD_POOL_ID
-
-        subnetpool = self.get_default_subnetpool(context, ip_version)
-        if subnetpool:
-            return subnetpool['id']
-
-        # Until the default_subnet_pool config options are removed in the N
-        # release, check for them after get_default_subnetpool returns None.
-        # TODO(john-davidge): Remove after Mitaka release.
-        if ip_version == 4:
-            return cfg.CONF.default_ipv4_subnet_pool
-        return cfg.CONF.default_ipv6_subnet_pool
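-
-    # NOTE(editor): illustrative summary, not in the original file. The
-    # resolution order implemented above: an explicitly supplied
-    # subnetpool_id wins (even when it is None), then the IPv6
-    # prefix-delegation pool when ipv6_pd_enabled is set, then the
-    # default subnetpool for the IP version, and finally the deprecated
-    # default_ipv4_subnet_pool/default_ipv6_subnet_pool options.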
-
-    def create_subnet(self, context, subnet):
-
-        s = subnet['subnet']
-        cidr = s.get('cidr', attributes.ATTR_NOT_SPECIFIED)
-        prefixlen = s.get('prefixlen', attributes.ATTR_NOT_SPECIFIED)
-        has_cidr = attributes.is_attr_set(cidr)
-        has_prefixlen = attributes.is_attr_set(prefixlen)
-
-        if has_cidr and has_prefixlen:
-            msg = _('cidr and prefixlen must not be supplied together')
-            raise n_exc.BadRequest(resource='subnets', msg=msg)
-
-        if has_cidr:
-            # turn the CIDR into a proper subnet
-            net = netaddr.IPNetwork(s['cidr'])
-            subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen)
-
-        subnetpool_id = self._get_subnetpool_id(context, s)
-        if subnetpool_id:
-            self.ipam.validate_pools_with_subnetpool(s)
-            if subnetpool_id == constants.IPV6_PD_POOL_ID:
-                if has_cidr:
-                    # We do not currently support requesting a specific
-                    # cidr with IPv6 prefix delegation. Set the subnetpool_id
-                    # to None and allow the request to continue as normal.
-                    subnetpool_id = None
-                    self._validate_subnet(context, s)
-                else:
-                    prefix = constants.PROVISIONAL_IPV6_PD_PREFIX
-                    subnet['subnet']['cidr'] = prefix
-                    self._validate_subnet_for_pd(s)
-        else:
-            if not has_cidr:
-                msg = _('A cidr must be specified in the absence of a '
-                        'subnet pool')
-                raise n_exc.BadRequest(resource='subnets', msg=msg)
-            self._validate_subnet(context, s)
-
-        return self._create_subnet(context, subnet, subnetpool_id)
-
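# [Editor's note -- illustrative addition, not part of the original file.]
# "Turn the CIDR into a proper subnet" above normalizes a host-style CIDR
# to its network address; a minimal standalone equivalent:

import netaddr

def normalize_cidr(cidr):
    # e.g. '192.0.2.5/24' -> '192.0.2.0/24'
    net = netaddr.IPNetwork(cidr)
    return '%s/%s' % (net.network, net.prefixlen)

assert normalize_cidr('192.0.2.5/24') == '192.0.2.0/24'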
-    def _update_allocation_pools(self, subnet):
-        """Gets new allocation pools and formats them correctly"""
-        allocation_pools = self.ipam.generate_pools(subnet['cidr'],
-                                                    subnet['gateway_ip'])
-        return [{'start': str(netaddr.IPAddress(p.first,
-                                                subnet['ip_version'])),
-                 'end': str(netaddr.IPAddress(p.last, subnet['ip_version']))}
-                for p in allocation_pools]
-
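# [Editor's note -- illustrative addition, not part of the original file.]
# The exact pool layout is decided by self.ipam.generate_pools(); a rough
# standalone approximation for an IPv4 subnet, assuming only the network,
# broadcast and gateway addresses are excluded:

import netaddr

def rough_pools(cidr, gateway_ip):
    net = netaddr.IPNetwork(cidr)
    # Skip the network and broadcast addresses ...
    pools = netaddr.IPSet(netaddr.IPRange(net.first + 1, net.last - 1))
    # ... and keep the gateway out of the allocation pools.
    pools.remove(gateway_ip)
    return [{'start': str(netaddr.IPAddress(r.first)),
             'end': str(netaddr.IPAddress(r.last))}
            for r in pools.iter_ipranges()]

# rough_pools('192.0.2.0/24', '192.0.2.1')
# -> [{'start': '192.0.2.2', 'end': '192.0.2.254'}]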
-    def update_subnet(self, context, id, subnet):
-        """Update the subnet with new info.
-
-        The change, however, will not be realized until the client renews its
-        DHCP lease or gratuitous DHCP offers are supported.
-        """
-        s = subnet['subnet']
-        new_cidr = s.get('cidr')
-        db_subnet = self._get_subnet(context, id)
-        # Fill 'ip_version' and 'allocation_pools' fields with the current
-        # value since _validate_subnet() expects subnet spec has 'ip_version'
-        # and 'allocation_pools' fields.
-        s['ip_version'] = db_subnet.ip_version
-        s['cidr'] = db_subnet.cidr
-        s['id'] = db_subnet.id
-        s['tenant_id'] = db_subnet.tenant_id
-        s['subnetpool_id'] = db_subnet.subnetpool_id
-        self._validate_subnet(context, s, cur_subnet=db_subnet)
-        db_pools = [netaddr.IPRange(p['first_ip'], p['last_ip'])
-                    for p in db_subnet.allocation_pools]
-
-        update_ports_needed = False
-        if new_cidr and ipv6_utils.is_ipv6_pd_enabled(s):
-            # This is an ipv6 prefix delegation-enabled subnet being given an
-            # updated cidr by the process_prefix_update RPC
-            s['cidr'] = new_cidr
-            update_ports_needed = True
-            net = netaddr.IPNetwork(s['cidr'], s['ip_version'])
-            # Update gateway_ip and allocation pools based on new cidr
-            s['gateway_ip'] = utils.get_first_host_ip(net, s['ip_version'])
-            s['allocation_pools'] = self._update_allocation_pools(s)
-
-        range_pools = None
-        if s.get('allocation_pools') is not None:
-            # Convert allocation pools to IPRange to simplify future checks
-            range_pools = self.ipam.pools_to_ip_range(s['allocation_pools'])
-            self.ipam.validate_allocation_pools(range_pools, s['cidr'])
-            s['allocation_pools'] = range_pools
-
-        # If either gateway_ip or allocation_pools were specified
-        gateway_ip = s.get('gateway_ip', db_subnet.gateway_ip)
-        gateway_ip_changed = gateway_ip != db_subnet.gateway_ip
-        if gateway_ip_changed or s.get('allocation_pools') is not None:
-            pools = range_pools if range_pools is not None else db_pools
-            if gateway_ip:
-                self.ipam.validate_gw_out_of_pools(gateway_ip, pools)
-
-        if gateway_ip_changed:
-            # Provide pre-update notification not to break plugins that don't
-            # support gateway ip change
-            kwargs = {'context': context, 'subnet_id': id,
-                      'network_id': db_subnet.network_id}
-            registry.notify(resources.SUBNET_GATEWAY, events.BEFORE_UPDATE,
-                            self, **kwargs)
-
-        with context.session.begin(subtransactions=True):
-            subnet, changes = self.ipam.update_db_subnet(context, id, s,
-                                                         db_pools)
-        result = self._make_subnet_dict(subnet, context=context)
-        # Keep up with fields that changed
-        result.update(changes)
-
-        if update_ports_needed:
-            # Find ports that have not yet been updated
-            # with an IP address by Prefix Delegation, and update them
-            ports = self.get_ports(context)
-            routers = []
-            for port in ports:
-                fixed_ips = []
-                new_port = {'port': port}
-                for ip in port['fixed_ips']:
-                    if ip['subnet_id'] == s['id']:
-                        fixed_ip = {'subnet_id': s['id']}
-                        if "router_interface" in port['device_owner']:
-                            routers.append(port['device_id'])
-                            fixed_ip['ip_address'] = s['gateway_ip']
-                        fixed_ips.append(fixed_ip)
-                if fixed_ips:
-                    new_port['port']['fixed_ips'] = fixed_ips
-                    self.update_port(context, port['id'], new_port)
-
-            # Send router_update to l3_agent
-            if routers:
-                l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
-                l3_rpc_notifier.routers_updated(context, routers)
-
-        if gateway_ip_changed:
-            kwargs = {'context': context, 'subnet_id': id,
-                      'network_id': db_subnet.network_id}
-            registry.notify(resources.SUBNET_GATEWAY, events.AFTER_UPDATE,
-                            self, **kwargs)
-
-        return result
-
-    def _subnet_check_ip_allocations(self, context, subnet_id):
-        return (context.session.query(models_v2.IPAllocation).
-                filter_by(subnet_id=subnet_id).join(models_v2.Port).first())
-
-    def _subnet_get_user_allocation(self, context, subnet_id):
-        """Check if there are any user ports on subnet and return first."""
-        # Need to join with the ports table, as IPAllocation's port is not
-        # joined eagerly; an un-joined query would yield incorrect results.
-        return (context.session.query(models_v2.IPAllocation).
-                filter_by(subnet_id=subnet_id).join(models_v2.Port).
-                filter(~models_v2.Port.device_owner.
-                       in_(AUTO_DELETE_PORT_OWNERS)).first())
-
-    def _subnet_check_ip_allocations_internal_router_ports(self, context,
-                                                           subnet_id):
-        # Do not delete the subnet if IP allocations for internal
-        # router ports still exist
-        allocs = context.session.query(models_v2.IPAllocation).filter_by(
-                subnet_id=subnet_id).join(models_v2.Port).filter(
-                        models_v2.Port.device_owner.in_(
-                            constants.ROUTER_INTERFACE_OWNERS)
-                ).first()
-        if allocs:
-            LOG.debug("Subnet %s still has internal router ports, "
-                      "cannot delete", subnet_id)
-            raise n_exc.SubnetInUse(subnet_id=subnet_id)
-
-    def delete_subnet(self, context, id):
-        with context.session.begin(subtransactions=True):
-            subnet = self._get_subnet(context, id)
-
-            # Make sure the subnet isn't used by other resources
-            _check_subnet_not_used(context, id)
-
-            # Delete all network owned ports
-            qry_network_ports = (
-                context.session.query(models_v2.IPAllocation).
-                filter_by(subnet_id=subnet['id']).
-                join(models_v2.Port))
-            # Remove network owned ports, and delete IP allocations
-            # for IPv6 addresses which were automatically generated
-            # via SLAAC
-            is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
-            if is_auto_addr_subnet:
-                self._subnet_check_ip_allocations_internal_router_ports(
-                        context, id)
-            else:
-                qry_network_ports = (
-                    qry_network_ports.filter(models_v2.Port.device_owner.
-                    in_(AUTO_DELETE_PORT_OWNERS)))
-            network_ports = qry_network_ports.all()
-            if network_ports:
-                for port in network_ports:
-                    context.session.delete(port)
-            # Check if there are more IP allocations, unless
-            # is_auto_address_subnet is True. In that case the check is
-            # unnecessary: not only would it be wasteful for this class of
-            # subnet, it is also error-prone, since allocations made
-            # concurrently will be returned by this query when the isolation
-            # level is set to READ COMMITTED.
-            if not is_auto_addr_subnet:
-                alloc = self._subnet_check_ip_allocations(context, id)
-                if alloc:
-                    LOG.info(_LI("Found port (%(port_id)s, %(ip)s) having IP "
-                                 "allocation on subnet "
-                                 "%(subnet)s, cannot delete"),
-                             {'ip': alloc.ip_address,
-                              'port_id': alloc.port_id,
-                              'subnet': id})
-                    raise n_exc.SubnetInUse(subnet_id=id)
-
-            context.session.delete(subnet)
-            # Delete related ipam subnet manually,
-            # since there is no FK relationship
-            self.ipam.delete_subnet(context, id)
-
-    def get_subnet(self, context, id, fields=None):
-        subnet = self._get_subnet(context, id)
-        return self._make_subnet_dict(subnet, fields, context=context)
-
-    def get_subnets(self, context, filters=None, fields=None,
-                    sorts=None, limit=None, marker=None,
-                    page_reverse=False):
-        return self._get_subnets(context, filters, fields, sorts, limit,
-                                 marker, page_reverse)
-
-    def get_subnets_count(self, context, filters=None):
-        return self._get_collection_count(context, models_v2.Subnet,
-                                          filters=filters)
-
-    def get_subnets_by_network(self, context, network_id):
-        return [self._make_subnet_dict(subnet_db) for subnet_db in
-                self._get_subnets_by_network(context, network_id)]
-
-    def _create_subnetpool_prefix(self, context, cidr, subnetpool_id):
-        prefix_args = {'cidr': cidr, 'subnetpool_id': subnetpool_id}
-        subnetpool_prefix = models_v2.SubnetPoolPrefix(**prefix_args)
-        context.session.add(subnetpool_prefix)
-
-    def _validate_address_scope_id(self, context, address_scope_id,
-                                   subnetpool_id, sp_prefixes, ip_version):
-        """Validate the address scope before associating.
-
-        A subnetpool can be associated with an address scope if
-          - the tenant user is the owner of both the subnetpool and the
-            address scope,
-          - the admin is associating the subnetpool with a shared
-            address scope,
-          - there is no prefix conflict with the existing subnetpools
-            associated with the address scope, and
-          - the address family of the subnetpool and the address scope
-            are the same.
-        """
-        if not attributes.is_attr_set(address_scope_id):
-            return
-
-        if not self.is_address_scope_owned_by_tenant(context,
-                                                     address_scope_id):
-            raise n_exc.IllegalSubnetPoolAssociationToAddressScope(
-                subnetpool_id=subnetpool_id, address_scope_id=address_scope_id)
-
-        as_ip_version = self.get_ip_version_for_address_scope(context,
-                                                              address_scope_id)
-
-        if ip_version != as_ip_version:
-            raise n_exc.IllegalSubnetPoolIpVersionAssociationToAddressScope(
-                subnetpool_id=subnetpool_id, address_scope_id=address_scope_id,
-                ip_version=as_ip_version)
-
-        subnetpools = self._get_subnetpools_by_address_scope_id(
-            context, address_scope_id)
-
-        new_set = netaddr.IPSet(sp_prefixes)
-        for sp in subnetpools:
-            if sp.id == subnetpool_id:
-                continue
-            sp_set = netaddr.IPSet([prefix['cidr'] for prefix in sp.prefixes])
-            if sp_set.intersection(new_set):
-                raise n_exc.AddressScopePrefixConflict()
-
-    def _check_subnetpool_update_allowed(self, context, subnetpool_id,
-                                         address_scope_id):
-        """Check if the subnetpool can be updated or not.
-
-        If the subnetpool is associated with a shared address scope not owned
-        by the tenant, then the subnetpool cannot be updated.
-        """
-
-        if not self.is_address_scope_owned_by_tenant(context,
-                                                     address_scope_id):
-            msg = _("subnetpool %(subnetpool_id)s cannot be updated when"
-                    " associated with shared address scope "
-                    "%(address_scope_id)s") % {
-                        'subnetpool_id': subnetpool_id,
-                        'address_scope_id': address_scope_id}
-            raise n_exc.IllegalSubnetPoolUpdate(reason=msg)
-
-    def _check_default_subnetpool_exists(self, context, ip_version):
-        """Check if a default already exists for the given IP version.
-
-        There can only be one default subnetpool for each IP family. Raise an
-        InvalidInput error if a default has already been set.
-        """
-        if self.get_default_subnetpool(context, ip_version):
-            msg = _("A default subnetpool for this IP family has already "
-                    "been set. Only one default may exist per IP family")
-            raise n_exc.InvalidInput(error_message=msg)
-
-    def create_subnetpool(self, context, subnetpool):
-        """Create a subnetpool"""
-
-        sp = subnetpool['subnetpool']
-        sp_reader = subnet_alloc.SubnetPoolReader(sp)
-        if sp_reader.address_scope_id is attributes.ATTR_NOT_SPECIFIED:
-            sp_reader.address_scope_id = None
-        if sp_reader.is_default:
-            self._check_default_subnetpool_exists(context,
-                                                  sp_reader.ip_version)
-        self._validate_address_scope_id(context, sp_reader.address_scope_id,
-                                        sp_reader.id, sp_reader.prefixes,
-                                        sp_reader.ip_version)
-        with context.session.begin(subtransactions=True):
-            pool_args = {'tenant_id': sp['tenant_id'],
-                         'id': sp_reader.id,
-                         'name': sp_reader.name,
-                         'ip_version': sp_reader.ip_version,
-                         'default_prefixlen':
-                         sp_reader.default_prefixlen,
-                         'min_prefixlen': sp_reader.min_prefixlen,
-                         'max_prefixlen': sp_reader.max_prefixlen,
-                         'is_default': sp_reader.is_default,
-                         'shared': sp_reader.shared,
-                         'default_quota': sp_reader.default_quota,
-                         'address_scope_id': sp_reader.address_scope_id}
-            subnetpool = models_v2.SubnetPool(**pool_args)
-            context.session.add(subnetpool)
-            for prefix in sp_reader.prefixes:
-                self._create_subnetpool_prefix(context,
-                                               prefix,
-                                               subnetpool.id)
-
-        return self._make_subnetpool_dict(subnetpool)
-
-    def _update_subnetpool_prefixes(self, context, prefix_list, id):
-        with context.session.begin(subtransactions=True):
-            context.session.query(models_v2.SubnetPoolPrefix).filter_by(
-                subnetpool_id=id).delete()
-            for prefix in prefix_list:
-                model_prefix = models_v2.SubnetPoolPrefix(
-                    cidr=prefix, subnetpool_id=id)
-                context.session.add(model_prefix)
-
-    def _updated_subnetpool_dict(self, model, new_pool):
-        updated = {}
-        new_prefixes = new_pool.get('prefixes', attributes.ATTR_NOT_SPECIFIED)
-        orig_prefixes = [str(x.cidr) for x in model['prefixes']]
-        if new_prefixes is not attributes.ATTR_NOT_SPECIFIED:
-            orig_set = netaddr.IPSet(orig_prefixes)
-            new_set = netaddr.IPSet(new_prefixes)
-            if not orig_set.issubset(new_set):
-                msg = _("Existing prefixes must be "
-                        "a subset of the new prefixes")
-                raise n_exc.IllegalSubnetPoolPrefixUpdate(msg=msg)
-            new_set.compact()
-            updated['prefixes'] = [str(x.cidr) for x in new_set.iter_cidrs()]
-        else:
-            updated['prefixes'] = orig_prefixes
-
-        for key in ['id', 'name', 'ip_version', 'min_prefixlen',
-                    'max_prefixlen', 'default_prefixlen', 'is_default',
-                    'shared', 'default_quota', 'address_scope_id']:
-            self._write_key(key, updated, model, new_pool)
-
-        return updated
-
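# [Editor's note -- illustrative addition, not part of the original file.]
# The prefix-update rule above: existing prefixes must be covered by the
# new set, and the result is compacted into a minimal list of CIDRs:

import netaddr

orig_set = netaddr.IPSet(['10.0.0.0/24'])
new_set = netaddr.IPSet(['10.0.0.0/24', '10.0.1.0/24'])

assert orig_set.issubset(new_set)      # update allowed
new_set.compact()
# The two adjacent /24s merge into a single /23:
assert [str(c) for c in new_set.iter_cidrs()] == ['10.0.0.0/23']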
-    def _write_key(self, key, update, orig, new_dict):
-        new_val = new_dict.get(key, attributes.ATTR_NOT_SPECIFIED)
-        if new_val is not attributes.ATTR_NOT_SPECIFIED:
-            update[key] = new_dict[key]
-        else:
-            update[key] = orig[key]
-
-    def update_subnetpool(self, context, id, subnetpool):
-        """Update a subnetpool"""
-        new_sp = subnetpool['subnetpool']
-
-        with context.session.begin(subtransactions=True):
-            orig_sp = self._get_subnetpool(context, id)
-            updated = self._updated_subnetpool_dict(orig_sp, new_sp)
-            updated['tenant_id'] = orig_sp.tenant_id
-            reader = subnet_alloc.SubnetPoolReader(updated)
-            if reader.is_default and not orig_sp.is_default:
-                self._check_default_subnetpool_exists(context,
-                                                      reader.ip_version)
-            if orig_sp.address_scope_id:
-                self._check_subnetpool_update_allowed(context, id,
-                                                      orig_sp.address_scope_id)
-
-            self._validate_address_scope_id(context, reader.address_scope_id,
-                                            id, reader.prefixes,
-                                            reader.ip_version)
-            orig_sp.update(self._filter_non_model_columns(
-                                                      reader.subnetpool,
-                                                      models_v2.SubnetPool))
-            self._update_subnetpool_prefixes(context,
-                                             reader.prefixes,
-                                             id)
-        for key in ['min_prefixlen', 'max_prefixlen', 'default_prefixlen']:
-            updated[key] = str(updated[key])
-
-        return updated
-
-    def get_subnetpool(self, context, id, fields=None):
-        """Retrieve a subnetpool."""
-        subnetpool = self._get_subnetpool(context, id)
-        return self._make_subnetpool_dict(subnetpool, fields)
-
-    def get_subnetpools(self, context, filters=None, fields=None,
-                        sorts=None, limit=None, marker=None,
-                        page_reverse=False):
-        """Retrieve list of subnetpools."""
-        marker_obj = self._get_marker_obj(context, 'subnetpool', limit, marker)
-        collection = self._get_collection(context, models_v2.SubnetPool,
-                                          self._make_subnetpool_dict,
-                                          filters=filters, fields=fields,
-                                          sorts=sorts,
-                                          limit=limit,
-                                          marker_obj=marker_obj,
-                                          page_reverse=page_reverse)
-        return collection
-
-    def get_default_subnetpool(self, context, ip_version):
-        """Retrieve the default subnetpool for the given IP version."""
-        filters = {'is_default': [True],
-                   'ip_version': [ip_version]}
-        subnetpool = self.get_subnetpools(context, filters=filters)
-        if subnetpool:
-            return subnetpool[0]
-
-    def delete_subnetpool(self, context, id):
-        """Delete a subnetpool."""
-        with context.session.begin(subtransactions=True):
-            subnetpool = self._get_subnetpool(context, id)
-            subnets = self._get_subnets_by_subnetpool(context, id)
-            if subnets:
-                reason = _("Subnet pool has existing allocations")
-                raise n_exc.SubnetPoolDeleteError(reason=reason)
-            context.session.delete(subnetpool)
-
-    def _check_mac_addr_update(self, context, port, new_mac, device_owner):
-        if (device_owner and
-            device_owner.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX)):
-            raise n_exc.UnsupportedPortDeviceOwner(
-                op=_("mac address update"), port_id=port['id'],
-                device_owner=device_owner)
-
-    def create_port_bulk(self, context, ports):
-        return self._create_bulk('port', context, ports)
-
-    def _get_dns_domain(self):
-        if not cfg.CONF.dns_domain:
-            return ''
-        if cfg.CONF.dns_domain.endswith('.'):
-            return cfg.CONF.dns_domain
-        return '%s.' % cfg.CONF.dns_domain
-
-    def _get_request_dns_name(self, port):
-        dns_domain = self._get_dns_domain()
-        if dns_domain and dns_domain != DNS_DOMAIN_DEFAULT:
-            return port.get('dns_name', '')
-        return ''
-
-    def _get_dns_names_for_port(self, context, ips, request_dns_name):
-        dns_assignment = []
-        dns_domain = self._get_dns_domain()
-        if request_dns_name:
-            request_fqdn = request_dns_name
-            if not request_dns_name.endswith('.'):
-                request_fqdn = '%s.%s' % (request_dns_name, dns_domain)
-
-        for ip in ips:
-            if request_dns_name:
-                hostname = request_dns_name
-                fqdn = request_fqdn
-            else:
-                hostname = 'host-%s' % ip['ip_address'].replace(
-                    '.', '-').replace(':', '-')
-                fqdn = hostname
-                if dns_domain:
-                    fqdn = '%s.%s' % (hostname, dns_domain)
-            dns_assignment.append({'ip_address': ip['ip_address'],
-                                   'hostname': hostname,
-                                   'fqdn': fqdn})
-
-        return dns_assignment
-
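# [Editor's note -- illustrative addition, not part of the original file.]
# Without a requested dns_name, the hostname above is derived from the IP
# address; the same derivation in isolation:

def default_hostname(ip_address, dns_domain=''):
    hostname = 'host-%s' % ip_address.replace('.', '-').replace(':', '-')
    fqdn = '%s.%s' % (hostname, dns_domain) if dns_domain else hostname
    return hostname, fqdn

assert default_hostname('10.0.0.5', 'example.org.') == (
    'host-10-0-0-5', 'host-10-0-0-5.example.org.')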
-    def _create_port_with_mac(self, context, network_id, port_data,
-                              mac_address):
-        try:
-            # since this method could either be used within or outside the
-            # transaction, use convenience method to avoid passing a flag
-            with db_api.autonested_transaction(context.session):
-                db_port = models_v2.Port(mac_address=mac_address, **port_data)
-                context.session.add(db_port)
-                return db_port
-        except db_exc.DBDuplicateEntry:
-            raise n_exc.MacAddressInUse(net_id=network_id, mac=mac_address)
-
-    def _create_port(self, context, network_id, port_data):
-        max_retries = cfg.CONF.mac_generation_retries
-        for i in range(max_retries):
-            mac = self._generate_mac()
-            try:
-                return self._create_port_with_mac(
-                    context, network_id, port_data, mac)
-            except n_exc.MacAddressInUse:
-                LOG.debug('Generated mac %(mac_address)s exists on '
-                          'network %(network_id)s',
-                          {'mac_address': mac, 'network_id': network_id})
-
-        LOG.error(_LE("Unable to generate mac address after %s attempts"),
-                  max_retries)
-        raise n_exc.MacAddressGenerationFailure(net_id=network_id)
-
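# [Editor's note -- illustrative addition, not part of the original file.]
# The MAC retry loop above is a generic "generate, insert, retry on
# duplicate" pattern; with the db layer stripped away it reduces to:

import random

def allocate_unique(generate, taken, max_retries):
    for _ in range(max_retries):
        candidate = generate()
        if candidate not in taken:  # stands in for the DBDuplicateEntry check
            taken.add(candidate)
            return candidate
    raise RuntimeError('no unique value after %d attempts' % max_retries)

def random_mac():
    return 'fa:16:3e:%02x:%02x:%02x' % tuple(
        random.randint(0, 255) for _ in range(3))

# allocate_unique(random_mac, set(), max_retries=16)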
-    def create_port(self, context, port):
-        p = port['port']
-        port_id = p.get('id') or uuidutils.generate_uuid()
-        network_id = p['network_id']
-        # NOTE(jkoelker) Get the tenant_id outside of the session to avoid
-        #                unneeded db action if the operation raises
-        tenant_id = p['tenant_id']
-        if p.get('device_owner'):
-            self._enforce_device_owner_not_router_intf_or_device_id(
-                context, p.get('device_owner'), p.get('device_id'), tenant_id)
-
-        port_data = dict(tenant_id=tenant_id,
-                         name=p['name'],
-                         id=port_id,
-                         network_id=network_id,
-                         admin_state_up=p['admin_state_up'],
-                         status=p.get('status', constants.PORT_STATUS_ACTIVE),
-                         device_id=p['device_id'],
-                         device_owner=p['device_owner'])
-        if 'dns_name' in p:
-            request_dns_name = self._get_request_dns_name(p)
-            port_data['dns_name'] = request_dns_name
-
-        with context.session.begin(subtransactions=True):
-            # Ensure that the network exists.
-            self._get_network(context, network_id)
-
-            # Create the port
-            if p['mac_address'] is attributes.ATTR_NOT_SPECIFIED:
-                db_port = self._create_port(context, network_id, port_data)
-                p['mac_address'] = db_port['mac_address']
-            else:
-                db_port = self._create_port_with_mac(
-                    context, network_id, port_data, p['mac_address'])
-
-            ips = self.ipam.allocate_ips_for_port_and_store(context, port,
-                                                            port_id)
-            if 'dns_name' in p:
-                dns_assignment = []
-                if ips:
-                    dns_assignment = self._get_dns_names_for_port(
-                        context, ips, request_dns_name)
-
-        if 'dns_name' in p:
-            db_port['dns_assignment'] = dns_assignment
-        return self._make_port_dict(db_port, process_extensions=False)
-
-    def _validate_port_for_update(self, context, db_port, new_port, new_mac):
-        changed_owner = 'device_owner' in new_port
-        current_owner = (new_port.get('device_owner') or
-                         db_port['device_owner'])
-        changed_device_id = new_port.get('device_id') != db_port['device_id']
-        current_device_id = new_port.get('device_id') or db_port['device_id']
-
-        if (current_owner and changed_device_id) or changed_owner:
-            self._enforce_device_owner_not_router_intf_or_device_id(
-                context, current_owner, current_device_id,
-                db_port['tenant_id'])
-
-        if new_mac and new_mac != db_port['mac_address']:
-            self._check_mac_addr_update(context, db_port,
-                                        new_mac, current_owner)
-
-    def _get_dns_names_for_updated_port(self, context, original_ips,
-                                        original_dns_name, request_dns_name,
-                                        changes):
-        if changes.original or changes.add or changes.remove:
-            return self._get_dns_names_for_port(
-                context, changes.original + changes.add,
-                request_dns_name or original_dns_name)
-        if original_ips:
-            return self._get_dns_names_for_port(
-                context, original_ips,
-                request_dns_name or original_dns_name)
-        return []
-
-    def update_port(self, context, id, port):
-        new_port = port['port']
-
-        with context.session.begin(subtransactions=True):
-            port = self._get_port(context, id)
-            if 'dns-integration' in self.supported_extension_aliases:
-                original_ips = self._make_fixed_ip_dict(port['fixed_ips'])
-                original_dns_name = port.get('dns_name', '')
-                request_dns_name = self._get_request_dns_name(new_port)
-                if not request_dns_name:
-                    new_port['dns_name'] = ''
-            new_mac = new_port.get('mac_address')
-            self._validate_port_for_update(context, port, new_port, new_mac)
-            changes = self.ipam.update_port_with_ips(context, port,
-                                                     new_port, new_mac)
-            if 'dns-integration' in self.supported_extension_aliases:
-                dns_assignment = self._get_dns_names_for_updated_port(
-                    context, original_ips, original_dns_name,
-                    request_dns_name, changes)
-        result = self._make_port_dict(port)
-        # Keep up with fields that changed
-        if changes.original or changes.add or changes.remove:
-            result['fixed_ips'] = self._make_fixed_ip_dict(
-                changes.original + changes.add)
-        if 'dns-integration' in self.supported_extension_aliases:
-            result['dns_assignment'] = dns_assignment
-        return result
-
-    def delete_port(self, context, id):
-        with context.session.begin(subtransactions=True):
-            self.ipam.delete_port(context, id)
-
-    def delete_ports_by_device_id(self, context, device_id, network_id=None):
-        query = (context.session.query(models_v2.Port.id)
-                 .enable_eagerloads(False)
-                 .filter(models_v2.Port.device_id == device_id))
-        if network_id:
-            query = query.filter(models_v2.Port.network_id == network_id)
-        port_ids = [p[0] for p in query]
-        for port_id in port_ids:
-            try:
-                self.delete_port(context, port_id)
-            except n_exc.PortNotFound:
-                # Don't raise if something else concurrently deleted the port
-                LOG.debug("Ignoring PortNotFound when deleting port '%s'. "
-                          "The port has already been deleted.",
-                          port_id)
-
-    def _get_dns_name_for_port_get(self, context, port):
-        if port['fixed_ips']:
-            return self._get_dns_names_for_port(
-                context, port['fixed_ips'],
-                port['dns_name'])
-        return []
-
-    def get_port(self, context, id, fields=None):
-        port = self._get_port(context, id)
-        if (('dns-integration' in self.supported_extension_aliases and
-             'dns_name' in port)):
-            port['dns_assignment'] = self._get_dns_name_for_port_get(context,
-                                                                     port)
-        return self._make_port_dict(port, fields)
-
-    def _get_ports_query(self, context, filters=None, sorts=None, limit=None,
-                         marker_obj=None, page_reverse=False):
-        Port = models_v2.Port
-        IPAllocation = models_v2.IPAllocation
-
-        if not filters:
-            filters = {}
-
-        query = self._model_query(context, Port)
-
-        fixed_ips = filters.pop('fixed_ips', {})
-        ip_addresses = fixed_ips.get('ip_address')
-        subnet_ids = fixed_ips.get('subnet_id')
-        if ip_addresses or subnet_ids:
-            query = query.join(Port.fixed_ips)
-            if ip_addresses:
-                query = query.filter(IPAllocation.ip_address.in_(ip_addresses))
-            if subnet_ids:
-                query = query.filter(IPAllocation.subnet_id.in_(subnet_ids))
-
-        query = self._apply_filters_to_query(query, Port, filters, context)
-        if limit and page_reverse and sorts:
-            sorts = [(s[0], not s[1]) for s in sorts]
-        query = sqlalchemyutils.paginate_query(query, Port, limit,
-                                               sorts, marker_obj)
-        return query
-
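# [Editor's note -- illustrative usage, not part of the original file.]
# Unlike the flat column filters, 'fixed_ips' is a nested filter handled
# explicitly above; hypothetical plugin/context objects:
#
#     ports = plugin.get_ports(
#         context,
#         filters={'fixed_ips': {'subnet_id': [subnet_id],
#                                'ip_address': ['192.0.2.10']}})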
-    def get_ports(self, context, filters=None, fields=None,
-                  sorts=None, limit=None, marker=None,
-                  page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'port', limit, marker)
-        query = self._get_ports_query(context, filters=filters,
-                                      sorts=sorts, limit=limit,
-                                      marker_obj=marker_obj,
-                                      page_reverse=page_reverse)
-        items = []
-        for c in query:
-            if (('dns-integration' in self.supported_extension_aliases and
-                 'dns_name' in c)):
-                c['dns_assignment'] = self._get_dns_name_for_port_get(context,
-                                                                      c)
-            items.append(self._make_port_dict(c, fields))
-        if limit and page_reverse:
-            items.reverse()
-        return items
-
-    def get_ports_count(self, context, filters=None):
-        return self._get_ports_query(context, filters).count()
-
-    def _enforce_device_owner_not_router_intf_or_device_id(self, context,
-                                                           device_owner,
-                                                           device_id,
-                                                           tenant_id):
-        """Prevent tenants from replacing the device id of router ports with
-        a router uuid belonging to another tenant.
-        """
-        if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
-            return
-        if not context.is_admin:
-            # Check to make sure device_id does not match another tenant's
-            # router.
-            if device_id:
-                if hasattr(self, 'get_router'):
-                    try:
-                        ctx_admin = context.elevated()
-                        router = self.get_router(ctx_admin, device_id)
-                    except l3.RouterNotFound:
-                        return
-                else:
-                    l3plugin = (
-                        manager.NeutronManager.get_service_plugins().get(
-                            service_constants.L3_ROUTER_NAT))
-                    if l3plugin:
-                        try:
-                            ctx_admin = context.elevated()
-                            router = l3plugin.get_router(ctx_admin,
-                                                         device_id)
-                        except l3.RouterNotFound:
-                            return
-                    else:
-                        # Raise, as the extension doesn't support L3 anyway.
-                        raise n_exc.DeviceIDNotOwnedByTenant(
-                            device_id=device_id)
-                if tenant_id != router['tenant_id']:
-                    raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
diff --git a/neutron/db/dvr_mac_db.py b/neutron/db/dvr_mac_db.py
deleted file mode 100644 (file)
index 952125d..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-import sqlalchemy as sa
-from sqlalchemy.orm import exc
-
-from neutron._i18n import _, _LE
-from neutron.common import exceptions as n_exc
-from neutron.common import utils
-from neutron.db import model_base
-from neutron.extensions import dvr as ext_dvr
-from neutron.extensions import portbindings
-from neutron import manager
-
-
-LOG = logging.getLogger(__name__)
-
-
-dvr_mac_address_opts = [
-    cfg.StrOpt('dvr_base_mac',
-               default="fa:16:3f:00:00:00",
-               help=_("The base MAC address used by Neutron for unique "
-                      "DVR instances. The first 3 octets will "
-                      "remain unchanged. If the 4th octet is not 00, it will "
-                      "also be used. The others will be randomly generated. "
-                      "The 'dvr_base_mac' *must* be different from "
-                      "'base_mac' to avoid mixing them up with MACs "
-                      "allocated for tenant ports. A 4-octet example would "
-                      "be dvr_base_mac = fa:16:3f:4f:00:00. The default "
-                      "uses 3 octets")),
-]
-cfg.CONF.register_opts(dvr_mac_address_opts)
-
-
-class DistributedVirtualRouterMacAddress(model_base.BASEV2):
-    """Represents a v2 neutron distributed virtual router mac address."""
-
-    __tablename__ = 'dvr_host_macs'
-
-    host = sa.Column(sa.String(255), primary_key=True, nullable=False)
-    mac_address = sa.Column(sa.String(32), nullable=False, unique=True)
-
-
-class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase):
-    """Mixin class to add DVR MAC addresses to db_base_plugin_v2."""
-
-    @property
-    def plugin(self):
-        try:
-            if self._plugin is not None:
-                return self._plugin
-        except AttributeError:
-            pass
-        self._plugin = manager.NeutronManager.get_plugin()
-        return self._plugin
-
-    def _get_dvr_mac_address_by_host(self, context, host):
-        try:
-            query = context.session.query(DistributedVirtualRouterMacAddress)
-            dvrma = query.filter(
-                DistributedVirtualRouterMacAddress.host == host).one()
-        except exc.NoResultFound:
-            raise ext_dvr.DVRMacAddressNotFound(host=host)
-        return dvrma
-
-    def _create_dvr_mac_address(self, context, host):
-        """Create DVR mac address for a given host."""
-        base_mac = cfg.CONF.dvr_base_mac.split(':')
-        max_retries = cfg.CONF.mac_generation_retries
-        for attempt in reversed(range(max_retries)):
-            try:
-                with context.session.begin(subtransactions=True):
-                    mac_address = utils.get_random_mac(base_mac)
-                    dvr_mac_binding = DistributedVirtualRouterMacAddress(
-                        host=host, mac_address=mac_address)
-                    context.session.add(dvr_mac_binding)
-                    LOG.debug("Generated DVR mac for host %(host)s "
-                              "is %(mac_address)s",
-                              {'host': host, 'mac_address': mac_address})
-                dvr_macs = self.get_dvr_mac_address_list(context)
-                # TODO(vivek): improve scalability of this fanout by
-                # sending a single mac address rather than the entire set
-                self.notifier.dvr_mac_address_update(context, dvr_macs)
-                return self._make_dvr_mac_address_dict(dvr_mac_binding)
-            except db_exc.DBDuplicateEntry:
-                LOG.debug("Generated DVR mac %(mac)s exists."
-                          " Remaining attempts %(attempts_left)s.",
-                          {'mac': mac_address, 'attempts_left': attempt})
-        LOG.error(_LE("MAC generation error after %s attempts"), max_retries)
-        raise ext_dvr.MacAddressGenerationFailure(host=host)
-
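# [Editor's note -- illustrative addition, not part of the original file.]
# utils.get_random_mac() is assumed here to keep the non-zero leading
# octets of dvr_base_mac and randomize the '00' ones, per the option help
# text above; roughly:

import random

def rough_random_mac(base_mac):
    octets = base_mac.split(':')
    return ':'.join(o if o != '00' else '%02x' % random.randint(0, 255)
                    for o in octets)

# rough_random_mac('fa:16:3f:00:00:00') -> e.g. 'fa:16:3f:a1:07:5c'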
-    def get_dvr_mac_address_list(self, context):
-        with context.session.begin(subtransactions=True):
-            return (context.session.
-                    query(DistributedVirtualRouterMacAddress).all())
-
-    def get_dvr_mac_address_by_host(self, context, host):
-        """Determine the MAC for the DVR port associated with the host."""
-        if not host:
-            return
-
-        try:
-            return self._get_dvr_mac_address_by_host(context, host)
-        except ext_dvr.DVRMacAddressNotFound:
-            return self._create_dvr_mac_address(context, host)
-
-    def _make_dvr_mac_address_dict(self, dvr_mac_entry, fields=None):
-        return {'host': dvr_mac_entry['host'],
-                'mac_address': dvr_mac_entry['mac_address']}
-
-    @log_helpers.log_method_call
-    def get_ports_on_host_by_subnet(self, context, host, subnet):
-        """Returns DVR-serviced ports on a given subnet for the given host
-
-        This method returns ports that need to be serviced by DVR.
-        :param context: rpc request context
-        :param host: host id to match and extract ports of interest
-        :param subnet: subnet id to match and extract ports of interest
-        :returns: list -- ports on the given subnet for the given host
-        """
-        # FIXME(vivek, salv-orlando): improve this query by adding the
-        # capability of filtering by binding:host_id
-        ports_by_host = []
-        port_filters = {'fixed_ips': {'subnet_id': [subnet]}}
-        ports = self.plugin.get_ports(context, filters=port_filters)
-        LOG.debug("List of Ports on subnet %(subnet)s at host %(host)s "
-                  "received as %(ports)s",
-                  {'subnet': subnet, 'host': host, 'ports': ports})
-        for port in ports:
-            device_owner = port['device_owner']
-            if (utils.is_dvr_serviced(device_owner) and
-                    port[portbindings.HOST_ID] == host):
-                port_dict = self.plugin._make_port_dict(
-                    port, process_extensions=False)
-                ports_by_host.append(port_dict)
-        LOG.debug("Returning list of dvr serviced ports on host %(host)s"
-                  " for subnet %(subnet)s ports %(ports)s",
-                  {'host': host, 'subnet': subnet,
-                   'ports': ports_by_host})
-        return ports_by_host
-
-    @log_helpers.log_method_call
-    def get_subnet_for_dvr(self, context, subnet, fixed_ips=None):
-        if fixed_ips:
-            subnet_data = fixed_ips[0]['subnet_id']
-        else:
-            subnet_data = subnet
-        try:
-            subnet_info = self.plugin.get_subnet(
-                context, subnet_data)
-        except n_exc.SubnetNotFound:
-            return {}
-        else:
-            # retrieve the gateway port on this subnet
-            if fixed_ips:
-                ip_address = fixed_ips[0]['ip_address']
-            else:
-                ip_address = subnet_info['gateway_ip']
-
-            port_filters = {'fixed_ips': {'subnet_id': [subnet],
-                                          'ip_address': [ip_address]}}
-
-            internal_gateway_ports = self.plugin.get_ports(
-                context, filters=port_filters)
-            if not internal_gateway_ports:
-                LOG.error(_LE("Could not retrieve gateway port "
-                              "for subnet %s"), subnet_info)
-                return {}
-            internal_port = internal_gateway_ports[0]
-            subnet_info['gateway_mac'] = internal_port['mac_address']
-            return subnet_info
diff --git a/neutron/db/external_net_db.py b/neutron/db/external_net_db.py
deleted file mode 100644 (file)
index 178068e..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc
-from sqlalchemy.sql import expression as expr
-
-from neutron.api.v2 import attributes
-from neutron.common import constants as l3_constants
-from neutron.common import exceptions as n_exc
-from neutron.db import db_base_plugin_v2
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import external_net
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-
-
-DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW
-
-
-class ExternalNetwork(model_base.BASEV2):
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
-                           primary_key=True)
-
-    # Add a relationship to the Network model in order to instruct
-    # SQLAlchemy to eagerly load this association
-    network = orm.relationship(
-        models_v2.Network,
-        backref=orm.backref("external", lazy='joined',
-                            uselist=False, cascade='delete'))
-
-
-class External_net_db_mixin(object):
-    """Mixin class to add external network methods to db_base_plugin_v2."""
-
-    def _network_model_hook(self, context, original_model, query):
-        query = query.outerjoin(ExternalNetwork,
-                                (original_model.id ==
-                                 ExternalNetwork.network_id))
-        return query
-
-    def _network_filter_hook(self, context, original_model, conditions):
-        if conditions is not None and not hasattr(conditions, '__iter__'):
-            conditions = (conditions, )
-        # Apply the external network filter only in non-admin and non-advsvc
-        # context
-        if self.model_query_scope(context, original_model):
-            conditions = expr.or_(ExternalNetwork.network_id != expr.null(),
-                                  *conditions)
-        return conditions
-
-    def _network_result_filter_hook(self, query, filters):
-        vals = filters and filters.get(external_net.EXTERNAL, [])
-        if not vals:
-            return query
-        if vals[0]:
-            return query.filter((ExternalNetwork.network_id != expr.null()))
-        return query.filter((ExternalNetwork.network_id == expr.null()))
-
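# [Editor's note -- illustrative usage, not part of the original file.]
# With the hooks above registered, router:external becomes a queryable
# network filter; hypothetical plugin/context objects:
#
#     external_nets = plugin.get_networks(
#         context, filters={'router:external': [True]})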
-    # TODO(salvatore-orlando): Perform this operation without explicitly
-    # referring to db_base_plugin_v2, as plugins that do not extend from it
-    # might exist in the future
-    db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
-        models_v2.Network,
-        "external_net",
-        '_network_model_hook',
-        '_network_filter_hook',
-        '_network_result_filter_hook')
-
-    def _network_is_external(self, context, net_id):
-        try:
-            context.session.query(ExternalNetwork).filter_by(
-                network_id=net_id).one()
-            return True
-        except exc.NoResultFound:
-            return False
-
-    def _extend_network_dict_l3(self, network_res, network_db):
-        # Compare with None to convert the uuid into a bool
-        network_res[external_net.EXTERNAL] = network_db.external is not None
-        return network_res
-
-    # Register dict extend functions for networks
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attributes.NETWORKS, ['_extend_network_dict_l3'])
-
-    def _process_l3_create(self, context, net_data, req_data):
-        external = req_data.get(external_net.EXTERNAL)
-        external_set = attributes.is_attr_set(external)
-
-        if not external_set:
-            return
-
-        if external:
-            # expects to be called within a plugin's session
-            context.session.add(ExternalNetwork(network_id=net_data['id']))
-        net_data[external_net.EXTERNAL] = external
-
-    def _process_l3_update(self, context, net_data, req_data):
-
-        new_value = req_data.get(external_net.EXTERNAL)
-        net_id = net_data['id']
-        if not attributes.is_attr_set(new_value):
-            return
-
-        if net_data.get(external_net.EXTERNAL) == new_value:
-            return
-
-        if new_value:
-            context.session.add(ExternalNetwork(network_id=net_id))
-            net_data[external_net.EXTERNAL] = True
-        else:
-            # Must make sure we do not have any external gateway ports
-            # (and thus, possible floating IPs) on this network before
-            # allowing it to be updated to external=False
-            port = context.session.query(models_v2.Port).filter_by(
-                device_owner=DEVICE_OWNER_ROUTER_GW,
-                network_id=net_data['id']).first()
-            if port:
-                raise external_net.ExternalNetworkInUse(net_id=net_id)
-
-            context.session.query(ExternalNetwork).filter_by(
-                network_id=net_id).delete()
-            net_data[external_net.EXTERNAL] = False
-
-    def _process_l3_delete(self, context, network_id):
-        l3plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        if l3plugin:
-            l3plugin.delete_disassociated_floatingips(context, network_id)
-
-    def get_external_network_id(self, context):
-        nets = self.get_networks(context, {external_net.EXTERNAL: [True]})
-        if len(nets) > 1:
-            raise n_exc.TooManyExternalNetworks()
-        else:
-            return nets[0]['id'] if nets else None
diff --git a/neutron/db/extradhcpopt_db.py b/neutron/db/extradhcpopt_db.py
deleted file mode 100644 (file)
index 42cdc17..0000000
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-
-from neutron.api.v2 import attributes
-from neutron.db import db_base_plugin_v2
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import extra_dhcp_opt as edo_ext
-
-
-class ExtraDhcpOpt(model_base.BASEV2, model_base.HasId):
-    """Represent a generic concept of extra options associated with a port.
-
-    Each port may have zero or more DHCP options associated with it, each
-    defining different or additional options for DHCP clients.
-    These will be written to the <network_id>/opts files, and each option's
-    tag will be referenced in the <network_id>/host file.
-    """
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                        nullable=False)
-    opt_name = sa.Column(sa.String(64), nullable=False)
-    opt_value = sa.Column(sa.String(255), nullable=False)
-    ip_version = sa.Column(sa.Integer, server_default='4', nullable=False)
-    __table_args__ = (sa.UniqueConstraint(
-        'port_id',
-        'opt_name',
-        'ip_version',
-        name='uniq_extradhcpopts0portid0optname0ipversion'),
-                      model_base.BASEV2.__table_args__,)
-
-    # Add a relationship to the Port model in order to instruct SQLAlchemy to
-    # eagerly load extra_dhcp_opts bindings
-    ports = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete'))
-
-
-class ExtraDhcpOptMixin(object):
-    """Mixin class to add extra options to the DHCP opts file
-    and associate them to a port.
-    """
-
-    def _is_valid_opt_value(self, opt_name, opt_value):
-
-        # If the DHCP option may be blank, it should not be saved to the DB
-        # when the value is None
-        if opt_name in edo_ext.VALID_BLANK_EXTRA_DHCP_OPTS:
-            return opt_value is not None
-
-        # Otherwise, it should not be saved to the DB when the value is None
-        # or empty
-        return bool(opt_value)
-
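# [Editor's note -- illustrative addition, not part of the original file.]
# The rule above in isolation; 'router' is assumed here to be one of the
# blank-able options in VALID_BLANK_EXTRA_DHCP_OPTS:

def is_valid_opt_value(opt_name, opt_value, blankable=('router',)):
    if opt_name in blankable:
        return opt_value is not None  # '' is allowed, None is not
    return bool(opt_value)            # neither None nor '' is allowed

assert is_valid_opt_value('router', '') is True
assert is_valid_opt_value('router', None) is False
assert is_valid_opt_value('dns-server', '') is False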
-    def _process_port_create_extra_dhcp_opts(self, context, port,
-                                             extra_dhcp_opts):
-        if not extra_dhcp_opts:
-            return port
-        with context.session.begin(subtransactions=True):
-            for dopt in extra_dhcp_opts:
-                if self._is_valid_opt_value(dopt['opt_name'],
-                                            dopt['opt_value']):
-                    ip_version = dopt.get('ip_version', 4)
-                    db = ExtraDhcpOpt(
-                        port_id=port['id'],
-                        opt_name=dopt['opt_name'],
-                        opt_value=dopt['opt_value'],
-                        ip_version=ip_version)
-                    context.session.add(db)
-        return self._extend_port_extra_dhcp_opts_dict(context, port)
-
-    def _extend_port_extra_dhcp_opts_dict(self, context, port):
-        port[edo_ext.EXTRADHCPOPTS] = self._get_port_extra_dhcp_opts_binding(
-            context, port['id'])
-
-    def _get_port_extra_dhcp_opts_binding(self, context, port_id):
-        query = self._model_query(context, ExtraDhcpOpt)
-        binding = query.filter(ExtraDhcpOpt.port_id == port_id)
-        return [{'opt_name': r.opt_name, 'opt_value': r.opt_value,
-                 'ip_version': r.ip_version}
-                for r in binding]
-
-    def _update_extra_dhcp_opts_on_port(self, context, id, port,
-                                        updated_port=None):
-        # It is not necessary to update in a transaction, because
-        # it is called from within one by ovs_neutron_plugin.
-        dopts = port['port'].get(edo_ext.EXTRADHCPOPTS)
-
-        if dopts:
-            opt_db = self._model_query(
-                context, ExtraDhcpOpt).filter_by(port_id=id).all()
-            # If there are currently no dhcp_options associated with
-            # this port, then just insert the new ones and be done.
-            with context.session.begin(subtransactions=True):
-                for upd_rec in dopts:
-                    for opt in opt_db:
-                        if (opt['opt_name'] == upd_rec['opt_name']
-                                and opt['ip_version'] == upd_rec.get(
-                                    'ip_version', 4)):
-                            # Handle deleting an opt from the port.
-                            if upd_rec['opt_value'] is None:
-                                context.session.delete(opt)
-                            else:
-                                if (self._is_valid_opt_value(
-                                        opt['opt_name'],
-                                        upd_rec['opt_value']) and
-                                        opt['opt_value'] !=
-                                        upd_rec['opt_value']):
-                                    opt.update(
-                                        {'opt_value': upd_rec['opt_value']})
-                            break
-                    else:
-                        if self._is_valid_opt_value(
-                                upd_rec['opt_name'],
-                                upd_rec['opt_value']):
-                            ip_version = upd_rec.get('ip_version', 4)
-                            db = ExtraDhcpOpt(
-                                port_id=id,
-                                opt_name=upd_rec['opt_name'],
-                                opt_value=upd_rec['opt_value'],
-                                ip_version=ip_version)
-                            context.session.add(db)
-
-            if updated_port:
-                edolist = self._get_port_extra_dhcp_opts_binding(context, id)
-                updated_port[edo_ext.EXTRADHCPOPTS] = edolist
-
-        return bool(dopts)
-
-    def _extend_port_dict_extra_dhcp_opt(self, res, port):
-        res[edo_ext.EXTRADHCPOPTS] = [{'opt_name': dho.opt_name,
-                                       'opt_value': dho.opt_value,
-                                       'ip_version': dho.ip_version}
-                                      for dho in port.dhcp_opts]
-        return res
-
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attributes.PORTS, ['_extend_port_dict_extra_dhcp_opt'])
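
(Hedged sketch, not part of the removed module: the update path above expects
a wrapped payload like the one below. The option names are hypothetical,
ip_version defaults to 4, and an opt_value of None requests deletion of the
matching option; _update_extra_dhcp_opts_on_port returns bool(dopts), i.e.
True for this payload.)

    port_update = {
        'port': {
            'extra_dhcp_opts': [
                # add or update an option (illustrative name/value)
                {'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
                # None deletes the existing option for the default ip_version
                {'opt_name': 'tftp-server', 'opt_value': None},
            ]
        }
    }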
diff --git a/neutron/db/extraroute_db.py b/neutron/db/extraroute_db.py
deleted file mode 100644 (file)
index 0f94810..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-import sqlalchemy as sa
-from sqlalchemy import orm
-
-from neutron._i18n import _
-from neutron.common import utils
-from neutron.db import db_base_plugin_v2
-from neutron.db import l3_db
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import extraroute
-from neutron.extensions import l3
-
-
-LOG = logging.getLogger(__name__)
-
-extra_route_opts = [
-    #TODO(nati): use the quota framework once it supports per-attribute quotas
-    cfg.IntOpt('max_routes', default=30,
-               help=_("Maximum number of routes per router")),
-]
-
-cfg.CONF.register_opts(extra_route_opts)
-
-
-class RouterRoute(model_base.BASEV2, models_v2.Route):
-    router_id = sa.Column(sa.String(36),
-                          sa.ForeignKey('routers.id',
-                                        ondelete="CASCADE"),
-                          primary_key=True)
-
-    router = orm.relationship(l3_db.Router,
-                              backref=orm.backref("route_list",
-                                                  lazy='joined',
-                                                  cascade='delete'))
-
-
-class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
-    """Mixin class to support extra route configuration on router."""
-
-    def _extend_router_dict_extraroute(self, router_res, router_db):
-        router_res['routes'] = ExtraRoute_dbonly_mixin._make_extra_route_list(
-            router_db['route_list'])
-
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        l3.ROUTERS, ['_extend_router_dict_extraroute'])
-
-    def update_router(self, context, id, router):
-        r = router['router']
-        with context.session.begin(subtransactions=True):
-            #check that the router exists and that we have permission to access it
-            router_db = self._get_router(context, id)
-            if 'routes' in r:
-                self._update_extra_routes(context, router_db, r['routes'])
-            routes = self._get_extra_routes_by_router_id(context, id)
-        router_updated = super(ExtraRoute_dbonly_mixin, self).update_router(
-            context, id, router)
-        router_updated['routes'] = routes
-
-        return router_updated
-
-    def _get_subnets_by_cidr(self, context, cidr):
-        query_subnets = context.session.query(models_v2.Subnet)
-        return query_subnets.filter_by(cidr=cidr).all()
-
-    def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop):
-        #Note(nati): The nexthop should be directly connected, so we need
-        # to check that it belongs to one of the CIDRs of the router's
-        # ports
-        if not netaddr.all_matching_cidrs(nexthop, cidrs):
-            raise extraroute.InvalidRoutes(
-                routes=routes,
-                reason=_('the nexthop is not connected with router'))
-        #Note(nati): the nexthop must not be the same as any of the fixed_ips
-        if nexthop in ips:
-            raise extraroute.InvalidRoutes(
-                routes=routes,
-                reason=_('the nexthop is used by router'))
-
-    def _validate_routes(self, context,
-                         router_id, routes):
-        if len(routes) > cfg.CONF.max_routes:
-            raise extraroute.RoutesExhausted(
-                router_id=router_id,
-                quota=cfg.CONF.max_routes)
-
-        filters = {'device_id': [router_id]}
-        ports = self._core_plugin.get_ports(context, filters)
-        cidrs = []
-        ips = []
-        for port in ports:
-            for ip in port['fixed_ips']:
-                cidrs.append(self._core_plugin.get_subnet(
-                    context, ip['subnet_id'])['cidr'])
-                ips.append(ip['ip_address'])
-        for route in routes:
-            self._validate_routes_nexthop(
-                cidrs, ips, routes, route['nexthop'])
-
-    def _update_extra_routes(self, context, router, routes):
-        self._validate_routes(context, router['id'],
-                              routes)
-        old_routes, routes_dict = self._get_extra_routes_dict_by_router_id(
-            context, router['id'])
-        added, removed = utils.diff_list_of_dict(old_routes,
-                                                 routes)
-        LOG.debug('Added routes are %s', added)
-        for route in added:
-            router_routes = RouterRoute(
-                router_id=router['id'],
-                destination=route['destination'],
-                nexthop=route['nexthop'])
-            context.session.add(router_routes)
-
-        LOG.debug('Removed routes are %s', removed)
-        for route in removed:
-            context.session.delete(
-                routes_dict[(route['destination'], route['nexthop'])])
-
-    @staticmethod
-    def _make_extra_route_list(extra_routes):
-        return [{'destination': route['destination'],
-                 'nexthop': route['nexthop']}
-                for route in extra_routes]
-
-    def _get_extra_routes_by_router_id(self, context, id):
-        query = context.session.query(RouterRoute)
-        query = query.filter_by(router_id=id)
-        return self._make_extra_route_list(query)
-
-    def _get_extra_routes_dict_by_router_id(self, context, id):
-        query = context.session.query(RouterRoute)
-        query = query.filter_by(router_id=id)
-        routes = []
-        routes_dict = {}
-        for route in query:
-            routes.append({'destination': route['destination'],
-                           'nexthop': route['nexthop']})
-            routes_dict[(route['destination'], route['nexthop'])] = route
-        return routes, routes_dict
-
-    def _confirm_router_interface_not_in_use(self, context, router_id,
-                                             subnet_id):
-        super(ExtraRoute_dbonly_mixin,
-            self)._confirm_router_interface_not_in_use(
-            context, router_id, subnet_id)
-        subnet = self._core_plugin.get_subnet(context, subnet_id)
-        subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
-        extra_routes = self._get_extra_routes_by_router_id(context, router_id)
-        for route in extra_routes:
-            if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]):
-                raise extraroute.RouterInterfaceInUseByRoute(
-                    router_id=router_id, subnet_id=subnet_id)
-
-
-class ExtraRoute_db_mixin(ExtraRoute_dbonly_mixin, l3_db.L3_NAT_db_mixin):
-    """Mixin class to support extra route configuration on router with rpc."""
-    pass
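
(Hedged illustration of the diffing in _update_extra_routes above, assuming
utils.diff_list_of_dict behaves like plain set difference over route dicts:)

    old = [{'destination': '10.0.1.0/24', 'nexthop': '10.0.0.2'}]
    new = [{'destination': '10.0.2.0/24', 'nexthop': '10.0.0.2'}]
    added = [r for r in new if r not in old]    # inserted as RouterRoute rows
    removed = [r for r in old if r not in new]  # looked up in routes_dict and
                                                # deleted from the session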
diff --git a/neutron/db/flavors_db.py b/neutron/db/flavors_db.py
deleted file mode 100644 (file)
index 0491665..0000000
+++ /dev/null
@@ -1,293 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc as sa_exc
-
-from neutron.api.v2 import attributes as attr
-from neutron.db import common_db_mixin
-from neutron.db import model_base
-from neutron.db import servicetype_db as sdb
-from neutron.extensions import flavors as ext_flavors
-
-LOG = logging.getLogger(__name__)
-
-
-class Flavor(model_base.BASEV2, model_base.HasId):
-    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
-    description = sa.Column(sa.String(attr.LONG_DESCRIPTION_MAX_LEN))
-    enabled = sa.Column(sa.Boolean, nullable=False, default=True,
-                        server_default=sa.sql.true())
-    # Make it True for multi-type flavors
-    service_type = sa.Column(sa.String(36), nullable=True)
-    service_profiles = orm.relationship("FlavorServiceProfileBinding",
-                                        cascade="all, delete-orphan")
-
-
-class ServiceProfile(model_base.BASEV2, model_base.HasId):
-    description = sa.Column(sa.String(attr.LONG_DESCRIPTION_MAX_LEN))
-    driver = sa.Column(sa.String(1024), nullable=False)
-    enabled = sa.Column(sa.Boolean, nullable=False, default=True,
-                        server_default=sa.sql.true())
-    metainfo = sa.Column(sa.String(4096))
-    flavors = orm.relationship("FlavorServiceProfileBinding")
-
-
-class FlavorServiceProfileBinding(model_base.BASEV2):
-    flavor_id = sa.Column(sa.String(36),
-                          sa.ForeignKey("flavors.id",
-                                        ondelete="CASCADE"),
-                          nullable=False, primary_key=True)
-    flavor = orm.relationship(Flavor)
-    service_profile_id = sa.Column(sa.String(36),
-                                   sa.ForeignKey("serviceprofiles.id",
-                                                 ondelete="CASCADE"),
-                                   nullable=False, primary_key=True)
-    service_profile = orm.relationship(ServiceProfile)
-
-
-class FlavorsDbMixin(common_db_mixin.CommonDbMixin):
-
-    """Class to support flavors and service profiles."""
-
-    def _get_flavor(self, context, flavor_id):
-        try:
-            return self._get_by_id(context, Flavor, flavor_id)
-        except sa_exc.NoResultFound:
-            raise ext_flavors.FlavorNotFound(flavor_id=flavor_id)
-
-    def _get_service_profile(self, context, sp_id):
-        try:
-            return self._get_by_id(context, ServiceProfile, sp_id)
-        except sa_exc.NoResultFound:
-            raise ext_flavors.ServiceProfileNotFound(sp_id=sp_id)
-
-    def _make_flavor_dict(self, flavor_db, fields=None):
-        res = {'id': flavor_db['id'],
-               'name': flavor_db['name'],
-               'description': flavor_db['description'],
-               'service_type': flavor_db['service_type'],
-               'enabled': flavor_db['enabled'],
-               'service_profiles': []}
-        if flavor_db.service_profiles:
-            res['service_profiles'] = [sp['service_profile_id']
-                                       for sp in flavor_db.service_profiles]
-        return self._fields(res, fields)
-
-    def _make_service_profile_dict(self, sp_db, fields=None):
-        res = {'id': sp_db['id'],
-               'description': sp_db['description'],
-               'driver': sp_db['driver'],
-               'enabled': sp_db['enabled'],
-               'metainfo': sp_db['metainfo']}
-        if sp_db.flavors:
-            res['flavors'] = [fl['flavor_id']
-                              for fl in sp_db.flavors]
-        return self._fields(res, fields)
-
-    def _ensure_flavor_not_in_use(self, context, flavor_id):
-        """Checks that flavor is not associated with service instance."""
-        # Future TODO(enikanorov): check that there is no binding to
-        # instances. Shall address in future upon getting the right
-        # flavor supported driver
-        pass
-
-    def _ensure_service_profile_not_in_use(self, context, sp_id):
-        """Ensures no current bindings to flavors exist."""
-        fl = (context.session.query(FlavorServiceProfileBinding).
-              filter_by(service_profile_id=sp_id).first())
-        if fl:
-            raise ext_flavors.ServiceProfileInUse(sp_id=sp_id)
-
-    def _validate_driver(self, context, driver):
-        """Confirms a non-empty driver is a valid provider."""
-        service_type_manager = sdb.ServiceTypeManager.get_instance()
-        providers = service_type_manager.get_service_providers(
-            context,
-            filters={'driver': driver})
-
-        if not providers:
-            raise ext_flavors.ServiceProfileDriverNotFound(driver=driver)
-
-    def create_flavor(self, context, flavor):
-        fl = flavor['flavor']
-        with context.session.begin(subtransactions=True):
-            fl_db = Flavor(id=uuidutils.generate_uuid(),
-                           name=fl['name'],
-                           description=fl['description'],
-                           service_type=fl['service_type'],
-                           enabled=fl['enabled'])
-            context.session.add(fl_db)
-        return self._make_flavor_dict(fl_db)
-
-    def update_flavor(self, context, flavor_id, flavor):
-        fl = flavor['flavor']
-        with context.session.begin(subtransactions=True):
-            self._ensure_flavor_not_in_use(context, flavor_id)
-            fl_db = self._get_flavor(context, flavor_id)
-            fl_db.update(fl)
-        return self._make_flavor_dict(fl_db)
-
-    def get_flavor(self, context, flavor_id, fields=None):
-        fl = self._get_flavor(context, flavor_id)
-        return self._make_flavor_dict(fl, fields)
-
-    def delete_flavor(self, context, flavor_id):
-        with context.session.begin(subtransactions=True):
-            self._ensure_flavor_not_in_use(context, flavor_id)
-            fl_db = self._get_flavor(context, flavor_id)
-            context.session.delete(fl_db)
-
-    def get_flavors(self, context, filters=None, fields=None,
-                    sorts=None, limit=None, marker=None, page_reverse=False):
-        return self._get_collection(context, Flavor, self._make_flavor_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts, limit=limit,
-                                    marker_obj=marker,
-                                    page_reverse=page_reverse)
-
-    def create_flavor_service_profile(self, context,
-                                      service_profile, flavor_id):
-        sp = service_profile['service_profile']
-        with context.session.begin(subtransactions=True):
-            bind_qry = context.session.query(FlavorServiceProfileBinding)
-            binding = bind_qry.filter_by(service_profile_id=sp['id'],
-                                         flavor_id=flavor_id).first()
-            if binding:
-                raise ext_flavors.FlavorServiceProfileBindingExists(
-                    sp_id=sp['id'], fl_id=flavor_id)
-            binding = FlavorServiceProfileBinding(
-                service_profile_id=sp['id'],
-                flavor_id=flavor_id)
-            context.session.add(binding)
-        fl_db = self._get_flavor(context, flavor_id)
-        return self._make_flavor_dict(fl_db)
-
-    def delete_flavor_service_profile(self, context,
-                                      service_profile_id, flavor_id):
-        with context.session.begin(subtransactions=True):
-            binding = (context.session.query(FlavorServiceProfileBinding).
-                       filter_by(service_profile_id=service_profile_id,
-                                 flavor_id=flavor_id).first())
-            if not binding:
-                raise ext_flavors.FlavorServiceProfileBindingNotFound(
-                    sp_id=service_profile_id, fl_id=flavor_id)
-            context.session.delete(binding)
-
-    def get_flavor_service_profile(self, context,
-                                   service_profile_id, flavor_id, fields=None):
-        with context.session.begin(subtransactions=True):
-            binding = (context.session.query(FlavorServiceProfileBinding).
-                       filter_by(service_profile_id=service_profile_id,
-                                 flavor_id=flavor_id).first())
-            if not binding:
-                raise ext_flavors.FlavorServiceProfileBindingNotFound(
-                    sp_id=service_profile_id, fl_id=flavor_id)
-        res = {'service_profile_id': service_profile_id,
-               'flavor_id': flavor_id}
-        return self._fields(res, fields)
-
-    def create_service_profile(self, context, service_profile):
-        sp = service_profile['service_profile']
-
-        if sp['driver']:
-            self._validate_driver(context, sp['driver'])
-        else:
-            if not sp['metainfo']:
-                raise ext_flavors.ServiceProfileEmpty()
-
-        with context.session.begin(subtransactions=True):
-            sp_db = ServiceProfile(id=uuidutils.generate_uuid(),
-                                   description=sp['description'],
-                                   driver=sp['driver'],
-                                   enabled=sp['enabled'],
-                                   metainfo=sp['metainfo'])
-            context.session.add(sp_db)
-
-        return self._make_service_profile_dict(sp_db)
-
-    def update_service_profile(self, context,
-                               service_profile_id, service_profile):
-        sp = service_profile['service_profile']
-
-        if sp.get('driver'):
-            self._validate_driver(context, sp['driver'])
-
-        with context.session.begin(subtransactions=True):
-            self._ensure_service_profile_not_in_use(context,
-                                                    service_profile_id)
-            sp_db = self._get_service_profile(context, service_profile_id)
-            sp_db.update(sp)
-        return self._make_service_profile_dict(sp_db)
-
-    def get_service_profile(self, context, sp_id, fields=None):
-        sp_db = self._get_service_profile(context, sp_id)
-        return self._make_service_profile_dict(sp_db, fields)
-
-    def delete_service_profile(self, context, sp_id):
-        with context.session.begin(subtransactions=True):
-            self._ensure_service_profile_not_in_use(context, sp_id)
-            sp_db = self._get_service_profile(context, sp_id)
-            context.session.delete(sp_db)
-
-    def get_service_profiles(self, context, filters=None, fields=None,
-                             sorts=None, limit=None, marker=None,
-                             page_reverse=False):
-        return self._get_collection(context, ServiceProfile,
-                                    self._make_service_profile_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts, limit=limit,
-                                    marker_obj=marker,
-                                    page_reverse=page_reverse)
-
-    def get_flavor_next_provider(self, context, flavor_id,
-                                 filters=None, fields=None,
-                                 sorts=None, limit=None,
-                                 marker=None, page_reverse=False):
-        """From flavor, choose service profile and find provider for driver."""
-
-        with context.session.begin(subtransactions=True):
-            bind_qry = context.session.query(FlavorServiceProfileBinding)
-            binding = bind_qry.filter_by(flavor_id=flavor_id).first()
-            if not binding:
-                raise ext_flavors.FlavorServiceProfileBindingNotFound(
-                    sp_id='', fl_id=flavor_id)
-
-        # Get the service profile from the first binding
-        # TODO(jwarendt) Should become a scheduling framework instead
-        sp_db = self._get_service_profile(context,
-                                          binding['service_profile_id'])
-
-        if not sp_db.enabled:
-            raise ext_flavors.ServiceProfileDisabled()
-
-        LOG.debug("Found driver %s.", sp_db.driver)
-
-        service_type_manager = sdb.ServiceTypeManager.get_instance()
-        providers = service_type_manager.get_service_providers(
-            context,
-            filters={'driver': sp_db.driver})
-
-        if not providers:
-            raise ext_flavors.ServiceProfileDriverNotFound(driver=sp_db.driver)
-
-        LOG.debug("Found providers %s.", providers)
-
-        res = {'driver': sp_db.driver,
-               'provider': providers[0].get('name')}
-
-        return [self._fields(res, fields)]
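
(Hedged usage sketch for the mixin above; the wrapped payload shapes match
what create_flavor and create_service_profile unwrap, while every name and
the service_type value are illustrative assumptions:)

    flavor = {'flavor': {'name': 'example-flavor',
                         'description': 'example',
                         'service_type': 'LOADBALANCER',  # assumed value
                         'enabled': True}}
    profile = {'service_profile': {'description': 'example',
                                   # driver must resolve to a registered
                                   # provider, or metainfo must be set
                                   'driver': 'example.driver.path',
                                   'enabled': True,
                                   'metainfo': '{}'}}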
diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py
deleted file mode 100644 (file)
index 850efa7..0000000
+++ /dev/null
@@ -1,465 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import itertools
-
-import netaddr
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from sqlalchemy.orm import exc as orm_exc
-
-from neutron._i18n import _, _LI
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.common import utils as common_utils
-from neutron.db import db_base_plugin_common
-from neutron.db import models_v2
-from neutron.ipam import utils as ipam_utils
-
-LOG = logging.getLogger(__name__)
-
-
-class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
-    """Contains IPAM specific code which is common for both backends.
-    """
-
-    # Tracks changes in IP allocation for a port using a namedtuple
-    Changes = collections.namedtuple('Changes', 'add original remove')
-
-    @staticmethod
-    def _rebuild_availability_ranges(context, subnets):
-        """Should be redefined for non-ipam backend only
-        """
-        pass
-
-    @staticmethod
-    def _gateway_ip_str(subnet, cidr_net):
-        if subnet.get('gateway_ip') is attributes.ATTR_NOT_SPECIFIED:
-            return str(netaddr.IPNetwork(cidr_net).network + 1)
-        return subnet.get('gateway_ip')
-
-    @staticmethod
-    def pools_to_ip_range(ip_pools):
-        ip_range_pools = []
-        for ip_pool in ip_pools:
-            try:
-                ip_range_pools.append(netaddr.IPRange(ip_pool['start'],
-                                                      ip_pool['end']))
-            except netaddr.AddrFormatError:
-                LOG.info(_LI("Found invalid IP address in pool: "
-                             "%(start)s - %(end)s:"),
-                         {'start': ip_pool['start'],
-                          'end': ip_pool['end']})
-                raise n_exc.InvalidAllocationPool(pool=ip_pool)
-        return ip_range_pools
-
-    def delete_subnet(self, context, subnet_id):
-        pass
-
-    def validate_pools_with_subnetpool(self, subnet):
-        """Verifies that allocation pools are set correctly
-
-        Allocation pools can be set for specific subnet request only
-        """
-        has_allocpool = attributes.is_attr_set(subnet['allocation_pools'])
-        is_any_subnetpool_request = not attributes.is_attr_set(subnet['cidr'])
-        if is_any_subnetpool_request and has_allocpool:
-            reason = _("allocation_pools allowed only "
-                       "for specific subnet requests.")
-            raise n_exc.BadRequest(resource='subnets', msg=reason)
-
-    def _validate_ip_version_with_subnetpool(self, subnet, subnetpool):
-        """Validates ip version for subnet_pool and requested subnet"""
-        ip_version = subnet.get('ip_version')
-        has_ip_version = attributes.is_attr_set(ip_version)
-        if has_ip_version and ip_version != subnetpool.ip_version:
-            args = {'req_ver': str(subnet['ip_version']),
-                    'pool_ver': str(subnetpool.ip_version)}
-            reason = _("Cannot allocate IPv%(req_ver)s subnet from "
-                       "IPv%(pool_ver)s subnet pool") % args
-            raise n_exc.BadRequest(resource='subnets', msg=reason)
-
-    def _update_db_port(self, context, db_port, new_port, network_id, new_mac):
-        # Remove all attributes in new_port which are not in the port DB model
-        # and then update the port
-        try:
-            db_port.update(self._filter_non_model_columns(new_port,
-                                                          models_v2.Port))
-            context.session.flush()
-        except db_exc.DBDuplicateEntry:
-            raise n_exc.MacAddressInUse(net_id=network_id, mac=new_mac)
-
-    def _update_subnet_host_routes(self, context, id, s):
-
-        def _combine(ht):
-            return ht['destination'] + "_" + ht['nexthop']
-
-        old_route_list = self._get_route_by_subnet(context, id)
-
-        new_route_set = set([_combine(route)
-                             for route in s['host_routes']])
-
-        old_route_set = set([_combine(route)
-                             for route in old_route_list])
-
-        for route_str in old_route_set - new_route_set:
-            for route in old_route_list:
-                if _combine(route) == route_str:
-                    context.session.delete(route)
-        for route_str in new_route_set - old_route_set:
-            route = models_v2.SubnetRoute(
-                destination=route_str.partition("_")[0],
-                nexthop=route_str.partition("_")[2],
-                subnet_id=id)
-            context.session.add(route)
-
-        # Gather host routes for result
-        new_routes = []
-        for route_str in new_route_set:
-            new_routes.append(
-                {'destination': route_str.partition("_")[0],
-                 'nexthop': route_str.partition("_")[2]})
-        del s["host_routes"]
-        return new_routes
-
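
(Hedged aside: the host-route update above reduces to set arithmetic over
"destination_nexthop" strings; a minimal standalone sketch:)

    def _combine(route):
        return route['destination'] + '_' + route['nexthop']

    old = [{'destination': '0.0.0.0/0', 'nexthop': '10.0.0.1'}]
    new = [{'destination': '0.0.0.0/0', 'nexthop': '10.0.0.254'}]
    stale = {_combine(r) for r in old} - {_combine(r) for r in new}  # deleted
    fresh = {_combine(r) for r in new} - {_combine(r) for r in old}  # added,
    # then split back apart with str.partition('_')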
-    def _update_subnet_dns_nameservers(self, context, id, s):
-        old_dns_list = self._get_dns_by_subnet(context, id)
-        new_dns_addr_list = s["dns_nameservers"]
-
-        # NOTE(changzhi): when updating a subnet's DNS nameservers, delete
-        # all existing nameservers from the DB, then store the new ones
-        # one by one, preserving order.
-        for dns in old_dns_list:
-            context.session.delete(dns)
-
-        for order, server in enumerate(new_dns_addr_list):
-            dns = models_v2.DNSNameServer(
-                address=server,
-                order=order,
-                subnet_id=id)
-            context.session.add(dns)
-        del s["dns_nameservers"]
-        return new_dns_addr_list
-
-    def _update_subnet_allocation_pools(self, context, subnet_id, s):
-        context.session.query(models_v2.IPAllocationPool).filter_by(
-            subnet_id=subnet_id).delete()
-        pools = [(netaddr.IPAddress(p.first, p.version).format(),
-                  netaddr.IPAddress(p.last, p.version).format())
-                 for p in s['allocation_pools']]
-        new_pools = [models_v2.IPAllocationPool(first_ip=p[0],
-                                                last_ip=p[1],
-                                                subnet_id=subnet_id)
-                     for p in pools]
-        context.session.add_all(new_pools)
-        # Call the static method via self so that a child class
-        # (the non-pluggable backend) can redefine it
-        if not ipv6_utils.is_ipv6_pd_enabled(s):
-            self._rebuild_availability_ranges(context, [s])
-        # Gather new pools for result
-        result_pools = [{'start': p[0], 'end': p[1]} for p in pools]
-        del s['allocation_pools']
-        return result_pools
-
-    def update_db_subnet(self, context, subnet_id, s, oldpools):
-        changes = {}
-        if "dns_nameservers" in s:
-            changes['dns_nameservers'] = (
-                self._update_subnet_dns_nameservers(context, subnet_id, s))
-
-        if "host_routes" in s:
-            changes['host_routes'] = self._update_subnet_host_routes(
-                context, subnet_id, s)
-
-        if "allocation_pools" in s:
-            changes['allocation_pools'] = (
-                self._update_subnet_allocation_pools(context, subnet_id, s))
-
-        subnet = self._get_subnet(context, subnet_id)
-        subnet.update(s)
-        return subnet, changes
-
-    def _validate_subnet_cidr(self, context, network, new_subnet_cidr):
-        """Validate the CIDR for a subnet.
-
-        Verifies the specified CIDR does not overlap with the ones defined
-        for the other subnets specified for this network, or with any other
-        CIDR if overlapping IPs are disabled. Does not apply to subnets with
-        temporary IPv6 Prefix Delegation CIDRs (::/64).
-        """
-        new_subnet_ipset = netaddr.IPSet([new_subnet_cidr])
-        # Disallow subnets with prefix length 0 as they will lead to
-        # dnsmasq failures (see bug 1362651).
-        # This is not a discrimination against /0 subnets.
-        # A /0 subnet is conceptually possible but hardly a practical
-        # scenario for neutron's use cases.
-        for cidr in new_subnet_ipset.iter_cidrs():
-            if cidr.prefixlen == 0:
-                err_msg = _("0 is not allowed as CIDR prefix length")
-                raise n_exc.InvalidInput(error_message=err_msg)
-
-        if cfg.CONF.allow_overlapping_ips:
-            subnet_list = network.subnets
-        else:
-            subnet_list = self._get_all_subnets(context)
-        for subnet in subnet_list:
-            if ((netaddr.IPSet([subnet.cidr]) & new_subnet_ipset) and
-                subnet.cidr != constants.PROVISIONAL_IPV6_PD_PREFIX):
-                # don't give out details of the overlapping subnet
-                err_msg = (_("Requested subnet with cidr: %(cidr)s for "
-                             "network: %(network_id)s overlaps with another "
-                             "subnet") %
-                           {'cidr': new_subnet_cidr,
-                            'network_id': network.id})
-                LOG.info(_LI("Validation for CIDR: %(new_cidr)s failed - "
-                             "overlaps with subnet %(subnet_id)s "
-                             "(CIDR: %(cidr)s)"),
-                         {'new_cidr': new_subnet_cidr,
-                          'subnet_id': subnet.id,
-                          'cidr': subnet.cidr})
-                raise n_exc.InvalidInput(error_message=err_msg)
-
-    def _validate_network_subnetpools(self, network,
-                                      new_subnetpool_id, ip_version):
-        """Validate all subnets on the given network have been allocated from
-           the same subnet pool as new_subnetpool_id
-        """
-        for subnet in network.subnets:
-            if (subnet.ip_version == ip_version and
-                    new_subnetpool_id != subnet.subnetpool_id):
-                raise n_exc.NetworkSubnetPoolAffinityError()
-
-    def validate_allocation_pools(self, ip_pools, subnet_cidr):
-        """Validate IP allocation pools.
-
-        Verify that the start and end addresses of each allocation pool are
-        valid, i.e. made up of correctly ordered IP addresses. Also verify
-        that pools do not overlap among themselves. Finally, verify that
-        each range falls within the subnet's CIDR.
-        """
-        subnet = netaddr.IPNetwork(subnet_cidr)
-        subnet_first_ip = netaddr.IPAddress(subnet.first + 1)
-        # last address is broadcast in v4
-        subnet_last_ip = netaddr.IPAddress(subnet.last - (subnet.version == 4))
-
-        LOG.debug("Performing IP validity checks on allocation pools")
-        ip_sets = []
-        for ip_pool in ip_pools:
-            start_ip = netaddr.IPAddress(ip_pool.first, ip_pool.version)
-            end_ip = netaddr.IPAddress(ip_pool.last, ip_pool.version)
-            if (start_ip.version != subnet.version or
-                    end_ip.version != subnet.version):
-                LOG.info(_LI("Specified IP addresses do not match "
-                             "the subnet IP version"))
-                raise n_exc.InvalidAllocationPool(pool=ip_pool)
-            if start_ip < subnet_first_ip or end_ip > subnet_last_ip:
-                LOG.info(_LI("Found pool larger than subnet "
-                             "CIDR:%(start)s - %(end)s"),
-                         {'start': start_ip, 'end': end_ip})
-                raise n_exc.OutOfBoundsAllocationPool(
-                    pool=ip_pool,
-                    subnet_cidr=subnet_cidr)
-            # The pool is valid; create an IPSet for it so that overlaps
-            # are easy to verify
-            ip_sets.append(netaddr.IPSet(ip_pool.cidrs()))
-
-        LOG.debug("Checking for overlaps among allocation pools "
-                  "and gateway ip")
-        ip_ranges = ip_pools[:]
-
-        # Use integer cursors as an efficient way to implement the
-        # comparison while avoiding checking the same pair twice
-        for l_cursor in range(len(ip_sets)):
-            for r_cursor in range(l_cursor + 1, len(ip_sets)):
-                if ip_sets[l_cursor] & ip_sets[r_cursor]:
-                    l_range = ip_ranges[l_cursor]
-                    r_range = ip_ranges[r_cursor]
-                    LOG.info(_LI("Found overlapping ranges: %(l_range)s and "
-                                 "%(r_range)s"),
-                             {'l_range': l_range, 'r_range': r_range})
-                    raise n_exc.OverlappingAllocationPools(
-                        pool_1=l_range,
-                        pool_2=r_range,
-                        subnet_cidr=subnet_cidr)
-
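
(Hedged illustration of the overlap check above, using netaddr as the code
itself does:)

    import netaddr
    p1 = netaddr.IPRange('192.168.0.10', '192.168.0.50')
    p2 = netaddr.IPRange('192.168.0.40', '192.168.0.60')
    # a truthy IPSet intersection is what triggers
    # OverlappingAllocationPools above
    bool(netaddr.IPSet(p1.cidrs()) & netaddr.IPSet(p2.cidrs()))  # True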
-    def _validate_max_ips_per_port(self, fixed_ip_list, device_owner):
-        if common_utils.is_port_trusted({'device_owner': device_owner}):
-            return
-
-        if len(fixed_ip_list) > cfg.CONF.max_fixed_ips_per_port:
-            msg = _('Exceeded maximum amount of fixed ips per port.')
-            raise n_exc.InvalidInput(error_message=msg)
-
-    def _get_subnet_for_fixed_ip(self, context, fixed, network_id):
-        if 'subnet_id' in fixed:
-            subnet = self._get_subnet(context, fixed['subnet_id'])
-            if subnet['network_id'] != network_id:
-                msg = (_("Failed to create port on network %(network_id)s"
-                         ", because fixed_ips included invalid subnet "
-                         "%(subnet_id)s") %
-                       {'network_id': network_id,
-                        'subnet_id': fixed['subnet_id']})
-                raise n_exc.InvalidInput(error_message=msg)
-            # Ensure that the IP is valid on the subnet
-            if ('ip_address' in fixed and
-                not ipam_utils.check_subnet_ip(subnet['cidr'],
-                                               fixed['ip_address'])):
-                raise n_exc.InvalidIpForSubnet(ip_address=fixed['ip_address'])
-            return subnet
-
-        if 'ip_address' not in fixed:
-            msg = _('IP allocation requires subnet_id or ip_address')
-            raise n_exc.InvalidInput(error_message=msg)
-
-        filter = {'network_id': [network_id]}
-        subnets = self._get_subnets(context, filters=filter)
-
-        for subnet in subnets:
-            if ipam_utils.check_subnet_ip(subnet['cidr'],
-                                          fixed['ip_address']):
-                return subnet
-        raise n_exc.InvalidIpForNetwork(ip_address=fixed['ip_address'])
-
-    def generate_pools(self, cidr, gateway_ip):
-        return ipam_utils.generate_pools(cidr, gateway_ip)
-
-    def _prepare_allocation_pools(self, allocation_pools, cidr, gateway_ip):
-        """Returns allocation pools represented as list of IPRanges"""
-        if not attributes.is_attr_set(allocation_pools):
-            return self.generate_pools(cidr, gateway_ip)
-
-        ip_range_pools = self.pools_to_ip_range(allocation_pools)
-        self.validate_allocation_pools(ip_range_pools, cidr)
-        if gateway_ip:
-            self.validate_gw_out_of_pools(gateway_ip, ip_range_pools)
-        return ip_range_pools
-
-    def validate_gw_out_of_pools(self, gateway_ip, pools):
-        for pool_range in pools:
-            if netaddr.IPAddress(gateway_ip) in pool_range:
-                raise n_exc.GatewayConflictWithAllocationPools(
-                    pool=pool_range,
-                    ip_address=gateway_ip)
-
-    def _is_ip_required_by_subnet(self, context, subnet_id, device_owner):
-        # For ports that are not router ports, retain any automatic
-        # (non-optional, e.g. IPv6 SLAAC) addresses.
-        # NOTE: Need to check the SNAT ports for DVR routers here since
-        # they consume an IP.
-        if device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT:
-            return True
-
-        subnet = self._get_subnet(context, subnet_id)
-        return not (ipv6_utils.is_auto_address_subnet(subnet) and
-                    not ipv6_utils.is_ipv6_pd_enabled(subnet))
-
-    def _get_changed_ips_for_port(self, context, original_ips,
-                                  new_ips, device_owner):
-        """Calculate changes in IPs for the port."""
-        # new_ips contains all of the fixed_ips that are to be updated
-        self._validate_max_ips_per_port(new_ips, device_owner)
-
-        add_ips = []
-        remove_ips = []
-        ips_map = {ip['ip_address']: ip
-                   for ip in itertools.chain(new_ips, original_ips)
-                   if 'ip_address' in ip}
-
-        new = set()
-        for ip in new_ips:
-            if 'ip_address' in ip:
-                new.add(ip['ip_address'])
-            else:
-                add_ips.append(ip)
-
-        # Convert original ip addresses to sets
-        orig = set(ip['ip_address'] for ip in original_ips)
-
-        add = new - orig
-        unchanged = new & orig
-        remove = orig - new
-
-        # Convert results back to list of dicts
-        add_ips += [ips_map[ip] for ip in add]
-        prev_ips = [ips_map[ip] for ip in unchanged]
-
-        # Mark an IP for removal if it is not found in new_ips and the
-        # subnet requires the IP to be set manually.
-        # Leave auto-address IPs unchanged.
-        for ip in remove:
-            subnet_id = ips_map[ip]['subnet_id']
-            if self._is_ip_required_by_subnet(context, subnet_id,
-                                              device_owner):
-                remove_ips.append(ips_map[ip])
-            else:
-                prev_ips.append(ips_map[ip])
-
-        return self.Changes(add=add_ips,
-                            original=prev_ips,
-                            remove=remove_ips)
-
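
(Hedged sketch of the set arithmetic performed above, with illustrative
addresses:)

    orig = {'10.0.0.3', '10.0.0.4'}   # from original_ips
    new = {'10.0.0.4', '10.0.0.5'}    # new_ips entries carrying ip_address
    add = new - orig                  # {'10.0.0.5'}
    unchanged = new & orig            # {'10.0.0.4'}
    remove = orig - new               # {'10.0.0.3'}; actually removed only if
                                      # the subnet requires manual addressing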
-    def delete_port(self, context, port_id):
-        query = (context.session.query(models_v2.Port).
-                 enable_eagerloads(False).filter_by(id=port_id))
-        if not context.is_admin:
-            query = query.filter_by(tenant_id=context.tenant_id)
-        # Use of the ORM mapper is needed to ensure appropriate resource
-        # tracking; otherwise SQLAlchemy events won't be triggered.
-        # For more info check 'caveats' in doc/source/devref/quota.rst
-        try:
-            context.session.delete(query.first())
-        except orm_exc.UnmappedInstanceError:
-            LOG.debug("Port %s was not found and therefore no delete "
-                      "operation was performed", port_id)
-
-    def _save_subnet(self, context,
-                     network,
-                     subnet_args,
-                     dns_nameservers,
-                     host_routes,
-                     subnet_request):
-        self._validate_subnet_cidr(context, network, subnet_args['cidr'])
-        self._validate_network_subnetpools(network,
-                                           subnet_args['subnetpool_id'],
-                                           subnet_args['ip_version'])
-
-        subnet = models_v2.Subnet(**subnet_args)
-        context.session.add(subnet)
-        # NOTE(changzhi): when creating a subnet with DNS nameservers,
-        # store them in the DB one by one, preserving order
-        if attributes.is_attr_set(dns_nameservers):
-            for order, server in enumerate(dns_nameservers):
-                dns = models_v2.DNSNameServer(
-                    address=server,
-                    order=order,
-                    subnet_id=subnet.id)
-                context.session.add(dns)
-
-        if attributes.is_attr_set(host_routes):
-            for rt in host_routes:
-                route = models_v2.SubnetRoute(
-                    subnet_id=subnet.id,
-                    destination=rt['destination'],
-                    nexthop=rt['nexthop'])
-                context.session.add(route)
-
-        self.save_allocation_pools(context, subnet,
-                                   subnet_request.allocation_pools)
-
-        return subnet
diff --git a/neutron/db/ipam_non_pluggable_backend.py b/neutron/db/ipam_non_pluggable_backend.py
deleted file mode 100644 (file)
index 3ecd301..0000000
+++ /dev/null
@@ -1,474 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from sqlalchemy import and_
-from sqlalchemy import orm
-from sqlalchemy.orm import exc
-
-from neutron._i18n import _
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.db import ipam_backend_mixin
-from neutron.db import models_v2
-from neutron.ipam import requests as ipam_req
-from neutron.ipam import subnet_alloc
-
-LOG = logging.getLogger(__name__)
-
-
-class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
-
-    @staticmethod
-    def _generate_ip(context, subnets):
-        try:
-            return IpamNonPluggableBackend._try_generate_ip(context, subnets)
-        except n_exc.IpAddressGenerationFailure:
-            IpamNonPluggableBackend._rebuild_availability_ranges(context,
-                                                                 subnets)
-
-        return IpamNonPluggableBackend._try_generate_ip(context, subnets)
-
-    @staticmethod
-    def _try_generate_ip(context, subnets):
-        """Generate an IP address.
-
-        The IP address will be generated from one of the subnets defined on
-        the network.
-        """
-        range_qry = context.session.query(
-            models_v2.IPAvailabilityRange).join(
-                models_v2.IPAllocationPool).with_lockmode('update')
-        for subnet in subnets:
-            ip_range = range_qry.filter_by(subnet_id=subnet['id']).first()
-            if not ip_range:
-                LOG.debug("All IPs from subnet %(subnet_id)s (%(cidr)s) "
-                          "allocated",
-                          {'subnet_id': subnet['id'],
-                           'cidr': subnet['cidr']})
-                continue
-            ip_address = ip_range['first_ip']
-            if ip_range['first_ip'] == ip_range['last_ip']:
-                # This was the last free address in the range => delete it
-                LOG.debug("No more free IPs in slice. Deleting "
-                          "allocation pool.")
-                context.session.delete(ip_range)
-            else:
-                # increment the first free
-                new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
-                ip_range['first_ip'] = new_first_ip
-            LOG.debug("Allocated IP - %(ip_address)s from %(first_ip)s "
-                      "to %(last_ip)s",
-                      {'ip_address': ip_address,
-                       'first_ip': ip_range['first_ip'],
-                       'last_ip': ip_range['last_ip']})
-            return {'ip_address': ip_address,
-                    'subnet_id': subnet['id']}
-        raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id'])
-
-    @staticmethod
-    def _rebuild_availability_ranges(context, subnets):
-        """Rebuild availability ranges.
-
-        This method is called either when no more IPs are available or by
-        _update_subnet_allocation_pools. Calling
-        _update_subnet_allocation_pools before this function deletes the
-        IPAllocationPools associated with the subnet being updated, which
-        results in the IPAvailabilityRanges being deleted as well.
-        """
-        ip_qry = context.session.query(
-            models_v2.IPAllocation).with_lockmode('update')
-        # PostgreSQL does not support select...for update with an outer join.
-        # No join is needed here.
-        pool_qry = context.session.query(
-            models_v2.IPAllocationPool).options(
-                orm.noload('available_ranges')).with_lockmode('update')
-        for subnet in sorted(subnets):
-            LOG.debug("Rebuilding availability ranges for subnet %s",
-                      subnet)
-
-            # Create a set of all currently allocated addresses
-            ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id'])
-            allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address'])
-                                        for i in ip_qry_results])
-
-            for pool in pool_qry.filter_by(subnet_id=subnet['id']):
-                # Create a set of all addresses in the pool
-                poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'],
-                                                        pool['last_ip']))
-
-                # Use set difference to find free addresses in the pool
-                available = poolset - allocations
-
-                # Generator that compacts an IP set into contiguous ranges
-                def ipset_to_ranges(ipset):
-                    first, last = None, None
-                    for cidr in ipset.iter_cidrs():
-                        if last and last + 1 != cidr.first:
-                            yield netaddr.IPRange(first, last)
-                            first = None
-                        first, last = first if first else cidr.first, cidr.last
-                    if first:
-                        yield netaddr.IPRange(first, last)
-
-                # Write the ranges to the db
-                for ip_range in ipset_to_ranges(available):
-                    available_range = models_v2.IPAvailabilityRange(
-                        allocation_pool_id=pool['id'],
-                        first_ip=str(netaddr.IPAddress(ip_range.first)),
-                        last_ip=str(netaddr.IPAddress(ip_range.last)))
-                    context.session.add(available_range)
-
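
(Hedged standalone illustration of the rebuild: the free space is the pool
IPSet minus the allocations IPSet, which the inner generator then compacts
back into contiguous ranges:)

    import netaddr
    pool = netaddr.IPSet(netaddr.IPRange('10.0.0.2', '10.0.0.6').cidrs())
    allocated = netaddr.IPSet(['10.0.0.4'])
    available = pool - allocated     # 10.0.0.2-3 and 10.0.0.5-6 remain free
    # the ipset_to_ranges generator above walks available.iter_cidrs() and
    # re-joins adjacent CIDRs, yielding IPRange(10.0.0.2, 10.0.0.3) and
    # IPRange(10.0.0.5, 10.0.0.6)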
-    @staticmethod
-    def _allocate_specific_ip(context, subnet_id, ip_address):
-        """Allocate a specific IP address on the subnet."""
-        ip = int(netaddr.IPAddress(ip_address))
-        range_qry = context.session.query(
-            models_v2.IPAvailabilityRange).join(
-                models_v2.IPAllocationPool).with_lockmode('update')
-        results = range_qry.filter_by(subnet_id=subnet_id)
-        for ip_range in results:
-            first = int(netaddr.IPAddress(ip_range['first_ip']))
-            last = int(netaddr.IPAddress(ip_range['last_ip']))
-            if first <= ip <= last:
-                if first == last:
-                    context.session.delete(ip_range)
-                    return
-                elif first == ip:
-                    new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
-                    ip_range['first_ip'] = new_first_ip
-                    return
-                elif last == ip:
-                    new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
-                    ip_range['last_ip'] = new_last_ip
-                    return
-                else:
-                    # Adjust the original range to end before ip_address
-                    old_last_ip = ip_range['last_ip']
-                    new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
-                    ip_range['last_ip'] = new_last_ip
-
-                    # Create a new second range for after ip_address
-                    new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
-                    new_ip_range = models_v2.IPAvailabilityRange(
-                        allocation_pool_id=ip_range['allocation_pool_id'],
-                        first_ip=new_first_ip,
-                        last_ip=old_last_ip)
-                    context.session.add(new_ip_range)
-                    return
-
-    @staticmethod
-    def _check_unique_ip(context, network_id, subnet_id, ip_address):
-        """Validate that the IP address on the subnet is not in use."""
-        ip_qry = context.session.query(models_v2.IPAllocation)
-        try:
-            ip_qry.filter_by(network_id=network_id,
-                             subnet_id=subnet_id,
-                             ip_address=ip_address).one()
-        except exc.NoResultFound:
-            return True
-        return False
-
-    def save_allocation_pools(self, context, subnet, allocation_pools):
-        for pool in allocation_pools:
-            first_ip = str(netaddr.IPAddress(pool.first, pool.version))
-            last_ip = str(netaddr.IPAddress(pool.last, pool.version))
-            ip_pool = models_v2.IPAllocationPool(subnet=subnet,
-                                                 first_ip=first_ip,
-                                                 last_ip=last_ip)
-            context.session.add(ip_pool)
-            ip_range = models_v2.IPAvailabilityRange(
-                ipallocationpool=ip_pool,
-                first_ip=first_ip,
-                last_ip=last_ip)
-            context.session.add(ip_range)
-
-    def allocate_ips_for_port_and_store(self, context, port, port_id):
-        network_id = port['port']['network_id']
-        ips = self._allocate_ips_for_port(context, port)
-        if ips:
-            for ip in ips:
-                ip_address = ip['ip_address']
-                subnet_id = ip['subnet_id']
-                self._store_ip_allocation(context, ip_address, network_id,
-                                          subnet_id, port_id)
-        return ips
-
-    def update_port_with_ips(self, context, db_port, new_port, new_mac):
-        changes = self.Changes(add=[], original=[], remove=[])
-        # Check if the IPs need to be updated
-        network_id = db_port['network_id']
-        if 'fixed_ips' in new_port:
-            original = self._make_port_dict(db_port, process_extensions=False)
-            changes = self._update_ips_for_port(
-                context, network_id,
-                original["fixed_ips"], new_port['fixed_ips'],
-                original['mac_address'], db_port['device_owner'])
-
-            # Update ips if necessary
-            for ip in changes.add:
-                IpamNonPluggableBackend._store_ip_allocation(
-                    context, ip['ip_address'], network_id,
-                    ip['subnet_id'], db_port.id)
-        self._update_db_port(context, db_port, new_port, network_id, new_mac)
-        return changes
-
-    def _test_fixed_ips_for_port(self, context, network_id, fixed_ips,
-                                 device_owner):
-        """Test fixed IPs for port.
-
-        Check that configured subnets are valid prior to allocating any
-        IPs. Include the subnet_id in the result if only an IP address is
-        configured.
-
-        :raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork,
-                 InvalidIpForSubnet
-        """
-        fixed_ip_set = []
-        for fixed in fixed_ips:
-            subnet = self._get_subnet_for_fixed_ip(context, fixed, network_id)
-
-            is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
-            if ('ip_address' in fixed and
-                subnet['cidr'] != constants.PROVISIONAL_IPV6_PD_PREFIX):
-                # Ensure that the IPs are unique
-                if not IpamNonPluggableBackend._check_unique_ip(
-                        context, network_id,
-                        subnet['id'], fixed['ip_address']):
-                    raise n_exc.IpAddressInUse(net_id=network_id,
-                                               ip_address=fixed['ip_address'])
-
-                if (is_auto_addr_subnet and
-                    device_owner not in
-                        constants.ROUTER_INTERFACE_OWNERS):
-                    msg = (_("IPv6 address %(address)s can not be directly "
-                            "assigned to a port on subnet %(id)s since the "
-                            "subnet is configured for automatic addresses") %
-                           {'address': fixed['ip_address'],
-                            'id': subnet['id']})
-                    raise n_exc.InvalidInput(error_message=msg)
-                fixed_ip_set.append({'subnet_id': subnet['id'],
-                                     'ip_address': fixed['ip_address']})
-            else:
-                # A scan for auto-address subnets on the network is done
-                # separately so that all such subnets (not just those
-                # listed explicitly here by subnet ID) are associated
-                # with the port.
-                if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or
-                    not is_auto_addr_subnet):
-                    fixed_ip_set.append({'subnet_id': subnet['id']})
-
-        self._validate_max_ips_per_port(fixed_ip_set, device_owner)
-        return fixed_ip_set
-
-    def _allocate_fixed_ips(self, context, fixed_ips, mac_address):
-        """Allocate IP addresses according to the configured fixed_ips."""
-        ips = []
-
-        # Start with entries that asked for a specific IP, in case those
-        # IPs happen to be next in line for allocation to entries that
-        # didn't ask for a specific one
-        fixed_ips.sort(key=lambda x: 'ip_address' not in x)
-        for fixed in fixed_ips:
-            subnet = self._get_subnet(context, fixed['subnet_id'])
-            is_auto_addr = ipv6_utils.is_auto_address_subnet(subnet)
-            if 'ip_address' in fixed:
-                if not is_auto_addr:
-                    # Remove the IP address from the allocation pool
-                    IpamNonPluggableBackend._allocate_specific_ip(
-                        context, fixed['subnet_id'], fixed['ip_address'])
-                ips.append({'ip_address': fixed['ip_address'],
-                            'subnet_id': fixed['subnet_id']})
-            # Only subnet ID is specified => need to generate IP
-            # from subnet
-            else:
-                if is_auto_addr:
-                    ip_address = self._calculate_ipv6_eui64_addr(context,
-                                                                 subnet,
-                                                                 mac_address)
-                    ips.append({'ip_address': ip_address.format(),
-                                'subnet_id': subnet['id']})
-                else:
-                    subnets = [subnet]
-                    # IP address allocation
-                    result = self._generate_ip(context, subnets)
-                    ips.append({'ip_address': result['ip_address'],
-                                'subnet_id': result['subnet_id']})
-        return ips
-
-    def _update_ips_for_port(self, context, network_id, original_ips,
-                             new_ips, mac_address, device_owner):
-        """Add or remove IPs from the port."""
-        added = []
-        changes = self._get_changed_ips_for_port(context, original_ips,
-                                                 new_ips, device_owner)
-        # Check that the IPs to add are valid
-        to_add = self._test_fixed_ips_for_port(context, network_id,
-                                               changes.add, device_owner)
-        for ip in changes.remove:
-            LOG.debug("Port update. Hold %s", ip)
-            IpamNonPluggableBackend._delete_ip_allocation(context,
-                                                          network_id,
-                                                          ip['subnet_id'],
-                                                          ip['ip_address'])
-
-        if to_add:
-            LOG.debug("Port update. Adding %s", to_add)
-            added = self._allocate_fixed_ips(context, to_add, mac_address)
-        return self.Changes(add=added,
-                            original=changes.original,
-                            remove=changes.remove)
-
-    def _allocate_ips_for_port(self, context, port):
-        """Allocate IP addresses for the port.
-
-        If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
-        addresses for the port. If port['fixed_ips'] contains an IP address or
-        a subnet_id then allocate an IP address accordingly.
-        """
-        p = port['port']
-        ips = []
-        v6_stateless = []
-        net_id_filter = {'network_id': [p['network_id']]}
-        subnets = self._get_subnets(context, filters=net_id_filter)
-        is_router_port = (
-            p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS_SNAT)
-
-        fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED
-        if fixed_configured:
-            configured_ips = self._test_fixed_ips_for_port(context,
-                                                           p["network_id"],
-                                                           p['fixed_ips'],
-                                                           p['device_owner'])
-            ips = self._allocate_fixed_ips(context,
-                                           configured_ips,
-                                           p['mac_address'])
-
-            # For ports that are not router ports, implicitly include all
-            # auto-address subnets for address association.
-            if not is_router_port:
-                v6_stateless += [subnet for subnet in subnets
-                                 if ipv6_utils.is_auto_address_subnet(subnet)]
-        else:
-            # Split into v4, v6 stateless and v6 stateful subnets
-            v4 = []
-            v6_stateful = []
-            for subnet in subnets:
-                if subnet['ip_version'] == 4:
-                    v4.append(subnet)
-                elif ipv6_utils.is_auto_address_subnet(subnet):
-                    if not is_router_port:
-                        v6_stateless.append(subnet)
-                else:
-                    v6_stateful.append(subnet)
-
-            version_subnets = [v4, v6_stateful]
-            for subnets in version_subnets:
-                if subnets:
-                    result = IpamNonPluggableBackend._generate_ip(context,
-                                                                  subnets)
-                    ips.append({'ip_address': result['ip_address'],
-                                'subnet_id': result['subnet_id']})
-
-        for subnet in v6_stateless:
-            # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets
-            # are implicitly included.
-            ip_address = self._calculate_ipv6_eui64_addr(context, subnet,
-                                                         p['mac_address'])
-            ips.append({'ip_address': ip_address.format(),
-                        'subnet_id': subnet['id']})
-
-        return ips
-
-    def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet):
-        """For an auto-address subnet, add addrs for ports on the net."""
-        with context.session.begin(subtransactions=True):
-            network_id = subnet['network_id']
-            port_qry = context.session.query(models_v2.Port)
-            ports = port_qry.filter(
-                and_(models_v2.Port.network_id == network_id,
-                     ~models_v2.Port.device_owner.in_(
-                         constants.ROUTER_INTERFACE_OWNERS_SNAT)))
-            for port in ports:
-                ip_address = self._calculate_ipv6_eui64_addr(
-                    context, subnet, port['mac_address'])
-                allocated = models_v2.IPAllocation(network_id=network_id,
-                                                   port_id=port['id'],
-                                                   ip_address=ip_address,
-                                                   subnet_id=subnet['id'])
-                try:
-                    # Do the insertion of each IP allocation entry within
-                    # the context of a nested transaction, so that the entry
-                    # is rolled back independently of other entries whenever
-                    # the corresponding port has been deleted.
-                    with context.session.begin_nested():
-                        context.session.add(allocated)
-                except db_exc.DBReferenceError:
-                    LOG.debug("Port %s was deleted while updating it with an "
-                              "IPv6 auto-address. Ignoring.", port['id'])
-
-    def _calculate_ipv6_eui64_addr(self, context, subnet, mac_addr):
-        prefix = subnet['cidr']
-        network_id = subnet['network_id']
-        ip_address = ipv6_utils.get_ipv6_addr_by_EUI64(
-            prefix, mac_addr).format()
-        if not self._check_unique_ip(context, network_id,
-                                     subnet['id'], ip_address):
-            raise n_exc.IpAddressInUse(net_id=network_id,
-                                       ip_address=ip_address)
-        return ip_address
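-
-    # A minimal sketch (illustrative values, not part of the original
-    # module) of the EUI-64 derivation that
-    # ipv6_utils.get_ipv6_addr_by_EUI64 performs, using netaddr directly:
-    #
-    #     import netaddr
-    #     mac = 'fa:16:3e:00:00:01'
-    #     prefix = netaddr.IPNetwork('2001:db8::/64')
-    #     eui64 = int(netaddr.EUI(mac).eui64()) ^ (1 << 57)  # flip U/L bit
-    #     netaddr.IPAddress(prefix.first + eui64)
-    #     # -> IPAddress('2001:db8::f816:3eff:fe00:1')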
-
-    def allocate_subnet(self, context, network, subnet, subnetpool_id):
-        subnetpool = None
-        if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID:
-            subnetpool = self._get_subnetpool(context, subnetpool_id)
-            self._validate_ip_version_with_subnetpool(subnet, subnetpool)
-
-        # gateway_ip and allocation pools should be validated or generated
-        # only for specific request
-        if subnet['cidr'] is not attributes.ATTR_NOT_SPECIFIED:
-            subnet['gateway_ip'] = self._gateway_ip_str(subnet,
-                                                        subnet['cidr'])
-            # allocation_pools are converted to list of IPRanges
-            subnet['allocation_pools'] = self._prepare_allocation_pools(
-                subnet['allocation_pools'],
-                subnet['cidr'],
-                subnet['gateway_ip'])
-
-        subnet_request = ipam_req.SubnetRequestFactory.get_request(context,
-                                                                   subnet,
-                                                                   subnetpool)
-
-        if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID:
-            driver = subnet_alloc.SubnetAllocator(subnetpool, context)
-            ipam_subnet = driver.allocate_subnet(subnet_request)
-            subnet_request = ipam_subnet.get_details()
-
-        subnet = self._save_subnet(context,
-                                   network,
-                                   self._make_subnet_args(
-                                       subnet_request,
-                                       subnet,
-                                       subnetpool_id),
-                                   subnet['dns_nameservers'],
-                                   subnet['host_routes'],
-                                   subnet_request)
-        # ipam_subnet is not expected to be allocated for non-pluggable IPAM,
-        # so just return None for it (the second element of the tuple)
-        return subnet, None
diff --git a/neutron/db/ipam_pluggable_backend.py b/neutron/db/ipam_pluggable_backend.py
deleted file mode 100644 (file)
index dbbddab..0000000
+++ /dev/null
@@ -1,452 +0,0 @@
-# Copyright (c) 2015 Infoblox Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from oslo_utils import excutils
-from sqlalchemy import and_
-
-from neutron._i18n import _, _LE
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.db import ipam_backend_mixin
-from neutron.db import models_v2
-from neutron.ipam import driver
-from neutron.ipam import exceptions as ipam_exc
-from neutron.ipam import requests as ipam_req
-
-
-LOG = logging.getLogger(__name__)
-
-
-class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
-
-    def _get_failed_ips(self, all_ips, success_ips):
-        # Collect the successful addresses into a set: repeated membership
-        # tests against a generator would exhaust it after the first lookup.
-        success_set = set(ip_dict['ip_address'] for ip_dict in success_ips)
-        return (ip_dict['ip_address'] for ip_dict in all_ips
-                if ip_dict['ip_address'] not in success_set)
-
-    def _ipam_deallocate_ips(self, context, ipam_driver, port, ips,
-                             revert_on_fail=True):
-        """Deallocate set of ips over IPAM.
-
-        If any single ip deallocation fails, tries to allocate deallocated
-        ip addresses with fixed ip request
-        """
-        deallocated = []
-
-        try:
-            for ip in ips:
-                try:
-                    ipam_subnet = ipam_driver.get_subnet(ip['subnet_id'])
-                    ipam_subnet.deallocate(ip['ip_address'])
-                    deallocated.append(ip)
-                except n_exc.SubnetNotFound:
-                    LOG.debug("Subnet was not found on ip deallocation: %s",
-                              ip)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.debug("An exception occurred during IP deallocation.")
-                if revert_on_fail and deallocated:
-                    LOG.debug("Reverting deallocation")
-                    self._ipam_allocate_ips(context, ipam_driver, port,
-                                            deallocated, revert_on_fail=False)
-                elif not revert_on_fail and ips:
-                    addresses = ', '.join(self._get_failed_ips(ips,
-                                                               deallocated))
-                    LOG.error(_LE("IP deallocation failed on "
-                                  "external system for %s"), addresses)
-        return deallocated
-
-    def _ipam_try_allocate_ip(self, context, ipam_driver, port, ip_dict):
-        factory = ipam_driver.get_address_request_factory()
-        ip_request = factory.get_request(context, port, ip_dict)
-        ipam_subnet = ipam_driver.get_subnet(ip_dict['subnet_id'])
-        return ipam_subnet.allocate(ip_request)
-
-    def _ipam_allocate_single_ip(self, context, ipam_driver, port, subnets):
-        """Allocates single ip from set of subnets
-
-        Raises n_exc.IpAddressGenerationFailure if allocation failed for
-        all subnets.
-        """
-        for subnet in subnets:
-            try:
-                return [self._ipam_try_allocate_ip(context, ipam_driver,
-                                                   port, subnet),
-                        subnet]
-            except ipam_exc.IpAddressGenerationFailure:
-                continue
-        raise n_exc.IpAddressGenerationFailure(
-            net_id=port['network_id'])
-
-    def _ipam_allocate_ips(self, context, ipam_driver, port, ips,
-                           revert_on_fail=True):
-        """Allocate set of ips over IPAM.
-
-        If any single ip allocation fails, tries to deallocate all
-        allocated ip addresses.
-        """
-        allocated = []
-
-        # Process entries that asked for a specific IP first, in case one of
-        # those IPs is next in line to be handed out automatically to an
-        # entry that did not ask for a specific IP.
-        ips.sort(key=lambda x: 'ip_address' not in x)
-        try:
-            for ip in ips:
-                # By default the IP info is a dict, used to allocate a
-                # single IP from a single subnet.
-                # The IP info can also be a list, used to allocate a single
-                # IP from multiple subnets (the first successful allocation
-                # is returned).
-                ip_list = [ip] if isinstance(ip, dict) else ip
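-                # e.g. (illustrative) {'subnet_id': s1} tries only s1,
-                # while [{'subnet_id': s1}, {'subnet_id': s2}] tries s1
-                # and then s2, keeping the first success.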
-                ip_address, ip_subnet = self._ipam_allocate_single_ip(
-                    context, ipam_driver, port, ip_list)
-                allocated.append({'ip_address': ip_address,
-                                  'subnet_id': ip_subnet['subnet_id']})
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.debug("An exception occurred during IP allocation.")
-
-                if revert_on_fail and allocated:
-                    LOG.debug("Reverting allocation")
-                    self._ipam_deallocate_ips(context, ipam_driver, port,
-                                              allocated, revert_on_fail=False)
-                elif not revert_on_fail and ips:
-                    addresses = ', '.join(self._get_failed_ips(ips,
-                                                               allocated))
-                    LOG.error(_LE("IP allocation failed on "
-                                  "external system for %s"), addresses)
-
-        return allocated
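-
-    # The allocate/deallocate pair above leans on
-    # oslo_utils.excutils.save_and_reraise_exception; a minimal sketch of
-    # the pattern (risky_operation/cleanup are hypothetical helpers):
-    #
-    #     from oslo_utils import excutils
-    #     try:
-    #         risky_operation()
-    #     except Exception:
-    #         with excutils.save_and_reraise_exception():
-    #             cleanup()  # runs, then the original exception re-raises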
-
-    def _ipam_update_allocation_pools(self, context, ipam_driver, subnet):
-        self.validate_allocation_pools(subnet['allocation_pools'],
-                                       subnet['cidr'])
-
-        factory = ipam_driver.get_subnet_request_factory()
-        subnet_request = factory.get_request(context, subnet, None)
-
-        ipam_driver.update_subnet(subnet_request)
-
-    def delete_subnet(self, context, subnet_id):
-        ipam_driver = driver.Pool.get_instance(None, context)
-        ipam_driver.remove_subnet(subnet_id)
-
-    def allocate_ips_for_port_and_store(self, context, port, port_id):
-        network_id = port['port']['network_id']
-        ips = []
-        try:
-            ips = self._allocate_ips_for_port(context, port)
-            for ip in ips:
-                ip_address = ip['ip_address']
-                subnet_id = ip['subnet_id']
-                IpamPluggableBackend._store_ip_allocation(
-                    context, ip_address, network_id,
-                    subnet_id, port_id)
-            return ips
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                if ips:
-                    LOG.debug("An exception occurred during port creation. "
-                              "Reverting IP allocation")
-                    ipam_driver = driver.Pool.get_instance(None, context)
-                    self._ipam_deallocate_ips(context, ipam_driver,
-                                              port['port'], ips,
-                                              revert_on_fail=False)
-
-    def _allocate_ips_for_port(self, context, port):
-        """Allocate IP addresses for the port. IPAM version.
-
-        If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
-        addresses for the port. If port['fixed_ips'] contains an IP address or
-        a subnet_id then allocate an IP address accordingly.
-        """
-        p = port['port']
-        ips = []
-        v6_stateless = []
-        net_id_filter = {'network_id': [p['network_id']]}
-        subnets = self._get_subnets(context, filters=net_id_filter)
-        is_router_port = (
-            p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS_SNAT)
-
-        fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED
-        if fixed_configured:
-            ips = self._test_fixed_ips_for_port(context,
-                                                p["network_id"],
-                                                p['fixed_ips'],
-                                                p['device_owner'])
-            # For ports that are not router ports, implicitly include all
-            # auto-address subnets for address association.
-            if not is_router_port:
-                v6_stateless += [subnet for subnet in subnets
-                                 if ipv6_utils.is_auto_address_subnet(subnet)]
-        else:
-            # Split into v4, v6 stateless and v6 stateful subnets
-            v4 = []
-            v6_stateful = []
-            for subnet in subnets:
-                if subnet['ip_version'] == 4:
-                    v4.append(subnet)
-                else:
-                    if ipv6_utils.is_auto_address_subnet(subnet):
-                        if not is_router_port:
-                            v6_stateless.append(subnet)
-                    else:
-                        v6_stateful.append(subnet)
-
-            version_subnets = [v4, v6_stateful]
-            for subnets in version_subnets:
-                if subnets:
-                    ips.append([{'subnet_id': s['id']}
-                                for s in subnets])
-
-        for subnet in v6_stateless:
-            # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets
-            # are implicitly included.
-            ips.append({'subnet_id': subnet['id'],
-                        'subnet_cidr': subnet['cidr'],
-                        'eui64_address': True,
-                        'mac': p['mac_address']})
-        ipam_driver = driver.Pool.get_instance(None, context)
-        return self._ipam_allocate_ips(context, ipam_driver, p, ips)
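-
-    # For illustration (assumed IDs and MAC), with no fixed_ips configured
-    # the list handed to IPAM can mix an "any of these subnets" list with
-    # an EUI-64 request:
-    #
-    #     [[{'subnet_id': 'v4-a'}, {'subnet_id': 'v4-b'}],
-    #      {'subnet_id': 'slaac-id', 'subnet_cidr': '2001:db8::/64',
-    #       'eui64_address': True, 'mac': 'fa:16:3e:00:00:01'}]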
-
-    def _test_fixed_ips_for_port(self, context, network_id, fixed_ips,
-                                 device_owner):
-        """Test fixed IPs for port.
-
-        Check that configured subnets are valid prior to allocating any
-        IPs. Include the subnet_id in the result if only an IP address is
-        configured.
-
-        :raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork,
-                 InvalidIpForSubnet
-        """
-        fixed_ip_list = []
-        for fixed in fixed_ips:
-            subnet = self._get_subnet_for_fixed_ip(context, fixed, network_id)
-
-            is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
-            if 'ip_address' in fixed:
-                if (is_auto_addr_subnet and device_owner not in
-                        constants.ROUTER_INTERFACE_OWNERS):
-                    msg = (_("IPv6 address %(address)s can not be directly "
-                            "assigned to a port on subnet %(id)s since the "
-                            "subnet is configured for automatic addresses") %
-                           {'address': fixed['ip_address'],
-                            'id': subnet['id']})
-                    raise n_exc.InvalidInput(error_message=msg)
-                fixed_ip_list.append({'subnet_id': subnet['id'],
-                                      'ip_address': fixed['ip_address']})
-            else:
-                # A scan for auto-address subnets on the network is done
-                # separately so that all such subnets (not just those
-                # listed explicitly here by subnet ID) are associated
-                # with the port.
-                if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or
-                        not is_auto_addr_subnet):
-                    fixed_ip_list.append({'subnet_id': subnet['id']})
-
-        self._validate_max_ips_per_port(fixed_ip_list, device_owner)
-        return fixed_ip_list
-
-    def _update_ips_for_port(self, context, port,
-                             original_ips, new_ips, mac):
-        """Add or remove IPs from the port. IPAM version"""
-        added = []
-        removed = []
-        changes = self._get_changed_ips_for_port(
-            context, original_ips, new_ips, port['device_owner'])
-        # Check that the IPs to add are valid
-        to_add = self._test_fixed_ips_for_port(
-            context, port['network_id'], changes.add,
-            port['device_owner'])
-
-        ipam_driver = driver.Pool.get_instance(None, context)
-        if changes.remove:
-            removed = self._ipam_deallocate_ips(context, ipam_driver, port,
-                                                changes.remove)
-        if to_add:
-            added = self._ipam_allocate_ips(context, ipam_driver,
-                                            port, to_add)
-        return self.Changes(add=added,
-                            original=changes.original,
-                            remove=removed)
-
-    def save_allocation_pools(self, context, subnet, allocation_pools):
-        for pool in allocation_pools:
-            first_ip = str(netaddr.IPAddress(pool.first, pool.version))
-            last_ip = str(netaddr.IPAddress(pool.last, pool.version))
-            ip_pool = models_v2.IPAllocationPool(subnet=subnet,
-                                                 first_ip=first_ip,
-                                                 last_ip=last_ip)
-            context.session.add(ip_pool)
-
-    def update_port_with_ips(self, context, db_port, new_port, new_mac):
-        changes = self.Changes(add=[], original=[], remove=[])
-
-        if 'fixed_ips' in new_port:
-            original = self._make_port_dict(db_port,
-                                            process_extensions=False)
-            changes = self._update_ips_for_port(context,
-                                                db_port,
-                                                original["fixed_ips"],
-                                                new_port['fixed_ips'],
-                                                new_mac)
-        try:
-            # Check if the IPs need to be updated
-            network_id = db_port['network_id']
-            for ip in changes.add:
-                self._store_ip_allocation(
-                    context, ip['ip_address'], network_id,
-                    ip['subnet_id'], db_port.id)
-            for ip in changes.remove:
-                self._delete_ip_allocation(context, network_id,
-                                           ip['subnet_id'], ip['ip_address'])
-            self._update_db_port(context, db_port, new_port, network_id,
-                                 new_mac)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                if 'fixed_ips' in new_port:
-                    LOG.debug("An exception occurred during port update.")
-                    ipam_driver = driver.Pool.get_instance(None, context)
-                    if changes.add:
-                        LOG.debug("Reverting IP allocation.")
-                        self._ipam_deallocate_ips(context, ipam_driver,
-                                                  db_port, changes.add,
-                                                  revert_on_fail=False)
-                    if changes.remove:
-                        LOG.debug("Reverting IP deallocation.")
-                        self._ipam_allocate_ips(context, ipam_driver,
-                                                db_port, changes.remove,
-                                                revert_on_fail=False)
-        return changes
-
-    def delete_port(self, context, id):
-        # Get fixed_ips list before port deletion
-        port = self._get_port(context, id)
-        ipam_driver = driver.Pool.get_instance(None, context)
-
-        super(IpamPluggableBackend, self).delete_port(context, id)
-        # IPs are deallocated via IPAM only after the port has been deleted
-        # locally, so no rollback is needed on the remote server if the
-        # local port deletion fails.
-        self._ipam_deallocate_ips(context, ipam_driver, port,
-                                  port['fixed_ips'])
-
-    def update_db_subnet(self, context, id, s, old_pools):
-        ipam_driver = driver.Pool.get_instance(None, context)
-        if "allocation_pools" in s:
-            self._ipam_update_allocation_pools(context, ipam_driver, s)
-
-        try:
-            subnet, changes = super(IpamPluggableBackend,
-                                    self).update_db_subnet(context, id,
-                                                           s, old_pools)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                if "allocation_pools" in s and old_pools:
-                    LOG.error(
-                        _LE("An exception occurred during subnet update. "
-                            "Reverting allocation pool changes"))
-                    s['allocation_pools'] = old_pools
-                    self._ipam_update_allocation_pools(context, ipam_driver, s)
-        return subnet, changes
-
-    def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet):
-        """For an auto-address subnet, add addrs for ports on the net."""
-        with context.session.begin(subtransactions=True):
-            network_id = subnet['network_id']
-            port_qry = context.session.query(models_v2.Port)
-            ports = port_qry.filter(
-                and_(models_v2.Port.network_id == network_id,
-                     ~models_v2.Port.device_owner.in_(
-                         constants.ROUTER_INTERFACE_OWNERS_SNAT)))
-            for port in ports:
-                ip_request = ipam_req.AutomaticAddressRequest(
-                    prefix=subnet['cidr'],
-                    mac=port['mac_address'])
-                ip_address = ipam_subnet.allocate(ip_request)
-                allocated = models_v2.IPAllocation(network_id=network_id,
-                                                   port_id=port['id'],
-                                                   ip_address=ip_address,
-                                                   subnet_id=subnet['id'])
-                try:
-                    # Do the insertion of each IP allocation entry within
-                    # the context of a nested transaction, so that the entry
-                    # is rolled back independently of other entries whenever
-                    # the corresponding port has been deleted.
-                    with context.session.begin_nested():
-                        context.session.add(allocated)
-                except db_exc.DBReferenceError:
-                    LOG.debug("Port %s was deleted while updating it with an "
-                              "IPv6 auto-address. Ignoring.", port['id'])
-                    LOG.debug("Reverting IP allocation for %s", ip_address)
-                    # Do not fail if reverting allocation was unsuccessful
-                    try:
-                        ipam_subnet.deallocate(ip_address)
-                    except Exception:
-                        LOG.debug("Reverting IP allocation failed for %s",
-                                  ip_address)
-
-    def allocate_subnet(self, context, network, subnet, subnetpool_id):
-        subnetpool = None
-
-        if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID:
-            subnetpool = self._get_subnetpool(context, subnetpool_id)
-            self._validate_ip_version_with_subnetpool(subnet, subnetpool)
-
-        # gateway_ip and allocation pools should be validated or generated
-        # only for specific request
-        if subnet['cidr'] is not attributes.ATTR_NOT_SPECIFIED:
-            subnet['gateway_ip'] = self._gateway_ip_str(subnet,
-                                                        subnet['cidr'])
-            subnet['allocation_pools'] = self._prepare_allocation_pools(
-                subnet['allocation_pools'],
-                subnet['cidr'],
-                subnet['gateway_ip'])
-
-        ipam_driver = driver.Pool.get_instance(subnetpool, context)
-        subnet_factory = ipam_driver.get_subnet_request_factory()
-        subnet_request = subnet_factory.get_request(context, subnet,
-                                                    subnetpool)
-        ipam_subnet = ipam_driver.allocate_subnet(subnet_request)
-        # get updated details with actually allocated subnet
-        subnet_request = ipam_subnet.get_details()
-
-        try:
-            subnet = self._save_subnet(context,
-                                       network,
-                                       self._make_subnet_args(
-                                           subnet_request,
-                                           subnet,
-                                           subnetpool_id),
-                                       subnet['dns_nameservers'],
-                                       subnet['host_routes'],
-                                       subnet_request)
-        except Exception:
-            # Note(pbondar): Third-party IPAM servers can't rely on
-            # transaction rollback, so an explicit rollback call is needed.
-            # The IPAM part is rolled back in this exception handler, while
-            # the subnet part is rolled back by the transaction rollback.
-            with excutils.save_and_reraise_exception():
-                LOG.debug("An exception occurred during subnet creation. "
-                          "Reverting subnet allocation.")
-                self.delete_subnet(context, subnet_request.subnet_id)
-        return subnet, ipam_subnet
diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py
deleted file mode 100644 (file)
index 65a7956..0000000
+++ /dev/null
@@ -1,568 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-import oslo_messaging
-import six
-import sqlalchemy as sa
-from sqlalchemy import func
-from sqlalchemy import or_
-from sqlalchemy import orm
-from sqlalchemy.orm import joinedload
-from sqlalchemy import sql
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.common import constants
-from neutron.common import utils as n_utils
-from neutron import context as n_ctx
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import l3_attrs_db
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import l3agentscheduler
-from neutron.extensions import portbindings
-from neutron.extensions import router_availability_zone as router_az
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-
-
-LOG = logging.getLogger(__name__)
-
-L3_AGENTS_SCHEDULER_OPTS = [
-    cfg.StrOpt('router_scheduler_driver',
-               default='neutron.scheduler.l3_agent_scheduler.'
-                       'LeastRoutersScheduler',
-               help=_('Driver to use for scheduling '
-                      'router to a default L3 agent')),
-    cfg.BoolOpt('router_auto_schedule', default=True,
-                help=_('Allow auto scheduling of routers to L3 agent.')),
-    cfg.BoolOpt('allow_automatic_l3agent_failover', default=False,
-                help=_('Automatically reschedule routers from offline L3 '
-                       'agents to online L3 agents.')),
-]
-
-cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS)
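-
-# The options above are registered in the DEFAULT group; in neutron.conf
-# they would look like this (illustrative values):
-#
-#     [DEFAULT]
-#     router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
-#     router_auto_schedule = True
-#     allow_automatic_l3agent_failover = True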
-
-# default messaging timeout is 60 sec, so 2 here is chosen to not block API
-# call for more than 2 minutes
-AGENT_NOTIFY_MAX_ATTEMPTS = 2
-
-
-class RouterL3AgentBinding(model_base.BASEV2):
-    """Represents binding between neutron routers and L3 agents."""
-
-    router_id = sa.Column(sa.String(36),
-                          sa.ForeignKey("routers.id", ondelete='CASCADE'),
-                          primary_key=True)
-    l3_agent = orm.relation(agents_db.Agent)
-    l3_agent_id = sa.Column(sa.String(36),
-                            sa.ForeignKey("agents.id", ondelete='CASCADE'),
-                            primary_key=True)
-
-
-class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
-                              agentschedulers_db.AgentSchedulerDbMixin):
-    """Mixin class to add l3 agent scheduler extension to plugins
-    using the l3 agent for routing.
-    """
-
-    router_scheduler = None
-
-    def start_periodic_l3_agent_status_check(self):
-        if not cfg.CONF.allow_automatic_l3agent_failover:
-            LOG.info(_LI("Skipping period L3 agent status check because "
-                         "automatic router rescheduling is disabled."))
-            return
-
-        self.add_agent_status_check(
-            self.reschedule_routers_from_down_agents)
-
-    def reschedule_routers_from_down_agents(self):
-        """Reschedule routers from down l3 agents if admin state is up."""
-        agent_dead_limit = self.agent_dead_limit_seconds()
-        self.wait_down_agents('L3', agent_dead_limit)
-        cutoff = self.get_cutoff_time(agent_dead_limit)
-
-        context = n_ctx.get_admin_context()
-        down_bindings = (
-            context.session.query(RouterL3AgentBinding).
-            join(agents_db.Agent).
-            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
-                   agents_db.Agent.admin_state_up).
-            outerjoin(l3_attrs_db.RouterExtraAttributes,
-                      l3_attrs_db.RouterExtraAttributes.router_id ==
-                      RouterL3AgentBinding.router_id).
-            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
-                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
-        try:
-            agents_back_online = set()
-            for binding in down_bindings:
-                if binding.l3_agent_id in agents_back_online:
-                    continue
-                else:
-                    agent = self._get_agent(context, binding.l3_agent_id)
-                    if agent.is_active:
-                        agents_back_online.add(binding.l3_agent_id)
-                        continue
-
-                agent_mode = self._get_agent_mode(binding.l3_agent)
-                if agent_mode == constants.L3_AGENT_MODE_DVR:
-                    # Rescheduling from an L3 DVR agent on a compute node
-                    # doesn't make sense. The router will be removed from
-                    # that agent once no DVR-serviceable ports remain on
-                    # that compute node.
-                    LOG.warn(_LW('L3 DVR agent on node %(host)s is down. '
-                                 'Not rescheduling from agent in \'dvr\' '
-                                 'mode.'), {'host': binding.l3_agent.host})
-                    continue
-                LOG.warn(_LW(
-                    "Rescheduling router %(router)s from agent %(agent)s "
-                    "because the agent did not report to the server in "
-                    "the last %(dead_time)s seconds."),
-                    {'router': binding.router_id,
-                     'agent': binding.l3_agent_id,
-                     'dead_time': agent_dead_limit})
-                try:
-                    self.reschedule_router(context, binding.router_id)
-                except (l3agentscheduler.RouterReschedulingFailed,
-                        oslo_messaging.RemoteError):
-                    # Catch individual router rescheduling errors here
-                    # so one broken one doesn't stop the iteration.
-                    LOG.exception(_LE("Failed to reschedule router %s"),
-                                  binding.router_id)
-        except Exception:
-            # we want to be thorough and catch whatever is raised
-            # to avoid loop abortion
-            LOG.exception(_LE("Exception encountered during router "
-                              "rescheduling."))
-
-    def _get_agent_mode(self, agent_db):
-        agent_conf = self.get_configuration_dict(agent_db)
-        return agent_conf.get(constants.L3_AGENT_MODE,
-                              constants.L3_AGENT_MODE_LEGACY)
-
-    def validate_agent_router_combination(self, context, agent, router):
-        """Validate if the router can be correctly assigned to the agent.
-
-        :raises: RouterL3AgentMismatch if attempting to assign DVR router
-          to legacy agent.
-        :raises: InvalidL3Agent if attempting to assign router to an
-          unsuitable agent (disabled, type != L3, incompatible configuration)
-        :raises: DVRL3CannotAssignToDvrAgent if attempting to assign a
-          router to an agent in 'dvr' mode.
-        """
-        if agent['agent_type'] != constants.AGENT_TYPE_L3:
-            raise l3agentscheduler.InvalidL3Agent(id=agent['id'])
-
-        agent_mode = self._get_agent_mode(agent)
-
-        if agent_mode == constants.L3_AGENT_MODE_DVR:
-            raise l3agentscheduler.DVRL3CannotAssignToDvrAgent()
-
-        if (agent_mode == constants.L3_AGENT_MODE_LEGACY and
-            router.get('distributed')):
-            raise l3agentscheduler.RouterL3AgentMismatch(
-                router_id=router['id'], agent_id=agent['id'])
-
-        is_suitable_agent = (
-            agentschedulers_db.services_available(agent['admin_state_up']) and
-            (self.get_l3_agent_candidates(context, router, [agent],
-                                          ignore_admin_state=True) or
-             self.get_snat_candidates(router, [agent])))
-        if not is_suitable_agent:
-            raise l3agentscheduler.InvalidL3Agent(id=agent['id'])
-
-    def check_l3_agent_router_binding(self, context, router_id, agent_id):
-        query = context.session.query(RouterL3AgentBinding)
-        bindings = query.filter_by(router_id=router_id,
-                                   l3_agent_id=agent_id).all()
-        return bool(bindings)
-
-    def check_agent_router_scheduling_needed(self, context, agent, router):
-        """Check if the router scheduling is needed.
-
-        :raises: RouterHostedByL3Agent if router is already assigned
-          to a different agent.
-        :returns: True if scheduling is needed, otherwise False
-        """
-        router_id = router['id']
-        agent_id = agent['id']
-        query = context.session.query(RouterL3AgentBinding)
-        bindings = query.filter_by(router_id=router_id).all()
-        if not bindings:
-            return True
-        for binding in bindings:
-            if binding.l3_agent_id == agent_id:
-                # router already bound to the agent we need
-                return False
-        if router.get('ha'):
-            return True
-        # legacy router case: router is already bound to some agent
-        raise l3agentscheduler.RouterHostedByL3Agent(
-            router_id=router_id,
-            agent_id=bindings[0].l3_agent_id)
-
-    def create_router_to_agent_binding(self, context, agent, router):
-        """Create router to agent binding."""
-        router_id = router['id']
-        agent_id = agent['id']
-        if self.router_scheduler:
-            try:
-                if router.get('ha'):
-                    plugin = manager.NeutronManager.get_service_plugins().get(
-                        service_constants.L3_ROUTER_NAT)
-                    self.router_scheduler.create_ha_port_and_bind(
-                        plugin, context, router['id'],
-                        router['tenant_id'], agent)
-                else:
-                    self.router_scheduler.bind_router(
-                        context, router_id, agent)
-            except db_exc.DBError:
-                raise l3agentscheduler.RouterSchedulingFailed(
-                    router_id=router_id, agent_id=agent_id)
-
-    def add_router_to_l3_agent(self, context, agent_id, router_id):
-        """Add a l3 agent to host a router."""
-        with context.session.begin(subtransactions=True):
-            router = self.get_router(context, router_id)
-            agent = self._get_agent(context, agent_id)
-            self.validate_agent_router_combination(context, agent, router)
-            if not self.check_agent_router_scheduling_needed(
-                    context, agent, router):
-                return
-        self.create_router_to_agent_binding(context, agent, router)
-
-        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
-        if l3_notifier:
-            l3_notifier.router_added_to_agent(
-                context, [router_id], agent.host)
-
-    def remove_router_from_l3_agent(self, context, agent_id, router_id):
-        """Remove the router from l3 agent.
-
-        After removal, the router will be non-hosted until there is update
-        which leads to re-schedule or be added to another agent manually.
-        """
-        agent = self._get_agent(context, agent_id)
-        self._unbind_router(context, router_id, agent_id)
-
-        router = self.get_router(context, router_id)
-        if router.get('ha'):
-            plugin = manager.NeutronManager.get_service_plugins().get(
-                service_constants.L3_ROUTER_NAT)
-            plugin.delete_ha_interfaces_on_host(context, router_id, agent.host)
-
-        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
-        if l3_notifier:
-            l3_notifier.router_removed_from_agent(
-                context, router_id, agent.host)
-
-    def _unbind_router(self, context, router_id, agent_id):
-        with context.session.begin(subtransactions=True):
-            query = context.session.query(RouterL3AgentBinding)
-            query = query.filter(
-                RouterL3AgentBinding.router_id == router_id,
-                RouterL3AgentBinding.l3_agent_id == agent_id)
-            query.delete()
-
-    def _unschedule_router(self, context, router_id, agents_ids):
-        with context.session.begin(subtransactions=True):
-            for agent_id in agents_ids:
-                self._unbind_router(context, router_id, agent_id)
-
-    def reschedule_router(self, context, router_id, candidates=None):
-        """Reschedule router to (a) new l3 agent(s)
-
-        Remove the router from the agent(s) currently hosting it and
-        schedule it again
-        """
-        cur_agents = self.list_l3_agents_hosting_router(
-            context, router_id)['agents']
-        with context.session.begin(subtransactions=True):
-            cur_agents_ids = [agent['id'] for agent in cur_agents]
-            self._unschedule_router(context, router_id, cur_agents_ids)
-
-            self.schedule_router(context, router_id, candidates=candidates)
-            new_agents = self.list_l3_agents_hosting_router(
-                context, router_id)['agents']
-            if not new_agents:
-                raise l3agentscheduler.RouterReschedulingFailed(
-                    router_id=router_id)
-
-        self._notify_agents_router_rescheduled(context, router_id,
-                                               cur_agents, new_agents)
-
-    def _notify_agents_router_rescheduled(self, context, router_id,
-                                          old_agents, new_agents):
-        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
-        if not l3_notifier:
-            return
-
-        old_hosts = [agent['host'] for agent in old_agents]
-        new_hosts = [agent['host'] for agent in new_agents]
-        for host in set(old_hosts) - set(new_hosts):
-            l3_notifier.router_removed_from_agent(
-                context, router_id, host)
-
-        for agent in new_agents:
-            # Make sure the agent is notified; otherwise unbind the router
-            # and fail the rescheduling.
-            for attempt in range(AGENT_NOTIFY_MAX_ATTEMPTS):
-                try:
-                    l3_notifier.router_added_to_agent(
-                        context, [router_id], agent['host'])
-                    break
-                except oslo_messaging.MessagingException:
-                    LOG.warning(_LW('Failed to notify L3 agent on host '
-                                    '%(host)s about added router. Attempt '
-                                    '%(attempt)d out of %(max_attempts)d'),
-                                {'host': agent['host'], 'attempt': attempt + 1,
-                                 'max_attempts': AGENT_NOTIFY_MAX_ATTEMPTS})
-            else:
-                self._unbind_router(context, router_id, agent['id'])
-                raise l3agentscheduler.RouterReschedulingFailed(
-                    router_id=router_id)
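-
-    # The loop above uses Python's for/else: the else branch runs only when
-    # the loop finishes without 'break', i.e. when every notification
-    # attempt failed. A minimal sketch (try_notify/give_up hypothetical):
-    #
-    #     for attempt in range(AGENT_NOTIFY_MAX_ATTEMPTS):
-    #         if try_notify():
-    #             break
-    #     else:
-    #         give_up()  # reached only if no attempt succeeded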
-
-    def list_routers_on_l3_agent(self, context, agent_id):
-        query = context.session.query(RouterL3AgentBinding.router_id)
-        query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id)
-
-        router_ids = [item[0] for item in query]
-        if router_ids:
-            return {'routers':
-                    self.get_routers(context, filters={'id': router_ids})}
-        else:
-            # Exception will be thrown if the requested agent does not exist.
-            self._get_agent(context, agent_id)
-            return {'routers': []}
-
-    def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
-                                               router_ids):
-        if n_utils.is_extension_supported(self,
-                                          constants.L3_HA_MODE_EXT_ALIAS):
-            return self.get_ha_sync_data_for_host(context, host, agent,
-                                                  router_ids=router_ids,
-                                                  active=True)
-
-        return self.get_sync_data(context, router_ids=router_ids, active=True)
-
-    def list_router_ids_on_host(self, context, host, router_ids=None):
-        agent = self._get_agent_by_type_and_host(
-            context, constants.AGENT_TYPE_L3, host)
-        if not agentschedulers_db.services_available(agent.admin_state_up):
-            return []
-        query = context.session.query(RouterL3AgentBinding.router_id)
-        query = query.filter(
-            RouterL3AgentBinding.l3_agent_id == agent.id)
-
-        if router_ids:
-            query = query.filter(
-                RouterL3AgentBinding.router_id.in_(router_ids))
-
-        return [item[0] for item in query]
-
-    def list_active_sync_routers_on_active_l3_agent(
-            self, context, host, router_ids):
-        router_ids = self.list_router_ids_on_host(context, host, router_ids)
-        if router_ids:
-            agent = self._get_agent_by_type_and_host(
-                context, constants.AGENT_TYPE_L3, host)
-            return self._get_active_l3_agent_routers_sync_data(context, host,
-                                                               agent,
-                                                               router_ids)
-        return []
-
-    def get_l3_agents_hosting_routers(self, context, router_ids,
-                                      admin_state_up=None,
-                                      active=None):
-        if not router_ids:
-            return []
-        query = context.session.query(RouterL3AgentBinding)
-        query = query.options(orm.contains_eager(
-                              RouterL3AgentBinding.l3_agent))
-        query = query.join(RouterL3AgentBinding.l3_agent)
-        query = query.filter(RouterL3AgentBinding.router_id.in_(router_ids))
-        if admin_state_up is not None:
-            query = (query.filter(agents_db.Agent.admin_state_up ==
-                                  admin_state_up))
-        l3_agents = [binding.l3_agent for binding in query]
-        if active is not None:
-            l3_agents = [l3_agent for l3_agent in
-                         l3_agents if not
-                         agents_db.AgentDbMixin.is_agent_down(
-                             l3_agent['heartbeat_timestamp'])]
-        return l3_agents
-
-    def _get_l3_bindings_hosting_routers(self, context, router_ids):
-        if not router_ids:
-            return []
-        query = context.session.query(RouterL3AgentBinding)
-        query = query.options(joinedload('l3_agent')).filter(
-            RouterL3AgentBinding.router_id.in_(router_ids))
-        return query.all()
-
-    def list_l3_agents_hosting_router(self, context, router_id):
-        with context.session.begin(subtransactions=True):
-            bindings = self._get_l3_bindings_hosting_routers(
-                context, [router_id])
-
-        return {'agents': [self._make_agent_dict(binding.l3_agent) for
-                           binding in bindings]}
-
-    def get_l3_agents(self, context, active=None, filters=None):
-        query = context.session.query(agents_db.Agent)
-        query = query.filter(
-            agents_db.Agent.agent_type == constants.AGENT_TYPE_L3)
-        if active is not None:
-            query = (query.filter(agents_db.Agent.admin_state_up == active))
-        if filters:
-            for key, value in six.iteritems(filters):
-                column = getattr(agents_db.Agent, key, None)
-                if column:
-                    if not value:
-                        return []
-                    query = query.filter(column.in_(value))
-
-            agent_modes = filters.get('agent_modes', [])
-            if agent_modes:
-                agent_mode_key = '\"agent_mode\": \"'
-                configuration_filter = (
-                    [agents_db.Agent.configurations.contains('%s%s\"' %
-                     (agent_mode_key, agent_mode))
-                     for agent_mode in agent_modes])
-                query = query.filter(or_(*configuration_filter))
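-                # e.g. (illustrative) agent_modes=['dvr'] matches agents
-                # whose JSON configurations string contains
-                # '"agent_mode": "dvr"'.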
-
-        return [l3_agent
-                for l3_agent in query
-                if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
-                    active, l3_agent)]
-
-    def check_dvr_serviceable_ports_on_host(
-            self, context, host, subnet_ids, except_port=None):
-        """Check for existence of dvr serviceable ports on host
-
-        :param context: request context
-        :param host: host to look ports on
-        :param subnet_ids: IDs of subnets to look ports on
-        :param except_port: ID of the port to ignore (used when checking if
-        DVR router should be removed from host before actual port remove)
-        :return:
-        """
-        # The DB query would return ports for all subnets if subnet_ids
-        # were empty, so check that first.
-        if not subnet_ids:
-            return False
-
-        core_plugin = manager.NeutronManager.get_plugin()
-        filters = {'fixed_ips': {'subnet_id': subnet_ids},
-                   portbindings.HOST_ID: [host]}
-        ports_query = core_plugin._get_ports_query(context, filters=filters)
-        owner_filter = or_(
-            models_v2.Port.device_owner.startswith(
-                constants.DEVICE_OWNER_COMPUTE_PREFIX),
-            models_v2.Port.device_owner.in_(
-                n_utils.get_other_dvr_serviced_device_owners()))
-        if except_port:
-            ports_query = ports_query.filter(models_v2.Port.id != except_port)
-        ports_query = ports_query.filter(owner_filter)
-        return ports_query.first() is not None
-
-    def get_l3_agent_candidates(self, context, sync_router, l3_agents,
-                                ignore_admin_state=False):
-        """Get the valid l3 agents for the router from a list of l3_agents."""
-        candidates = []
-        is_router_distributed = sync_router.get('distributed', False)
-        if is_router_distributed:
-            subnet_ids = self.get_subnet_ids_on_router(
-                context, sync_router['id'])
-        for l3_agent in l3_agents:
-            if not ignore_admin_state and not l3_agent.admin_state_up:
-                # ignore_admin_state=True comes from manual scheduling,
-                # where the admin_state_up check has already been done.
-                continue
-            agent_conf = self.get_configuration_dict(l3_agent)
-            router_id = agent_conf.get('router_id', None)
-            handle_internal_only_routers = agent_conf.get(
-                'handle_internal_only_routers', True)
-            gateway_external_network_id = agent_conf.get(
-                'gateway_external_network_id', None)
-            agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
-                                        constants.L3_AGENT_MODE_LEGACY)
-            if router_id and router_id != sync_router['id']:
-                continue
-            ex_net_id = (sync_router['external_gateway_info'] or {}).get(
-                'network_id')
-            if ((not ex_net_id and not handle_internal_only_routers) or
-                (ex_net_id and gateway_external_network_id and
-                 ex_net_id != gateway_external_network_id)):
-                continue
-            if (not is_router_distributed and
-                    agent_mode in (constants.L3_AGENT_MODE_LEGACY,
-                                   constants.L3_AGENT_MODE_DVR_SNAT)):
-                candidates.append(l3_agent)
-            elif (is_router_distributed and subnet_ids and
-                    agent_mode.startswith(constants.L3_AGENT_MODE_DVR) and (
-                        self.check_dvr_serviceable_ports_on_host(
-                            context, l3_agent['host'], subnet_ids))):
-                candidates.append(l3_agent)
-        return candidates
-
-    def auto_schedule_routers(self, context, host, router_ids):
-        if self.router_scheduler:
-            return self.router_scheduler.auto_schedule_routers(
-                self, context, host, router_ids)
-
-    def schedule_router(self, context, router, candidates=None):
-        if self.router_scheduler:
-            return self.router_scheduler.schedule(
-                self, context, router, candidates=candidates)
-
-    def schedule_routers(self, context, routers):
-        """Schedule the routers to l3 agents."""
-        for router in routers:
-            self.schedule_router(context, router, candidates=None)
-
-    def get_l3_agent_with_min_routers(self, context, agent_ids):
-        """Return l3 agent with the least number of routers."""
-        if not agent_ids:
-            return None
-        query = context.session.query(
-            agents_db.Agent,
-            func.count(
-                RouterL3AgentBinding.router_id
-            ).label('count')).outerjoin(RouterL3AgentBinding).group_by(
-                agents_db.Agent.id,
-                RouterL3AgentBinding.l3_agent_id).order_by('count')
-        res = query.filter(agents_db.Agent.id.in_(agent_ids)).first()
-        return res[0]
-
-    def get_hosts_to_notify(self, context, router_id):
-        """Returns all hosts to send notification about router update"""
-        state = agentschedulers_db.get_admin_state_up_filter()
-        agents = self.get_l3_agents_hosting_routers(
-            context, [router_id], admin_state_up=state, active=True)
-        return [a.host for a in agents]
-
-
-class AZL3AgentSchedulerDbMixin(L3AgentSchedulerDbMixin,
-                                router_az.RouterAvailabilityZonePluginBase):
-    """Mixin class to add availability_zone supported l3 agent scheduler."""
-
-    def get_router_availability_zones(self, router):
-        return list({agent.availability_zone for agent in router.l3_agents})
diff --git a/neutron/db/l3_attrs_db.py b/neutron/db/l3_attrs_db.py
deleted file mode 100644 (file)
index 258f39d..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-
-from neutron.db import db_base_plugin_v2
-from neutron.db import model_base
-from neutron.extensions import l3
-
-
-class RouterExtraAttributes(model_base.BASEV2):
-    """Additional attributes for a Virtual Router."""
-
-    # NOTE(armando-migliaccio): this model can be a good place to
-    # add extension attributes to a Router model. Each case needs
-    # to be individually examined, however 'distributed' and other
-    # simple ones fit the pattern well.
-    __tablename__ = "router_extra_attributes"
-    router_id = sa.Column(sa.String(36),
-                          sa.ForeignKey('routers.id', ondelete="CASCADE"),
-                          primary_key=True)
-    # Whether the router is a legacy (centralized) or a distributed one
-    distributed = sa.Column(sa.Boolean, default=False,
-                            server_default=sa.sql.false(),
-                            nullable=False)
-    # Whether the router is to be considered a 'service' router
-    service_router = sa.Column(sa.Boolean, default=False,
-                               server_default=sa.sql.false(),
-                               nullable=False)
-    ha = sa.Column(sa.Boolean, default=False,
-                   server_default=sa.sql.false(),
-                   nullable=False)
-    ha_vr_id = sa.Column(sa.Integer())
-    # Availability Zone support
-    availability_zone_hints = sa.Column(sa.String(255))
-
-    router = orm.relationship(
-        'Router',
-        backref=orm.backref("extra_attributes", lazy='joined',
-                            uselist=False, cascade='delete'))
-
-
-class ExtraAttributesMixin(object):
-    """Mixin class to enable router's extra attributes."""
-
-    extra_attributes = []
-
-    def _extend_extra_router_dict(self, router_res, router_db):
-        extra_attrs = router_db['extra_attributes'] or {}
-        for attr in self.extra_attributes:
-            name = attr['name']
-            default = attr['default']
-            router_res[name] = (
-                extra_attrs[name] if name in extra_attrs else default)
-
-    def _get_extra_attributes(self, router, extra_attributes):
-        return (dict((attr['name'],
-                      router.get(attr['name'], attr['default']))
-                for attr in extra_attributes))
-
-    def _process_extra_attr_router_create(
-        self, context, router_db, router_req):
-        kwargs = self._get_extra_attributes(router_req, self.extra_attributes)
-        # extra_attributes reference is populated via backref
-        if not router_db['extra_attributes']:
-            attributes_db = RouterExtraAttributes(
-                router_id=router_db['id'], **kwargs)
-            context.session.add(attributes_db)
-            router_db['extra_attributes'] = attributes_db
-        else:
-            # The record will exist if RouterExtraAttributes model's
-            # attributes are added with db migrations over time
-            router_db['extra_attributes'].update(kwargs)
-
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        l3.ROUTERS, ['_extend_extra_router_dict'])
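-
-
-# Editor's note: a hedged usage sketch, not part of this file. Subclasses
-# extend ``extra_attributes`` with {'name': ..., 'default': ...} entries;
-# for example, a DVR-style mixin could register the 'distributed' flag
-# roughly like this:
-#
-#     class L3WithExtraAttrs(ExtraAttributesMixin):
-#         extra_attributes = ExtraAttributesMixin.extra_attributes + [
-#             {'name': 'distributed', 'default': False}]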
diff --git a/neutron/db/l3_db.py b/neutron/db/l3_db.py
deleted file mode 100644 (file)
index c116da9..0000000
+++ /dev/null
@@ -1,1539 +0,0 @@
-# Copyright 2012 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import itertools
-import netaddr
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc
-
-from oslo_utils import excutils
-import six
-
-from neutron._i18n import _, _LI
-from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
-from neutron.api.v2 import attributes
-from neutron.callbacks import events
-from neutron.callbacks import exceptions
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants as l3_constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.common import rpc as n_rpc
-from neutron.common import utils
-from neutron.db import l3_agentschedulers_db as l3_agt
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import external_net
-from neutron.extensions import l3
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.plugins.common import utils as p_utils
-
-LOG = logging.getLogger(__name__)
-
-
-DEVICE_OWNER_ROUTER_INTF = l3_constants.DEVICE_OWNER_ROUTER_INTF
-DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW
-DEVICE_OWNER_FLOATINGIP = l3_constants.DEVICE_OWNER_FLOATINGIP
-EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
-
-# Maps API field names to DB column names.
-# API parameter names and database column names may differ;
-# this map keeps API filters consistent with database queries.
-API_TO_DB_COLUMN_MAP = {'port_id': 'fixed_port_id'}
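-# (Editor's illustration: an API filter such as {'port_id': [pid]} is
-# rewritten to {'fixed_port_id': [pid]} before querying; see
-# get_floatingips below.)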
-CORE_ROUTER_ATTRS = ('id', 'name', 'tenant_id', 'admin_state_up', 'status')
-
-
-class RouterPort(model_base.BASEV2):
-    router_id = sa.Column(
-        sa.String(36),
-        sa.ForeignKey('routers.id', ondelete="CASCADE"),
-        primary_key=True)
-    port_id = sa.Column(
-        sa.String(36),
-        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-        primary_key=True)
-    # The port_type attribute is redundant as the port table already specifies
-    # it in DEVICE_OWNER. However, this redundancy enables more efficient
-    # queries on router ports, and also prevents potential error-prone
-    # conditions which might originate from users altering the DEVICE_OWNER
-    # property of router ports.
-    port_type = sa.Column(sa.String(attributes.DEVICE_OWNER_MAX_LEN))
-    port = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref('routerport', uselist=False, cascade="all,delete"),
-        lazy='joined')
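-
-# Editor's note (illustrative sketch, not part of the original module):
-# the denormalized port_type column lets router interfaces be queried
-# without joining the ports table, for example:
-#
-#     qry = context.session.query(RouterPort).filter_by(
-#         router_id=router_id, port_type=DEVICE_OWNER_ROUTER_INTF)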
-
-
-class Router(model_base.HasStandardAttributes, model_base.BASEV2,
-             model_base.HasId, model_base.HasTenant):
-    """Represents a v2 neutron router."""
-
-    name = sa.Column(sa.String(attributes.NAME_MAX_LEN))
-    status = sa.Column(sa.String(16))
-    admin_state_up = sa.Column(sa.Boolean)
-    gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
-    gw_port = orm.relationship(models_v2.Port, lazy='joined')
-    attached_ports = orm.relationship(
-        RouterPort,
-        backref='router',
-        lazy='dynamic')
-    l3_agents = orm.relationship(
-        'Agent', lazy='joined', viewonly=True,
-        secondary=l3_agt.RouterL3AgentBinding.__table__)
-
-
-class FloatingIP(model_base.HasStandardAttributes, model_base.BASEV2,
-                 model_base.HasId, model_base.HasTenant):
-    """Represents a floating IP address.
-
-    This IP address may or may not be allocated to a tenant, and may or
-    may not be associated with an internal port/ip address/router.
-    """
-
-    floating_ip_address = sa.Column(sa.String(64), nullable=False)
-    floating_network_id = sa.Column(sa.String(36), nullable=False)
-    floating_port_id = sa.Column(sa.String(36),
-                                 sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                                 nullable=False)
-    fixed_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
-    fixed_ip_address = sa.Column(sa.String(64))
-    router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'))
-    # Additional attribute to keep track of the router with which the
-    # floating IP was associated, so consistency can be ensured even if an
-    # asynchronous backend is unavailable when the floating IP is
-    # disassociated.
-    last_known_router_id = sa.Column(sa.String(36))
-    status = sa.Column(sa.String(16))
-    router = orm.relationship(Router, backref='floating_ips')
-
-
-class L3_NAT_dbonly_mixin(l3.RouterPluginBase):
-    """Mixin class to add L3/NAT router methods to db_base_plugin_v2."""
-
-    router_device_owners = (
-        DEVICE_OWNER_ROUTER_INTF,
-        DEVICE_OWNER_ROUTER_GW,
-        DEVICE_OWNER_FLOATINGIP
-    )
-
-    @property
-    def _core_plugin(self):
-        return manager.NeutronManager.get_plugin()
-
-    def _get_router(self, context, router_id):
-        try:
-            router = self._get_by_id(context, Router, router_id)
-        except exc.NoResultFound:
-            raise l3.RouterNotFound(router_id=router_id)
-        return router
-
-    def _make_router_dict(self, router, fields=None, process_extensions=True):
-        res = dict((key, router[key]) for key in CORE_ROUTER_ATTRS)
-        if router['gw_port_id']:
-            ext_gw_info = {
-                'network_id': router.gw_port['network_id'],
-                'external_fixed_ips': [{'subnet_id': ip["subnet_id"],
-                                        'ip_address': ip["ip_address"]}
-                                       for ip in router.gw_port['fixed_ips']]}
-        else:
-            ext_gw_info = None
-        res.update({
-            EXTERNAL_GW_INFO: ext_gw_info,
-            'gw_port_id': router['gw_port_id'],
-        })
-        # NOTE(salv-orlando): The following assumes this mixin is used in a
-        # class inheriting from CommonDbMixin, which is true for all existing
-        # plugins.
-        if process_extensions:
-            self._apply_dict_extend_functions(l3.ROUTERS, res, router)
-        return self._fields(res, fields)
-
-    def _create_router_db(self, context, router, tenant_id):
-        """Create the DB object."""
-        with context.session.begin(subtransactions=True):
-            # pre-generate id so it will be available when
-            # configuring external gw port
-            router_db = Router(id=(router.get('id') or
-                                   uuidutils.generate_uuid()),
-                               tenant_id=tenant_id,
-                               name=router['name'],
-                               admin_state_up=router['admin_state_up'],
-                               status="ACTIVE")
-            context.session.add(router_db)
-            return router_db
-
-    def create_router(self, context, router):
-        r = router['router']
-        gw_info = r.pop(EXTERNAL_GW_INFO, None)
-        router_db = self._create_router_db(context, r, r['tenant_id'])
-        try:
-            if gw_info:
-                self._update_router_gw_info(context, router_db['id'],
-                                            gw_info, router=router_db)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.debug("Could not update gateway info, deleting router.")
-                self.delete_router(context, router_db.id)
-
-        return self._make_router_dict(router_db)
-
-    def _update_router_db(self, context, router_id, data):
-        """Update the DB object."""
-        with context.session.begin(subtransactions=True):
-            router_db = self._get_router(context, router_id)
-            if data:
-                router_db.update(data)
-            return router_db
-
-    def update_router(self, context, id, router):
-        r = router['router']
-        gw_info = r.pop(EXTERNAL_GW_INFO, attributes.ATTR_NOT_SPECIFIED)
-        # check whether router needs and can be rescheduled to the proper
-        # l3 agent (associated with given external network);
-        # do check before update in DB as an exception will be raised
-        # in case no proper l3 agent found
-        if gw_info != attributes.ATTR_NOT_SPECIFIED:
-            candidates = self._check_router_needs_rescheduling(
-                context, id, gw_info)
-            # Update the gateway outside of the DB update since it involves L2
-            # calls that don't make sense to rollback and may cause deadlocks
-            # in a transaction.
-            self._update_router_gw_info(context, id, gw_info)
-        else:
-            candidates = None
-        router_db = self._update_router_db(context, id, r)
-        if candidates:
-            l3_plugin = manager.NeutronManager.get_service_plugins().get(
-                constants.L3_ROUTER_NAT)
-            l3_plugin.reschedule_router(context, id, candidates)
-        return self._make_router_dict(router_db)
-
-    def _check_router_needs_rescheduling(self, context, router_id, gw_info):
-        """Checks whether router's l3 agent can handle the given network
-
-        When external_network_bridge is set, each L3 agent can be associated
-        with at most one external network. If router's new external gateway
-        is on other network then the router needs to be rescheduled to the
-        proper l3 agent.
-        If external_network_bridge is not set then the agent
-        can support multiple external networks and rescheduling is not needed
-
-        :return: list of candidate agents if rescheduling needed,
-        None otherwise; raises exception if there is no eligible l3 agent
-        associated with target external network
-        """
-        # TODO(obondarev): rethink placement of this func as l3 db manager is
-        # not really a proper place for agent scheduling stuff
-        network_id = gw_info.get('network_id') if gw_info else None
-        if not network_id:
-            return
-
-        nets = self._core_plugin.get_networks(
-            context, {external_net.EXTERNAL: [True]})
-        # nothing to do if there is only one external network
-        if len(nets) <= 1:
-            return
-
-        # first get plugin supporting l3 agent scheduling
-        # (either l3 service plugin or core_plugin)
-        l3_plugin = manager.NeutronManager.get_service_plugins().get(
-            constants.L3_ROUTER_NAT)
-        if (not utils.is_extension_supported(
-                l3_plugin,
-                l3_constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or
-            l3_plugin.router_scheduler is None):
-            # that might mean that we are dealing with non-agent-based
-            # implementation of l3 services
-            return
-
-        cur_agents = l3_plugin.list_l3_agents_hosting_router(
-            context, router_id)['agents']
-        for agent in cur_agents:
-            ext_net_id = agent['configurations'].get(
-                'gateway_external_network_id')
-            ext_bridge = agent['configurations'].get(
-                'external_network_bridge', 'br-ex')
-            if (ext_net_id == network_id or
-                    (not ext_net_id and not ext_bridge)):
-                return
-
-        # otherwise find l3 agent with matching gateway_external_network_id
-        active_agents = l3_plugin.get_l3_agents(context, active=True)
-        router = {
-            'id': router_id,
-            'external_gateway_info': {'network_id': network_id}
-        }
-        candidates = l3_plugin.get_l3_agent_candidates(context,
-                                                       router,
-                                                       active_agents)
-        if not candidates:
-            msg = (_('No eligible l3 agent associated with external network '
-                     '%s found') % network_id)
-            raise n_exc.BadRequest(resource='router', msg=msg)
-
-        return candidates
-
-    def _create_router_gw_port(self, context, router, network_id, ext_ips):
-        # The port has no 'tenant_id', as it is hidden from the user
-        port_data = {'tenant_id': '',  # intentionally not set
-                     'network_id': network_id,
-                     'fixed_ips': ext_ips or attributes.ATTR_NOT_SPECIFIED,
-                     'device_id': router['id'],
-                     'device_owner': DEVICE_OWNER_ROUTER_GW,
-                     'admin_state_up': True,
-                     'name': ''}
-        gw_port = p_utils.create_port(self._core_plugin,
-                                      context.elevated(), {'port': port_data})
-
-        if not gw_port['fixed_ips']:
-            LOG.debug('No IPs available for external network %s',
-                      network_id)
-
-        with context.session.begin(subtransactions=True):
-            router.gw_port = self._core_plugin._get_port(context.elevated(),
-                                                         gw_port['id'])
-            router_port = RouterPort(
-                router_id=router.id,
-                port_id=gw_port['id'],
-                port_type=DEVICE_OWNER_ROUTER_GW
-            )
-            context.session.add(router)
-            context.session.add(router_port)
-
-    def _validate_gw_info(self, context, gw_port, info, ext_ips):
-        network_id = info['network_id'] if info else None
-        if network_id:
-            network_db = self._core_plugin._get_network(context, network_id)
-            if not network_db.external:
-                msg = _("Network %s is not an external network") % network_id
-                raise n_exc.BadRequest(resource='router', msg=msg)
-            if ext_ips:
-                subnets = self._core_plugin.get_subnets_by_network(context,
-                                                                   network_id)
-                for s in subnets:
-                    if not s['gateway_ip']:
-                        continue
-                    for ext_ip in ext_ips:
-                        if ext_ip.get('ip_address') == s['gateway_ip']:
-                            msg = _("External IP %s is the same as the "
-                                    "gateway IP") % ext_ip.get('ip_address')
-                            raise n_exc.BadRequest(resource='router', msg=msg)
-        return network_id
-
-    def _delete_current_gw_port(self, context, router_id, router, new_network):
-        """Delete gw port if attached to an old network."""
-        port_requires_deletion = (
-            router.gw_port and router.gw_port['network_id'] != new_network)
-        if not port_requires_deletion:
-            return
-        admin_ctx = context.elevated()
-
-        if self.get_floatingips_count(
-            admin_ctx, {'router_id': [router_id]}):
-            raise l3.RouterExternalGatewayInUseByFloatingIp(
-                router_id=router_id, net_id=router.gw_port['network_id'])
-        with context.session.begin(subtransactions=True):
-            gw_port = router.gw_port
-            router.gw_port = None
-            context.session.add(router)
-            context.session.expire(gw_port)
-            self._check_router_gw_port_in_use(context, router_id)
-        self._core_plugin.delete_port(
-            admin_ctx, gw_port['id'], l3_port_check=False)
-
-    def _check_router_gw_port_in_use(self, context, router_id):
-        try:
-            kwargs = {'context': context, 'router_id': router_id}
-            registry.notify(
-                resources.ROUTER_GATEWAY, events.BEFORE_DELETE, self, **kwargs)
-        except exceptions.CallbackFailure as e:
-            with excutils.save_and_reraise_exception():
-                # NOTE(armax): preserve old check's behavior
-                if len(e.errors) == 1:
-                    raise e.errors[0].error
-                raise l3.RouterInUse(router_id=router_id, reason=e)
-
-    def _create_gw_port(self, context, router_id, router, new_network,
-                        ext_ips):
-        new_valid_gw_port_attachment = (
-            new_network and (not router.gw_port or
-                             router.gw_port['network_id'] != new_network))
-        if new_valid_gw_port_attachment:
-            subnets = self._core_plugin.get_subnets_by_network(context,
-                                                               new_network)
-            for subnet in subnets:
-                self._check_for_dup_router_subnet(context, router,
-                                                  new_network, subnet['id'],
-                                                  subnet['cidr'])
-            self._create_router_gw_port(context, router, new_network, ext_ips)
-
-    def _update_current_gw_port(self, context, router_id, router, ext_ips):
-        self._core_plugin.update_port(context, router.gw_port['id'], {'port':
-                                      {'fixed_ips': ext_ips}})
-        context.session.expire(router.gw_port)
-
-    def _update_router_gw_info(self, context, router_id, info, router=None):
-        # TODO(salvatore-orlando): guarantee atomic behavior also across
-        # operations that span beyond the model classes handled by this
-        # class (e.g.: delete_port)
-        router = router or self._get_router(context, router_id)
-        gw_port = router.gw_port
-        ext_ips = info.get('external_fixed_ips') if info else []
-        ext_ip_change = self._check_for_external_ip_change(
-            context, gw_port, ext_ips)
-        network_id = self._validate_gw_info(context, gw_port, info, ext_ips)
-        if gw_port and ext_ip_change and gw_port['network_id'] == network_id:
-            self._update_current_gw_port(context, router_id, router,
-                                         ext_ips)
-        else:
-            self._delete_current_gw_port(context, router_id, router,
-                                         network_id)
-            self._create_gw_port(context, router_id, router, network_id,
-                                 ext_ips)
-
-    def _check_for_external_ip_change(self, context, gw_port, ext_ips):
-        # determine if new external IPs differ from the existing fixed_ips
-        if not ext_ips:
-            # no external_fixed_ips were included
-            return False
-        if not gw_port:
-            return True
-
-        subnet_ids = set(ip['subnet_id'] for ip in gw_port['fixed_ips'])
-        new_subnet_ids = set(f['subnet_id'] for f in ext_ips
-                             if f.get('subnet_id'))
-        subnet_change = new_subnet_ids != subnet_ids
-        if subnet_change:
-            return True
-        ip_addresses = set(ip['ip_address'] for ip in gw_port['fixed_ips'])
-        new_ip_addresses = set(f['ip_address'] for f in ext_ips
-                               if f.get('ip_address'))
-        ip_address_change = ip_addresses != new_ip_addresses
-        return ip_address_change
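-
-    # Worked example (editor's note): with gw_port['fixed_ips'] ==
-    # [{'subnet_id': 's1', 'ip_address': '10.0.0.4'}], passing ext_ips of
-    # [{'subnet_id': 's1', 'ip_address': '10.0.0.4'}] returns False,
-    # while [{'subnet_id': 's2', 'ip_address': '10.0.0.4'}] (subnet
-    # change) or [{'subnet_id': 's1', 'ip_address': '10.0.0.5'}]
-    # (address change) returns True.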
-
-    def _ensure_router_not_in_use(self, context, router_id):
-        """Ensure that no internal network interface is attached
-        to the router.
-        """
-        router = self._get_router(context, router_id)
-        device_owner = self._get_device_owner(context, router)
-        if any(rp.port_type == device_owner
-               for rp in router.attached_ports.all()):
-            raise l3.RouterInUse(router_id=router_id)
-        return router
-
-    def delete_router(self, context, id):
-
-        # TODO(nati): Refactor this when we have a router insertion model
-        router = self._ensure_router_not_in_use(context, id)
-        self._delete_current_gw_port(context, id, router, None)
-
-        router_ports = router.attached_ports.all()
-        for rp in router_ports:
-            self._core_plugin.delete_port(context.elevated(),
-                                          rp.port.id,
-                                          l3_port_check=False)
-        with context.session.begin(subtransactions=True):
-            context.session.delete(router)
-
-    def get_router(self, context, id, fields=None):
-        router = self._get_router(context, id)
-        return self._make_router_dict(router, fields)
-
-    def get_routers(self, context, filters=None, fields=None,
-                    sorts=None, limit=None, marker=None,
-                    page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'router', limit, marker)
-        return self._get_collection(context, Router,
-                                    self._make_router_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts,
-                                    limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def get_routers_count(self, context, filters=None):
-        return self._get_collection_count(context, Router,
-                                          filters=filters)
-
-    def _check_for_dup_router_subnet(self, context, router,
-                                     network_id, subnet_id, subnet_cidr):
-        try:
-            # It's possible these ports are on the same network, but
-            # different subnets.
-            new_ipnet = netaddr.IPNetwork(subnet_cidr)
-            for p in (rp.port for rp in router.attached_ports):
-                for ip in p['fixed_ips']:
-                    if ip['subnet_id'] == subnet_id:
-                        msg = (_("Router already has a port on subnet %s")
-                               % subnet_id)
-                        raise n_exc.BadRequest(resource='router', msg=msg)
-                    # Ignore temporary Prefix Delegation CIDRs
-                    if subnet_cidr == l3_constants.PROVISIONAL_IPV6_PD_PREFIX:
-                        continue
-                    sub_id = ip['subnet_id']
-                    cidr = self._core_plugin.get_subnet(context.elevated(),
-                                                        sub_id)['cidr']
-                    ipnet = netaddr.IPNetwork(cidr)
-                    match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr])
-                    match2 = netaddr.all_matching_cidrs(ipnet, [subnet_cidr])
-                    if match1 or match2:
-                        data = {'subnet_cidr': subnet_cidr,
-                                'subnet_id': subnet_id,
-                                'cidr': cidr,
-                                'sub_id': sub_id}
-                        msg = (_("Cidr %(subnet_cidr)s of subnet "
-                                 "%(subnet_id)s overlaps with cidr %(cidr)s "
-                                 "of subnet %(sub_id)s") % data)
-                        raise n_exc.BadRequest(resource='router', msg=msg)
-        except exc.NoResultFound:
-            pass
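-
-    # Editor's sketch of the overlap test above, runnable standalone:
-    #
-    #     import netaddr
-    #     new_ipnet = netaddr.IPNetwork('10.0.0.0/24')
-    #     netaddr.all_matching_cidrs(new_ipnet, ['10.0.0.0/16'])
-    #     # -> [IPNetwork('10.0.0.0/16')]; a non-empty result means the
-    #     # CIDRs overlap, so BadRequest is raised.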
-
-    def _get_device_owner(self, context, router=None):
-        """Get device_owner for the specified router."""
-        # NOTE(armando-migliaccio): in the base case this is invariant
-        return DEVICE_OWNER_ROUTER_INTF
-
-    def _validate_interface_info(self, interface_info, for_removal=False):
-        port_id_specified = interface_info and 'port_id' in interface_info
-        subnet_id_specified = interface_info and 'subnet_id' in interface_info
-        if not (port_id_specified or subnet_id_specified):
-            msg = _("Either subnet_id or port_id must be specified")
-            raise n_exc.BadRequest(resource='router', msg=msg)
-        if not for_removal:
-            if port_id_specified and subnet_id_specified:
-                msg = _("Cannot specify both subnet-id and port-id")
-                raise n_exc.BadRequest(resource='router', msg=msg)
-        return port_id_specified, subnet_id_specified
-
-    def _check_router_port(self, context, port_id, device_id):
-        port = self._core_plugin.get_port(context, port_id)
-        if port['device_id'] != device_id:
-            raise n_exc.PortInUse(net_id=port['network_id'],
-                                  port_id=port['id'],
-                                  device_id=port['device_id'])
-        if not port['fixed_ips']:
-            msg = _('Router port must have at least one fixed IP')
-            raise n_exc.BadRequest(resource='router', msg=msg)
-        return port
-
-    def _add_interface_by_port(self, context, router, port_id, owner):
-        # Update owner before actual process in order to avoid the
-        # case where a port might get attached to a router without the
-        # owner successfully updating due to an unavailable backend.
-        self._check_router_port(context, port_id, '')
-        self._core_plugin.update_port(
-            context, port_id, {'port': {'device_id': router.id,
-                                        'device_owner': owner}})
-
-        with context.session.begin(subtransactions=True):
-            # check again within transaction to mitigate race
-            port = self._check_router_port(context, port_id, router.id)
-
-            # Only allow one router port with IPv6 subnets per network id
-            if self._port_has_ipv6_address(port):
-                for existing_port in (rp.port for rp in router.attached_ports):
-                    if (existing_port['network_id'] == port['network_id'] and
-                            self._port_has_ipv6_address(existing_port)):
-                        msg = _("Cannot have multiple router ports with the "
-                                "same network id if both contain IPv6 "
-                                "subnets. Existing port %(p)s has IPv6 "
-                                "subnet(s) and network id %(nid)s")
-                        raise n_exc.BadRequest(resource='router', msg=msg % {
-                            'p': existing_port['id'],
-                            'nid': existing_port['network_id']})
-
-            fixed_ips = [ip for ip in port['fixed_ips']]
-            subnets = []
-            for fixed_ip in fixed_ips:
-                subnet = self._core_plugin.get_subnet(context,
-                                                      fixed_ip['subnet_id'])
-                subnets.append(subnet)
-                self._check_for_dup_router_subnet(context, router,
-                                                  port['network_id'],
-                                                  subnet['id'],
-                                                  subnet['cidr'])
-
-            # Keep the restriction against multiple IPv4 subnets
-            if len([s for s in subnets if s['ip_version'] == 4]) > 1:
-                msg = _("Cannot have multiple "
-                        "IPv4 subnets on router port")
-                raise n_exc.BadRequest(resource='router', msg=msg)
-            return port, subnets
-
-    def _port_has_ipv6_address(self, port):
-        for fixed_ip in port['fixed_ips']:
-            if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6:
-                return True
-
-    def _find_ipv6_router_port_by_network(self, router, net_id):
-        for port in router.attached_ports:
-            p = port['port']
-            if p['network_id'] == net_id and self._port_has_ipv6_address(p):
-                return port
-
-    def _add_interface_by_subnet(self, context, router, subnet_id, owner):
-        subnet = self._core_plugin.get_subnet(context, subnet_id)
-        if not subnet['gateway_ip']:
-            msg = _('Subnet for router interface must have a gateway IP')
-            raise n_exc.BadRequest(resource='router', msg=msg)
-        if (subnet['ip_version'] == 6 and subnet['ipv6_ra_mode'] is None
-                and subnet['ipv6_address_mode'] is not None):
-            msg = (_('IPv6 subnet %s configured to receive RAs from an '
-                   'external router cannot be added to Neutron Router.') %
-                   subnet['id'])
-            raise n_exc.BadRequest(resource='router', msg=msg)
-        self._check_for_dup_router_subnet(context, router,
-                                          subnet['network_id'],
-                                          subnet_id,
-                                          subnet['cidr'])
-        fixed_ip = {'ip_address': subnet['gateway_ip'],
-                    'subnet_id': subnet['id']}
-
-        if (subnet['ip_version'] == 6 and not
-            ipv6_utils.is_ipv6_pd_enabled(subnet)):
-            # Add new prefix to an existing ipv6 port with the same network id
-            # if one exists
-            port = self._find_ipv6_router_port_by_network(router,
-                                                          subnet['network_id'])
-            if port:
-                fixed_ips = list(port['port']['fixed_ips'])
-                fixed_ips.append(fixed_ip)
-                return self._core_plugin.update_port(context,
-                        port['port_id'], {'port':
-                            {'fixed_ips': fixed_ips}}), [subnet], False
-
-        port_data = {'tenant_id': subnet['tenant_id'],
-                     'network_id': subnet['network_id'],
-                     'fixed_ips': [fixed_ip],
-                     'admin_state_up': True,
-                     'device_id': router.id,
-                     'device_owner': owner,
-                     'name': ''}
-        return p_utils.create_port(self._core_plugin, context,
-                                   {'port': port_data}), [subnet], True
-
-    @staticmethod
-    def _make_router_interface_info(
-            router_id, tenant_id, port_id, subnet_id, subnet_ids):
-        return {
-            'id': router_id,
-            'tenant_id': tenant_id,
-            'port_id': port_id,
-            'subnet_id': subnet_id,  # deprecated by IPv6 multi-prefix
-            'subnet_ids': subnet_ids
-        }
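-
-    # Example return value (editor's illustration; values are placeholders):
-    #
-    #     {'id': '<router-uuid>', 'tenant_id': '<tenant-id>',
-    #      'port_id': '<port-uuid>', 'subnet_id': '<subnet-uuid>',
-    #      'subnet_ids': ['<subnet-uuid>', ...]}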
-
-    def add_router_interface(self, context, router_id, interface_info):
-        router = self._get_router(context, router_id)
-        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
-        device_owner = self._get_device_owner(context, router_id)
-
-        # This should be True unless adding an IPv6 prefix to an existing port
-        new_port = True
-
-        if add_by_port:
-            port, subnets = self._add_interface_by_port(
-                    context, router, interface_info['port_id'], device_owner)
-        # add_by_subnet is not used here, because the validation logic of
-        # _validate_interface_info ensures that exactly one of add_by_*
-        # is True.
-        else:
-            port, subnets, new_port = self._add_interface_by_subnet(
-                    context, router, interface_info['subnet_id'], device_owner)
-
-        if new_port:
-            with context.session.begin(subtransactions=True):
-                router_port = RouterPort(
-                    port_id=port['id'],
-                    router_id=router.id,
-                    port_type=device_owner
-                )
-                context.session.add(router_port)
-
-        return self._make_router_interface_info(
-            router.id, port['tenant_id'], port['id'], subnets[-1]['id'],
-            [subnet['id'] for subnet in subnets])
-
-    def _confirm_router_interface_not_in_use(self, context, router_id,
-                                             subnet_id):
-        subnet = self._core_plugin.get_subnet(context, subnet_id)
-        subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
-        fip_qry = context.session.query(FloatingIP)
-        try:
-            kwargs = {'context': context, 'subnet_id': subnet_id}
-            registry.notify(
-                resources.ROUTER_INTERFACE,
-                events.BEFORE_DELETE, self, **kwargs)
-        except exceptions.CallbackFailure as e:
-            with excutils.save_and_reraise_exception():
-                # NOTE(armax): preserve old check's behavior
-                if len(e.errors) == 1:
-                    raise e.errors[0].error
-                raise l3.RouterInUse(router_id=router_id, reason=e)
-        for fip_db in fip_qry.filter_by(router_id=router_id):
-            if netaddr.IPAddress(fip_db['fixed_ip_address']) in subnet_cidr:
-                raise l3.RouterInterfaceInUseByFloatingIP(
-                    router_id=router_id, subnet_id=subnet_id)
-
-    def _remove_interface_by_port(self, context, router_id,
-                                  port_id, subnet_id, owner):
-        qry = context.session.query(RouterPort)
-        qry = qry.filter_by(
-            port_id=port_id,
-            router_id=router_id,
-            port_type=owner
-        )
-        try:
-            port_db = qry.one().port
-        except exc.NoResultFound:
-            raise l3.RouterInterfaceNotFound(router_id=router_id,
-                                             port_id=port_id)
-        port_subnet_ids = [fixed_ip['subnet_id']
-                           for fixed_ip in port_db['fixed_ips']]
-        if subnet_id and subnet_id not in port_subnet_ids:
-            raise n_exc.SubnetMismatchForPort(
-                port_id=port_id, subnet_id=subnet_id)
-        subnets = [self._core_plugin.get_subnet(context, port_subnet_id)
-                   for port_subnet_id in port_subnet_ids]
-        for port_subnet_id in port_subnet_ids:
-            self._confirm_router_interface_not_in_use(
-                    context, router_id, port_subnet_id)
-        self._core_plugin.delete_port(context, port_db['id'],
-                                      l3_port_check=False)
-        return (port_db, subnets)
-
-    def _remove_interface_by_subnet(self, context,
-                                    router_id, subnet_id, owner):
-        self._confirm_router_interface_not_in_use(
-            context, router_id, subnet_id)
-        subnet = self._core_plugin.get_subnet(context, subnet_id)
-
-        try:
-            rport_qry = context.session.query(models_v2.Port).join(RouterPort)
-            ports = rport_qry.filter(
-                RouterPort.router_id == router_id,
-                RouterPort.port_type == owner,
-                models_v2.Port.network_id == subnet['network_id']
-            )
-
-            for p in ports:
-                port_subnets = [fip['subnet_id'] for fip in p['fixed_ips']]
-                if subnet_id in port_subnets and len(port_subnets) > 1:
-                    # multiple prefix port - delete prefix from port
-                    fixed_ips = [fip for fip in p['fixed_ips'] if
-                            fip['subnet_id'] != subnet_id]
-                    self._core_plugin.update_port(context, p['id'],
-                            {'port':
-                                {'fixed_ips': fixed_ips}})
-                    return (p, [subnet])
-                elif subnet_id in port_subnets:
-                    # only one subnet on port - delete the port
-                    self._core_plugin.delete_port(context, p['id'],
-                                                  l3_port_check=False)
-                    return (p, [subnet])
-        except exc.NoResultFound:
-            pass
-        raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
-                                                  subnet_id=subnet_id)
-
-    def remove_router_interface(self, context, router_id, interface_info):
-        remove_by_port, remove_by_subnet = (
-            self._validate_interface_info(interface_info, for_removal=True)
-        )
-        port_id = interface_info.get('port_id')
-        subnet_id = interface_info.get('subnet_id')
-        device_owner = self._get_device_owner(context, router_id)
-        if remove_by_port:
-            port, subnets = self._remove_interface_by_port(context, router_id,
-                                                           port_id, subnet_id,
-                                                           device_owner)
-        # remove_by_subnet is not used here, because the validation logic
-        # of _validate_interface_info ensures that at least one of
-        # remove_by_* is True.
-        else:
-            port, subnets = self._remove_interface_by_subnet(
-                    context, router_id, subnet_id, device_owner)
-
-        return self._make_router_interface_info(router_id, port['tenant_id'],
-                                                port['id'], subnets[0]['id'],
-                                                [subnet['id'] for subnet in
-                                                    subnets])
-
-    def _get_floatingip(self, context, id):
-        try:
-            floatingip = self._get_by_id(context, FloatingIP, id)
-        except exc.NoResultFound:
-            raise l3.FloatingIPNotFound(floatingip_id=id)
-        return floatingip
-
-    def _make_floatingip_dict(self, floatingip, fields=None):
-        res = {'id': floatingip['id'],
-               'tenant_id': floatingip['tenant_id'],
-               'floating_ip_address': floatingip['floating_ip_address'],
-               'floating_network_id': floatingip['floating_network_id'],
-               'router_id': floatingip['router_id'],
-               'port_id': floatingip['fixed_port_id'],
-               'fixed_ip_address': floatingip['fixed_ip_address'],
-               'status': floatingip['status']}
-        return self._fields(res, fields)
-
-    def _get_router_for_floatingip(self, context, internal_port,
-                                   internal_subnet_id,
-                                   external_network_id):
-        subnet = self._core_plugin.get_subnet(context, internal_subnet_id)
-        if not subnet['gateway_ip']:
-            msg = (_('Cannot add floating IP to port on subnet %s '
-                     'which has no gateway_ip') % internal_subnet_id)
-            raise n_exc.BadRequest(resource='floatingip', msg=msg)
-
-        # Find routers(with router_id and interface address) that
-        # connect given internal subnet and the external network.
-        # Among them, if the router's interface address matches
-        # with subnet's gateway-ip, return that router.
-        # Otherwise return the first router.
-        gw_port = orm.aliased(models_v2.Port, name="gw_port")
-        routerport_qry = context.session.query(
-            RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
-            models_v2.Port, models_v2.IPAllocation).filter(
-            models_v2.Port.network_id == internal_port['network_id'],
-            RouterPort.port_type.in_(l3_constants.ROUTER_INTERFACE_OWNERS),
-            models_v2.IPAllocation.subnet_id == internal_subnet_id
-        ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
-            gw_port.network_id == external_network_id).distinct()
-
-        first_router_id = None
-        for router_id, interface_ip in routerport_qry:
-            if interface_ip == subnet['gateway_ip']:
-                return router_id
-            if not first_router_id:
-                first_router_id = router_id
-        if first_router_id:
-            return first_router_id
-
-        raise l3.ExternalGatewayForFloatingIPNotFound(
-            subnet_id=internal_subnet_id,
-            external_network_id=external_network_id,
-            port_id=internal_port['id'])
-
-    def _port_ipv4_fixed_ips(self, port):
-        return [ip for ip in port['fixed_ips']
-                if netaddr.IPAddress(ip['ip_address']).version == 4]
-
-    def _internal_fip_assoc_data(self, context, fip):
-        """Retrieve internal port data for floating IP.
-
-        Retrieve information about the internal port to which
-        the floating IP should be associated.
-        """
-        internal_port = self._core_plugin.get_port(context, fip['port_id'])
-        if internal_port['tenant_id'] != fip['tenant_id']:
-            port_id = fip['port_id']
-            if 'id' in fip:
-                floatingip_id = fip['id']
-                data = {'port_id': port_id,
-                        'floatingip_id': floatingip_id}
-                msg = (_('Port %(port_id)s is associated with a different '
-                         'tenant than Floating IP %(floatingip_id)s and '
-                         'therefore cannot be bound.') % data)
-            else:
-                msg = (_('Cannot create floating IP and bind it to '
-                         'Port %s, since that port is owned by a '
-                         'different tenant.') % port_id)
-            raise n_exc.BadRequest(resource='floatingip', msg=msg)
-
-        internal_subnet_id = None
-        if fip.get('fixed_ip_address'):
-            internal_ip_address = fip['fixed_ip_address']
-            if netaddr.IPAddress(internal_ip_address).version != 4:
-                if 'id' in fip:
-                    data = {'floatingip_id': fip['id'],
-                            'internal_ip': internal_ip_address}
-                    msg = (_('Floating IP %(floatingip_id)s is associated '
-                             'with non-IPv4 address %(internal_ip)s and '
-                             'therefore cannot be bound.') % data)
-                else:
-                    msg = (_('Cannot create floating IP and bind it to %s, '
-                             'since that is not an IPv4 address.') %
-                           internal_ip_address)
-                raise n_exc.BadRequest(resource='floatingip', msg=msg)
-            for ip in internal_port['fixed_ips']:
-                if ip['ip_address'] == internal_ip_address:
-                    internal_subnet_id = ip['subnet_id']
-            if not internal_subnet_id:
-                msg = (_('Port %(id)s does not have fixed ip %(address)s') %
-                       {'id': internal_port['id'],
-                        'address': internal_ip_address})
-                raise n_exc.BadRequest(resource='floatingip', msg=msg)
-        else:
-            ipv4_fixed_ips = self._port_ipv4_fixed_ips(internal_port)
-            if not ipv4_fixed_ips:
-                msg = (_('Cannot add floating IP to port %s that has '
-                         'no fixed IPv4 addresses') % internal_port['id'])
-                raise n_exc.BadRequest(resource='floatingip', msg=msg)
-            if len(ipv4_fixed_ips) > 1:
-                msg = (_('Port %s has multiple fixed IPv4 addresses.  Must '
-                         'provide a specific IPv4 address when assigning a '
-                         'floating IP') % internal_port['id'])
-                raise n_exc.BadRequest(resource='floatingip', msg=msg)
-            internal_ip_address = ipv4_fixed_ips[0]['ip_address']
-            internal_subnet_id = ipv4_fixed_ips[0]['subnet_id']
-        return internal_port, internal_subnet_id, internal_ip_address
-
-    def _get_assoc_data(self, context, fip, floating_network_id):
-        """Determine/extract data associated with the internal port.
-
-        When a floating IP is associated with an internal port,
-        we need to extract/determine some data associated with the
-        internal port, including the internal_ip_address and router_id.
-        _internal_fip_assoc_data confirms that the internal port is owned
-        by the tenant who owns the floating IP, and
-        _get_router_for_floatingip determines the connecting router.
-        """
-        (internal_port, internal_subnet_id,
-         internal_ip_address) = self._internal_fip_assoc_data(context, fip)
-        router_id = self._get_router_for_floatingip(context,
-                                                    internal_port,
-                                                    internal_subnet_id,
-                                                    floating_network_id)
-
-        return (fip['port_id'], internal_ip_address, router_id)
-
-    def _check_and_get_fip_assoc(self, context, fip, floatingip_db):
-        port_id = internal_ip_address = router_id = None
-        if fip.get('fixed_ip_address') and not fip.get('port_id'):
-            msg = _("fixed_ip_address cannot be specified without a port_id")
-            raise n_exc.BadRequest(resource='floatingip', msg=msg)
-        if fip.get('port_id'):
-            port_id, internal_ip_address, router_id = self._get_assoc_data(
-                context,
-                fip,
-                floatingip_db['floating_network_id'])
-            fip_qry = context.session.query(FloatingIP)
-            try:
-                fip_qry.filter_by(
-                    fixed_port_id=fip['port_id'],
-                    floating_network_id=floatingip_db['floating_network_id'],
-                    fixed_ip_address=internal_ip_address).one()
-                raise l3.FloatingIPPortAlreadyAssociated(
-                    port_id=fip['port_id'],
-                    fip_id=floatingip_db['id'],
-                    floating_ip_address=floatingip_db['floating_ip_address'],
-                    fixed_ip=internal_ip_address,
-                    net_id=floatingip_db['floating_network_id'])
-            except exc.NoResultFound:
-                pass
-        return port_id, internal_ip_address, router_id
-
-    def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
-        previous_router_id = floatingip_db.router_id
-        port_id, internal_ip_address, router_id = (
-            self._check_and_get_fip_assoc(context, fip, floatingip_db))
-        floatingip_db.update({'fixed_ip_address': internal_ip_address,
-                              'fixed_port_id': port_id,
-                              'router_id': router_id,
-                              'last_known_router_id': previous_router_id})
-
-    def _is_ipv4_network(self, context, net_id):
-        net = self._core_plugin._get_network(context, net_id)
-        return any(s.ip_version == 4 for s in net.subnets)
-
-    def _create_floatingip(self, context, floatingip,
-            initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
-        fip = floatingip['floatingip']
-        fip_id = uuidutils.generate_uuid()
-
-        f_net_id = fip['floating_network_id']
-        if not self._core_plugin._network_is_external(context, f_net_id):
-            msg = _("Network %s is not a valid external network") % f_net_id
-            raise n_exc.BadRequest(resource='floatingip', msg=msg)
-
-        if not self._is_ipv4_network(context, f_net_id):
-            msg = _("Network %s does not contain any IPv4 subnet") % f_net_id
-            raise n_exc.BadRequest(resource='floatingip', msg=msg)
-
-        with context.session.begin(subtransactions=True):
-            # This external port is never exposed to the tenant.
-            # It is used purely for internal system and admin use when
-            # managing floating IPs.
-
-            port = {'tenant_id': '',  # tenant intentionally not set
-                    'network_id': f_net_id,
-                    'admin_state_up': True,
-                    'device_id': fip_id,
-                    'device_owner': DEVICE_OWNER_FLOATINGIP,
-                    'status': l3_constants.PORT_STATUS_NOTAPPLICABLE,
-                    'name': ''}
-            if fip.get('floating_ip_address'):
-                port['fixed_ips'] = [
-                    {'ip_address': fip['floating_ip_address']}]
-
-            if fip.get('subnet_id'):
-                port['fixed_ips'] = [
-                    {'subnet_id': fip['subnet_id']}]
-
-            # 'status' in the port dict cannot be set on a normal create;
-            # pass check_allow_post=False to skip that verification.
-            external_port = p_utils.create_port(self._core_plugin,
-                                                context.elevated(),
-                                                {'port': port},
-                                                check_allow_post=False)
-            # Ensure IPv4 addresses are allocated on external port
-            external_ipv4_ips = self._port_ipv4_fixed_ips(external_port)
-            if not external_ipv4_ips:
-                raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id)
-
-            floating_fixed_ip = external_ipv4_ips[0]
-            floating_ip_address = floating_fixed_ip['ip_address']
-            floatingip_db = FloatingIP(
-                id=fip_id,
-                tenant_id=fip['tenant_id'],
-                status=initial_status,
-                floating_network_id=fip['floating_network_id'],
-                floating_ip_address=floating_ip_address,
-                floating_port_id=external_port['id'])
-            # Update association with internal port
-            # and define external IP address
-            self._update_fip_assoc(context, fip,
-                                   floatingip_db, external_port)
-            context.session.add(floatingip_db)
-
-        return self._make_floatingip_dict(floatingip_db)
-
-    def create_floatingip(self, context, floatingip,
-            initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
-        return self._create_floatingip(context, floatingip, initial_status)
-
-    def _update_floatingip(self, context, id, floatingip):
-        fip = floatingip['floatingip']
-        with context.session.begin(subtransactions=True):
-            floatingip_db = self._get_floatingip(context, id)
-            old_floatingip = self._make_floatingip_dict(floatingip_db)
-            fip['tenant_id'] = floatingip_db['tenant_id']
-            fip['id'] = id
-            fip_port_id = floatingip_db['floating_port_id']
-            self._update_fip_assoc(context, fip, floatingip_db,
-                                   self._core_plugin.get_port(
-                                       context.elevated(), fip_port_id))
-        return old_floatingip, self._make_floatingip_dict(floatingip_db)
-
-    def _floatingips_to_router_ids(self, floatingips):
-        return list(set([floatingip['router_id']
-                         for floatingip in floatingips
-                         if floatingip['router_id']]))
-
-    def update_floatingip(self, context, id, floatingip):
-        _old_floatingip, floatingip = self._update_floatingip(
-            context, id, floatingip)
-        return floatingip
-
-    def update_floatingip_status(self, context, floatingip_id, status):
-        """Update operational status for floating IP in neutron DB."""
-        fip_query = self._model_query(context, FloatingIP).filter(
-            FloatingIP.id == floatingip_id)
-        fip_query.update({'status': status}, synchronize_session=False)
-
-    def _delete_floatingip(self, context, id):
-        floatingip = self._get_floatingip(context, id)
-        # Foreign key cascade will take care of the removal of the
-        # floating IP record once the port is deleted. We can't start
-        # a transaction first to remove it ourselves because the delete_port
-        # method will yield in its post-commit activities.
-        self._core_plugin.delete_port(context.elevated(),
-                                      floatingip['floating_port_id'],
-                                      l3_port_check=False)
-        return self._make_floatingip_dict(floatingip)
-
-    def delete_floatingip(self, context, id):
-        self._delete_floatingip(context, id)
-
-    def get_floatingip(self, context, id, fields=None):
-        floatingip = self._get_floatingip(context, id)
-        return self._make_floatingip_dict(floatingip, fields)
-
-    def get_floatingips(self, context, filters=None, fields=None,
-                        sorts=None, limit=None, marker=None,
-                        page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'floatingip', limit,
-                                          marker)
-        if filters is not None:
-            for key, val in six.iteritems(API_TO_DB_COLUMN_MAP):
-                if key in filters:
-                    filters[val] = filters.pop(key)
-
-        return self._get_collection(context, FloatingIP,
-                                    self._make_floatingip_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts,
-                                    limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def delete_disassociated_floatingips(self, context, network_id):
-        query = self._model_query(context, FloatingIP)
-        query = query.filter_by(floating_network_id=network_id,
-                                fixed_port_id=None,
-                                router_id=None)
-        for fip in query:
-            self.delete_floatingip(context, fip.id)
-
-    def get_floatingips_count(self, context, filters=None):
-        return self._get_collection_count(context, FloatingIP,
-                                          filters=filters)
-
-    def prevent_l3_port_deletion(self, context, port_id):
-        """Checks to make sure a port is allowed to be deleted.
-
-        Raises an exception if this is not the case.  This should be called by
-        any plugin when the API requests the deletion of a port, since some
-        ports for L3 are not intended to be deleted directly via a DELETE
-        to /ports, but rather via other API calls that perform the proper
-        deletion checks.
-        """
-        try:
-            port = self._core_plugin.get_port(context, port_id)
-        except n_exc.PortNotFound:
-            # non-existent ports don't need to be protected from deletion
-            return
-        if port['device_owner'] in self.router_device_owners:
-            # Raise port in use only if the port has IP addresses
-            # Otherwise it's a stale port that can be removed
-            fixed_ips = port['fixed_ips']
-            if fixed_ips:
-                reason = _('has device owner %s') % port['device_owner']
-                raise n_exc.ServicePortInUse(port_id=port['id'],
-                                             reason=reason)
-            else:
-                LOG.debug("Port %(port_id)s has owner %(port_owner)s, but "
-                          "no IP address, so it can be deleted",
-                          {'port_id': port['id'],
-                           'port_owner': port['device_owner']})
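
A usage note: core plugins are expected to run this check before honoring a
DELETE on /ports. A minimal sketch of that call site (the surrounding
delete_port flow and the l3plugin lookup are assumptions, mirroring the
_prevent_l3_port_delete_callback further below):

    def delete_port(self, context, port_id, l3_port_check=True):
        if l3_port_check:
            # Raises n_exc.ServicePortInUse for router-owned ports
            # that still carry fixed IPs.
            l3plugin.prevent_l3_port_deletion(context, port_id)
        # ... proceed with the actual deletion ...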
-
-    def disassociate_floatingips(self, context, port_id):
-        """Disassociate all floating IPs linked to specific port.
-
-        @param port_id: ID of the port to disassociate floating IPs.
-        @return: set of router-ids that require notification updates.
-        """
-        router_ids = set()
-
-        with context.session.begin(subtransactions=True):
-            fip_qry = context.session.query(FloatingIP)
-            floating_ips = fip_qry.filter_by(fixed_port_id=port_id)
-            for floating_ip in floating_ips:
-                router_ids.add(floating_ip['router_id'])
-                floating_ip.update({'fixed_port_id': None,
-                                    'fixed_ip_address': None,
-                                    'router_id': None})
-        return router_ids
-
-    def _build_routers_list(self, context, routers, gw_ports):
-        """Subclasses can override this to add extra gateway info"""
-        return routers
-
-    def _make_router_dict_with_gw_port(self, router, fields):
-        result = self._make_router_dict(router, fields)
-        if router.get('gw_port'):
-            result['gw_port'] = self._core_plugin._make_port_dict(
-                router['gw_port'], None)
-        return result
-
-    def _get_sync_routers(self, context, router_ids=None, active=None):
-        """Query routers and their gw ports for l3 agent.
-
-        Query routers with the router_ids. The gateway ports, if any,
-        will be queried too.
-        The L3 agent can be configured to handle a single router. In
-        addition, when we need to notify the agent about only one router
-        (on modification of the router, its interfaces, gw_port or
-        floating IPs), router_ids will be set.
-        @param router_ids: the list of router ids to query;
-                           if None, all routers will be queried.
-        @return: a list of router dicts with gw_port dicts populated if any
-        """
-        filters = {'id': router_ids} if router_ids else {}
-        if active is not None:
-            filters['admin_state_up'] = [active]
-        router_dicts = self._get_collection(
-            context, Router, self._make_router_dict_with_gw_port,
-            filters=filters)
-        if not router_dicts:
-            return []
-        gw_ports = dict((r['gw_port']['id'], r['gw_port'])
-                        for r in router_dicts
-                        if r.get('gw_port'))
-        return self._build_routers_list(context, router_dicts, gw_ports)
-
-    @staticmethod
-    def _unique_floatingip_iterator(query):
-        """Iterates over only one row per floating ip.  Ignores others."""
-        # Group rows by fip id.  They must be sorted by the same key.
-        q = query.order_by(FloatingIP.id)
-        keyfunc = lambda row: row[0]['id']
-        group_iterator = itertools.groupby(q, keyfunc)
-
-        # Just hit the first row of each group
-        for key, value in group_iterator:
-            yield six.next(value)
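
The dedup above leans on itertools.groupby, which only groups consecutive
rows, hence the order_by on FloatingIP.id first. A self-contained sketch of
the same first-row-per-group technique (the sample rows are hypothetical):

    import itertools

    rows = [({'id': 'a'}, 'scope-1'), ({'id': 'a'}, None), ({'id': 'b'}, None)]
    rows.sort(key=lambda row: row[0]['id'])  # groupby needs sorted input
    unique = [next(group) for _key, group in
              itertools.groupby(rows, key=lambda row: row[0]['id'])]
    # unique == [({'id': 'a'}, 'scope-1'), ({'id': 'b'}, None)]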
-
-    def _make_floatingip_dict_with_scope(self, floatingip_db, scope_id):
-        d = self._make_floatingip_dict(floatingip_db)
-        d['fixed_ip_address_scope'] = scope_id
-        return d
-
-    def _get_sync_floating_ips(self, context, router_ids):
-        """Query floating_ips that relate to list of router_ids with scope.
-
-        This is different from the regular get_floatingips in that it finds the
-        address scope of the fixed IP.  The router needs to know this to
-        distinguish it from other scopes.
-
-        There are a few redirections to go through to discover the address
-        scope from the floating ip.
-        """
-        if not router_ids:
-            return []
-
-        query = context.session.query(FloatingIP,
-                                      models_v2.SubnetPool.address_scope_id)
-        query = query.join(models_v2.Port,
-            FloatingIP.fixed_port_id == models_v2.Port.id)
-        # Outer join of Subnet can cause each ip to have more than one row.
-        query = query.outerjoin(models_v2.Subnet,
-            models_v2.Subnet.network_id == models_v2.Port.network_id)
-        query = query.filter(models_v2.Subnet.ip_version == 4)
-        query = query.outerjoin(models_v2.SubnetPool,
-            models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id)
-
-        # Filter out on router_ids
-        query = query.filter(FloatingIP.router_id.in_(router_ids))
-
-        return [self._make_floatingip_dict_with_scope(*row)
-                for row in self._unique_floatingip_iterator(query)]
-
-    def _get_sync_interfaces(self, context, router_ids, device_owners=None):
-        """Query router interfaces that relate to list of router_ids."""
-        device_owners = device_owners or [DEVICE_OWNER_ROUTER_INTF]
-        if not router_ids:
-            return []
-        qry = context.session.query(RouterPort)
-        qry = qry.filter(
-            RouterPort.router_id.in_(router_ids),
-            RouterPort.port_type.in_(device_owners)
-        )
-
-        interfaces = [self._core_plugin._make_port_dict(rp.port, None)
-                      for rp in qry]
-        return interfaces
-
-    @staticmethod
-    def _each_port_having_fixed_ips(ports):
-        for port in ports or []:
-            fixed_ips = port.get('fixed_ips', [])
-            if not fixed_ips:
-                # Skip ports without IPs, which can occur if a subnet
-                # attached to a router is deleted
-                LOG.info(_LI("Skipping port %s as no IP is configure on "
-                             "it"),
-                         port['id'])
-                continue
-            yield port
-
-    def _get_subnets_by_network_list(self, context, network_ids):
-        if not network_ids:
-            return {}
-
-        query = context.session.query(models_v2.Subnet,
-                                      models_v2.SubnetPool.address_scope_id)
-        query = query.outerjoin(
-            models_v2.SubnetPool,
-            models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id)
-        query = query.filter(models_v2.Subnet.network_id.in_(network_ids))
-
-        fields = ['id', 'cidr', 'gateway_ip', 'dns_nameservers',
-                  'network_id', 'ipv6_ra_mode', 'subnetpool_id']
-
-        def make_subnet_dict_with_scope(row):
-            subnet_db, address_scope_id = row
-            subnet = self._core_plugin._make_subnet_dict(
-                subnet_db, fields, context=context)
-            subnet['address_scope_id'] = address_scope_id
-            return subnet
-
-        subnets_by_network = dict((id, []) for id in network_ids)
-        for subnet in (make_subnet_dict_with_scope(row) for row in query):
-            subnets_by_network[subnet['network_id']].append(subnet)
-        return subnets_by_network
-
-    def _populate_subnets_for_ports(self, context, ports):
-        """Populate ports with subnets.
-
-        These ports already have fixed_ips populated.
-        """
-        network_ids = [p['network_id']
-                       for p in self._each_port_having_fixed_ips(ports)]
-
-        subnets_by_network = self._get_subnets_by_network_list(
-            context, network_ids)
-
-        for port in self._each_port_having_fixed_ips(ports):
-
-            port['subnets'] = []
-            port['extra_subnets'] = []
-            port['address_scopes'] = {l3_constants.IP_VERSION_4: None,
-                                      l3_constants.IP_VERSION_6: None}
-
-            scopes = {}
-            for subnet in subnets_by_network[port['network_id']]:
-                scope = subnet['address_scope_id']
-                cidr = netaddr.IPNetwork(subnet['cidr'])
-                scopes[cidr.version] = scope
-
-                # If this subnet is used by the port (has a matching entry
-                # in the port's fixed_ips), then add this subnet to the
-                # port's subnets list, and populate the fixed_ips entry
-                # with the subnet's prefix length.
-                subnet_info = {'id': subnet['id'],
-                               'cidr': subnet['cidr'],
-                               'gateway_ip': subnet['gateway_ip'],
-                               'dns_nameservers': subnet['dns_nameservers'],
-                               'ipv6_ra_mode': subnet['ipv6_ra_mode'],
-                               'subnetpool_id': subnet['subnetpool_id']}
-                for fixed_ip in port['fixed_ips']:
-                    if fixed_ip['subnet_id'] == subnet['id']:
-                        port['subnets'].append(subnet_info)
-                        prefixlen = cidr.prefixlen
-                        fixed_ip['prefixlen'] = prefixlen
-                        break
-                else:
-                    # This subnet is not used by the port.
-                    port['extra_subnets'].append(subnet_info)
-
-            port['address_scopes'].update(scopes)
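
The prefixlen bookkeeping above comes straight from netaddr: parsing the
subnet CIDR yields both the IP version (the key into address_scopes) and the
prefix length recorded on the matching fixed_ips entry. For example:

    import netaddr

    cidr = netaddr.IPNetwork('10.0.0.0/24')
    cidr.version    # 4  -> key into the port's address_scopes dict
    cidr.prefixlen  # 24 -> stored as fixed_ip['prefixlen']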
-
-    def _process_floating_ips(self, context, routers_dict, floating_ips):
-        for floating_ip in floating_ips:
-            router = routers_dict.get(floating_ip['router_id'])
-            if router:
-                router_floatingips = router.get(l3_constants.FLOATINGIP_KEY,
-                                                [])
-                router_floatingips.append(floating_ip)
-                router[l3_constants.FLOATINGIP_KEY] = router_floatingips
-
-    def _process_interfaces(self, routers_dict, interfaces):
-        for interface in interfaces:
-            router = routers_dict.get(interface['device_id'])
-            if router:
-                router_interfaces = router.get(l3_constants.INTERFACE_KEY, [])
-                router_interfaces.append(interface)
-                router[l3_constants.INTERFACE_KEY] = router_interfaces
-
-    def _get_router_info_list(self, context, router_ids=None, active=None,
-                              device_owners=None):
-        """Query routers and their related floating_ips, interfaces."""
-        with context.session.begin(subtransactions=True):
-            routers = self._get_sync_routers(context,
-                                             router_ids=router_ids,
-                                             active=active)
-            router_ids = [router['id'] for router in routers]
-            interfaces = self._get_sync_interfaces(
-                context, router_ids, device_owners)
-            floating_ips = self._get_sync_floating_ips(context, router_ids)
-            return (routers, interfaces, floating_ips)
-
-    def get_sync_data(self, context, router_ids=None, active=None):
-        routers, interfaces, floating_ips = self._get_router_info_list(
-            context, router_ids=router_ids, active=active)
-        ports_to_populate = [router['gw_port'] for router in routers
-                             if router.get('gw_port')] + interfaces
-        self._populate_subnets_for_ports(context, ports_to_populate)
-        routers_dict = dict((router['id'], router) for router in routers)
-        self._process_floating_ips(context, routers_dict, floating_ips)
-        self._process_interfaces(routers_dict, interfaces)
-        return list(routers_dict.values())
-
-
-class L3RpcNotifierMixin(object):
-    """Mixin class to add rpc notifier attribute to db_base_plugin_v2."""
-
-    @property
-    def l3_rpc_notifier(self):
-        if not hasattr(self, '_l3_rpc_notifier'):
-            self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
-        return self._l3_rpc_notifier
-
-    @l3_rpc_notifier.setter
-    def l3_rpc_notifier(self, value):
-        self._l3_rpc_notifier = value
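
This property/setter pair is the usual lazy-initialization idiom: the
notifier is built on first access, while the setter lets tests or subclasses
inject a replacement. Reduced to its essentials (names illustrative):

    class Notifying(object):
        @property
        def notifier(self):
            if not hasattr(self, '_notifier'):
                self._notifier = object()  # stand-in for the real notifier
            return self._notifier

        @notifier.setter
        def notifier(self, value):
            self._notifier = value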
-
-    def notify_router_updated(self, context, router_id,
-                              operation=None):
-        if router_id:
-            self.l3_rpc_notifier.routers_updated(
-                context, [router_id], operation)
-
-    def notify_routers_updated(self, context, router_ids,
-                               operation=None, data=None):
-        if router_ids:
-            self.l3_rpc_notifier.routers_updated(
-                context, router_ids, operation, data)
-
-    def notify_router_deleted(self, context, router_id):
-        self.l3_rpc_notifier.router_deleted(context, router_id)
-
-
-class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin):
-    """Mixin class to add rpc notifier methods to db_base_plugin_v2."""
-
-    def update_router(self, context, id, router):
-        router_dict = super(L3_NAT_db_mixin, self).update_router(context,
-                                                                 id, router)
-        self.notify_router_updated(context, router_dict['id'], None)
-        return router_dict
-
-    def delete_router(self, context, id):
-        super(L3_NAT_db_mixin, self).delete_router(context, id)
-        self.notify_router_deleted(context, id)
-
-    def notify_router_interface_action(
-            self, context, router_interface_info, action):
-        l3_method = '%s_router_interface' % action
-        super(L3_NAT_db_mixin, self).notify_routers_updated(
-            context, [router_interface_info['id']], l3_method,
-            {'subnet_id': router_interface_info['subnet_id']})
-
-        mapping = {'add': 'create', 'remove': 'delete'}
-        notifier = n_rpc.get_notifier('network')
-        router_event = 'router.interface.%s' % mapping[action]
-        notifier.info(context, router_event,
-                      {'router_interface': router_interface_info})
-
-    def add_router_interface(self, context, router_id, interface_info):
-        router_interface_info = super(
-            L3_NAT_db_mixin, self).add_router_interface(
-                context, router_id, interface_info)
-        self.notify_router_interface_action(
-            context, router_interface_info, 'add')
-        return router_interface_info
-
-    def remove_router_interface(self, context, router_id, interface_info):
-        router_interface_info = super(
-            L3_NAT_db_mixin, self).remove_router_interface(
-                context, router_id, interface_info)
-        self.notify_router_interface_action(
-            context, router_interface_info, 'remove')
-        return router_interface_info
-
-    def create_floatingip(self, context, floatingip,
-            initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
-        floatingip_dict = super(L3_NAT_db_mixin, self).create_floatingip(
-            context, floatingip, initial_status)
-        router_id = floatingip_dict['router_id']
-        self.notify_router_updated(context, router_id, 'create_floatingip')
-        return floatingip_dict
-
-    def update_floatingip(self, context, id, floatingip):
-        old_floatingip, floatingip = self._update_floatingip(
-            context, id, floatingip)
-        router_ids = self._floatingips_to_router_ids(
-            [old_floatingip, floatingip])
-        super(L3_NAT_db_mixin, self).notify_routers_updated(
-            context, router_ids, 'update_floatingip', {})
-        return floatingip
-
-    def delete_floatingip(self, context, id):
-        floating_ip = self._delete_floatingip(context, id)
-        self.notify_router_updated(context, floating_ip['router_id'],
-                                   'delete_floatingip')
-
-    def disassociate_floatingips(self, context, port_id, do_notify=True):
-        """Disassociate all floating IPs linked to specific port.
-
-        @param port_id: ID of the port to disassociate floating IPs.
-        @param do_notify: whether we should notify routers right away.
-        @return: set of router-ids that require notification updates
-                 if do_notify is False, otherwise None.
-        """
-        router_ids = super(L3_NAT_db_mixin, self).disassociate_floatingips(
-            context, port_id)
-        if do_notify:
-            self.notify_routers_updated(context, router_ids)
-            # since caller assumes that we handled notifications on its
-            # behalf, return nothing
-            return
-
-        return router_ids
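
A sketch of the two calling modes the docstring describes (the plugin and
context objects are assumed to exist):

    # do_notify=True (default): notifications are sent here, nothing returned.
    plugin.disassociate_floatingips(context, port_id)

    # do_notify=False: the caller batches the affected routers and
    # notifies later.
    router_ids = plugin.disassociate_floatingips(context, port_id,
                                                 do_notify=False)
    plugin.notify_routers_updated(context, router_ids)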
-
-    def notify_routers_updated(self, context, router_ids):
-        super(L3_NAT_db_mixin, self).notify_routers_updated(
-            context, list(router_ids), 'disassociate_floatingips', {})
-
-
-def _prevent_l3_port_delete_callback(resource, event, trigger, **kwargs):
-    context = kwargs['context']
-    port_id = kwargs['port_id']
-    port_check = kwargs['port_check']
-    l3plugin = manager.NeutronManager.get_service_plugins().get(
-        constants.L3_ROUTER_NAT)
-    if l3plugin and port_check:
-        l3plugin.prevent_l3_port_deletion(context, port_id)
-
-
-def _notify_routers_callback(resource, event, trigger, **kwargs):
-    context = kwargs['context']
-    router_ids = kwargs['router_ids']
-    l3plugin = manager.NeutronManager.get_service_plugins().get(
-        constants.L3_ROUTER_NAT)
-    l3plugin.notify_routers_updated(context, router_ids)
-
-
-def _notify_subnet_gateway_ip_update(resource, event, trigger, **kwargs):
-    l3plugin = manager.NeutronManager.get_service_plugins().get(
-            constants.L3_ROUTER_NAT)
-    if not l3plugin:
-        return
-    context = kwargs['context']
-    network_id = kwargs['network_id']
-    subnet_id = kwargs['subnet_id']
-    query = context.session.query(models_v2.Port).filter_by(
-                network_id=network_id,
-                device_owner=l3_constants.DEVICE_OWNER_ROUTER_GW)
-    query = query.join(models_v2.Port.fixed_ips).filter(
-                models_v2.IPAllocation.subnet_id == subnet_id)
-    router_ids = set(port['device_id'] for port in query)
-    for router_id in router_ids:
-        l3plugin.notify_router_updated(context, router_id)
-
-
-def subscribe():
-    registry.subscribe(
-        _prevent_l3_port_delete_callback, resources.PORT, events.BEFORE_DELETE)
-    registry.subscribe(
-        _notify_routers_callback, resources.PORT, events.AFTER_DELETE)
-    registry.subscribe(
-        _notify_subnet_gateway_ip_update, resources.SUBNET_GATEWAY,
-        events.AFTER_UPDATE)
-
-# NOTE(armax): multiple l3 service plugins (potentially out of tree) inherit
-# from l3_db and may need the callbacks to be processed. Having an implicit
-# subscription (through the module import) preserves the existing behavior,
-# and at the same time it avoids fixing it manually in each and every l3 plugin
-# out there. That said, the subscription is also made explicit in the
-# reference l3 plugin. The subscription operation is idempotent so there is no
-# harm in registering the same callback multiple times.
-subscribe()
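
Because the subscription is idempotent, an out-of-tree l3 plugin can also
call it explicitly without harm; a hypothetical sketch:

    from neutron.db import l3_db

    class MyL3Plugin(l3_db.L3_NAT_db_mixin):
        def __init__(self):
            super(MyL3Plugin, self).__init__()
            l3_db.subscribe()  # harmless if the module import already did it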
diff --git a/neutron/db/l3_dvr_db.py b/neutron/db/l3_dvr_db.py
deleted file mode 100644 (file)
index 6ff9b49..0000000
+++ /dev/null
@@ -1,810 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import collections
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import excutils
-import six
-
-from neutron._i18n import _, _LI, _LW
-from neutron.api.v2 import attributes
-from neutron.callbacks import events
-from neutron.callbacks import exceptions
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants as l3_const
-from neutron.common import exceptions as n_exc
-from neutron.common import utils as n_utils
-from neutron.db import l3_attrs_db
-from neutron.db import l3_db
-from neutron.db import l3_dvrscheduler_db as l3_dvrsched_db
-from neutron.extensions import l3
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.plugins.common import utils as p_utils
-
-
-LOG = logging.getLogger(__name__)
-router_distributed_opts = [
-    cfg.BoolOpt('router_distributed',
-                default=False,
-                help=_("System-wide flag to determine the type of router "
-                       "that tenants can create. Only admin can override.")),
-]
-cfg.CONF.register_opts(router_distributed_opts)
-
-
-class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
-                               l3_attrs_db.ExtraAttributesMixin):
-    """Mixin class to enable DVR support."""
-
-    router_device_owners = (
-        l3_db.L3_NAT_db_mixin.router_device_owners +
-        (l3_const.DEVICE_OWNER_DVR_INTERFACE,
-         l3_const.DEVICE_OWNER_ROUTER_SNAT,
-         l3_const.DEVICE_OWNER_AGENT_GW))
-
-    extra_attributes = (
-        l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{
-            'name': "distributed",
-            'default': cfg.CONF.router_distributed
-        }])
-
-    def _create_router_db(self, context, router, tenant_id):
-        """Create a router db object with dvr additions."""
-        router['distributed'] = is_distributed_router(router)
-        with context.session.begin(subtransactions=True):
-            router_db = super(
-                L3_NAT_with_dvr_db_mixin, self)._create_router_db(
-                    context, router, tenant_id)
-            self._process_extra_attr_router_create(context, router_db, router)
-            return router_db
-
-    def _validate_router_migration(self, context, router_db, router_res):
-        """Allow centralized -> distributed state transition only."""
-        if (router_db.extra_attributes.distributed and
-            router_res.get('distributed') is False):
-            LOG.info(_LI("Centralizing distributed router %s "
-                         "is not supported"), router_db['id'])
-            raise n_exc.BadRequest(
-                resource='router',
-                msg=_("Migration from distributed router to centralized is "
-                      "not supported"))
-        elif (not router_db.extra_attributes.distributed and
-              router_res.get('distributed')):
-            # the router must be administratively down prior to the upgrade
-            if router_db.admin_state_up:
-                msg = _('Cannot upgrade active router to distributed. Please '
-                        'set router admin_state_up to False prior to upgrade.')
-                raise n_exc.BadRequest(resource='router', msg=msg)
-
-            # Notify advanced services of the imminent state transition
-            # for the router.
-            try:
-                kwargs = {'context': context, 'router': router_db}
-                registry.notify(
-                    resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs)
-            except exceptions.CallbackFailure as e:
-                with excutils.save_and_reraise_exception():
-                    # NOTE(armax): preserve old check's behavior
-                    if len(e.errors) == 1:
-                        raise e.errors[0].error
-                    raise l3.RouterInUse(router_id=router_db['id'],
-                                         reason=e)
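
In API terms the only supported transition is therefore centralized ->
distributed, with the router administratively down first. A hedged sketch of
the update sequence a caller would issue:

    # 1. Take the router down; upgrading an active router raises BadRequest.
    plugin.update_router(context, router_id,
                         {'router': {'admin_state_up': False}})
    # 2. Flip it to distributed; the reverse transition is rejected.
    plugin.update_router(context, router_id,
                         {'router': {'distributed': True}})
    # 3. Bring it back up.
    plugin.update_router(context, router_id,
                         {'router': {'admin_state_up': True}})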
-
-    def _update_distributed_attr(
-        self, context, router_id, router_db, data):
-        """Update the model to support the dvr case of a router."""
-        if data.get('distributed'):
-            old_owner = l3_const.DEVICE_OWNER_ROUTER_INTF
-            new_owner = l3_const.DEVICE_OWNER_DVR_INTERFACE
-            for rp in router_db.attached_ports.filter_by(port_type=old_owner):
-                rp.port_type = new_owner
-                rp.port.device_owner = new_owner
-
-    def _update_router_db(self, context, router_id, data):
-        with context.session.begin(subtransactions=True):
-            router_db = super(
-                L3_NAT_with_dvr_db_mixin, self)._update_router_db(
-                    context, router_id, data)
-            migrating_to_distributed = (
-                not router_db.extra_attributes.distributed and
-                data.get('distributed') is True)
-            self._validate_router_migration(context, router_db, data)
-            router_db.extra_attributes.update(data)
-            self._update_distributed_attr(
-                context, router_id, router_db, data)
-            if migrating_to_distributed:
-                if router_db['gw_port_id']:
-                    # If the Legacy router is getting migrated to a DVR
-                    # router, make sure to create corresponding
-                    # snat interface ports that are to be consumed by
-                    # the Service Node.
-                    if not self._create_snat_intf_ports_if_not_exists(
-                        context.elevated(), router_db):
-                        LOG.debug("SNAT interface ports not created: %s",
-                                  router_db['id'])
-                cur_agents = self.list_l3_agents_hosting_router(
-                    context, router_db['id'])['agents']
-                for agent in cur_agents:
-                    self._unbind_router(context, router_db['id'],
-                                        agent['id'])
-            return router_db
-
-    def _delete_current_gw_port(self, context, router_id, router, new_network):
-        """
-        Overridden here to handle deletion of dvr internal ports.
-
-        If there is a valid router update with gateway port to be deleted,
-        then go ahead and delete the csnat ports and the floatingip
-        agent gateway port associated with the dvr router.
-        """
-
-        gw_ext_net_id = (
-            router.gw_port['network_id'] if router.gw_port else None)
-
-        super(L3_NAT_with_dvr_db_mixin,
-              self)._delete_current_gw_port(context, router_id,
-                                            router, new_network)
-        if (is_distributed_router(router) and
-            gw_ext_net_id != new_network and gw_ext_net_id is not None):
-            self.delete_csnat_router_interface_ports(
-                context.elevated(), router)
-            # NOTE(Swami): Delete the Floatingip agent gateway port
-            # on all hosts when it is the last gateway port in the
-            # given external network.
-            filters = {'network_id': [gw_ext_net_id],
-                       'device_owner': [l3_const.DEVICE_OWNER_ROUTER_GW]}
-            ext_net_gw_ports = self._core_plugin.get_ports(
-                context.elevated(), filters)
-            if not ext_net_gw_ports:
-                self.delete_floatingip_agent_gateway_port(
-                    context.elevated(), None, gw_ext_net_id)
-                # Send the information to all the L3 Agent hosts
-                # to clean up the fip namespace as it is no longer required.
-                self.l3_rpc_notifier.delete_fipnamespace_for_ext_net(
-                    context, gw_ext_net_id)
-
-    def _create_gw_port(self, context, router_id, router, new_network,
-                        ext_ips):
-        super(L3_NAT_with_dvr_db_mixin,
-              self)._create_gw_port(context, router_id, router, new_network,
-                                    ext_ips)
-        # Make sure that the gateway port exists before creating the
-        # snat interface ports for distributed router.
-        if router.extra_attributes.distributed and router.gw_port:
-            snat_p_list = self._create_snat_intf_ports_if_not_exists(
-                context.elevated(), router)
-            if not snat_p_list:
-                LOG.debug("SNAT interface ports not created: %s", snat_p_list)
-
-    def _get_device_owner(self, context, router=None):
-        """Get device_owner for the specified router."""
-        router_is_uuid = isinstance(router, six.string_types)
-        if router_is_uuid:
-            router = self._get_router(context, router)
-        if is_distributed_router(router):
-            return l3_const.DEVICE_OWNER_DVR_INTERFACE
-        return super(L3_NAT_with_dvr_db_mixin,
-                     self)._get_device_owner(context, router)
-
-    def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
-        """Override to create floating agent gw port for DVR.
-
-        Floating IP Agent gateway port will be created when a
-        floatingIP association happens.
-        """
-        fip_port = fip.get('port_id')
-        super(L3_NAT_with_dvr_db_mixin, self)._update_fip_assoc(
-            context, fip, floatingip_db, external_port)
-        associate_fip = fip_port and floatingip_db['id']
-        if associate_fip and floatingip_db.get('router_id'):
-            admin_ctx = context.elevated()
-            router_dict = self.get_router(
-                admin_ctx, floatingip_db['router_id'])
-            # Check if distributed router and then create the
-            # FloatingIP agent gateway port
-            if router_dict.get('distributed'):
-                hostid = self._get_dvr_service_port_hostid(
-                    context, fip_port)
-                if hostid:
-                    # FIXME (Swami): This FIP Agent Gateway port should be
-                    # created only once and there should not be a duplicate
-                    # for the same host. Until we find a good solution for
-                    # augmenting multiple server requests we should use the
-                    # existing flow.
-                    fip_agent_port = (
-                        self.create_fip_agent_gw_port_if_not_exists(
-                            admin_ctx, external_port['network_id'],
-                            hostid))
-                    LOG.debug("FIP Agent gateway port: %s", fip_agent_port)
-
-    def _get_floatingip_on_port(self, context, port_id=None):
-        """Helper function to retrieve the fip associated with port."""
-        fip_qry = context.session.query(l3_db.FloatingIP)
-        floating_ip = fip_qry.filter_by(fixed_port_id=port_id)
-        return floating_ip.first()
-
-    def add_router_interface(self, context, router_id, interface_info):
-        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
-        router = self._get_router(context, router_id)
-        device_owner = self._get_device_owner(context, router)
-
-        # This should be True unless adding an IPv6 prefix to an existing port
-        new_port = True
-
-        if add_by_port:
-            port, subnets = self._add_interface_by_port(
-                    context, router, interface_info['port_id'], device_owner)
-        elif add_by_sub:
-            port, subnets, new_port = self._add_interface_by_subnet(
-                    context, router, interface_info['subnet_id'], device_owner)
-
-        subnet = subnets[0]
-
-        if new_port:
-            if router.extra_attributes.distributed and router.gw_port:
-                try:
-                    admin_context = context.elevated()
-                    self._add_csnat_router_interface_port(
-                        admin_context, router, port['network_id'],
-                        port['fixed_ips'][-1]['subnet_id'])
-                except Exception:
-                    with excutils.save_and_reraise_exception():
-                        # we need to preserve the original state prior
-                        # the request by rolling back the port creation
-                        # that led to new_port=True
-                        self._core_plugin.delete_port(
-                            admin_context, port['id'])
-
-            with context.session.begin(subtransactions=True):
-                router_port = l3_db.RouterPort(
-                    port_id=port['id'],
-                    router_id=router.id,
-                    port_type=device_owner
-                )
-                context.session.add(router_port)
-
-        # NOTE: For additional IPv6 subnets added to the same
-        # network, we need to update the CSNAT port with the
-        # respective IPv6 subnet.
-        elif subnet and port:
-            fixed_ip = {'subnet_id': subnet['id']}
-            if subnet['ip_version'] == 6:
-                # Add new prefix to an existing ipv6 csnat port with the
-                # same network id if one exists
-                cs_port = self._find_router_port_by_network_and_device_owner(
-                    router, subnet['network_id'],
-                    l3_const.DEVICE_OWNER_ROUTER_SNAT)
-                if cs_port:
-                    fixed_ips = list(cs_port['port']['fixed_ips'])
-                    fixed_ips.append(fixed_ip)
-                    updated_port = self._core_plugin.update_port(
-                        context.elevated(),
-                        cs_port['port_id'], {'port': {'fixed_ips': fixed_ips}})
-                    LOG.debug("CSNAT port updated for IPv6 subnet: "
-                              "%s", updated_port)
-        router_interface_info = self._make_router_interface_info(
-            router_id, port['tenant_id'], port['id'], subnet['id'],
-            [subnet['id']])
-        self.notify_router_interface_action(
-            context, router_interface_info, 'add')
-        return router_interface_info
-
-    def _port_has_ipv6_address(self, port, csnat_port_check=True):
-        """Overridden to return False if DVR SNAT port."""
-        if csnat_port_check:
-            if port['device_owner'] == l3_const.DEVICE_OWNER_ROUTER_SNAT:
-                return False
-        return super(L3_NAT_with_dvr_db_mixin,
-                     self)._port_has_ipv6_address(port)
-
-    def _find_router_port_by_network_and_device_owner(
-        self, router, net_id, device_owner):
-        for port in router.attached_ports:
-            p = port['port']
-            if (p['network_id'] == net_id and
-                p['device_owner'] == device_owner and
-                self._port_has_ipv6_address(p, csnat_port_check=False)):
-                return port
-
-    def _check_for_multiprefix_csnat_port_and_update(
-        self, context, router, subnet, port):
-        """Checks if the csnat port contains multiple ipv6 prefixes.
-
-        If the csnat port contains multiple ipv6 prefixes for the given
-        network when a router interface is deleted, make sure we don't
-        delete the port when a single subnet is deleted and just update
-        it with the right fixed_ip.
-        This function returns True if it is a multiprefix port.
-        """
-        subnet_id = subnet['id']
-        if router.gw_port:
-            # If router has a gateway port, check if it has IPV6 subnet
-            cs_port = (
-                self._find_router_port_by_network_and_device_owner(
-                    router, subnet['network_id'],
-                    l3_const.DEVICE_OWNER_ROUTER_SNAT))
-            if cs_port:
-                fixed_ips = (
-                    [fixedip for fixedip in
-                        cs_port['port']['fixed_ips']
-                        if fixedip['subnet_id'] != subnet_id])
-                if fixed_ips:
-                    # multiple prefix port - delete prefix from port
-                    self._core_plugin.update_port(
-                        context.elevated(),
-                        cs_port['port_id'], {'port': {'fixed_ips': fixed_ips}})
-                    return True
-        return False
-
-    def _check_dvr_router_remove_required_and_notify_agent(
-        self, context, router, port, subnet):
-        if router.extra_attributes.distributed:
-            is_multiple_prefix_csport = (
-                self._check_for_multiprefix_csnat_port_and_update(
-                    context, router, subnet, port))
-            if not is_multiple_prefix_csport:
-                # Single prefix port - go ahead and delete the port
-                self.delete_csnat_router_interface_ports(
-                    context.elevated(), router, subnet_id=subnet['id'])
-            plugin = manager.NeutronManager.get_service_plugins().get(
-                        constants.L3_ROUTER_NAT)
-            l3_agents = plugin.get_l3_agents_hosting_routers(context,
-                                                             [router['id']])
-            subnet_ids = plugin.get_subnet_ids_on_router(
-                context, router['id'])
-            if subnet_ids:
-                for l3_agent in l3_agents:
-                    if not plugin.check_dvr_serviceable_ports_on_host(
-                        context, l3_agent['host'], subnet_ids):
-                        plugin.remove_router_from_l3_agent(
-                            context, l3_agent['id'], router['id'])
-        router_interface_info = self._make_router_interface_info(
-            router['id'], port['tenant_id'], port['id'], subnet['id'],
-            [subnet['id']])
-        self.notify_router_interface_action(
-            context, router_interface_info, 'remove')
-        return router_interface_info
-
-    def remove_router_interface(self, context, router_id, interface_info):
-        remove_by_port, remove_by_subnet = (
-            self._validate_interface_info(interface_info, for_removal=True)
-        )
-        port_id = interface_info.get('port_id')
-        subnet_id = interface_info.get('subnet_id')
-        router = self._get_router(context, router_id)
-        device_owner = self._get_device_owner(context, router)
-
-        if remove_by_port:
-            port, subnets = self._remove_interface_by_port(
-                    context, router_id, port_id, subnet_id, device_owner)
-
-        # remove_by_subnet is not used here, because the validation logic of
-        # _validate_interface_info ensures that at least one of remove_by_*
-        # is True.
-        else:
-            port, subnets = self._remove_interface_by_subnet(
-                    context, router_id, subnet_id, device_owner)
-
-        subnet = subnets[0]
-        router_interface_info = (
-            self._check_dvr_router_remove_required_and_notify_agent(
-                context, router, port, subnet))
-        return router_interface_info
-
-    def _get_snat_sync_interfaces(self, context, router_ids):
-        """Query router interfaces that relate to list of router_ids."""
-        if not router_ids:
-            return []
-        qry = context.session.query(l3_db.RouterPort)
-        qry = qry.filter(
-            l3_db.RouterPort.router_id.in_(router_ids),
-            l3_db.RouterPort.port_type == l3_const.DEVICE_OWNER_ROUTER_SNAT
-        )
-        interfaces = collections.defaultdict(list)
-        for rp in qry:
-            interfaces[rp.router_id].append(
-                self._core_plugin._make_port_dict(rp.port, None))
-        LOG.debug("Return the SNAT ports: %s", interfaces)
-        return interfaces
-
-    def _build_routers_list(self, context, routers, gw_ports):
-        # Perform a single query up front for all routers
-        if not routers:
-            return []
-        router_ids = [r['id'] for r in routers]
-        snat_binding = l3_dvrsched_db.CentralizedSnatL3AgentBinding
-        query = (context.session.query(snat_binding).
-                 filter(snat_binding.router_id.in_(router_ids))).all()
-        bindings = dict((b.router_id, b) for b in query)
-
-        for rtr in routers:
-            gw_port_id = rtr['gw_port_id']
-            # Collect gw ports only if available
-            if gw_port_id and gw_ports.get(gw_port_id):
-                rtr['gw_port'] = gw_ports[gw_port_id]
-                if 'enable_snat' in rtr[l3.EXTERNAL_GW_INFO]:
-                    rtr['enable_snat'] = (
-                        rtr[l3.EXTERNAL_GW_INFO]['enable_snat'])
-
-                binding = bindings.get(rtr['id'])
-                if not binding:
-                    rtr['gw_port_host'] = None
-                    LOG.debug('No snat is bound to router %s', rtr['id'])
-                    continue
-
-                rtr['gw_port_host'] = binding.l3_agent.host
-
-        return routers
-
-    def _process_routers(self, context, routers):
-        routers_dict = {}
-        snat_intfs_by_router_id = self._get_snat_sync_interfaces(
-            context, [r['id'] for r in routers])
-        for router in routers:
-            routers_dict[router['id']] = router
-            if router['gw_port_id']:
-                snat_router_intfs = snat_intfs_by_router_id[router['id']]
-                LOG.debug("SNAT ports returned: %s ", snat_router_intfs)
-                router[l3_const.SNAT_ROUTER_INTF_KEY] = snat_router_intfs
-        return routers_dict
-
-    def _process_floating_ips_dvr(self, context, routers_dict,
-                                  floating_ips, host, agent):
-        fip_sync_interfaces = None
-        LOG.debug("FIP Agent : %s ", agent.id)
-        for floating_ip in floating_ips:
-            router = routers_dict.get(floating_ip['router_id'])
-            if router:
-                router_floatingips = router.get(l3_const.FLOATINGIP_KEY, [])
-                if router['distributed']:
-                    if floating_ip.get('host', None) != host:
-                        continue
-                    LOG.debug("Floating IP host: %s", floating_ip['host'])
-                router_floatingips.append(floating_ip)
-                router[l3_const.FLOATINGIP_KEY] = router_floatingips
-                if not fip_sync_interfaces:
-                    fip_sync_interfaces = self._get_fip_sync_interfaces(
-                        context, agent.id)
-                    LOG.debug("FIP Agent ports: %s", fip_sync_interfaces)
-                router[l3_const.FLOATINGIP_AGENT_INTF_KEY] = (
-                    fip_sync_interfaces)
-
-    def _get_fip_sync_interfaces(self, context, fip_agent_id):
-        """Query router interfaces that relate to list of router_ids."""
-        if not fip_agent_id:
-            return []
-        filters = {'device_id': [fip_agent_id],
-                   'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]}
-        interfaces = self._core_plugin.get_ports(context.elevated(), filters)
-        LOG.debug("Return the FIP ports: %s ", interfaces)
-        return interfaces
-
-    def _get_dvr_sync_data(self, context, host, agent, router_ids=None,
-                          active=None):
-        routers, interfaces, floating_ips = self._get_router_info_list(
-            context, router_ids=router_ids, active=active,
-            device_owners=l3_const.ROUTER_INTERFACE_OWNERS)
-        dvr_router_ids = set(router['id'] for router in routers
-                             if is_distributed_router(router))
-        floating_ip_port_ids = [fip['port_id'] for fip in floating_ips
-                                if fip['router_id'] in dvr_router_ids]
-        if floating_ip_port_ids:
-            port_filter = {portbindings.HOST_ID: [host],
-                           'id': floating_ip_port_ids}
-            ports = self._core_plugin.get_ports(context, port_filter)
-            port_dict = dict((port['id'], port) for port in ports)
-            # Add the port binding host to the floatingip dictionary
-            for fip in floating_ips:
-                vm_port = port_dict.get(fip['port_id'], None)
-                if vm_port:
-                    fip['host'] = self._get_dvr_service_port_hostid(
-                        context, fip['port_id'], port=vm_port)
-        routers_dict = self._process_routers(context, routers)
-        self._process_floating_ips_dvr(context, routers_dict,
-                                       floating_ips, host, agent)
-        ports_to_populate = []
-        for router in routers_dict.values():
-            if router.get('gw_port'):
-                ports_to_populate.append(router['gw_port'])
-            if router.get(l3_const.FLOATINGIP_AGENT_INTF_KEY):
-                ports_to_populate += router[l3_const.FLOATINGIP_AGENT_INTF_KEY]
-            if router.get(l3_const.SNAT_ROUTER_INTF_KEY):
-                ports_to_populate += router[l3_const.SNAT_ROUTER_INTF_KEY]
-        ports_to_populate += interfaces
-        self._populate_subnets_for_ports(context, ports_to_populate)
-        self._process_interfaces(routers_dict, interfaces)
-        return list(routers_dict.values())
-
-    def _get_dvr_service_port_hostid(self, context, port_id, port=None):
-        """Returns the portbinding host_id for dvr service port."""
-        port_db = port or self._core_plugin.get_port(context, port_id)
-        device_owner = port_db['device_owner'] if port_db else ""
-        if (n_utils.is_dvr_serviced(device_owner) or
-            device_owner == l3_const.DEVICE_OWNER_AGENT_GW):
-            return port_db[portbindings.HOST_ID]
-
-    def _get_agent_gw_ports_exist_for_network(
-            self, context, network_id, host, agent_id):
-        """Return agent gw port if exist, or None otherwise."""
-        if not network_id:
-            LOG.debug("Network not specified")
-            return
-
-        filters = {
-            'network_id': [network_id],
-            'device_id': [agent_id],
-            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]
-        }
-        ports = self._core_plugin.get_ports(context, filters)
-        if ports:
-            return ports[0]
-
-    def delete_floatingip_agent_gateway_port(
-        self, context, host_id, ext_net_id):
-        """Function to delete FIP gateway port with given ext_net_id."""
-        # delete any fip agent gw port
-        device_filter = {'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW],
-                         'network_id': [ext_net_id]}
-        ports = self._core_plugin.get_ports(context,
-                                            filters=device_filter)
-        for p in ports:
-            if not host_id or p[portbindings.HOST_ID] == host_id:
-                self._core_plugin.ipam.delete_port(context, p['id'])
-                if host_id:
-                    return
-
-    def create_fip_agent_gw_port_if_not_exists(
-        self, context, network_id, host):
-        """Function to return the FIP Agent GW port.
-
-        This function will create a FIP Agent GW port
-        if required. If the port already exists, it
-        will return the existing port and will not
-        create a new one.
-        """
-        l3_agent_db = self._get_agent_by_type_and_host(
-            context, l3_const.AGENT_TYPE_L3, host)
-        if l3_agent_db:
-            LOG.debug("Agent ID exists: %s", l3_agent_db['id'])
-            f_port = self._get_agent_gw_ports_exist_for_network(
-                context, network_id, host, l3_agent_db['id'])
-            if not f_port:
-                LOG.info(_LI('Agent Gateway port does not exist,'
-                             ' so create one: %s'), f_port)
-                port_data = {'tenant_id': '',
-                             'network_id': network_id,
-                             'device_id': l3_agent_db['id'],
-                             'device_owner': l3_const.DEVICE_OWNER_AGENT_GW,
-                             portbindings.HOST_ID: host,
-                             'admin_state_up': True,
-                             'name': ''}
-                agent_port = p_utils.create_port(self._core_plugin, context,
-                                                 {'port': port_data})
-                if agent_port:
-                    self._populate_subnets_for_ports(context, [agent_port])
-                    return agent_port
-                msg = _("Unable to create the Agent Gateway Port")
-                raise n_exc.BadRequest(resource='router', msg=msg)
-            else:
-                self._populate_subnets_for_ports(context, [f_port])
-                return f_port
-
-    def _get_snat_interface_ports_for_router(self, context, router_id):
-        """Return all existing snat_router_interface ports."""
-        qry = context.session.query(l3_db.RouterPort)
-        qry = qry.filter_by(
-            router_id=router_id,
-            port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT
-        )
-
-        ports = [self._core_plugin._make_port_dict(rp.port, None)
-                 for rp in qry]
-        return ports
-
-    def _add_csnat_router_interface_port(
-            self, context, router, network_id, subnet_id, do_pop=True):
-        """Add SNAT interface to the specified router and subnet."""
-        port_data = {'tenant_id': '',
-                     'network_id': network_id,
-                     'fixed_ips': [{'subnet_id': subnet_id}],
-                     'device_id': router.id,
-                     'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT,
-                     'admin_state_up': True,
-                     'name': ''}
-        snat_port = p_utils.create_port(self._core_plugin, context,
-                                        {'port': port_data})
-        if not snat_port:
-            msg = _("Unable to create the SNAT Interface Port")
-            raise n_exc.BadRequest(resource='router', msg=msg)
-
-        with context.session.begin(subtransactions=True):
-            router_port = l3_db.RouterPort(
-                port_id=snat_port['id'],
-                router_id=router.id,
-                port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT
-            )
-            context.session.add(router_port)
-
-        if do_pop:
-            return self._populate_subnets_for_ports(context, [snat_port])
-        return snat_port
-
-    def _create_snat_intf_ports_if_not_exists(self, context, router):
-        """Function to return the snat interface port list.
-
-        This function will return the snat interface port list
-        if it exists. If the ports do not exist, it will create
-        new ones and then return the list.
-        """
-        port_list = self._get_snat_interface_ports_for_router(
-            context, router.id)
-        if port_list:
-            self._populate_subnets_for_ports(context, port_list)
-            return port_list
-        port_list = []
-
-        int_ports = (
-            rp.port for rp in
-            router.attached_ports.filter_by(
-                port_type=l3_const.DEVICE_OWNER_DVR_INTERFACE
-            )
-        )
-        LOG.info(_LI('SNAT interface port list does not exist,'
-                     ' so create one: %s'), port_list)
-        for intf in int_ports:
-            if intf.fixed_ips:
-                # Passing the subnet for the port to make sure the IPs
-                # are assigned on the right subnet if multiple subnets
-                # exist
-                snat_port = self._add_csnat_router_interface_port(
-                    context, router, intf['network_id'],
-                    intf['fixed_ips'][0]['subnet_id'], do_pop=False)
-                port_list.append(snat_port)
-        if port_list:
-            self._populate_subnets_for_ports(context, port_list)
-        return port_list
-
-    def _generate_arp_table_and_notify_agent(
-        self, context, fixed_ip, mac_address, notifier):
-        """Generates the arp table entry and notifies the l3 agent."""
-        ip_address = fixed_ip['ip_address']
-        subnet = fixed_ip['subnet_id']
-        filters = {'fixed_ips': {'subnet_id': [subnet]}}
-        ports = self._core_plugin.get_ports(context, filters=filters)
-        for port in ports:
-            if port['device_owner'] == l3_const.DEVICE_OWNER_DVR_INTERFACE:
-                router_id = port['device_id']
-                router_dict = self._get_router(context, router_id)
-                if router_dict.extra_attributes.distributed:
-                    arp_table = {'ip_address': ip_address,
-                                 'mac_address': mac_address,
-                                 'subnet_id': subnet}
-                    notifier(context, router_id, arp_table)
-                    return
-
-    def update_arp_entry_for_dvr_service_port(
-            self, context, port_dict, action):
-        """Notify L3 agents of ARP table entry for dvr service port.
-
-        When a dvr service port goes up or down, look for the DVR
-        router on the port's subnet, and send the ARP details to all
-        L3 agents hosting the router.
-        """
-
-        # Check this is a valid VM or service port
-        if not (n_utils.is_dvr_serviced(port_dict['device_owner']) and
-                port_dict['fixed_ips']):
-            return
-        changed_fixed_ips = port_dict['fixed_ips']
-        for fixed_ip in changed_fixed_ips:
-            if action == "add":
-                notifier = self.l3_rpc_notifier.add_arp_entry
-            elif action == "del":
-                notifier = self.l3_rpc_notifier.del_arp_entry
-            else:
-                return
-
-            self._generate_arp_table_and_notify_agent(
-                context, fixed_ip, port_dict['mac_address'], notifier)
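
A usage sketch: a port-update hook would typically invoke this with the
freshly changed port dict (the caller names are assumptions):

    # After a DVR-serviced port comes up:
    l3plugin.update_arp_entry_for_dvr_service_port(context, port, 'add')
    # ...and when it goes away:
    l3plugin.update_arp_entry_for_dvr_service_port(context, port, 'del')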
-
-    def delete_csnat_router_interface_ports(self, context,
-                                            router, subnet_id=None):
-        # Each csnat router interface port is associated
-        # with a subnet, so we need to pass the subnet id to
-        # delete the right ports.
-
-        # TODO(markmcclain): This is suboptimal but was left to reduce
-        # changeset size since it is late in cycle
-        ports = [
-            rp.port.id for rp in
-            router.attached_ports.filter_by(
-                    port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT)
-            if rp.port
-        ]
-
-        c_snat_ports = self._core_plugin.get_ports(
-            context,
-            filters={'id': ports}
-        )
-        for p in c_snat_ports:
-            if subnet_id is None:
-                self._core_plugin.delete_port(context,
-                                              p['id'],
-                                              l3_port_check=False)
-            else:
-                if p['fixed_ips'][0]['subnet_id'] == subnet_id:
-                    LOG.debug("Subnet matches: %s", subnet_id)
-                    self._core_plugin.delete_port(context,
-                                                  p['id'],
-                                                  l3_port_check=False)
-
-    def create_floatingip(self, context, floatingip,
-                          initial_status=l3_const.FLOATINGIP_STATUS_ACTIVE):
-        floating_ip = self._create_floatingip(
-            context, floatingip, initial_status)
-        self._notify_floating_ip_change(context, floating_ip)
-        return floating_ip
-
-    def _notify_floating_ip_change(self, context, floating_ip):
-        router_id = floating_ip['router_id']
-        fixed_port_id = floating_ip['port_id']
-        # we need to notify agents only if the floating IP is associated
-        if not router_id or not fixed_port_id:
-            return
-
-        try:
-            # using admin context as router may belong to admin tenant
-            router = self._get_router(context.elevated(), router_id)
-        except l3.RouterNotFound:
-            LOG.warning(_LW("Router %s was not found. "
-                            "Skipping agent notification."),
-                        router_id)
-            return
-
-        if is_distributed_router(router):
-            host = self._get_dvr_service_port_hostid(context, fixed_port_id)
-            self.l3_rpc_notifier.routers_updated_on_host(
-                context, [router_id], host)
-        else:
-            self.notify_router_updated(context, router_id)
-
-    def update_floatingip(self, context, id, floatingip):
-        old_floatingip, floatingip = self._update_floatingip(
-            context, id, floatingip)
-        self._notify_floating_ip_change(context, old_floatingip)
-        if (floatingip['router_id'] != old_floatingip['router_id'] or
-                floatingip['port_id'] != old_floatingip['port_id']):
-            self._notify_floating_ip_change(context, floatingip)
-        return floatingip
-
-    def delete_floatingip(self, context, id):
-        floating_ip = self._delete_floatingip(context, id)
-        self._notify_floating_ip_change(context, floating_ip)
-
-
-def is_distributed_router(router):
-    """Return True if router to be handled is distributed."""
-    try:
-        # See if router is a DB object first
-        requested_router_type = router.extra_attributes.distributed
-    except AttributeError:
-        # if not, try to see if it is a request body
-        requested_router_type = router.get('distributed')
-    if attributes.is_attr_set(requested_router_type):
-        return requested_router_type
-    return cfg.CONF.router_distributed
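
The fallback chain in is_distributed_router (DB object attribute, then request-body key, then the router_distributed config default) can be restated in isolation. A minimal sketch, with a stand-in Conf object and a simplified is_attr_set in place of the oslo.config and attributes helpers:

# Stand-ins: Conf mimics cfg.CONF, is_attr_set mimics the sentinel check.
class Conf(object):
    router_distributed = False  # deployment-wide default

def is_attr_set(value):
    return value is not None

def resolve_distributed(router, conf=Conf()):
    try:
        requested = router.extra_attributes.distributed  # DB object path
    except AttributeError:
        requested = router.get('distributed')            # request body path
    return requested if is_attr_set(requested) else conf.router_distributed

assert resolve_distributed({'distributed': True}) is True
assert resolve_distributed({}) is False  # falls back to the config default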
diff --git a/neutron/db/l3_dvrscheduler_db.py b/neutron/db/l3_dvrscheduler_db.py
deleted file mode 100644 (file)
index b24039d..0000000
+++ /dev/null
@@ -1,572 +0,0 @@
-#    (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
-#    All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import random
-
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-import sqlalchemy as sa
-from sqlalchemy import or_
-from sqlalchemy import orm
-from sqlalchemy.orm import joinedload
-
-from neutron._i18n import _LI, _LW
-from neutron.callbacks import events
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants as n_const
-from neutron.common import utils as n_utils
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import l3_agentschedulers_db as l3agent_sch_db
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import l3agentscheduler
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-from neutron.plugins.ml2 import db as ml2_db
-from neutron.plugins.ml2 import models as ml2_models
-
-LOG = logging.getLogger(__name__)
-
-
-class CentralizedSnatL3AgentBinding(model_base.BASEV2):
-    """Represents binding between Neutron Centralized SNAT and L3 agents."""
-
-    __tablename__ = "csnat_l3_agent_bindings"
-
-    router_id = sa.Column(sa.String(36),
-                          sa.ForeignKey("routers.id", ondelete='CASCADE'),
-                          primary_key=True)
-    l3_agent_id = sa.Column(sa.String(36),
-                            sa.ForeignKey("agents.id", ondelete='CASCADE'),
-                            primary_key=True)
-    host_id = sa.Column(sa.String(255))
-    csnat_gw_port_id = sa.Column(sa.String(36),
-                                 sa.ForeignKey('ports.id', ondelete='CASCADE'))
-    l3_agent = orm.relationship(agents_db.Agent)
-    csnat_gw_port = orm.relationship(models_v2.Port)
-
-
-class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
-    """Mixin class for L3 DVR scheduler.
-
-    DVR currently supports the following use cases:
-
-     - East/West (E/W) traffic between VMs: this is handled in a
-       distributed manner across Compute Nodes without a centralized element.
-       This includes E/W traffic between VMs on the same Compute Node.
-     - North/South traffic for Floating IPs (FIP N/S): this is supported on the
-       distributed routers on Compute Nodes without any centralized element.
-     - North/South traffic for SNAT (SNAT N/S): this is supported via a
-       centralized element that handles the SNAT traffic.
-
-    To support these use cases, DVR routers rely on an L3 agent that runs on
-    a central node (also known as the Network Node or Service Node), as well
-    as L3 agents that run individually on each Compute Node of an OpenStack
-    cloud.
-
-    Each L3 agent creates namespaces to route traffic according to the use
-    cases outlined above.  The mechanism adopted for creating and managing
-    these namespaces is (Router, Agent) binding and scheduling in general.
-
-    The main difference between distributed routers and centralized ones is
-    that in the distributed case, multiple bindings will exist, one for each
-    of the agents participating in the routed topology for the specific
-    router.
-
-    These bindings are created in the following circumstances:
-
-    - A subnet is added to a router via router-interface-add, and that subnet
-      has running VMs deployed in it.  A binding will be created between the
-      router and any L3 agent whose Compute Node is hosting the VM(s).
-    - An external gateway is set to a router via router-gateway-set.  A
-      binding will be created between the router and the L3 agent running
-      centrally on the Network Node.
-
-    Therefore, any time a router operation occurs (create, update or delete),
-    scheduling will determine whether the router needs to be associated with
-    an L3 agent, just like a regular centralized router, with the difference
-    that, in the distributed case, the required bindings are established
-    based on the state of the router and the Compute Nodes.
-    """
-
-    def dvr_handle_new_service_port(self, context, port):
-        """Handle new dvr service port creation.
-
-        When a new dvr service port is created, this function will
-        schedule a dvr router to the new compute node if needed and
-        notify the l3 agent on that node.
-        """
-        port_host = port[portbindings.HOST_ID]
-        l3_agent_on_host = (self.get_l3_agents(
-            context, filters={'host': [port_host]}) or [None])[0]
-        if not l3_agent_on_host:
-            return
-
-        ips = port['fixed_ips']
-        router_ids = self.get_dvr_routers_by_portid(context, port['id'], ips)
-        if not router_ids:
-            return
-
-        for router_id in router_ids:
-            if not self.check_l3_agent_router_binding(
-                    context, router_id, l3_agent_on_host['id']):
-                self.schedule_router(
-                    context, router_id, candidates=[l3_agent_on_host])
-            LOG.debug('DVR: Handle new service_port on router: %s', router_id)
-
-        self.l3_rpc_notifier.routers_updated_on_host(
-            context, router_ids, port_host)
-
-    def get_dvr_routers_by_portid(self, context, port_id, fixed_ips=None):
-        """Gets the dvr routers on vmport subnets."""
-        router_ids = set()
-        if fixed_ips is None:
-            port_dict = self._core_plugin.get_port(context, port_id)
-            fixed_ips = port_dict['fixed_ips']
-        for fixedip in fixed_ips:
-            vm_subnet = fixedip['subnet_id']
-            filter_sub = {'fixed_ips': {'subnet_id': [vm_subnet]},
-                          'device_owner':
-                          [n_const.DEVICE_OWNER_DVR_INTERFACE]}
-            subnet_ports = self._core_plugin.get_ports(
-                context, filters=filter_sub)
-            for subnet_port in subnet_ports:
-                router_ids.add(subnet_port['device_id'])
-        return router_ids
-
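
get_dvr_routers_by_portid walks fixed IPs to subnets, then to DVR interface ports, then to router IDs. A self-contained sketch of that traversal against a toy in-memory port table (the get_ports helper and all data are illustrative stand-ins for the core plugin):

# Toy port table; the device_owner value mirrors the DVR interface owner.
DVR_INTERFACE = 'network:router_interface_distributed'

PORTS = [
    {'id': 'dvr-if-1', 'device_id': 'router-1',
     'device_owner': DVR_INTERFACE,
     'fixed_ips': [{'subnet_id': 'subnet-a'}]},
    {'id': 'vm-port-1', 'device_id': 'vm-1',
     'device_owner': 'compute:nova',
     'fixed_ips': [{'subnet_id': 'subnet-a'}]},
]

def get_ports(filters):
    subnets = set(filters['fixed_ips']['subnet_id'])
    owners = set(filters['device_owner'])
    return [port for port in PORTS
            if port['device_owner'] in owners and
            any(ip['subnet_id'] in subnets for ip in port['fixed_ips'])]

def dvr_routers_for(fixed_ips):
    router_ids = set()
    for fixed_ip in fixed_ips:
        subnet_filter = {'fixed_ips': {'subnet_id': [fixed_ip['subnet_id']]},
                         'device_owner': [DVR_INTERFACE]}
        for port in get_ports(subnet_filter):
            router_ids.add(port['device_id'])
    return router_ids

assert dvr_routers_for([{'subnet_id': 'subnet-a'}]) == {'router-1'}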
-    def get_subnet_ids_on_router(self, context, router_id):
-        """Return subnet IDs for interfaces attached to the given router."""
-        subnet_ids = set()
-        filter_rtr = {'device_id': [router_id]}
-        int_ports = self._core_plugin.get_ports(context, filters=filter_rtr)
-        for int_port in int_ports:
-            int_ips = int_port['fixed_ips']
-            if int_ips:
-                int_subnet = int_ips[0]['subnet_id']
-                subnet_ids.add(int_subnet)
-            else:
-                LOG.debug('DVR: Could not find a subnet id '
-                          'for router %s', router_id)
-        return subnet_ids
-
-    def dvr_deletens_if_no_port(self, context, port_id, port_host=None):
-        """Delete the DVR namespace if no dvr serviced port exists."""
-        admin_context = context.elevated()
-        router_ids = self.get_dvr_routers_by_portid(admin_context, port_id)
-        if not port_host:
-            port_host = ml2_db.get_port_binding_host(admin_context.session,
-                                                     port_id)
-            if not port_host:
-                LOG.debug('Host name not found for port %s', port_id)
-                return []
-
-        if not router_ids:
-            LOG.debug('No namespaces available for this DVR port %(port)s '
-                      'on host %(host)s', {'port': port_id,
-                                           'host': port_host})
-            return []
-        removed_router_info = []
-        for router_id in router_ids:
-            subnet_ids = self.get_subnet_ids_on_router(admin_context,
-                                                       router_id)
-            if self.check_dvr_serviceable_ports_on_host(
-                    admin_context, port_host, subnet_ids, except_port=port_id):
-                continue
-            filter_rtr = {'device_id': [router_id],
-                          'device_owner':
-                          [n_const.DEVICE_OWNER_DVR_INTERFACE]}
-            int_ports = self._core_plugin.get_ports(
-                admin_context, filters=filter_rtr)
-            for port in int_ports:
-                dvr_binding = (ml2_db.
-                               get_dvr_port_binding_by_host(context.session,
-                                                            port['id'],
-                                                            port_host))
-                if dvr_binding:
-                    # unbind this port from router
-                    dvr_binding['router_id'] = None
-                    dvr_binding.update(dvr_binding)
-            agent = self._get_agent_by_type_and_host(context,
-                                                     n_const.AGENT_TYPE_L3,
-                                                     port_host)
-            info = {'router_id': router_id, 'host': port_host,
-                    'agent_id': str(agent.id)}
-            removed_router_info.append(info)
-            LOG.debug('Router namespace %(router_id)s on host %(host)s '
-                      'to be deleted', info)
-        return removed_router_info
-
-    def bind_snat_router(self, context, router_id, chosen_agent):
-        """Bind the router to the chosen l3 agent."""
-        with context.session.begin(subtransactions=True):
-            binding = CentralizedSnatL3AgentBinding()
-            binding.l3_agent = chosen_agent
-            binding.router_id = router_id
-            context.session.add(binding)
-            LOG.debug('SNAT Router %(router_id)s is scheduled to L3 agent '
-                      '%(agent_id)s', {'router_id': router_id,
-                                       'agent_id': chosen_agent.id})
-
-    def bind_dvr_router_servicenode(self, context, router_id,
-                                    chosen_snat_agent):
-        """Bind the IR router to service node if not already hosted."""
-        query = (context.session.query(l3agent_sch_db.RouterL3AgentBinding).
-                 filter_by(router_id=router_id))
-        for bind in query:
-            if bind.l3_agent_id == chosen_snat_agent.id:
-                LOG.debug('Distributed Router %(router_id)s already hosted '
-                          'on snat l3_agent %(snat_id)s',
-                          {'router_id': router_id,
-                           'snat_id': chosen_snat_agent.id})
-                return
-        with context.session.begin(subtransactions=True):
-            binding = l3agent_sch_db.RouterL3AgentBinding()
-            binding.l3_agent = chosen_snat_agent
-            binding.router_id = router_id
-            context.session.add(binding)
-            LOG.debug('Binding the distributed router %(router_id)s to '
-                      'the snat agent %(snat_id)s',
-                      {'router_id': router_id,
-                       'snat_id': chosen_snat_agent.id})
-
-    def bind_snat_servicenode(self, context, router_id, snat_candidates):
-        """Bind the snat router to the chosen l3 service agent."""
-        chosen_snat_agent = random.choice(snat_candidates)
-        self.bind_snat_router(context, router_id, chosen_snat_agent)
-        return chosen_snat_agent
-
-    def unbind_snat(self, context, router_id, agent_id=None):
-        """Unbind snat from the chosen l3 service agent.
-
-        Unbinds from all L3 agents hosting SNAT if the passed agent_id is None.
-        """
-        with context.session.begin(subtransactions=True):
-            query = (context.session.
-                     query(CentralizedSnatL3AgentBinding).
-                     filter_by(router_id=router_id))
-            if agent_id:
-                query = query.filter_by(l3_agent_id=agent_id)
-            binding = query.first()
-            if not binding:
-                LOG.debug('no SNAT router binding found for router: '
-                          '%(router)s, agent: %(agent)s',
-                          {'router': router_id, 'agent': agent_id or 'any'})
-                return
-
-            query.delete()
-        LOG.debug('Deleted binding of the SNAT router %s', router_id)
-
-        return binding
-
-    def unbind_router_servicenode(self, context, router_id, binding):
-        """Unbind the router from the chosen l3 service agent."""
-        port_found = False
-        with context.session.begin(subtransactions=True):
-            host = binding.l3_agent.host
-            subnet_ids = self.get_subnet_ids_on_router(context, router_id)
-            for subnet in subnet_ids:
-                ports = (
-                    self._core_plugin.get_ports_on_host_by_subnet(
-                        context, host, subnet))
-                for port in ports:
-                    if (n_utils.is_dvr_serviced(port['device_owner'])):
-                        port_found = True
-                        LOG.debug('One or more ports exist on the snat '
-                                  'enabled l3_agent host %(host)s and '
-                                  'router_id %(id)s',
-                                  {'host': host, 'id': router_id})
-                        break
-            agent_id = binding.l3_agent_id
-
-            if not port_found:
-                context.session.query(
-                    l3agent_sch_db.RouterL3AgentBinding).filter_by(
-                        router_id=router_id, l3_agent_id=agent_id).delete(
-                            synchronize_session=False)
-
-        if not port_found:
-            self.l3_rpc_notifier.router_removed_from_agent(
-                context, router_id, host)
-            LOG.debug('Removed binding for router %(router_id)s and '
-                      'agent %(agent_id)s',
-                      {'router_id': router_id, 'agent_id': agent_id})
-        return port_found
-
-    def unbind_snat_servicenode(self, context, router_id):
-        """Unbind snat AND the router from the current agent."""
-        with context.session.begin(subtransactions=True):
-            binding = self.unbind_snat(context, router_id)
-            if binding:
-                self.unbind_router_servicenode(context, router_id, binding)
-
-    def get_snat_bindings(self, context, router_ids):
-        """Retrieves the dvr snat bindings for a router."""
-        if not router_ids:
-            return []
-        query = context.session.query(CentralizedSnatL3AgentBinding)
-        query = query.options(joinedload('l3_agent')).filter(
-            CentralizedSnatL3AgentBinding.router_id.in_(router_ids))
-        return query.all()
-
-    def get_snat_candidates(self, sync_router, l3_agents):
-        """Get the valid snat enabled l3 agents for the distributed router."""
-        candidates = []
-        is_router_distributed = sync_router.get('distributed', False)
-        if not is_router_distributed:
-            return candidates
-        for l3_agent in l3_agents:
-            if not l3_agent.admin_state_up:
-                continue
-
-            agent_conf = self.get_configuration_dict(l3_agent)
-            agent_mode = agent_conf.get(n_const.L3_AGENT_MODE,
-                                        n_const.L3_AGENT_MODE_LEGACY)
-            if agent_mode != n_const.L3_AGENT_MODE_DVR_SNAT:
-                continue
-
-            router_id = agent_conf.get('router_id', None)
-            if router_id and router_id != sync_router['id']:
-                continue
-
-            handle_internal_only_routers = agent_conf.get(
-                'handle_internal_only_routers', True)
-            gateway_external_network_id = agent_conf.get(
-                'gateway_external_network_id', None)
-            ex_net_id = (sync_router['external_gateway_info'] or {}).get(
-                'network_id')
-            if ((not ex_net_id and not handle_internal_only_routers) or
-                (ex_net_id and gateway_external_network_id and
-                 ex_net_id != gateway_external_network_id)):
-                continue
-
-            candidates.append(l3_agent)
-        return candidates
-
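
The loop above applies four predicates per agent: admin_state_up, dvr_snat agent mode, optional router pinning, and external-network compatibility. A condensed restatement with plain dicts standing in for the agent, its configuration, and the router (key names mirror the originals but this is a sketch, not the mixin's API):

def is_snat_candidate(agent, agent_conf, router):
    if not agent['admin_state_up']:
        return False
    if agent_conf.get('agent_mode', 'legacy') != 'dvr_snat':
        return False
    pinned_router = agent_conf.get('router_id')
    if pinned_router and pinned_router != router['id']:
        return False
    handle_internal = agent_conf.get('handle_internal_only_routers', True)
    gw_net = agent_conf.get('gateway_external_network_id')
    ex_net = (router.get('external_gateway_info') or {}).get('network_id')
    if not ex_net and not handle_internal:
        return False  # agent refuses routers without an external gateway
    if ex_net and gw_net and ex_net != gw_net:
        return False  # agent is pinned to a different external network
    return True

router = {'id': 'r1', 'external_gateway_info': {'network_id': 'ext-net'}}
up_agent = {'admin_state_up': True}
assert is_snat_candidate(up_agent, {'agent_mode': 'dvr_snat'}, router)
assert not is_snat_candidate(up_agent, {'agent_mode': 'legacy'}, router)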
-    def schedule_snat_router(self, context, router_id, sync_router):
-        """Schedule the snat router on l3 service agent."""
-        active_l3_agents = self.get_l3_agents(context, active=True)
-        if not active_l3_agents:
-            LOG.warning(_LW('No active L3 agents found for SNAT'))
-            return
-        snat_candidates = self.get_snat_candidates(sync_router,
-                                                   active_l3_agents)
-        if not snat_candidates:
-            LOG.warning(_LW('No candidates found for SNAT'))
-            return
-        else:
-            try:
-                chosen_agent = self.bind_snat_servicenode(
-                    context, router_id, snat_candidates)
-            except db_exc.DBDuplicateEntry:
-                LOG.info(_LI("SNAT already bound to a service node."))
-                return
-            self.bind_dvr_router_servicenode(
-                context, router_id, chosen_agent)
-            return chosen_agent
-
-    def _unschedule_router(self, context, router_id, agents_ids):
-        router = self.get_router(context, router_id)
-        if router.get('distributed', False):
-            # for a DVR router, unscheduling just unschedules the SNAT portion
-            self.unbind_snat_servicenode(context, router_id)
-        else:
-            super(L3_DVRsch_db_mixin, self)._unschedule_router(
-                context, router_id, agents_ids)
-
-    def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
-                                               router_ids):
-        if n_utils.is_extension_supported(self, n_const.L3_HA_MODE_EXT_ALIAS):
-            return self.get_ha_sync_data_for_host(context, host, agent,
-                                                  router_ids=router_ids,
-                                                  active=True)
-        return self._get_dvr_sync_data(context, host, agent,
-                                       router_ids=router_ids, active=True)
-
-    def check_agent_router_scheduling_needed(self, context, agent, router):
-        if router.get('distributed'):
-            if router['external_gateway_info']:
-                return not self.get_snat_bindings(context, [router['id']])
-            return False
-        return super(L3_DVRsch_db_mixin,
-                     self).check_agent_router_scheduling_needed(
-                     context, agent, router)
-
-    def create_router_to_agent_binding(self, context, agent, router):
-        """Create router to agent binding."""
-        router_id = router['id']
-        agent_id = agent['id']
-        if router['external_gateway_info'] and self.router_scheduler and (
-                router.get('distributed')):
-            try:
-                self.bind_snat_router(context, router_id, agent)
-                self.bind_dvr_router_servicenode(context,
-                                                 router_id, agent)
-            except db_exc.DBError:
-                raise l3agentscheduler.RouterSchedulingFailed(
-                    router_id=router_id,
-                    agent_id=agent_id)
-        else:
-            super(L3_DVRsch_db_mixin, self).create_router_to_agent_binding(
-                  context, agent, router)
-
-    def remove_router_from_l3_agent(self, context, agent_id, router_id):
-        binding = None
-        router = self.get_router(context, router_id)
-        if router['external_gateway_info'] and router.get('distributed'):
-            binding = self.unbind_snat(context, router_id, agent_id=agent_id)
-            # binding only exists when agent mode is dvr_snat
-            if binding:
-                notification_not_sent = self.unbind_router_servicenode(context,
-                                             router_id, binding)
-                if notification_not_sent:
-                    self.l3_rpc_notifier.routers_updated(
-                        context, [router_id], schedule_routers=False)
-
-        # The below needs to be done when the agent mode is legacy or dvr.
-        if not binding:
-            super(L3_DVRsch_db_mixin,
-                  self).remove_router_from_l3_agent(
-                    context, agent_id, router_id)
-
-    def get_hosts_to_notify(self, context, router_id):
-        """Returns all hosts to send notification about router update"""
-        hosts = super(L3_DVRsch_db_mixin, self).get_hosts_to_notify(
-            context, router_id)
-        router = self.get_router(context, router_id)
-        if router.get('distributed', False):
-            dvr_hosts = self._get_dvr_hosts_for_router(context, router_id)
-            dvr_hosts = set(dvr_hosts) - set(hosts)
-            state = agentschedulers_db.get_admin_state_up_filter()
-            agents = self.get_l3_agents(context, active=state,
-                                        filters={'host': dvr_hosts})
-            hosts += [a.host for a in agents]
-
-        return hosts
-
-    def _get_dvr_hosts_for_router(self, context, router_id):
-        """Get a list of hosts where specified DVR router should be hosted
-
-        It will first get IDs of all subnets connected to the router and then
-        get a set of hosts where all dvr serviceable ports on those subnets
-        are bound
-        """
-        subnet_ids = self.get_subnet_ids_on_router(context, router_id)
-        Binding = ml2_models.PortBinding
-        Port = models_v2.Port
-        IPAllocation = models_v2.IPAllocation
-
-        query = context.session.query(Binding.host).distinct()
-        query = query.join(Binding.port)
-        query = query.join(Port.fixed_ips)
-        query = query.filter(IPAllocation.subnet_id.in_(subnet_ids))
-        owner_filter = or_(
-            Port.device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX),
-            Port.device_owner.in_(
-                n_utils.get_other_dvr_serviced_device_owners()))
-        query = query.filter(owner_filter)
-        hosts = [item[0] for item in query]
-        LOG.debug('Hosts for router %s: %s', router_id, hosts)
-        return hosts
-
-
-def _notify_l3_agent_new_port(resource, event, trigger, **kwargs):
-    LOG.debug('Received %(resource)s %(event)s', {
-        'resource': resource,
-        'event': event})
-    port = kwargs.get('port')
-    if not port:
-        return
-
-    if n_utils.is_dvr_serviced(port['device_owner']):
-        l3plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        context = kwargs['context']
-        l3plugin.dvr_handle_new_service_port(context, port)
-        l3plugin.update_arp_entry_for_dvr_service_port(context, port, "add")
-
-
-def _notify_port_delete(resource, event, trigger, **kwargs):
-    context = kwargs['context']
-    port = kwargs['port']
-    removed_routers = kwargs['removed_routers']
-    l3plugin = manager.NeutronManager.get_service_plugins().get(
-        service_constants.L3_ROUTER_NAT)
-    l3plugin.update_arp_entry_for_dvr_service_port(context, port, "del")
-    for router in removed_routers:
-        # we need admin context in case a tenant removes the last dvr
-        # serviceable port on a shared network owned by admin, where router
-        # is also owned by admin
-        l3plugin.remove_router_from_l3_agent(
-            context.elevated(), router['agent_id'], router['router_id'])
-
-
-def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
-    new_port = kwargs.get('port')
-    original_port = kwargs.get('original_port')
-
-    if new_port and original_port:
-        original_device_owner = original_port.get('device_owner', '')
-        new_device_owner = new_port.get('device_owner', '')
-        l3plugin = manager.NeutronManager.get_service_plugins().get(
-                service_constants.L3_ROUTER_NAT)
-        context = kwargs['context']
-        is_port_no_longer_serviced = (
-            n_utils.is_dvr_serviced(original_device_owner) and
-            not n_utils.is_dvr_serviced(new_device_owner))
-        is_port_moved = (
-            original_port[portbindings.HOST_ID] and
-            original_port[portbindings.HOST_ID] !=
-            new_port[portbindings.HOST_ID])
-        if is_port_no_longer_serviced or is_port_moved:
-            removed_routers = l3plugin.dvr_deletens_if_no_port(
-                context,
-                original_port['id'],
-                port_host=original_port[portbindings.HOST_ID])
-            if removed_routers:
-                removed_router_args = {
-                    'context': context,
-                    'port': original_port,
-                    'removed_routers': removed_routers,
-                }
-                _notify_port_delete(
-                    resource, event, trigger, **removed_router_args)
-            if not n_utils.is_dvr_serviced(new_device_owner):
-                return
-        is_fixed_ips_changed = (
-            'fixed_ips' in new_port and
-            'fixed_ips' in original_port and
-            new_port['fixed_ips'] != original_port['fixed_ips'])
-        is_new_port_binding_changed = (
-            new_port[portbindings.HOST_ID] and
-            (original_port[portbindings.HOST_ID] !=
-                new_port[portbindings.HOST_ID]))
-        if (is_new_port_binding_changed and
-            n_utils.is_dvr_serviced(new_device_owner)):
-            l3plugin.dvr_handle_new_service_port(context, new_port)
-            l3plugin.update_arp_entry_for_dvr_service_port(
-                context, new_port, "add")
-        elif kwargs.get('mac_address_updated') or is_fixed_ips_changed:
-            l3plugin.update_arp_entry_for_dvr_service_port(
-                context, new_port, "add")
-
-
-def subscribe():
-    registry.subscribe(
-        _notify_l3_agent_port_update, resources.PORT, events.AFTER_UPDATE)
-    registry.subscribe(
-        _notify_l3_agent_new_port, resources.PORT, events.AFTER_CREATE)
-    registry.subscribe(
-        _notify_port_delete, resources.PORT, events.AFTER_DELETE)
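
subscribe() relies on the callback registry invoking each handler as handler(resource, event, trigger, **kwargs). A toy registry showing that dispatch shape (illustrative only, not the real neutron.callbacks API):

# Toy registry mirroring the (resource, event, trigger, **kwargs) dispatch.
from collections import defaultdict

_callbacks = defaultdict(list)

def subscribe(callback, resource, event):
    _callbacks[(resource, event)].append(callback)

def notify(resource, event, trigger, **kwargs):
    for callback in _callbacks[(resource, event)]:
        callback(resource, event, trigger, **kwargs)

def on_port_create(resource, event, trigger, **kwargs):
    print('%s %s from %s: port %s'
          % (resource, event, trigger, kwargs['port']['id']))

subscribe(on_port_create, 'port', 'after_create')
notify('port', 'after_create', 'ml2', context=object(), port={'id': 'p1'})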
diff --git a/neutron/db/l3_gwmode_db.py b/neutron/db/l3_gwmode_db.py
deleted file mode 100644 (file)
index ad21823..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo_config import cfg
-import sqlalchemy as sa
-from sqlalchemy import sql
-
-from neutron._i18n import _
-from neutron.db import db_base_plugin_v2
-from neutron.db import l3_db
-from neutron.extensions import l3
-
-
-OPTS = [
-    cfg.BoolOpt('enable_snat_by_default', default=True,
-                help=_('Define the default value of enable_snat if not '
-                       'provided in external_gateway_info.'))
-]
-cfg.CONF.register_opts(OPTS)
-EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
-
-# Modify the Router Data Model adding the enable_snat attribute
-setattr(l3_db.Router, 'enable_snat',
-        sa.Column(sa.Boolean, default=True, server_default=sql.true(),
-                  nullable=False))
-
-
-class L3_NAT_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
-    """Mixin class to add configurable gateway modes."""
-
-    # Register dict extend functions for ports and networks
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        l3.ROUTERS, ['_extend_router_dict_gw_mode'])
-
-    def _extend_router_dict_gw_mode(self, router_res, router_db):
-        if router_db.gw_port_id:
-            nw_id = router_db.gw_port['network_id']
-            router_res[EXTERNAL_GW_INFO] = {
-                'network_id': nw_id,
-                'enable_snat': router_db.enable_snat,
-                'external_fixed_ips': [
-                    {'subnet_id': ip["subnet_id"],
-                     'ip_address': ip["ip_address"]}
-                    for ip in router_db.gw_port['fixed_ips']
-                ]
-            }
-
-    def _update_router_gw_info(self, context, router_id, info, router=None):
-        # Load the router only if necessary
-        if not router:
-            router = self._get_router(context, router_id)
-        with context.session.begin(subtransactions=True):
-            router.enable_snat = self._get_enable_snat(info)
-
-        # Calls superclass, pass router db object for avoiding re-loading
-        super(L3_NAT_dbonly_mixin, self)._update_router_gw_info(
-            context, router_id, info, router=router)
-        # Returning the router might come back useful if this
-        # method is overridden in child classes
-        return router
-
-    @staticmethod
-    def _get_enable_snat(info):
-        if info and 'enable_snat' in info:
-            return info['enable_snat']
-        # if enable_snat is not specified then use the default value
-        return cfg.CONF.enable_snat_by_default
-
-    def _build_routers_list(self, context, routers, gw_ports):
-        for rtr in routers:
-            gw_port_id = rtr['gw_port_id']
-            # Collect gw ports only if available
-            if gw_port_id and gw_ports.get(gw_port_id):
-                rtr['gw_port'] = gw_ports[gw_port_id]
-                # Add enable_snat key
-                rtr['enable_snat'] = rtr[EXTERNAL_GW_INFO]['enable_snat']
-        return routers
-
-
-class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, l3_db.L3_NAT_db_mixin):
-    pass
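
_get_enable_snat above is small but load-bearing: when enable_snat is absent from external_gateway_info, the value comes from the enable_snat_by_default option. A standalone restatement, with a stand-in Conf object in place of cfg.CONF:

# Stand-in for cfg.CONF; only the one relevant option is modeled.
class Conf(object):
    enable_snat_by_default = True

def get_enable_snat(info, conf=Conf()):
    if info and 'enable_snat' in info:
        return info['enable_snat']
    return conf.enable_snat_by_default  # fall back to the config default

assert get_enable_snat({'enable_snat': False}) is False
assert get_enable_snat({'network_id': 'ext-net'}) is True
assert get_enable_snat(None) is True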
diff --git a/neutron/db/l3_hamode_db.py b/neutron/db/l3_hamode_db.py
deleted file mode 100644 (file)
index 14d07b9..0000000
+++ /dev/null
@@ -1,640 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import netaddr
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from oslo_utils import excutils
-import sqlalchemy as sa
-from sqlalchemy import orm
-
-from neutron._i18n import _, _LI
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import utils as n_utils
-from neutron.db import agents_db
-from neutron.db.availability_zone import router as router_az_db
-from neutron.db import l3_attrs_db
-from neutron.db import l3_db
-from neutron.db import l3_dvr_db
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import l3_ext_ha_mode as l3_ha
-from neutron.extensions import portbindings
-from neutron.extensions import providernet
-from neutron.plugins.common import utils as p_utils
-
-
-VR_ID_RANGE = set(range(1, 255))
-MAX_ALLOCATION_TRIES = 10
-UNLIMITED_AGENTS_PER_ROUTER = 0
-
-LOG = logging.getLogger(__name__)
-
-L3_HA_OPTS = [
-    cfg.BoolOpt('l3_ha',
-                default=False,
-                help=_('Enable HA mode for virtual routers.')),
-    cfg.IntOpt('max_l3_agents_per_router',
-               default=3,
-               help=_("Maximum number of L3 agents which a HA router will be "
-                      "scheduled on. If it is set to 0 then the router will "
-                      "be scheduled on every agent.")),
-    cfg.IntOpt('min_l3_agents_per_router',
-               default=constants.MINIMUM_AGENTS_FOR_HA,
-               help=_("Minimum number of L3 agents which a HA router will be "
-                      "scheduled on.")),
-    cfg.StrOpt('l3_ha_net_cidr',
-               default='169.254.192.0/18',
-               help=_('Subnet used for the l3 HA admin network.')),
-    cfg.StrOpt('l3_ha_network_type', default='',
-               help=_("The network type to use when creating the HA network "
-                      "for an HA router. By default or if empty, the first "
-                      "'tenant_network_types' is used. This is helpful when "
-                      "the VRRP traffic should use a specific network which "
-                      "is not the default one.")),
-    cfg.StrOpt('l3_ha_network_physical_name', default='',
-               help=_("The physical network name with which the HA network "
-                      "can be created."))
-]
-cfg.CONF.register_opts(L3_HA_OPTS)
-
-
-class L3HARouterAgentPortBinding(model_base.BASEV2):
-    """Represent agent binding state of a HA router port.
-
-    An HA router has one HA port per agent on which it is spawned.
-    This binding table stores which port is used for an HA router by an
-    L3 agent.
-    """
-
-    __tablename__ = 'ha_router_agent_port_bindings'
-
-    port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
-                                                     ondelete='CASCADE'),
-                        nullable=False, primary_key=True)
-    port = orm.relationship(models_v2.Port)
-
-    router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id',
-                                                       ondelete='CASCADE'),
-                          nullable=False)
-
-    l3_agent_id = sa.Column(sa.String(36),
-                            sa.ForeignKey("agents.id",
-                                          ondelete='CASCADE'))
-    agent = orm.relationship(agents_db.Agent)
-
-    state = sa.Column(sa.Enum(constants.HA_ROUTER_STATE_ACTIVE,
-                              constants.HA_ROUTER_STATE_STANDBY,
-                              name='l3_ha_states'),
-                      default=constants.HA_ROUTER_STATE_STANDBY,
-                      server_default=constants.HA_ROUTER_STATE_STANDBY)
-
-
-class L3HARouterNetwork(model_base.BASEV2):
-    """Host HA network for a tenant.
-
-    One HA network is used per tenant; all HA router ports are created
-    on this network.
-    """
-
-    __tablename__ = 'ha_router_networks'
-
-    tenant_id = sa.Column(sa.String(attributes.TENANT_ID_MAX_LEN),
-                          primary_key=True, nullable=False)
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
-                           nullable=False, primary_key=True)
-    network = orm.relationship(models_v2.Network)
-
-
-class L3HARouterVRIdAllocation(model_base.BASEV2):
-    """VRID allocation per HA network.
-
-    Keep track of the VRID allocations per HA network.
-    """
-
-    __tablename__ = 'ha_router_vrid_allocations'
-
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
-                           nullable=False, primary_key=True)
-    vr_id = sa.Column(sa.Integer(), nullable=False, primary_key=True)
-
-
-class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
-                         router_az_db.RouterAvailabilityZoneMixin):
-    """Mixin class to add high availability capability to routers."""
-
-    extra_attributes = (
-        l3_dvr_db.L3_NAT_with_dvr_db_mixin.extra_attributes +
-        router_az_db.RouterAvailabilityZoneMixin.extra_attributes + [
-            {'name': 'ha', 'default': cfg.CONF.l3_ha},
-            {'name': 'ha_vr_id', 'default': 0}])
-
-    def _verify_configuration(self):
-        self.ha_cidr = cfg.CONF.l3_ha_net_cidr
-        try:
-            net = netaddr.IPNetwork(self.ha_cidr)
-        except netaddr.AddrFormatError:
-            raise l3_ha.HANetworkCIDRNotValid(cidr=self.ha_cidr)
-        if ('/' not in self.ha_cidr or net.network != net.ip):
-            raise l3_ha.HANetworkCIDRNotValid(cidr=self.ha_cidr)
-
-        self._check_num_agents_per_router()
-
-    def _check_num_agents_per_router(self):
-        max_agents = cfg.CONF.max_l3_agents_per_router
-        min_agents = cfg.CONF.min_l3_agents_per_router
-
-        if (max_agents != UNLIMITED_AGENTS_PER_ROUTER
-            and max_agents < min_agents):
-            raise l3_ha.HAMaximumAgentsNumberNotValid(
-                max_agents=max_agents, min_agents=min_agents)
-
-        if min_agents < constants.MINIMUM_AGENTS_FOR_HA:
-            raise l3_ha.HAMinimumAgentsNumberNotValid()
-
-    def __init__(self):
-        self._verify_configuration()
-        super(L3_HA_NAT_db_mixin, self).__init__()
-
-    def get_ha_network(self, context, tenant_id):
-        return (context.session.query(L3HARouterNetwork).
-                filter(L3HARouterNetwork.tenant_id == tenant_id).
-                first())
-
-    def _get_allocated_vr_id(self, context, network_id):
-        with context.session.begin(subtransactions=True):
-            query = (context.session.query(L3HARouterVRIdAllocation).
-                     filter(L3HARouterVRIdAllocation.network_id == network_id))
-
-            allocated_vr_ids = set(a.vr_id for a in query) - set([0])
-
-        return allocated_vr_ids
-
-    def _allocate_vr_id(self, context, network_id, router_id):
-        for count in range(MAX_ALLOCATION_TRIES):
-            try:
-                # NOTE(kevinbenton): we disallow subtransactions because the
-                # retry logic will bust any parent transactions
-                with context.session.begin():
-                    allocated_vr_ids = self._get_allocated_vr_id(context,
-                                                                 network_id)
-                    available_vr_ids = VR_ID_RANGE - allocated_vr_ids
-
-                    if not available_vr_ids:
-                        raise l3_ha.NoVRIDAvailable(router_id=router_id)
-
-                    allocation = L3HARouterVRIdAllocation()
-                    allocation.network_id = network_id
-                    allocation.vr_id = available_vr_ids.pop()
-
-                    context.session.add(allocation)
-
-                    return allocation.vr_id
-
-            except db_exc.DBDuplicateEntry:
-                LOG.info(_LI("Attempt %(count)s to allocate a VRID in the "
-                             "network %(network)s for the router %(router)s"),
-                         {'count': count, 'network': network_id,
-                          'router': router_id})
-
-        raise l3_ha.MaxVRIDAllocationTriesReached(
-            network_id=network_id, router_id=router_id,
-            max_tries=MAX_ALLOCATION_TRIES)
-
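
_allocate_vr_id tolerates racing allocators by retrying on a duplicate-key error up to MAX_ALLOCATION_TRIES times. A sketch of the same insert-and-retry pattern, with an in-memory set playing the ha_router_vrid_allocations table and a stand-in DuplicateEntry for oslo.db's DBDuplicateEntry:

VR_ID_RANGE = set(range(1, 255))
MAX_ALLOCATION_TRIES = 10

class DuplicateEntry(Exception):
    """Stand-in for oslo.db's DBDuplicateEntry."""

_allocated = set()

def insert_allocation(vr_id):
    # the DB's unique constraint is modeled by membership in the set
    if vr_id in _allocated:
        raise DuplicateEntry(vr_id)
    _allocated.add(vr_id)

def allocate_vr_id():
    for _ in range(MAX_ALLOCATION_TRIES):
        available = VR_ID_RANGE - _allocated
        if not available:
            raise RuntimeError('no VRID available')
        try:
            vr_id = available.pop()
            insert_allocation(vr_id)
            return vr_id
        except DuplicateEntry:
            continue  # a concurrent allocator won the race; try again
    raise RuntimeError('gave up after %d tries' % MAX_ALLOCATION_TRIES)

assert allocate_vr_id() in VR_ID_RANGE
assert len(_allocated) == 1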
-    def _delete_vr_id_allocation(self, context, ha_network, vr_id):
-        with context.session.begin(subtransactions=True):
-            context.session.query(L3HARouterVRIdAllocation).filter_by(
-                network_id=ha_network.network_id,
-                vr_id=vr_id).delete()
-
-    def _set_vr_id(self, context, router, ha_network):
-        router.extra_attributes.ha_vr_id = self._allocate_vr_id(
-            context, ha_network.network_id, router.id)
-
-    def _create_ha_subnet(self, context, network_id, tenant_id):
-        args = {'network_id': network_id,
-                'tenant_id': '',
-                'name': constants.HA_SUBNET_NAME % tenant_id,
-                'ip_version': 4,
-                'cidr': cfg.CONF.l3_ha_net_cidr,
-                'enable_dhcp': False,
-                'gateway_ip': None}
-        return p_utils.create_subnet(self._core_plugin, context,
-                                     {'subnet': args})
-
-    def _create_ha_network_tenant_binding(self, context, tenant_id,
-                                          network_id):
-        with context.session.begin(subtransactions=True):
-            ha_network = L3HARouterNetwork(tenant_id=tenant_id,
-                                           network_id=network_id)
-            context.session.add(ha_network)
-        return ha_network
-
-    def _add_ha_network_settings(self, network):
-        if cfg.CONF.l3_ha_network_type:
-            network[providernet.NETWORK_TYPE] = cfg.CONF.l3_ha_network_type
-
-        if cfg.CONF.l3_ha_network_physical_name:
-            network[providernet.PHYSICAL_NETWORK] = (
-                cfg.CONF.l3_ha_network_physical_name)
-
-    def _create_ha_network(self, context, tenant_id):
-        admin_ctx = context.elevated()
-
-        args = {'network':
-                {'name': constants.HA_NETWORK_NAME % tenant_id,
-                 'tenant_id': '',
-                 'shared': False,
-                 'admin_state_up': True}}
-        self._add_ha_network_settings(args['network'])
-        network = p_utils.create_network(self._core_plugin, admin_ctx, args)
-
-        try:
-            ha_network = self._create_ha_network_tenant_binding(admin_ctx,
-                                                                tenant_id,
-                                                                network['id'])
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                self._core_plugin.delete_network(admin_ctx, network['id'])
-
-        try:
-            self._create_ha_subnet(admin_ctx, network['id'], tenant_id)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                self._core_plugin.delete_network(admin_ctx, network['id'])
-
-        return ha_network
-
-    def get_number_of_agents_for_scheduling(self, context):
-        """Return the number of agents on which the router will be scheduled.
-
-        Raises an exception if there are not enough agents available to honor
-        the min_agents config parameter. If the max_agents parameter is set
-        to 0, all the agents will be used.
-        """
-
-        min_agents = cfg.CONF.min_l3_agents_per_router
-        num_agents = len(self.get_l3_agents(context, active=True,
-            filters={'agent_modes': [constants.L3_AGENT_MODE_LEGACY,
-                                     constants.L3_AGENT_MODE_DVR_SNAT]}))
-        max_agents = cfg.CONF.max_l3_agents_per_router
-        if max_agents:
-            if max_agents > num_agents:
-                LOG.info(_LI("Number of active agents lower than "
-                             "max_l3_agents_per_router. L3 agents "
-                             "available: %s"), num_agents)
-            else:
-                num_agents = max_agents
-
-        if num_agents < min_agents:
-            raise l3_ha.HANotEnoughAvailableAgents(min_agents=min_agents,
-                                                   num_agents=num_agents)
-
-        return num_agents
-
-    def _create_ha_port_binding(self, context, port_id, router_id):
-        with context.session.begin(subtransactions=True):
-            portbinding = L3HARouterAgentPortBinding(port_id=port_id,
-                                                     router_id=router_id)
-            context.session.add(portbinding)
-
-        return portbinding
-
-    def add_ha_port(self, context, router_id, network_id, tenant_id):
-        # NOTE(kevinbenton): we have to block any ongoing transactions because
-        # our exception handling will try to delete the port using the normal
-        # core plugin API. If this function is called inside of a transaction
-        # the exception will mangle the state, cause the delete call to fail,
-        # and end up relying on the DB rollback to remove the port instead of
-        # a proper delete_port call.
-        if context.session.is_active:
-            raise RuntimeError(_('add_ha_port cannot be called inside of a '
-                                 'transaction.'))
-        args = {'tenant_id': '',
-                'network_id': network_id,
-                'admin_state_up': True,
-                'device_id': router_id,
-                'device_owner': constants.DEVICE_OWNER_ROUTER_HA_INTF,
-                'name': constants.HA_PORT_NAME % tenant_id}
-        port = p_utils.create_port(self._core_plugin, context,
-                                 {'port': args})
-
-        try:
-            return self._create_ha_port_binding(context, port['id'], router_id)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                self._core_plugin.delete_port(context, port['id'],
-                                              l3_port_check=False)
-
-    def _create_ha_interfaces(self, context, router, ha_network):
-        admin_ctx = context.elevated()
-
-        num_agents = self.get_number_of_agents_for_scheduling(context)
-
-        port_ids = []
-        try:
-            for index in range(num_agents):
-                binding = self.add_ha_port(admin_ctx, router.id,
-                                           ha_network.network['id'],
-                                           router.tenant_id)
-                port_ids.append(binding.port_id)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                for port_id in port_ids:
-                    self._core_plugin.delete_port(admin_ctx, port_id,
-                                                  l3_port_check=False)
-
-    def _delete_ha_interfaces(self, context, router_id):
-        admin_ctx = context.elevated()
-        device_filter = {'device_id': [router_id],
-                         'device_owner':
-                         [constants.DEVICE_OWNER_ROUTER_HA_INTF]}
-        ports = self._core_plugin.get_ports(admin_ctx, filters=device_filter)
-
-        for port in ports:
-            self._core_plugin.delete_port(admin_ctx, port['id'],
-                                          l3_port_check=False)
-
-    def delete_ha_interfaces_on_host(self, context, router_id, host):
-        admin_ctx = context.elevated()
-        port_ids = (binding.port_id for binding
-                    in self.get_ha_router_port_bindings(admin_ctx,
-                                                        [router_id], host))
-        for port_id in port_ids:
-            self._core_plugin.delete_port(admin_ctx, port_id,
-                                          l3_port_check=False)
-
-    def _notify_ha_interfaces_updated(self, context, router_id):
-        self.l3_rpc_notifier.routers_updated(
-            context, [router_id], shuffle_agents=True)
-
-    @classmethod
-    def _is_ha(cls, router):
-        ha = router.get('ha')
-        if not attributes.is_attr_set(ha):
-            ha = cfg.CONF.l3_ha
-        return ha
-
-    def create_router(self, context, router):
-        is_ha = self._is_ha(router['router'])
-
-        if is_ha and l3_dvr_db.is_distributed_router(router['router']):
-            raise l3_ha.DistributedHARouterNotSupported()
-
-        router['router']['ha'] = is_ha
-        router_dict = super(L3_HA_NAT_db_mixin,
-                            self).create_router(context, router)
-
-        if is_ha:
-            try:
-                router_db = self._get_router(context, router_dict['id'])
-                ha_network = self.get_ha_network(context,
-                                                 router_db.tenant_id)
-                if not ha_network:
-                    ha_network = self._create_ha_network(context,
-                                                         router_db.tenant_id)
-
-                self._set_vr_id(context, router_db, ha_network)
-                self._create_ha_interfaces(context, router_db, ha_network)
-                self._notify_ha_interfaces_updated(context, router_db.id)
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    self.delete_router(context, router_dict['id'])
-            router_dict['ha_vr_id'] = router_db.extra_attributes.ha_vr_id
-        return router_dict
-
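
The HA branch of create_router above follows a create-then-cleanup-on-failure shape: if any HA setup step throws, the half-created router is deleted and the original exception re-raised. A rough sketch of that shape, with a simplified stand-in for oslo.utils' save_and_reraise_exception (the real helper is considerably more careful about nested failures):

import contextlib
import sys

@contextlib.contextmanager
def save_and_reraise_exception():
    # capture the exception being handled, run the cleanup body,
    # then re-raise the original error
    exc = sys.exc_info()[1]
    try:
        yield
    finally:
        raise exc

def create_router_with_ha(create, set_up_ha, delete):
    router_id = create()
    try:
        set_up_ha(router_id)
    except Exception:
        with save_and_reraise_exception():
            delete(router_id)  # undo the half-created router
    return router_id

deleted = []

def failing_setup(router_id):
    raise ValueError('HA setup failed')

try:
    create_router_with_ha(lambda: 'r1', failing_setup, deleted.append)
except ValueError:
    pass
assert deleted == ['r1']  # cleanup ran, original error still propagated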
-    def _update_router_db(self, context, router_id, data):
-        router_db = self._get_router(context, router_id)
-
-        original_distributed_state = router_db.extra_attributes.distributed
-        original_ha_state = router_db.extra_attributes.ha
-
-        requested_ha_state = data.pop('ha', None)
-        requested_distributed_state = data.get('distributed', None)
-
-        if ((original_ha_state and requested_distributed_state) or
-            (requested_ha_state and original_distributed_state) or
-            (requested_ha_state and requested_distributed_state)):
-            raise l3_ha.DistributedHARouterNotSupported()
-
-        with context.session.begin(subtransactions=True):
-            router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db(
-                context, router_id, data)
-
-            ha_not_changed = (requested_ha_state is None or
-                              requested_ha_state == original_ha_state)
-            if ha_not_changed:
-                return router_db
-
-            if router_db.admin_state_up:
-                msg = _('Cannot change HA attribute of active routers. Please '
-                        'set router admin_state_up to False prior to upgrade.')
-                raise n_exc.BadRequest(resource='router', msg=msg)
-
-            ha_network = self.get_ha_network(context,
-                                             router_db.tenant_id)
-            router_db.extra_attributes.ha = requested_ha_state
-            if not requested_ha_state:
-                self._delete_vr_id_allocation(
-                    context, ha_network, router_db.extra_attributes.ha_vr_id)
-                router_db.extra_attributes.ha_vr_id = None
-
-        # The HA attribute has changed. First unbind the router from agents
-        # to force a proper re-scheduling to agents.
-        # TODO(jschwarz): This will have to be more selective to get HA + DVR
-        # working (Only unbind from dvr_snat nodes).
-        self._unbind_ha_router(context, router_id)
-
-        if requested_ha_state:
-            if not ha_network:
-                ha_network = self._create_ha_network(context,
-                                                     router_db.tenant_id)
-
-            self._set_vr_id(context, router_db, ha_network)
-            self._create_ha_interfaces(context, router_db, ha_network)
-            self._notify_ha_interfaces_updated(context, router_db.id)
-        else:
-            self._delete_ha_interfaces(context, router_db.id)
-            self._notify_ha_interfaces_updated(context, router_db.id)
-
-        return router_db
-
-    def _delete_ha_network(self, context, net):
-        admin_ctx = context.elevated()
-        self._core_plugin.delete_network(admin_ctx, net.network_id)
-
-    def _ha_routers_present(self, context, tenant_id):
-        ha = True
-        routers = context.session.query(l3_db.Router).filter(
-            l3_db.Router.tenant_id == tenant_id).subquery()
-        ha_routers = context.session.query(
-            l3_attrs_db.RouterExtraAttributes).join(
-            routers,
-            l3_attrs_db.RouterExtraAttributes.router_id == routers.c.id
-        ).filter(l3_attrs_db.RouterExtraAttributes.ha == ha).first()
-        return ha_routers is not None
-
-    def delete_router(self, context, id):
-        router_db = self._get_router(context, id)
-        super(L3_HA_NAT_db_mixin, self).delete_router(context, id)
-
-        if router_db.extra_attributes.ha:
-            ha_network = self.get_ha_network(context,
-                                             router_db.tenant_id)
-            if ha_network:
-                self._delete_vr_id_allocation(
-                    context, ha_network, router_db.extra_attributes.ha_vr_id)
-                self._delete_ha_interfaces(context, router_db.id)
-
-                # HA router creation may have failed because the HA network
-                # itself could not be created, which is why this HA network
-                # deletion is guarded by the 'if ha_network' block above.
-                if not self._ha_routers_present(context,
-                                                router_db.tenant_id):
-                    try:
-                        self._delete_ha_network(context, ha_network)
-                    except (n_exc.NetworkNotFound,
-                            orm.exc.ObjectDeletedError):
-                        LOG.debug(
-                            "HA network for tenant %s was already deleted.",
-                            router_db.tenant_id)
-                    except sa.exc.InvalidRequestError:
-                        LOG.info(_LI("HA network %s cannot be deleted."),
-                                 ha_network.network_id)
-                    except n_exc.NetworkInUse:
-                        LOG.debug("HA network %s is still in use.",
-                                  ha_network.network_id)
-                    else:
-                        LOG.info(_LI("HA network %(network)s was deleted as "
-                                     "no HA routers are present in tenant "
-                                     "%(tenant)s."),
-                                 {'network': ha_network.network_id,
-                                  'tenant': router_db.tenant_id})
-
-    def _unbind_ha_router(self, context, router_id):
-        for agent in self.get_l3_agents_hosting_routers(context, [router_id]):
-            self.remove_router_from_l3_agent(context, agent['id'], router_id)
-
-    def get_ha_router_port_bindings(self, context, router_ids, host=None):
-        if not router_ids:
-            return []
-        query = context.session.query(L3HARouterAgentPortBinding)
-
-        if host:
-            query = query.join(agents_db.Agent).filter(
-                agents_db.Agent.host == host)
-
-        query = query.filter(
-            L3HARouterAgentPortBinding.router_id.in_(router_ids))
-
-        return query.all()
-
-    def get_l3_bindings_hosting_router_with_ha_states(
-            self, context, router_id):
-        """Return a list of [(agent, ha_state), ...]."""
-        bindings = self.get_ha_router_port_bindings(context, [router_id])
-        return [(binding.agent, binding.state) for binding in bindings
-                if binding.agent is not None]
-
-    def get_active_host_for_ha_router(self, context, router_id):
-        bindings = self.get_l3_bindings_hosting_router_with_ha_states(
-            context, router_id)
-        # TODO(amuller): In case we have two or more actives, this method
-        # needs to return the last agent to become active. This requires
-        # timestamps for state changes. Otherwise, if a host goes down
-        # and another takes over, we'll have two actives. In this case,
-        # if an interface is added to a router, its binding might be wrong
-        # and l2pop would not work correctly.
-        return next(
-            (agent.host for agent, state in bindings
-             if state == constants.HA_ROUTER_STATE_ACTIVE),
-            None)
-
-    def _process_sync_ha_data(self, context, routers, host):
-        routers_dict = dict((router['id'], router) for router in routers)
-
-        bindings = self.get_ha_router_port_bindings(context,
-                                                    routers_dict.keys(),
-                                                    host)
-        for binding in bindings:
-            port_dict = self._core_plugin._make_port_dict(binding.port)
-
-            router = routers_dict.get(binding.router_id)
-            router[constants.HA_INTERFACE_KEY] = port_dict
-            router[constants.HA_ROUTER_STATE_KEY] = binding.state
-
-        for router in routers_dict.values():
-            interface = router.get(constants.HA_INTERFACE_KEY)
-            if interface:
-                self._populate_subnets_for_ports(context, [interface])
-
-        return list(routers_dict.values())
-
-    def get_ha_sync_data_for_host(self, context, host, agent,
-                                  router_ids=None, active=None):
-        agent_mode = self._get_agent_mode(agent)
-        dvr_agent_mode = (agent_mode in [constants.L3_AGENT_MODE_DVR_SNAT,
-                                         constants.L3_AGENT_MODE_DVR])
-        if (dvr_agent_mode and n_utils.is_extension_supported(
-                self, constants.L3_DISTRIBUTED_EXT_ALIAS)):
-            # DVR has to be handled differently
-            sync_data = self._get_dvr_sync_data(context, host, agent,
-                                                router_ids, active)
-        else:
-            sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(context,
-                                                            router_ids, active)
-        return self._process_sync_ha_data(context, sync_data, host)
-
-    @classmethod
-    def _set_router_states(cls, context, bindings, states):
-        for binding in bindings:
-            try:
-                with context.session.begin(subtransactions=True):
-                    binding.state = states[binding.router_id]
-            except (orm.exc.StaleDataError, orm.exc.ObjectDeletedError):
-                # Take concurrently deleted routers into account
-                pass
-
-    def update_routers_states(self, context, states, host):
-        """Receive dict of router ID to state and update them all."""
-
-        bindings = self.get_ha_router_port_bindings(
-            context, router_ids=states.keys(), host=host)
-        self._set_router_states(context, bindings, states)
-        self._update_router_port_bindings(context, states, host)
-
-    def _update_router_port_bindings(self, context, states, host):
-        admin_ctx = context.elevated()
-        device_filter = {'device_id': states.keys(),
-                         'device_owner':
-                         [constants.DEVICE_OWNER_ROUTER_INTF]}
-        ports = self._core_plugin.get_ports(admin_ctx, filters=device_filter)
-        active_ports = (port for port in ports
-            if states[port['device_id']] == constants.HA_ROUTER_STATE_ACTIVE)
-
-        for port in active_ports:
-            port[portbindings.HOST_ID] = host
-            try:
-                self._core_plugin.update_port(admin_ctx, port['id'],
-                                              {attributes.PORT: port})
-            except (orm.exc.StaleDataError, orm.exc.ObjectDeletedError):
-                # Take concurrently deleted interfaces into account
-                pass
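For reference, a minimal sketch (illustrative only, not part of the removed code) of the states payload consumed by update_routers_states() above; the router UUIDs, plugin handle, and host name are hypothetical:

    # Hypothetical report from an L3 agent: router UUID -> HA state string
    # ('active' / 'standby', per constants.HA_ROUTER_STATE_*).
    states = {
        '5c7b1c4e-0000-0000-0000-000000000001': 'active',
        '5c7b1c4e-0000-0000-0000-000000000002': 'standby',
    }
    plugin.update_routers_states(context, states, host='network-node-1')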
diff --git a/neutron/db/l3_hascheduler_db.py b/neutron/db/l3_hascheduler_db.py
deleted file mode 100644 (file)
index 7acb1c8..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import func
-from sqlalchemy import sql
-
-from neutron.db import agents_db
-from neutron.db import l3_agentschedulers_db as l3_sch_db
-from neutron.db import l3_attrs_db
-from neutron.db import l3_db
-
-
-class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin):
-
-    def get_ha_routers_l3_agents_count(self, context):
-        """Return a map between HA routers and how many agents every
-        router is scheduled to.
-        """
-
-        # Postgres requires every column in the SELECT list to appear in
-        # the GROUP BY clause when an aggregate function is used.
-        # One solution is to compute the count in a subquery and join it
-        # with the desired columns.
-        binding_model = l3_sch_db.RouterL3AgentBinding
-        sub_query = (context.session.query(
-            binding_model.router_id,
-            func.count(binding_model.router_id).label('count')).
-            join(l3_attrs_db.RouterExtraAttributes,
-                 binding_model.router_id ==
-                 l3_attrs_db.RouterExtraAttributes.router_id).
-            join(l3_db.Router).
-            filter(l3_attrs_db.RouterExtraAttributes.ha == sql.true()).
-            group_by(binding_model.router_id).subquery())
-
-        query = (context.session.query(l3_db.Router, sub_query.c.count).
-                 join(sub_query))
-
-        return [(self._make_router_dict(router), agent_count)
-                for router, agent_count in query]
-
-    def get_l3_agents_ordered_by_num_routers(self, context, agent_ids):
-        if not agent_ids:
-            return []
-        query = (context.session.query(agents_db.Agent, func.count(
-            l3_sch_db.RouterL3AgentBinding.router_id).label('count')).
-            outerjoin(l3_sch_db.RouterL3AgentBinding).
-            group_by(agents_db.Agent.id).
-            filter(agents_db.Agent.id.in_(agent_ids)).
-            order_by('count'))
-
-        return [record[0] for record in query]
-
-    def _get_agents_dict_for_router(self, agents_and_states):
-        agents = []
-        for agent, ha_state in agents_and_states:
-            l3_agent_dict = self._make_agent_dict(agent)
-            l3_agent_dict['ha_state'] = ha_state
-            agents.append(l3_agent_dict)
-        return {'agents': agents}
-
-    def list_l3_agents_hosting_router(self, context, router_id):
-        with context.session.begin(subtransactions=True):
-            router_db = self._get_router(context, router_id)
-            if router_db.extra_attributes.ha:
-                bindings = self.get_l3_bindings_hosting_router_with_ha_states(
-                    context, router_id)
-            else:
-                bindings = self._get_l3_bindings_hosting_routers(
-                    context, [router_id])
-                bindings = [(binding.l3_agent, None) for binding in bindings]
-
-        return self._get_agents_dict_for_router(bindings)
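A standalone sketch of the aggregate-in-a-subquery pattern used by get_ha_routers_l3_agents_count() above; Binding, Router, and session are assumed stand-ins rather than the actual Neutron models:

    from sqlalchemy import func

    # Count bindings per router in a subquery, then join to fetch full
    # Router rows; Postgres only needs the subquery columns grouped.
    sub = (session.query(Binding.router_id,
                         func.count(Binding.router_id).label('count'))
           .group_by(Binding.router_id)
           .subquery())
    rows = session.query(Router, sub.c.count).join(
        sub, Router.id == sub.c.router_id).all()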
diff --git a/neutron/db/metering/__init__.py b/neutron/db/metering/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/db/metering/metering_db.py b/neutron/db/metering/metering_db.py
deleted file mode 100644 (file)
index ebed45b..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-from oslo_utils import uuidutils
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy import sql
-
-from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
-from neutron.api.v2 import attributes as attr
-from neutron.common import constants
-from neutron.db import common_db_mixin as base_db
-from neutron.db import l3_db
-from neutron.db import model_base
-from neutron.extensions import metering
-
-
-class MeteringLabelRule(model_base.BASEV2, model_base.HasId):
-    direction = sa.Column(sa.Enum('ingress', 'egress',
-                                  name='meteringlabels_direction'))
-    remote_ip_prefix = sa.Column(sa.String(64))
-    metering_label_id = sa.Column(sa.String(36),
-                                  sa.ForeignKey("meteringlabels.id",
-                                                ondelete="CASCADE"),
-                                  nullable=False)
-    excluded = sa.Column(sa.Boolean, default=False, server_default=sql.false())
-
-
-class MeteringLabel(model_base.BASEV2, model_base.HasId, model_base.HasTenant):
-    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
-    description = sa.Column(sa.String(attr.LONG_DESCRIPTION_MAX_LEN))
-    rules = orm.relationship(MeteringLabelRule, backref="label",
-                             cascade="delete", lazy="joined")
-    routers = orm.relationship(
-        l3_db.Router,
-        primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
-        foreign_keys='MeteringLabel.tenant_id',
-        uselist=True)
-    shared = sa.Column(sa.Boolean, default=False, server_default=sql.false())
-
-
-class MeteringDbMixin(metering.MeteringPluginBase,
-                      base_db.CommonDbMixin):
-
-    def __init__(self):
-        self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()
-
-    def _make_metering_label_dict(self, metering_label, fields=None):
-        res = {'id': metering_label['id'],
-               'name': metering_label['name'],
-               'description': metering_label['description'],
-               'shared': metering_label['shared'],
-               'tenant_id': metering_label['tenant_id']}
-        return self._fields(res, fields)
-
-    def create_metering_label(self, context, metering_label):
-        m = metering_label['metering_label']
-
-        with context.session.begin(subtransactions=True):
-            metering_db = MeteringLabel(id=uuidutils.generate_uuid(),
-                                        description=m['description'],
-                                        tenant_id=m['tenant_id'],
-                                        name=m['name'],
-                                        shared=m['shared'])
-            context.session.add(metering_db)
-
-        return self._make_metering_label_dict(metering_db)
-
-    def delete_metering_label(self, context, label_id):
-        with context.session.begin(subtransactions=True):
-            try:
-                label = self._get_by_id(context, MeteringLabel, label_id)
-            except orm.exc.NoResultFound:
-                raise metering.MeteringLabelNotFound(label_id=label_id)
-
-            context.session.delete(label)
-
-    def get_metering_label(self, context, label_id, fields=None):
-        try:
-            metering_label = self._get_by_id(context, MeteringLabel, label_id)
-        except orm.exc.NoResultFound:
-            raise metering.MeteringLabelNotFound(label_id=label_id)
-
-        return self._make_metering_label_dict(metering_label, fields)
-
-    def get_metering_labels(self, context, filters=None, fields=None,
-                            sorts=None, limit=None, marker=None,
-                            page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'metering_labels', limit,
-                                          marker)
-        return self._get_collection(context, MeteringLabel,
-                                    self._make_metering_label_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts,
-                                    limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def _make_metering_label_rule_dict(self, metering_label_rule, fields=None):
-        res = {'id': metering_label_rule['id'],
-               'metering_label_id': metering_label_rule['metering_label_id'],
-               'direction': metering_label_rule['direction'],
-               'remote_ip_prefix': metering_label_rule['remote_ip_prefix'],
-               'excluded': metering_label_rule['excluded']}
-        return self._fields(res, fields)
-
-    def get_metering_label_rules(self, context, filters=None, fields=None,
-                                 sorts=None, limit=None, marker=None,
-                                 page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'metering_label_rules',
-                                          limit, marker)
-
-        return self._get_collection(context, MeteringLabelRule,
-                                    self._make_metering_label_rule_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts,
-                                    limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def get_metering_label_rule(self, context, rule_id, fields=None):
-        try:
-            metering_label_rule = self._get_by_id(context,
-                                                  MeteringLabelRule, rule_id)
-        except orm.exc.NoResultFound:
-            raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
-
-        return self._make_metering_label_rule_dict(metering_label_rule, fields)
-
-    def _validate_cidr(self, context, label_id, remote_ip_prefix,
-                       direction, excluded):
-        r_ips = self.get_metering_label_rules(context,
-                                              filters={'metering_label_id':
-                                                       [label_id],
-                                                       'direction':
-                                                       [direction],
-                                                       'excluded':
-                                                       [excluded]},
-                                              fields=['remote_ip_prefix'])
-
-        cidrs = [r['remote_ip_prefix'] for r in r_ips]
-        new_cidr_ipset = netaddr.IPSet([remote_ip_prefix])
-        if (netaddr.IPSet(cidrs) & new_cidr_ipset):
-            raise metering.MeteringLabelRuleOverlaps(
-                remote_ip_prefix=remote_ip_prefix)
-
-    def create_metering_label_rule(self, context, metering_label_rule):
-        m = metering_label_rule['metering_label_rule']
-        with context.session.begin(subtransactions=True):
-            label_id = m['metering_label_id']
-            ip_prefix = m['remote_ip_prefix']
-            direction = m['direction']
-            excluded = m['excluded']
-
-            self._validate_cidr(context, label_id, ip_prefix, direction,
-                                excluded)
-            metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(),
-                                            metering_label_id=label_id,
-                                            direction=direction,
-                                            excluded=m['excluded'],
-                                            remote_ip_prefix=ip_prefix)
-            context.session.add(metering_db)
-
-        return self._make_metering_label_rule_dict(metering_db)
-
-    def delete_metering_label_rule(self, context, rule_id):
-        with context.session.begin(subtransactions=True):
-            try:
-                rule = self._get_by_id(context, MeteringLabelRule, rule_id)
-            except orm.exc.NoResultFound:
-                raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
-            context.session.delete(rule)
-
-        return self._make_metering_label_rule_dict(rule)
-
-    def _get_metering_rules_dict(self, metering_label):
-        rules = []
-        for rule in metering_label.rules:
-            rule_dict = self._make_metering_label_rule_dict(rule)
-            rules.append(rule_dict)
-
-        return rules
-
-    def _make_router_dict(self, router):
-        res = {'id': router['id'],
-               'name': router['name'],
-               'tenant_id': router['tenant_id'],
-               'admin_state_up': router['admin_state_up'],
-               'status': router['status'],
-               'gw_port_id': router['gw_port_id'],
-               constants.METERING_LABEL_KEY: []}
-
-        return res
-
-    def _process_sync_metering_data(self, context, labels):
-        all_routers = None
-
-        routers_dict = {}
-        for label in labels:
-            if label.shared:
-                if not all_routers:
-                    all_routers = self._get_collection_query(context,
-                                                             l3_db.Router)
-                routers = all_routers
-            else:
-                routers = label.routers
-
-            for router in routers:
-                router_dict = routers_dict.get(
-                    router['id'],
-                    self._make_router_dict(router))
-
-                rules = self._get_metering_rules_dict(label)
-
-                data = {'id': label['id'], 'rules': rules}
-                router_dict[constants.METERING_LABEL_KEY].append(data)
-
-                routers_dict[router['id']] = router_dict
-
-        return list(routers_dict.values())
-
-    def get_sync_data_for_rule(self, context, rule):
-        label = context.session.query(MeteringLabel).get(
-            rule['metering_label_id'])
-
-        if label.shared:
-            routers = self._get_collection_query(context, l3_db.Router)
-        else:
-            routers = label.routers
-
-        routers_dict = {}
-        for router in routers:
-            router_dict = routers_dict.get(router['id'],
-                                           self._make_router_dict(router))
-            data = {'id': label['id'], 'rule': rule}
-            router_dict[constants.METERING_LABEL_KEY].append(data)
-            routers_dict[router['id']] = router_dict
-
-        return list(routers_dict.values())
-
-    def get_sync_data_metering(self, context, label_id=None, router_ids=None):
-        labels = context.session.query(MeteringLabel)
-
-        if label_id:
-            labels = labels.filter(MeteringLabel.id == label_id)
-        elif router_ids:
-            labels = (labels.join(MeteringLabel.routers).
-                      filter(l3_db.Router.id.in_(router_ids)))
-
-        return self._process_sync_metering_data(context, labels)
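A minimal sketch of the overlap test behind _validate_cidr() above: two rule sets overlap exactly when the intersection of their netaddr IPSets is non-empty (the prefixes are made up):

    import netaddr

    existing = netaddr.IPSet(['10.0.0.0/24', '192.168.1.0/24'])
    candidate = netaddr.IPSet(['10.0.0.128/25'])
    if existing & candidate:
        # the mixin raises MeteringLabelRuleOverlaps at this point
        print('overlapping remote_ip_prefix')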
diff --git a/neutron/db/metering/metering_rpc.py b/neutron/db/metering/metering_rpc.py
deleted file mode 100644 (file)
index 19e8e90..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-import oslo_messaging
-
-from neutron._i18n import _LE
-from neutron.common import constants as consts
-from neutron.common import utils
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-
-LOG = logging.getLogger(__name__)
-
-
-class MeteringRpcCallbacks(object):
-
-    target = oslo_messaging.Target(version='1.0')
-
-    def __init__(self, meter_plugin):
-        self.meter_plugin = meter_plugin
-
-    def get_sync_data_metering(self, context, **kwargs):
-        l3_plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        if not l3_plugin:
-            return
-
-        host = kwargs.get('host')
-        if not utils.is_extension_supported(
-            l3_plugin, consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host:
-            return self.meter_plugin.get_sync_data_metering(context)
-        else:
-            agents = l3_plugin.get_l3_agents(context, filters={'host': [host]})
-            if not agents:
-                LOG.error(_LE('Unable to find agent %s.'), host)
-                return
-
-            routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id)
-            router_ids = [router['id'] for router in routers['routers']]
-            if not router_ids:
-                return
-
-        return self.meter_plugin.get_sync_data_metering(context,
-                                                        router_ids=router_ids)
diff --git a/neutron/db/migration/README b/neutron/db/migration/README
deleted file mode 100644 (file)
index 18a126c..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-See doc/source/devref/alembic_migrations.rst
-
-Rendered at
-http://docs.openstack.org/developer/neutron/devref/alembic_migrations.html
diff --git a/neutron/db/migration/__init__.py b/neutron/db/migration/__init__.py
deleted file mode 100644 (file)
index 484aeea..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import functools
-
-import alembic
-from alembic import context
-from alembic import op
-import sqlalchemy as sa
-from sqlalchemy.engine import reflection
-
-from neutron._i18n import _
-
-# Neutron milestones for upgrade aliases
-LIBERTY = 'liberty'
-MITAKA = 'mitaka'
-
-NEUTRON_MILESTONES = [
-    # earlier milestones were not tagged
-    LIBERTY,
-    MITAKA,
-]
-
-CREATION_OPERATIONS = (sa.sql.ddl.CreateIndex,
-                       sa.sql.ddl.CreateTable,
-                       sa.sql.ddl.CreateColumn,
-                       )
-DROP_OPERATIONS = (sa.sql.ddl.DropConstraint,
-                   sa.sql.ddl.DropIndex,
-                   sa.sql.ddl.DropTable,
-                   alembic.ddl.base.DropColumn)
-
-
-def skip_if_offline(func):
-    """Decorator for skipping migrations in offline mode."""
-    @functools.wraps(func)
-    def decorator(*args, **kwargs):
-        if context.is_offline_mode():
-            return
-        return func(*args, **kwargs)
-
-    return decorator
-
-
-def raise_if_offline(func):
-    """Decorator for raising if a function is called in offline mode."""
-    @functools.wraps(func)
-    def decorator(*args, **kwargs):
-        if context.is_offline_mode():
-            raise RuntimeError(_("%s cannot be called while in offline mode") %
-                               func.__name__)
-        return func(*args, **kwargs)
-
-    return decorator
-
-
-@raise_if_offline
-def schema_has_table(table_name):
-    """Check whether the specified table exists in the current schema.
-
-    This method cannot be executed in offline mode.
-    """
-    bind = op.get_bind()
-    insp = sa.engine.reflection.Inspector.from_engine(bind)
-    return table_name in insp.get_table_names()
-
-
-@raise_if_offline
-def schema_has_column(table_name, column_name):
-    """Check whether the specified column exists in the current schema.
-
-    This method cannot be executed in offline mode.
-    """
-    bind = op.get_bind()
-    insp = sa.engine.reflection.Inspector.from_engine(bind)
-    # first check that the table exists
-    if not schema_has_table(table_name):
-        return
-    # check whether column_name exists in table columns
-    return column_name in [column['name'] for column in
-                           insp.get_columns(table_name)]
-
-
-@raise_if_offline
-def alter_column_if_exists(table_name, column_name, **kwargs):
-    """Alter a column only if it exists in the schema."""
-    if schema_has_column(table_name, column_name):
-        op.alter_column(table_name, column_name, **kwargs)
-
-
-@raise_if_offline
-def drop_table_if_exists(table_name):
-    if schema_has_table(table_name):
-        op.drop_table(table_name)
-
-
-@raise_if_offline
-def rename_table_if_exists(old_table_name, new_table_name):
-    if schema_has_table(old_table_name):
-        op.rename_table(old_table_name, new_table_name)
-
-
-def alter_enum(table, column, enum_type, nullable):
-    bind = op.get_bind()
-    engine = bind.engine
-    if engine.name == 'postgresql':
-        values = {'table': table,
-                  'column': column,
-                  'name': enum_type.name}
-        op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
-        enum_type.create(bind, checkfirst=False)
-        op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
-                   "old_%(column)s" % values)
-        op.add_column(table, sa.Column(column, enum_type, nullable=nullable))
-        op.execute("UPDATE %(table)s SET %(column)s = "
-                   "old_%(column)s::text::%(name)s" % values)
-        op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values)
-        op.execute("DROP TYPE old_%(name)s" % values)
-    else:
-        op.alter_column(table, column, type_=enum_type,
-                        existing_nullable=nullable)
-
-
-def create_table_if_not_exist_psql(table_name, values):
-    if op.get_bind().engine.dialect.server_version_info < (9, 1, 0):
-        op.execute("CREATE LANGUAGE plpgsql")
-    op.execute("CREATE OR REPLACE FUNCTION execute(TEXT) RETURNS VOID AS $$"
-               "BEGIN EXECUTE $1; END;"
-               "$$ LANGUAGE plpgsql STRICT;")
-    op.execute("CREATE OR REPLACE FUNCTION table_exist(TEXT) RETURNS bool as "
-               "$$ SELECT exists(select 1 from pg_class where relname=$1);"
-               "$$ language sql STRICT;")
-    op.execute("SELECT execute($$CREATE TABLE %(name)s %(columns)s $$) "
-               "WHERE NOT table_exist(%(name)r);" %
-               {'name': table_name,
-                'columns': values})
-
-
-def remove_foreign_keys(table, foreign_keys):
-    for fk in foreign_keys:
-        op.drop_constraint(
-            constraint_name=fk['name'],
-            table_name=table,
-            type_='foreignkey'
-        )
-
-
-def create_foreign_keys(table, foreign_keys):
-    for fk in foreign_keys:
-        op.create_foreign_key(
-            constraint_name=fk['name'],
-            source_table=table,
-            referent_table=fk['referred_table'],
-            local_cols=fk['constrained_columns'],
-            remote_cols=fk['referred_columns'],
-            ondelete='CASCADE'
-        )
-
-
-@contextlib.contextmanager
-def remove_fks_from_table(table):
-    try:
-        inspector = reflection.Inspector.from_engine(op.get_bind())
-        foreign_keys = inspector.get_foreign_keys(table)
-        remove_foreign_keys(table, foreign_keys)
-        yield
-    finally:
-        create_foreign_keys(table, foreign_keys)
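A hypothetical usage sketch for the helpers above; the migration body is invented. skip_if_offline makes the whole function a no-op during offline (--sql) runs, so the schema_has_table call, which would otherwise trip raise_if_offline, is only ever reached online:

    from neutron.db import migration

    @migration.skip_if_offline
    def upgrade_data_example():
        # Only reached in online mode, where inspecting the live schema
        # is possible.
        if migration.schema_has_table('routers'):
            pass  # perform the data migration here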
diff --git a/neutron/db/migration/alembic.ini b/neutron/db/migration/alembic.ini
deleted file mode 100644 (file)
index 3fb3b0f..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-# A generic, single database configuration.
-
-[alembic]
-# path to migration scripts
-script_location = %(here)s/alembic_migrations
-
-# template used to generate migration files
-# file_template = %%(rev)s_%%(slug)s
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# Default to an empty string because the Neutron migration CLI will
-# extract the correct value and set it programmatically before Alembic
-# is fully invoked.
-sqlalchemy.url =
-
-# Logging configuration
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = INFO
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
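For context, a sketch of how a migration CLI can inject sqlalchemy.url programmatically before invoking Alembic, as the comment above describes; this is an assumed illustration, not the exact Neutron code, and the URL is a placeholder:

    from alembic.config import Config

    cfg = Config('neutron/db/migration/alembic.ini')
    cfg.set_main_option('sqlalchemy.url',
                        'mysql+pymysql://user:secret@dbhost/neutron')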
diff --git a/neutron/db/migration/alembic_migrations/__init__.py b/neutron/db/migration/alembic_migrations/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/db/migration/alembic_migrations/agent_init_ops.py b/neutron/db/migration/alembic_migrations/agent_init_ops.py
deleted file mode 100644 (file)
index 1d832fc..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for the agent management extension.
-# This module only manages the 'agents' table. Binding tables are created
-# in the modules for the relevant resources.
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'agents',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('agent_type', sa.String(length=255), nullable=False),
-        sa.Column('binary', sa.String(length=255), nullable=False),
-        sa.Column('topic', sa.String(length=255), nullable=False),
-        sa.Column('host', sa.String(length=255), nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.true()),
-        sa.Column('created_at', sa.DateTime(), nullable=False),
-        sa.Column('started_at', sa.DateTime(), nullable=False),
-        sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False),
-        sa.Column('description', sa.String(length=255), nullable=True),
-        sa.Column('configurations', sa.String(length=4095), nullable=False),
-        sa.Column('load', sa.Integer(), server_default='0', nullable=False),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint('agent_type', 'host',
-                            name='uniq_agents0agent_type0host'))
diff --git a/neutron/db/migration/alembic_migrations/brocade_init_ops.py b/neutron/db/migration/alembic_migrations/brocade_init_ops.py
deleted file mode 100644 (file)
index 96740bd..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for the Brocade plugin
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'brocadenetworks',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('vlan', sa.String(length=10), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'brocadeports',
-        sa.Column('port_id', sa.String(length=36), nullable=False,
-                  server_default=''),
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.Column('physical_interface', sa.String(length=36), nullable=True),
-        sa.Column('vlan_id', sa.String(length=36), nullable=True),
-        sa.Column('tenant_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['network_id'], ['brocadenetworks.id'], ),
-        sa.PrimaryKeyConstraint('port_id'))
-
-    op.create_table(
-        'ml2_brocadenetworks',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('vlan', sa.String(length=10), nullable=True),
-        sa.Column('segment_id', sa.String(length=36), nullable=True),
-        sa.Column('network_type', sa.String(length=10), nullable=True),
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'ml2_brocadeports',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.Column('physical_interface', sa.String(length=36), nullable=True),
-        sa.Column('vlan_id', sa.String(length=36), nullable=True),
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.PrimaryKeyConstraint('id'),
-        sa.ForeignKeyConstraint(['network_id'], ['ml2_brocadenetworks.id']))
diff --git a/neutron/db/migration/alembic_migrations/cisco_init_ops.py b/neutron/db/migration/alembic_migrations/cisco_init_ops.py
deleted file mode 100644 (file)
index a66fb29..0000000
+++ /dev/null
@@ -1,322 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial schema operations for the Cisco plugin
-
-from alembic import op
-import sqlalchemy as sa
-
-segment_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment',
-                       name='segment_type')
-profile_type = sa.Enum('network', 'policy', name='profile_type')
-
-network_profile_type = sa.Enum('vlan', 'vxlan', name='network_profile_type')
-
-
-def upgrade():
-    op.create_table(
-        'cisco_policy_profiles',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'cisco_network_profiles',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('segment_type', segment_type, nullable=False),
-        sa.Column('sub_type', sa.String(length=255), nullable=True),
-        sa.Column('segment_range', sa.String(length=255), nullable=True),
-        sa.Column('multicast_ip_index', sa.Integer(), nullable=True,
-                  server_default='0'),
-        sa.Column('multicast_ip_range', sa.String(length=255), nullable=True),
-        sa.Column('physical_network', sa.String(length=255), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'cisco_n1kv_vxlan_allocations',
-        sa.Column('vxlan_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.false()),
-        sa.Column('network_profile_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_profile_id'],
-                                ['cisco_network_profiles.id'],
-                                ondelete='CASCADE',
-                                name='cisco_n1kv_vxlan_allocations_ibfk_1'),
-        sa.PrimaryKeyConstraint('vxlan_id'))
-
-    op.create_table(
-        'cisco_n1kv_vlan_allocations',
-        sa.Column('physical_network', sa.String(length=64), nullable=False),
-        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), autoincrement=False,
-                  nullable=False, server_default=sa.sql.false()),
-        sa.Column('network_profile_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('physical_network', 'vlan_id'),
-        sa.ForeignKeyConstraint(['network_profile_id'],
-                                ['cisco_network_profiles.id'],
-                                ondelete='CASCADE',
-                                name='cisco_n1kv_vlan_allocations_ibfk_1'))
-
-    op.create_table(
-        'cisco_credentials',
-        sa.Column('credential_id', sa.String(length=255), nullable=True),
-        sa.Column('credential_name', sa.String(length=255), nullable=False),
-        sa.Column('user_name', sa.String(length=255), nullable=True),
-        sa.Column('password', sa.String(length=255), nullable=True),
-        sa.Column('type', sa.String(length=255), nullable=True),
-        sa.PrimaryKeyConstraint('credential_name'))
-
-    op.create_table(
-        'cisco_qos_policies',
-        sa.Column('qos_id', sa.String(length=255), nullable=True),
-        sa.Column('tenant_id', sa.String(length=255), nullable=False),
-        sa.Column('qos_name', sa.String(length=255), nullable=False),
-        sa.Column('qos_desc', sa.String(length=255), nullable=True),
-        sa.PrimaryKeyConstraint('tenant_id', 'qos_name'))
-
-    op.create_table(
-        'cisco_n1kv_profile_bindings',
-        sa.Column('profile_type', profile_type, nullable=True),
-        sa.Column('tenant_id', sa.String(length=36), nullable=False,
-                  server_default='TENANT_ID_NOT_SET'),
-        sa.Column('profile_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('tenant_id', 'profile_id'))
-
-    op.create_table(
-        'cisco_n1kv_vmnetworks',
-        sa.Column('name', sa.String(length=80), nullable=False),
-        sa.Column('profile_id', sa.String(length=36), nullable=True),
-        sa.Column('network_id', sa.String(length=36), nullable=True),
-        sa.Column('port_count', sa.Integer(), nullable=True),
-        sa.ForeignKeyConstraint(['profile_id'],
-                                ['cisco_policy_profiles.id'], ),
-        sa.PrimaryKeyConstraint('name'))
-
-    op.create_table(
-        'cisco_n1kv_trunk_segments',
-        sa.Column('trunk_segment_id', sa.String(length=36), nullable=False),
-        sa.Column('segment_id', sa.String(length=36), nullable=False),
-        sa.Column('dot1qtag', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag'))
-
-    op.create_table(
-        'cisco_provider_networks',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('network_type', sa.String(length=255), nullable=False),
-        sa.Column('segmentation_id', sa.Integer(), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id'))
-
-    op.create_table(
-        'cisco_n1kv_multi_segments',
-        sa.Column('multi_segment_id', sa.String(length=36), nullable=False),
-        sa.Column('segment1_id', sa.String(length=36), nullable=False),
-        sa.Column('segment2_id', sa.String(length=36), nullable=False),
-        sa.Column('encap_profile_name', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id',
-                                'segment2_id'))
-
-    op.create_table(
-        'cisco_n1kv_network_bindings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('network_type', sa.String(length=32), nullable=False),
-        sa.Column('physical_network', sa.String(length=64), nullable=True),
-        sa.Column('segmentation_id', sa.Integer(), nullable=True),
-        sa.Column('multicast_ip', sa.String(length=32), nullable=True),
-        sa.Column('profile_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['profile_id'],
-                                ['cisco_network_profiles.id']),
-        sa.PrimaryKeyConstraint('network_id'))
-
-    op.create_table(
-        'cisco_n1kv_port_bindings',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('profile_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']),
-        sa.PrimaryKeyConstraint('port_id'))
-
-    op.create_table(
-        'cisco_csr_identifier_map',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('ipsec_site_conn_id', sa.String(length=36),
-                  primary_key=True),
-        sa.Column('csr_tunnel_id', sa.Integer(), nullable=False),
-        sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False),
-        sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False),
-        sa.ForeignKeyConstraint(['ipsec_site_conn_id'],
-                                ['ipsec_site_connections.id'],
-                                ondelete='CASCADE')
-    )
-
-    op.create_table(
-        'cisco_ml2_apic_host_links',
-        sa.Column('host', sa.String(length=255), nullable=False),
-        sa.Column('ifname', sa.String(length=64), nullable=False),
-        sa.Column('ifmac', sa.String(length=32), nullable=True),
-        sa.Column('swid', sa.String(length=32), nullable=False),
-        sa.Column('module', sa.String(length=32), nullable=False),
-        sa.Column('port', sa.String(length=32), nullable=False),
-        sa.PrimaryKeyConstraint('host', 'ifname'))
-
-    op.create_table(
-        'cisco_ml2_apic_names',
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.Column('neutron_type', sa.String(length=32), nullable=False),
-        sa.Column('apic_name', sa.String(length=255), nullable=False),
-        sa.PrimaryKeyConstraint('neutron_id', 'neutron_type'))
-
-    op.create_table(
-        'cisco_ml2_apic_contracts',
-        sa.Column('tenant_id', sa.String(length=255), index=True),
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id']),
-        sa.PrimaryKeyConstraint('router_id'))
-
-    op.create_table('cisco_hosting_devices',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('complementary_id', sa.String(length=36), nullable=True),
-        sa.Column('device_id', sa.String(length=255), nullable=True),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.Column('management_port_id', sa.String(length=36), nullable=True),
-        sa.Column('protocol_port', sa.Integer(), nullable=True),
-        sa.Column('cfg_agent_id', sa.String(length=36), nullable=True),
-        sa.Column('created_at', sa.DateTime(), nullable=False),
-        sa.Column('status', sa.String(length=16), nullable=True),
-        sa.ForeignKeyConstraint(['cfg_agent_id'], ['agents.id'], ),
-        sa.ForeignKeyConstraint(['management_port_id'], ['ports.id'],
-                                ondelete='SET NULL'),
-        sa.PrimaryKeyConstraint('id')
-    )
-    op.create_table('cisco_port_mappings',
-        sa.Column('logical_resource_id', sa.String(length=36), nullable=False),
-        sa.Column('logical_port_id', sa.String(length=36), nullable=False),
-        sa.Column('port_type', sa.String(length=32), nullable=True),
-        sa.Column('network_type', sa.String(length=32), nullable=True),
-        sa.Column('hosting_port_id', sa.String(length=36), nullable=True),
-        sa.Column('segmentation_id', sa.Integer(), autoincrement=False,
-                  nullable=True),
-        sa.ForeignKeyConstraint(['hosting_port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['logical_port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('logical_resource_id', 'logical_port_id')
-    )
-    op.create_table('cisco_router_mappings',
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.Column('auto_schedule', sa.Boolean(), nullable=False),
-        sa.Column('hosting_device_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['hosting_device_id'],
-                                ['cisco_hosting_devices.id'],
-                                ondelete='SET NULL'),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('router_id')
-    )
-    op.create_table(
-        'cisco_ml2_n1kv_policy_profiles',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=False),
-        sa.Column('vsm_ip', sa.String(length=16), nullable=False),
-        sa.PrimaryKeyConstraint('id', 'vsm_ip'),
-    )
-
-    op.create_table(
-        'cisco_ml2_n1kv_network_profiles',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=False),
-        sa.Column('segment_type', network_profile_type, nullable=False),
-        sa.Column('segment_range', sa.String(length=255), nullable=True),
-        sa.Column('multicast_ip_index', sa.Integer(), nullable=True),
-        sa.Column('multicast_ip_range', sa.String(length=255), nullable=True),
-        sa.Column('sub_type', sa.String(length=255), nullable=True),
-        sa.Column('physical_network', sa.String(length=255), nullable=True),
-        sa.PrimaryKeyConstraint('id'),
-    )
-
-    op.create_table(
-        'cisco_ml2_n1kv_port_bindings',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('profile_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('port_id'),
-    )
-
-    op.create_table(
-        'cisco_ml2_n1kv_network_bindings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('network_type', sa.String(length=32), nullable=False),
-        sa.Column('segmentation_id', sa.Integer(), autoincrement=False),
-        sa.Column('profile_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['profile_id'],
-                                ['cisco_ml2_n1kv_network_profiles.id']),
-        sa.PrimaryKeyConstraint('network_id')
-    )
-
-    op.create_table(
-        'cisco_ml2_n1kv_vxlan_allocations',
-        sa.Column('vxlan_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), nullable=False),
-        sa.Column('network_profile_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_profile_id'],
-                                ['cisco_ml2_n1kv_network_profiles.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('vxlan_id')
-    )
-
-    op.create_table(
-        'cisco_ml2_n1kv_vlan_allocations',
-        sa.Column('physical_network', sa.String(length=64), nullable=False),
-        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), autoincrement=False,
-                  nullable=False),
-        sa.Column('network_profile_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_profile_id'],
-                                ['cisco_ml2_n1kv_network_profiles.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('physical_network', 'vlan_id')
-    )
-    op.create_table(
-        'cisco_ml2_n1kv_profile_bindings',
-        sa.Column('profile_type', profile_type, nullable=True),
-        sa.Column('tenant_id', sa.String(length=36), nullable=False,
-                  server_default='tenant_id_not_set'),
-        sa.Column('profile_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('tenant_id', 'profile_id')
-    )
-    op.create_table(
-        'ml2_ucsm_port_profiles',
-        sa.Column('vlan_id', sa.Integer(), nullable=False),
-        sa.Column('profile_id', sa.String(length=64), nullable=False),
-        sa.Column('created_on_ucs', sa.Boolean(), nullable=False),
-        sa.PrimaryKeyConstraint('vlan_id')
-    )
diff --git a/neutron/db/migration/alembic_migrations/core_init_ops.py b/neutron/db/migration/alembic_migrations/core_init_ops.py
deleted file mode 100644 (file)
index 0c074d0..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for core resources
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'networks',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('status', sa.String(length=16), nullable=True),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=True),
-        sa.Column('shared', sa.Boolean(), nullable=True),
-        sa.Column('mtu', sa.Integer(), nullable=True),
-        sa.Column('vlan_transparent', sa.Boolean(), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'ports',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('mac_address', sa.String(length=32), nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.Column('device_id', sa.String(length=255), nullable=False),
-        sa.Column('device_owner', sa.String(length=255), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id']),
-        sa.UniqueConstraint('network_id', 'mac_address',
-                            name='uniq_ports0network_id0mac_address'),
-        sa.PrimaryKeyConstraint('id'),
-        sa.Index(op.f('ix_ports_network_id_device_owner'), 'network_id',
-                 'device_owner'),
-        sa.Index(op.f('ix_ports_network_id_mac_address'), 'network_id',
-                 'mac_address'))
-
-    op.create_table(
-        'subnets',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('network_id', sa.String(length=36), nullable=True),
-        sa.Column('ip_version', sa.Integer(), nullable=False),
-        sa.Column('cidr', sa.String(length=64), nullable=False),
-        sa.Column('gateway_ip', sa.String(length=64), nullable=True),
-        sa.Column('enable_dhcp', sa.Boolean(), nullable=True),
-        sa.Column('shared', sa.Boolean(), nullable=True),
-        sa.Column('ipv6_ra_mode',
-                  sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless',
-                          name='ipv6_ra_modes'),
-                  nullable=True),
-        sa.Column('ipv6_address_mode',
-                  sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless',
-                          name='ipv6_address_modes'),
-                  nullable=True),
-        sa.Column('subnetpool_id', sa.String(length=36), nullable=True,
-                  index=True),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'dnsnameservers',
-        sa.Column('address', sa.String(length=128), nullable=False),
-        sa.Column('subnet_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('address', 'subnet_id'))
-
-    op.create_table(
-        'ipallocationpools',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('subnet_id', sa.String(length=36), nullable=True),
-        sa.Column('first_ip', sa.String(length=64), nullable=False),
-        sa.Column('last_ip', sa.String(length=64), nullable=False),
-        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'subnetroutes',
-        sa.Column('destination', sa.String(length=64), nullable=False),
-        sa.Column('nexthop', sa.String(length=64), nullable=False),
-        sa.Column('subnet_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id'))
-
-    op.create_table(
-        'ipallocations',
-        sa.Column('port_id', sa.String(length=36), nullable=True),
-        sa.Column('ip_address', sa.String(length=64), nullable=False),
-        sa.Column('subnet_id', sa.String(length=36), nullable=False),
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id'))
-
-    op.create_table(
-        'ipavailabilityranges',
-        sa.Column('allocation_pool_id', sa.String(length=36), nullable=False),
-        sa.Column('first_ip', sa.String(length=64), nullable=False),
-        sa.Column('last_ip', sa.String(length=64), nullable=False),
-        sa.ForeignKeyConstraint(['allocation_pool_id'],
-                                ['ipallocationpools.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'),
-        sa.UniqueConstraint(
-            'first_ip', 'allocation_pool_id',
-            name='uniq_ipavailabilityranges0first_ip0allocation_pool_id'),
-        sa.UniqueConstraint(
-            'last_ip', 'allocation_pool_id',
-            name='uniq_ipavailabilityranges0last_ip0allocation_pool_id'))
-
-    op.create_table(
-        'networkdhcpagentbindings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'],
-            ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-            ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id'))
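Editorial note: the core schema above hangs everything off networks — ports, subnets, and the IPAM tables all reference networks.id — and the uniq_ports0network_id0mac_address constraint scopes MAC uniqueness to a single network rather than globally. A minimal standalone sketch of that scoping (an assumption-laden demo, not Neutron code: SQLite in-memory, SQLAlchemy 1.4+, unrelated columns trimmed):

    import sqlalchemy as sa
    from sqlalchemy import exc

    metadata = sa.MetaData()
    networks = sa.Table('networks', metadata,
                        sa.Column('id', sa.String(36), primary_key=True))
    ports = sa.Table(
        'ports', metadata,
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('network_id', sa.String(36),
                  sa.ForeignKey('networks.id'), nullable=False),
        sa.Column('mac_address', sa.String(32), nullable=False),
        sa.UniqueConstraint('network_id', 'mac_address',
                            name='uniq_ports0network_id0mac_address'))

    engine = sa.create_engine('sqlite://')
    metadata.create_all(engine)
    with engine.begin() as conn:
        conn.execute(networks.insert(), [{'id': 'net-1'}, {'id': 'net-2'}])
        conn.execute(ports.insert(), {'id': 'p1', 'network_id': 'net-1',
                                      'mac_address': 'fa:16:3e:00:00:01'})
        # The same MAC is allowed again on a different network.
        conn.execute(ports.insert(), {'id': 'p2', 'network_id': 'net-2',
                                      'mac_address': 'fa:16:3e:00:00:01'})
    try:
        with engine.begin() as conn:
            conn.execute(ports.insert(), {'id': 'p3', 'network_id': 'net-1',
                                          'mac_address': 'fa:16:3e:00:00:01'})
    except exc.IntegrityError:
        print('duplicate (network_id, mac_address) rejected')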
diff --git a/neutron/db/migration/alembic_migrations/dvr_init_opts.py b/neutron/db/migration/alembic_migrations/dvr_init_opts.py
deleted file mode 100644 (file)
index 729b098..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for DVR (distributed virtual routing)
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'dvr_host_macs',
-        sa.Column('host', sa.String(length=255), nullable=False),
-        sa.Column('mac_address', sa.String(length=32),
-                  nullable=False, unique=True),
-        sa.PrimaryKeyConstraint('host')
-    )
-    op.create_table(
-        'ml2_dvr_port_bindings',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('host', sa.String(length=255), nullable=False),
-        sa.Column('router_id', sa.String(length=36), nullable=True),
-        sa.Column('vif_type', sa.String(length=64), nullable=False),
-        sa.Column('vif_details', sa.String(length=4095),
-                  nullable=False, server_default=''),
-        sa.Column('vnic_type', sa.String(length=64),
-                  nullable=False, server_default='normal'),
-        sa.Column('profile', sa.String(length=4095),
-                  nullable=False, server_default=''),
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('port_id', 'host')
-    )
-    op.create_table(
-        'csnat_l3_agent_bindings',
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.Column('l3_agent_id', sa.String(length=36), nullable=False),
-        sa.Column('host_id', sa.String(length=255), nullable=True),
-        sa.Column('csnat_gw_port_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['csnat_gw_port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('router_id', 'l3_agent_id')
-    )
diff --git a/neutron/db/migration/alembic_migrations/env.py b/neutron/db/migration/alembic_migrations/env.py
deleted file mode 100644 (file)
index 153b22d..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from logging import config as logging_config
-
-from alembic import context
-from oslo_config import cfg
-from oslo_db.sqlalchemy import session
-import sqlalchemy as sa
-from sqlalchemy import event
-
-from neutron.db.migration.alembic_migrations import external
-from neutron.db.migration import autogen
-from neutron.db.migration.models import head  # noqa
-from neutron.db import model_base
-
-try:
-    # NOTE(mriedem): This is to register the DB2 alembic code which
-    # is an optional runtime dependency.
-    from ibm_db_alembic.ibm_db import IbmDbImpl  # noqa # pylint: disable=unused-import
-except ImportError:
-    pass
-
-
-MYSQL_ENGINE = None
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-neutron_config = config.neutron_config
-
-# Interpret the config file for Python logging.
-# This call configures the loggers defined there.
-logging_config.fileConfig(config.config_file_name)
-
-# set the target for 'autogenerate' support
-target_metadata = model_base.BASEV2.metadata
-
-
-def set_mysql_engine():
-    try:
-        mysql_engine = neutron_config.command.mysql_engine
-    except cfg.NoSuchOptError:
-        mysql_engine = None
-
-    global MYSQL_ENGINE
-    MYSQL_ENGINE = (mysql_engine or
-                    model_base.BASEV2.__table_args__['mysql_engine'])
-
-
-def include_object(object_, name, type_, reflected, compare_to):
-    if type_ == 'table' and name in external.TABLES:
-        return False
-    elif type_ == 'index' and reflected and name.startswith("idx_autoinc_"):
-        # skip indexes created by SQLAlchemy autoincrement=True
-        # on composite PK integer columns
-        return False
-    else:
-        return True
-
-
-def run_migrations_offline():
-    """Run migrations in 'offline' mode.
-
-    This configures the context with just a URL (or a bare dialect
-    name when no connection URL is set); no Engine is created.
-
-    Calls to context.execute() here emit the given string to the
-    script output.
-
-    """
-    set_mysql_engine()
-
-    kwargs = dict()
-    if neutron_config.database.connection:
-        kwargs['url'] = neutron_config.database.connection
-    else:
-        kwargs['dialect_name'] = neutron_config.database.engine
-    kwargs['include_object'] = include_object
-    context.configure(**kwargs)
-
-    with context.begin_transaction():
-        context.run_migrations()
-
-
-@event.listens_for(sa.Table, 'after_parent_attach')
-def set_storage_engine(target, parent):
-    if MYSQL_ENGINE:
-        target.kwargs['mysql_engine'] = MYSQL_ENGINE
-
-
-def run_migrations_online():
-    """Run migrations in 'online' mode.
-
-    In this scenario we need to create an Engine
-    and associate a connection with the context.
-
-    """
-    set_mysql_engine()
-    connection = config.attributes.get('connection')
-    new_engine = connection is None
-    if new_engine:
-        engine = session.create_engine(neutron_config.database.connection)
-        connection = engine.connect()
-    context.configure(
-        connection=connection,
-        target_metadata=target_metadata,
-        include_object=include_object,
-        process_revision_directives=autogen.process_revision_directives
-    )
-
-    try:
-        with context.begin_transaction():
-            context.run_migrations()
-    finally:
-        if new_engine:
-            connection.close()
-            engine.dispose()
-
-
-if context.is_offline_mode():
-    run_migrations_offline()
-else:
-    run_migrations_online()
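Editorial note: the 'after_parent_attach' listener above is what lets every op.create_table call in the init-ops modules omit mysql_engine — SQLAlchemy fires the event as each Table is attached to its MetaData, and the hook injects the engine that set_mysql_engine() resolved. A self-contained sketch of the mechanism ('InnoDB' is a stand-in value):

    import sqlalchemy as sa
    from sqlalchemy import event

    MYSQL_ENGINE = 'InnoDB'  # stand-in for what set_mysql_engine() resolves

    # Process-wide hook, as in env.py: runs for every Table created
    # from here on.
    @event.listens_for(sa.Table, 'after_parent_attach')
    def set_storage_engine(target, parent):
        if MYSQL_ENGINE:
            target.kwargs['mysql_engine'] = MYSQL_ENGINE

    demo = sa.Table('demo', sa.MetaData(),
                    sa.Column('id', sa.Integer, primary_key=True))
    print(demo.kwargs['mysql_engine'])  # InnoDB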
diff --git a/neutron/db/migration/alembic_migrations/external.py b/neutron/db/migration/alembic_migrations/external.py
deleted file mode 100644 (file)
index 51b6e39..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-# These tables are in the neutron database, but their models have moved
-# to separate repositories. We skip the migration checks for these tables.
-
-VPNAAS_TABLES = ['vpnservices', 'ipsecpolicies', 'ipsecpeercidrs',
-                 'ipsec_site_connections', 'cisco_csr_identifier_map',
-                 'ikepolicies']
-
-LBAAS_TABLES = ['vips', 'sessionpersistences', 'pools', 'healthmonitors',
-                'poolstatisticss', 'members', 'poolloadbalanceragentbindings',
-                'poolmonitorassociations']
-
-FWAAS_TABLES = ['firewall_rules', 'firewalls', 'firewall_policies']
-
-# Arista ML2 driver Models moved to openstack/networking-arista
-REPO_ARISTA_TABLES = [
-    'arista_provisioned_nets',
-    'arista_provisioned_vms',
-    'arista_provisioned_tenants',
-]
-
-# Models moved to openstack/networking-cisco
-REPO_CISCO_TABLES = [
-    'cisco_ml2_apic_contracts',
-    'cisco_ml2_apic_names',
-    'cisco_ml2_apic_host_links',
-    'cisco_ml2_n1kv_policy_profiles',
-    'cisco_ml2_n1kv_network_profiles',
-    'cisco_ml2_n1kv_port_bindings',
-    'cisco_ml2_n1kv_network_bindings',
-    'cisco_ml2_n1kv_vxlan_allocations',
-    'cisco_ml2_n1kv_vlan_allocations',
-    'cisco_ml2_n1kv_profile_bindings',
-    'cisco_ml2_nexusport_bindings',
-    'cisco_ml2_nexus_nve',
-    'ml2_nexus_vxlan_allocations',
-    'ml2_nexus_vxlan_mcast_groups',
-    'ml2_ucsm_port_profiles',
-    'cisco_hosting_devices',
-    'cisco_port_mappings',
-    'cisco_router_mappings',
-]
-
-# VMware-NSX models moved to openstack/vmware-nsx
-REPO_VMWARE_TABLES = [
-    'tz_network_bindings',
-    'neutron_nsx_network_mappings',
-    'neutron_nsx_security_group_mappings',
-    'neutron_nsx_port_mappings',
-    'neutron_nsx_router_mappings',
-    'multi_provider_networks',
-    'networkconnections',
-    'networkgatewaydevicereferences',
-    'networkgatewaydevices',
-    'networkgateways',
-    'maclearningstates',
-    'qosqueues',
-    'portqueuemappings',
-    'networkqueuemappings',
-    'lsn_port',
-    'lsn',
-    'nsxv_router_bindings',
-    'nsxv_edge_vnic_bindings',
-    'nsxv_edge_dhcp_static_bindings',
-    'nsxv_internal_networks',
-    'nsxv_internal_edges',
-    'nsxv_security_group_section_mappings',
-    'nsxv_rule_mappings',
-    'nsxv_port_vnic_mappings',
-    'nsxv_router_ext_attributes',
-    'nsxv_tz_network_bindings',
-    'nsxv_port_index_mappings',
-    'nsxv_firewall_rule_bindings',
-    'nsxv_spoofguard_policy_network_mappings',
-    'nsxv_vdr_dhcp_bindings',
-    'vcns_router_bindings',
-]
-
-# NEC models are in openstack/networking-nec
-REPO_NEC_TABLES = [
-    'ofcnetworkmappings',
-    'ofcportmappings',
-    'ofcroutermappings',
-    'ofcfiltermappings',
-    'ofctenantmappings',
-    'portinfos',
-    'routerproviders',
-    'packetfilters',
-]
-
-# Brocade models are in openstack/networking-brocade
-REPO_BROCADE_TABLES = [
-    'brocadenetworks',
-    'brocadeports',
-    'ml2_brocadenetworks',
-    'ml2_brocadeports',
-]
-
-# BigSwitch models are in openstack/networking-bigswitch
-REPO_BIGSWITCH_TABLES = [
-    'consistencyhashes',
-    'routerrules',
-    'nexthops',
-]
-
-# Nuage models are in github.com/nuagenetworks/nuage-openstack-neutron
-REPO_NUAGE_TABLES = [
-    'nuage_net_partitions',
-    'nuage_net_partition_router_mapping',
-    'nuage_provider_net_bindings',
-    'nuage_subnet_l2dom_mapping',
-]
-
-TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES +
-          REPO_ARISTA_TABLES +
-          REPO_CISCO_TABLES +
-          REPO_VMWARE_TABLES +
-          REPO_NEC_TABLES +
-          REPO_BROCADE_TABLES +
-          REPO_BIGSWITCH_TABLES +
-          REPO_NUAGE_TABLES)
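Editorial note: env.py feeds TABLES into its include_object hook, so autogenerate and the migration sanity checks ignore tables whose models now live out of tree. A standalone sketch of that filter, with a hypothetical two-entry list standing in for the full one assembled above:

    TABLES = ['vips', 'firewall_rules']  # stand-in for the full list

    def include_object(object_, name, type_, reflected, compare_to):
        if type_ == 'table' and name in TABLES:
            return False
        elif type_ == 'index' and reflected and name.startswith('idx_autoinc_'):
            return False
        return True

    print(include_object(None, 'vips', 'table', True, None))      # False: skipped
    print(include_object(None, 'networks', 'table', True, None))  # True: checked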
diff --git a/neutron/db/migration/alembic_migrations/firewall_init_ops.py b/neutron/db/migration/alembic_migrations/firewall_init_ops.py
deleted file mode 100644 (file)
index 981edc6..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial schema operations for the firewall service plugin
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-action_types = sa.Enum('allow', 'deny', name='firewallrules_action')
-
-
-def upgrade():
-    op.create_table(
-        'firewall_policies',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=1024), nullable=True),
-        sa.Column('shared', sa.Boolean(), nullable=True),
-        sa.Column('audited', sa.Boolean(), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'firewalls',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=1024), nullable=True),
-        sa.Column('shared', sa.Boolean(), nullable=True),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=True),
-        sa.Column('status', sa.String(length=16), nullable=True),
-        sa.Column('firewall_policy_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['firewall_policy_id'],
-                                ['firewall_policies.id'],
-                                name='firewalls_ibfk_1'),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'firewall_rules',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=1024), nullable=True),
-        sa.Column('firewall_policy_id', sa.String(length=36), nullable=True),
-        sa.Column('shared', sa.Boolean(), nullable=True),
-        sa.Column('protocol', sa.String(length=40), nullable=True),
-        sa.Column('ip_version', sa.Integer(), nullable=False),
-        sa.Column('source_ip_address', sa.String(length=46), nullable=True),
-        sa.Column('destination_ip_address', sa.String(length=46),
-                  nullable=True),
-        sa.Column('source_port_range_min', sa.Integer(), nullable=True),
-        sa.Column('source_port_range_max', sa.Integer(), nullable=True),
-        sa.Column('destination_port_range_min', sa.Integer(), nullable=True),
-        sa.Column('destination_port_range_max', sa.Integer(), nullable=True),
-        sa.Column('action', action_types, nullable=True),
-        sa.Column('enabled', sa.Boolean(), nullable=True),
-        sa.Column('position', sa.Integer(), nullable=True),
-        sa.ForeignKeyConstraint(['firewall_policy_id'],
-                                ['firewall_policies.id'],
-                                name='firewall_rules_ibfk_1'),
-        sa.PrimaryKeyConstraint('id'))
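Editorial note: action_types is defined once at module level with an explicit name because, on PostgreSQL, a named sa.Enum becomes a standalone schema type that the column merely references, while MySQL inlines the values in the column definition. A quick sketch of the difference (dialect compilation only, no database needed):

    import sqlalchemy as sa
    from sqlalchemy.dialects import mysql, postgresql

    action_types = sa.Enum('allow', 'deny', name='firewallrules_action')
    print(action_types.compile(dialect=mysql.dialect()))
    # -> ENUM('allow', 'deny')       (inlined in the column definition)
    print(action_types.compile(dialect=postgresql.dialect()))
    # -> firewallrules_action        (a separate named type)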
diff --git a/neutron/db/migration/alembic_migrations/l3_init_ops.py b/neutron/db/migration/alembic_migrations/l3_init_ops.py
deleted file mode 100644 (file)
index 4c1dea5..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for the L3 extension
-
-from alembic import op
-import sqlalchemy as sa
-
-l3_ha_states = sa.Enum('active', 'standby', name='l3_ha_states')
-
-
-def create_routerroutes():
-    op.create_table(
-        'routerroutes',
-        sa.Column('destination', sa.String(length=64), nullable=False),
-        sa.Column('nexthop', sa.String(length=64), nullable=False),
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('destination', 'nexthop', 'router_id'))
-
-
-def upgrade():
-    op.create_table(
-        'externalnetworks',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id'))
-
-    op.create_table(
-        'routers',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('status', sa.String(length=16), nullable=True),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=True),
-        sa.Column('gw_port_id', sa.String(length=36), nullable=True),
-        sa.Column('enable_snat', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.true()),
-        sa.ForeignKeyConstraint(['gw_port_id'], ['ports.id'], ),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'floatingips',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('floating_ip_address', sa.String(length=64), nullable=False),
-        sa.Column('floating_network_id', sa.String(length=36), nullable=False),
-        sa.Column('floating_port_id', sa.String(length=36), nullable=False),
-        sa.Column('fixed_port_id', sa.String(length=36), nullable=True),
-        sa.Column('fixed_ip_address', sa.String(length=64), nullable=True),
-        sa.Column('router_id', sa.String(length=36), nullable=True),
-        sa.Column('last_known_router_id', sa.String(length=36), nullable=True),
-        sa.Column('status', sa.String(length=16), nullable=True),
-        sa.ForeignKeyConstraint(['fixed_port_id'], ['ports.id'], ),
-        sa.ForeignKeyConstraint(['floating_port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ),
-        sa.PrimaryKeyConstraint('id'))
-
-    create_routerroutes()
-
-    op.create_table(
-        'routerl3agentbindings',
-        sa.Column('router_id', sa.String(length=36), nullable=True),
-        sa.Column('l3_agent_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('router_id', 'l3_agent_id'))
-    op.create_table(
-        'router_extra_attributes',
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.Column('distributed', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.false()),
-        sa.Column('service_router', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.false()),
-        sa.Column('ha', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.false()),
-        sa.Column('ha_vr_id', sa.Integer()),
-        sa.ForeignKeyConstraint(
-            ['router_id'], ['routers.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('router_id')
-    )
-    op.create_table('ha_router_agent_port_bindings',
-                    sa.Column('port_id', sa.String(length=36),
-                              nullable=False),
-                    sa.Column('router_id', sa.String(length=36),
-                              nullable=False),
-                    sa.Column('l3_agent_id', sa.String(length=36),
-                              nullable=True),
-                    sa.Column('state', l3_ha_states,
-                              server_default='standby'),
-                    sa.PrimaryKeyConstraint('port_id'),
-                    sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-                                            ondelete='CASCADE'),
-                    sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                            ondelete='CASCADE'),
-                    sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'],
-                                            ondelete='CASCADE'))
-
-    op.create_table('ha_router_networks',
-                    sa.Column('tenant_id', sa.String(length=255),
-                              nullable=False, primary_key=True),
-                    sa.Column('network_id', sa.String(length=36),
-                              nullable=False,
-                              primary_key=True),
-                    sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                            ondelete='CASCADE'))
-
-    op.create_table('ha_router_vrid_allocations',
-                    sa.Column('network_id', sa.String(length=36),
-                              nullable=False,
-                              primary_key=True),
-                    sa.Column('vr_id', sa.Integer(),
-                              nullable=False,
-                              primary_key=True),
-                    sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                            ondelete='CASCADE'))
-    op.create_table(
-        'routerports',
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('port_type', sa.String(length=255)),
-        sa.PrimaryKeyConstraint('router_id', 'port_id'),
-        sa.ForeignKeyConstraint(
-            ['router_id'],
-            ['routers.id'],
-            ondelete='CASCADE'
-        ),
-        sa.ForeignKeyConstraint(
-            ['port_id'],
-            ['ports.id'],
-            ondelete='CASCADE'
-        ),
-    )
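Editorial note: ha_router_agent_port_bindings gives state a server default of 'standby', so a freshly scheduled HA port starts passive until an agent reports it active. A minimal sketch of the default in action (an illustrative demo, not Neutron code: SQLite in-memory, SQLAlchemy 1.4+, foreign keys and extra columns trimmed):

    import sqlalchemy as sa

    l3_ha_states = sa.Enum('active', 'standby', name='l3_ha_states')
    metadata = sa.MetaData()
    bindings = sa.Table(
        'ha_router_agent_port_bindings', metadata,
        sa.Column('port_id', sa.String(36), primary_key=True),
        sa.Column('router_id', sa.String(36), nullable=False),
        sa.Column('state', l3_ha_states, server_default='standby'))

    engine = sa.create_engine('sqlite://')
    metadata.create_all(engine)
    with engine.begin() as conn:
        # 'state' omitted on purpose: the server default fills it in.
        conn.execute(bindings.insert(), {'port_id': 'p1', 'router_id': 'r1'})
        print(conn.execute(sa.select(bindings.c.state)).scalar())  # standby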
diff --git a/neutron/db/migration/alembic_migrations/lb_init_ops.py b/neutron/db/migration/alembic_migrations/lb_init_ops.py
deleted file mode 100644 (file)
index f97961f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for the Linux Bridge plugin
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'network_states',
-        sa.Column('physical_network', sa.String(length=64), nullable=False),
-        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), nullable=False),
-        sa.PrimaryKeyConstraint('physical_network', 'vlan_id'))
-
-    op.create_table(
-        'network_bindings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('physical_network', sa.String(length=64), nullable=True),
-        sa.Column('vlan_id', sa.Integer(), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id'))
diff --git a/neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py b/neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py
deleted file mode 100644 (file)
index 111bcf6..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial schema operations for the load balancer service plugin
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-protocols = sa.Enum('HTTP', 'HTTPS', 'TCP', name='lb_protocols')
-session_persistence_type = sa.Enum('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE',
-                                   name='sesssionpersistences_type')
-lb_methods = sa.Enum('ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP',
-                     name='pools_lb_method')
-health_monitor_type = sa.Enum('PING', 'TCP', 'HTTP', 'HTTPS',
-                              name='healthmontiors_type')
-
-
-def upgrade():
-    op.create_table(
-        'healthmonitors',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('type', health_monitor_type, nullable=False),
-        sa.Column('delay', sa.Integer(), nullable=False),
-        sa.Column('timeout', sa.Integer(), nullable=False),
-        sa.Column('max_retries', sa.Integer(), nullable=False),
-        sa.Column('http_method', sa.String(length=16), nullable=True),
-        sa.Column('url_path', sa.String(length=255), nullable=True),
-        sa.Column('expected_codes', sa.String(length=64), nullable=True),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'vips',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.Column('status_description', sa.String(length=255), nullable=True),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=255), nullable=True),
-        sa.Column('port_id', sa.String(length=36), nullable=True),
-        sa.Column('protocol_port', sa.Integer(), nullable=False),
-        sa.Column('protocol', protocols, nullable=False),
-        sa.Column('pool_id', sa.String(length=36), nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.Column('connection_limit', sa.Integer(), nullable=True),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint('pool_id'))
-
-    op.create_table(
-        'pools',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.Column('status_description', sa.String(length=255), nullable=True),
-        sa.Column('vip_id', sa.String(length=36), nullable=True),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=255), nullable=True),
-        sa.Column('subnet_id', sa.String(length=36), nullable=False),
-        sa.Column('protocol', protocols, nullable=False),
-        sa.Column('lb_method', lb_methods, nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.ForeignKeyConstraint(['vip_id'], ['vips.id'], ),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'sessionpersistences',
-        sa.Column('vip_id', sa.String(length=36), nullable=False),
-        sa.Column('type', session_persistence_type, nullable=False),
-        sa.Column('cookie_name', sa.String(length=1024), nullable=True),
-        sa.ForeignKeyConstraint(['vip_id'], ['vips.id'], ),
-        sa.PrimaryKeyConstraint('vip_id'))
-
-    op.create_table(
-        'poolloadbalanceragentbindings',
-        sa.Column('pool_id', sa.String(length=36), nullable=False),
-        sa.Column('agent_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['pool_id'], ['pools.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['agent_id'], ['agents.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('pool_id'))
-
-    op.create_table(
-        'members',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.Column('status_description', sa.String(length=255), nullable=True),
-        sa.Column('pool_id', sa.String(length=36), nullable=False),
-        sa.Column('address', sa.String(length=64), nullable=False),
-        sa.Column('protocol_port', sa.Integer(), nullable=False),
-        sa.Column('weight', sa.Integer(), nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint('pool_id', 'address', 'protocol_port',
-                            name='uniq_member0pool_id0address0port'))
-
-    op.create_table(
-        'poolmonitorassociations',
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.Column('status_description', sa.String(length=255), nullable=True),
-        sa.Column('pool_id', sa.String(length=36), nullable=False),
-        sa.Column('monitor_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ),
-        sa.ForeignKeyConstraint(['monitor_id'], ['healthmonitors.id'], ),
-        sa.PrimaryKeyConstraint('pool_id', 'monitor_id'))
-
-    op.create_table(
-        'poolstatisticss',
-        sa.Column('pool_id', sa.String(length=36), nullable=False),
-        sa.Column('bytes_in', sa.BigInteger(), nullable=False),
-        sa.Column('bytes_out', sa.BigInteger(), nullable=False),
-        sa.Column('active_connections', sa.BigInteger(), nullable=False),
-        sa.Column('total_connections', sa.BigInteger(), nullable=False),
-        sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ),
-        sa.PrimaryKeyConstraint('pool_id'))
-
-    op.create_table(
-        'embrane_pool_port',
-        sa.Column('pool_id', sa.String(length=36), nullable=False),
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['pool_id'], ['pools.id'],
-                                name='embrane_pool_port_ibfk_1'),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-                                name='embrane_pool_port_ibfk_2'),
-        sa.PrimaryKeyConstraint('pool_id'))
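Editorial note: several identifiers above read like typos, and historically they are — 'sesssionpersistences_type', 'healthmontiors_type', and the table name 'poolstatisticss'. They are nonetheless the names that deployed databases actually carry, so the migration must reproduce them verbatim; correcting the spelling here would desynchronize the migration from existing schemas. On PostgreSQL the misspelled enum name is a real schema object, as this small sketch shows:

    import sqlalchemy as sa
    from sqlalchemy.dialects import postgresql

    # (sic) -- the triple 's' is what existing databases were created with.
    session_persistence_type = sa.Enum('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE',
                                       name='sesssionpersistences_type')
    print(session_persistence_type.compile(dialect=postgresql.dialect()))
    # -> sesssionpersistences_type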
diff --git a/neutron/db/migration/alembic_migrations/metering_init_ops.py b/neutron/db/migration/alembic_migrations/metering_init_ops.py
deleted file mode 100644 (file)
index 6d152d2..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for the metering service plugin
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-direction = sa.Enum('ingress', 'egress',
-                    name='meteringlabels_direction')
-
-
-def create_meteringlabels():
-    op.create_table(
-        'meteringlabels',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=1024), nullable=True),
-        sa.Column('shared', sa.Boolean(), server_default=sa.sql.false(),
-                  nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-
-def upgrade():
-    create_meteringlabels()
-
-    op.create_table(
-        'meteringlabelrules',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('direction', direction, nullable=True),
-        sa.Column('remote_ip_prefix', sa.String(length=64), nullable=True),
-        sa.Column('metering_label_id', sa.String(length=36), nullable=False),
-        sa.Column('excluded', sa.Boolean(), nullable=True,
-                  server_default=sa.sql.false()),
-        sa.ForeignKeyConstraint(['metering_label_id'],
-                                ['meteringlabels.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'))
diff --git a/neutron/db/migration/alembic_migrations/ml2_init_ops.py b/neutron/db/migration/alembic_migrations/ml2_init_ops.py
deleted file mode 100644 (file)
index c2b0f0d..0000000
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for the ML2 plugin and its drivers
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'ml2_vlan_allocations',
-        sa.Column('physical_network', sa.String(length=64), nullable=False),
-        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), nullable=False),
-        sa.PrimaryKeyConstraint('physical_network', 'vlan_id'),
-        sa.Index(op.f('ix_ml2_vlan_allocations_physical_network_allocated'),
-                 'physical_network', 'allocated'))
-
-    op.create_table(
-        'ml2_vxlan_endpoints',
-        sa.Column('ip_address', sa.String(length=64), nullable=False),
-        sa.Column('udp_port', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('host', sa.String(length=255), nullable=True),
-        sa.UniqueConstraint('host', name='unique_ml2_vxlan_endpoints0host'),
-        sa.PrimaryKeyConstraint('ip_address'))
-
-    op.create_table(
-        'ml2_gre_endpoints',
-        sa.Column('ip_address', sa.String(length=64), nullable=False),
-        sa.Column('host', sa.String(length=255), nullable=True),
-        sa.UniqueConstraint('host', name='unique_ml2_gre_endpoints0host'),
-        sa.PrimaryKeyConstraint('ip_address'))
-
-    op.create_table(
-        'ml2_vxlan_allocations',
-        sa.Column('vxlan_vni', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.false(), index=True),
-        sa.PrimaryKeyConstraint('vxlan_vni'))
-
-    op.create_table(
-        'ml2_gre_allocations',
-        sa.Column('gre_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.false(), index=True),
-        sa.PrimaryKeyConstraint('gre_id'))
-
-    op.create_table(
-        'ml2_flat_allocations',
-        sa.Column('physical_network', sa.String(length=64), nullable=False),
-        sa.PrimaryKeyConstraint('physical_network'))
-
-    op.create_table(
-        'ml2_network_segments',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('network_type', sa.String(length=32), nullable=False),
-        sa.Column('physical_network', sa.String(length=64), nullable=True),
-        sa.Column('segmentation_id', sa.Integer(), nullable=True),
-        sa.Column('is_dynamic', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.false()),
-        sa.Column('segment_index', sa.Integer(), nullable=False,
-                  server_default='0'),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'ml2_port_bindings',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('host', sa.String(length=255), nullable=False,
-                  server_default=''),
-        sa.Column('vif_type', sa.String(length=64), nullable=False),
-        sa.Column('vnic_type', sa.String(length=64), nullable=False,
-                  server_default='normal'),
-        sa.Column('profile', sa.String(length=4095), nullable=False,
-                  server_default=''),
-        sa.Column('vif_details', sa.String(length=4095), nullable=False,
-                  server_default=''),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('port_id'))
-
-    op.create_table(
-        'ml2_port_binding_levels',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('host', sa.String(length=255), nullable=False),
-        sa.Column('level', sa.Integer(), autoincrement=False, nullable=False),
-        sa.Column('driver', sa.String(length=64), nullable=True),
-        sa.Column('segment_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['segment_id'], ['ml2_network_segments.id'],
-                                ondelete='SET NULL'),
-        sa.PrimaryKeyConstraint('port_id', 'host', 'level')
-    )
-
-    op.create_table(
-        'cisco_ml2_nexusport_bindings',
-        sa.Column('binding_id', sa.Integer(), nullable=False),
-        sa.Column('port_id', sa.String(length=255), nullable=True),
-        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('switch_ip', sa.String(length=255), nullable=True),
-        sa.Column('instance_id', sa.String(length=255), nullable=True),
-        sa.Column('vni', sa.Integer(), nullable=True),
-        sa.Column('is_provider_vlan', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.false()),
-        sa.PrimaryKeyConstraint('binding_id'),
-    )
-
-    op.create_table(
-        'arista_provisioned_nets',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('network_id', sa.String(length=36), nullable=True),
-        sa.Column('segmentation_id', sa.Integer(),
-                  autoincrement=False, nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'arista_provisioned_vms',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('vm_id', sa.String(length=255), nullable=True),
-        sa.Column('host_id', sa.String(length=255), nullable=True),
-        sa.Column('port_id', sa.String(length=36), nullable=True),
-        sa.Column('network_id', sa.String(length=36), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'arista_provisioned_tenants',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'ml2_nexus_vxlan_allocations',
-        sa.Column('vxlan_vni', sa.Integer(), nullable=False,
-                  autoincrement=False),
-        sa.Column('allocated', sa.Boolean(), nullable=False,
-                  server_default=sa.sql.false()),
-        sa.PrimaryKeyConstraint('vxlan_vni')
-    )
-
-    op.create_table(
-        'ml2_nexus_vxlan_mcast_groups',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('mcast_group', sa.String(length=64), nullable=False),
-        sa.Column('associated_vni', sa.Integer(), nullable=False),
-        sa.PrimaryKeyConstraint('id'),
-        sa.ForeignKeyConstraint(['associated_vni'],
-                                ['ml2_nexus_vxlan_allocations.vxlan_vni'],
-                                ondelete='CASCADE')
-    )
-
-    op.create_table(
-        'cisco_ml2_nexus_nve',
-        sa.Column('vni', sa.Integer(), nullable=False),
-        sa.Column('switch_ip', sa.String(length=255), nullable=True),
-        sa.Column('device_id', sa.String(length=255), nullable=True),
-        sa.Column('mcast_group', sa.String(length=255), nullable=True),
-        sa.PrimaryKeyConstraint('vni', 'switch_ip', 'device_id'))
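Editorial note: ml2_vlan_allocations (and the vxlan/gre variants) support a claim-by-flag pattern: rows carry allocated=False until a segment is claimed with a guarded UPDATE, and the statement's rowcount reveals whether the claim won or lost a race. A runnable sketch of that pattern under stated assumptions (SQLite in-memory, SQLAlchemy 1.4+; not the ML2 type drivers' actual code path):

    import sqlalchemy as sa

    metadata = sa.MetaData()
    vlans = sa.Table(
        'ml2_vlan_allocations', metadata,
        sa.Column('physical_network', sa.String(64), primary_key=True),
        sa.Column('vlan_id', sa.Integer, primary_key=True,
                  autoincrement=False),
        sa.Column('allocated', sa.Boolean, nullable=False))

    engine = sa.create_engine('sqlite://')
    metadata.create_all(engine)
    with engine.begin() as conn:
        conn.execute(vlans.insert(), [
            {'physical_network': 'physnet1', 'vlan_id': v, 'allocated': False}
            for v in (100, 101)])
        # Claim VLAN 100 only if it is still free.
        claim = (vlans.update()
                 .where(vlans.c.physical_network == 'physnet1')
                 .where(vlans.c.vlan_id == 100)
                 .where(vlans.c.allocated == sa.false())
                 .values(allocated=True))
        print(conn.execute(claim).rowcount)  # 1: claimed
        print(conn.execute(claim).rowcount)  # 0: already taken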
diff --git a/neutron/db/migration/alembic_migrations/nec_init_ops.py b/neutron/db/migration/alembic_migrations/nec_init_ops.py
deleted file mode 100644 (file)
index acff610..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for the NEC plugin
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'ofcportmappings',
-        sa.Column('ofc_id', sa.String(length=255), nullable=False),
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('neutron_id'),
-        sa.UniqueConstraint('ofc_id'))
-
-    op.create_table(
-        'ofcroutermappings',
-        sa.Column('ofc_id', sa.String(length=255), nullable=False),
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('neutron_id'),
-        sa.UniqueConstraint('ofc_id'))
-
-    op.create_table(
-        'routerproviders',
-        sa.Column('provider', sa.String(length=255), nullable=True),
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('router_id'))
-
-    op.create_table(
-        'ofctenantmappings',
-        sa.Column('ofc_id', sa.String(length=255), nullable=False),
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('neutron_id'),
-        sa.UniqueConstraint('ofc_id'))
-
-    op.create_table(
-        'ofcfiltermappings',
-        sa.Column('ofc_id', sa.String(length=255), nullable=False),
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('neutron_id'),
-        sa.UniqueConstraint('ofc_id'))
-
-    op.create_table(
-        'ofcnetworkmappings',
-        sa.Column('ofc_id', sa.String(length=255), nullable=False),
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('neutron_id'),
-        sa.UniqueConstraint('ofc_id'))
-
-    op.create_table(
-        'packetfilters',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('priority', sa.Integer(), nullable=False),
-        sa.Column('action', sa.String(length=16), nullable=False),
-        sa.Column('in_port', sa.String(length=36), nullable=True),
-        sa.Column('src_mac', sa.String(length=32), nullable=False),
-        sa.Column('dst_mac', sa.String(length=32), nullable=False),
-        sa.Column('eth_type', sa.Integer(), nullable=False),
-        sa.Column('src_cidr', sa.String(length=64), nullable=False),
-        sa.Column('dst_cidr', sa.String(length=64), nullable=False),
-        sa.Column('protocol', sa.String(length=16), nullable=False),
-        sa.Column('src_port', sa.Integer(), nullable=False),
-        sa.Column('dst_port', sa.Integer(), nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['in_port'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'portinfos',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('datapath_id', sa.String(length=36), nullable=False),
-        sa.Column('port_no', sa.Integer(), nullable=False),
-        sa.Column('vlan_id', sa.Integer(), nullable=False),
-        sa.Column('mac', sa.String(length=32), nullable=False),
-        sa.ForeignKeyConstraint(['id'], ['ports.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'))
diff --git a/neutron/db/migration/alembic_migrations/nsxv_initial_opts.py b/neutron/db/migration/alembic_migrations/nsxv_initial_opts.py
deleted file mode 100644 (file)
index 7143aa8..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from alembic import op
-import sqlalchemy as sa
-
-
-appliance_sizes_enum = sa.Enum('compact', 'large', 'xlarge', 'quadlarge',
-                               name='nsxv_router_bindings_appliance_size')
-edge_types_enum = sa.Enum('service', 'vdr',
-                          name='nsxv_router_bindings_edge_type')
-internal_network_purpose_enum = sa.Enum('inter_edge_net',
-                                        name='nsxv_internal_networks_purpose')
-internal_edge_purpose_enum = sa.Enum('inter_edge_net',
-                                     name='nsxv_internal_edges_purpose')
-tz_binding_type_enum = sa.Enum('flat', 'vlan', 'portgroup',
-                               name='nsxv_tz_network_bindings_binding_type')
-router_types_enum = sa.Enum('shared', 'exclusive',
-                            name='nsxv_router_type')
-
-
-def upgrade():
-    op.create_table(
-        'nsxv_router_bindings',
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.Column('status_description', sa.String(length=255), nullable=True),
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.Column('edge_id', sa.String(length=36), nullable=True),
-        sa.Column('lswitch_id', sa.String(length=36), nullable=True),
-        sa.Column('appliance_size',
-                  appliance_sizes_enum,
-                  nullable=True),
-        sa.Column('edge_type', edge_types_enum, nullable=True),
-        sa.PrimaryKeyConstraint('router_id'))
-    op.create_table(
-        'nsxv_internal_networks',
-        sa.Column('network_purpose', internal_network_purpose_enum,
-                  nullable=False),
-        sa.Column('network_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_purpose'))
-    op.create_table(
-        'nsxv_internal_edges',
-        sa.Column('ext_ip_address', sa.String(length=64), nullable=False),
-        sa.Column('router_id', sa.String(length=36), nullable=True),
-        sa.Column('purpose', internal_edge_purpose_enum, nullable=True),
-        sa.PrimaryKeyConstraint('ext_ip_address'))
-    op.create_table(
-        'nsxv_firewall_rule_bindings',
-        sa.Column('rule_id', sa.String(length=36), nullable=False),
-        sa.Column('edge_id', sa.String(length=36), nullable=False),
-        sa.Column('rule_vse_id', sa.String(length=36), nullable=True),
-        sa.PrimaryKeyConstraint('rule_id', 'edge_id'))
-    op.create_table(
-        'nsxv_edge_dhcp_static_bindings',
-        sa.Column('edge_id', sa.String(length=36), nullable=False),
-        sa.Column('mac_address', sa.String(length=32), nullable=False),
-        sa.Column('binding_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('edge_id', 'mac_address'))
-    op.create_table(
-        'nsxv_edge_vnic_bindings',
-        sa.Column('edge_id', sa.String(length=36), nullable=False),
-        sa.Column('vnic_index', sa.Integer(), nullable=False),
-        sa.Column('tunnel_index', sa.Integer(), nullable=False),
-        sa.Column('network_id', sa.String(length=36), nullable=True),
-        sa.PrimaryKeyConstraint('edge_id', 'vnic_index', 'tunnel_index'))
-    op.create_table(
-        'nsxv_spoofguard_policy_network_mappings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('policy_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id'))
-    op.create_table(
-        'nsxv_security_group_section_mappings',
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.Column('ip_section_id', sa.String(length=100), nullable=True),
-        sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('neutron_id'))
-    op.create_table(
-        'nsxv_tz_network_bindings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('binding_type',
-                  tz_binding_type_enum,
-                  nullable=False),
-        sa.Column('phy_uuid', sa.String(length=36), nullable=True),
-        sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=True),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id', 'binding_type', 'phy_uuid',
-                                'vlan_id'))
-    op.create_table(
-        'nsxv_port_vnic_mappings',
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.Column('nsx_id', sa.String(length=42), nullable=False),
-        sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'))
-    op.create_table(
-        'nsxv_port_index_mappings',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('device_id', sa.String(length=255), nullable=False),
-        sa.Column('index', sa.Integer(), nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('port_id'),
-        sa.UniqueConstraint('device_id', 'index'))
-    op.create_table(
-        'nsxv_rule_mappings',
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.Column('nsx_rule_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['neutron_id'], ['securitygrouprules.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('neutron_id', 'nsx_rule_id'))
-    op.create_table(
-        'nsxv_router_ext_attributes',
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.Column('distributed', sa.Boolean(), nullable=False),
-        sa.Column('router_type', router_types_enum,
-                  default='exclusive', nullable=False),
-        sa.Column('service_router', sa.Boolean(), nullable=False),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('router_id'))
diff --git a/neutron/db/migration/alembic_migrations/nuage_init_opts.py b/neutron/db/migration/alembic_migrations/nuage_init_opts.py
deleted file mode 100644 (file)
index b09ca81..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for Nuage plugin
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-
-    op.create_table(
-        'nuage_net_partitions',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=64), nullable=True),
-        sa.Column('l3dom_tmplt_id', sa.String(length=36), nullable=True),
-        sa.Column('l2dom_tmplt_id', sa.String(length=36), nullable=True),
-        sa.Column('isolated_zone', sa.String(length=64), nullable=True),
-        sa.Column('shared_zone', sa.String(length=64), nullable=True),
-        sa.PrimaryKeyConstraint('id'),
-    )
-    op.create_table(
-        'nuage_subnet_l2dom_mapping',
-        sa.Column('subnet_id', sa.String(length=36), nullable=False),
-        sa.Column('net_partition_id', sa.String(length=36), nullable=True),
-        sa.Column('nuage_subnet_id', sa.String(length=36), nullable=True,
-                  unique=True),
-        sa.Column('nuage_l2dom_tmplt_id', sa.String(length=36),
-                  nullable=True),
-        sa.Column('nuage_user_id', sa.String(length=36), nullable=True),
-        sa.Column('nuage_group_id', sa.String(length=36), nullable=True),
-        sa.Column('nuage_managed_subnet', sa.Boolean(), nullable=True),
-        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['net_partition_id'],
-                                ['nuage_net_partitions.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('subnet_id'),
-    )
-    op.create_table(
-        'nuage_net_partition_router_mapping',
-        sa.Column('net_partition_id', sa.String(length=36), nullable=False),
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.Column('nuage_router_id', sa.String(length=36), nullable=True,
-                  unique=True),
-        sa.Column('nuage_rtr_rd', sa.String(length=36), nullable=True),
-        sa.Column('nuage_rtr_rt', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['net_partition_id'],
-                                ['nuage_net_partitions.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('net_partition_id', 'router_id'),
-    )
-    op.create_table(
-        'nuage_provider_net_bindings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('network_type', sa.String(length=32), nullable=False),
-        sa.Column('physical_network', sa.String(length=64), nullable=False),
-        sa.Column('vlan_id', sa.Integer(), nullable=False),
-        sa.ForeignKeyConstraint(
-            ['network_id'], ['networks.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id')
-    )
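
An editorial aside for readers skimming these deleted init-ops modules: each op.create_table() call maps one-to-one onto a SQLAlchemy Table, so the resulting DDL can be previewed without a full Alembic environment. A minimal sketch, assuming only SQLAlchemy is installed (it mirrors the nuage_provider_net_bindings table above and is not part of the migration):

    import sqlalchemy as sa
    from sqlalchemy.schema import CreateTable

    metadata = sa.MetaData()
    # Stub parent table so the foreign key below resolves at compile time.
    networks = sa.Table('networks', metadata,
                        sa.Column('id', sa.String(36), primary_key=True))
    bindings = sa.Table(
        'nuage_provider_net_bindings', metadata,
        sa.Column('network_id', sa.String(36),
                  sa.ForeignKey('networks.id', ondelete='CASCADE'),
                  primary_key=True),
        sa.Column('network_type', sa.String(32), nullable=False),
        sa.Column('physical_network', sa.String(64), nullable=False),
        sa.Column('vlan_id', sa.Integer, nullable=False))

    # Render the CREATE TABLE statement the migration would emit
    # (generic dialect; real runs go through op.create_table()).
    print(CreateTable(bindings))
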
diff --git a/neutron/db/migration/alembic_migrations/other_extensions_init_ops.py b/neutron/db/migration/alembic_migrations/other_extensions_init_ops.py
deleted file mode 100644 (file)
index abff73a..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for extensions:
-# allowedaddresspairs
-# extradhcpopts
-# portbindings
-# quotas
-# routedserviceinsertion
-# servicetype
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'providerresourceassociations',
-        sa.Column('provider_name', sa.String(length=255), nullable=False),
-        sa.Column('resource_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('provider_name', 'resource_id'),
-        sa.UniqueConstraint('resource_id'))
-
-    op.create_table(
-        'quotas',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('resource', sa.String(length=255), nullable=True),
-        sa.Column('limit', sa.Integer(), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'allowedaddresspairs',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('mac_address', sa.String(length=32), nullable=False),
-        sa.Column('ip_address', sa.String(length=64), nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('port_id', 'mac_address', 'ip_address'))
-
-    op.create_table(
-        'portbindingports',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('host', sa.String(length=255), nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('port_id'))
-
-    op.create_table(
-        'extradhcpopts',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('opt_name', sa.String(length=64), nullable=False),
-        sa.Column('opt_value', sa.String(length=255), nullable=False),
-        sa.Column('ip_version', sa.Integer(), server_default='4',
-                  nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint(
-            'port_id', 'opt_name', 'ip_version',
-            name='uniq_extradhcpopts0portid0optname0ipversion'))
-
-    op.create_table('subnetpools',
-                    sa.Column('tenant_id',
-                              sa.String(length=255),
-                              nullable=True,
-                              index=True),
-                    sa.Column('id', sa.String(length=36), nullable=False),
-                    sa.Column('name', sa.String(length=255), nullable=True),
-                    sa.Column('ip_version', sa.Integer(), nullable=False),
-                    sa.Column('default_prefixlen',
-                              sa.Integer(),
-                              nullable=False),
-                    sa.Column('min_prefixlen', sa.Integer(), nullable=False),
-                    sa.Column('max_prefixlen', sa.Integer(), nullable=False),
-                    sa.Column('shared', sa.Boolean(), nullable=False),
-                    sa.Column('default_quota', sa.Integer(), nullable=True),
-                    sa.PrimaryKeyConstraint('id'))
-    op.create_table('subnetpoolprefixes',
-                    sa.Column('cidr', sa.String(length=64), nullable=False),
-                    sa.Column('subnetpool_id',
-                              sa.String(length=36),
-                              nullable=False),
-                    sa.ForeignKeyConstraint(['subnetpool_id'],
-                                            ['subnetpools.id'],
-                                            ondelete='CASCADE'),
-                    sa.PrimaryKeyConstraint('cidr', 'subnetpool_id'))
diff --git a/neutron/db/migration/alembic_migrations/other_plugins_init_ops.py b/neutron/db/migration/alembic_migrations/other_plugins_init_ops.py
deleted file mode 100644 (file)
index db04401..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for plugins:
-# bigswitch
-# metaplugin
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    # metaplugin
-    op.create_table(
-        'networkflavors',
-        sa.Column('flavor', sa.String(length=255), nullable=True),
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id'))
-
-    op.create_table(
-        'routerflavors',
-        sa.Column('flavor', sa.String(length=255), nullable=True),
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('router_id'))
-
-    # big switch
-    op.create_table(
-        'routerrules',
-        sa.Column('id', sa.Integer(), nullable=False),
-        sa.Column('source', sa.String(length=64), nullable=False),
-        sa.Column('destination', sa.String(length=64), nullable=False),
-        sa.Column('action', sa.String(length=10), nullable=False),
-        sa.Column('router_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'nexthops',
-        sa.Column('rule_id', sa.Integer(), nullable=False),
-        sa.Column('nexthop', sa.String(length=64), nullable=False),
-        sa.ForeignKeyConstraint(['rule_id'], ['routerrules.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('rule_id', 'nexthop'))
-
-    op.create_table(
-        'consistencyhashes',
-        sa.Column('hash_id', sa.String(255), primary_key=True),
-        sa.Column('hash', sa.String(255), nullable=False)
-    )
diff --git a/neutron/db/migration/alembic_migrations/ovs_init_ops.py b/neutron/db/migration/alembic_migrations/ovs_init_ops.py
deleted file mode 100644 (file)
index c02a324..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for the OVS plugin
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'ovs_tunnel_endpoints',
-        sa.Column('ip_address', sa.String(length=64), nullable=False),
-        sa.Column('id', sa.Integer(), nullable=False),
-        sa.PrimaryKeyConstraint('ip_address'),
-        sa.UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'))
-
-    op.create_table(
-        'ovs_tunnel_allocations',
-        sa.Column('tunnel_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), nullable=False),
-        sa.PrimaryKeyConstraint('tunnel_id'))
-
-    op.create_table(
-        'ovs_vlan_allocations',
-        sa.Column('physical_network', sa.String(length=64), nullable=False),
-        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
-                  nullable=False),
-        sa.Column('allocated', sa.Boolean(), nullable=False),
-        sa.PrimaryKeyConstraint('physical_network', 'vlan_id'))
-
-    op.create_table(
-        'ovs_network_bindings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('network_type', sa.String(length=32), nullable=False),
-        sa.Column('physical_network', sa.String(length=64), nullable=True),
-        sa.Column('segmentation_id', sa.Integer(), nullable=True),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id'))
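
A subtlety in the allocation tables above: integer key columns such as tunnel_id and vlan_id are declared with autoincrement=False, because the rows are pre-populated with meaningful ids rather than generated surrogate keys. The effect shows up when compiling DDL for MySQL; a minimal sketch, assuming only SQLAlchemy:

    import sqlalchemy as sa
    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects import mysql

    meta = sa.MetaData()
    implicit = sa.Table('t1', meta,
                        sa.Column('tunnel_id', sa.Integer, primary_key=True))
    explicit = sa.Table('t2', meta,
                        sa.Column('tunnel_id', sa.Integer, primary_key=True,
                                  autoincrement=False))

    # t1 gets AUTO_INCREMENT on MySQL; t2, like ovs_tunnel_allocations above,
    # does not.
    for table in (implicit, explicit):
        print(CreateTable(table).compile(dialect=mysql.dialect()))
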
diff --git a/neutron/db/migration/alembic_migrations/portsec_init_ops.py b/neutron/db/migration/alembic_migrations/portsec_init_ops.py
deleted file mode 100644 (file)
index 8fbc243..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for the port security extension
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'networksecuritybindings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('port_security_enabled', sa.Boolean(), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id'))
-
-    op.create_table(
-        'portsecuritybindings',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('port_security_enabled', sa.Boolean(), nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('port_id'))
diff --git a/neutron/db/migration/alembic_migrations/script.py.mako b/neutron/db/migration/alembic_migrations/script.py.mako
deleted file mode 100644 (file)
index 121181a..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright ${create_date.year} OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision}
-Create Date: ${create_date}
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = ${repr(up_revision)}
-down_revision = ${repr(down_revision)}
-% if branch_labels:
-branch_labels = ${repr(branch_labels)}
-% endif
-
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-
-def upgrade():
-    ${upgrades if upgrades else "pass"}
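
The Mako template above is what each new revision module is rendered from (normally via neutron-db-manage, which wraps Alembic's revision command). As a purely hypothetical illustration (the revision ids, message, and column below are invented), a rendered result looks like:

    """add example column

    Revision ID: abc123def456
    Revises: 34af2b5c5a59
    Create Date: 2016-02-01 12:00:00.000000

    """

    # revision identifiers, used by Alembic.
    revision = 'abc123def456'
    down_revision = '34af2b5c5a59'

    from alembic import op
    import sqlalchemy as sa


    def upgrade():
        # Hypothetical change, mirroring the template layout.
        op.add_column('ports', sa.Column('example', sa.String(36),
                                         nullable=True))
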
diff --git a/neutron/db/migration/alembic_migrations/secgroup_init_ops.py b/neutron/db/migration/alembic_migrations/secgroup_init_ops.py
deleted file mode 100644 (file)
index c65a204..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial operations for security group extension
-from alembic import op
-import sqlalchemy as sa
-
-
-rule_direction_enum = sa.Enum('ingress', 'egress',
-                              name='securitygrouprules_direction')
-
-
-def upgrade():
-    op.create_table(
-        'securitygroups',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=255), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'securitygrouprules',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('security_group_id', sa.String(length=36), nullable=False),
-        sa.Column('remote_group_id', sa.String(length=36), nullable=True),
-        sa.Column('direction', rule_direction_enum, nullable=True),
-        sa.Column('ethertype', sa.String(length=40), nullable=True),
-        sa.Column('protocol', sa.String(length=40), nullable=True),
-        sa.Column('port_range_min', sa.Integer(), nullable=True),
-        sa.Column('port_range_max', sa.Integer(), nullable=True),
-        sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True),
-        sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'securitygroupportbindings',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('security_group_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id']),
-        sa.PrimaryKeyConstraint('port_id', 'security_group_id'))
-
-    op.create_table(
-        'default_security_group',
-        sa.Column('tenant_id', sa.String(length=255), nullable=False),
-        sa.Column('security_group_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('tenant_id'),
-        sa.ForeignKeyConstraint(['security_group_id'],
-                                ['securitygroups.id'],
-                                ondelete="CASCADE"))
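
Worth noting in the schema above: default_security_group keys on tenant_id alone, which is what guarantees at most one default group per tenant. A self-contained sketch of that behaviour (SQLite in-memory, assumes SQLAlchemy 1.4+; not part of the migration):

    import sqlalchemy as sa
    from sqlalchemy.exc import IntegrityError

    engine = sa.create_engine('sqlite://')
    meta = sa.MetaData()
    default_sg = sa.Table(
        'default_security_group', meta,
        sa.Column('tenant_id', sa.String(255), primary_key=True),
        sa.Column('security_group_id', sa.String(36), nullable=False))
    meta.create_all(engine)

    with engine.begin() as conn:
        conn.execute(default_sg.insert(),
                     {'tenant_id': 't1', 'security_group_id': 'sg1'})

    try:
        with engine.begin() as conn:
            conn.execute(default_sg.insert(),
                         {'tenant_id': 't1', 'security_group_id': 'sg2'})
    except IntegrityError:
        print('second default group for tenant t1 rejected by the primary key')
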
diff --git a/neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD b/neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD
deleted file mode 100644 (file)
index 78fde72..0000000
+++ /dev/null
@@ -1 +0,0 @@
-8a6d8bdae39
diff --git a/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD b/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD
deleted file mode 100644 (file)
index d6602ab..0000000
+++ /dev/null
@@ -1 +0,0 @@
-c3a73f615e4
diff --git a/neutron/db/migration/alembic_migrations/versions/README b/neutron/db/migration/alembic_migrations/versions/README
deleted file mode 100644 (file)
index 1067a38..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory contains the migration scripts for the Neutron project.  Please
-see the README in neutron/db/migration for how to use and generate new
-migrations.
-
-
diff --git a/neutron/db/migration/alembic_migrations/versions/kilo_initial.py b/neutron/db/migration/alembic_migrations/versions/kilo_initial.py
deleted file mode 100644 (file)
index cb305f7..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""kilo_initial
-
-Revision ID: kilo
-Revises: None
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'kilo'
-down_revision = None
-
-
-from neutron.db.migration.alembic_migrations import agent_init_ops
-from neutron.db.migration.alembic_migrations import brocade_init_ops
-from neutron.db.migration.alembic_migrations import cisco_init_ops
-from neutron.db.migration.alembic_migrations import core_init_ops
-from neutron.db.migration.alembic_migrations import dvr_init_opts
-from neutron.db.migration.alembic_migrations import firewall_init_ops
-from neutron.db.migration.alembic_migrations import l3_init_ops
-from neutron.db.migration.alembic_migrations import lb_init_ops
-from neutron.db.migration.alembic_migrations import loadbalancer_init_ops
-from neutron.db.migration.alembic_migrations import metering_init_ops
-from neutron.db.migration.alembic_migrations import ml2_init_ops
-from neutron.db.migration.alembic_migrations import nec_init_ops
-from neutron.db.migration.alembic_migrations import nsxv_initial_opts
-from neutron.db.migration.alembic_migrations import nuage_init_opts
-from neutron.db.migration.alembic_migrations import other_extensions_init_ops
-from neutron.db.migration.alembic_migrations import other_plugins_init_ops
-from neutron.db.migration.alembic_migrations import ovs_init_ops
-from neutron.db.migration.alembic_migrations import portsec_init_ops
-from neutron.db.migration.alembic_migrations import secgroup_init_ops
-from neutron.db.migration.alembic_migrations import vmware_init_ops
-from neutron.db.migration.alembic_migrations import vpn_init_ops
-
-
-def upgrade():
-    agent_init_ops.upgrade()
-    core_init_ops.upgrade()
-    l3_init_ops.upgrade()
-    secgroup_init_ops.upgrade()
-    portsec_init_ops.upgrade()
-    other_extensions_init_ops.upgrade()
-    lb_init_ops.upgrade()
-    ovs_init_ops.upgrade()
-    ml2_init_ops.upgrade()
-    dvr_init_opts.upgrade()
-    firewall_init_ops.upgrade()
-    loadbalancer_init_ops.upgrade()
-    vpn_init_ops.upgrade()
-    metering_init_ops.upgrade()
-    brocade_init_ops.upgrade()
-    cisco_init_ops.upgrade()
-    nec_init_ops.upgrade()
-    other_plugins_init_ops.upgrade()
-    vmware_init_ops.upgrade()
-    nuage_init_opts.upgrade()
-    nsxv_initial_opts.upgrade()
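
kilo_initial collapses all pre-Liberty migrations into one baseline revision and simply calls the per-plugin init-ops modules in dependency order. A hedged sketch of applying it through the Alembic command API (the alembic.ini path is an assumption; deployments normally drive this through neutron-db-manage):

    from alembic import command
    from alembic.config import Config

    # Assumed to point at these migration scripts and the target database.
    cfg = Config('alembic.ini')
    # Bring a fresh schema up to the consolidated kilo baseline.
    command.upgrade(cfg, 'kilo')
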
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py
deleted file mode 100644 (file)
index 9ef5584..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""add geneve ml2 type driver
-
-Revision ID: 11926bcfe72d
-Revises: 2e5352a0ad4d
-Create Date: 2015-08-27 19:56:16.356522
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '11926bcfe72d'
-down_revision = '2e5352a0ad4d'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'ml2_geneve_allocations',
-        sa.Column('geneve_vni', sa.Integer(),
-                  autoincrement=False, nullable=False),
-        sa.Column('allocated', sa.Boolean(),
-                  server_default=sa.sql.false(), nullable=False),
-        sa.PrimaryKeyConstraint('geneve_vni'),
-    )
-    op.create_index(op.f('ix_ml2_geneve_allocations_allocated'),
-                    'ml2_geneve_allocations', ['allocated'], unique=False)
-    op.create_table(
-        'ml2_geneve_endpoints',
-        sa.Column('ip_address', sa.String(length=64), nullable=False),
-        sa.Column('host', sa.String(length=255), nullable=True),
-        sa.PrimaryKeyConstraint('ip_address'),
-        sa.UniqueConstraint('host', name='unique_ml2_geneve_endpoints0host'),
-    )
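
The allocated column above uses server_default=sa.sql.false(), so VNI rows bulk-inserted without an explicit value land as unallocated. A standalone sketch (SQLite in-memory, assumes SQLAlchemy 1.4+; not part of the migration):

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')
    meta = sa.MetaData()
    geneve = sa.Table(
        'ml2_geneve_allocations', meta,
        sa.Column('geneve_vni', sa.Integer, primary_key=True,
                  autoincrement=False),
        sa.Column('allocated', sa.Boolean, server_default=sa.sql.false(),
                  nullable=False))
    meta.create_all(engine)

    with engine.begin() as conn:
        conn.execute(geneve.insert(), [{'geneve_vni': v} for v in range(1, 4)])
        # Every row reads back with allocated == False from the server default.
        print(conn.execute(sa.select(geneve)).fetchall())
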
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplugin_removal.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplugin_removal.py
deleted file mode 100644 (file)
index 802ad7a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Metaplugin removal
-
-Revision ID: 2a16083502f3
-Revises: 5498d17be016
-Create Date: 2015-06-16 09:11:10.488566
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '2a16083502f3'
-down_revision = '5498d17be016'
-
-from alembic import op
-
-
-def upgrade():
-    op.drop_table('networkflavors')
-    op.drop_table('routerflavors')
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py
deleted file mode 100644 (file)
index 322f6b0..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Add missing foreign keys
-
-Revision ID: 2e5352a0ad4d
-Revises: 2a16083502f3
-Create Date: 2015-08-20 12:43:09.110427
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '2e5352a0ad4d'
-down_revision = '2a16083502f3'
-
-from alembic import op
-from sqlalchemy.engine import reflection
-
-from neutron.db import migration
-
-
-TABLE_NAME = 'flavorserviceprofilebindings'
-
-
-def upgrade():
-    inspector = reflection.Inspector.from_engine(op.get_bind())
-    fk_constraints = inspector.get_foreign_keys(TABLE_NAME)
-    migration.remove_foreign_keys(TABLE_NAME, fk_constraints)
-    migration.create_foreign_keys(TABLE_NAME, fk_constraints)
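
This revision repairs foreign keys by reflecting the live schema first and then recreating what it found. The reflection step can be reproduced standalone; a sketch (SQLite in-memory, assumes SQLAlchemy 1.4+, where sa.inspect() is the modern spelling of reflection.Inspector.from_engine()):

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')
    meta = sa.MetaData()
    flavors = sa.Table('flavors', meta,
                       sa.Column('id', sa.String(36), primary_key=True))
    bindings = sa.Table(
        'flavorserviceprofilebindings', meta,
        sa.Column('flavor_id', sa.String(36),
                  sa.ForeignKey('flavors.id'), primary_key=True))
    meta.create_all(engine)

    inspector = sa.inspect(engine)
    for fk in inspector.get_foreign_keys('flavorserviceprofilebindings'):
        print(fk['constrained_columns'], '->',
              fk['referred_table'], fk['referred_columns'])
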
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py
deleted file mode 100644 (file)
index 0e6358f..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Initial no-op Liberty contract rule.
-
-Revision ID: 30018084ec99
-Revises: None
-Create Date: 2015-06-22 00:00:00.000000
-
-"""
-
-from neutron.db.migration import cli
-
-
-# revision identifiers, used by Alembic.
-revision = '30018084ec99'
-down_revision = 'kilo'
-branch_labels = (cli.CONTRACT_BRANCH,)
-
-
-def upgrade():
-    pass
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_cisco_monolithic_tables.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_cisco_monolithic_tables.py
deleted file mode 100644 (file)
index 7a91b79..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2015 Cisco Systems, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Drop cisco monolithic tables
-
-Revision ID: 4af11ca47297
-Revises: 11926bcfe72d
-Create Date: 2015-08-13 08:01:19.709839
-
-"""
-
-from alembic import op
-
-from neutron.db import migration
-
-
-# revision identifiers, used by Alembic.
-revision = '4af11ca47297'
-down_revision = '11926bcfe72d'
-
-# milestone identifier, used by neutron-db-manage
-neutron_milestone = [migration.LIBERTY]
-
-
-def upgrade():
-    op.drop_table('cisco_n1kv_port_bindings')
-    op.drop_table('cisco_n1kv_network_bindings')
-    op.drop_table('cisco_n1kv_multi_segments')
-    op.drop_table('cisco_provider_networks')
-    op.drop_table('cisco_n1kv_trunk_segments')
-    op.drop_table('cisco_n1kv_vmnetworks')
-    op.drop_table('cisco_n1kv_profile_bindings')
-    op.drop_table('cisco_qos_policies')
-    op.drop_table('cisco_credentials')
-    op.drop_table('cisco_n1kv_vlan_allocations')
-    op.drop_table('cisco_n1kv_vxlan_allocations')
-    op.drop_table('cisco_network_profiles')
-    op.drop_table('cisco_policy_profiles')
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_network.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_network.py
deleted file mode 100644 (file)
index 76926fa..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""network_rbac
-
-Revision ID: 4ffceebfada
-Revises: 30018084ec99
-Create Date: 2015-06-14 13:12:04.012457
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '4ffceebfada'
-down_revision = '30018084ec99'
-depends_on = ('8675309a5c4f',)
-
-from alembic import op
-from oslo_utils import uuidutils
-import sqlalchemy as sa
-
-
-# A simple model of the networks table with only the fields needed for
-# the migration.
-network = sa.Table('networks', sa.MetaData(),
-                   sa.Column('id', sa.String(length=36), nullable=False),
-                   sa.Column('tenant_id', sa.String(length=255)),
-                   sa.Column('shared', sa.Boolean(), nullable=False))
-
-networkrbacs = sa.Table(
-    'networkrbacs', sa.MetaData(),
-    sa.Column('id', sa.String(length=36), nullable=False),
-    sa.Column('object_id', sa.String(length=36), nullable=False),
-    sa.Column('tenant_id', sa.String(length=255), nullable=True,
-              index=True),
-    sa.Column('target_tenant', sa.String(length=255), nullable=False),
-    sa.Column('action', sa.String(length=255), nullable=False))
-
-
-def upgrade():
-    op.bulk_insert(networkrbacs, get_values())
-    op.drop_column('networks', 'shared')
-    # the shared column on subnets was just an internal representation of the
-    # shared status of the network it was related to. This is now handled by
-    # other logic so we just drop it.
-    op.drop_column('subnets', 'shared')
-
-
-def get_values():
-    session = sa.orm.Session(bind=op.get_bind())
-    values = []
-    for row in session.query(network).filter(network.c.shared).all():
-        values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0],
-                       'tenant_id': row[1], 'target_tenant': '*',
-                       'action': 'access_as_shared'})
-    # this commit appears to be necessary to allow further operations
-    session.commit()
-    return values
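
The interesting part of this revision is the data migration in get_values(): it reads rows through a lightweight table model and converts them into values for op.bulk_insert(). A standalone sketch of the same pattern (SQLite in-memory, assumes SQLAlchemy 1.4+; uuid.uuid4() stands in for oslo_utils' uuidutils.generate_uuid()):

    import uuid

    import sqlalchemy as sa

    meta = sa.MetaData()
    network = sa.Table('networks', meta,
                       sa.Column('id', sa.String(36), primary_key=True),
                       sa.Column('tenant_id', sa.String(255)),
                       sa.Column('shared', sa.Boolean, nullable=False))

    engine = sa.create_engine('sqlite://')
    meta.create_all(engine)
    with engine.begin() as conn:
        conn.execute(network.insert(), [
            {'id': 'net-1', 'tenant_id': 't1', 'shared': True},
            {'id': 'net-2', 'tenant_id': 't2', 'shared': False}])
        rows = conn.execute(sa.select(network.c.id, network.c.tenant_id)
                            .where(network.c.shared)).fetchall()
        values = [{'id': str(uuid.uuid4()), 'object_id': row.id,
                   'tenant_id': row.tenant_id, 'target_tenant': '*',
                   'action': 'access_as_shared'} for row in rows]
        # This is the list op.bulk_insert(networkrbacs, ...) would receive.
        print(values)
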
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_legacy_ovs_and_lb.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_legacy_ovs_and_lb.py
deleted file mode 100644 (file)
index 55ad8d1..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Drop legacy OVS and LB plugin tables
-
-Revision ID: 5498d17be016
-Revises: 4ffceebfada
-Create Date: 2015-06-25 14:08:30.984419
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '5498d17be016'
-down_revision = '4ffceebfada'
-
-from alembic import op
-
-
-def upgrade():
-    op.drop_table('ovs_network_bindings')
-    op.drop_table('ovs_vlan_allocations')
-    op.drop_table('network_bindings')
-    op.drop_table('ovs_tunnel_allocations')
-    op.drop_table('network_states')
-    op.drop_table('ovs_tunnel_endpoints')
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_scope_support_in_subnetpool.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_scope_support_in_subnetpool.py
deleted file mode 100644 (file)
index f1ffdaf..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2015 Huawei Technologies India Pvt. Ltd.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""address scope support in subnetpool
-
-Revision ID: 1b4c6e320f79
-Revises: 1c844d1677f7
-Create Date: 2015-07-03 09:48:39.491058
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '1b4c6e320f79'
-down_revision = '1c844d1677f7'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.add_column('subnetpools',
-                  sa.Column('address_scope_id',
-                            sa.String(length=36),
-                            nullable=True))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py
deleted file mode 100644 (file)
index e1781ed..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""add order to dnsnameservers
-
-Revision ID: 1c844d1677f7
-Revises: 26c371498592
-Create Date: 2015-07-21 22:59:03.383850
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '1c844d1677f7'
-down_revision = '26c371498592'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.add_column('dnsnameservers',
-                  sa.Column('order', sa.Integer(),
-                            server_default='0', nullable=False))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpool_hash.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpool_hash.py
deleted file mode 100644 (file)
index 5bff7c8..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2015 Thales Services SAS
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""subnetpool hash
-
-Revision ID: 26c371498592
-Revises: 45f955889773
-Create Date: 2015-06-02 21:18:19.942076
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '26c371498592'
-down_revision = '45f955889773'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.add_column(
-        'subnetpools',
-        sa.Column('hash', sa.String(36), nullable=False, server_default=''))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/313373c0ffee_flavors.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/313373c0ffee_flavors.py
deleted file mode 100644 (file)
index 4ac5ac8..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2014-2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Flavor framework
-
-Revision ID: 313373c0ffee
-Revises: 52c5312f6baf
-Create Date: 2014-07-17 03:00:00.00
-
-"""
-# revision identifiers, used by Alembic.
-revision = '313373c0ffee'
-down_revision = '52c5312f6baf'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'flavors',
-        sa.Column('id', sa.String(36)),
-        sa.Column('name', sa.String(255)),
-        sa.Column('description', sa.String(1024)),
-        sa.Column('enabled', sa.Boolean, nullable=False,
-                  server_default=sa.sql.true()),
-        sa.Column('service_type', sa.String(36), nullable=True),
-        sa.PrimaryKeyConstraint('id')
-    )
-
-    op.create_table(
-        'serviceprofiles',
-        sa.Column('id', sa.String(36)),
-        sa.Column('description', sa.String(1024)),
-        sa.Column('driver', sa.String(1024), nullable=False),
-        sa.Column('enabled', sa.Boolean, nullable=False,
-                  server_default=sa.sql.true()),
-        sa.Column('metainfo', sa.String(4096)),
-        sa.PrimaryKeyConstraint('id')
-    )
-
-    op.create_table(
-        'flavorserviceprofilebindings',
-        sa.Column('service_profile_id', sa.String(36), nullable=False),
-        sa.Column('flavor_id', sa.String(36), nullable=False),
-        sa.ForeignKeyConstraint(['service_profile_id'],
-                                ['serviceprofiles.id']),
-        sa.ForeignKeyConstraint(['flavor_id'], ['flavors.id']),
-        sa.PrimaryKeyConstraint('service_profile_id', 'flavor_id')
-    )
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py
deleted file mode 100644 (file)
index 3b2707c..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2015 Rackspace
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Add dns_name to Port
-
-Revision ID: 34af2b5c5a59
-Revises: 9859ac9c136
-Create Date: 2015-08-23 00:22:47.618593
-
-"""
-
-from alembic import op
-import sqlalchemy as sa
-
-from neutron.db import migration
-from neutron.extensions import dns
-
-
-# revision identifiers, used by Alembic.
-revision = '34af2b5c5a59'
-down_revision = '9859ac9c136'
-
-# milestone identifier, used by neutron-db-manage
-neutron_milestone = [migration.LIBERTY]
-
-
-def upgrade():
-    op.add_column('ports',
-                  sa.Column('dns_name',
-                            sa.String(length=dns.FQDN_MAX_LEN),
-                            nullable=True))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py
deleted file mode 100644 (file)
index e63b3f5..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""nsxv_vdr_metadata.py
-
-Revision ID: 354db87e3225
-Revises: kilo
-Create Date: 2015-04-19 14:59:15.102609
-
-"""
-
-from alembic import op
-import sqlalchemy as sa
-
-from neutron.db.migration import cli
-
-
-# revision identifiers, used by Alembic.
-revision = '354db87e3225'
-down_revision = 'kilo'
-branch_labels = (cli.EXPAND_BRANCH,)
-
-
-def upgrade():
-    op.create_table(
-        'nsxv_vdr_dhcp_bindings',
-        sa.Column('vdr_router_id', sa.String(length=36), nullable=False),
-        sa.Column('dhcp_router_id', sa.String(length=36), nullable=False),
-        sa.Column('dhcp_edge_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('vdr_router_id'),
-        sa.UniqueConstraint(
-            'dhcp_router_id',
-            name='unique_nsxv_vdr_dhcp_bindings0dhcp_router_id'),
-        sa.UniqueConstraint(
-            'dhcp_edge_id',
-            name='unique_nsxv_vdr_dhcp_bindings0dhcp_edge_id'))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usage.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usage.py
deleted file mode 100644 (file)
index e10edc9..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""quota_usage
-
-Revision ID: 45f955889773
-Revises: 8675309a5c4f
-Create Date: 2015-04-17 08:09:37.611546
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '45f955889773'
-down_revision = '8675309a5c4f'
-
-from alembic import op
-import sqlalchemy as sa
-from sqlalchemy import sql
-
-
-def upgrade():
-    op.create_table(
-        'quotausages',
-        sa.Column('tenant_id', sa.String(length=255),
-                  nullable=False, primary_key=True, index=True),
-        sa.Column('resource', sa.String(length=255),
-                  nullable=False, primary_key=True, index=True),
-        sa.Column('dirty', sa.Boolean(), nullable=False,
-                  server_default=sql.false()),
-        sa.Column('in_use', sa.Integer(), nullable=False,
-                  server_default='0'),
-        sa.Column('reserved', sa.Integer(), nullable=False,
-                  server_default='0'))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py
deleted file mode 100755 (executable)
index a692b95..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""qos db changes
-
-Revision ID: 48153cb5f051
-Revises: 1b4c6e320f79
-Create Date: 2015-06-24 17:03:34.965101
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '48153cb5f051'
-down_revision = '1b4c6e320f79'
-
-from alembic import op
-import sqlalchemy as sa
-
-from neutron.api.v2 import attributes as attrs
-
-
-def upgrade():
-    op.create_table(
-        'qos_policies',
-        sa.Column('id', sa.String(length=36), primary_key=True),
-        sa.Column('name', sa.String(length=attrs.NAME_MAX_LEN)),
-        sa.Column('description', sa.String(length=attrs.DESCRIPTION_MAX_LEN)),
-        sa.Column('shared', sa.Boolean(), nullable=False),
-        sa.Column('tenant_id', sa.String(length=attrs.TENANT_ID_MAX_LEN),
-                  index=True))
-
-    op.create_table(
-        'qos_network_policy_bindings',
-        sa.Column('policy_id', sa.String(length=36),
-                  sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
-                  nullable=False),
-        sa.Column('network_id', sa.String(length=36),
-                  sa.ForeignKey('networks.id', ondelete='CASCADE'),
-                  nullable=False, unique=True))
-
-    op.create_table(
-        'qos_port_policy_bindings',
-        sa.Column('policy_id', sa.String(length=36),
-                  sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
-                  nullable=False),
-        sa.Column('port_id', sa.String(length=36),
-                  sa.ForeignKey('ports.id', ondelete='CASCADE'),
-                  nullable=False, unique=True))
-
-    op.create_table(
-        'qos_bandwidth_limit_rules',
-        sa.Column('id', sa.String(length=36), primary_key=True),
-        sa.Column('qos_policy_id', sa.String(length=36),
-                  sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
-                  nullable=False, unique=True),
-        sa.Column('max_kbps', sa.Integer()),
-        sa.Column('max_burst_kbps', sa.Integer()))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.py
deleted file mode 100644 (file)
index 9fa1466..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Initial operations in support of address scopes
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '52c5312f6baf'
-down_revision = '599c6a226151'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'address_scopes',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=False),
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('shared', sa.Boolean(), nullable=False),
-        sa.PrimaryKeyConstraint('id'))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_ipam.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_ipam.py
deleted file mode 100644 (file)
index cea591d..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""neutrodb_ipam
-
-Revision ID: 599c6a226151
-Revises: 354db87e3225
-Create Date: 2015-03-08 18:12:08.962378
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '599c6a226151'
-down_revision = '354db87e3225'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'ipamsubnets',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('neutron_subnet_id', sa.String(length=36), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'ipamallocations',
-        sa.Column('ip_address', sa.String(length=64), nullable=False),
-        sa.Column('status', sa.String(length=36), nullable=True),
-        sa.Column('ipam_subnet_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['ipam_subnet_id'],
-                                ['ipamsubnets.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('ip_address', 'ipam_subnet_id'))
-
-    op.create_table(
-        'ipamallocationpools',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('ipam_subnet_id', sa.String(length=36), nullable=False),
-        sa.Column('first_ip', sa.String(length=64), nullable=False),
-        sa.Column('last_ip', sa.String(length=64), nullable=False),
-        sa.ForeignKeyConstraint(['ipam_subnet_id'],
-                                ['ipamsubnets.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'ipamavailabilityranges',
-        sa.Column('allocation_pool_id', sa.String(length=36), nullable=False),
-        sa.Column('first_ip', sa.String(length=64), nullable=False),
-        sa.Column('last_ip', sa.String(length=64), nullable=False),
-        sa.ForeignKeyConstraint(['allocation_pool_id'],
-                                ['ipamallocationpools.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'),
-        sa.Index('ix_ipamavailabilityranges_first_ip_allocation_pool_id',
-                 'first_ip', 'allocation_pool_id'),
-        sa.Index('ix_ipamavailabilityranges_last_ip_allocation_pool_id',
-                 'last_ip', 'allocation_pool_id'))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_network.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_network.py
deleted file mode 100644 (file)
index b2c7156..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""network_rbac
-
-Revision ID: 8675309a5c4f
-Revises: 313373c0ffee
-Create Date: 2015-06-14 13:12:04.012457
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '8675309a5c4f'
-down_revision = '313373c0ffee'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'networkrbacs',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('object_id', sa.String(length=36), nullable=False),
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('target_tenant', sa.String(length=255), nullable=False),
-        sa.Column('action', sa.String(length=255), nullable=False),
-        sa.ForeignKeyConstraint(['object_id'],
-                                ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint(
-            'action', 'object_id', 'target_tenant',
-            name='uniq_networkrbacs0tenant_target0object_id0action'))
diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py
deleted file mode 100644 (file)
index c8935a8..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""quota_reservations
-
-Revision ID: 9859ac9c136
-Revises: 48153cb5f051
-Create Date: 2015-03-11 06:40:56.775075
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '9859ac9c136'
-down_revision = '48153cb5f051'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table(
-        'reservations',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('expiration', sa.DateTime(), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'resourcedeltas',
-        sa.Column('resource', sa.String(length=255), nullable=False),
-        sa.Column('reservation_id', sa.String(length=36), nullable=False),
-        sa.Column('amount', sa.Integer(), nullable=True),
-        sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('resource', 'reservation_id'))
diff --git a/neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b294093239c_remove_embrane_plugin.py b/neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b294093239c_remove_embrane_plugin.py
deleted file mode 100644 (file)
index 68e6d25..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Drop embrane plugin table
-
-Revision ID: 1b294093239c
-Revises: 4af11ca47297
-Create Date: 2015-10-09 14:07:59.968597
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '1b294093239c'
-down_revision = '4af11ca47297'
-
-from alembic import op
-
-
-def upgrade():
-    op.drop_table('embrane_pool_port')
diff --git a/neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae39_migrate_neutron_resources_table.py b/neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae39_migrate_neutron_resources_table.py
deleted file mode 100644 (file)
index e7acce8..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""standardattributes migration
-
-Revision ID: 8a6d8bdae39
-Revises: 1b294093239c
-Create Date: 2015-09-10 03:12:04.012457
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '8a6d8bdae39'
-down_revision = '1b294093239c'
-depends_on = ('32e5974ada25',)
-
-from alembic import op
-import sqlalchemy as sa
-
-
-# minimal models of the tables, with just the fields the migration needs
-TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
-          'floatingips', 'routers', 'securitygrouprules')
-TABLE_MODELS = [
-    (table, sa.Table(table, sa.MetaData(),
-                     sa.Column('id', sa.String(length=36), nullable=False),
-                     sa.Column('standard_attr_id', sa.BigInteger(),
-                               nullable=True)))
-    for table in TABLES
-]
-
-standardattrs = sa.Table(
-    'standardattributes', sa.MetaData(),
-    sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True),
-    sa.Column('resource_type', sa.String(length=255), nullable=False))
-
-
-def upgrade():
-    generate_records_for_existing()
-    for table, model in TABLE_MODELS:
-        # add constraint(s) now that everything is populated on that table.
-        # note that some MariaDB versions will *not* allow altering a column
-        # to NOT NULL while it has an FK constraint, so we set NOT NULL
-        # first and only then add the FK constraint.
-        op.alter_column(table, 'standard_attr_id', nullable=False,
-                        existing_type=sa.BigInteger(), existing_nullable=True,
-                        existing_server_default=False)
-        op.create_foreign_key(
-            constraint_name=None, source_table=table,
-            referent_table='standardattributes',
-            local_cols=['standard_attr_id'], remote_cols=['id'],
-            ondelete='CASCADE')
-        op.create_unique_constraint(
-            constraint_name='uniq_%s0standard_attr_id' % table,
-            table_name=table, columns=['standard_attr_id'])
-
-
-def generate_records_for_existing():
-    session = sa.orm.Session(bind=op.get_bind())
-    with session.begin(subtransactions=True):
-        for table, model in TABLE_MODELS:
-            for row in session.query(model):
-                # NOTE(kevinbenton): without this disabled, pylint complains
-                # about a missing 'dml' argument.
-                # pylint: disable=no-value-for-parameter
-                res = session.execute(
-                    standardattrs.insert().values(resource_type=table))
-                session.execute(
-                    model.update().values(
-                        standard_attr_id=res.inserted_primary_key[0]).where(
-                            model.c.id == row[0]))
-    # this commit is necessary to allow the ALTER TABLE calls that follow
-    session.commit()
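This contract-side migration pairs with 32e5974ada25 further down in this
diff: the expand step adds standard_attr_id as a nullable column, the loop
above backfills it, and only then is the column tightened to NOT NULL and
given its FK, in that order because of the MariaDB caveat noted in the code.
A minimal sketch of the same expand/backfill/tighten pattern against a toy
table (the table and column names here are illustrative, not taken from this
migration):

    from alembic import op
    import sqlalchemy as sa

    # Lightweight table stubs: only the columns the data migration touches.
    widgets = sa.Table(
        'widgets', sa.MetaData(),
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('attr_id', sa.BigInteger(), nullable=True))
    attrs = sa.Table(
        'attrs', sa.MetaData(),
        sa.Column('id', sa.BigInteger(), primary_key=True,
                  autoincrement=True))

    def upgrade():
        bind = op.get_bind()
        for row in bind.execute(sa.select([widgets.c.id])):
            # one new attrs row per existing widget, then point the
            # widget at it
            res = bind.execute(attrs.insert())
            bind.execute(
                widgets.update().values(
                    attr_id=res.inserted_primary_key[0]).where(
                        widgets.c.id == row[0]))
        # NOT NULL first, FK second (the MariaDB ordering caveat)
        op.alter_column('widgets', 'attr_id', nullable=False,
                        existing_type=sa.BigInteger())
        op.create_foreign_key(None, 'widgets', 'attrs',
                              ['attr_id'], ['id'])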
diff --git a/neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_default_to_subnetpool.py b/neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_default_to_subnetpool.py
deleted file mode 100644 (file)
index 6e7d9f7..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2015 Cisco Systems
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""add is_default to subnetpool
-
-Revision ID: 13cfb89f881a
-Revises: 59cb5b6cf4d
-Create Date: 2015-09-30 15:58:31.170153
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '13cfb89f881a'
-down_revision = '59cb5b6cf4d'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.add_column('subnetpools',
-                  sa.Column('is_default',
-                            sa.Boolean(),
-                            nullable=False))
diff --git a/neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.py b/neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.py
deleted file mode 100644 (file)
index 39e2104..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Add standard attribute table
-
-Revision ID: 32e5974ada25
-Revises: 13cfb89f881a
-Create Date: 2015-09-10 00:22:47.618593
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '32e5974ada25'
-down_revision = '13cfb89f881a'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
-          'floatingips', 'routers', 'securitygrouprules')
-
-
-def upgrade():
-    op.create_table(
-        'standardattributes',
-        sa.Column('id', sa.BigInteger(), autoincrement=True),
-        sa.Column('resource_type', sa.String(length=255), nullable=False),
-        sa.PrimaryKeyConstraint('id')
-    )
-    for table in TABLES:
-        op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
-                                       nullable=True))
diff --git a/neutron/db/migration/alembic_migrations/versions/mitaka/expand/59cb5b6cf4d_availability_zone.py b/neutron/db/migration/alembic_migrations/versions/mitaka/expand/59cb5b6cf4d_availability_zone.py
deleted file mode 100644 (file)
index d2c5482..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Add availability zone
-
-Revision ID: 59cb5b6cf4d
-Revises: 34af2b5c5a59
-Create Date: 2015-01-20 14:38:47.156574
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '59cb5b6cf4d'
-down_revision = '34af2b5c5a59'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.add_column('agents',
-                  sa.Column('availability_zone', sa.String(length=255)))
diff --git a/neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_version_to_address_scope.py b/neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_version_to_address_scope.py
deleted file mode 100644 (file)
index e039f70..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Add ip_version to AddressScope
-
-Revision ID: c3a73f615e4
-Revises: dce3ec7a25c9
-Create Date: 2015-10-08 17:34:32.231256
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'c3a73f615e4'
-down_revision = 'dce3ec7a25c9'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.add_column('address_scopes',
-                  sa.Column('ip_version', sa.Integer(), nullable=False))
diff --git a/neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.py b/neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.py
deleted file mode 100644 (file)
index 62af98a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Add router availability zone
-
-Revision ID: dce3ec7a25c9
-Revises: ec7fcfbf72ee
-Create Date: 2015-09-17 09:36:17.468901
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'dce3ec7a25c9'
-down_revision = 'ec7fcfbf72ee'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.add_column('router_extra_attributes',
-                  sa.Column('availability_zone_hints', sa.String(length=255)))
diff --git a/neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az.py b/neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az.py
deleted file mode 100644 (file)
index e6582d4..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Add network availability zone
-
-Revision ID: ec7fcfbf72ee
-Revises: 32e5974ada25
-Create Date: 2015-09-17 09:21:51.257579
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'ec7fcfbf72ee'
-down_revision = '32e5974ada25'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.add_column('networks',
-                  sa.Column('availability_zone_hints', sa.String(length=255)))
diff --git a/neutron/db/migration/alembic_migrations/vmware_init_ops.py b/neutron/db/migration/alembic_migrations/vmware_init_ops.py
deleted file mode 100644 (file)
index e83ddc8..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial schema operations for VMware plugins
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-net_binding_type = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
-                           name='tz_network_bindings_binding_type')
-l2gw_segmentation_type = sa.Enum('flat', 'vlan',
-                                 name='networkconnections_segmentation_type')
-qos_marking = sa.Enum('untrusted', 'trusted', name='qosqueues_qos_marking')
-
-
-def upgrade():
-    op.create_table(
-        'tz_network_bindings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('binding_type', net_binding_type, nullable=False),
-        sa.Column('phy_uuid', sa.String(length=36), nullable=True),
-        sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=True),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id', 'binding_type',
-                                'phy_uuid', 'vlan_id'))
-
-    op.create_table(
-        'multi_provider_networks',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id'))
-
-    op.create_table(
-        'vcns_router_bindings',
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.Column('status_description', sa.String(length=255), nullable=True),
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.Column('edge_id', sa.String(length=16), nullable=True),
-        sa.Column('lswitch_id', sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('router_id'))
-
-    op.create_table(
-        'networkgateways',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('tenant_id', sa.String(length=36), nullable=True),
-        sa.Column('default', sa.Boolean(), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'networkconnections',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
-        sa.Column('network_id', sa.String(length=36), nullable=True),
-        sa.Column('segmentation_type', l2gw_segmentation_type, nullable=True),
-        sa.Column('segmentation_id', sa.Integer(), nullable=True),
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('port_id'),
-        sa.UniqueConstraint('network_gateway_id', 'segmentation_type',
-                            'segmentation_id'))
-
-    op.create_table(
-        'qosqueues',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('default', sa.Boolean(), nullable=True,
-                  server_default=sa.sql.false()),
-        sa.Column('min', sa.Integer(), nullable=False),
-        sa.Column('max', sa.Integer(), nullable=True),
-        sa.Column('qos_marking', qos_marking, nullable=True),
-        sa.Column('dscp', sa.Integer(), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'networkqueuemappings',
-        sa.Column('network_id', sa.String(length=36), nullable=False),
-        sa.Column('queue_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('network_id'))
-
-    op.create_table(
-        'portqueuemappings',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('queue_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], ),
-        sa.PrimaryKeyConstraint('port_id', 'queue_id'))
-
-    op.create_table(
-        'maclearningstates',
-        sa.Column('port_id', sa.String(length=36), nullable=False),
-        sa.Column('mac_learning_enabled', sa.Boolean(), nullable=False),
-        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('port_id'))
-
-    op.create_table('neutron_nsx_port_mappings',
-                    sa.Column('neutron_id', sa.String(length=36),
-                              nullable=False),
-                    sa.Column('nsx_port_id', sa.String(length=36),
-                              nullable=False),
-                    sa.Column('nsx_switch_id', sa.String(length=36),
-                              nullable=True),
-                    sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'],
-                                            ondelete='CASCADE'),
-                    sa.PrimaryKeyConstraint('neutron_id'))
-
-    op.create_table(
-        'lsn',
-        sa.Column('net_id',
-                  sa.String(length=36), nullable=False),
-        sa.Column('lsn_id',
-                  sa.String(length=36), nullable=False),
-        sa.PrimaryKeyConstraint('lsn_id'))
-
-    op.create_table(
-        'lsn_port',
-        sa.Column('lsn_port_id',
-                  sa.String(length=36), nullable=False),
-        sa.Column('lsn_id',
-                  sa.String(length=36), nullable=False),
-        sa.Column('sub_id',
-                  sa.String(length=36), nullable=False, unique=True),
-        sa.Column('mac_addr',
-                  sa.String(length=32), nullable=False, unique=True),
-        sa.ForeignKeyConstraint(['lsn_id'], ['lsn.lsn_id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('lsn_port_id'))
-
-    op.create_table(
-        'neutron_nsx_network_mappings',
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.Column('nsx_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['neutron_id'], ['networks.id'],
-                                ondelete='CASCADE'),
-        # There might be multiple switches for a neutron network
-        sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'),
-    )
-
-    op.create_table(
-        'neutron_nsx_router_mappings',
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.Column('nsx_id', sa.String(length=36), nullable=True),
-        sa.ForeignKeyConstraint(['neutron_id'], ['routers.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('neutron_id'),
-    )
-
-    op.create_table(
-        'neutron_nsx_security_group_mappings',
-        sa.Column('neutron_id', sa.String(length=36), nullable=False),
-        sa.Column('nsx_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'))
-
-    op.create_table(
-        'networkgatewaydevicereferences',
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
-        sa.Column('interface_name', sa.String(length=64), nullable=True),
-        sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'))
-
-    op.create_table(
-        'networkgatewaydevices',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True,
-                  index=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('nsx_id', sa.String(length=36), nullable=True),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('connector_type', sa.String(length=10), nullable=True),
-        sa.Column('connector_ip', sa.String(length=64), nullable=True),
-        sa.Column('status', sa.String(length=16), nullable=True),
-        sa.PrimaryKeyConstraint('id'))
diff --git a/neutron/db/migration/alembic_migrations/vpn_init_ops.py b/neutron/db/migration/alembic_migrations/vpn_init_ops.py
deleted file mode 100644 (file)
index ba3cd3d..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-# Initial schema operations for IPSEC VPN service plugin
-
-
-from alembic import op
-import sqlalchemy as sa
-
-
-auth_algorithms = sa.Enum('sha1', name='vpn_auth_algorithms')
-encryption_algorithms = sa.Enum('3des', 'aes-128', 'aes-256', 'aes-192',
-                                name='vpn_encrypt_algorithms')
-encapsulation_modes = sa.Enum('tunnel', 'transport',
-                              name='ipsec_encapsulations')
-lifetime_unit_types = sa.Enum('seconds', 'kilobytes',
-                              name='vpn_lifetime_units')
-transform_protocols = sa.Enum('esp', 'ah', 'ah-esp',
-                              name='ipsec_transform_protocols')
-pfs_types = sa.Enum('group2', 'group5', 'group14', name='vpn_pfs')
-phase1_negotiation_modes = sa.Enum('main', name='ike_phase1_mode')
-ike_versions = sa.Enum('v1', 'v2', name='ike_versions')
-initiator_types = sa.Enum('bi-directional', 'response-only',
-                          name='vpn_initiators')
-dpd_actions = sa.Enum('hold', 'clear', 'restart', 'disabled',
-                      'restart-by-peer', name='vpn_dpd_actions')
-
-
-def upgrade():
-    op.create_table(
-        'ipsecpolicies',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=255), nullable=True),
-        sa.Column('transform_protocol', transform_protocols, nullable=False),
-        sa.Column('auth_algorithm', auth_algorithms, nullable=False),
-        sa.Column('encryption_algorithm', encryption_algorithms,
-                  nullable=False),
-        sa.Column('encapsulation_mode', encapsulation_modes, nullable=False),
-        sa.Column('lifetime_units', lifetime_unit_types, nullable=False),
-        sa.Column('lifetime_value', sa.Integer(), nullable=False),
-        sa.Column('pfs', pfs_types, nullable=False),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'ikepolicies',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=255), nullable=True),
-        sa.Column('auth_algorithm', auth_algorithms, nullable=False),
-        sa.Column('encryption_algorithm', encryption_algorithms,
-                  nullable=False),
-        sa.Column('phase1_negotiation_mode',
-                  phase1_negotiation_modes,
-                  nullable=False),
-        sa.Column('lifetime_units', lifetime_unit_types, nullable=False),
-        sa.Column('lifetime_value', sa.Integer(), nullable=False),
-        sa.Column('ike_version', ike_versions, nullable=False),
-        sa.Column('pfs', pfs_types, nullable=False),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'vpnservices',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=255), nullable=True),
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.Column('subnet_id', sa.String(length=36), nullable=False),
-        sa.Column('router_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ),
-        sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'ipsec_site_connections',
-        sa.Column('tenant_id', sa.String(length=255), nullable=True),
-        sa.Column('id', sa.String(length=36), nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=True),
-        sa.Column('description', sa.String(length=255), nullable=True),
-        sa.Column('peer_address', sa.String(length=255), nullable=False),
-        sa.Column('peer_id', sa.String(length=255), nullable=False),
-        sa.Column('route_mode', sa.String(length=8), nullable=False),
-        sa.Column('mtu', sa.Integer(), nullable=False),
-        sa.Column('initiator', initiator_types, nullable=False),
-        sa.Column('auth_mode', sa.String(length=16), nullable=False),
-        sa.Column('psk', sa.String(length=255), nullable=False),
-        sa.Column('dpd_action', dpd_actions, nullable=False),
-        sa.Column('dpd_interval', sa.Integer(), nullable=False),
-        sa.Column('dpd_timeout', sa.Integer(), nullable=False),
-        sa.Column('status', sa.String(length=16), nullable=False),
-        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-        sa.Column('vpnservice_id', sa.String(length=36), nullable=False),
-        sa.Column('ipsecpolicy_id', sa.String(length=36), nullable=False),
-        sa.Column('ikepolicy_id', sa.String(length=36), nullable=False),
-        sa.ForeignKeyConstraint(['vpnservice_id'], ['vpnservices.id'], ),
-        sa.ForeignKeyConstraint(['ipsecpolicy_id'], ['ipsecpolicies.id'], ),
-        sa.ForeignKeyConstraint(['ikepolicy_id'], ['ikepolicies.id'], ),
-        sa.PrimaryKeyConstraint('id'))
-
-    op.create_table(
-        'ipsecpeercidrs',
-        sa.Column('cidr', sa.String(length=32), nullable=False),
-        sa.Column('ipsec_site_connection_id', sa.String(length=36),
-                  nullable=False),
-        sa.ForeignKeyConstraint(['ipsec_site_connection_id'],
-                                ['ipsec_site_connections.id'],
-                                ondelete='CASCADE'),
-        sa.PrimaryKeyConstraint('cidr', 'ipsec_site_connection_id'))
diff --git a/neutron/db/migration/autogen.py b/neutron/db/migration/autogen.py
deleted file mode 100644 (file)
index daa3aaa..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright (c) 2015 Red Hat
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from alembic.operations import ops
-from alembic.util import Dispatcher
-from alembic.util import rev_id as new_rev_id
-
-from neutron.db.migration import cli
-
-_ec_dispatcher = Dispatcher()
-
-
-def process_revision_directives(context, revision, directives):
-    if cli._use_separate_migration_branches(context.config):
-        directives[:] = [
-            directive for directive in _assign_directives(context, directives)
-        ]
-
-
-def _assign_directives(context, directives, phase=None):
-    for directive in directives:
-        decider = _ec_dispatcher.dispatch(directive)
-        if phase is None:
-            phases = cli.MIGRATION_BRANCHES
-        else:
-            phases = (phase,)
-        for phase in phases:
-            decided = decider(context, directive, phase)
-            if decided:
-                yield decided
-
-
-@_ec_dispatcher.dispatch_for(ops.MigrationScript)
-def _migration_script_ops(context, directive, phase):
-    """Generate a new ops.MigrationScript() for a given phase.
-
-    E.g. given an ops.MigrationScript() directive from a vanilla autogenerate
-    and an expand/contract phase name, produce a new ops.MigrationScript()
-    which contains only those sub-directives appropriate to "expand" or
-    "contract".  Also ensure that the branch directory exists and that
-    the correct branch labels/depends_on/head revision are set up.
-
-    """
-    version_path = cli._get_version_branch_path(
-        context.config, release=cli.CURRENT_RELEASE, branch=phase)
-    autogen_kwargs = {}
-    cli._check_bootstrap_new_branch(phase, version_path, autogen_kwargs)
-
-    op = ops.MigrationScript(
-        new_rev_id(),
-        ops.UpgradeOps(ops=[
-            d for d in _assign_directives(
-                context, directive.upgrade_ops.ops, phase)
-        ]),
-        ops.DowngradeOps(ops=[]),
-        message=directive.message,
-        **autogen_kwargs
-    )
-
-    if not op.upgrade_ops.is_empty():
-        return op
-
-
-@_ec_dispatcher.dispatch_for(ops.AddConstraintOp)
-@_ec_dispatcher.dispatch_for(ops.CreateIndexOp)
-@_ec_dispatcher.dispatch_for(ops.CreateTableOp)
-@_ec_dispatcher.dispatch_for(ops.AddColumnOp)
-def _expands(context, directive, phase):
-    if phase == 'expand':
-        return directive
-    else:
-        return None
-
-
-@_ec_dispatcher.dispatch_for(ops.DropConstraintOp)
-@_ec_dispatcher.dispatch_for(ops.DropIndexOp)
-@_ec_dispatcher.dispatch_for(ops.DropTableOp)
-@_ec_dispatcher.dispatch_for(ops.DropColumnOp)
-def _contracts(context, directive, phase):
-    if phase == 'contract':
-        return directive
-    else:
-        return None
-
-
-@_ec_dispatcher.dispatch_for(ops.AlterColumnOp)
-def _alter_column(context, directive, phase):
-    is_expand = phase == 'expand'
-
-    if is_expand and (
-        directive.modify_nullable is True
-    ):
-        return directive
-    elif not is_expand and (
-        directive.modify_nullable is False
-    ):
-        return directive
-    else:
-        raise NotImplementedError(
-            "Don't know if operation is an expand or "
-            "contract at the moment: %s" % directive)
-
-
-@_ec_dispatcher.dispatch_for(ops.ModifyTableOps)
-def _modify_table_ops(context, directive, phase):
-    op = ops.ModifyTableOps(
-        directive.table_name,
-        ops=[
-            d for d in _assign_directives(context, directive.ops, phase)
-        ],
-        schema=directive.schema)
-    if not op.is_empty():
-        return op
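autogen.py hooks into alembic's autogeneration: an env.py passes
process_revision_directives to context.configure(), and the dispatcher above
then rewrites the directive tree into separate expand and contract scripts
before anything is rendered. A bare-bones sketch of that wiring (the engine
URL and target_metadata are placeholders; the real env.py builds both from
neutron configuration):

    from alembic import context
    from sqlalchemy import create_engine

    from neutron.db.migration import autogen

    def run_migrations_online():
        engine = create_engine('sqlite://')  # placeholder URL
        with engine.connect() as connection:
            context.configure(
                connection=connection,
                target_metadata=None,  # placeholder; normally model metadata
                process_revision_directives=(
                    autogen.process_revision_directives))
            with context.begin_transaction():
                context.run_migrations()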
diff --git a/neutron/db/migration/cli.py b/neutron/db/migration/cli.py
deleted file mode 100644 (file)
index e885307..0000000
+++ /dev/null
@@ -1,693 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from alembic import command as alembic_command
-from alembic import config as alembic_config
-from alembic import environment
-from alembic import script as alembic_script
-from alembic import util as alembic_util
-import debtcollector
-from oslo_config import cfg
-from oslo_utils import fileutils
-from oslo_utils import importutils
-import pkg_resources
-import six
-
-from neutron._i18n import _
-from neutron.common import utils
-from neutron.db import migration
-
-
-HEAD_FILENAME = 'HEAD'
-HEADS_FILENAME = 'HEADS'
-CONTRACT_HEAD_FILENAME = 'CONTRACT_HEAD'
-EXPAND_HEAD_FILENAME = 'EXPAND_HEAD'
-
-CURRENT_RELEASE = migration.MITAKA
-RELEASES = (
-    migration.LIBERTY,
-    migration.MITAKA,
-)
-
-EXPAND_BRANCH = 'expand'
-CONTRACT_BRANCH = 'contract'
-MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH)
-
-MIGRATION_ENTRYPOINTS = 'neutron.db.alembic_migrations'
-migration_entrypoints = {
-    entrypoint.name: entrypoint
-    for entrypoint in pkg_resources.iter_entry_points(MIGRATION_ENTRYPOINTS)
-}
-
-
-BRANCHLESS_WARNING = 'Branchless migration chains are deprecated as of Mitaka.'
-
-
-neutron_alembic_ini = os.path.join(os.path.dirname(__file__), 'alembic.ini')
-
-VALID_SERVICES = ['fwaas', 'lbaas', 'vpnaas']
-INSTALLED_SERVICES = [service_ for service_ in VALID_SERVICES
-                      if 'neutron-%s' % service_ in migration_entrypoints]
-INSTALLED_SUBPROJECTS = list(migration_entrypoints)
-
-_core_opts = [
-    cfg.StrOpt('core_plugin',
-               default='',
-               help=_('Neutron plugin provider module'),
-               deprecated_for_removal=True),
-    cfg.StrOpt('service',
-               choices=INSTALLED_SERVICES,
-               help=(_("(Deprecated. Use '--subproject neutron-SERVICE' "
-                       "instead.) The advanced service to execute the "
-                       "command against.")),
-               deprecated_for_removal=True),
-    cfg.StrOpt('subproject',
-               choices=INSTALLED_SUBPROJECTS,
-               help=(_("The subproject to execute the command against. "
-                       "Can be one of: '%s'.")
-                     % "', '".join(INSTALLED_SUBPROJECTS))),
-    cfg.BoolOpt('split_branches',
-                default=False,
-                help=_("Enforce using split branches file structure."))
-]
-
-_quota_opts = [
-    cfg.StrOpt('quota_driver',
-               default='',
-               help=_('Neutron quota driver class'),
-               deprecated_for_removal=True),
-]
-
-_db_opts = [
-    cfg.StrOpt('connection',
-               deprecated_name='sql_connection',
-               default='',
-               secret=True,
-               help=_('URL to database')),
-    cfg.StrOpt('engine',
-               default='',
-               help=_('Database engine for which the script will be '
-                      'generated when using offline migration.')),
-]
-
-CONF = cfg.ConfigOpts()
-CONF.register_cli_opts(_core_opts)
-CONF.register_cli_opts(_db_opts, 'database')
-CONF.register_opts(_quota_opts, 'QUOTAS')
-
-
-def do_alembic_command(config, cmd, revision=None, desc=None, **kwargs):
-    args = []
-    if revision:
-        args.append(revision)
-
-    project = config.get_main_option('neutron_project')
-    if desc:
-        alembic_util.msg(_('Running %(cmd)s (%(desc)s) for %(project)s ...') %
-                         {'cmd': cmd, 'desc': desc, 'project': project})
-    else:
-        alembic_util.msg(_('Running %(cmd)s for %(project)s ...') %
-                         {'cmd': cmd, 'project': project})
-    try:
-        getattr(alembic_command, cmd)(config, *args, **kwargs)
-    except alembic_util.CommandError as e:
-        alembic_util.err(six.text_type(e))
-    alembic_util.msg(_('OK'))
-
-
-def _get_alembic_entrypoint(project):
-    if project not in migration_entrypoints:
-        alembic_util.err(_('Sub-project %s not installed.') % project)
-    return migration_entrypoints[project]
-
-
-def do_generic_show(config, cmd):
-    kwargs = {'verbose': CONF.command.verbose}
-    do_alembic_command(config, cmd, **kwargs)
-
-
-def do_check_migration(config, cmd):
-    do_alembic_command(config, 'branches')
-    validate_revisions(config)
-    validate_head_file(config)
-
-
-def add_alembic_subparser(sub, cmd):
-    return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__)
-
-
-def add_branch_options(parser):
-    group = parser.add_mutually_exclusive_group()
-    group.add_argument('--expand', action='store_true')
-    group.add_argument('--contract', action='store_true')
-
-
-def _find_milestone_revisions(config, milestone, branch=None):
-    """Return the revision(s) for a given milestone."""
-    script = alembic_script.ScriptDirectory.from_config(config)
-    return [
-        (m.revision, label)
-        for m in _get_revisions(script)
-        for label in (m.branch_labels or [None])
-        if milestone in getattr(m.module, 'neutron_milestone', []) and
-        (branch is None or branch in m.branch_labels)
-    ]
-
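_find_milestone_revisions resolves a milestone name by scanning each revision
module for a module-level neutron_milestone attribute; the script that closes
out a release cycle declares it. A sketch of what such a declaration looks
like inside a revision script (the revision ids are placeholders):

    # revision identifiers, used by Alembic.
    revision = 'aaaa00000001'
    down_revision = 'aaaa00000000'

    from neutron.db import migration

    # mark this script as the final revision of the Liberty cycle so that
    # 'upgrade liberty' can resolve the milestone to this revision
    neutron_milestone = [migration.LIBERTY]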
-
-def do_upgrade(config, cmd):
-    branch = None
-
-    if ((CONF.command.revision or CONF.command.delta) and
-        (CONF.command.expand or CONF.command.contract)):
-        raise SystemExit(_(
-            'Phase upgrade options do not accept revision specification'))
-
-    if CONF.command.expand:
-        branch = EXPAND_BRANCH
-        revision = _get_branch_head(EXPAND_BRANCH)
-
-    elif CONF.command.contract:
-        branch = CONTRACT_BRANCH
-        revision = _get_branch_head(CONTRACT_BRANCH)
-
-    elif not CONF.command.revision and not CONF.command.delta:
-        raise SystemExit(_('You must provide a revision or relative delta'))
-
-    else:
-        revision = CONF.command.revision or ''
-        if '-' in revision:
-            raise SystemExit(_('Negative relative revision (downgrade) not '
-                               'supported'))
-
-        delta = CONF.command.delta
-        if delta:
-            if '+' in revision:
-                raise SystemExit(_('Use either --delta or relative revision, '
-                                   'not both'))
-            if delta < 0:
-                raise SystemExit(_('Negative delta (downgrade) not supported'))
-            revision = '%s+%d' % (revision, delta)
-
-        # leave branchless 'head' revision request backward compatible by
-        # applying all heads in all available branches.
-        if revision == 'head':
-            revision = 'heads'
-
-    if revision in migration.NEUTRON_MILESTONES:
-        revisions = _find_milestone_revisions(config, revision, branch)
-    else:
-        revisions = [(revision, branch)]
-
-    for revision, branch in revisions:
-        if not CONF.command.sql:
-            run_sanity_checks(config, revision)
-        do_alembic_command(config, cmd, revision=revision,
-                           desc=branch, sql=CONF.command.sql)
-
-
-def no_downgrade(config, cmd):
-    raise SystemExit(_("Downgrade no longer supported"))
-
-
-def do_stamp(config, cmd):
-    do_alembic_command(config, cmd,
-                       revision=CONF.command.revision,
-                       sql=CONF.command.sql)
-
-
-def _get_branch_head(branch):
-    '''Get the latest @head specification for a branch.'''
-    return '%s@head' % branch
-
-
-def _check_bootstrap_new_branch(branch, version_path, addn_kwargs):
-    addn_kwargs['version_path'] = version_path
-    addn_kwargs['head'] = _get_branch_head(branch)
-    if not os.path.exists(version_path):
-        # Bootstrap initial directory structure
-        utils.ensure_dir(version_path)
-
-
-def do_revision(config, cmd):
-    kwargs = {
-        'message': CONF.command.message,
-        'autogenerate': CONF.command.autogenerate,
-        'sql': CONF.command.sql,
-    }
-    if CONF.command.expand:
-        kwargs['head'] = 'expand@head'
-    elif CONF.command.contract:
-        kwargs['head'] = 'contract@head'
-
-    do_alembic_command(config, cmd, **kwargs)
-    if _use_separate_migration_branches(config):
-        update_head_files(config)
-    else:
-        update_head_file(config)
-
-
-def _get_release_labels(labels):
-    result = set()
-    for label in labels:
-        # release labels were introduced in Liberty for a short time and
-        # dropped within that same release cycle
-        result.add('%s_%s' % (migration.LIBERTY, label))
-    return result
-
-
-def _compare_labels(revision, expected_labels):
-    # validate that the script has expected labels only
-    bad_labels = revision.branch_labels - expected_labels
-    if bad_labels:
-        # NOTE(ihrachyshka): this hack is temporary to accommodate those
-        # projects that already initialized their branches with liberty_*
-        # labels. Let's notify them about the deprecation for now and drop it
-        # later.
-        bad_labels_with_release = (revision.branch_labels -
-                                   _get_release_labels(expected_labels))
-        if not bad_labels_with_release:
-            alembic_util.warn(
-                _('Release aware branch labels (%s) are deprecated. '
-                  'Please switch to expand@ and contract@ '
-                  'labels.') % bad_labels)
-            return
-
-        script_name = os.path.basename(revision.path)
-        alembic_util.err(
-            _('Unexpected label for script %(script_name)s: %(labels)s') %
-            {'script_name': script_name,
-             'labels': bad_labels}
-        )
-
-
-def _validate_single_revision_labels(script_dir, revision, label=None):
-    expected_labels = set()
-    if label is not None:
-        expected_labels.add(label)
-
-    _compare_labels(revision, expected_labels)
-
-    # if it's not the root element of the branch, expect the parent of the
-    # script to have the same label
-    if revision.down_revision is not None:
-        down_revision = script_dir.get_revision(revision.down_revision)
-        _compare_labels(down_revision, expected_labels)
-
-
-def _validate_revision(script_dir, revision):
-    for branch in MIGRATION_BRANCHES:
-        if branch in revision.path:
-            _validate_single_revision_labels(
-                script_dir, revision, label=branch)
-            return
-
-    # validate script from branchless part of migration rules
-    _validate_single_revision_labels(script_dir, revision)
-
-
-def validate_revisions(config):
-    script_dir = alembic_script.ScriptDirectory.from_config(config)
-    revisions = _get_revisions(script_dir)
-
-    for revision in revisions:
-        _validate_revision(script_dir, revision)
-
-    branchpoints = _get_branch_points(script_dir)
-    if len(branchpoints) > 1:
-        branchpoints = ', '.join(p.revision for p in branchpoints)
-        alembic_util.err(
-            _('Unexpected number of alembic branch points: %(branchpoints)s') %
-            {'branchpoints': branchpoints}
-        )
-
-
-def _get_revisions(script):
-    return list(script.walk_revisions(base='base', head='heads'))
-
-
-def _get_branch_points(script):
-    branchpoints = []
-    for revision in _get_revisions(script):
-        if revision.is_branch_point:
-            branchpoints.append(revision)
-    return branchpoints
-
-
-def validate_head_file(config):
-    '''Check that HEAD file contains the latest head for the branch.'''
-    if _use_separate_migration_branches(config):
-        _validate_head_files(config)
-    else:
-        _validate_head_file(config)
-
-
-@debtcollector.removals.remove(message=BRANCHLESS_WARNING)
-def _validate_head_file(config):
-    '''Check that HEAD file contains the latest head for the branch.'''
-    script = alembic_script.ScriptDirectory.from_config(config)
-    expected_head = script.get_heads()
-    head_path = _get_head_file_path(config)
-    try:
-        with open(head_path) as file_:
-            observed_head = file_.read().split()
-            if observed_head == expected_head:
-                return
-    except IOError:
-        pass
-    alembic_util.err(
-        _('HEAD file does not match migration timeline head, expected: %s')
-        % expected_head)
-
-
-def _get_heads_map(config):
-    script = alembic_script.ScriptDirectory.from_config(config)
-    heads = script.get_heads()
-    head_map = {}
-    for head in heads:
-        if CONTRACT_BRANCH in script.get_revision(head).branch_labels:
-            head_map[CONTRACT_BRANCH] = head
-        else:
-            head_map[EXPAND_BRANCH] = head
-    return head_map
-
-
-def _check_head(branch_name, head_file, head):
-    try:
-        with open(head_file) as file_:
-            observed_head = file_.read().strip()
-    except IOError:
-        pass
-    else:
-        if observed_head != head:
-            alembic_util.err(
-                _('%(branch)s HEAD file does not match migration timeline '
-                  'head, expected: %(head)s') % {'branch': branch_name.title(),
-                                                 'head': head})
-
-
-def _validate_head_files(config):
-    '''Check that HEAD files contain the latest head for the branch.'''
-    contract_head = _get_contract_head_file_path(config)
-    expand_head = _get_expand_head_file_path(config)
-    if not os.path.exists(contract_head) or not os.path.exists(expand_head):
-        alembic_util.warn(_("Repository does not contain HEAD files for "
-                            "contract and expand branches."))
-        return
-    head_map = _get_heads_map(config)
-    _check_head(CONTRACT_BRANCH, contract_head, head_map[CONTRACT_BRANCH])
-    _check_head(EXPAND_BRANCH, expand_head, head_map[EXPAND_BRANCH])
-
-
-def update_head_files(config):
-    '''Update HEAD files with the latest branch heads.'''
-    head_map = _get_heads_map(config)
-    contract_head = _get_contract_head_file_path(config)
-    expand_head = _get_expand_head_file_path(config)
-    with open(contract_head, 'w+') as f:
-        f.write(head_map[CONTRACT_BRANCH] + '\n')
-    with open(expand_head, 'w+') as f:
-        f.write(head_map[EXPAND_BRANCH] + '\n')
-
-    old_head_file = _get_head_file_path(config)
-    old_heads_file = _get_heads_file_path(config)
-    for file_ in (old_head_file, old_heads_file):
-        fileutils.delete_if_exists(file_)
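After update_head_files runs, each branch head file holds exactly one
revision id followed by a newline, which _validate_head_files later compares
against the freshly computed heads map. Illustration (ids invented):

    $ cat versions/CONTRACT_HEAD
    aaaa11112222
    $ cat versions/EXPAND_HEAD
    bbbb33334444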
-
-
-@debtcollector.removals.remove(message=BRANCHLESS_WARNING)
-def update_head_file(config):
-    script = alembic_script.ScriptDirectory.from_config(config)
-    head = script.get_heads()
-    with open(_get_head_file_path(config), 'w+') as f:
-        f.write('\n'.join(head))
-
-
-def add_command_parsers(subparsers):
-    for name in ['current', 'history', 'branches', 'heads']:
-        parser = add_alembic_subparser(subparsers, name)
-        parser.set_defaults(func=do_generic_show)
-        parser.add_argument('--verbose',
-                            action='store_true',
-                            help='Display more verbose output for the '
-                                 'specified command')
-
-    help_text = (getattr(alembic_command, 'branches').__doc__ +
-                 ' and validate head file')
-    parser = subparsers.add_parser('check_migration', help=help_text)
-    parser.set_defaults(func=do_check_migration)
-
-    parser = add_alembic_subparser(subparsers, 'upgrade')
-    parser.add_argument('--delta', type=int)
-    parser.add_argument('--sql', action='store_true')
-    parser.add_argument('revision', nargs='?')
-    parser.add_argument('--mysql-engine',
-                        default='',
-                        help='Change the MySQL storage engine of existing '
-                             'tables')
-    add_branch_options(parser)
-
-    parser.set_defaults(func=do_upgrade)
-
-    parser = subparsers.add_parser('downgrade', help="(No longer supported)")
-    parser.add_argument('None', nargs='?', help="Downgrade not supported")
-    parser.set_defaults(func=no_downgrade)
-
-    parser = add_alembic_subparser(subparsers, 'stamp')
-    parser.add_argument('--sql', action='store_true')
-    parser.add_argument('revision')
-    parser.set_defaults(func=do_stamp)
-
-    parser = add_alembic_subparser(subparsers, 'revision')
-    parser.add_argument('-m', '--message')
-    parser.add_argument('--autogenerate', action='store_true')
-    parser.add_argument('--sql', action='store_true')
-    add_branch_options(parser)
-    parser.set_defaults(func=do_revision)
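Combined with add_branch_options, the revision subparser lets a new script be
generated directly on a branch head; typical invocations (same executable
name assumption as above):

    neutron-db-manage revision -m "add foo table" --expand
    neutron-db-manage revision --autogenerate -m "rework bar"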
-
-
-command_opt = cfg.SubCommandOpt('command',
-                                title='Command',
-                                help=_('Available commands'),
-                                handler=add_command_parsers)
-
-CONF.register_cli_opt(command_opt)
-
-
-def _get_project_base(config):
-    '''Return the base python namespace name for a project.'''
-    script_location = config.get_main_option('script_location')
-    return script_location.split(':')[0].split('.')[0]
-
-
-def _get_package_root_dir(config):
-    root_module = importutils.try_import(_get_project_base(config))
-    if not root_module:
-        project = config.get_main_option('neutron_project')
-        alembic_util.err(_("Failed to locate source for %s.") % project)
-    # The root_module.__file__ property is a path like
-    #    '/opt/stack/networking-foo/networking_foo/__init__.py'
-    # We return just
-    #    '/opt/stack/networking-foo'
-    return os.path.dirname(os.path.dirname(root_module.__file__))
-
-
-def _get_root_versions_dir(config):
-    '''Return root directory that contains all migration rules.'''
-    root_dir = _get_package_root_dir(config)
-    script_location = config.get_main_option('script_location')
-    # Script location is something like:
-    #   'project_base.db.migration:alembic_migrations'
-    # Convert it to:
-    #   'project_base/db/migration/alembic_migrations/versions'
-    part1, part2 = script_location.split(':')
-    parts = part1.split('.') + part2.split('.') + ['versions']
-    # Return the absolute path to the versions dir
-    return os.path.join(root_dir, *parts)
-
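
As a concrete illustration of the string manipulation performed by
_get_root_versions_dir() above (the networking-foo paths are hypothetical,
taken from the comment in _get_package_root_dir()):

import os

script_location = 'networking_foo.db.migration:alembic_migrations'
part1, part2 = script_location.split(':')
parts = part1.split('.') + part2.split('.') + ['versions']
# parts == ['networking_foo', 'db', 'migration',
#           'alembic_migrations', 'versions']
print(os.path.join('/opt/stack/networking-foo', *parts))
# /opt/stack/networking-foo/networking_foo/db/migration/alembic_migrations/versions
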
-
-def _get_head_file_path(config):
-    '''Return the path of the file that contains the single head.'''
-    return os.path.join(
-        _get_root_versions_dir(config),
-        HEAD_FILENAME)
-
-
-def _get_heads_file_path(config):
-    '''
-    Return the path of the file that was once used to maintain the list of
-    latest heads.
-    '''
-    return os.path.join(
-        _get_root_versions_dir(config),
-        HEADS_FILENAME)
-
-
-def _get_contract_head_file_path(config):
-    '''Return the path of the file used to maintain the contract head.'''
-    return os.path.join(
-        _get_root_versions_dir(config),
-        CONTRACT_HEAD_FILENAME)
-
-
-def _get_expand_head_file_path(config):
-    '''Return the path of the file used to maintain the expand head.'''
-    return os.path.join(
-        _get_root_versions_dir(config),
-        EXPAND_HEAD_FILENAME)
-
-
-def _get_version_branch_path(config, release=None, branch=None):
-    version_path = _get_root_versions_dir(config)
-    if branch and release:
-        return os.path.join(version_path, release, branch)
-    return version_path
-
-
-def _use_separate_migration_branches(config):
-    '''Detect whether split migration branches should be used.'''
-    if CONF.split_branches:
-        return True
-
-    script_dir = alembic_script.ScriptDirectory.from_config(config)
-    if _get_branch_points(script_dir):
-        return True
-
-    return False
-
-
-def _set_version_locations(config):
-    '''Make alembic see all revisions in all migration branches.'''
-    split_branches = False
-    version_paths = [_get_version_branch_path(config)]
-    for release in RELEASES:
-        for branch in MIGRATION_BRANCHES:
-            version_path = _get_version_branch_path(config, release, branch)
-            if split_branches or os.path.exists(version_path):
-                split_branches = True
-                version_paths.append(version_path)
-
-    config.set_main_option('version_locations', ' '.join(version_paths))
-
-
-def _get_installed_entrypoint(subproject):
-    '''Get the entrypoint for the subproject, which must be installed.'''
-    if subproject not in migration_entrypoints:
-        alembic_util.err(_('Package %s not installed') % subproject)
-    return migration_entrypoints[subproject]
-
-
-def _get_subproject_script_location(subproject):
-    '''Get the script location for the installed subproject.'''
-    entrypoint = _get_installed_entrypoint(subproject)
-    return ':'.join([entrypoint.module_name, entrypoint.attrs[0]])
-
-
-def _get_service_script_location(service):
-    '''Get the script location for the service, which must be installed.'''
-    return _get_subproject_script_location('neutron-%s' % service)
-
-
-def _get_subproject_base(subproject):
-    '''Get the import base name for the installed subproject.'''
-    entrypoint = _get_installed_entrypoint(subproject)
-    return entrypoint.module_name.split('.')[0]
-
-
-def get_alembic_configs():
-    '''Return a list of alembic configs, one per project.'''
-
-    # Get the script locations for the specified or installed projects.
-    # Which projects to get script locations for is determined by the CLI
-    # options as follows:
-    #     --service X       # only subproject neutron-X (deprecated)
-    #     --subproject Y    # only subproject Y (where Y can be neutron)
-    #     (none specified)  # neutron and all installed subprojects
-    script_locations = {}
-    if CONF.service:
-        script_location = _get_service_script_location(CONF.service)
-        script_locations['neutron-%s' % CONF.service] = script_location
-    elif CONF.subproject:
-        script_location = _get_subproject_script_location(CONF.subproject)
-        script_locations[CONF.subproject] = script_location
-    else:
-        for subproject in migration_entrypoints:
-            script_locations[subproject] = _get_subproject_script_location(
-                subproject)
-
-    # Return a list of alembic configs from the projects in the
-    # script_locations dict. If neutron is in the list it is first.
-    configs = []
-    project_seq = sorted(script_locations.keys())
-    # Core neutron must be the first project if there is more than one
-    if len(project_seq) > 1 and 'neutron' in project_seq:
-        project_seq.insert(0, project_seq.pop(project_seq.index('neutron')))
-    for project in project_seq:
-        config = alembic_config.Config(neutron_alembic_ini)
-        config.set_main_option('neutron_project', project)
-        script_location = script_locations[project]
-        config.set_main_option('script_location', script_location)
-        _set_version_locations(config)
-        config.neutron_config = CONF
-        configs.append(config)
-
-    return configs
-
-
-def get_neutron_config():
-    # Neutron's alembic config is always the first one
-    return get_alembic_configs()[0]
-
-
-def run_sanity_checks(config, revision):
-    script_dir = alembic_script.ScriptDirectory.from_config(config)
-
-    def check_sanity(rev, context):
-        # TODO(ihrachyshka): this relies on an internal alembic API; we may
-        # need alembic to expose the implicit_base= argument in the public
-        # iterate_revisions() call
-        for script in script_dir.revision_map.iterate_revisions(
-                revision, rev, implicit_base=True):
-            if hasattr(script.module, 'check_sanity'):
-                script.module.check_sanity(context.connection)
-        return []
-
-    with environment.EnvironmentContext(config, script_dir,
-                                        fn=check_sanity,
-                                        starting_rev=None,
-                                        destination_rev=revision):
-        script_dir.run_env()
-
-
-def validate_cli_options():
-    if CONF.subproject and CONF.service:
-        alembic_util.err(_("Cannot specify both --service and --subproject."))
-
-
-def get_engine_config():
-    return [obj for obj in _db_opts if obj.name == 'engine']
-
-
-def main():
-    CONF(project='neutron')
-    validate_cli_options()
-    for config in get_alembic_configs():
-        # TODO(gongysh): enable logging
-        CONF.command.func(config, CONF.command.name)
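
main() above dispatches through oslo.config's SubCommandOpt machinery: each
subparser registered in add_command_parsers() stores its handler via
set_defaults(func=...), and the chosen handler then surfaces as
CONF.command.func. A minimal standalone sketch of that pattern, assuming only
oslo.config is installed (the 'hello' command and do_hello handler are
illustrative, not part of this module):

from oslo_config import cfg

CONF = cfg.ConfigOpts()


def do_hello(config, command_name):
    # Stand-in for handlers such as do_upgrade or do_revision above.
    print('running %s with config %r' % (command_name, config))


def add_parsers(subparsers):
    parser = subparsers.add_parser('hello')
    parser.set_defaults(func=do_hello)


CONF.register_cli_opt(cfg.SubCommandOpt('command', handler=add_parsers))
CONF(['hello'], project='example')
# Dispatch exactly as main() does: call the selected subparser's func.
CONF.command.func(None, CONF.command.name)
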
diff --git a/neutron/db/migration/models/__init__.py b/neutron/db/migration/models/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/db/migration/models/head.py b/neutron/db/migration/models/head.py
deleted file mode 100644 (file)
index db81154..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-The module provides all database models at current HEAD.
-
-Its purpose is to create comparable metadata with current database schema.
-Based on this comparison database can be healed with healing migration.
-
-"""
-
-from neutron.db import address_scope_db  # noqa
-from neutron.db import agents_db  # noqa
-from neutron.db import agentschedulers_db  # noqa
-from neutron.db import allowedaddresspairs_db  # noqa
-from neutron.db import dvr_mac_db  # noqa
-from neutron.db import external_net_db  # noqa
-from neutron.db import extradhcpopt_db  # noqa
-from neutron.db import extraroute_db  # noqa
-from neutron.db import flavors_db  # noqa
-from neutron.db import l3_agentschedulers_db  # noqa
-from neutron.db import l3_attrs_db  # noqa
-from neutron.db import l3_db  # noqa
-from neutron.db import l3_dvrscheduler_db  # noqa
-from neutron.db import l3_gwmode_db  # noqa
-from neutron.db import l3_hamode_db  # noqa
-from neutron.db.metering import metering_db  # noqa
-from neutron.db import model_base
-from neutron.db import models_v2  # noqa
-from neutron.db import portbindings_db  # noqa
-from neutron.db import portsecurity_db  # noqa
-from neutron.db.qos import models as qos_models  # noqa
-from neutron.db.quota import models  # noqa
-from neutron.db import rbac_db_models  # noqa
-from neutron.db import securitygroups_db  # noqa
-from neutron.db import servicetype_db  # noqa
-from neutron.ipam.drivers.neutrondb_ipam import db_models  # noqa
-from neutron.plugins.ml2.drivers import type_flat  # noqa
-from neutron.plugins.ml2.drivers import type_geneve  # noqa
-from neutron.plugins.ml2.drivers import type_gre  # noqa
-from neutron.plugins.ml2.drivers import type_vlan  # noqa
-from neutron.plugins.ml2.drivers import type_vxlan  # noqa
-from neutron.plugins.ml2 import models  # noqa
-
-
-def get_metadata():
-    return model_base.BASEV2.metadata
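
Since get_metadata() returns the model metadata at HEAD, it can be diffed
against a live schema with alembic's public autogenerate API. A hedged
sketch, assuming neutron and alembic are importable (the sqlite URL is an
arbitrary example):

import sqlalchemy as sa
from alembic.autogenerate import compare_metadata
from alembic.migration import MigrationContext

from neutron.db.migration.models import head

engine = sa.create_engine('sqlite:///neutron-example.db')
with engine.connect() as conn:
    ctx = MigrationContext.configure(conn)
    # Each entry describes a table, column or index that differs between
    # the models at HEAD and the actual database schema.
    for entry in compare_metadata(ctx, head.get_metadata()):
        print(entry)
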
diff --git a/neutron/db/model_base.py b/neutron/db/model_base.py
deleted file mode 100644 (file)
index c5a4f04..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_db.sqlalchemy import models
-from oslo_utils import uuidutils
-import sqlalchemy as sa
-from sqlalchemy.ext import declarative
-from sqlalchemy import orm
-
-from neutron.api.v2 import attributes as attr
-
-
-class HasTenant(object):
-    """Tenant mixin, add to subclasses that have a tenant."""
-
-    # NOTE(jkoelker) tenant_id is just a free form string ;(
-    tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), index=True)
-
-
-class HasId(object):
-    """id mixin, add to subclasses that have an id."""
-
-    id = sa.Column(sa.String(36),
-                   primary_key=True,
-                   default=uuidutils.generate_uuid)
-
-
-class HasStatusDescription(object):
-    """Status with description mixin."""
-
-    status = sa.Column(sa.String(16), nullable=False)
-    status_description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN))
-
-
-class NeutronBase(models.ModelBase):
-    """Base class for Neutron Models."""
-
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-
-    def __iter__(self):
-        self._i = iter(orm.object_mapper(self).columns)
-        return self
-
-    def next(self):
-        n = next(self._i).name
-        return n, getattr(self, n)
-
-    __next__ = next
-
-    def __repr__(self):
-        """sqlalchemy based automatic __repr__ method."""
-        items = ['%s=%r' % (col.name, getattr(self, col.name))
-                 for col in self.__table__.columns]
-        return "<%s.%s[object at %x] {%s}>" % (self.__class__.__module__,
-                                               self.__class__.__name__,
-                                               id(self), ', '.join(items))
-
-
-class NeutronBaseV2(NeutronBase):
-
-    @declarative.declared_attr
-    def __tablename__(cls):
-        # NOTE(jkoelker) use the pluralized class name as the table name
-        return cls.__name__.lower() + 's'
-
-
-BASEV2 = declarative.declarative_base(cls=NeutronBaseV2)
-
-
-class StandardAttribute(BASEV2):
-    """Common table to associate all Neutron API resources.
-
-    By having Neutron objects related to this table, we can associate new
-    tables that apply to many Neutron objects (e.g. timestamps, rbac entries)
-    to this table to avoid schema duplication while maintaining referential
-    integrity.
-
-    NOTE(kevinbenton): This table should not have more columns added to it
-    unless we are absolutely certain the new column will have a value for
-    every single type of Neutron resource. Otherwise this table will be filled
-    with NULL entries for combinations that don't make sense. Additionally,
-    by keeping this table small we can ensure that performance isn't adversely
-    impacted for queries on objects.
-    """
-
-    # sqlite doesn't support auto increment on big integers so we use big int
-    # for everything but sqlite
-    id = sa.Column(sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
-                   primary_key=True, autoincrement=True)
-
-    # NOTE(kevinbenton): this column is redundant information, but it allows
-    # operators/devs to look at the contents of this table and know which table
-    # the corresponding object is in.
-    # 255 was selected as a max just because it's the varchar ceiling in mysql
-    # before a 2-byte prefix is required. We shouldn't get anywhere near this
-    # limit with our table names...
-    resource_type = sa.Column(sa.String(255), nullable=False)
-
-
-class HasStandardAttributes(object):
-    @declarative.declared_attr
-    def standard_attr_id(cls):
-        return sa.Column(
-            sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
-            sa.ForeignKey(StandardAttribute.id, ondelete="CASCADE"),
-            unique=True,
-            nullable=False
-        )
-
-    # NOTE(kevinbenton): we have to disable the following pylint check because
-    # it thinks we are overriding this method in the __init__ method.
-    #pylint: disable=method-hidden
-    @declarative.declared_attr
-    def standard_attr(cls):
-        return orm.relationship(StandardAttribute,
-                                lazy='joined',
-                                cascade='all, delete-orphan',
-                                single_parent=True,
-                                uselist=False)
-
-    def __init__(self, *args, **kwargs):
-        super(HasStandardAttributes, self).__init__(*args, **kwargs)
-        # here we automatically create the related standard attribute object
-        self.standard_attr = StandardAttribute(
-            resource_type=self.__tablename__)
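
Any model that inherits HasStandardAttributes gets a companion
StandardAttribute row created in __init__, stamped with the model's own table
name. A minimal sketch of the mixins in use (Widget is a hypothetical model,
not a real neutron table):

import sqlalchemy as sa

from neutron.db import model_base


class Widget(model_base.HasStandardAttributes, model_base.BASEV2,
             model_base.HasId, model_base.HasTenant):
    """Hypothetical resource used only to illustrate the mixins."""

    name = sa.Column(sa.String(255))


widget = Widget(name='example')
# The companion row records which table the corresponding object is in.
assert widget.standard_attr.resource_type == 'widgets'
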
diff --git a/neutron/db/models_v2.py b/neutron/db/models_v2.py
deleted file mode 100644 (file)
index 03a3a82..0000000
+++ /dev/null
@@ -1,273 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-
-from neutron.api.v2 import attributes as attr
-from neutron.common import constants
-from neutron.db import agentschedulers_db as agt
-from neutron.db import model_base
-from neutron.db import rbac_db_models
-
-
-# NOTE(kevinbenton): these are here for external projects that expect them
-# to be found in this module.
-HasTenant = model_base.HasTenant
-HasId = model_base.HasId
-HasStatusDescription = model_base.HasStatusDescription
-
-
-class IPAvailabilityRange(model_base.BASEV2):
-    """Internal representation of available IPs for Neutron subnets.
-
-    Allocation - first entry from the range will be allocated.
-    If the first entry is equal to the last entry then this row
-    will be deleted.
-    Recycling ips involves reading the IPAllocationPool and IPAllocation tables
-    and inserting ranges representing available ips.  This happens after the
-    final allocation is pulled from this table and a new ip allocation is
-    requested.  Any contiguous ranges of available ips will be inserted as a
-    single range.
-    """
-
-    allocation_pool_id = sa.Column(sa.String(36),
-                                   sa.ForeignKey('ipallocationpools.id',
-                                                 ondelete="CASCADE"),
-                                   nullable=False,
-                                   primary_key=True)
-    first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
-    last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
-    __table_args__ = (
-        sa.UniqueConstraint(
-            first_ip, allocation_pool_id,
-            name='uniq_ipavailabilityranges0first_ip0allocation_pool_id'),
-        sa.UniqueConstraint(
-            last_ip, allocation_pool_id,
-            name='uniq_ipavailabilityranges0last_ip0allocation_pool_id'),
-        model_base.BASEV2.__table_args__
-    )
-
-    def __repr__(self):
-        return "%s - %s" % (self.first_ip, self.last_ip)
-
-
-class IPAllocationPool(model_base.BASEV2, HasId):
-    """Representation of an allocation pool in a Neutron subnet."""
-
-    subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
-                                                       ondelete="CASCADE"),
-                          nullable=True)
-    first_ip = sa.Column(sa.String(64), nullable=False)
-    last_ip = sa.Column(sa.String(64), nullable=False)
-    available_ranges = orm.relationship(IPAvailabilityRange,
-                                        backref='ipallocationpool',
-                                        lazy="select",
-                                        cascade='all, delete-orphan')
-
-    def __repr__(self):
-        return "%s - %s" % (self.first_ip, self.last_ip)
-
-
-class IPAllocation(model_base.BASEV2):
-    """Internal representation of allocated IP addresses in a Neutron subnet.
-    """
-
-    port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
-                                                     ondelete="CASCADE"),
-                        nullable=True)
-    ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
-    subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
-                                                       ondelete="CASCADE"),
-                          nullable=False, primary_key=True)
-    network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id",
-                                                        ondelete="CASCADE"),
-                           nullable=False, primary_key=True)
-
-
-class Route(object):
-    """mixin of a route."""
-
-    destination = sa.Column(sa.String(64), nullable=False, primary_key=True)
-    nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True)
-
-
-class SubnetRoute(model_base.BASEV2, Route):
-
-    subnet_id = sa.Column(sa.String(36),
-                          sa.ForeignKey('subnets.id',
-                                        ondelete="CASCADE"),
-                          primary_key=True)
-
-
-class Port(model_base.HasStandardAttributes, model_base.BASEV2,
-           HasId, HasTenant):
-    """Represents a port on a Neutron v2 network."""
-
-    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
-    network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
-                           nullable=False)
-    fixed_ips = orm.relationship(IPAllocation, backref='port', lazy='joined',
-                                 passive_deletes='all')
-    mac_address = sa.Column(sa.String(32), nullable=False)
-    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
-    status = sa.Column(sa.String(16), nullable=False)
-    device_id = sa.Column(sa.String(attr.DEVICE_ID_MAX_LEN), nullable=False)
-    device_owner = sa.Column(sa.String(attr.DEVICE_OWNER_MAX_LEN),
-                             nullable=False)
-    dns_name = sa.Column(sa.String(255), nullable=True)
-    __table_args__ = (
-        sa.Index(
-            'ix_ports_network_id_mac_address', 'network_id', 'mac_address'),
-        sa.Index(
-            'ix_ports_network_id_device_owner', 'network_id', 'device_owner'),
-        sa.UniqueConstraint(
-            network_id, mac_address,
-            name='uniq_ports0network_id0mac_address'),
-        model_base.BASEV2.__table_args__
-    )
-
-    def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
-                 mac_address=None, admin_state_up=None, status=None,
-                 device_id=None, device_owner=None, fixed_ips=None,
-                 dns_name=None):
-        super(Port, self).__init__()
-        self.id = id
-        self.tenant_id = tenant_id
-        self.name = name
-        self.network_id = network_id
-        self.mac_address = mac_address
-        self.admin_state_up = admin_state_up
-        self.device_owner = device_owner
-        self.device_id = device_id
-        self.dns_name = dns_name
-        # Since this is a relationship, only set it if one is passed in.
-        if fixed_ips:
-            self.fixed_ips = fixed_ips
-
-        # NOTE(arosen): status must be set last, as an event is triggered
-        # on it.
-        self.status = status
-
-
-class DNSNameServer(model_base.BASEV2):
-    """Internal representation of a DNS nameserver."""
-
-    address = sa.Column(sa.String(128), nullable=False, primary_key=True)
-    subnet_id = sa.Column(sa.String(36),
-                          sa.ForeignKey('subnets.id',
-                                        ondelete="CASCADE"),
-                          primary_key=True)
-    order = sa.Column(sa.Integer, nullable=False, server_default='0')
-
-
-class Subnet(model_base.HasStandardAttributes, model_base.BASEV2,
-             HasId, HasTenant):
-    """Represents a neutron subnet.
-
-    When a subnet is created, the first and last entries will be created.
-    These are used for IP allocation.
-    """
-
-    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
-    network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id'))
-    subnetpool_id = sa.Column(sa.String(36), index=True)
-    ip_version = sa.Column(sa.Integer, nullable=False)
-    cidr = sa.Column(sa.String(64), nullable=False)
-    gateway_ip = sa.Column(sa.String(64))
-    allocation_pools = orm.relationship(IPAllocationPool,
-                                        backref='subnet',
-                                        lazy="joined",
-                                        cascade='delete')
-    enable_dhcp = sa.Column(sa.Boolean())
-    dns_nameservers = orm.relationship(DNSNameServer,
-                                       backref='subnet',
-                                       cascade='all, delete, delete-orphan',
-                                       order_by=DNSNameServer.order,
-                                       lazy='joined')
-    routes = orm.relationship(SubnetRoute,
-                              backref='subnet',
-                              cascade='all, delete, delete-orphan',
-                              lazy='joined')
-    ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC,
-                                     constants.DHCPV6_STATEFUL,
-                                     constants.DHCPV6_STATELESS,
-                                     name='ipv6_ra_modes'), nullable=True)
-    ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC,
-                                  constants.DHCPV6_STATEFUL,
-                                  constants.DHCPV6_STATELESS,
-                                  name='ipv6_address_modes'), nullable=True)
-    # subnets don't have their own rbac_entries, they just inherit from
-    # the network rbac entries
-    rbac_entries = orm.relationship(
-        rbac_db_models.NetworkRBAC, lazy='joined',
-        foreign_keys='Subnet.network_id',
-        primaryjoin='Subnet.network_id==NetworkRBAC.object_id')
-
-
-class SubnetPoolPrefix(model_base.BASEV2):
-    """Represents a neutron subnet pool prefix
-    """
-
-    __tablename__ = 'subnetpoolprefixes'
-
-    cidr = sa.Column(sa.String(64), nullable=False, primary_key=True)
-    subnetpool_id = sa.Column(sa.String(36),
-                              sa.ForeignKey('subnetpools.id',
-                                            ondelete='CASCADE'),
-                              nullable=False,
-                              primary_key=True)
-
-
-class SubnetPool(model_base.HasStandardAttributes, model_base.BASEV2,
-                 HasId, HasTenant):
-    """Represents a neutron subnet pool.
-    """
-
-    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
-    ip_version = sa.Column(sa.Integer, nullable=False)
-    default_prefixlen = sa.Column(sa.Integer, nullable=False)
-    min_prefixlen = sa.Column(sa.Integer, nullable=False)
-    max_prefixlen = sa.Column(sa.Integer, nullable=False)
-    shared = sa.Column(sa.Boolean, nullable=False)
-    is_default = sa.Column(sa.Boolean, nullable=False)
-    default_quota = sa.Column(sa.Integer, nullable=True)
-    hash = sa.Column(sa.String(36), nullable=False, server_default='')
-    address_scope_id = sa.Column(sa.String(36), nullable=True)
-    prefixes = orm.relationship(SubnetPoolPrefix,
-                                backref='subnetpools',
-                                cascade='all, delete, delete-orphan',
-                                lazy='joined')
-
-
-class Network(model_base.HasStandardAttributes, model_base.BASEV2,
-              HasId, HasTenant):
-    """Represents a v2 neutron network."""
-
-    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
-    ports = orm.relationship(Port, backref='networks')
-    subnets = orm.relationship(
-        Subnet, backref=orm.backref('networks', lazy='joined'),
-        lazy="joined")
-    status = sa.Column(sa.String(16))
-    admin_state_up = sa.Column(sa.Boolean)
-    mtu = sa.Column(sa.Integer, nullable=True)
-    vlan_transparent = sa.Column(sa.Boolean, nullable=True)
-    rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC,
-                                    backref='network', lazy='joined',
-                                    cascade='all, delete, delete-orphan')
-    availability_zone_hints = sa.Column(sa.String(255))
-    dhcp_agents = orm.relationship(
-        'Agent', lazy='joined', viewonly=True,
-        secondary=agt.NetworkDhcpAgentBinding.__table__)
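
The allocation rule in the IPAvailabilityRange docstring above can be
illustrated with plain netaddr arithmetic; this sketch is illustrative only,
not the actual IPAM code:

import netaddr


def allocate(first_ip, last_ip):
    """Return (allocated_ip, remaining_range_or_None)."""
    allocated = netaddr.IPAddress(first_ip)
    if allocated == netaddr.IPAddress(last_ip):
        # First entry equals last entry: the range row would be deleted.
        return str(allocated), None
    # Otherwise the range simply shrinks from the front.
    return str(allocated), (str(allocated + 1), last_ip)


ip, remaining = allocate('10.0.0.2', '10.0.0.254')
assert ip == '10.0.0.2'
assert remaining == ('10.0.0.3', '10.0.0.254')
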
diff --git a/neutron/db/netmtu_db.py b/neutron/db/netmtu_db.py
deleted file mode 100644 (file)
index 7d6acf7..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.v2 import attributes
-from neutron.db import db_base_plugin_v2
-from neutron.extensions import netmtu
-
-
-class Netmtu_db_mixin(object):
-    """Mixin class to add network MTU methods to db_base_plugin_v2."""
-
-    def _extend_network_dict_mtu(self, network_res, network_db):
-        network_res[netmtu.MTU] = network_db.mtu
-        return network_res
-
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attributes.NETWORKS, ['_extend_network_dict_mtu'])
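
The register_dict_extend_funcs() call above hooks into plugin response
building: registered method names are looked up on the plugin and invoked for
every API dict built from a DB row. A stripped-down sketch of that registry
pattern (DictExtendRegistry and FakePlugin are illustrative, not the actual
NeutronDbPluginV2 implementation):

class DictExtendRegistry(object):
    """Toy version of the register_dict_extend_funcs() mechanism."""

    _dict_extend_functions = {}

    @classmethod
    def register_dict_extend_funcs(cls, resource, funcs):
        cls._dict_extend_functions.setdefault(resource, []).extend(funcs)

    def _apply_dict_extend_functions(self, resource, response, db_obj):
        for func in self._dict_extend_functions.get(resource, []):
            # Entries may be method names (strings) or plain callables.
            if isinstance(func, str):
                func = getattr(self, func)
            func(response, db_obj)


class FakePlugin(DictExtendRegistry):
    def _extend_network_dict_mtu(self, network_res, network_db):
        network_res['mtu'] = network_db['mtu']


FakePlugin.register_dict_extend_funcs('networks',
                                      ['_extend_network_dict_mtu'])

plugin = FakePlugin()
response = {'id': 'net-1'}
plugin._apply_dict_extend_functions('networks', response, {'mtu': 1450})
assert response['mtu'] == 1450
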
diff --git a/neutron/db/portbindings_base.py b/neutron/db/portbindings_base.py
deleted file mode 100644 (file)
index 8114d2c..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2013 UnitedStack Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.v2 import attributes
-from neutron.db import db_base_plugin_v2
-
-
-class PortBindingBaseMixin(object):
-    base_binding_dict = None
-
-    def _process_portbindings_create_and_update(self, context, port_data,
-                                                port):
-        self.extend_port_dict_binding(port, None)
-
-    def extend_port_dict_binding(self, port_res, port_db):
-        if self.base_binding_dict:
-            port_res.update(self.base_binding_dict)
-
-
-def _extend_port_dict_binding(plugin, port_res, port_db):
-    if not isinstance(plugin, PortBindingBaseMixin):
-        return
-    plugin.extend_port_dict_binding(port_res, port_db)
-
-
-def register_port_dict_function():
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attributes.PORTS, [_extend_port_dict_binding])
diff --git a/neutron/db/portbindings_db.py b/neutron/db/portbindings_db.py
deleted file mode 100644 (file)
index a8feddc..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-
-from neutron.api.v2 import attributes
-from neutron.db import db_base_plugin_v2
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.db import portbindings_base
-from neutron.extensions import portbindings
-
-
-class PortBindingPort(model_base.BASEV2):
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                        primary_key=True)
-    host = sa.Column(sa.String(255), nullable=False)
-    port = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref("portbinding",
-                            lazy='joined', uselist=False,
-                            cascade='delete'))
-
-
-class PortBindingMixin(portbindings_base.PortBindingBaseMixin):
-    extra_binding_dict = None
-
-    def _port_model_hook(self, context, original_model, query):
-        query = query.outerjoin(PortBindingPort,
-                                (original_model.id ==
-                                 PortBindingPort.port_id))
-        return query
-
-    def _port_result_filter_hook(self, query, filters):
-        values = filters and filters.get(portbindings.HOST_ID, [])
-        if not values:
-            return query
-        query = query.filter(PortBindingPort.host.in_(values))
-        return query
-
-    db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
-        models_v2.Port,
-        "portbindings_port",
-        '_port_model_hook',
-        None,
-        '_port_result_filter_hook')
-
-    def _process_portbindings_create_and_update(self, context, port_data,
-                                                port):
-        binding_profile = port.get(portbindings.PROFILE)
-        binding_profile_set = attributes.is_attr_set(binding_profile)
-        if not binding_profile_set and binding_profile is not None:
-            del port[portbindings.PROFILE]
-
-        binding_vnic = port.get(portbindings.VNIC_TYPE)
-        binding_vnic_set = attributes.is_attr_set(binding_vnic)
-        if not binding_vnic_set and binding_vnic is not None:
-            del port[portbindings.VNIC_TYPE]
-        # REVISIT(irenab) Add support for vnic_type for plugins that
-        # can handle more than one type.
-        # Currently implemented for ML2 plugin that does not use
-        # PortBindingMixin.
-
-        host = port_data.get(portbindings.HOST_ID)
-        host_set = attributes.is_attr_set(host)
-        with context.session.begin(subtransactions=True):
-            bind_port = context.session.query(
-                PortBindingPort).filter_by(port_id=port['id']).first()
-            if host_set:
-                if not bind_port:
-                    context.session.add(PortBindingPort(port_id=port['id'],
-                                                        host=host))
-                else:
-                    bind_port.host = host
-            else:
-                host = bind_port.host if bind_port else None
-        self._extend_port_dict_binding_host(port, host)
-
-    def get_port_host(self, context, port_id):
-        with context.session.begin(subtransactions=True):
-            bind_port = context.session.query(
-                PortBindingPort).filter_by(port_id=port_id).first()
-            return bind_port.host if bind_port else None
-
-    def _extend_port_dict_binding_host(self, port_res, host):
-        super(PortBindingMixin, self).extend_port_dict_binding(
-            port_res, None)
-        port_res[portbindings.HOST_ID] = host
-
-    def extend_port_dict_binding(self, port_res, port_db):
-        host = port_db.portbinding.host if port_db.portbinding else None
-        self._extend_port_dict_binding_host(port_res, host)
-
-
-def _extend_port_dict_binding(plugin, port_res, port_db):
-    if not isinstance(plugin, PortBindingMixin):
-        return
-    plugin.extend_port_dict_binding(port_res, port_db)
-
-
-# Register dict extend functions for ports
-db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-    attributes.PORTS, [_extend_port_dict_binding])
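
PortBindingMixin registers two hooks: the model hook outer-joins
PortBindingPort into every port query, and the result filter hook narrows by
host only when a binding host filter is supplied. A standalone SQLAlchemy
sketch of that join-then-filter shape (toy models and an in-memory sqlite
engine, not neutron's session machinery):

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext import declarative

Base = declarative.declarative_base()


class ToyPort(Base):
    __tablename__ = 'ports'
    id = sa.Column(sa.String(36), primary_key=True)


class ToyPortBinding(Base):
    __tablename__ = 'portbindingports'
    port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'),
                        primary_key=True)
    host = sa.Column(sa.String(255), nullable=False)


engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = orm.Session(bind=engine)
session.add_all([ToyPort(id='p1'), ToyPort(id='p2'),
                 ToyPortBinding(port_id='p1', host='compute-1')])
session.commit()

# Model hook: outer join so unbound ports remain visible.
query = session.query(ToyPort).outerjoin(
    ToyPortBinding, ToyPort.id == ToyPortBinding.port_id)
# Result filter hook: applied only when a host filter was requested.
hosts = ['compute-1']
if hosts:
    query = query.filter(ToyPortBinding.host.in_(hosts))
assert [p.id for p in query] == ['p1']
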
diff --git a/neutron/db/portsecurity_db.py b/neutron/db/portsecurity_db.py
deleted file mode 100644 (file)
index 343b537..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.v2 import attributes as attrs
-from neutron.common import utils
-from neutron.db import db_base_plugin_v2
-from neutron.db import portsecurity_db_common
-from neutron.extensions import portsecurity as psec
-
-
-class PortSecurityDbMixin(portsecurity_db_common.PortSecurityDbCommon):
-    # Register dict extend functions for ports and networks
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attrs.NETWORKS, ['_extend_port_security_dict'])
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attrs.PORTS, ['_extend_port_security_dict'])
-
-    def _extend_port_security_dict(self, response_data, db_data):
-        if ('port-security' in
-            getattr(self, 'supported_extension_aliases', [])):
-            psec_value = db_data['port_security'][psec.PORTSECURITY]
-            response_data[psec.PORTSECURITY] = psec_value
-
-    def _determine_port_security_and_has_ip(self, context, port):
-        """Returns a tuple of booleans (port_security_enabled, has_ip).
-
-        port_security_enabled is the value set on the port if one is
-        present; otherwise the value associated with the network is
-        returned. has_ip indicates whether the port has an IP address.
-        """
-        has_ip = self._ip_on_port(port)
-        # we don't apply security groups for dhcp and router ports
-        if port.get('device_owner') and utils.is_port_trusted(port):
-            return (False, has_ip)
-
-        if attrs.is_attr_set(port.get(psec.PORTSECURITY)):
-            port_security_enabled = port[psec.PORTSECURITY]
-
-        # If the port has an IP and security_groups are passed in,
-        # conveniently set port_security_enabled to True so the user
-        # doesn't also have to pass port_security_enabled=True when
-        # creating ports.
-        elif (has_ip and attrs.is_attr_set(port.get('security_groups'))):
-            port_security_enabled = True
-        else:
-            port_security_enabled = self._get_network_security_binding(
-                context, port['network_id'])
-
-        return (port_security_enabled, has_ip)
-
-    def _ip_on_port(self, port):
-        return bool(port.get('fixed_ips'))
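
The precedence in _determine_port_security_and_has_ip() is: trusted device
owners never get port security; an explicit per-port value wins; a port with
an IP plus security groups implies True; otherwise the network default
applies. A self-contained sketch of that decision order, using plain dicts
instead of neutron's attribute machinery:

def determine_port_security(port, network_default, is_trusted=False):
    has_ip = bool(port.get('fixed_ips'))
    if is_trusted:
        # dhcp and router ports: security groups are never applied.
        return False, has_ip
    if 'port_security_enabled' in port:
        # An explicit per-port value always wins.
        return port['port_security_enabled'], has_ip
    if has_ip and port.get('security_groups'):
        # Passing security groups implies port security.
        return True, has_ip
    return network_default, has_ip


assert determine_port_security(
    {'fixed_ips': ['10.0.0.5'], 'security_groups': ['sg-1']},
    network_default=False) == (True, True)
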
diff --git a/neutron/db/portsecurity_db_common.py b/neutron/db/portsecurity_db_common.py
deleted file mode 100644 (file)
index 6462227..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc
-
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import portsecurity as psec
-
-
-class PortSecurityBinding(model_base.BASEV2):
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                        primary_key=True)
-    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)
-
-    # Add a relationship to the Port model in order to be able to
-    # instruct SQLAlchemy to eagerly load the port security binding
-    port = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref("port_security", uselist=False,
-                            cascade='delete', lazy='joined'))
-
-
-class NetworkSecurityBinding(model_base.BASEV2):
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
-                           primary_key=True)
-    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)
-
-    # Add a relationship to the Network model in order to be able to
-    # instruct SQLAlchemy to eagerly load the default port security
-    # setting for ports on this network
-    network = orm.relationship(
-        models_v2.Network,
-        backref=orm.backref("port_security", uselist=False,
-                            cascade='delete', lazy='joined'))
-
-
-class PortSecurityDbCommon(object):
-    """Mixin class to add port security."""
-
-    def _process_network_port_security_create(
-        self, context, network_req, network_res):
-        with context.session.begin(subtransactions=True):
-            db = NetworkSecurityBinding(
-                network_id=network_res['id'],
-                port_security_enabled=network_req[psec.PORTSECURITY])
-            context.session.add(db)
-        network_res[psec.PORTSECURITY] = network_req[psec.PORTSECURITY]
-        return self._make_network_port_security_dict(db)
-
-    def _process_port_port_security_create(
-        self, context, port_req, port_res):
-        with context.session.begin(subtransactions=True):
-            db = PortSecurityBinding(
-                port_id=port_res['id'],
-                port_security_enabled=port_req[psec.PORTSECURITY])
-            context.session.add(db)
-        port_res[psec.PORTSECURITY] = port_req[psec.PORTSECURITY]
-        return self._make_port_security_dict(db)
-
-    def _get_network_security_binding(self, context, network_id):
-        try:
-            query = self._model_query(context, NetworkSecurityBinding)
-            binding = query.filter(
-                NetworkSecurityBinding.network_id == network_id).one()
-        except exc.NoResultFound:
-            raise psec.PortSecurityBindingNotFound()
-        return binding.port_security_enabled
-
-    def _get_port_security_binding(self, context, port_id):
-        try:
-            query = self._model_query(context, PortSecurityBinding)
-            binding = query.filter(
-                PortSecurityBinding.port_id == port_id).one()
-        except exc.NoResultFound:
-            raise psec.PortSecurityBindingNotFound()
-        return binding.port_security_enabled
-
-    def _process_port_port_security_update(
-        self, context, port_req, port_res):
-        if psec.PORTSECURITY in port_req:
-            port_security_enabled = port_req[psec.PORTSECURITY]
-        else:
-            return
-        try:
-            query = self._model_query(context, PortSecurityBinding)
-            port_id = port_res['id']
-            binding = query.filter(
-                PortSecurityBinding.port_id == port_id).one()
-
-            binding.port_security_enabled = port_security_enabled
-            port_res[psec.PORTSECURITY] = port_security_enabled
-        except exc.NoResultFound:
-            raise psec.PortSecurityBindingNotFound()
-
-    def _process_network_port_security_update(
-        self, context, network_req, network_res):
-        if psec.PORTSECURITY in network_req:
-            port_security_enabled = network_req[psec.PORTSECURITY]
-        else:
-            return
-        try:
-            query = self._model_query(context, NetworkSecurityBinding)
-            network_id = network_res['id']
-            binding = query.filter(
-                NetworkSecurityBinding.network_id == network_id).one()
-
-            binding.port_security_enabled = port_security_enabled
-            network_res[psec.PORTSECURITY] = port_security_enabled
-        except exc.NoResultFound:
-            raise psec.PortSecurityBindingNotFound()
-
-    def _make_network_port_security_dict(self, port_security, fields=None):
-        res = {'network_id': port_security['network_id'],
-               psec.PORTSECURITY: port_security.port_security_enabled}
-        return self._fields(res, fields)
-
-    def _make_port_security_dict(self, port, fields=None):
-        res = {'port_id': port['port_id'],
-               psec.PORTSECURITY: port.port_security_enabled}
-        return self._fields(res, fields)
diff --git a/neutron/db/qos/__init__.py b/neutron/db/qos/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/db/qos/api.py b/neutron/db/qos/api.py
deleted file mode 100644 (file)
index cdc4bb4..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_db import exception as oslo_db_exception
-from sqlalchemy.orm import exc as orm_exc
-
-from neutron.common import exceptions as n_exc
-from neutron.db import common_db_mixin as db
-from neutron.db.qos import models
-
-
-def create_policy_network_binding(context, policy_id, network_id):
-    try:
-        with context.session.begin(subtransactions=True):
-            db_obj = models.QosNetworkPolicyBinding(policy_id=policy_id,
-                                                    network_id=network_id)
-            context.session.add(db_obj)
-    except oslo_db_exception.DBReferenceError:
-        raise n_exc.NetworkQosBindingNotFound(net_id=network_id,
-                                              policy_id=policy_id)
-
-
-def delete_policy_network_binding(context, policy_id, network_id):
-    try:
-        with context.session.begin(subtransactions=True):
-            db_object = (db.model_query(context,
-                                        models.QosNetworkPolicyBinding)
-                         .filter_by(policy_id=policy_id,
-                                    network_id=network_id).one())
-            context.session.delete(db_object)
-    except orm_exc.NoResultFound:
-        raise n_exc.NetworkQosBindingNotFound(net_id=network_id,
-                                              policy_id=policy_id)
-
-
-def create_policy_port_binding(context, policy_id, port_id):
-    try:
-        with context.session.begin(subtransactions=True):
-            db_obj = models.QosPortPolicyBinding(policy_id=policy_id,
-                                                 port_id=port_id)
-            context.session.add(db_obj)
-    except oslo_db_exception.DBReferenceError:
-        raise n_exc.PortQosBindingNotFound(port_id=port_id,
-                                           policy_id=policy_id)
-
-
-def delete_policy_port_binding(context, policy_id, port_id):
-    try:
-        with context.session.begin(subtransactions=True):
-            db_object = (db.model_query(context, models.QosPortPolicyBinding)
-                         .filter_by(policy_id=policy_id,
-                                    port_id=port_id).one())
-            context.session.delete(db_object)
-    except orm_exc.NoResultFound:
-        raise n_exc.PortQosBindingNotFound(port_id=port_id,
-                                           policy_id=policy_id)
diff --git a/neutron/db/qos/models.py b/neutron/db/qos/models.py
deleted file mode 100755 (executable)
index f0014f6..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-
-from neutron.api.v2 import attributes as attrs
-from neutron.db import model_base
-from neutron.db import models_v2
-
-
-class QosPolicy(model_base.BASEV2, model_base.HasId, model_base.HasTenant):
-    __tablename__ = 'qos_policies'
-    name = sa.Column(sa.String(attrs.NAME_MAX_LEN))
-    description = sa.Column(sa.String(attrs.DESCRIPTION_MAX_LEN))
-    shared = sa.Column(sa.Boolean, nullable=False)
-
-
-class QosNetworkPolicyBinding(model_base.BASEV2):
-    __tablename__ = 'qos_network_policy_bindings'
-    policy_id = sa.Column(sa.String(36),
-                          sa.ForeignKey('qos_policies.id',
-                                        ondelete='CASCADE'),
-                          nullable=False,
-                          primary_key=True)
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey('networks.id',
-                                         ondelete='CASCADE'),
-                           nullable=False,
-                           unique=True,
-                           primary_key=True)
-    network = sa.orm.relationship(
-        models_v2.Network,
-        backref=sa.orm.backref("qos_policy_binding", uselist=False,
-                               cascade='delete', lazy='joined'))
-
-
-class QosPortPolicyBinding(model_base.BASEV2):
-    __tablename__ = 'qos_port_policy_bindings'
-    policy_id = sa.Column(sa.String(36),
-                          sa.ForeignKey('qos_policies.id',
-                                        ondelete='CASCADE'),
-                          nullable=False,
-                          primary_key=True)
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id',
-                                      ondelete='CASCADE'),
-                        nullable=False,
-                        unique=True,
-                        primary_key=True)
-    port = sa.orm.relationship(
-        models_v2.Port,
-        backref=sa.orm.backref("qos_policy_binding", uselist=False,
-                               cascade='delete', lazy='joined'))
-
-
-class QosBandwidthLimitRule(model_base.HasId, model_base.BASEV2):
-    __tablename__ = 'qos_bandwidth_limit_rules'
-    qos_policy_id = sa.Column(sa.String(36),
-                              sa.ForeignKey('qos_policies.id',
-                                            ondelete='CASCADE'),
-                              nullable=False,
-                              unique=True)
-    max_kbps = sa.Column(sa.Integer)
-    max_burst_kbps = sa.Column(sa.Integer)
diff --git a/neutron/db/quota/__init__.py b/neutron/db/quota/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/db/quota/api.py b/neutron/db/quota/api.py
deleted file mode 100644 (file)
index 8b109a1..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import datetime
-
-import sqlalchemy as sa
-from sqlalchemy.orm import exc as orm_exc
-from sqlalchemy import sql
-
-from neutron.db import api as db_api
-from neutron.db import common_db_mixin as common_db_api
-from neutron.db.quota import models as quota_models
-
-
-# Wrapper for utcnow - needed for mocking it in unit tests
-def utcnow():
-    return datetime.datetime.utcnow()
-
-
-class QuotaUsageInfo(collections.namedtuple(
-    'QuotaUsageInfo', ['resource', 'tenant_id', 'used', 'dirty'])):
-    """Information about resource quota usage."""
-
-
-class ReservationInfo(collections.namedtuple(
-    'ReservationInfo', ['reservation_id', 'tenant_id',
-                        'expiration', 'deltas'])):
-    """Information about a resource reservation."""
-
-
-def get_quota_usage_by_resource_and_tenant(context, resource, tenant_id,
-                                           lock_for_update=False):
-    """Return usage info for a given resource and tenant.
-
-    :param context: Request context
-    :param resource: Name of the resource
-    :param tenant_id: Tenant identifier
-    :param lock_for_update: if True sets a write-intent lock on the query
-    :returns: a QuotaUsageInfo instance
-    """
-
-    query = common_db_api.model_query(context, quota_models.QuotaUsage)
-    query = query.filter_by(resource=resource, tenant_id=tenant_id)
-
-    if lock_for_update:
-        query = query.with_lockmode('update')
-
-    result = query.first()
-    if not result:
-        return
-    return QuotaUsageInfo(result.resource,
-                          result.tenant_id,
-                          result.in_use,
-                          result.dirty)
-
-
-def get_quota_usage_by_resource(context, resource):
-    query = common_db_api.model_query(context, quota_models.QuotaUsage)
-    query = query.filter_by(resource=resource)
-    return [QuotaUsageInfo(item.resource,
-                           item.tenant_id,
-                           item.in_use,
-                           item.dirty) for item in query]
-
-
-def get_quota_usage_by_tenant_id(context, tenant_id):
-    query = common_db_api.model_query(context, quota_models.QuotaUsage)
-    query = query.filter_by(tenant_id=tenant_id)
-    return [QuotaUsageInfo(item.resource,
-                           item.tenant_id,
-                           item.in_use,
-                           item.dirty) for item in query]
-
-
-def set_quota_usage(context, resource, tenant_id,
-                    in_use=None, delta=False):
-    """Set resource quota usage.
-
-    :param context: instance of neutron context with db session
-    :param resource: name of the resource for which usage is being set
-    :param tenant_id: identifier of the tenant for which quota usage is
-                      being set
-    :param in_use: integer specifying the new quantity of used resources,
-                   or a delta to apply to the current usage
-    :param delta: specifies whether in_use is an absolute number
-                  or a delta (defaults to False)
-    """
-    with db_api.autonested_transaction(context.session):
-        query = common_db_api.model_query(context, quota_models.QuotaUsage)
-        query = query.filter_by(resource=resource).filter_by(
-            tenant_id=tenant_id)
-        usage_data = query.first()
-        if not usage_data:
-            # Must create entry
-            usage_data = quota_models.QuotaUsage(
-                resource=resource,
-                tenant_id=tenant_id)
-            context.session.add(usage_data)
-        # Perform explicit comparison with None as 0 is a valid value
-        if in_use is not None:
-            if delta:
-                in_use = usage_data.in_use + in_use
-            usage_data.in_use = in_use
-        # After an explicit update the dirty bit should always be reset
-        usage_data.dirty = False
-    return QuotaUsageInfo(usage_data.resource,
-                          usage_data.tenant_id,
-                          usage_data.in_use,
-                          usage_data.dirty)
-
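
The absolute-versus-delta handling in set_quota_usage() above reduces to a
small piece of arithmetic plus an unconditional dirty-bit reset. A hedged
sketch with a plain object standing in for the QuotaUsage row:

class FakeUsage(object):
    """Stand-in for a QuotaUsage row."""
    in_use = 5
    dirty = True


def apply_usage(usage, in_use=None, delta=False):
    # Explicit comparison with None, because 0 is a valid absolute value.
    if in_use is not None:
        usage.in_use = usage.in_use + in_use if delta else in_use
    # After an explicit update the dirty bit is always reset.
    usage.dirty = False
    return usage


usage = apply_usage(FakeUsage(), in_use=3, delta=True)
assert usage.in_use == 8 and usage.dirty is False
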
-
-def set_quota_usage_dirty(context, resource, tenant_id, dirty=True):
-    """Set quota usage dirty bit for a given resource and tenant.
-
-    :param resource: a resource for which quota usage is tracked
-    :param tenant_id: tenant identifier
-    :param dirty: the desired value for the dirty bit (defaults to True)
-    :returns: 1 if the quota usage data were updated, 0 otherwise.
-    """
-    query = common_db_api.model_query(context, quota_models.QuotaUsage)
-    query = query.filter_by(resource=resource).filter_by(tenant_id=tenant_id)
-    return query.update({'dirty': dirty})
-
-
-def set_resources_quota_usage_dirty(context, resources, tenant_id, dirty=True):
-    """Set quota usage dirty bit for a given tenant and multiple resources.
-
-    :param resources: list of resources for which the dirty bit is going
-                      to be set
-    :param tenant_id: tenant identifier
-    :param dirty: the desired value for the dirty bit (defaults to True)
-    :returns: the number of records for which the bit was actually set.
-    """
-    query = common_db_api.model_query(context, quota_models.QuotaUsage)
-    query = query.filter_by(tenant_id=tenant_id)
-    if resources:
-        query = query.filter(quota_models.QuotaUsage.resource.in_(resources))
-    # synchronize_session=False needed because of the IN condition
-    return query.update({'dirty': dirty}, synchronize_session=False)
-
-
-def set_all_quota_usage_dirty(context, resource, dirty=True):
-    """Set the dirty bit on quota usage for all tenants.
-
-    :param resource: the resource for which the dirty bit should be set
-    :returns: the number of tenants for which the dirty bit was
-              actually updated
-    """
-    query = common_db_api.model_query(context, quota_models.QuotaUsage)
-    query = query.filter_by(resource=resource)
-    return query.update({'dirty': dirty})
-
-
-def create_reservation(context, tenant_id, deltas, expiration=None):
-    # This method is usually called from within another transaction.
-    # Consider using begin_nested
-    with context.session.begin(subtransactions=True):
-        expiration = expiration or (utcnow() + datetime.timedelta(0, 120))
-        resv = quota_models.Reservation(tenant_id=tenant_id,
-                                        expiration=expiration)
-        context.session.add(resv)
-        for (resource, delta) in deltas.items():
-            context.session.add(
-                quota_models.ResourceDelta(resource=resource,
-                                           amount=delta,
-                                           reservation=resv))
-    return ReservationInfo(resv['id'],
-                           resv['tenant_id'],
-                           resv['expiration'],
-                           dict((delta.resource, delta.amount)
-                                for delta in resv.resource_deltas))
-
-
-def get_reservation(context, reservation_id):
-    query = context.session.query(quota_models.Reservation).filter_by(
-        id=reservation_id)
-    resv = query.first()
-    if not resv:
-        return
-    return ReservationInfo(resv['id'],
-                           resv['tenant_id'],
-                           resv['expiration'],
-                           dict((delta.resource, delta.amount)
-                                for delta in resv.resource_deltas))
-
-
-def remove_reservation(context, reservation_id, set_dirty=False):
-    delete_query = context.session.query(quota_models.Reservation).filter_by(
-        id=reservation_id)
-    # Not handling MultipleResultsFound as the query is filtering by primary
-    # key
-    try:
-        reservation = delete_query.one()
-    except orm_exc.NoResultFound:
-        # TODO(salv-orlando): Raise here and then handle the exception?
-        return
-    tenant_id = reservation.tenant_id
-    resources = [delta.resource for delta in reservation.resource_deltas]
-    num_deleted = delete_query.delete()
-    if set_dirty:
-        # quota_usage for all resources involved in this reservation must
-        # be marked as dirty
-        set_resources_quota_usage_dirty(context, resources, tenant_id)
-    return num_deleted
-
-
-def get_reservations_for_resources(context, tenant_id, resources,
-                                   expired=False):
-    """Retrieve total amount of reservations for specified resources.
-
-    :param context: Neutron context with db session
-    :param tenant_id: Tenant identifier
-    :param resources: Resources for which reserved amounts should be fetched
-    :param expired: False to fetch active reservations, True to fetch expired
-                    reservations (defaults to False)
-    :returns: a dictionary mapping each resource to its total reserved amount
-    """
-    if not resources:
-        # Do not waste time; an empty resource list has no reservations
-        return {}
-    now = utcnow()
-    resv_query = context.session.query(
-        quota_models.ResourceDelta.resource,
-        quota_models.Reservation.expiration,
-        sql.func.sum(quota_models.ResourceDelta.amount)).join(
-        quota_models.Reservation)
-    if expired:
-        exp_expr = (quota_models.Reservation.expiration < now)
-    else:
-        exp_expr = (quota_models.Reservation.expiration >= now)
-    resv_query = resv_query.filter(sa.and_(
-        quota_models.Reservation.tenant_id == tenant_id,
-        quota_models.ResourceDelta.resource.in_(resources),
-        exp_expr)).group_by(
-        quota_models.ResourceDelta.resource,
-        quota_models.Reservation.expiration)
-    return dict((resource, total_reserved)
-            for (resource, exp, total_reserved) in resv_query)
-
-
-def remove_expired_reservations(context, tenant_id=None):
-    now = utcnow()
-    resv_query = context.session.query(quota_models.Reservation)
-    if tenant_id:
-        tenant_expr = (quota_models.Reservation.tenant_id == tenant_id)
-    else:
-        tenant_expr = sql.true()
-    resv_query = resv_query.filter(sa.and_(
-        tenant_expr, quota_models.Reservation.expiration < now))
-    return resv_query.delete()
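
Taken together, the helpers above form a small reservation lifecycle:
create a reservation with per-resource deltas, look it up by id, and
remove it, optionally flagging the affected usage counters for a
recount. A minimal sketch of that flow, assuming ctx is an admin
neutron context with an active db session (not shown here); note that
the ReservationInfo tuple carries the reservation id as its first
field, as its construction above shows:

    from neutron.db.quota import api as quota_api

    resv = quota_api.create_reservation(
        ctx, tenant_id='tenant-a', deltas={'port': 2, 'network': 1})
    resv_id = resv[0]  # first field of the ReservationInfo tuple
    assert quota_api.get_reservation(ctx, resv_id) is not None
    # Cancellation path: delete the reservation and mark the usage rows
    # for the involved resources as dirty so they are recounted.
    quota_api.remove_reservation(ctx, resv_id, set_dirty=True)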
diff --git a/neutron/db/quota/driver.py b/neutron/db/quota/driver.py
deleted file mode 100644 (file)
index 8329166..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_db import api as oslo_db_api
-from oslo_log import log
-
-from neutron.common import exceptions
-from neutron.db import api as db_api
-from neutron.db import common_db_mixin as common_db
-from neutron.db.quota import api as quota_api
-from neutron.db.quota import models as quota_models
-
-LOG = log.getLogger(__name__)
-
-
-class DbQuotaDriver(object):
-    """Driver to perform necessary checks to enforce quotas and obtain quota
-    information.
-
-    The default driver utilizes the local database.
-    """
-
-    @staticmethod
-    def get_tenant_quotas(context, resources, tenant_id):
-        """Given a list of resources, retrieve the quotas for the given
-        tenant. If no limits are found for the specified tenant, the operation
-        returns the default limits.
-
-        :param context: The request context, for access checks.
-        :param resources: A dictionary of the registered resource keys.
-        :param tenant_id: The ID of the tenant to return quotas for.
-        :return dict: mapping of resource name to its quota limit
-        """
-
-        # init with defaults
-        tenant_quota = dict((key, resource.default)
-                            for key, resource in resources.items())
-
-        # update with tenant specific limits
-        q_qry = common_db.model_query(context, quota_models.Quota).filter_by(
-            tenant_id=tenant_id)
-        for item in q_qry:
-            tenant_quota[item['resource']] = item['limit']
-
-        return tenant_quota
-
-    @staticmethod
-    def delete_tenant_quota(context, tenant_id):
-        """Delete the quota entries for a given tenant_id.
-
-        After deletion, this tenant will use the default quota values in conf.
-        """
-        with context.session.begin():
-            tenant_quotas = context.session.query(quota_models.Quota)
-            tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id)
-            tenant_quotas.delete()
-
-    @staticmethod
-    def get_all_quotas(context, resources):
-        """Given a list of resources, retrieve the quotas for the all tenants.
-
-        :param context: The request context, for access checks.
-        :param resources: A dictionary of the registered resource keys.
-        :return quotas: list of dicts, one per tenant, each containing a
-        tenant_id key plus one key per resource
-        """
-        tenant_default = dict((key, resource.default)
-                              for key, resource in resources.items())
-
-        all_tenant_quotas = {}
-
-        for quota in context.session.query(quota_models.Quota):
-            tenant_id = quota['tenant_id']
-
-            # avoid setdefault() because we only want to copy when actually
-            # required
-            tenant_quota = all_tenant_quotas.get(tenant_id)
-            if tenant_quota is None:
-                tenant_quota = tenant_default.copy()
-                tenant_quota['tenant_id'] = tenant_id
-                all_tenant_quotas[tenant_id] = tenant_quota
-
-            tenant_quota[quota['resource']] = quota['limit']
-
-        # Convert values to a list as the caller expects an indexable
-        # iterable; Python 3's dict_values does not support indexing
-        return list(all_tenant_quotas.values())
-
-    @staticmethod
-    def update_quota_limit(context, tenant_id, resource, limit):
-        with context.session.begin():
-            tenant_quota = context.session.query(quota_models.Quota).filter_by(
-                tenant_id=tenant_id, resource=resource).first()
-
-            if tenant_quota:
-                tenant_quota.update({'limit': limit})
-            else:
-                tenant_quota = quota_models.Quota(tenant_id=tenant_id,
-                                                  resource=resource,
-                                                  limit=limit)
-                context.session.add(tenant_quota)
-
-    def _get_quotas(self, context, tenant_id, resources):
-        """Retrieves the quotas for specific resources.
-
-        A helper method which retrieves the quotas for the specific
-        resources identified by keys, as they apply to the current
-        context.
-
-        :param context: The request context, for access checks.
-        :param tenant_id: the tenant_id to check quota.
-        :param resources: A dictionary of the registered resources.
-        """
-        # Grab and return the quotas (without usages)
-        quotas = DbQuotaDriver.get_tenant_quotas(
-            context, resources, tenant_id)
-
-        return dict((k, v) for k, v in quotas.items())
-
-    def _handle_expired_reservations(self, context, tenant_id):
-        LOG.debug("Deleting expired reservations for tenant:%s" % tenant_id)
-        # Delete expired reservations (we don't want them to accrue
-        # in the database)
-        quota_api.remove_expired_reservations(
-            context, tenant_id=tenant_id)
-
-    @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
-                               retry_interval=0.1,
-                               inc_retry_interval=True,
-                               retry_on_request=True,
-                               exception_checker=db_api.is_deadlock)
-    def make_reservation(self, context, tenant_id, resources, deltas, plugin):
-        # Lock current reservation table
-        # NOTE(salv-orlando): This routine uses DB write locks.
-        # These locks are acquired by the count() method invoked on resources.
-        # Please put your shotguns aside.
-        # A non-locking algorithm for handling reservations is feasible, but
-        # it would require two database writes even when there are no
-        # concurrent reservations.
-        # For this reason it might be advisable to handle contention using
-        # this kind of lock, paying the cost of a write-set certification
-        # failure when a MySQL Galera cluster is employed. Also, this class
-        # of locks should be fine to use once support for sending "hotspot"
-        # writes to a single node becomes available.
-        requested_resources = deltas.keys()
-        with db_api.autonested_transaction(context.session):
-            # get_tenant_quotas needs as input a dictionary mapping resource
-            # names to BaseResource instances so that the default quotas can
-            # be retrieved
-            current_limits = self.get_tenant_quotas(
-                context, resources, tenant_id)
-            unlimited_resources = set([resource for (resource, limit) in
-                                       current_limits.items() if limit < 0])
-            # Do not even bother counting resources and calculating headroom
-            # for resources with unlimited quota
-            LOG.debug(("Resources %s have unlimited quota limit. It is not "
-                       "required to calculated headroom "),
-                      ",".join(unlimited_resources))
-            requested_resources = (set(requested_resources) -
-                                   unlimited_resources)
-            # Gather current usage information
-            # TODO(salv-orlando): calling count() for every resource triggers
-            # multiple queries on quota usage. This should be improved, however
-            # this is not an urgent matter as the REST API currently only
-            # allows allocation of a resource at a time
-            # NOTE: pass plugin too for compatibility with CountableResource
-            # instances
-            current_usages = dict(
-                (resource, resources[resource].count(
-                    context, plugin, tenant_id, resync_usage=False)) for
-                resource in requested_resources)
-            # Adjust for expired reservations. This is apparently cheaper
-            # than querying for active reservations every time and counting
-            # the overall quantity of reserved resources
-            expired_deltas = quota_api.get_reservations_for_resources(
-                context, tenant_id, requested_resources, expired=True)
-            # Verify that the request can be accepted with current limits
-            resources_over_limit = []
-            for resource in requested_resources:
-                expired_reservations = expired_deltas.get(resource, 0)
-                total_usage = current_usages[resource] - expired_reservations
-                res_headroom = current_limits[resource] - total_usage
-                LOG.debug(("Attempting to reserve %(delta)d items for "
-                           "resource %(resource)s. Total usage: %(total)d; "
-                           "quota limit: %(limit)d; headroom:%(headroom)d"),
-                          {'resource': resource,
-                           'delta': deltas[resource],
-                           'total': total_usage,
-                           'limit': current_limits[resource],
-                           'headroom': res_headroom})
-                if res_headroom < deltas[resource]:
-                    resources_over_limit.append(resource)
-                if expired_reservations:
-                    self._handle_expired_reservations(context, tenant_id)
-
-            if resources_over_limit:
-                raise exceptions.OverQuota(overs=sorted(resources_over_limit))
-            # Success, store the reservation
-            # TODO(salv-orlando): Make expiration time configurable
-            return quota_api.create_reservation(
-                context, tenant_id, deltas)
-
-    def commit_reservation(self, context, reservation_id):
-        # Do not mark resource usage as dirty. If a reservation is committed,
-        # then the relevant resources have been created. Usage data for these
-        # resources has therefore already been marked dirty.
-        quota_api.remove_reservation(context, reservation_id,
-                                     set_dirty=False)
-
-    def cancel_reservation(self, context, reservation_id):
-        # Mark resource usage as dirty so the next time both actual resources
-        # used and reserved will be recalculated
-        quota_api.remove_reservation(context, reservation_id,
-                                     set_dirty=True)
-
-    def limit_check(self, context, tenant_id, resources, values):
-        """Check simple quota limits.
-
-        For limits, i.e. those quotas for which there is no usage
-        synchronization function, this method checks that a set of
-        proposed values is permitted by the limit restriction.
-
-        If any of the proposed values is over the defined quota, an
-        OverQuota exception will be raised with the sorted list of the
-        resources which are too high.  Otherwise, the method returns
-        nothing.
-
-        :param context: The request context, for access checks.
-        :param tenant_id: The tenant_id to check the quota.
-        :param resources: A dictionary of the registered resources.
-        :param values: A dictionary of the values to check against the
-                       quota.
-        """
-
-        # Ensure no value is less than zero
-        unders = [key for key, val in values.items() if val < 0]
-        if unders:
-            raise exceptions.InvalidQuotaValue(unders=sorted(unders))
-
-        # Get the applicable quotas
-        quotas = self._get_quotas(context, tenant_id, resources)
-
-        # Check the quotas and construct a list of the resources that
-        # would be put over limit by the desired values
-        overs = [key for key, val in values.items()
-                 if quotas[key] >= 0 and quotas[key] < val]
-        if overs:
-            raise exceptions.OverQuota(overs=sorted(overs))
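
The reserve/commit split above is the driver's main contract: a plugin
first makes a reservation, creates the resource, and then commits (or
cancels) the reservation. A hedged sketch of that sequence, where ctx,
plugin and the resources registry are assumed to be supplied by the
quota engine rather than constructed here:

    from neutron.common import exceptions

    driver = DbQuotaDriver()
    try:
        resv = driver.make_reservation(
            ctx, 'tenant-a', resources, deltas={'port': 1}, plugin=plugin)
    except exceptions.OverQuota:
        raise  # reject the API request: not enough headroom
    # ... create the port here ...
    # resv[0] is the reservation id (first ReservationInfo field)
    driver.commit_reservation(ctx, resv[0])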
diff --git a/neutron/db/quota/models.py b/neutron/db/quota/models.py
deleted file mode 100644 (file)
index db79658..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy import sql
-
-from neutron.api.v2 import attributes as attr
-from neutron.db import model_base
-
-
-class ResourceDelta(model_base.BASEV2):
-    resource = sa.Column(sa.String(255), primary_key=True)
-    reservation_id = sa.Column(sa.String(36),
-                               sa.ForeignKey('reservations.id',
-                                             ondelete='CASCADE'),
-                               primary_key=True,
-                               nullable=False)
-    # Requested amount of resource
-    amount = sa.Column(sa.Integer)
-
-
-class Reservation(model_base.BASEV2, model_base.HasId):
-    tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN))
-    expiration = sa.Column(sa.DateTime())
-    resource_deltas = orm.relationship(ResourceDelta,
-                                       backref='reservation',
-                                       lazy="joined",
-                                       cascade='all, delete-orphan')
-
-
-class Quota(model_base.BASEV2, model_base.HasId, model_base.HasTenant):
-    """Represent a single quota override for a tenant.
-
-    If there is no row for a given tenant id and resource, then the
-    default for the deployment is used.
-    """
-    resource = sa.Column(sa.String(255))
-    limit = sa.Column(sa.Integer)
-
-
-class QuotaUsage(model_base.BASEV2):
-    """Represents the current usage for a given resource."""
-
-    resource = sa.Column(sa.String(255), nullable=False,
-                         primary_key=True, index=True)
-    tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), nullable=False,
-                          primary_key=True, index=True)
-    dirty = sa.Column(sa.Boolean, nullable=False, server_default=sql.false())
-
-    in_use = sa.Column(sa.Integer, nullable=False,
-                       server_default="0")
-    reserved = sa.Column(sa.Integer, nullable=False,
-                         server_default="0")
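
Given these models, usage bookkeeping reduces to ordinary SQLAlchemy
queries. A small sketch, assuming session is a SQLAlchemy session bound
to the neutron database:

    from neutron.db.quota import models as quota_models

    # Fetch the usage rows that were flagged for a recount.
    dirty_rows = (session.query(quota_models.QuotaUsage)
                  .filter_by(tenant_id='tenant-a', dirty=True)
                  .all())
    for usage in dirty_rows:
        print(usage.resource, usage.in_use, usage.reserved)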
diff --git a/neutron/db/quota_db.py b/neutron/db/quota_db.py
deleted file mode 100644 (file)
index 1ce75ae..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from neutron.db.quota import driver  # noqa
-
-# This module has been preserved for backward compatibility, and will be
-# deprecated in the future
-sys.modules[__name__] = sys.modules['neutron.db.quota.driver']
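
The sys.modules assignment above makes the legacy import path resolve
to the relocated module: any later "import neutron.db.quota_db" is
satisfied from the module cache. A standalone sketch of the same
aliasing pattern, with made-up module names:

    import sys
    import types

    new_home = types.ModuleType('new_home')
    new_home.answer = 42
    # Alias the old import path to the new module object.
    sys.modules['old_home'] = new_home

    import old_home  # served from the sys.modules cache
    assert old_home.answer == 42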
diff --git a/neutron/db/rbac_db_mixin.py b/neutron/db/rbac_db_mixin.py
deleted file mode 100644 (file)
index f2efb64..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from sqlalchemy.orm import exc
-
-from neutron.callbacks import events
-from neutron.callbacks import exceptions as c_exc
-from neutron.callbacks import registry
-from neutron.common import exceptions as n_exc
-from neutron.db import common_db_mixin
-from neutron.db import rbac_db_models as models
-from neutron.extensions import rbac as ext_rbac
-
-# resource name used in callbacks
-RBAC_POLICY = 'rbac-policy'
-
-
-class RbacPluginMixin(common_db_mixin.CommonDbMixin):
-    """Plugin mixin that implements the RBAC DB operations."""
-
-    object_type_cache = {}
-    supported_extension_aliases = ['rbac-policies']
-
-    def create_rbac_policy(self, context, rbac_policy):
-        e = rbac_policy['rbac_policy']
-        try:
-            registry.notify(RBAC_POLICY, events.BEFORE_CREATE, self,
-                            context=context, object_type=e['object_type'],
-                            policy=e)
-        except c_exc.CallbackFailure as ex:
-            # Use a distinct name: 'e' already holds the policy dict above
-            raise n_exc.InvalidInput(error_message=ex)
-        dbmodel = models.get_type_model_map()[e['object_type']]
-        with context.session.begin(subtransactions=True):
-            db_entry = dbmodel(object_id=e['object_id'],
-                               target_tenant=e['target_tenant'],
-                               action=e['action'],
-                               tenant_id=e['tenant_id'])
-            context.session.add(db_entry)
-        return self._make_rbac_policy_dict(db_entry)
-
-    def _make_rbac_policy_dict(self, db_entry, fields=None):
-        res = {f: db_entry[f] for f in ('id', 'tenant_id', 'target_tenant',
-                                        'action', 'object_id')}
-        res['object_type'] = db_entry.object_type
-        return self._fields(res, fields)
-
-    def update_rbac_policy(self, context, id, rbac_policy):
-        pol = rbac_policy['rbac_policy']
-        entry = self._get_rbac_policy(context, id)
-        object_type = entry['object_type']
-        try:
-            registry.notify(RBAC_POLICY, events.BEFORE_UPDATE, self,
-                            context=context, policy=entry,
-                            object_type=object_type, policy_update=pol)
-        except c_exc.CallbackFailure as ex:
-            raise ext_rbac.RbacPolicyInUse(object_id=entry['object_id'],
-                                           details=ex)
-        with context.session.begin(subtransactions=True):
-            entry.update(pol)
-        return self._make_rbac_policy_dict(entry)
-
-    def delete_rbac_policy(self, context, id):
-        entry = self._get_rbac_policy(context, id)
-        object_type = entry['object_type']
-        try:
-            registry.notify(RBAC_POLICY, events.BEFORE_DELETE, self,
-                            context=context, object_type=object_type,
-                            policy=entry)
-        except c_exc.CallbackFailure as ex:
-            raise ext_rbac.RbacPolicyInUse(object_id=entry['object_id'],
-                                           details=ex)
-        with context.session.begin(subtransactions=True):
-            context.session.delete(entry)
-        self.object_type_cache.pop(id, None)
-
-    def _get_rbac_policy(self, context, id):
-        object_type = self._get_object_type(context, id)
-        dbmodel = models.get_type_model_map()[object_type]
-        try:
-            return self._model_query(context,
-                                     dbmodel).filter(dbmodel.id == id).one()
-        except exc.NoResultFound:
-            raise ext_rbac.RbacPolicyNotFound(id=id, object_type=object_type)
-
-    def get_rbac_policy(self, context, id, fields=None):
-        return self._make_rbac_policy_dict(
-            self._get_rbac_policy(context, id), fields=fields)
-
-    def get_rbac_policies(self, context, filters=None, fields=None,
-                          sorts=None, limit=None, page_reverse=False):
-        model = common_db_mixin.UnionModel(
-            models.get_type_model_map(), 'object_type')
-        return self._get_collection(
-            context, model, self._make_rbac_policy_dict, filters=filters,
-            fields=fields, sorts=sorts, limit=limit, page_reverse=page_reverse)
-
-    def _get_object_type(self, context, entry_id):
-        """Scans all RBAC tables for an ID to figure out the type.
-
-        This will be an expensive operation as the number of RBAC tables grows.
-        The result is cached since object types cannot be updated for a policy.
-        """
-        if entry_id in self.object_type_cache:
-            return self.object_type_cache[entry_id]
-        for otype, model in models.get_type_model_map().items():
-            if (context.session.query(model).
-                    filter(model.id == entry_id).first()):
-                self.object_type_cache[entry_id] = otype
-                return otype
-        raise ext_rbac.RbacPolicyNotFound(id=entry_id, object_type='unknown')
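
For reference, create_rbac_policy() above unpacks a request body of the
following shape. The values here are placeholders; 'network' and
'access_as_shared' are the object type and action defined by the
NetworkRBAC model further below:

    policy = {
        'rbac_policy': {
            'object_type': 'network',
            'object_id': 'NETWORK-UUID',
            'target_tenant': '*',  # wildcard: share with every tenant
            'action': 'access_as_shared',
            'tenant_id': 'OWNER-TENANT-ID',
        }
    }
    # plugin.create_rbac_policy(ctx, policy) validates the action via
    # the model's @validates hook and persists the entry.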
diff --git a/neutron/db/rbac_db_models.py b/neutron/db/rbac_db_models.py
deleted file mode 100644 (file)
index 956f05f..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import sqlalchemy as sa
-from sqlalchemy.orm import validates
-
-from neutron._i18n import _
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as n_exc
-from neutron.db import model_base
-
-
-class InvalidActionForType(n_exc.InvalidInput):
-    message = _("Invalid action '%(action)s' for object type "
-                "'%(object_type)s'. Valid actions: %(valid_actions)s")
-
-
-class RBACColumns(model_base.HasId, model_base.HasTenant):
-    """Mixin that object-specific RBAC tables should inherit.
-
-    All RBAC tables should inherit directly from this one because
-    the RBAC code uses the __subclasses__() method to discover the
-    RBAC types.
-    """
-
-    # The target_tenant is the subject that the policy will affect. This may
-    # also be the wildcard '*' to indicate all tenants, or it may become a
-    # role if neutron gains better integration with keystone
-    target_tenant = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN),
-                              nullable=False)
-
-    action = sa.Column(sa.String(255), nullable=False)
-
-    @abc.abstractproperty
-    def object_type(self):
-        # This determines the name that users will use in the API
-        # to reference the type. Subclasses should set their own.
-        pass
-
-    __table_args__ = (
-        sa.UniqueConstraint('target_tenant', 'object_id', 'action'),
-        model_base.BASEV2.__table_args__
-    )
-
-    @validates('action')
-    def _validate_action(self, key, action):
-        if action not in self.get_valid_actions():
-            raise InvalidActionForType(
-                action=action, object_type=self.object_type,
-                valid_actions=self.get_valid_actions())
-        return action
-
-    @abc.abstractmethod
-    def get_valid_actions(self):
-        # Object tables need to override this to return an iterable
-        # of the actions valid for their RBAC entries
-        pass
-
-
-def get_type_model_map():
-    return {table.object_type: table for table in RBACColumns.__subclasses__()}
-
-
-class NetworkRBAC(RBACColumns, model_base.BASEV2):
-    """RBAC table for networks."""
-
-    object_id = sa.Column(sa.String(36),
-                          sa.ForeignKey('networks.id', ondelete="CASCADE"),
-                          nullable=False)
-    object_type = 'network'
-
-    def get_valid_actions(self):
-        return ('access_as_shared',)
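
get_type_model_map() relies on the fact that every RBAC table inherits
directly from RBACColumns, so __subclasses__() acts as a registry. A
self-contained sketch of the pattern, with illustrative class names:

    class Base(object):
        object_type = None

    class NetworkEntry(Base):
        object_type = 'network'

    class QosPolicyEntry(Base):
        object_type = 'qos-policy'

    def type_map():
        # Only direct subclasses are discovered, mirroring the note in
        # the RBACColumns docstring above.
        return {cls.object_type: cls for cls in Base.__subclasses__()}

    assert type_map()['network'] is NetworkEntry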
diff --git a/neutron/db/securitygroups_db.py b/neutron/db/securitygroups_db.py
deleted file mode 100644 (file)
index f462f14..0000000
+++ /dev/null
@@ -1,785 +0,0 @@
-# Copyright 2012 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc
-from sqlalchemy.orm import scoped_session
-
-from neutron._i18n import _
-from neutron.api.v2 import attributes
-from neutron.callbacks import events
-from neutron.callbacks import exceptions
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants
-from neutron.common import utils
-from neutron.db import api as db_api
-from neutron.db import db_base_plugin_v2
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import securitygroup as ext_sg
-
-
-LOG = logging.getLogger(__name__)
-
-IP_PROTOCOL_MAP = {constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP,
-                   constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP,
-                   constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP,
-                   constants.PROTO_NAME_ICMP_V6: constants.PROTO_NUM_ICMP_V6}
-
-
-class SecurityGroup(model_base.HasStandardAttributes, model_base.BASEV2,
-                    model_base.HasId, model_base.HasTenant):
-    """Represents a v2 neutron security group."""
-
-    name = sa.Column(sa.String(attributes.NAME_MAX_LEN))
-    description = sa.Column(sa.String(attributes.DESCRIPTION_MAX_LEN))
-
-
-class DefaultSecurityGroup(model_base.BASEV2):
-    __tablename__ = 'default_security_group'
-
-    tenant_id = sa.Column(sa.String(attributes.TENANT_ID_MAX_LEN),
-                          primary_key=True, nullable=False)
-    security_group_id = sa.Column(sa.String(36),
-                                  sa.ForeignKey("securitygroups.id",
-                                                ondelete="CASCADE"),
-                                  nullable=False)
-    security_group = orm.relationship(
-        SecurityGroup, lazy='joined',
-        backref=orm.backref('default_security_group', cascade='all,delete'),
-        primaryjoin="SecurityGroup.id==DefaultSecurityGroup.security_group_id",
-    )
-
-
-class SecurityGroupPortBinding(model_base.BASEV2):
-    """Represents binding between neutron ports and security profiles."""
-
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey("ports.id",
-                                      ondelete='CASCADE'),
-                        primary_key=True)
-    security_group_id = sa.Column(sa.String(36),
-                                  sa.ForeignKey("securitygroups.id"),
-                                  primary_key=True)
-
-    # Add a relationship to the Port model in order to instruct SQLAlchemy to
-    # eagerly load security group bindings
-    ports = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref("security_groups",
-                            lazy='joined', cascade='delete'))
-
-
-class SecurityGroupRule(model_base.HasStandardAttributes, model_base.BASEV2,
-                        model_base.HasId, model_base.HasTenant):
-    """Represents a v2 neutron security group rule."""
-
-    security_group_id = sa.Column(sa.String(36),
-                                  sa.ForeignKey("securitygroups.id",
-                                                ondelete="CASCADE"),
-                                  nullable=False)
-
-    remote_group_id = sa.Column(sa.String(36),
-                                sa.ForeignKey("securitygroups.id",
-                                              ondelete="CASCADE"),
-                                nullable=True)
-
-    direction = sa.Column(sa.Enum('ingress', 'egress',
-                                  name='securitygrouprules_direction'))
-    ethertype = sa.Column(sa.String(40))
-    protocol = sa.Column(sa.String(40))
-    port_range_min = sa.Column(sa.Integer)
-    port_range_max = sa.Column(sa.Integer)
-    remote_ip_prefix = sa.Column(sa.String(255))
-    security_group = orm.relationship(
-        SecurityGroup,
-        backref=orm.backref('rules', cascade='all,delete', lazy='joined'),
-        primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id")
-    source_group = orm.relationship(
-        SecurityGroup,
-        backref=orm.backref('source_rules', cascade='all,delete'),
-        primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id")
-
-
-class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
-    """Mixin class to add security group to db_base_plugin_v2."""
-
-    __native_bulk_support = True
-
-    def create_security_group_bulk(self, context, security_group_rule):
-        return self._create_bulk('security_group', context,
-                                 security_group_rule)
-
-    def create_security_group(self, context, security_group, default_sg=False):
-        """Create security group.
-
-        If default_sg is True, this creates the default security group for
-        the given tenant if it does not already exist.
-        """
-        s = security_group['security_group']
-        kwargs = {
-            'context': context,
-            'security_group': s,
-            'is_default': default_sg,
-        }
-        # NOTE(armax): a callback exception here will prevent the request
-        # from being processed. This is a hook point for backend's validation;
-        # we raise to propagate the reason for the failure.
-        try:
-            registry.notify(
-                resources.SECURITY_GROUP, events.BEFORE_CREATE, self,
-                **kwargs)
-        except exceptions.CallbackFailure as e:
-            raise ext_sg.SecurityGroupConflict(reason=e)
-
-        tenant_id = s['tenant_id']
-
-        if not default_sg:
-            self._ensure_default_security_group(context, tenant_id)
-
-        with db_api.autonested_transaction(context.session):
-            security_group_db = SecurityGroup(id=s.get('id') or (
-                                              uuidutils.generate_uuid()),
-                                              description=s['description'],
-                                              tenant_id=tenant_id,
-                                              name=s['name'])
-            context.session.add(security_group_db)
-            if default_sg:
-                context.session.add(DefaultSecurityGroup(
-                    security_group=security_group_db,
-                    tenant_id=security_group_db['tenant_id']))
-            for ethertype in ext_sg.sg_supported_ethertypes:
-                if default_sg:
-                    # Allow intercommunication
-                    ingress_rule = SecurityGroupRule(
-                        id=uuidutils.generate_uuid(), tenant_id=tenant_id,
-                        security_group=security_group_db,
-                        direction='ingress',
-                        ethertype=ethertype,
-                        source_group=security_group_db)
-                    context.session.add(ingress_rule)
-
-                egress_rule = SecurityGroupRule(
-                    id=uuidutils.generate_uuid(), tenant_id=tenant_id,
-                    security_group=security_group_db,
-                    direction='egress',
-                    ethertype=ethertype)
-                context.session.add(egress_rule)
-
-        secgroup_dict = self._make_security_group_dict(security_group_db)
-
-        kwargs['security_group'] = secgroup_dict
-        registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
-                        **kwargs)
-        return secgroup_dict
-
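
For reference, create_security_group() above consumes a body of the
following shape (values are placeholders):

    security_group = {
        'security_group': {
            'name': 'web',
            'description': 'web tier',
            'tenant_id': 'OWNER-TENANT-ID',
        }
    }
    # plugin.create_security_group(ctx, security_group) returns the dict
    # produced by _make_security_group_dict(), including the egress rules
    # created above for each supported ethertype.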
-    def get_security_groups(self, context, filters=None, fields=None,
-                            sorts=None, limit=None,
-                            marker=None, page_reverse=False, default_sg=False):
-
-        # If default_sg is True, do not call _ensure_default_security_group()
-        # so that this method can safely be invoked recursively from it.
-        # context.tenant_id is checked because the unit tests do not
-        # explicitly set the context on GETs.
-        # TODO(arosen): context handling can probably be improved here.
-        if not default_sg and context.tenant_id:
-            tenant_id = filters.get('tenant_id')
-            if tenant_id:
-                tenant_id = tenant_id[0]
-            else:
-                tenant_id = context.tenant_id
-            self._ensure_default_security_group(context, tenant_id)
-        marker_obj = self._get_marker_obj(context, 'security_group', limit,
-                                          marker)
-        return self._get_collection(context,
-                                    SecurityGroup,
-                                    self._make_security_group_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts,
-                                    limit=limit, marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def get_security_groups_count(self, context, filters=None):
-        return self._get_collection_count(context, SecurityGroup,
-                                          filters=filters)
-
-    def get_security_group(self, context, id, fields=None, tenant_id=None):
-        """Tenant id is given to handle the case when creating a security
-        group rule on behalf of another use.
-        """
-
-        if tenant_id:
-            tmp_context_tenant_id = context.tenant_id
-            context.tenant_id = tenant_id
-
-        try:
-            with context.session.begin(subtransactions=True):
-                ret = self._make_security_group_dict(self._get_security_group(
-                                                     context, id), fields)
-                ret['security_group_rules'] = self.get_security_group_rules(
-                    context, {'security_group_id': [id]})
-        finally:
-            if tenant_id:
-                context.tenant_id = tmp_context_tenant_id
-        return ret
-
-    def _get_security_group(self, context, id):
-        try:
-            query = self._model_query(context, SecurityGroup)
-            sg = query.filter(SecurityGroup.id == id).one()
-
-        except exc.NoResultFound:
-            raise ext_sg.SecurityGroupNotFound(id=id)
-        return sg
-
-    def delete_security_group(self, context, id):
-        filters = {'security_group_id': [id]}
-        ports = self._get_port_security_group_bindings(context, filters)
-        if ports:
-            raise ext_sg.SecurityGroupInUse(id=id)
-        # confirm security group exists
-        sg = self._get_security_group(context, id)
-
-        if sg['name'] == 'default' and not context.is_admin:
-            raise ext_sg.SecurityGroupCannotRemoveDefault()
-        kwargs = {
-            'context': context,
-            'security_group_id': id,
-            'security_group': sg,
-        }
-        # NOTE(armax): a callback exception here will prevent the request
-        # from being processed. This is a hook point for backend's validation;
-        # we raise to propagate the reason for the failure.
-        try:
-            registry.notify(
-                resources.SECURITY_GROUP, events.BEFORE_DELETE, self,
-                **kwargs)
-        except exceptions.CallbackFailure as e:
-            reason = _('cannot be deleted due to %s') % e
-            raise ext_sg.SecurityGroupInUse(id=id, reason=reason)
-
-        with context.session.begin(subtransactions=True):
-            context.session.delete(sg)
-
-        kwargs.pop('security_group')
-        registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE, self,
-                        **kwargs)
-
-    def update_security_group(self, context, id, security_group):
-        s = security_group['security_group']
-
-        kwargs = {
-            'context': context,
-            'security_group_id': id,
-            'security_group': s,
-        }
-        # NOTE(armax): a callback exception here will prevent the request
-        # from being processed. This is a hook point for backend's validation;
-        # we raise to propagate the reason for the failure.
-        try:
-            registry.notify(
-                resources.SECURITY_GROUP, events.BEFORE_UPDATE, self,
-                **kwargs)
-        except exceptions.CallbackFailure as e:
-            raise ext_sg.SecurityGroupConflict(reason=e)
-
-        with context.session.begin(subtransactions=True):
-            sg = self._get_security_group(context, id)
-            if sg['name'] == 'default' and 'name' in s:
-                raise ext_sg.SecurityGroupCannotUpdateDefault()
-            sg.update(s)
-        sg_dict = self._make_security_group_dict(sg)
-
-        kwargs['security_group'] = sg_dict
-        registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
-                        **kwargs)
-        return sg_dict
-
-    def _make_security_group_dict(self, security_group, fields=None):
-        res = {'id': security_group['id'],
-               'name': security_group['name'],
-               'tenant_id': security_group['tenant_id'],
-               'description': security_group['description']}
-        res['security_group_rules'] = [self._make_security_group_rule_dict(r)
-                                       for r in security_group.rules]
-        return self._fields(res, fields)
-
-    def _make_security_group_binding_dict(self, security_group, fields=None):
-        res = {'port_id': security_group['port_id'],
-               'security_group_id': security_group['security_group_id']}
-        return self._fields(res, fields)
-
-    def _create_port_security_group_binding(self, context, port_id,
-                                            security_group_id):
-        with context.session.begin(subtransactions=True):
-            db = SecurityGroupPortBinding(port_id=port_id,
-                                          security_group_id=security_group_id)
-            context.session.add(db)
-
-    def _get_port_security_group_bindings(self, context,
-                                          filters=None, fields=None):
-        return self._get_collection(context,
-                                    SecurityGroupPortBinding,
-                                    self._make_security_group_binding_dict,
-                                    filters=filters, fields=fields)
-
-    def _delete_port_security_group_bindings(self, context, port_id):
-        query = self._model_query(context, SecurityGroupPortBinding)
-        bindings = query.filter(
-            SecurityGroupPortBinding.port_id == port_id)
-        with context.session.begin(subtransactions=True):
-            for binding in bindings:
-                context.session.delete(binding)
-
-    def create_security_group_rule_bulk(self, context, security_group_rules):
-        return self._create_bulk('security_group_rule', context,
-                                 security_group_rules)
-
-    def create_security_group_rule_bulk_native(self, context,
-                                               security_group_rules):
-        rules = security_group_rules['security_group_rules']
-        scoped_session(context.session)
-        security_group_id = self._validate_security_group_rules(
-            context, security_group_rules)
-        with context.session.begin(subtransactions=True):
-            if not self.get_security_group(context, security_group_id):
-                raise ext_sg.SecurityGroupNotFound(id=security_group_id)
-
-            self._check_for_duplicate_rules(context, rules)
-            ret = []
-            for rule_dict in rules:
-                res_rule_dict = self._create_security_group_rule(
-                    context, rule_dict, validate=False)
-                ret.append(res_rule_dict)
-            return ret
-
-    def create_security_group_rule(self, context, security_group_rule):
-        return self._create_security_group_rule(context, security_group_rule)
-
-    def _create_security_group_rule(self, context, security_group_rule,
-                                    validate=True):
-        if validate:
-            self._validate_security_group_rule(context, security_group_rule)
-            self._check_for_duplicate_rules_in_db(context, security_group_rule)
-
-        rule_dict = security_group_rule['security_group_rule']
-        kwargs = {
-            'context': context,
-            'security_group_rule': rule_dict
-        }
-        # NOTE(armax): a callback exception here will prevent the request
-        # from being processed. This is a hook point for backend's validation;
-        # we raise to propagate the reason for the failure.
-        try:
-            registry.notify(
-                resources.SECURITY_GROUP_RULE, events.BEFORE_CREATE, self,
-                **kwargs)
-        except exceptions.CallbackFailure as e:
-            raise ext_sg.SecurityGroupConflict(reason=e)
-
-        with context.session.begin(subtransactions=True):
-            db = SecurityGroupRule(
-                id=(rule_dict.get('id') or uuidutils.generate_uuid()),
-                tenant_id=rule_dict['tenant_id'],
-                security_group_id=rule_dict['security_group_id'],
-                direction=rule_dict['direction'],
-                remote_group_id=rule_dict.get('remote_group_id'),
-                ethertype=rule_dict['ethertype'],
-                protocol=rule_dict['protocol'],
-                port_range_min=rule_dict['port_range_min'],
-                port_range_max=rule_dict['port_range_max'],
-                remote_ip_prefix=rule_dict.get('remote_ip_prefix'))
-            context.session.add(db)
-        res_rule_dict = self._make_security_group_rule_dict(db)
-        kwargs['security_group_rule'] = res_rule_dict
-        registry.notify(
-            resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self,
-            **kwargs)
-        return res_rule_dict
-
-    def _get_ip_proto_number(self, protocol):
-        if protocol is None:
-            return
-        # According to bug 1381379, protocol is always stored as a string to
-        # avoid problems with comparing int and string in PostgreSQL. Here
-        # the string is converted back to an int so it can be used as before.
-        return int(IP_PROTOCOL_MAP.get(protocol, protocol))
-
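
A few hedged examples of the conversion described above:

    # _get_ip_proto_number('tcp')  -> 6    (name mapped via IP_PROTOCOL_MAP)
    # _get_ip_proto_number('132')  -> 132  (numeric string cast to int)
    # _get_ip_proto_number(None)   -> None (protocol not set)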
-    def _validate_port_range(self, rule):
-        """Check that port_range is valid."""
-        if (rule['port_range_min'] is None and
-            rule['port_range_max'] is None):
-            return
-        if not rule['protocol']:
-            raise ext_sg.SecurityGroupProtocolRequiredWithPorts()
-        ip_proto = self._get_ip_proto_number(rule['protocol'])
-        if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]:
-            if rule['port_range_min'] == 0 or rule['port_range_max'] == 0:
-                raise ext_sg.SecurityGroupInvalidPortValue(port=0)
-            elif (rule['port_range_min'] is not None and
-                rule['port_range_max'] is not None and
-                rule['port_range_min'] <= rule['port_range_max']):
-                pass
-            else:
-                raise ext_sg.SecurityGroupInvalidPortRange()
-        elif ip_proto == constants.PROTO_NUM_ICMP:
-            for attr, field in [('port_range_min', 'type'),
-                                ('port_range_max', 'code')]:
-                if rule[attr] is not None and not (0 <= rule[attr] <= 255):
-                    raise ext_sg.SecurityGroupInvalidIcmpValue(
-                        field=field, attr=attr, value=rule[attr])
-            if (rule['port_range_min'] is None and
-                    rule['port_range_max'] is not None):
-                raise ext_sg.SecurityGroupMissingIcmpType(
-                    value=rule['port_range_max'])
-
-    def _validate_ethertype_and_protocol(self, rule):
-        """Check if given ethertype and  protocol are valid or not"""
-        if rule['protocol'] == constants.PROTO_NAME_ICMP_V6:
-            if rule['ethertype'] == constants.IPv4:
-                raise ext_sg.SecurityGroupEthertypeConflictWithProtocol(
-                        ethertype=rule['ethertype'], protocol=rule['protocol'])
-
-    def _validate_single_tenant_and_group(self, security_group_rules):
-        """Check that all rules belong to the same security group and tenant
-        """
-        sg_groups = set()
-        tenants = set()
-        for rule_dict in security_group_rules['security_group_rules']:
-            rule = rule_dict['security_group_rule']
-            sg_groups.add(rule['security_group_id'])
-            if len(sg_groups) > 1:
-                raise ext_sg.SecurityGroupNotSingleGroupRules()
-
-            tenants.add(rule['tenant_id'])
-            if len(tenants) > 1:
-                raise ext_sg.SecurityGroupRulesNotSingleTenant()
-        return sg_groups.pop()
-
-    def _validate_security_group_rule(self, context, security_group_rule):
-        rule = security_group_rule['security_group_rule']
-        self._validate_port_range(rule)
-        self._validate_ip_prefix(rule)
-        self._validate_ethertype_and_protocol(rule)
-
-        if rule['remote_ip_prefix'] and rule['remote_group_id']:
-            raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix()
-
-        remote_group_id = rule['remote_group_id']
-        # Check that remote_group_id exists for tenant
-        if remote_group_id:
-            self.get_security_group(context, remote_group_id,
-                                    tenant_id=rule['tenant_id'])
-
-        security_group_id = rule['security_group_id']
-
-        # Confirm that the tenant has permission
-        # to add rules to this security group.
-        self.get_security_group(context, security_group_id,
-                                tenant_id=rule['tenant_id'])
-        return security_group_id
-
-    def _validate_security_group_rules(self, context, security_group_rules):
-        sg_id = self._validate_single_tenant_and_group(security_group_rules)
-        for rule in security_group_rules['security_group_rules']:
-            self._validate_security_group_rule(context, rule)
-        return sg_id
-
-    def _make_security_group_rule_dict(self, security_group_rule, fields=None):
-        res = {'id': security_group_rule['id'],
-               'tenant_id': security_group_rule['tenant_id'],
-               'security_group_id': security_group_rule['security_group_id'],
-               'ethertype': security_group_rule['ethertype'],
-               'direction': security_group_rule['direction'],
-               'protocol': security_group_rule['protocol'],
-               'port_range_min': security_group_rule['port_range_min'],
-               'port_range_max': security_group_rule['port_range_max'],
-               'remote_ip_prefix': security_group_rule['remote_ip_prefix'],
-               'remote_group_id': security_group_rule['remote_group_id']}
-
-        return self._fields(res, fields)
-
-    def _make_security_group_rule_filter_dict(self, security_group_rule):
-        sgr = security_group_rule['security_group_rule']
-        res = {'tenant_id': [sgr['tenant_id']],
-               'security_group_id': [sgr['security_group_id']],
-               'direction': [sgr['direction']]}
-
-        include_if_present = ['protocol', 'port_range_max', 'port_range_min',
-                              'ethertype', 'remote_ip_prefix',
-                              'remote_group_id']
-        for key in include_if_present:
-            value = sgr.get(key)
-            if value:
-                res[key] = [value]
-        return res
-
-    def _check_for_duplicate_rules(self, context, security_group_rules):
-        for i in security_group_rules:
-            found_self = False
-            for j in security_group_rules:
-                if i['security_group_rule'] == j['security_group_rule']:
-                    if found_self:
-                        raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i)
-                    found_self = True
-
-            self._check_for_duplicate_rules_in_db(context, i)
-
-    def _check_for_duplicate_rules_in_db(self, context, security_group_rule):
-        # Check in database if rule exists
-        filters = self._make_security_group_rule_filter_dict(
-            security_group_rule)
-        db_rules = self.get_security_group_rules(context, filters)
-        # Note(arosen): the call to get_security_group_rules wildcards
-        # values in the filter that have a value of [None]. For
-        # example, filters = {'remote_group_id': [None]} will return
-        # all security group rules regardless of their value of
-        # remote_group_id. It is therefore not possible to express this
-        # check through filters alone unless the behavior of
-        # _get_collection() is changed, which cannot be done because other
-        # methods already rely on it. Instead, we do the filtering below to
-        # catch these corner cases.
-        for db_rule in db_rules:
-            # need to remove id from db_rule for matching
-            id = db_rule.pop('id')
-            if (security_group_rule['security_group_rule'] == db_rule):
-                raise ext_sg.SecurityGroupRuleExists(id=id)
-
-    def _validate_ip_prefix(self, rule):
-        """Check that a valid cidr was specified as remote_ip_prefix
-
-        No need to check that it is in fact an IP address as this is already
-        validated by attribute validators.
-        Check that rule ethertype is consistent with remote_ip_prefix ip type.
-        Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32).
-        """
-        input_prefix = rule['remote_ip_prefix']
-        if input_prefix:
-            addr = netaddr.IPNetwork(input_prefix)
-            # set input_prefix to always include the netmask:
-            rule['remote_ip_prefix'] = str(addr)
-            # check consistency of ethertype with addr version
-            if rule['ethertype'] != "IPv%d" % (addr.version):
-                raise ext_sg.SecurityGroupRuleParameterConflict(
-                    ethertype=rule['ethertype'], cidr=input_prefix)
-
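
The normalization above leans on netaddr adding the host mask when none
is given; a couple of illustrative values:

    # str(netaddr.IPNetwork('192.168.1.10'))   -> '192.168.1.10/32'
    # netaddr.IPNetwork('2001:db8::1').version -> 6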
-    def get_security_group_rules(self, context, filters=None, fields=None,
-                                 sorts=None, limit=None, marker=None,
-                                 page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'security_group_rule',
-                                          limit, marker)
-        return self._get_collection(context,
-                                    SecurityGroupRule,
-                                    self._make_security_group_rule_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts,
-                                    limit=limit, marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def get_security_group_rules_count(self, context, filters=None):
-        return self._get_collection_count(context, SecurityGroupRule,
-                                          filters=filters)
-
-    def get_security_group_rule(self, context, id, fields=None):
-        security_group_rule = self._get_security_group_rule(context, id)
-        return self._make_security_group_rule_dict(security_group_rule, fields)
-
-    def _get_security_group_rule(self, context, id):
-        try:
-            query = self._model_query(context, SecurityGroupRule)
-            sgr = query.filter(SecurityGroupRule.id == id).one()
-        except exc.NoResultFound:
-            raise ext_sg.SecurityGroupRuleNotFound(id=id)
-        return sgr
-
-    def delete_security_group_rule(self, context, id):
-        kwargs = {
-            'context': context,
-            'security_group_rule_id': id
-        }
-        # NOTE(armax): a callback exception here will prevent the request
-        # from being processed. This is a hook point for backend's validation;
-        # we raise to propagate the reason for the failure.
-        try:
-            registry.notify(
-                resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE, self,
-                **kwargs)
-        except exceptions.CallbackFailure as e:
-            reason = _('cannot be deleted due to %s') % e
-            raise ext_sg.SecurityGroupRuleInUse(id=id, reason=reason)
-
-        with context.session.begin(subtransactions=True):
-            query = self._model_query(context, SecurityGroupRule).filter(
-                SecurityGroupRule.id == id)
-            try:
-                # As there is a filter on a primary key it is not possible for
-                # MultipleResultsFound to be raised
-                context.session.delete(query.one())
-            except exc.NoResultFound:
-                raise ext_sg.SecurityGroupRuleNotFound(id=id)
-
-        registry.notify(
-            resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self,
-            **kwargs)
-
-    def _extend_port_dict_security_group(self, port_res, port_db):
-        # Security group bindings will be retrieved from the SQLAlchemy
-        # model. As they're loaded eagerly with ports because of the
-        # joined load they will not cause an extra query.
-        security_group_ids = [sec_group_mapping['security_group_id'] for
-                              sec_group_mapping in port_db.security_groups]
-        port_res[ext_sg.SECURITYGROUPS] = security_group_ids
-        return port_res
-
-    # Register dict extend functions for ports
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attributes.PORTS, ['_extend_port_dict_security_group'])
-
-    def _process_port_create_security_group(self, context, port,
-                                            security_group_ids):
-        if attributes.is_attr_set(security_group_ids):
-            for security_group_id in security_group_ids:
-                self._create_port_security_group_binding(context, port['id'],
-                                                         security_group_id)
-        # Convert to list as a set might be passed here and
-        # this has to be serialized
-        port[ext_sg.SECURITYGROUPS] = (security_group_ids and
-                                       list(security_group_ids) or [])
-
-    def _ensure_default_security_group(self, context, tenant_id):
-        """Create a default security group if one doesn't exist.
-
-        :returns: the default security group id for the given tenant.
-        """
-        # Make no more than two attempts
-        for attempts in (1, 2):
-            try:
-                query = self._model_query(context, DefaultSecurityGroup)
-                default_group = query.filter_by(tenant_id=tenant_id).one()
-                return default_group['security_group_id']
-            except exc.NoResultFound as ex:
-                if attempts > 1:
-                    # the second iteration means the attempt to add the
-                    # default group failed with a duplicate error. Since we
-                    # still do not see the group, we are most probably inside
-                    # a transaction with REPEATABLE READ isolation level and
-                    # need to restart the whole transaction.
-                    raise db_exc.RetryRequest(ex)
-
-                security_group = {
-                    'security_group':
-                        {'name': 'default',
-                         'tenant_id': tenant_id,
-                         'description': _('Default security group')}
-                }
-                try:
-                    security_group = self.create_security_group(
-                        context, security_group, default_sg=True)
-                    return security_group['id']
-                except db_exc.DBDuplicateEntry as ex:
-                    # default security group was created concurrently
-                    LOG.debug("Duplicate default security group %s was "
-                              "not created", ex.value)
-
-    def _get_security_groups_on_port(self, context, port):
-        """Check that all security groups on port belong to tenant.
-
-        :returns: all security groups IDs on port belonging to tenant.
-        """
-        port = port['port']
-        if not attributes.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
-            return
-        if port.get('device_owner') and utils.is_port_trusted(port):
-            return
-
-        port_sg = port.get(ext_sg.SECURITYGROUPS, [])
-        filters = {'id': port_sg}
-        tenant_id = port.get('tenant_id')
-        if tenant_id:
-            filters['tenant_id'] = [tenant_id]
-        valid_groups = set(g['id'] for g in
-                           self.get_security_groups(context, fields=['id'],
-                                                    filters=filters))
-
-        requested_groups = set(port_sg)
-        port_sg_missing = requested_groups - valid_groups
-        if port_sg_missing:
-            raise ext_sg.SecurityGroupNotFound(id=', '.join(port_sg_missing))
-
-        return requested_groups
-
-    def _ensure_default_security_group_on_port(self, context, port):
-        # we don't apply security groups for dhcp or router ports
-        port = port['port']
-        if port.get('device_owner') and utils.is_port_trusted(port):
-            return
-        default_sg = self._ensure_default_security_group(context,
-                                                         port['tenant_id'])
-        if not attributes.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
-            port[ext_sg.SECURITYGROUPS] = [default_sg]
-
-    def _check_update_deletes_security_groups(self, port):
-        """Return True if port has as a security group and it's value
-        is either [] or not is_attr_set, otherwise return False
-        """
-        if (ext_sg.SECURITYGROUPS in port['port'] and
-            not (attributes.is_attr_set(port['port'][ext_sg.SECURITYGROUPS])
-                 and port['port'][ext_sg.SECURITYGROUPS] != [])):
-            return True
-        return False
-
-    def _check_update_has_security_groups(self, port):
-        """Return True if port has security_groups attribute set and
-        its not empty, or False otherwise.
-        This method is called both for port create and port update.
-        """
-        if (ext_sg.SECURITYGROUPS in port['port'] and
-            (attributes.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and
-             port['port'][ext_sg.SECURITYGROUPS] != [])):
-            return True
-        return False
-
-    def update_security_group_on_port(self, context, id, port,
-                                      original_port, updated_port):
-        """Update security groups on port.
-
-        This method returns a flag indicating whether a notification is
-        required, and does not perform the notification itself, because
-        other changes to the port may also require notification.
-        """
-        need_notify = False
-        port_updates = port['port']
-        if (ext_sg.SECURITYGROUPS in port_updates and
-            not utils.compare_elements(
-                original_port.get(ext_sg.SECURITYGROUPS),
-                port_updates[ext_sg.SECURITYGROUPS])):
-            # delete the port bindings and re-add them with the new groups
-            port_updates[ext_sg.SECURITYGROUPS] = (
-                self._get_security_groups_on_port(context, port))
-            self._delete_port_security_group_bindings(context, id)
-            self._process_port_create_security_group(
-                context,
-                updated_port,
-                port_updates[ext_sg.SECURITYGROUPS])
-            need_notify = True
-        else:
-            updated_port[ext_sg.SECURITYGROUPS] = (
-                original_port[ext_sg.SECURITYGROUPS])
-        return need_notify
diff --git a/neutron/db/securitygroups_rpc_base.py b/neutron/db/securitygroups_rpc_base.py
deleted file mode 100644 (file)
index 7f151fe..0000000
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from oslo_log import log as logging
-from sqlalchemy.orm import exc
-
-from neutron._i18n import _, _LW
-from neutron.common import constants as n_const
-from neutron.common import ipv6_utils as ipv6
-from neutron.common import utils
-from neutron.db import allowedaddresspairs_db as addr_pair
-from neutron.db import models_v2
-from neutron.db import securitygroups_db as sg_db
-from neutron.extensions import securitygroup as ext_sg
-
-LOG = logging.getLogger(__name__)
-
-
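-# Map a rule's direction to the prefix key used in the per-device rule
-# dicts sent to agents.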
-DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix',
-                       'egress': 'dest_ip_prefix'}
-
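-# (server port, client port, ethertype) keyed by IP version: DHCP uses
-# UDP 67 -> 68 for IPv4, and DHCPv6 uses UDP 547 -> 546 for IPv6.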
-DHCP_RULE_PORT = {4: (67, 68, n_const.IPv4), 6: (547, 546, n_const.IPv6)}
-
-
-class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin):
-    """Mixin class to add agent-based security group implementation."""
-
-    def get_port_from_device(self, context, device):
-        """Get port dict from device name on an agent.
-
-        Subclass must provide this method or get_ports_from_devices.
-
-        :param device: device name which identifies a port on the agent side.
-        What is specified in "device" depends on the plugin agent
-        implementation. For example, it is a port ID in the OVS agent and a
-        netdev name in the Linux Bridge agent.
-        :return: port dict returned by DB plugin get_port(). In addition,
-        the returned port dict must contain the following fields:
-        - device
-        - security_groups
-        - security_group_rules
-        - security_group_source_groups
-        - fixed_ips
-        """
-        raise NotImplementedError(_("%s must implement get_port_from_device "
-                                    "or get_ports_from_devices.")
-                                  % self.__class__.__name__)
-
-    def get_ports_from_devices(self, context, devices):
-        """Bulk method of get_port_from_device.
-
-        Subclasses may override this to provide better performance for DB
-        queries, backend calls, etc.
-        """
-        return [self.get_port_from_device(context, device)
-                for device in devices]
-
-    def create_security_group_rule(self, context, security_group_rule):
-        rule = super(SecurityGroupServerRpcMixin,
-                     self).create_security_group_rule(context,
-                                                      security_group_rule)
-        sgids = [rule['security_group_id']]
-        self.notifier.security_groups_rule_updated(context, sgids)
-        return rule
-
-    def create_security_group_rule_bulk(self, context, security_group_rules):
-        rules = super(SecurityGroupServerRpcMixin,
-                      self).create_security_group_rule_bulk_native(
-                          context, security_group_rules)
-        sgids = set([r['security_group_id'] for r in rules])
-        self.notifier.security_groups_rule_updated(context, list(sgids))
-        return rules
-
-    def delete_security_group_rule(self, context, sgrid):
-        rule = self.get_security_group_rule(context, sgrid)
-        super(SecurityGroupServerRpcMixin,
-              self).delete_security_group_rule(context, sgrid)
-        self.notifier.security_groups_rule_updated(context,
-                                                   [rule['security_group_id']])
-
-    def check_and_notify_security_group_member_changed(
-            self, context, original_port, updated_port):
-        sg_change = not utils.compare_elements(
-            original_port.get(ext_sg.SECURITYGROUPS),
-            updated_port.get(ext_sg.SECURITYGROUPS))
-        if sg_change:
-            self.notify_security_groups_member_updated_bulk(
-                context, [original_port, updated_port])
-        elif original_port['fixed_ips'] != updated_port['fixed_ips']:
-            self.notify_security_groups_member_updated(context, updated_port)
-
-    def is_security_group_member_updated(self, context,
-                                         original_port, updated_port):
-        """Check security group member updated or not.
-
-        This method returns a flag which indicates request notification
-        is required and does not perform notification itself.
-        It is because another changes for the port may require notification.
-        """
-        need_notify = False
-        if (original_port['fixed_ips'] != updated_port['fixed_ips'] or
-            original_port['mac_address'] != updated_port['mac_address'] or
-            not utils.compare_elements(
-                original_port.get(ext_sg.SECURITYGROUPS),
-                updated_port.get(ext_sg.SECURITYGROUPS))):
-            need_notify = True
-        return need_notify
-
-    def notify_security_groups_member_updated_bulk(self, context, ports):
-        """Notify update event of security group members for ports.
-
-        The agent sets up iptables rules to allow ingress packets from the
-        DHCP server (as part of the provider rules), so we need to notify
-        the plugin agent of an update to the DHCP server IP address.
-        security_groups_provider_updated() just signals that an event
-        occurred; the plugin agent fetches the updated provider rules in
-        another RPC call (security_group_rules_for_devices).
-        """
-        sg_provider_updated_networks = set()
-        sec_groups = set()
-        for port in ports:
-            if port['device_owner'] == n_const.DEVICE_OWNER_DHCP:
-                sg_provider_updated_networks.add(
-                    port['network_id'])
-            # For IPv6, provider rules need to be updated in case a router
-            # interface is created or updated after the VM port is created.
-            # NOTE (Swami): ROUTER_INTERFACE_OWNERS check is required
-            # since it includes the legacy router interface device owners
-            # and DVR router interface device owners.
-            elif port['device_owner'] in n_const.ROUTER_INTERFACE_OWNERS:
-                if any(netaddr.IPAddress(fixed_ip['ip_address']).version == 6
-                       for fixed_ip in port['fixed_ips']):
-                    sg_provider_updated_networks.add(
-                        port['network_id'])
-            else:
-                sec_groups |= set(port.get(ext_sg.SECURITYGROUPS))
-
-        if sg_provider_updated_networks:
-            ports_query = context.session.query(models_v2.Port.id).filter(
-                models_v2.Port.network_id.in_(
-                    sg_provider_updated_networks)).all()
-            ports_to_update = [p.id for p in ports_query]
-            self.notifier.security_groups_provider_updated(
-                context, ports_to_update)
-        if sec_groups:
-            self.notifier.security_groups_member_updated(
-                context, list(sec_groups))
-
-    def notify_security_groups_member_updated(self, context, port):
-        self.notify_security_groups_member_updated_bulk(context, [port])
-
-    def security_group_info_for_ports(self, context, ports):
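-        # Build the RPC payload for agents: 'devices' holds per-port info,
-        # 'security_groups' maps SG id -> list of rule dicts, and
-        # 'sg_member_ips' maps remote SG id -> member IPs per ethertype.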
-        sg_info = {'devices': ports,
-                   'security_groups': {},
-                   'sg_member_ips': {}}
-        rules_in_db = self._select_rules_for_ports(context, ports)
-        remote_security_group_info = {}
-        for (port_id, rule_in_db) in rules_in_db:
-            remote_gid = rule_in_db.get('remote_group_id')
-            security_group_id = rule_in_db.get('security_group_id')
-            ethertype = rule_in_db['ethertype']
-            if ('security_group_source_groups'
-                not in sg_info['devices'][port_id]):
-                sg_info['devices'][port_id][
-                    'security_group_source_groups'] = []
-
-            if remote_gid:
-                if (remote_gid
-                    not in sg_info['devices'][port_id][
-                        'security_group_source_groups']):
-                    sg_info['devices'][port_id][
-                        'security_group_source_groups'].append(remote_gid)
-                if remote_gid not in remote_security_group_info:
-                    remote_security_group_info[remote_gid] = {}
-                if ethertype not in remote_security_group_info[remote_gid]:
-                    # this set will be serialized into a list by rpc code
-                    remote_security_group_info[remote_gid][ethertype] = set()
-
-            direction = rule_in_db['direction']
-            rule_dict = {
-                'direction': direction,
-                'ethertype': ethertype}
-
-            for key in ('protocol', 'port_range_min', 'port_range_max',
-                        'remote_ip_prefix', 'remote_group_id'):
-                if rule_in_db.get(key) is not None:
-                    if key == 'remote_ip_prefix':
-                        direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
-                        rule_dict[direction_ip_prefix] = rule_in_db[key]
-                        continue
-                    rule_dict[key] = rule_in_db[key]
-            if security_group_id not in sg_info['security_groups']:
-                sg_info['security_groups'][security_group_id] = []
-            if rule_dict not in sg_info['security_groups'][security_group_id]:
-                sg_info['security_groups'][security_group_id].append(
-                    rule_dict)
-        # Include security groups even if they don't have any rules
-        sg_ids = self._select_sg_ids_for_ports(context, ports)
-        for (sg_id, ) in sg_ids:
-            if sg_id not in sg_info['security_groups']:
-                sg_info['security_groups'][sg_id] = []
-
-        sg_info['sg_member_ips'] = remote_security_group_info
-        # the provider rules do not belong to any security group, so these
-        # rules still reside in sg_info['devices'][port_id]
-        self._apply_provider_rule(context, sg_info['devices'])
-
-        return self._get_security_group_member_ips(context, sg_info)
-
-    def _get_security_group_member_ips(self, context, sg_info):
-        ips = self._select_ips_for_remote_group(
-            context, sg_info['sg_member_ips'].keys())
-        for sg_id, member_ips in ips.items():
-            for ip in member_ips:
-                ethertype = 'IPv%d' % netaddr.IPNetwork(ip).version
-                if ethertype in sg_info['sg_member_ips'][sg_id]:
-                    sg_info['sg_member_ips'][sg_id][ethertype].add(ip)
-        return sg_info
-
-    def _select_sg_ids_for_ports(self, context, ports):
-        if not ports:
-            return []
-        sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
-        sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
-        query = context.session.query(sg_binding_sgid)
-        query = query.filter(sg_binding_port.in_(ports.keys()))
-        return query.all()
-
-    def _select_rules_for_ports(self, context, ports):
-        if not ports:
-            return []
-        sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
-        sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
-
-        sgr_sgid = sg_db.SecurityGroupRule.security_group_id
-
-        query = context.session.query(sg_binding_port,
-                                      sg_db.SecurityGroupRule)
-        query = query.join(sg_db.SecurityGroupRule,
-                           sgr_sgid == sg_binding_sgid)
-        query = query.filter(sg_binding_port.in_(ports.keys()))
-        return query.all()
-
-    def _select_ips_for_remote_group(self, context, remote_group_ids):
-        ips_by_group = {}
-        if not remote_group_ids:
-            return ips_by_group
-        for remote_group_id in remote_group_ids:
-            ips_by_group[remote_group_id] = set()
-
-        ip_port = models_v2.IPAllocation.port_id
-        sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
-        sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
-
-        # Join the security group binding table directly to the IP allocation
-        # table instead of via the Port table to skip an unnecessary
-        # intermediary.
-        query = context.session.query(sg_binding_sgid,
-                                      models_v2.IPAllocation.ip_address,
-                                      addr_pair.AllowedAddressPair.ip_address)
-        query = query.join(models_v2.IPAllocation,
-                           ip_port == sg_binding_port)
-        # Outerjoin because address pairs may be null and we still want the
-        # IP for the port.
-        query = query.outerjoin(
-            addr_pair.AllowedAddressPair,
-            sg_binding_port == addr_pair.AllowedAddressPair.port_id)
-        query = query.filter(sg_binding_sgid.in_(remote_group_ids))
-        # Each allowed address pair IP record for a port beyond the 1st
-        # will have a duplicate regular IP in the query response since
-        # the relationship is 1-to-many. Dedup with a set
-        for security_group_id, ip_address, allowed_addr_ip in query:
-            ips_by_group[security_group_id].add(ip_address)
-            if allowed_addr_ip:
-                ips_by_group[security_group_id].add(allowed_addr_ip)
-        return ips_by_group
-
-    def _select_remote_group_ids(self, ports):
-        remote_group_ids = []
-        for port in ports.values():
-            for rule in port.get('security_group_rules'):
-                remote_group_id = rule.get('remote_group_id')
-                if remote_group_id:
-                    remote_group_ids.append(remote_group_id)
-        return remote_group_ids
-
-    def _select_network_ids(self, ports):
-        return set((port['network_id'] for port in ports.values()))
-
-    def _select_dhcp_ips_for_network_ids(self, context, network_ids):
-        if not network_ids:
-            return {}
-        query = context.session.query(models_v2.Port.mac_address,
-                                      models_v2.Port.network_id,
-                                      models_v2.IPAllocation.ip_address)
-        query = query.join(models_v2.IPAllocation)
-        query = query.filter(models_v2.Port.network_id.in_(network_ids))
-        owner = n_const.DEVICE_OWNER_DHCP
-        query = query.filter(models_v2.Port.device_owner == owner)
-        ips = {}
-
-        for network_id in network_ids:
-            ips[network_id] = []
-
-        for mac_address, network_id, ip in query:
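-            # Replace a non-link-local IPv6 address with the EUI-64
-            # link-local address derived from the port's MAC (DHCPv6
-            # traffic is sent from the link-local address).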
-            if (netaddr.IPAddress(ip).version == 6
-                and not netaddr.IPAddress(ip).is_link_local()):
-                ip = str(ipv6.get_ipv6_addr_by_EUI64(n_const.IPV6_LLA_PREFIX,
-                    mac_address))
-            if ip not in ips[network_id]:
-                ips[network_id].append(ip)
-
-        return ips
-
-    def _select_ra_ips_for_network_ids(self, context, network_ids):
-        """Select IP addresses to allow sending router advertisement from.
-
-        If the OpenStack managed radvd process sends an RA, get link local
-        address of gateway and allow RA from this Link Local address.
-        The gateway port link local address will only be obtained
-        when router is created before VM instance is booted and
-        subnet is attached to router.
-
-        If OpenStack doesn't send RA, allow RA from gateway IP.
-        Currently, the gateway IP needs to be link local to be able
-        to send RA to VM.
-        """
-        if not network_ids:
-            return {}
-        ips = {}
-        for network_id in network_ids:
-            ips[network_id] = set([])
-        query = context.session.query(models_v2.Subnet)
-        subnets = query.filter(models_v2.Subnet.network_id.in_(network_ids))
-        for subnet in subnets:
-            gateway_ip = subnet['gateway_ip']
-            if subnet['ip_version'] != 6 or not gateway_ip:
-                continue
-            if not netaddr.IPAddress(gateway_ip).is_link_local():
-                if subnet['ipv6_ra_mode']:
-                    gateway_ip = self._get_lla_gateway_ip_for_subnet(context,
-                                                                     subnet)
-                else:
-                    # TODO(xuhanp): Figure out how to allow the gateway IP
-                    # from an existing device to be a global address, and
-                    # figure out the link-local address by another method.
-                    continue
-            if gateway_ip:
-                ips[subnet['network_id']].add(gateway_ip)
-
-        return ips
-
-    def _get_lla_gateway_ip_for_subnet(self, context, subnet):
-        query = context.session.query(models_v2.Port.mac_address)
-        query = query.join(models_v2.IPAllocation)
-        query = query.filter(
-            models_v2.IPAllocation.subnet_id == subnet['id'])
-        query = query.filter(
-            models_v2.IPAllocation.ip_address == subnet['gateway_ip'])
-        query = query.filter(
-            models_v2.Port.device_owner.in_(n_const.ROUTER_INTERFACE_OWNERS))
-        try:
-            mac_address = query.one()[0]
-        except (exc.NoResultFound, exc.MultipleResultsFound):
-            LOG.warn(_LW('No valid gateway port found on subnet %s '
-                         'for IPv6 RA'), subnet['id'])
-            return
-        lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
-            n_const.IPV6_LLA_PREFIX,
-            mac_address))
-        return lla_ip
-
-    def _convert_remote_group_id_to_ip_prefix(self, context, ports):
-        remote_group_ids = self._select_remote_group_ids(ports)
-        ips = self._select_ips_for_remote_group(context, remote_group_ids)
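-        # Expand each rule that references a remote group into one rule per
-        # member IP of that group, skipping the port's own addresses and IPs
-        # whose version does not match the rule's ethertype.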
-        for port in ports.values():
-            updated_rule = []
-            for rule in port.get('security_group_rules'):
-                remote_group_id = rule.get('remote_group_id')
-                direction = rule.get('direction')
-                direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
-                if not remote_group_id:
-                    updated_rule.append(rule)
-                    continue
-
-                port['security_group_source_groups'].append(remote_group_id)
-                base_rule = rule
-                for ip in ips[remote_group_id]:
-                    if ip in port.get('fixed_ips', []):
-                        continue
-                    ip_rule = base_rule.copy()
-                    version = netaddr.IPNetwork(ip).version
-                    ethertype = 'IPv%s' % version
-                    if base_rule['ethertype'] != ethertype:
-                        continue
-                    ip_rule[direction_ip_prefix] = str(
-                        netaddr.IPNetwork(ip).cidr)
-                    updated_rule.append(ip_rule)
-            port['security_group_rules'] = updated_rule
-        return ports
-
-    def _add_ingress_dhcp_rule(self, port, ips):
-        dhcp_ips = ips.get(port['network_id'])
-        for dhcp_ip in dhcp_ips:
-            source_port, dest_port, ethertype = DHCP_RULE_PORT[
-                netaddr.IPAddress(dhcp_ip).version]
-            dhcp_rule = {'direction': 'ingress',
-                         'ethertype': ethertype,
-                         'protocol': 'udp',
-                         'port_range_min': dest_port,
-                         'port_range_max': dest_port,
-                         'source_port_range_min': source_port,
-                         'source_port_range_max': source_port,
-                         'source_ip_prefix': dhcp_ip}
-            port['security_group_rules'].append(dhcp_rule)
-
-    def _add_ingress_ra_rule(self, port, ips):
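-        # Allow ICMPv6 router advertisements (ICMPV6_TYPE_RA) from each
-        # permitted RA source address on the port's network.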
-        ra_ips = ips.get(port['network_id'])
-        for ra_ip in ra_ips:
-            ra_rule = {'direction': 'ingress',
-                       'ethertype': n_const.IPv6,
-                       'protocol': n_const.PROTO_NAME_ICMP_V6,
-                       'source_ip_prefix': ra_ip,
-                       'source_port_range_min': n_const.ICMPV6_TYPE_RA}
-            port['security_group_rules'].append(ra_rule)
-
-    def _apply_provider_rule(self, context, ports):
-        network_ids = self._select_network_ids(ports)
-        ips_dhcp = self._select_dhcp_ips_for_network_ids(context, network_ids)
-        ips_ra = self._select_ra_ips_for_network_ids(context, network_ids)
-        for port in ports.values():
-            self._add_ingress_ra_rule(port, ips_ra)
-            self._add_ingress_dhcp_rule(port, ips_dhcp)
-
-    def security_group_rules_for_ports(self, context, ports):
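-        # Attach each DB rule to its port as an agent-friendly dict; a
-        # remote_ip_prefix is stored under the direction-specific key
-        # (source_ip_prefix or dest_ip_prefix).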
-        rules_in_db = self._select_rules_for_ports(context, ports)
-        for (port_id, rule_in_db) in rules_in_db:
-            port = ports[port_id]
-            direction = rule_in_db['direction']
-            rule_dict = {
-                'security_group_id': rule_in_db['security_group_id'],
-                'direction': direction,
-                'ethertype': rule_in_db['ethertype'],
-            }
-            for key in ('protocol', 'port_range_min', 'port_range_max',
-                        'remote_ip_prefix', 'remote_group_id'):
-                if rule_in_db.get(key) is not None:
-                    if key == 'remote_ip_prefix':
-                        direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
-                        rule_dict[direction_ip_prefix] = rule_in_db[key]
-                        continue
-                    rule_dict[key] = rule_in_db[key]
-            port['security_group_rules'].append(rule_dict)
-        self._apply_provider_rule(context, ports)
-        return self._convert_remote_group_id_to_ip_prefix(context, ports)
diff --git a/neutron/db/servicetype_db.py b/neutron/db/servicetype_db.py
deleted file mode 100644 (file)
index 61f2de5..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from itertools import chain
-
-from oslo_log import log as logging
-import sqlalchemy as sa
-
-from neutron.api.v2 import attributes as attr
-from neutron.db import model_base
-from neutron.services import provider_configuration as pconf
-
-LOG = logging.getLogger(__name__)
-
-
-class ProviderResourceAssociation(model_base.BASEV2):
-    provider_name = sa.Column(sa.String(attr.NAME_MAX_LEN),
-                              nullable=False, primary_key=True)
-    # should be manually deleted on resource deletion
-    resource_id = sa.Column(sa.String(36), nullable=False, primary_key=True,
-                            unique=True)
-
-
-class ServiceTypeManager(object):
-    """Manage service type objects in Neutron."""
-
-    _instance = None
-
-    @classmethod
-    def get_instance(cls):
-        if cls._instance is None:
-            cls._instance = cls()
-        return cls._instance
-
-    def __init__(self):
-        self.config = {}
-
-    def add_provider_configuration(self, service_type, configuration):
-        """Add or update the provider configuration for the service type."""
-        LOG.debug('Adding provider configuration for service %s', service_type)
-        self.config.update({service_type: configuration})
-
-    def get_service_providers(self, context, filters=None, fields=None):
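-        # When filtering by service_type, consult only the matching
-        # registered provider configurations; otherwise aggregate providers
-        # from all of them.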
-        if filters and 'service_type' in filters:
-            return list(
-                chain.from_iterable(
-                    self.config[svc_type].get_service_providers(filters,
-                                                                fields)
-                    for svc_type in filters['service_type']
-                    if svc_type in self.config)
-            )
-        return list(
-            chain.from_iterable(
-                self.config[p].get_service_providers(filters, fields)
-                for p in self.config)
-        )
-
-    def get_default_service_provider(self, context, service_type):
-        """Return the default provider for a given service type."""
-        filters = {'service_type': [service_type],
-                   'default': [True]}
-        providers = self.get_service_providers(context, filters=filters)
-        # By construction we expect at most a single item in providers
-        if not providers:
-            raise pconf.DefaultServiceProviderNotFound(
-                service_type=service_type
-            )
-        return providers[0]
-
-    def add_resource_association(self, context, service_type, provider_name,
-                                 resource_id):
-        r = self.get_service_providers(context,
-            filters={'service_type': [service_type], 'name': [provider_name]})
-        if not r:
-            raise pconf.ServiceProviderNotFound(provider=provider_name,
-                                                service_type=service_type)
-
-        with context.session.begin(subtransactions=True):
-            # we don't actually need the service type for the association;
-            # resource_id is unique and belongs to a specific service,
-            # which knows its type
-            assoc = ProviderResourceAssociation(provider_name=provider_name,
-                                                resource_id=resource_id)
-            context.session.add(assoc)
-        # NOTE(blogan): the ProviderResourceAssociation relationship will not
-        # be populated if a resource was created before this.  The expire_all
-        # will force the session to retrieve the new data when that
-        # resource is read again.  It has been suggested that we can
-        # crawl through everything in the mapper to find the resource with
-        # the ID that matches resource_id and expire that one, but we can
-        # just start with this.
-        context.session.expire_all()
-
-    def del_resource_associations(self, context, resource_ids):
-        if not resource_ids:
-            return
-        with context.session.begin(subtransactions=True):
-            (context.session.query(ProviderResourceAssociation).
-             filter(
-                 ProviderResourceAssociation.resource_id.in_(resource_ids)).
-             delete(synchronize_session='fetch'))
diff --git a/neutron/db/sqlalchemyutils.py b/neutron/db/sqlalchemyutils.py
deleted file mode 100644 (file)
index 92c5db2..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from six import moves
-import sqlalchemy
-from sqlalchemy.orm import properties
-
-from neutron._i18n import _
-from neutron.common import exceptions as n_exc
-
-
-def paginate_query(query, model, limit, sorts, marker_obj=None):
-    """Returns a query with sorting / pagination criteria added.
-
-    Pagination works by requiring a unique sort key, specified by sorts.
-    (If the sort key is not unique, then we risk looping through values.)
-    We use the last row in the previous page as the 'marker' for pagination.
-    So we must return values that follow the passed marker in that order.
-    With a single-valued sort key, this would be easy: sort_key > X.
-    With a compound sort key (k1, k2, k3), we must do this to reproduce
-    the lexicographical ordering:
-    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
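-    For example, with sorts [('name', True), ('id', True)] and a marker row
-    ('foo', 42), the generated filter is:
-    (name > 'foo') or (name == 'foo' && id > 42)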
-    We do not use an OFFSET clause because it does not scale; see the
-    discussion at https://lists.launchpad.net/openstack/msg02547.html
-
-    We also have to cope with different sort directions.
-
-    Typically, the id of the last row is used as the client-facing pagination
-    marker, then the actual marker object must be fetched from the db and
-    passed in to us as marker.
-
-    :param query: the query object to which we should add paging/sorting
-    :param model: the ORM model class
-    :param limit: maximum number of items to return
-    :param sorts: array of attributes and direction by which results should
-                 be sorted
-    :param marker_obj: the last item of the previous page; we return the
-                       next results after this value.
-    :rtype: sqlalchemy.orm.query.Query
-    :return: The query with sorting/pagination added.
-    """
-    if not sorts:
-        return query
-
-    # A primary key must be specified in sort keys
-    assert not (limit and
-                len(set(dict(sorts).keys()) &
-                    set(model.__table__.primary_key.columns.keys())) == 0)
-
-    # Add sorting
-    for sort_key, sort_direction in sorts:
-        sort_dir_func = sqlalchemy.asc if sort_direction else sqlalchemy.desc
-        try:
-            sort_key_attr = getattr(model, sort_key)
-        except AttributeError:
-            # Extension attributes are not supported for sorting. Because
-            # they exist in attr_info, they are caught here.
-            msg = _("%s is an invalid attribute for sort_key") % sort_key
-            raise n_exc.BadRequest(resource=model.__tablename__, msg=msg)
-        if isinstance(sort_key_attr.property, properties.RelationshipProperty):
-            msg = _("The attribute '%(attr)s' is reference to other "
-                    "resource, can't used by sort "
-                    "'%(resource)s'") % {'attr': sort_key,
-                                         'resource': model.__tablename__}
-            raise n_exc.BadRequest(resource=model.__tablename__, msg=msg)
-        query = query.order_by(sort_dir_func(sort_key_attr))
-
-    # Add pagination
-    if marker_obj:
-        marker_values = [getattr(marker_obj, sort[0]) for sort in sorts]
-
-        # Build up an array of sort criteria as in the docstring
-        criteria_list = []
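-        # Build one conjunction per sort key, following the lexicographical
-        # expansion in the docstring: keys 0..i-1 must equal the marker's
-        # values and key i must lie strictly beyond them.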
-        for i, sort in enumerate(sorts):
-            crit_attrs = [(getattr(model, sorts[j][0]) == marker_values[j])
-                          for j in moves.range(i)]
-            model_attr = getattr(model, sort[0])
-            if sort[1]:
-                crit_attrs.append((model_attr > marker_values[i]))
-            else:
-                crit_attrs.append((model_attr < marker_values[i]))
-
-            criteria = sqlalchemy.sql.and_(*crit_attrs)
-            criteria_list.append(criteria)
-
-        f = sqlalchemy.sql.or_(*criteria_list)
-        query = query.filter(f)
-
-    if limit:
-        query = query.limit(limit)
-
-    return query
diff --git a/neutron/db/vlantransparent_db.py b/neutron/db/vlantransparent_db.py
deleted file mode 100644 (file)
index ba024fe..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2015 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.v2 import attributes
-from neutron.db import db_base_plugin_v2
-from neutron.extensions import vlantransparent
-
-
-class Vlantransparent_db_mixin(object):
-    """Mixin class to add vlan transparent methods to db_base_plugin_v2."""
-
-    def _extend_network_dict_vlan_transparent(self, network_res, network_db):
-        network_res[vlantransparent.VLANTRANSPARENT] = (
-            network_db.vlan_transparent)
-        return network_res
-
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attributes.NETWORKS, ['_extend_network_dict_vlan_transparent'])
diff --git a/neutron/debug/README b/neutron/debug/README
deleted file mode 100644 (file)
index 181cb4f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-Debug Helper Script for Neutron
-
-- Configure
-export NEUTRON_TEST_CONFIG_FILE=/etc/neutron/debug.ini
-or
-export NEUTRON_TEST_CONFIG_FILE=/etc/neutron/l3_agent.ini
-
-You can also specify the config file with the --config-file option.
-
-- Usage
-neutron-debug commands
-
-probe-create <net-id>
-  Create probe port - create port and interface, then plug it in.
-  This command returns the port ID of a probe port. A probe port is a port used for testing.
-  The port ID is the probe ID.
-  Multiple probes can exist in a network, in order to check connectivity between ports.
-
-  neutron-debug probe-exec probe_id_1 'nc -l 192.168.100.3 22'
-  neutron-debug probe-exec probe_id_2 'nc -vz 192.168.100.4 22'
-
-  Note: You should use a user and a tenant with permission to
-   modify the network and subnet you want to probe. For example, you must be an
-   admin user to probe an external network.
-
-probe-delete <port-id>  Delete probe - delete port then unplug
-probe-exec <port-id> 'command'    Exec a command in the namespace of the probe
-`probe-exec <port-id>` 'interactive command' Exec an interactive command (e.g., ssh)
-
-probe-list     List probes
-probe-clear    Clear all probes
-
-ping-all --id <network_id> --timeout 1 (optional)
-         ping-all is an all-in-one command to ping all fixed IPs in all networks or in a specified network.
-         A probe is automatically created if needed.
-
-neutron-debug extends the neutronclient shell, so you can use all neutron commands as well.
-
diff --git a/neutron/debug/__init__.py b/neutron/debug/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/debug/commands.py b/neutron/debug/commands.py
deleted file mode 100644 (file)
index 7caa353..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-# Copyright 2012,  Nachi Ueno,  NTT MCL,  Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License,  Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing,  software
-#    distributed under the License is distributed on an "AS IS" BASIS,  WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND,  either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from cliff import lister
-from neutronclient.common import utils
-from neutronclient.neutron import v2_0 as client
-from neutronclient.neutron.v2_0 import port
-
-from neutron._i18n import _, _LI
-
-
-class ProbeCommand(client.NeutronCommand):
-
-    def get_debug_agent(self):
-        return self.app.debug_agent
-
-
-class CreateProbe(ProbeCommand):
-    """Create probe port and interface, then plug it in."""
-
-    def get_parser(self, prog_name):
-        parser = super(CreateProbe, self).get_parser(prog_name)
-        parser.add_argument(
-            'id', metavar='network_id',
-            help=_('ID of network to probe'))
-        parser.add_argument(
-            '--device-owner',
-            default='network', choices=['network', 'compute'],
-            help=_('Owner type of the device: network/compute'))
-        return parser
-
-    def take_action(self, parsed_args):
-        debug_agent = self.get_debug_agent()
-        probe_port = debug_agent.create_probe(parsed_args.id,
-                                              parsed_args.device_owner)
-        self.log.info(_LI('Probe created: %s'), probe_port.id)
-
-
-class DeleteProbe(ProbeCommand):
-    """Delete probe - delete port then uplug."""
-
-    def get_parser(self, prog_name):
-        parser = super(DeleteProbe, self).get_parser(prog_name)
-        parser.add_argument(
-            'id', metavar='port_id',
-            help=_('ID of probe port to delete'))
-        return parser
-
-    def take_action(self, parsed_args):
-        debug_agent = self.get_debug_agent()
-        debug_agent.delete_probe(parsed_args.id)
-        self.log.info(_LI('Probe %s deleted'), parsed_args.id)
-
-
-class ListProbe(ProbeCommand, lister.Lister):
-    """List probes."""
-
-    _formatters = {'fixed_ips': port._format_fixed_ips, }
-
-    def take_action(self, parsed_args):
-        debug_agent = self.get_debug_agent()
-        info = debug_agent.list_probes()
-        columns = sorted(info[0].keys()) if info else []
-        return (columns, (utils.get_item_properties(
-            s, columns, formatters=self._formatters, )
-            for s in info), )
-
-
-class ClearProbe(ProbeCommand):
-    """Clear All probes."""
-
-    def take_action(self, parsed_args):
-        debug_agent = self.get_debug_agent()
-        cleared_probes_count = debug_agent.clear_probes()
-        self.log.info(_LI('%d probe(s) deleted'), cleared_probes_count)
-
-
-class ExecProbe(ProbeCommand):
-    """Exec commands on the namespace of the probe."""
-
-    def get_parser(self, prog_name):
-        parser = super(ExecProbe, self).get_parser(prog_name)
-        parser.add_argument(
-            'id', metavar='port_id',
-            help=_('ID of probe port to execute command'))
-        parser.add_argument(
-            'command', metavar='command',
-            nargs='?',
-            default=None,
-            help=_('Command to execute'))
-        return parser
-
-    def take_action(self, parsed_args):
-        debug_agent = self.get_debug_agent()
-        result = debug_agent.exec_command(parsed_args.id, parsed_args.command)
-        self.app.stdout.write(result + '\n')
-
-
-class PingAll(ProbeCommand):
-    """Ping all fixed_ip."""
-
-    def get_parser(self, prog_name):
-        parser = super(PingAll, self).get_parser(prog_name)
-        parser.add_argument(
-            '--timeout', metavar='<timeout>',
-            default=10,
-            help=_('Ping timeout'))
-        parser.add_argument(
-            '--id', metavar='network_id',
-            default=None,
-            help=_('ID of network'))
-        return parser
-
-    def take_action(self, parsed_args):
-        debug_agent = self.get_debug_agent()
-        result = debug_agent.ping_all(parsed_args.id,
-                                      timeout=parsed_args.timeout)
-        self.app.stdout.write(result + '\n')
diff --git a/neutron/debug/debug_agent.py b/neutron/debug/debug_agent.py
deleted file mode 100644 (file)
index d1f069c..0000000
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright 2012,  Nachi Ueno,  NTT MCL,  Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License,  Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing,  software
-#    distributed under the License is distributed on an "AS IS" BASIS,  WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND,  either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import shlex
-import socket
-
-import netaddr
-from oslo_log import log as logging
-
-from neutron._i18n import _LW
-from neutron.agent.linux import dhcp
-from neutron.agent.linux import ip_lib
-from neutron.common import constants
-from neutron.extensions import portbindings
-
-LOG = logging.getLogger(__name__)
-
-DEVICE_OWNER_NETWORK_PROBE = 'network:probe'
-
-DEVICE_OWNER_COMPUTE_PROBE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'probe'
-
-
-class NeutronDebugAgent(object):
-
-    def __init__(self, conf, client, driver):
-        self.conf = conf
-        self.client = client
-        self.driver = driver
-
-    def _get_namespace(self, port):
-        return "qprobe-%s" % port.id
-
-    def create_probe(self, network_id, device_owner='network'):
-        network = self._get_network(network_id)
-        bridge = None
-        if network.external:
-            bridge = self.conf.external_network_bridge
-
-        port = self._create_port(network, device_owner)
-        interface_name = self.driver.get_device_name(port)
-        namespace = self._get_namespace(port)
-
-        if ip_lib.device_exists(interface_name, namespace=namespace):
-            LOG.debug('Reusing existing device: %s.', interface_name)
-        else:
-            self.driver.plug(network.id,
-                             port.id,
-                             interface_name,
-                             port.mac_address,
-                             bridge=bridge,
-                             namespace=namespace)
-        ip_cidrs = []
-        for fixed_ip in port.fixed_ips:
-            subnet = fixed_ip.subnet
-            net = netaddr.IPNetwork(subnet.cidr)
-            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
-            ip_cidrs.append(ip_cidr)
-        self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace)
-        return port
-
-    def _get_subnet(self, subnet_id):
-        subnet_dict = self.client.show_subnet(subnet_id)['subnet']
-        return dhcp.DictModel(subnet_dict)
-
-    def _get_network(self, network_id):
-        network_dict = self.client.show_network(network_id)['network']
-        network = dhcp.DictModel(network_dict)
-        network.external = network_dict.get('router:external')
-        obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets]
-        network.subnets = obj_subnet
-        return network
-
-    def clear_probes(self):
-        """Returns number of deleted probes"""
-        ports = self.client.list_ports(
-            device_id=socket.gethostname(),
-            device_owner=[DEVICE_OWNER_NETWORK_PROBE,
-                          DEVICE_OWNER_COMPUTE_PROBE])
-        info = ports['ports']
-        for port in info:
-            self.delete_probe(port['id'])
-        return len(info)
-
-    def delete_probe(self, port_id):
-        port = dhcp.DictModel(self.client.show_port(port_id)['port'])
-        network = self._get_network(port.network_id)
-        bridge = None
-        if network.external:
-            bridge = self.conf.external_network_bridge
-        ip = ip_lib.IPWrapper()
-        namespace = self._get_namespace(port)
-        if ip.netns.exists(namespace):
-            self.driver.unplug(self.driver.get_device_name(port),
-                               bridge=bridge,
-                               namespace=namespace)
-            try:
-                ip.netns.delete(namespace)
-            except Exception:
-                LOG.warn(_LW('Failed to delete namespace %s'), namespace)
-        else:
-            self.driver.unplug(self.driver.get_device_name(port),
-                               bridge=bridge)
-        self.client.delete_port(port.id)
-
-    def list_probes(self):
-        ports = self.client.list_ports(
-            device_owner=[DEVICE_OWNER_NETWORK_PROBE,
-                          DEVICE_OWNER_COMPUTE_PROBE])
-        info = ports['ports']
-        for port in info:
-            port['device_name'] = self.driver.get_device_name(
-                dhcp.DictModel(port))
-        return info
-
-    def exec_command(self, port_id, command=None):
-        port = dhcp.DictModel(self.client.show_port(port_id)['port'])
-        ip = ip_lib.IPWrapper()
-        namespace = self._get_namespace(port)
-        if not command:
-            return "sudo ip netns exec %s" % self._get_namespace(port)
-        namespace = ip.ensure_namespace(namespace)
-        return namespace.netns.execute(shlex.split(command))
-
-    def ensure_probe(self, network_id):
-        ports = self.client.list_ports(network_id=network_id,
-                                       device_id=socket.gethostname(),
-                                       device_owner=DEVICE_OWNER_NETWORK_PROBE)
-        info = ports.get('ports', [])
-        if info:
-            return dhcp.DictModel(info[0])
-        else:
-            return self.create_probe(network_id)
-
-    def ping_all(self, network_id=None, timeout=1):
-        if network_id:
-            ports = self.client.list_ports(network_id=network_id)['ports']
-        else:
-            ports = self.client.list_ports()['ports']
-        result = ""
-        for port in ports:
-            probe = self.ensure_probe(port['network_id'])
-            if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE:
-                continue
-            for fixed_ip in port['fixed_ips']:
-                address = fixed_ip['ip_address']
-                subnet = self._get_subnet(fixed_ip['subnet_id'])
-                if subnet.ip_version == 4:
-                    ping_command = 'ping'
-                else:
-                    ping_command = 'ping6'
-                result += self.exec_command(probe.id,
-                                            '%s -c 1 -w %s %s' % (ping_command,
-                                                                  timeout,
-                                                                  address))
-        return result
-
-    def _create_port(self, network, device_owner):
-        host = self.conf.host
-        body = {'port': {'admin_state_up': True,
-                         'network_id': network.id,
-                         'device_id': '%s' % socket.gethostname(),
-                         'device_owner': '%s:probe' % device_owner,
-                         'tenant_id': network.tenant_id,
-                         portbindings.HOST_ID: host,
-                         'fixed_ips': [dict(subnet_id=s.id)
-                                       for s in network.subnets]}}
-        port_dict = self.client.create_port(body)['port']
-        port = dhcp.DictModel(port_dict)
-        port.network = network
-        for fixed_ip in port.fixed_ips:
-            fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id)
-        return port
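
For illustration, the ping command that ping_all composes above reduces to the following self-contained sketch (the helper name and the addresses are made up; the real agent runs the command inside the probe port's network namespace):

    def build_ping_command(ip_version, timeout, address):
        # IPv4 targets use 'ping'; IPv6 targets use 'ping6'.
        ping_command = 'ping' if ip_version == 4 else 'ping6'
        return '%s -c 1 -w %s %s' % (ping_command, timeout, address)

    assert build_ping_command(4, 1, '192.0.2.10') == 'ping -c 1 -w 1 192.0.2.10'
    assert build_ping_command(6, 1, '2001:db8::1') == 'ping6 -c 1 -w 1 2001:db8::1'

Each probe is a regular Neutron port with one fixed IP per subnet (see _create_port above), so a single probe per network can reach every subnet that network carries.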
diff --git a/neutron/debug/shell.py b/neutron/debug/shell.py
deleted file mode 100644 (file)
index 6a00071..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_utils import importutils
-
-from neutron._i18n import _
-from neutron.agent.common import config
-from neutron.agent.common import utils
-from neutron.agent.linux import interface
-from neutron.debug import debug_agent
-from neutronclient.common import exceptions as exc
-from neutronclient import shell
-
-COMMAND_V2 = {
-    'probe-create': importutils.import_class(
-        'neutron.debug.commands.CreateProbe'),
-    'probe-delete': importutils.import_class(
-        'neutron.debug.commands.DeleteProbe'),
-    'probe-list': importutils.import_class(
-        'neutron.debug.commands.ListProbe'),
-    'probe-clear': importutils.import_class(
-        'neutron.debug.commands.ClearProbe'),
-    'probe-exec': importutils.import_class(
-        'neutron.debug.commands.ExecProbe'),
-    'ping-all': importutils.import_class(
-        'neutron.debug.commands.PingAll'),
-    # TODO(nati): ping, netcat, nmap, bench
-}
-COMMANDS = {'2.0': COMMAND_V2}
-
-
-class NeutronDebugShell(shell.NeutronShell):
-    def __init__(self, api_version):
-        super(NeutronDebugShell, self).__init__(api_version)
-        for k, v in COMMANDS[api_version].items():
-            self.command_manager.add_command(k, v)
-
-    def build_option_parser(self, description, version):
-        parser = super(NeutronDebugShell, self).build_option_parser(
-            description, version)
-        default = (
-            shell.env('NEUTRON_TEST_CONFIG_FILE') or
-            shell.env('QUANTUM_TEST_CONFIG_FILE')
-        )
-        parser.add_argument(
-            '--config-file',
-            default=default,
-            help=_('Config file for interface driver '
-                   '(You may also use l3_agent.ini)'))
-        return parser
-
-    def initialize_app(self, argv):
-        super(NeutronDebugShell, self).initialize_app(argv)
-        if not self.options.config_file:
-            raise exc.CommandError(
-                _("You must provide a config file for bridge -"
-                  " either --config-file or env[NEUTRON_TEST_CONFIG_FILE]"))
-        client = self.client_manager.neutron
-        cfg.CONF.register_opts(interface.OPTS)
-        cfg.CONF.register_opts(config.EXT_NET_BRIDGE_OPTS)
-        config.register_interface_driver_opts_helper(cfg.CONF)
-        cfg.CONF(['--config-file', self.options.config_file])
-        config.setup_logging()
-        driver = utils.load_interface_driver(cfg.CONF)
-        self.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF,
-                                                         client,
-                                                         driver)
-
-
-def main(argv=None):
-    return NeutronDebugShell(shell.NEUTRON_API_VERSION).run(
-        argv or sys.argv[1:])
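
The COMMAND_V2 table above maps subcommand names to classes resolved from dotted paths. A rough, illustrative equivalent of the importutils helper it relies on (the real implementation lives in oslo_utils):

    import importlib

    def import_class(dotted_path):
        # 'pkg.module.Class' -> import pkg.module, then fetch Class from it.
        module_name, _, class_name = dotted_path.rpartition('.')
        return getattr(importlib.import_module(module_name), class_name)

    # e.g. import_class('collections.OrderedDict') is collections.OrderedDict

On the command line these surface as subcommands, e.g. neutron-debug --config-file /etc/neutron/l3_agent.ini probe-create <network-id> (the config path here is illustrative).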
diff --git a/neutron/extensions/__init__.py b/neutron/extensions/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/extensions/address_scope.py b/neutron/extensions/address_scope.py
deleted file mode 100644 (file)
index 2fe7514..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright (c) 2015 Huawei Technologies Co.,LTD.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import base
-from neutron.common import exceptions as nexception
-from neutron import manager
-import six
-
-ADDRESS_SCOPE = 'address_scope'
-ADDRESS_SCOPES = '%ss' % ADDRESS_SCOPE
-ADDRESS_SCOPE_ID = 'address_scope_id'
-
-# Attribute Map
-RESOURCE_ATTRIBUTE_MAP = {
-    ADDRESS_SCOPES: {
-        'id': {'allow_post': False,
-               'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'name': {'allow_post': True,
-                 'allow_put': True,
-                 'default': '',
-                 'validate': {'type:string': attr.NAME_MAX_LEN},
-                 'is_visible': True},
-        'tenant_id': {'allow_post': True,
-                      'allow_put': False,
-                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
-                      'required_by_policy': True,
-                      'is_visible': True},
-        attr.SHARED: {'allow_post': True,
-                      'allow_put': True,
-                      'default': False,
-                      'convert_to': attr.convert_to_boolean,
-                      'is_visible': True,
-                      'required_by_policy': True,
-                      'enforce_policy': True},
-        'ip_version': {'allow_post': True, 'allow_put': False,
-                       'convert_to': attr.convert_to_int,
-                       'validate': {'type:values': [4, 6]},
-                       'is_visible': True},
-    },
-    attr.SUBNETPOOLS: {
-        ADDRESS_SCOPE_ID: {'allow_post': True,
-                           'allow_put': True,
-                           'default': attr.ATTR_NOT_SPECIFIED,
-                           'validate': {'type:uuid_or_none': None},
-                           'is_visible': True}
-    }
-}
-
-
-class AddressScopeNotFound(nexception.NotFound):
-    message = _("Address scope %(address_scope_id)s could not be found")
-
-
-class AddressScopeInUse(nexception.InUse):
-    message = _("Unable to complete operation on "
-                "address scope %(address_scope_id)s. There are one or more"
-                " subnet pools in use on the address scope")
-
-
-class AddressScopeUpdateError(nexception.BadRequest):
-    message = _("Unable to update address scope %(address_scope_id)s : "
-                "%(reason)s")
-
-
-class Address_scope(extensions.ExtensionDescriptor):
-    """Extension class supporting Address Scopes."""
-
-    @classmethod
-    def get_name(cls):
-        return "Address scope"
-
-    @classmethod
-    def get_alias(cls):
-        return "address-scope"
-
-    @classmethod
-    def get_description(cls):
-        return "Address scopes extension."
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-07-26T10:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
-        attr.PLURALS.update(dict(my_plurals))
-        plugin = manager.NeutronManager.get_plugin()
-        collection_name = ADDRESS_SCOPES.replace('_', '-')
-        params = RESOURCE_ATTRIBUTE_MAP.get(ADDRESS_SCOPES, dict())
-        controller = base.create_resource(collection_name,
-                                          ADDRESS_SCOPE,
-                                          plugin, params, allow_bulk=True,
-                                          allow_pagination=True,
-                                          allow_sorting=True)
-
-        ex = extensions.ResourceExtension(collection_name, controller,
-                                          attr_map=params)
-        return [ex]
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return RESOURCE_ATTRIBUTE_MAP
-        else:
-            return {}
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AddressScopePluginBase(object):
-
-    @abc.abstractmethod
-    def create_address_scope(self, context, address_scope):
-        pass
-
-    @abc.abstractmethod
-    def update_address_scope(self, context, id, address_scope):
-        pass
-
-    @abc.abstractmethod
-    def get_address_scope(self, context, id, fields=None):
-        pass
-
-    @abc.abstractmethod
-    def get_address_scopes(self, context, filters=None, fields=None,
-                           sorts=None, limit=None, marker=None,
-                           page_reverse=False):
-        pass
-
-    @abc.abstractmethod
-    def delete_address_scope(self, context, id):
-        pass
-
-    def get_address_scopes_count(self, context, filters=None):
-        raise NotImplementedError()
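
A request body accepted by the attribute map above might look like this (all values made up; 'id' is server-generated and rejected on POST):

    address_scope_request = {
        'address_scope': {
            'name': 'prod-ipv4',    # up to NAME_MAX_LEN characters
            'tenant_id': 'abc123',  # required by policy
            'shared': False,        # run through convert_to_boolean
            'ip_version': 4,        # validated against [4, 6]
        }
    }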
diff --git a/neutron/extensions/agent.py b/neutron/extensions/agent.py
deleted file mode 100644 (file)
index a5f1ea4..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import abc
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import base
-from neutron.common import exceptions
-from neutron import manager
-
-
-# Attribute Map
-RESOURCE_NAME = 'agent'
-RESOURCE_ATTRIBUTE_MAP = {
-    RESOURCE_NAME + 's': {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True},
-        'agent_type': {'allow_post': False, 'allow_put': False,
-                       'is_visible': True},
-        'binary': {'allow_post': False, 'allow_put': False,
-                   'is_visible': True},
-        'topic': {'allow_post': False, 'allow_put': False,
-                  'is_visible': True},
-        'host': {'allow_post': False, 'allow_put': False,
-                 'is_visible': True},
-        'admin_state_up': {'allow_post': False, 'allow_put': True,
-                           'convert_to': attr.convert_to_boolean,
-                           'is_visible': True},
-        'created_at': {'allow_post': False, 'allow_put': False,
-                       'is_visible': True},
-        'started_at': {'allow_post': False, 'allow_put': False,
-                       'is_visible': True},
-        'heartbeat_timestamp': {'allow_post': False, 'allow_put': False,
-                                'is_visible': True},
-        'alive': {'allow_post': False, 'allow_put': False,
-                  'is_visible': True},
-        'configurations': {'allow_post': False, 'allow_put': False,
-                           'is_visible': True},
-        'description': {'allow_post': False, 'allow_put': True,
-                        'is_visible': True,
-                        'validate': {
-                            'type:string_or_none': attr.DESCRIPTION_MAX_LEN}},
-    },
-}
-
-
-class AgentNotFound(exceptions.NotFound):
-    message = _("Agent %(id)s could not be found")
-
-
-class AgentNotFoundByTypeHost(exceptions.NotFound):
-    message = _("Agent with agent_type=%(agent_type)s and host=%(host)s "
-                "could not be found")
-
-
-class MultipleAgentFoundByTypeHost(exceptions.Conflict):
-    message = _("Multiple agents with agent_type=%(agent_type)s and "
-                "host=%(host)s found")
-
-
-class Agent(extensions.ExtensionDescriptor):
-    """Agent management extension."""
-
-    @classmethod
-    def get_name(cls):
-        return "agent"
-
-    @classmethod
-    def get_alias(cls):
-        return "agent"
-
-    @classmethod
-    def get_description(cls):
-        return "The agent management extension."
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-02-03T10:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
-        attr.PLURALS.update(dict(my_plurals))
-        plugin = manager.NeutronManager.get_plugin()
-        params = RESOURCE_ATTRIBUTE_MAP.get(RESOURCE_NAME + 's')
-        controller = base.create_resource(RESOURCE_NAME + 's',
-                                          RESOURCE_NAME,
-                                          plugin, params
-                                          )
-
-        ex = extensions.ResourceExtension(RESOURCE_NAME + 's',
-                                          controller)
-
-        return [ex]
-
-    def update_attributes_map(self, attributes):
-        super(Agent, self).update_attributes_map(
-            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return RESOURCE_ATTRIBUTE_MAP
-        else:
-            return {}
-
-
-class AgentPluginBase(object):
-    """REST API to operate the Agent.
-
-    All methods must be called in an admin context.
-    """
-
-    def create_agent(self, context, agent):
-        """Create agent.
-
-        This operation is not allowed via the REST API.
-        @raise exceptions.BadRequest:
-        """
-        raise exceptions.BadRequest()
-
-    @abc.abstractmethod
-    def delete_agent(self, context, id):
-        """Delete agent.
-
-        Agents register themselves when reporting state, but if an
-        agent stops reporting its status for a long time (for example,
-        because it died permanently), an admin can remove it. Agents
-        must be disabled before being removed.
-        """
-        pass
-
-    @abc.abstractmethod
-    def update_agent(self, context, agent):
-        """Disable or Enable the agent.
-
-        Discription also can be updated. Some agents cannot be disabled, such
-        as plugins, services. An error code should be reported in this case.
-        @raise exceptions.BadRequest:
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_agents(self, context, filters=None, fields=None):
-        pass
-
-    @abc.abstractmethod
-    def get_agent(self, context, id, fields=None):
-        pass
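
The my_plurals expression in get_resources above derives the singular by dropping the final character, which only works because every collection in these maps is a regular '-s' plural. A sketch:

    resource_map_keys = ['agents']  # stands in for RESOURCE_ATTRIBUTE_MAP.keys()
    my_plurals = [(key, key[:-1]) for key in resource_map_keys]
    assert my_plurals == [('agents', 'agent')]
    # Irregular plurals (e.g. 'policies') would need special-casing.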
diff --git a/neutron/extensions/allowedaddresspairs.py b/neutron/extensions/allowedaddresspairs.py
deleted file mode 100644 (file)
index 18884b3..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import webob.exc
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as nexception
-from oslo_config import cfg
-
-allowed_address_pair_opts = [
-    # TODO(limao): use the quota framework once it supports quotas for attributes
-    cfg.IntOpt('max_allowed_address_pair', default=10,
-               help=_("Maximum number of allowed address pairs")),
-]
-
-cfg.CONF.register_opts(allowed_address_pair_opts)
-
-
-class AllowedAddressPairsMissingIP(nexception.InvalidInput):
-    message = _("AllowedAddressPair must contain ip_address")
-
-
-class AddressPairAndPortSecurityRequired(nexception.Conflict):
-    message = _("Port Security must be enabled in order to have allowed "
-                "address pairs on a port.")
-
-
-class DuplicateAddressPairInRequest(nexception.InvalidInput):
-    message = _("Request contains duplicate address pair: "
-                "mac_address %(mac_address)s ip_address %(ip_address)s.")
-
-
-class AllowedAddressPairExhausted(nexception.BadRequest):
-    message = _("The number of allowed address pair "
-                "exceeds the maximum %(quota)s.")
-
-
-def _validate_allowed_address_pairs(address_pairs, valid_values=None):
-    unique_check = {}
-    try:
-        if len(address_pairs) > cfg.CONF.max_allowed_address_pair:
-            raise AllowedAddressPairExhausted(
-                quota=cfg.CONF.max_allowed_address_pair)
-    except TypeError:
-        raise webob.exc.HTTPBadRequest(
-            _("Allowed address pairs must be a list."))
-
-    for address_pair in address_pairs:
-        # mac_address is optional; if not set, the MAC of the port is used
-        if 'mac_address' in address_pair:
-            msg = attr._validate_mac_address(address_pair['mac_address'])
-            if msg:
-                raise webob.exc.HTTPBadRequest(msg)
-        if 'ip_address' not in address_pair:
-            raise AllowedAddressPairsMissingIP()
-
-        mac = address_pair.get('mac_address')
-        ip_address = address_pair['ip_address']
-        if (mac, ip_address) not in unique_check:
-            unique_check[(mac, ip_address)] = None
-        else:
-            raise DuplicateAddressPairInRequest(mac_address=mac,
-                                                ip_address=ip_address)
-
-        invalid_attrs = set(address_pair.keys()) - set(['mac_address',
-                                                        'ip_address'])
-        if invalid_attrs:
-            msg = (_("Unrecognized attribute(s) '%s'") %
-                   ', '.join(set(address_pair.keys()) -
-                             set(['mac_address', 'ip_address'])))
-            raise webob.exc.HTTPBadRequest(msg)
-
-        if '/' in ip_address:
-            msg = attr._validate_subnet(ip_address)
-        else:
-            msg = attr._validate_ip_address(ip_address)
-        if msg:
-            raise webob.exc.HTTPBadRequest(msg)
-
-attr.validators['type:validate_allowed_address_pairs'] = (
-    _validate_allowed_address_pairs)
-
-ADDRESS_PAIRS = 'allowed_address_pairs'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'ports': {
-        ADDRESS_PAIRS: {'allow_post': True, 'allow_put': True,
-                        'convert_list_to':
-                        attr.convert_kvp_list_to_dict,
-                        'validate': {'type:validate_allowed_address_pairs':
-                                     None},
-                        'enforce_policy': True,
-                        'default': attr.ATTR_NOT_SPECIFIED,
-                        'is_visible': True},
-    }
-}
-
-
-class Allowedaddresspairs(extensions.ExtensionDescriptor):
-    """Extension class supporting allowed address pairs."""
-
-    @classmethod
-    def get_name(cls):
-        return "Allowed Address Pairs"
-
-    @classmethod
-    def get_alias(cls):
-        return "allowed-address-pairs"
-
-    @classmethod
-    def get_description(cls):
-        return "Provides allowed address pairs"
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-07-23T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            attr.PLURALS.update({'allowed_address_pairs':
-                                 'allowed_address_pair'})
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
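
For illustration, payloads that pass or fail the validator above (addresses made up):

    # Accepted: ip_address is present, the MAC is optional, CIDRs are allowed.
    valid_pairs = [
        {'ip_address': '192.0.2.10'},
        {'ip_address': '198.51.100.0/24', 'mac_address': 'fa:16:3e:00:00:01'},
    ]

    # Rejected: the first entry lacks ip_address (AllowedAddressPairsMissingIP);
    # the last two duplicate each other (DuplicateAddressPairInRequest).
    invalid_pairs = [
        {'mac_address': 'fa:16:3e:00:00:01'},
        {'ip_address': '192.0.2.20'},
        {'ip_address': '192.0.2.20'},
    ]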
diff --git a/neutron/extensions/availability_zone.py b/neutron/extensions/availability_zone.py
deleted file mode 100644 (file)
index ec94dc4..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import abc
-
-from oslo_serialization import jsonutils
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import base
-from neutron.common import exceptions
-from neutron import manager
-
-
-AZ_HINTS_DB_LEN = 255
-
-
-# Resource-independent common methods
-def convert_az_list_to_string(az_list):
-    return jsonutils.dumps(az_list)
-
-
-def convert_az_string_to_list(az_string):
-    return jsonutils.loads(az_string) if az_string else []
-
-
-def _validate_availability_zone_hints(data, valid_value=None):
-    # Syntax check only here; the existence of the AZ is checked later.
-    msg = attr.validate_list_of_unique_strings(data)
-    if msg:
-        return msg
-    az_string = convert_az_list_to_string(data)
-    if len(az_string) > AZ_HINTS_DB_LEN:
-        msg = _("Too many availability_zone_hints specified")
-        raise exceptions.InvalidInput(error_message=msg)
-
-
-attr.validators['type:availability_zone_hints'] = (
-    _validate_availability_zone_hints)
-
-# Attribute Map
-RESOURCE_NAME = 'availability_zone'
-AVAILABILITY_ZONES = 'availability_zones'
-AZ_HINTS = 'availability_zone_hints'
-# name: name of availability zone (string)
-# resource: type of resource: 'network' or 'router'
-# state: state of availability zone, 'available' or 'unavailable',
-# which indicates whether users can use the availability zone.
-RESOURCE_ATTRIBUTE_MAP = {
-    AVAILABILITY_ZONES: {
-        'name': {'is_visible': True},
-        'resource': {'is_visible': True},
-        'state': {'is_visible': True}
-    }
-}
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'agents': {
-        RESOURCE_NAME: {'allow_post': False, 'allow_put': False,
-                        'is_visible': True}
-    }
-}
-
-
-class AvailabilityZoneNotFound(exceptions.NotFound):
-    message = _("AvailabilityZone %(availability_zone)s could not be found.")
-
-
-class Availability_zone(extensions.ExtensionDescriptor):
-    """Availability zone extension."""
-
-    @classmethod
-    def get_name(cls):
-        return "Availability Zone"
-
-    @classmethod
-    def get_alias(cls):
-        return "availability_zone"
-
-    @classmethod
-    def get_description(cls):
-        return "The availability zone extension."
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-01-01T10:00:00-00:00"
-
-    def get_required_extensions(self):
-        return ["agent"]
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
-        attr.PLURALS.update(dict(my_plurals))
-        plugin = manager.NeutronManager.get_plugin()
-        params = RESOURCE_ATTRIBUTE_MAP.get(AVAILABILITY_ZONES)
-        controller = base.create_resource(AVAILABILITY_ZONES,
-                                          RESOURCE_NAME, plugin, params)
-
-        ex = extensions.ResourceExtension(AVAILABILITY_ZONES, controller)
-
-        return [ex]
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) +
-                        list(RESOURCE_ATTRIBUTE_MAP.items()))
-        else:
-            return {}
-
-
-class AvailabilityZonePluginBase(object):
-    """REST API to operate the Availability Zone."""
-
-    @abc.abstractmethod
-    def get_availability_zones(self, context, filters=None, fields=None,
-                               sorts=None, limit=None, marker=None,
-                               page_reverse=False):
-        """Return availability zones which a resource belongs to"""
-
-    @abc.abstractmethod
-    def validate_availability_zones(self, context, resource_type,
-                                    availability_zones):
-        """Verify that the availability zones exist."""
diff --git a/neutron/extensions/dhcpagentscheduler.py b/neutron/extensions/dhcpagentscheduler.py
deleted file mode 100644 (file)
index ea636e6..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import base
-from neutron.api.v2 import resource
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.extensions import agent
-from neutron import manager
-from neutron import policy
-from neutron import wsgi
-
-DHCP_NET = 'dhcp-network'
-DHCP_NETS = DHCP_NET + 's'
-DHCP_AGENT = 'dhcp-agent'
-DHCP_AGENTS = DHCP_AGENT + 's'
-
-
-class NetworkSchedulerController(wsgi.Controller):
-    def index(self, request, **kwargs):
-        plugin = manager.NeutronManager.get_plugin()
-        policy.enforce(request.context,
-                       "get_%s" % DHCP_NETS,
-                       {})
-        return plugin.list_networks_on_dhcp_agent(
-            request.context, kwargs['agent_id'])
-
-    def create(self, request, body, **kwargs):
-        plugin = manager.NeutronManager.get_plugin()
-        policy.enforce(request.context,
-                       "create_%s" % DHCP_NET,
-                       {})
-        agent_id = kwargs['agent_id']
-        network_id = body['network_id']
-        result = plugin.add_network_to_dhcp_agent(request.context, agent_id,
-                                                  network_id)
-        notify(request.context, 'dhcp_agent.network.add', network_id, agent_id)
-        return result
-
-    def delete(self, request, id, **kwargs):
-        plugin = manager.NeutronManager.get_plugin()
-        policy.enforce(request.context,
-                       "delete_%s" % DHCP_NET,
-                       {})
-        agent_id = kwargs['agent_id']
-        result = plugin.remove_network_from_dhcp_agent(request.context,
-                                                       agent_id, id)
-        notify(request.context, 'dhcp_agent.network.remove', id, agent_id)
-        return result
-
-
-class DhcpAgentsHostingNetworkController(wsgi.Controller):
-    def index(self, request, **kwargs):
-        plugin = manager.NeutronManager.get_plugin()
-        policy.enforce(request.context,
-                       "get_%s" % DHCP_AGENTS,
-                       {})
-        return plugin.list_dhcp_agents_hosting_network(
-            request.context, kwargs['network_id'])
-
-
-class Dhcpagentscheduler(extensions.ExtensionDescriptor):
-    """Extension class supporting dhcp agent scheduler.
-    """
-
-    @classmethod
-    def get_name(cls):
-        return "DHCP Agent Scheduler"
-
-    @classmethod
-    def get_alias(cls):
-        return constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS
-
-    @classmethod
-    def get_description(cls):
-        return "Schedule networks among dhcp agents"
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-02-07T10:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        exts = []
-        parent = dict(member_name="agent",
-                      collection_name="agents")
-        controller = resource.Resource(NetworkSchedulerController(),
-                                       base.FAULT_MAP)
-        exts.append(extensions.ResourceExtension(
-            DHCP_NETS, controller, parent))
-
-        parent = dict(member_name="network",
-                      collection_name="networks")
-
-        controller = resource.Resource(DhcpAgentsHostingNetworkController(),
-                                       base.FAULT_MAP)
-        exts.append(extensions.ResourceExtension(
-            DHCP_AGENTS, controller, parent))
-        return exts
-
-    def get_extended_resources(self, version):
-        return {}
-
-
-class InvalidDHCPAgent(agent.AgentNotFound):
-    message = _("Agent %(id)s is not a valid DHCP Agent or has been disabled")
-
-
-class NetworkHostedByDHCPAgent(exceptions.Conflict):
-    message = _("The network %(network_id)s has been already hosted"
-                " by the DHCP Agent %(agent_id)s.")
-
-
-class NetworkNotHostedByDhcpAgent(exceptions.Conflict):
-    message = _("The network %(network_id)s is not hosted"
-                " by the DHCP agent %(agent_id)s.")
-
-
-class DhcpAgentSchedulerPluginBase(object):
-    """REST API to operate the DHCP agent scheduler.
-
-    All methods must be called in an admin context.
-    """
-
-    @abc.abstractmethod
-    def add_network_to_dhcp_agent(self, context, id, network_id):
-        pass
-
-    @abc.abstractmethod
-    def remove_network_from_dhcp_agent(self, context, id, network_id):
-        pass
-
-    @abc.abstractmethod
-    def list_networks_on_dhcp_agent(self, context, id):
-        pass
-
-    @abc.abstractmethod
-    def list_dhcp_agents_hosting_network(self, context, network_id):
-        pass
-
-
-def notify(context, action, network_id, agent_id):
-    info = {'id': agent_id, 'network_id': network_id}
-    notifier = n_rpc.get_notifier('network')
-    notifier.info(context, action, {'agent': info})
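
The create and delete handlers above publish an info-level event through notify(), whose payload nests the agent and network IDs. A sketch of the payload shape (the IDs are made up):

    def build_notification(action, network_id, agent_id):
        # Mirrors the payload assembled in notify() above.
        return action, {'agent': {'id': agent_id, 'network_id': network_id}}

    event_type, payload = build_notification(
        'dhcp_agent.network.add', 'net-uuid', 'agent-uuid')
    assert payload == {'agent': {'id': 'agent-uuid', 'network_id': 'net-uuid'}}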
diff --git a/neutron/extensions/dns.py b/neutron/extensions/dns.py
deleted file mode 100644 (file)
index 93e1ffb..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright (c) 2015 Rackspace
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-import six
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as n_exc
-
-DNS_LABEL_MAX_LEN = 63
-DNS_LABEL_REGEX = "[a-z0-9-]{1,%d}$" % DNS_LABEL_MAX_LEN
-FQDN_MAX_LEN = 255
-DNS_DOMAIN_DEFAULT = 'openstacklocal.'
-
-
-def _validate_dns_name(data, max_len=FQDN_MAX_LEN):
-    msg = _validate_dns_format(data, max_len)
-    if msg:
-        return msg
-    request_dns_name = _get_request_dns_name(data)
-    if request_dns_name:
-        msg = _validate_dns_name_with_dns_domain(request_dns_name)
-        if msg:
-            return msg
-
-
-def _validate_dns_format(data, max_len=FQDN_MAX_LEN):
-    # NOTE: A per-label regex is used instead of one for the entire FQDN
-    # because it's easier to get right. The logic should validate that the
-    # dns_name matches RFC 1123 (section 2.1) and RFC 952.
-    if not data:
-        return
-    try:
-        # Trailing periods are allowed to indicate that a name is fully
-        # qualified per RFC 1034 (page 7).
-        trimmed = data if not data.endswith('.') else data[:-1]
-        if len(trimmed) > 255:
-            raise TypeError(
-                _("'%s' exceeds the 255 character FQDN limit") % trimmed)
-        names = trimmed.split('.')
-        for name in names:
-            if not name:
-                raise TypeError(_("Encountered an empty component."))
-            if name.endswith('-') or name[0] == '-':
-                raise TypeError(
-                    _("Name '%s' must not start or end with a hyphen.") % name)
-            if not re.match(DNS_LABEL_REGEX, name):
-                raise TypeError(
-                    _("Name '%s' must be 1-63 characters long, each of "
-                      "which can only be alphanumeric or a hyphen.") % name)
-        # RFC 1123 hints that a TLD can't be all numeric. The last label
-        # is the TLD if the name is an FQDN.
-        if len(names) > 1 and re.match("^[0-9]+$", names[-1]):
-            raise TypeError(_("TLD '%s' must not be all numeric") % names[-1])
-    except TypeError as e:
-        msg = _("'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s") % {
-            'data': data, 'reason': str(e)}
-        return msg
-
-
-def _validate_dns_name_with_dns_domain(request_dns_name):
-    # If a PQDN was passed, make sure the FQDN that will be generated is of
-    # legal size
-    dns_domain = _get_dns_domain()
-    higher_labels = dns_domain
-    if dns_domain:
-        higher_labels = '.%s' % dns_domain
-    higher_labels_len = len(higher_labels)
-    dns_name_len = len(request_dns_name)
-    if not request_dns_name.endswith('.'):
-        if dns_name_len + higher_labels_len > FQDN_MAX_LEN:
-            msg = _("The dns_name passed is a PQDN and its size is "
-                    "'%(dns_name_len)s'. The dns_domain option in "
-                    "neutron.conf is set to %(dns_domain)s, with a "
-                    "length of '%(higher_labels_len)s'. When the two are "
-                    "concatenated to form a FQDN (with a '.' at the end), "
-                    "the resulting length exceeds the maximum size "
-                    "of '%(fqdn_max_len)s'"
-                    ) % {'dns_name_len': dns_name_len,
-                         'dns_domain': cfg.CONF.dns_domain,
-                         'higher_labels_len': higher_labels_len,
-                         'fqdn_max_len': FQDN_MAX_LEN}
-            return msg
-        return
-
-    # An FQDN was passed
-    if (dns_name_len <= higher_labels_len or not
-        request_dns_name.endswith(higher_labels)):
-        msg = _("The dns_name passed is a FQDN. Its higher level labels "
-                "must be equal to the dns_domain option in neutron.conf, "
-                "that has been set to '%(dns_domain)s'. It must also "
-                "include one or more valid DNS labels to the left "
-                "of '%(dns_domain)s'") % {'dns_domain':
-                                          cfg.CONF.dns_domain}
-        return msg
-
-
-def _get_dns_domain():
-    if not cfg.CONF.dns_domain:
-        return ''
-    if cfg.CONF.dns_domain.endswith('.'):
-        return cfg.CONF.dns_domain
-    return '%s.' % cfg.CONF.dns_domain
-
-
-def _get_request_dns_name(data):
-    dns_domain = _get_dns_domain()
-    if dns_domain and dns_domain != DNS_DOMAIN_DEFAULT:
-        return data
-    return ''
-
-
-def convert_to_lowercase(data):
-    if isinstance(data, six.string_types):
-        return data.lower()
-    msg = _("'%s' cannot be converted to lowercase string") % data
-    raise n_exc.InvalidInput(error_message=msg)
-
-
-attr.validators['type:dns_name'] = _validate_dns_name
-
-
-DNSNAME = 'dns_name'
-DNSASSIGNMENT = 'dns_assignment'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'ports': {
-        DNSNAME: {'allow_post': True, 'allow_put': True,
-                  'default': '',
-                  'convert_to': convert_to_lowercase,
-                  'validate': {'type:dns_name': FQDN_MAX_LEN},
-                  'is_visible': True},
-        DNSASSIGNMENT: {'allow_post': False, 'allow_put': False,
-                        'is_visible': True},
-    }
-}
-
-
-class Dns(extensions.ExtensionDescriptor):
-    """Extension class supporting DNS Integration."""
-
-    @classmethod
-    def get_name(cls):
-        return "DNS Integration"
-
-    @classmethod
-    def get_alias(cls):
-        return "dns-integration"
-
-    @classmethod
-    def get_description(cls):
-        return "Provides integration with internal DNS."
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-08-15T18:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
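
The per-label checks in _validate_dns_format above amount to: non-empty, no leading or trailing hyphen, and a match against DNS_LABEL_REGEX (dns_name values are lowercased first by convert_to_lowercase, so the lowercase-only pattern suffices). A self-contained restatement:

    import re

    DNS_LABEL_MAX_LEN = 63
    DNS_LABEL_REGEX = "[a-z0-9-]{1,%d}$" % DNS_LABEL_MAX_LEN

    def label_ok(name):
        # Mirrors the tests applied to each dot-separated component above.
        return (bool(name)
                and not name.startswith('-')
                and not name.endswith('-')
                and re.match(DNS_LABEL_REGEX, name) is not None)

    assert label_ok('host-1')
    assert not label_ok('-host')   # leading hyphen
    assert not label_ok('a' * 64)  # longer than 63 characters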
diff --git a/neutron/extensions/dvr.py b/neutron/extensions/dvr.py
deleted file mode 100644 (file)
index 28b549b..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import six
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions
-
-DISTRIBUTED = 'distributed'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'routers': {
-        DISTRIBUTED: {'allow_post': True,
-                      'allow_put': True,
-                      'is_visible': True,
-                      'default': attributes.ATTR_NOT_SPECIFIED,
-                      'convert_to': attributes.convert_to_boolean_if_not_none,
-                      'enforce_policy': True},
-    }
-}
-
-
-class DVRMacAddressNotFound(exceptions.NotFound):
-    message = _("Distributed Virtual Router Mac Address for "
-                "host %(host)s does not exist.")
-
-
-class MacAddressGenerationFailure(exceptions.ServiceUnavailable):
-    message = _("Unable to generate unique DVR mac for host %(host)s.")
-
-
-class Dvr(extensions.ExtensionDescriptor):
-    """Extension class supporting distributed virtual router."""
-
-    @classmethod
-    def get_name(cls):
-        return "Distributed Virtual Router"
-
-    @classmethod
-    def get_alias(cls):
-        return constants.L3_DISTRIBUTED_EXT_ALIAS
-
-    @classmethod
-    def get_description(cls):
-        return "Enables configuration of Distributed Virtual Routers."
-
-    @classmethod
-    def get_updated(cls):
-        return "2014-06-1T10:00:00-00:00"
-
-    def get_required_extensions(self):
-        return ["router"]
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        return []
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
-
-
-@six.add_metaclass(abc.ABCMeta)
-class DVRMacAddressPluginBase(object):
-
-    @abc.abstractmethod
-    def get_dvr_mac_address_list(self, context):
-        pass
-
-    @abc.abstractmethod
-    def get_dvr_mac_address_by_host(self, context, host):
-        pass
diff --git a/neutron/extensions/external_net.py b/neutron/extensions/external_net.py
deleted file mode 100644 (file)
index ebaab5d..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as nexception
-
-
-class ExternalNetworkInUse(nexception.InUse):
-    message = _("External network %(net_id)s cannot be updated to be made "
-                "non-external, since it has existing gateway ports")
-
-
-# For backward compatibility the 'router' prefix is kept.
-EXTERNAL = 'router:external'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'networks': {EXTERNAL: {'allow_post': True,
-                            'allow_put': True,
-                            'default': False,
-                            'is_visible': True,
-                            'convert_to': attr.convert_to_boolean,
-                            'enforce_policy': True,
-                            'required_by_policy': True}}}
-
-
-class External_net(extensions.ExtensionDescriptor):
-
-    @classmethod
-    def get_name(cls):
-        return "Neutron external network"
-
-    @classmethod
-    def get_alias(cls):
-        return "external-net"
-
-    @classmethod
-    def get_description(cls):
-        return _("Adds external network attribute to network resource.")
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-01-14T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/extensions/extra_dhcp_opt.py b/neutron/extensions/extra_dhcp_opt.py
deleted file mode 100644 (file)
index 1ad943d..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions
-
-
-# ExtraDHcpOpts Exceptions
-class ExtraDhcpOptNotFound(exceptions.NotFound):
-    message = _("ExtraDhcpOpt %(id)s could not be found")
-
-
-class ExtraDhcpOptBadData(exceptions.InvalidInput):
-    message = _("Invalid data format for extra-dhcp-opt: %(data)s")
-
-
-# Valid blank extra dhcp opts
-VALID_BLANK_EXTRA_DHCP_OPTS = ('router', 'classless-static-route')
-
-# Common definitions for maximum string field length
-DHCP_OPT_NAME_MAX_LEN = 64
-DHCP_OPT_VALUE_MAX_LEN = 255
-
-EXTRA_DHCP_OPT_KEY_SPECS = {
-    'id': {'type:uuid': None, 'required': False},
-    'opt_name': {'type:not_empty_string': DHCP_OPT_NAME_MAX_LEN,
-                 'required': True},
-    'opt_value': {'type:not_empty_string_or_none': DHCP_OPT_VALUE_MAX_LEN,
-                  'required': True},
-    'ip_version': {'convert_to': attr.convert_to_int,
-                   'type:values': [4, 6],
-                   'required': False}
-}
-
-
-def _validate_extra_dhcp_opt(data, key_specs=None):
-    if data is not None:
-        if not isinstance(data, list):
-            raise ExtraDhcpOptBadData(data=data)
-        for d in data:
-            if d['opt_name'] in VALID_BLANK_EXTRA_DHCP_OPTS:
-                msg = attr._validate_string_or_none(d['opt_value'],
-                                                    DHCP_OPT_VALUE_MAX_LEN)
-            else:
-                msg = attr._validate_dict(d, key_specs)
-            if msg:
-                raise ExtraDhcpOptBadData(data=msg)
-
-
-attr.validators['type:list_of_extra_dhcp_opts'] = _validate_extra_dhcp_opt
-
-# Attribute Map
-EXTRADHCPOPTS = 'extra_dhcp_opts'
-
-CLIENT_ID = "client-id"
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'ports': {
-        EXTRADHCPOPTS: {
-            'allow_post': True,
-            'allow_put': True,
-            'is_visible': True,
-            'default': None,
-            'validate': {
-                'type:list_of_extra_dhcp_opts': EXTRA_DHCP_OPT_KEY_SPECS
-            }
-        }
-    }
-}
-
-
-class Extra_dhcp_opt(extensions.ExtensionDescriptor):
-    @classmethod
-    def get_name(cls):
-        return "Neutron Extra DHCP opts"
-
-    @classmethod
-    def get_alias(cls):
-        return "extra_dhcp_opt"
-
-    @classmethod
-    def get_description(cls):
-        return ("Extra options configuration for DHCP. "
-                "For example PXE boot options to DHCP clients can "
-                "be specified (e.g. tftp-server, server-ip-address, "
-                "bootfile-name)")
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-03-17T12:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
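
An extra_dhcp_opts list satisfying the key specs above (the server address and file name are made up). A None opt_value is accepted only for the names listed in VALID_BLANK_EXTRA_DHCP_OPTS:

    extra_dhcp_opts = [
        {'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
        {'opt_name': 'tftp-server', 'opt_value': '192.0.2.7', 'ip_version': 4},
        {'opt_name': 'router', 'opt_value': None},  # blank value allowed here
    ]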
diff --git a/neutron/extensions/extraroute.py b/neutron/extensions/extraroute.py
deleted file mode 100644 (file)
index 4c2e96e..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as nexception
-
-
-# Extra Routes Exceptions
-class InvalidRoutes(nexception.InvalidInput):
-    message = _("Invalid format for routes: %(routes)s, %(reason)s")
-
-
-class RouterInterfaceInUseByRoute(nexception.InUse):
-    message = _("Router interface for subnet %(subnet_id)s on router "
-                "%(router_id)s cannot be deleted, as it is required "
-                "by one or more routes.")
-
-
-class RoutesExhausted(nexception.BadRequest):
-    message = _("Unable to complete operation for %(router_id)s. "
-                "The number of routes exceeds the maximum %(quota)s.")
-
-# Attribute Map
-EXTENDED_ATTRIBUTES_2_0 = {
-    'routers': {
-        'routes': {'allow_post': False, 'allow_put': True,
-                   'validate': {'type:hostroutes': None},
-                   'convert_to': attr.convert_none_to_empty_list,
-                   'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED},
-    }
-}
-
-
-class Extraroute(extensions.ExtensionDescriptor):
-
-    @classmethod
-    def get_name(cls):
-        return "Neutron Extra Route"
-
-    @classmethod
-    def get_alias(cls):
-        return "extraroute"
-
-    @classmethod
-    def get_description(cls):
-        return "Extra routes configuration for L3 router"
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-02-01T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            attr.PLURALS.update({'routes': 'route'})
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
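
Under the attribute map above, routes are updated by a PUT on the router; each entry pairs a destination CIDR with a nexthop address the router can reach. A sketch with made-up addresses:

    router_update = {
        'router': {
            'routes': [
                {'destination': '198.51.100.0/24', 'nexthop': '10.0.0.11'},
            ]
        }
    }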
diff --git a/neutron/extensions/flavors.py b/neutron/extensions/flavors.py
deleted file mode 100644 (file)
index a8167b3..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import base
-from neutron.api.v2 import resource_helper
-from neutron.common import exceptions as nexception
-from neutron import manager
-from neutron.plugins.common import constants
-
-
-# Flavor Exceptions
-class FlavorNotFound(nexception.NotFound):
-    message = _("Flavor %(flavor_id)s could not be found.")
-
-
-class FlavorInUse(nexception.InUse):
-    message = _("Flavor %(flavor_id)s is used by some service instance.")
-
-
-class ServiceProfileNotFound(nexception.NotFound):
-    message = _("Service Profile %(sp_id)s could not be found.")
-
-
-class ServiceProfileInUse(nexception.InUse):
-    message = _("Service Profile %(sp_id)s is used by some service instance.")
-
-
-class FlavorServiceProfileBindingExists(nexception.Conflict):
-    message = _("Service Profile %(sp_id)s is already associated "
-                "with flavor %(fl_id)s.")
-
-
-class FlavorServiceProfileBindingNotFound(nexception.NotFound):
-    message = _("Service Profile %(sp_id)s is not associated "
-                "with flavor %(fl_id)s.")
-
-
-class ServiceProfileDriverNotFound(nexception.NotFound):
-    message = _("Service Profile driver %(driver)s could not be found.")
-
-
-class ServiceProfileEmpty(nexception.InvalidInput):
-    message = _("Service Profile needs either a driver or metainfo.")
-
-
-class FlavorDisabled(nexception.ServiceUnavailable):
-    message = _("Flavor is not enabled.")
-
-
-class ServiceProfileDisabled(nexception.ServiceUnavailable):
-    message = _("Service Profile is not enabled.")
-
-
-class InvalidFlavorServiceType(nexception.InvalidInput):
-    message = _("Invalid service type %(service_type)s.")
-
-
-def _validate_flavor_service_type(validate_type, valid_values=None):
-    """Ensure requested flavor service type plugin is loaded."""
-    plugins = manager.NeutronManager.get_service_plugins()
-    if validate_type not in plugins:
-        raise InvalidFlavorServiceType(service_type=validate_type)
-
-attr.validators['type:validate_flavor_service_type'] = (
-    _validate_flavor_service_type)
-
-FLAVORS = 'flavors'
-SERVICE_PROFILES = 'service_profiles'
-FLAVORS_PREFIX = ""
-
-RESOURCE_ATTRIBUTE_MAP = {
-    FLAVORS: {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'name': {'allow_post': True, 'allow_put': True,
-                 'validate': {'type:string': attr.NAME_MAX_LEN},
-                 'is_visible': True, 'default': ''},
-        'description': {'allow_post': True, 'allow_put': True,
-                        'validate': {'type:string_or_none':
-                                     attr.LONG_DESCRIPTION_MAX_LEN},
-                        'is_visible': True, 'default': ''},
-        'service_type': {'allow_post': True, 'allow_put': False,
-                         'validate':
-                         {'type:validate_flavor_service_type': None},
-                         'is_visible': True},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
-                      'is_visible': True},
-        'service_profiles': {'allow_post': True, 'allow_put': True,
-                             'validate': {'type:uuid_list': None},
-                             'is_visible': True, 'default': []},
-        'enabled': {'allow_post': True, 'allow_put': True,
-                    'convert_to': attr.convert_to_boolean_if_not_none,
-                    'default': True,
-                    'is_visible': True},
-    },
-    SERVICE_PROFILES: {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'description': {'allow_post': True, 'allow_put': True,
-                        'validate': {'type:string_or_none':
-                                     attr.LONG_DESCRIPTION_MAX_LEN},
-                        'is_visible': True, 'default': ''},
-        'driver': {'allow_post': True, 'allow_put': True,
-                   'validate': {'type:string':
-                                attr.LONG_DESCRIPTION_MAX_LEN},
-                   'is_visible': True,
-                   'default': ''},
-        'metainfo': {'allow_post': True, 'allow_put': True,
-                     'is_visible': True,
-                     'default': ''},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
-                      'is_visible': True},
-        'enabled': {'allow_post': True, 'allow_put': True,
-                    'convert_to': attr.convert_to_boolean_if_not_none,
-                    'is_visible': True, 'default': True},
-    },
-}
-
-
-SUB_RESOURCE_ATTRIBUTE_MAP = {
-    'next_providers': {
-        'parent': {'collection_name': 'flavors',
-                   'member_name': 'flavor'},
-        'parameters': {'provider': {'allow_post': False,
-                                    'allow_put': False,
-                                    'is_visible': True},
-                       'driver': {'allow_post': False,
-                                  'allow_put': False,
-                                  'is_visible': True},
-                       'metainfo': {'allow_post': False,
-                                    'allow_put': False,
-                                    'is_visible': True},
-                       'tenant_id': {'allow_post': True, 'allow_put': False,
-                                     'required_by_policy': True,
-                                     'validate': {'type:string':
-                                                  attr.TENANT_ID_MAX_LEN},
-                                     'is_visible': True}}
-    },
-    'service_profiles': {
-        'parent': {'collection_name': 'flavors',
-                   'member_name': 'flavor'},
-        'parameters': {'id': {'allow_post': True, 'allow_put': False,
-                              'validate': {'type:uuid': None},
-                              'is_visible': True},
-                       'tenant_id': {'allow_post': True, 'allow_put': False,
-                                     'required_by_policy': True,
-                                     'validate': {'type:string':
-                                                  attr.TENANT_ID_MAX_LEN},
-                                     'is_visible': True}}
-    }
-}
-
-
-class Flavors(extensions.ExtensionDescriptor):
-
-    @classmethod
-    def get_name(cls):
-        return "Neutron Service Flavors"
-
-    @classmethod
-    def get_alias(cls):
-        return "flavors"
-
-    @classmethod
-    def get_description(cls):
-        return "Flavor specification for Neutron advanced services"
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-09-17T10:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        plural_mappings = resource_helper.build_plural_mappings(
-            {}, RESOURCE_ATTRIBUTE_MAP)
-        attr.PLURALS.update(plural_mappings)
-        resources = resource_helper.build_resource_info(
-            plural_mappings,
-            RESOURCE_ATTRIBUTE_MAP,
-            constants.FLAVORS)
-        plugin = manager.NeutronManager.get_service_plugins()[
-            constants.FLAVORS]
-        for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
-            # Derive the singular resource name by stripping the trailing
-            # 's'; the sub-resource collection names above are assumed to
-            # pluralize that way (e.g. service_profiles -> service_profile)
-            resource_name = collection_name[:-1]
-            parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent')
-            params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get(
-                'parameters')
-
-            controller = base.create_resource(collection_name, resource_name,
-                                              plugin, params,
-                                              allow_bulk=True,
-                                              parent=parent)
-
-            resource = extensions.ResourceExtension(
-                collection_name,
-                controller, parent,
-                path_prefix=FLAVORS_PREFIX,
-                attr_map=params)
-            resources.append(resource)
-
-        return resources
-
-    def update_attributes_map(self, attributes):
-        super(Flavors, self).update_attributes_map(
-            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return RESOURCE_ATTRIBUTE_MAP
-        else:
-            return {}
diff --git a/neutron/extensions/l3.py b/neutron/extensions/l3.py
deleted file mode 100644 (file)
index 1c4f0ff..0000000
+++ /dev/null
@@ -1,271 +0,0 @@
-# Copyright 2012 VMware, Inc.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import resource_helper
-from neutron.common import exceptions as nexception
-from neutron.plugins.common import constants
-
-
-# L3 Exceptions
-class RouterNotFound(nexception.NotFound):
-    message = _("Router %(router_id)s could not be found")
-
-
-class RouterInUse(nexception.InUse):
-    message = _("Router %(router_id)s %(reason)s")
-
-    def __init__(self, **kwargs):
-        if 'reason' not in kwargs:
-            kwargs['reason'] = "still has ports"
-        super(RouterInUse, self).__init__(**kwargs)
-
-
-class RouterInterfaceNotFound(nexception.NotFound):
-    message = _("Router %(router_id)s does not have "
-                "an interface with id %(port_id)s")
-
-
-class RouterInterfaceNotFoundForSubnet(nexception.NotFound):
-    message = _("Router %(router_id)s has no interface "
-                "on subnet %(subnet_id)s")
-
-
-class RouterInterfaceInUseByFloatingIP(nexception.InUse):
-    message = _("Router interface for subnet %(subnet_id)s on router "
-                "%(router_id)s cannot be deleted, as it is required "
-                "by one or more floating IPs.")
-
-
-class FloatingIPNotFound(nexception.NotFound):
-    message = _("Floating IP %(floatingip_id)s could not be found")
-
-
-class ExternalGatewayForFloatingIPNotFound(nexception.NotFound):
-    message = _("External network %(external_network_id)s is not reachable "
-                "from subnet %(subnet_id)s.  Therefore, cannot associate "
-                "Port %(port_id)s with a Floating IP.")
-
-
-class FloatingIPPortAlreadyAssociated(nexception.InUse):
-    message = _("Cannot associate floating IP %(floating_ip_address)s "
-                "(%(fip_id)s) with port %(port_id)s "
-                "using fixed IP %(fixed_ip)s, as that fixed IP already "
-                "has a floating IP on external network %(net_id)s.")
-
-
-class RouterExternalGatewayInUseByFloatingIp(nexception.InUse):
-    message = _("Gateway cannot be updated for router %(router_id)s, since a "
-                "gateway to external network %(net_id)s is required by one or "
-                "more floating IPs.")
-
-ROUTERS = 'routers'
-EXTERNAL_GW_INFO = 'external_gateway_info'
-FLOATINGIPS = 'floatingips'
-
-RESOURCE_ATTRIBUTE_MAP = {
-    ROUTERS: {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'name': {'allow_post': True, 'allow_put': True,
-                 'validate': {'type:string': attr.NAME_MAX_LEN},
-                 'is_visible': True, 'default': ''},
-        'admin_state_up': {'allow_post': True, 'allow_put': True,
-                           'default': True,
-                           'convert_to': attr.convert_to_boolean,
-                           'is_visible': True},
-        'status': {'allow_post': False, 'allow_put': False,
-                   'is_visible': True},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
-                      'is_visible': True},
-        EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True,
-                           'is_visible': True, 'default': None,
-                           'enforce_policy': True,
-                           'validate': {
-                               'type:dict_or_nodata': {
-                                   'network_id': {'type:uuid': None,
-                                                  'required': True},
-                                   'external_fixed_ips': {
-                                       'convert_list_to':
-                                       attr.convert_kvp_list_to_dict,
-                                       'type:fixed_ips': None,
-                                       'default': None,
-                                       'required': False,
-                                   }
-                               }
-                           }}
-    },
-    FLOATINGIPS: {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'floating_ip_address': {'allow_post': True, 'allow_put': False,
-                                'validate': {'type:ip_address_or_none': None},
-                                'is_visible': True, 'default': None,
-                                'enforce_policy': True},
-        'subnet_id': {'allow_post': True, 'allow_put': False,
-                      'validate': {'type:uuid_or_none': None},
-                      'is_visible': False,  # Use False for input only attr
-                      'default': None},
-        'floating_network_id': {'allow_post': True, 'allow_put': False,
-                                'validate': {'type:uuid': None},
-                                'is_visible': True},
-        'router_id': {'allow_post': False, 'allow_put': False,
-                      'validate': {'type:uuid_or_none': None},
-                      'is_visible': True, 'default': None},
-        'port_id': {'allow_post': True, 'allow_put': True,
-                    'validate': {'type:uuid_or_none': None},
-                    'is_visible': True, 'default': None,
-                    'required_by_policy': True},
-        'fixed_ip_address': {'allow_post': True, 'allow_put': True,
-                             'validate': {'type:ip_address_or_none': None},
-                             'is_visible': True, 'default': None},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
-                      'is_visible': True},
-        'status': {'allow_post': False, 'allow_put': False,
-                   'is_visible': True},
-    },
-}
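-
-# Illustrative router-create body (hypothetical network UUID) matching the
-# map above: 'id' and 'status' are read-only, and external_fixed_ips inside
-# external_gateway_info is optional.
-EXAMPLE_ROUTER_CREATE = {
-    'router': {
-        'name': 'gw-router',
-        'admin_state_up': True,
-        'external_gateway_info': {
-            'network_id': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
-        },
-    },
-}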
-
-l3_quota_opts = [
-    cfg.IntOpt('quota_router',
-               default=10,
-               help=_('Number of routers allowed per tenant. '
-                      'A negative value means unlimited.')),
-    cfg.IntOpt('quota_floatingip',
-               default=50,
-               help=_('Number of floating IPs allowed per tenant. '
-                      'A negative value means unlimited.')),
-]
-cfg.CONF.register_opts(l3_quota_opts, 'QUOTAS')
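-
-# These options land in the [QUOTAS] section of neutron.conf; an
-# illustrative override (a negative value lifts the limit, per the help
-# text above):
-#
-#     [QUOTAS]
-#     quota_router = 20
-#     quota_floatingip = -1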
-
-
-class L3(extensions.ExtensionDescriptor):
-
-    @classmethod
-    def get_name(cls):
-        return "Neutron L3 Router"
-
-    @classmethod
-    def get_alias(cls):
-        return "router"
-
-    @classmethod
-    def get_description(cls):
-        return ("Router abstraction for basic L3 forwarding"
-                " between L2 Neutron networks and access to external"
-                " networks via a NAT gateway.")
-
-    @classmethod
-    def get_updated(cls):
-        return "2012-07-20T10:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        plural_mappings = resource_helper.build_plural_mappings(
-            {}, RESOURCE_ATTRIBUTE_MAP)
-        plural_mappings['external_fixed_ips'] = 'external_fixed_ip'
-        attr.PLURALS.update(plural_mappings)
-        action_map = {'router': {'add_router_interface': 'PUT',
-                                 'remove_router_interface': 'PUT'}}
-        return resource_helper.build_resource_info(plural_mappings,
-                                                   RESOURCE_ATTRIBUTE_MAP,
-                                                   constants.L3_ROUTER_NAT,
-                                                   action_map=action_map,
-                                                   register_quota=True)
-
-    def update_attributes_map(self, attributes):
-        super(L3, self).update_attributes_map(
-            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return RESOURCE_ATTRIBUTE_MAP
-        else:
-            return {}
-
-
-class RouterPluginBase(object):
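-    # Note: unlike MeteringPluginBase in neutron/extensions/metering.py,
-    # this class does not set an abc.ABCMeta metaclass, so the
-    # @abc.abstractmethod markers below are documentary only and do not
-    # prevent instantiation of an incomplete subclass.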
-
-    @abc.abstractmethod
-    def create_router(self, context, router):
-        pass
-
-    @abc.abstractmethod
-    def update_router(self, context, id, router):
-        pass
-
-    @abc.abstractmethod
-    def get_router(self, context, id, fields=None):
-        pass
-
-    @abc.abstractmethod
-    def delete_router(self, context, id):
-        pass
-
-    @abc.abstractmethod
-    def get_routers(self, context, filters=None, fields=None,
-                    sorts=None, limit=None, marker=None, page_reverse=False):
-        pass
-
-    @abc.abstractmethod
-    def add_router_interface(self, context, router_id, interface_info):
-        pass
-
-    @abc.abstractmethod
-    def remove_router_interface(self, context, router_id, interface_info):
-        pass
-
-    @abc.abstractmethod
-    def create_floatingip(self, context, floatingip):
-        pass
-
-    @abc.abstractmethod
-    def update_floatingip(self, context, id, floatingip):
-        pass
-
-    @abc.abstractmethod
-    def get_floatingip(self, context, id, fields=None):
-        pass
-
-    @abc.abstractmethod
-    def delete_floatingip(self, context, id):
-        pass
-
-    @abc.abstractmethod
-    def get_floatingips(self, context, filters=None, fields=None,
-                        sorts=None, limit=None, marker=None,
-                        page_reverse=False):
-        pass
-
-    def get_routers_count(self, context, filters=None):
-        raise NotImplementedError()
-
-    def get_floatingips_count(self, context, filters=None):
-        raise NotImplementedError()
diff --git a/neutron/extensions/l3_ext_gw_mode.py b/neutron/extensions/l3_ext_gw_mode.py
deleted file mode 100644 (file)
index a9726a1..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attrs
-from neutron.extensions import l3
-
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'routers': {l3.EXTERNAL_GW_INFO:
-                {'allow_post': True,
-                 'allow_put': True,
-                 'is_visible': True,
-                 'default': None,
-                 'enforce_policy': True,
-                 'validate':
-                 {'type:dict_or_nodata':
-                  {'network_id': {'type:uuid': None, 'required': True},
-                   'enable_snat': {'type:boolean': None, 'required': False,
-                                   'convert_to': attrs.convert_to_boolean},
-                   'external_fixed_ips': {
-                       'convert_list_to': attrs.convert_kvp_list_to_dict,
-                       'validate': {'type:fixed_ips': None},
-                       'default': None,
-                       'required': False}
-                   }
-                  }}}}
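-
-
-# Illustrative router-update body (hypothetical UUID): with this extension
-# loaded, SNAT can be toggled per router through the extended
-# external_gateway_info.
-EXAMPLE_GW_UPDATE = {
-    'router': {
-        'external_gateway_info': {
-            'network_id': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
-            'enable_snat': False,
-        },
-    },
-}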
-
-
-class L3_ext_gw_mode(extensions.ExtensionDescriptor):
-
-    @classmethod
-    def get_name(cls):
-        return "Neutron L3 Configurable external gateway mode"
-
-    @classmethod
-    def get_alias(cls):
-        return "ext-gw-mode"
-
-    @classmethod
-    def get_description(cls):
-        return ("Extension of the router abstraction for specifying whether "
-                "SNAT should occur on the external gateway")
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-03-28T10:00:00-00:00"
-
-    def get_required_extensions(self):
-        return ["router"]
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return dict(EXTENDED_ATTRIBUTES_2_0.items())
-        else:
-            return {}
diff --git a/neutron/extensions/l3_ext_ha_mode.py b/neutron/extensions/l3_ext_ha_mode.py
deleted file mode 100644 (file)
index 48d24cd..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions
-
-HA_INFO = 'ha'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'routers': {
-        HA_INFO: {'allow_post': True, 'allow_put': True,
-                  'default': attributes.ATTR_NOT_SPECIFIED, 'is_visible': True,
-                  'enforce_policy': True,
-                  'convert_to': attributes.convert_to_boolean_if_not_none}
-    }
-}
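-
-
-# Illustrative create body: 'ha' accepts booleans via
-# convert_to_boolean_if_not_none; when left unspecified
-# (ATTR_NOT_SPECIFIED), the server-side default applies.
-EXAMPLE_HA_ROUTER = {'router': {'name': 'ha-router', 'ha': True}}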
-
-
-class DistributedHARouterNotSupported(exceptions.BadRequest):
-    message = _("Currently distributed HA routers are "
-                "not supported.")
-
-
-class MaxVRIDAllocationTriesReached(exceptions.NeutronException):
-    message = _("Failed to allocate a VRID in the network %(network_id)s "
-                "for the router %(router_id)s after %(max_tries)s tries.")
-
-
-class NoVRIDAvailable(exceptions.Conflict):
-    message = _("No more Virtual Router Identifier (VRID) available when "
-                "creating router %(router_id)s. The limit of number "
-                "of HA Routers per tenant is 254.")
-
-
-class HANetworkCIDRNotValid(exceptions.NeutronException):
-    message = _("The HA Network CIDR specified in the configuration file "
-                "isn't valid; %(cidr)s.")
-
-
-class HANotEnoughAvailableAgents(exceptions.NeutronException):
-    message = _("Not enough l3 agents available to ensure HA. Minimum "
-                "required %(min_agents)s, available %(num_agents)s.")
-
-
-class HAMaximumAgentsNumberNotValid(exceptions.NeutronException):
-    message = _("max_l3_agents_per_router %(max_agents)s config parameter "
-                "is not valid. It has to be greater than or equal to "
-                "min_l3_agents_per_router %(min_agents)s.")
-
-
-class HAMinimumAgentsNumberNotValid(exceptions.NeutronException):
-    message = (_("min_l3_agents_per_router config parameter is not valid. "
-                 "It has to be equal to or more than %s for HA.") %
-               constants.MINIMUM_AGENTS_FOR_HA)
-
-
-class L3_ext_ha_mode(extensions.ExtensionDescriptor):
-    """Extension class supporting virtual router in HA mode."""
-
-    @classmethod
-    def get_name(cls):
-        return "HA Router extension"
-
-    @classmethod
-    def get_alias(cls):
-        return constants.L3_HA_MODE_EXT_ALIAS
-
-    @classmethod
-    def get_description(cls):
-        return "Add HA capability to routers."
-
-    @classmethod
-    def get_updated(cls):
-        return "2014-04-26T00:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/extensions/l3agentscheduler.py b/neutron/extensions/l3agentscheduler.py
deleted file mode 100644 (file)
index 39b6cd7..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from oslo_log import log as logging
-import webob.exc
-
-from neutron._i18n import _, _LE
-from neutron.api import extensions
-from neutron.api.v2 import base
-from neutron.api.v2 import resource
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.extensions import agent
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-from neutron import policy
-from neutron import wsgi
-
-
-LOG = logging.getLogger(__name__)
-
-
-L3_ROUTER = 'l3-router'
-L3_ROUTERS = L3_ROUTER + 's'
-L3_AGENT = 'l3-agent'
-L3_AGENTS = L3_AGENT + 's'
-
-
-class RouterSchedulerController(wsgi.Controller):
-    def get_plugin(self):
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        if not plugin:
-            LOG.error(_LE('No plugin for L3 routing registered to handle '
-                          'router scheduling'))
-            msg = _('The resource could not be found.')
-            raise webob.exc.HTTPNotFound(msg)
-        return plugin
-
-    def index(self, request, **kwargs):
-        plugin = self.get_plugin()
-        policy.enforce(request.context,
-                       "get_%s" % L3_ROUTERS,
-                       {})
-        return plugin.list_routers_on_l3_agent(
-            request.context, kwargs['agent_id'])
-
-    def create(self, request, body, **kwargs):
-        plugin = self.get_plugin()
-        policy.enforce(request.context,
-                       "create_%s" % L3_ROUTER,
-                       {})
-        agent_id = kwargs['agent_id']
-        router_id = body['router_id']
-        result = plugin.add_router_to_l3_agent(request.context, agent_id,
-                                               router_id)
-        notify(request.context, 'l3_agent.router.add', router_id, agent_id)
-        return result
-
-    def delete(self, request, id, **kwargs):
-        plugin = self.get_plugin()
-        policy.enforce(request.context,
-                       "delete_%s" % L3_ROUTER,
-                       {})
-        agent_id = kwargs['agent_id']
-        result = plugin.remove_router_from_l3_agent(request.context, agent_id,
-                                                    id)
-        notify(request.context, 'l3_agent.router.remove', id, agent_id)
-        return result
-
-
-class L3AgentsHostingRouterController(wsgi.Controller):
-    def get_plugin(self):
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        if not plugin:
-            LOG.error(_LE('No plugin for L3 routing registered to handle '
-                          'router scheduling'))
-            msg = _('The resource could not be found.')
-            raise webob.exc.HTTPNotFound(msg)
-        return plugin
-
-    def index(self, request, **kwargs):
-        plugin = self.get_plugin()
-        policy.enforce(request.context,
-                       "get_%s" % L3_AGENTS,
-                       {})
-        return plugin.list_l3_agents_hosting_router(
-            request.context, kwargs['router_id'])
-
-
-class L3agentscheduler(extensions.ExtensionDescriptor):
-    """Extension class supporting l3 agent scheduler.
-    """
-
-    @classmethod
-    def get_name(cls):
-        return "L3 Agent Scheduler"
-
-    @classmethod
-    def get_alias(cls):
-        return constants.L3_AGENT_SCHEDULER_EXT_ALIAS
-
-    @classmethod
-    def get_description(cls):
-        return "Schedule routers among l3 agents"
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-02-07T10:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        exts = []
-        parent = dict(member_name="agent",
-                      collection_name="agents")
-
-        controller = resource.Resource(RouterSchedulerController(),
-                                       base.FAULT_MAP)
-        exts.append(extensions.ResourceExtension(
-            L3_ROUTERS, controller, parent))
-
-        parent = dict(member_name="router",
-                      collection_name="routers")
-
-        controller = resource.Resource(L3AgentsHostingRouterController(),
-                                       base.FAULT_MAP)
-        exts.append(extensions.ResourceExtension(
-            L3_AGENTS, controller, parent))
-        return exts
-
-    def get_extended_resources(self, version):
-        return {}
-
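-# Sketch of the URL layout implied by get_resources() above (the /v2.0
-# root is an assumption about the deployed API prefix):
-#
-#     GET    /v2.0/agents/{agent_id}/l3-routers
-#     POST   /v2.0/agents/{agent_id}/l3-routers   {"router_id": "..."}
-#     DELETE /v2.0/agents/{agent_id}/l3-routers/{router_id}
-#     GET    /v2.0/routers/{router_id}/l3-agents
-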
-
-class InvalidL3Agent(agent.AgentNotFound):
-    message = _("Agent %(id)s is not a L3 Agent or has been disabled")
-
-
-class RouterHostedByL3Agent(exceptions.Conflict):
-    message = _("The router %(router_id)s has been already hosted "
-                "by the L3 Agent %(agent_id)s.")
-
-
-class RouterSchedulingFailed(exceptions.Conflict):
-    message = _("Failed scheduling router %(router_id)s to "
-                "the L3 Agent %(agent_id)s.")
-
-
-class RouterReschedulingFailed(exceptions.Conflict):
-    message = _("Failed rescheduling router %(router_id)s: "
-                "no eligible l3 agent found.")
-
-
-class RouterL3AgentMismatch(exceptions.Conflict):
-    message = _("Cannot host distributed router %(router_id)s "
-                "on legacy L3 agent %(agent_id)s.")
-
-
-class DVRL3CannotAssignToDvrAgent(exceptions.Conflict):
-    message = _("Not allowed to manually assign a router to an "
-                "agent in 'dvr' mode.")
-
-
-class L3AgentSchedulerPluginBase(object):
-    """REST API to operate the l3 agent scheduler.
-
-    All of these methods must be called in an admin context.
-    """
-
-    @abc.abstractmethod
-    def add_router_to_l3_agent(self, context, id, router_id):
-        pass
-
-    @abc.abstractmethod
-    def remove_router_from_l3_agent(self, context, id, router_id):
-        pass
-
-    @abc.abstractmethod
-    def list_routers_on_l3_agent(self, context, id):
-        pass
-
-    @abc.abstractmethod
-    def list_l3_agents_hosting_router(self, context, router_id):
-        pass
-
-
-def notify(context, action, router_id, agent_id):
-    info = {'id': agent_id, 'router_id': router_id}
-    notifier = n_rpc.get_notifier('router')
-    notifier.info(context, action, {'agent': info})
diff --git a/neutron/extensions/metering.py b/neutron/extensions/metering.py
deleted file mode 100644 (file)
index 8b9f919..0000000
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-import six
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import resource_helper
-from neutron.common import exceptions as nexception
-from neutron.plugins.common import constants
-from neutron.services import service_base
-
-
-class MeteringLabelNotFound(nexception.NotFound):
-    message = _("Metering label %(label_id)s does not exist")
-
-
-class DuplicateMeteringRuleInPost(nexception.InUse):
-    message = _("Duplicate Metering Rule in POST.")
-
-
-class MeteringLabelRuleNotFound(nexception.NotFound):
-    message = _("Metering label rule %(rule_id)s does not exist")
-
-
-class MeteringLabelRuleOverlaps(nexception.Conflict):
-    message = _("Metering label rule with remote_ip_prefix "
-                "%(remote_ip_prefix)s overlaps another")
-
-
-RESOURCE_ATTRIBUTE_MAP = {
-    'metering_labels': {
-        'id': {'allow_post': False, 'allow_put': False,
-               'is_visible': True,
-               'primary_key': True},
-        'name': {'allow_post': True, 'allow_put': False,
-                 'is_visible': True, 'default': ''},
-        'description': {'allow_post': True, 'allow_put': False,
-                        'is_visible': True, 'default': ''},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
-                      'is_visible': True},
-        'shared': {'allow_post': True, 'allow_put': False,
-                   'is_visible': True, 'default': False,
-                   'convert_to': attr.convert_to_boolean}
-    },
-    'metering_label_rules': {
-        'id': {'allow_post': False, 'allow_put': False,
-               'is_visible': True,
-               'primary_key': True},
-        'metering_label_id': {'allow_post': True, 'allow_put': False,
-                              'validate': {'type:uuid': None},
-                              'is_visible': True, 'required_by_policy': True},
-        'direction': {'allow_post': True, 'allow_put': False,
-                      'is_visible': True,
-                      'validate': {'type:values': ['ingress', 'egress']}},
-        'excluded': {'allow_post': True, 'allow_put': False,
-                     'is_visible': True, 'default': False,
-                     'convert_to': attr.convert_to_boolean},
-        'remote_ip_prefix': {'allow_post': True, 'allow_put': False,
-                             'is_visible': True, 'required_by_policy': True,
-                             'validate': {'type:subnet': None}},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
-                      'is_visible': True}
-    }
-}
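-
-
-# Illustrative create bodies (hypothetical UUID) matching the map above: a
-# rule references a label and meters one direction for a CIDR, with
-# 'excluded' inverting the match.
-EXAMPLE_METERING_LABEL = {
-    'metering_label': {'name': 'vpn-traffic', 'shared': False},
-}
-EXAMPLE_METERING_RULE = {
-    'metering_label_rule': {
-        'metering_label_id': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
-        'direction': 'ingress',
-        'remote_ip_prefix': '10.0.0.0/24',
-        'excluded': False,
-    },
-}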
-
-
-class Metering(extensions.ExtensionDescriptor):
-
-    @classmethod
-    def get_name(cls):
-        return "Neutron Metering"
-
-    @classmethod
-    def get_alias(cls):
-        return "metering"
-
-    @classmethod
-    def get_description(cls):
-        return "Neutron Metering extension."
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-06-12T10:00:00-00:00"
-
-    @classmethod
-    def get_plugin_interface(cls):
-        return MeteringPluginBase
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        plural_mappings = resource_helper.build_plural_mappings(
-            {}, RESOURCE_ATTRIBUTE_MAP)
-        attr.PLURALS.update(plural_mappings)
-        # PCM: Metering sets pagination and sorting to True. Do we have cfg
-        # entries for these so they can be read? If not, they must be
-        # passed in explicitly.
-        return resource_helper.build_resource_info(plural_mappings,
-                                                   RESOURCE_ATTRIBUTE_MAP,
-                                                   constants.METERING,
-                                                   translate_name=True,
-                                                   allow_bulk=True)
-
-    def update_attributes_map(self, attributes):
-        super(Metering, self).update_attributes_map(
-            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return RESOURCE_ATTRIBUTE_MAP
-        else:
-            return {}
-
-
-@six.add_metaclass(abc.ABCMeta)
-class MeteringPluginBase(service_base.ServicePluginBase):
-
-    def get_plugin_description(self):
-        return constants.METERING
-
-    def get_plugin_type(self):
-        return constants.METERING
-
-    @abc.abstractmethod
-    def create_metering_label(self, context, metering_label):
-        """Create a metering label."""
-        pass
-
-    @abc.abstractmethod
-    def delete_metering_label(self, context, label_id):
-        """Delete a metering label."""
-        pass
-
-    @abc.abstractmethod
-    def get_metering_label(self, context, label_id, fields=None):
-        """Get a metering label."""
-        pass
-
-    @abc.abstractmethod
-    def get_metering_labels(self, context, filters=None, fields=None,
-                            sorts=None, limit=None, marker=None,
-                            page_reverse=False):
-        """List all metering labels."""
-        pass
-
-    @abc.abstractmethod
-    def create_metering_label_rule(self, context, metering_label_rule):
-        """Create a metering label rule."""
-        pass
-
-    @abc.abstractmethod
-    def get_metering_label_rule(self, context, rule_id, fields=None):
-        """Get a metering label rule."""
-        pass
-
-    @abc.abstractmethod
-    def delete_metering_label_rule(self, context, rule_id):
-        """Delete a metering label rule."""
-        pass
-
-    @abc.abstractmethod
-    def get_metering_label_rules(self, context, filters=None, fields=None,
-                                 sorts=None, limit=None, marker=None,
-                                 page_reverse=False):
-        """List all metering label rules."""
-        pass
diff --git a/neutron/extensions/multiprovidernet.py b/neutron/extensions/multiprovidernet.py
deleted file mode 100644 (file)
index 565179f..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import webob.exc
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as nexception
-from neutron.extensions import providernet as pnet
-
-SEGMENTS = 'segments'
-
-
-class SegmentsSetInConjunctionWithProviders(nexception.InvalidInput):
-    message = _("Segments and provider values cannot both be set.")
-
-
-class SegmentsContainDuplicateEntry(nexception.InvalidInput):
-    message = _("Duplicate segment entry in request.")
-
-
-def _convert_and_validate_segments(segments, valid_values=None):
-    for segment in segments:
-        segment.setdefault(pnet.NETWORK_TYPE, attr.ATTR_NOT_SPECIFIED)
-        segment.setdefault(pnet.PHYSICAL_NETWORK, attr.ATTR_NOT_SPECIFIED)
-        segmentation_id = segment.get(pnet.SEGMENTATION_ID)
-        if segmentation_id:
-            segment[pnet.SEGMENTATION_ID] = attr.convert_to_int(
-                segmentation_id)
-        else:
-            segment[pnet.SEGMENTATION_ID] = attr.ATTR_NOT_SPECIFIED
-        if len(segment.keys()) != 3:
-            msg = (_("Unrecognized attribute(s) '%s'") %
-                   ', '.join(set(segment.keys()) -
-                             set([pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
-                                  pnet.SEGMENTATION_ID])))
-            raise webob.exc.HTTPBadRequest(msg)
-
-
-def check_duplicate_segments(segments, is_partial_func=None):
-    """Helper function checking duplicate segments.
-
-    If is_partial_funcs is specified and not None, then
-    SegmentsContainDuplicateEntry is raised if two segments are identical and
-    non partially defined (is_partial_func(segment) == False).
-    Otherwise SegmentsContainDuplicateEntry is raised if two segment are
-    identical.
-    """
-    if is_partial_func is not None:
-        segments = [s for s in segments if not is_partial_func(s)]
-    fully_specifieds = [tuple(sorted(s.items())) for s in segments]
-    if len(set(fully_specifieds)) != len(fully_specifieds):
-        raise SegmentsContainDuplicateEntry()
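-
-
-# Illustrative behaviour (values hypothetical):
-#
-#     seg = {'provider:network_type': 'vlan',
-#            'provider:physical_network': 'physnet1',
-#            'provider:segmentation_id': 100}
-#     check_duplicate_segments([seg, dict(seg)])    # raises
-#     check_duplicate_segments(
-#         [seg, {'provider:network_type': 'vlan'}],
-#         is_partial_func=lambda s: len(s) < 3)     # passes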
-
-
-attr.validators['type:convert_segments'] = (
-    _convert_and_validate_segments)
-
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'networks': {
-        SEGMENTS: {'allow_post': True, 'allow_put': True,
-                   'validate': {'type:convert_segments': None},
-                   'convert_list_to': attr.convert_kvp_list_to_dict,
-                   'default': attr.ATTR_NOT_SPECIFIED,
-                   'enforce_policy': True,
-                   'is_visible': True},
-    }
-}
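-
-
-# Illustrative multi-segment network create (hypothetical values). After
-# _convert_and_validate_segments() each segment holds exactly the three
-# provider keys, with unspecified ones defaulted to ATTR_NOT_SPECIFIED.
-EXAMPLE_MULTISEGMENT_NET = {
-    'network': {
-        'name': 'multi-net',
-        SEGMENTS: [
-            {'provider:network_type': 'vlan',
-             'provider:physical_network': 'physnet1',
-             'provider:segmentation_id': 100},
-            {'provider:network_type': 'vxlan',
-             'provider:segmentation_id': 2001},
-        ],
-    },
-}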
-
-
-class Multiprovidernet(extensions.ExtensionDescriptor):
-    """Extension class supporting multiple provider networks.
-
-    This class is used by neutron's extension framework to make
-    metadata about the multiple provider network extension available to
-    clients. No new resources are defined by this extension. Instead,
-    the existing network resource's request and response messages are
-    extended with the 'segments' attribute.
-
-    With admin rights, network dictionaries returned will also include
-    the 'segments' attribute.
-    """
-
-    @classmethod
-    def get_name(cls):
-        return "Multi Provider Network"
-
-    @classmethod
-    def get_alias(cls):
-        return "multi-provider"
-
-    @classmethod
-    def get_description(cls):
-        return ("Expose mapping of virtual networks to multiple physical "
-                "networks")
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-06-27T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/extensions/netmtu.py b/neutron/extensions/netmtu.py
deleted file mode 100644 (file)
index b433251..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2015 OpenStack Foundation.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api import extensions
-
-
-MTU = 'mtu'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'networks': {
-        MTU: {'allow_post': False, 'allow_put': False,
-              'is_visible': True},
-    },
-}
-
-
-class Netmtu(extensions.ExtensionDescriptor):
-    """Extension class supporting network MTU."""
-
-    @classmethod
-    def get_name(cls):
-        return "Network MTU"
-
-    @classmethod
-    def get_alias(cls):
-        return "net-mtu"
-
-    @classmethod
-    def get_description(cls):
-        return "Provides MTU attribute for a network resource."
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-03-25T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/extensions/network_availability_zone.py b/neutron/extensions/network_availability_zone.py
deleted file mode 100644 (file)
index 2e62efb..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import abc
-
-import six
-
-from neutron.api import extensions
-from neutron.extensions import availability_zone as az_ext
-
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'networks': {
-        az_ext.AVAILABILITY_ZONES: {'allow_post': False, 'allow_put': False,
-                                    'is_visible': True},
-        az_ext.AZ_HINTS: {
-            'allow_post': True, 'allow_put': False, 'is_visible': True,
-            'validate': {'type:availability_zone_hints': None},
-            'default': []}},
-}
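-
-
-# Illustrative create body: hints are writable only on POST, while
-# 'availability_zones' itself is read-only and filled in by the plugin.
-EXAMPLE_AZ_NETWORK = {
-    'network': {'name': 'az-net', 'availability_zone_hints': ['az-1']},
-}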
-
-
-class Network_availability_zone(extensions.ExtensionDescriptor):
-    """Network availability zone extension."""
-
-    @classmethod
-    def get_name(cls):
-        return "Network Availability Zone"
-
-    @classmethod
-    def get_alias(cls):
-        return "network_availability_zone"
-
-    @classmethod
-    def get_description(cls):
-        return "Availability zone support for network."
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-01-01T10:00:00-00:00"
-
-    def get_required_extensions(self):
-        return ["availability_zone"]
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
-
-
-@six.add_metaclass(abc.ABCMeta)
-class NetworkAvailabilityZonePluginBase(object):
-
-    @abc.abstractmethod
-    def get_network_availability_zones(self, network):
-        """Return availability zones which a network belongs to"""
diff --git a/neutron/extensions/portbindings.py b/neutron/extensions/portbindings.py
deleted file mode 100644 (file)
index e9dc8b8..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-
-# The type of vnic that this port should be attached to
-VNIC_TYPE = 'binding:vnic_type'
-# The service will return the vif type for the specific port.
-VIF_TYPE = 'binding:vif_type'
-# The service may return a dictionary containing additional
-# information needed by the interface driver. The set of items
-# returned may depend on the value of VIF_TYPE.
-VIF_DETAILS = 'binding:vif_details'
-# In some cases different implementations may be run on different hosts.
-# The host on which the port will be allocated.
-HOST_ID = 'binding:host_id'
-# The profile will be a dictionary that enables the application running
-# on the specific host to pass and receive vif port specific information to
-# the plugin.
-PROFILE = 'binding:profile'
-
-# The keys below are used in the VIF_DETAILS attribute to convey
-# information to the VIF driver.
-
-# TODO(rkukura): Replace CAP_PORT_FILTER, which nova no longer
-# understands, with the new set of VIF security details to be used in
-# the VIF_DETAILS attribute.
-#
-#  - port_filter : Boolean value indicating Neutron provides port filtering
-#                  features such as security group and anti MAC/IP spoofing
-#  - ovs_hybrid_plug: Boolean used to inform Nova that the hybrid plugging
-#                     strategy for OVS should be used
-CAP_PORT_FILTER = 'port_filter'
-OVS_HYBRID_PLUG = 'ovs_hybrid_plug'
-VIF_DETAILS_VLAN = 'vlan'
-
-# The keys below are used in the VIF_DETAILS attribute to convey
-# information related to the configuration of the vhost-user VIF driver.
-
-# - vhost_user_mode: String value used to declare the mode of a
-#                    vhost-user socket
-VHOST_USER_MODE = 'vhostuser_mode'
-# - server: socket created by hypervisor
-VHOST_USER_MODE_SERVER = 'server'
-# - client: socket created by vswitch
-VHOST_USER_MODE_CLIENT = 'client'
-# - vhostuser_socket: String value used to declare the vhostuser socket name
-VHOST_USER_SOCKET = 'vhostuser_socket'
-# - vhost_user_ovs_plug: Boolean used to inform Nova that the ovs plug
-#                        method should be used when binding the
-#                        vhost-user vif.
-VHOST_USER_OVS_PLUG = 'vhostuser_ovs_plug'
-# - vhost-user:  The vhost-user interface type is a standard virtio interface
-#                provided by qemu 2.1+. This constant defines the neutron side
-#                of the vif binding type to provide a common definition
-#                to enable reuse in multiple agents and drivers.
-VIF_TYPE_VHOST_USER = 'vhostuser'
-
-VIF_TYPE_UNBOUND = 'unbound'
-VIF_TYPE_BINDING_FAILED = 'binding_failed'
-VIF_TYPE_DISTRIBUTED = 'distributed'
-VIF_TYPE_OVS = 'ovs'
-VIF_TYPE_BRIDGE = 'bridge'
-VIF_TYPE_OTHER = 'other'
-
-VNIC_NORMAL = 'normal'
-VNIC_DIRECT = 'direct'
-VNIC_MACVTAP = 'macvtap'
-VNIC_BAREMETAL = 'baremetal'
-VNIC_DIRECT_PHYSICAL = 'direct-physical'
-VNIC_TYPES = [VNIC_NORMAL, VNIC_DIRECT, VNIC_MACVTAP, VNIC_BAREMETAL,
-              VNIC_DIRECT_PHYSICAL]
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'ports': {
-        VIF_TYPE: {'allow_post': False, 'allow_put': False,
-                   'default': attributes.ATTR_NOT_SPECIFIED,
-                   'enforce_policy': True,
-                   'is_visible': True},
-        VIF_DETAILS: {'allow_post': False, 'allow_put': False,
-                      'default': attributes.ATTR_NOT_SPECIFIED,
-                      'enforce_policy': True,
-                      'is_visible': True},
-        VNIC_TYPE: {'allow_post': True, 'allow_put': True,
-                    'default': VNIC_NORMAL,
-                    'is_visible': True,
-                    'validate': {'type:values': VNIC_TYPES},
-                    'enforce_policy': True},
-        HOST_ID: {'allow_post': True, 'allow_put': True,
-                  'default': attributes.ATTR_NOT_SPECIFIED,
-                  'is_visible': True,
-                  'enforce_policy': True},
-        PROFILE: {'allow_post': True, 'allow_put': True,
-                  'default': attributes.ATTR_NOT_SPECIFIED,
-                  'enforce_policy': True,
-                  'validate': {'type:dict_or_none': None},
-                  'is_visible': True},
-    }
-}
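-
-
-# Illustrative port-create body (hypothetical values) exercising the
-# writable attributes above; VIF_TYPE and VIF_DETAILS are populated by the
-# server, and the PROFILE contents are driver-specific.
-EXAMPLE_BOUND_PORT = {
-    'port': {
-        'network_id': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
-        VNIC_TYPE: VNIC_DIRECT,
-        HOST_ID: 'compute-1',
-        PROFILE: {'pci_slot': '0000:0a:00.1'},
-    },
-}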
-
-
-class Portbindings(extensions.ExtensionDescriptor):
-    """Extension class supporting port bindings.
-
-    This class is used by neutron's extension framework to make
-    metadata about the port bindings available to external applications.
-
-    With admin rights one will be able to update and read the values.
-    """
-
-    @classmethod
-    def get_name(cls):
-        return "Port Binding"
-
-    @classmethod
-    def get_alias(cls):
-        return "binding"
-
-    @classmethod
-    def get_description(cls):
-        return "Expose port bindings of a virtual port to external application"
-
-    @classmethod
-    def get_updated(cls):
-        return "2014-02-03T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/extensions/portsecurity.py b/neutron/extensions/portsecurity.py
deleted file mode 100644 (file)
index fede653..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.common import exceptions as nexception
-
-
-class PortSecurityPortHasSecurityGroup(nexception.InUse):
-    message = _("Port has security group associated. Cannot disable port "
-                "security or ip address until security group is removed")
-
-
-class PortSecurityAndIPRequiredForSecurityGroups(nexception.InvalidInput):
-    message = _("Port security must be enabled and port must have an IP"
-                " address in order to use security groups.")
-
-
-class PortSecurityBindingNotFound(nexception.InvalidExtensionEnv):
-    message = _("Port does not have port security binding.")
-
-PORTSECURITY = 'port_security_enabled'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'networks': {
-        PORTSECURITY: {'allow_post': True, 'allow_put': True,
-                       'convert_to': attributes.convert_to_boolean,
-                       'enforce_policy': True,
-                       'default': True,
-                       'is_visible': True},
-    },
-    'ports': {
-        PORTSECURITY: {'allow_post': True, 'allow_put': True,
-                       'convert_to': attributes.convert_to_boolean,
-                       'default': attributes.ATTR_NOT_SPECIFIED,
-                       'enforce_policy': True,
-                       'is_visible': True},
-    }
-}
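-
-
-# Illustrative port update: disabling port security; per the exceptions
-# above, this fails while a security group is still associated.
-EXAMPLE_PORT_UPDATE = {'port': {PORTSECURITY: False}}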
-
-
-class Portsecurity(extensions.ExtensionDescriptor):
-    """Extension class supporting port security."""
-
-    @classmethod
-    def get_name(cls):
-        return "Port Security"
-
-    @classmethod
-    def get_alias(cls):
-        return "port-security"
-
-    @classmethod
-    def get_description(cls):
-        return "Provides port security"
-
-    @classmethod
-    def get_updated(cls):
-        return "2012-07-23T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/extensions/providernet.py b/neutron/extensions/providernet.py
deleted file mode 100644 (file)
index cf3bdce..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.common import exceptions as n_exc
-
-
-NETWORK_TYPE = 'provider:network_type'
-PHYSICAL_NETWORK = 'provider:physical_network'
-SEGMENTATION_ID = 'provider:segmentation_id'
-ATTRIBUTES = (NETWORK_TYPE, PHYSICAL_NETWORK, SEGMENTATION_ID)
-
-# Common definitions for maximum string field length
-NETWORK_TYPE_MAX_LEN = 32
-PHYSICAL_NETWORK_MAX_LEN = 64
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'networks': {
-        NETWORK_TYPE: {'allow_post': True, 'allow_put': True,
-                       'validate': {'type:string': NETWORK_TYPE_MAX_LEN},
-                       'default': attributes.ATTR_NOT_SPECIFIED,
-                       'enforce_policy': True,
-                       'is_visible': True},
-        PHYSICAL_NETWORK: {'allow_post': True, 'allow_put': True,
-                           'validate': {'type:string':
-                                        PHYSICAL_NETWORK_MAX_LEN},
-                           'default': attributes.ATTR_NOT_SPECIFIED,
-                           'enforce_policy': True,
-                           'is_visible': True},
-        SEGMENTATION_ID: {'allow_post': True, 'allow_put': True,
-                          'convert_to': attributes.convert_to_int,
-                          'enforce_policy': True,
-                          'default': attributes.ATTR_NOT_SPECIFIED,
-                          'is_visible': True},
-    }
-}
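-
-
-# Illustrative admin create body (hypothetical values) for a provider
-# network, consistent with the attribute map above.
-EXAMPLE_PROVIDER_NET = {
-    'network': {
-        'name': 'physnet1-vlan-100',
-        NETWORK_TYPE: 'vlan',
-        PHYSICAL_NETWORK: 'physnet1',
-        SEGMENTATION_ID: 100,
-    },
-}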
-
-
-def _raise_if_updates_provider_attributes(attrs):
-    """Raise exception if provider attributes are present.
-
-    This method is used for plugins that do not support
-    updating provider networks.
-    """
-    if any(attributes.is_attr_set(attrs.get(a)) for a in ATTRIBUTES):
-        msg = _("Plugin does not support updating provider attributes")
-        raise n_exc.InvalidInput(error_message=msg)
-
-
-class Providernet(extensions.ExtensionDescriptor):
-    """Extension class supporting provider networks.
-
-    This class is used by neutron's extension framework to make
-    metadata about the provider network extension available to
-    clients. No new resources are defined by this extension. Instead,
-    the existing network resource's request and response messages are
-    extended with attributes in the provider namespace.
-
-    With admin rights, network dictionaries returned will also include
-    provider attributes.
-    """
-
-    @classmethod
-    def get_name(cls):
-        return "Provider Network"
-
-    @classmethod
-    def get_alias(cls):
-        return "provider"
-
-    @classmethod
-    def get_description(cls):
-        return "Expose mapping of virtual networks to physical networks"
-
-    @classmethod
-    def get_updated(cls):
-        return "2012-09-07T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/extensions/qos.py b/neutron/extensions/qos.py
deleted file mode 100644 (file)
index 5a14293..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import itertools
-
-import six
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import base
-from neutron.api.v2 import resource_helper
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.services.qos import qos_consts
-from neutron.services import service_base
-
-QOS_PREFIX = "/qos"
-
-# Attribute Map
-QOS_RULE_COMMON_FIELDS = {
-    'id': {'allow_post': False, 'allow_put': False,
-           'validate': {'type:uuid': None},
-           'is_visible': True,
-           'primary_key': True},
-    'tenant_id': {'allow_post': True, 'allow_put': False,
-                  'required_by_policy': True,
-                  'is_visible': True},
-}
-
-RESOURCE_ATTRIBUTE_MAP = {
-    'policies': {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True, 'primary_key': True},
-        'name': {'allow_post': True, 'allow_put': True,
-                 'is_visible': True, 'default': '',
-                 'validate': {'type:string': None}},
-        'description': {'allow_post': True, 'allow_put': True,
-                        'is_visible': True, 'default': '',
-                        'validate': {'type:string': None}},
-        'shared': {'allow_post': True, 'allow_put': True,
-                   'is_visible': True, 'default': False,
-                   'convert_to': attr.convert_to_boolean},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'is_visible': True},
-        'rules': {'allow_post': False, 'allow_put': False, 'is_visible': True},
-    },
-    'rule_types': {
-        'type': {'allow_post': False, 'allow_put': False,
-                 'is_visible': True}
-    }
-}
-
-SUB_RESOURCE_ATTRIBUTE_MAP = {
-    'bandwidth_limit_rules': {
-        'parent': {'collection_name': 'policies',
-                   'member_name': 'policy'},
-        'parameters': dict(QOS_RULE_COMMON_FIELDS,
-                           **{'max_kbps': {
-                                  'allow_post': True, 'allow_put': True,
-                                  'is_visible': True, 'default': None,
-                                  'validate': {'type:non_negative': None}},
-                              'max_burst_kbps': {
-                                  'allow_post': True, 'allow_put': True,
-                                  'is_visible': True, 'default': 0,
-                                  'validate': {'type:non_negative': None}}})
-    }
-}
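-
-# Illustrative rule-create body: with QOS_PREFIX and the 'policies' parent
-# above, bandwidth-limit rules are created under
-# /qos/policies/{policy_id}/bandwidth_limit_rules with a body like:
-EXAMPLE_BW_RULE = {
-    'bandwidth_limit_rule': {'max_kbps': 10000, 'max_burst_kbps': 1000},
-}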
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'ports': {qos_consts.QOS_POLICY_ID: {
-                                    'allow_post': True,
-                                    'allow_put': True,
-                                    'is_visible': True,
-                                    'default': None,
-                                    'validate': {'type:uuid_or_none': None}}},
-    'networks': {qos_consts.QOS_POLICY_ID: {
-                                    'allow_post': True,
-                                    'allow_put': True,
-                                    'is_visible': True,
-                                    'default': None,
-                                    'validate': {'type:uuid_or_none': None}}}}
-
-
-class Qos(extensions.ExtensionDescriptor):
-    """Quality of service API extension."""
-
-    @classmethod
-    def get_name(cls):
-        return "qos"
-
-    @classmethod
-    def get_alias(cls):
-        return "qos"
-
-    @classmethod
-    def get_description(cls):
-        return "The Quality of Service extension."
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-06-08T10:00:00-00:00"
-
-    @classmethod
-    def get_plugin_interface(cls):
-        return QoSPluginBase
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        special_mappings = {'policies': 'policy'}
-        plural_mappings = resource_helper.build_plural_mappings(
-            special_mappings, itertools.chain(RESOURCE_ATTRIBUTE_MAP,
-                                              SUB_RESOURCE_ATTRIBUTE_MAP))
-        attr.PLURALS.update(plural_mappings)
-
-        resources = resource_helper.build_resource_info(
-                plural_mappings,
-                RESOURCE_ATTRIBUTE_MAP,
-                constants.QOS,
-                translate_name=True,
-                allow_bulk=True)
-
-        plugin = manager.NeutronManager.get_service_plugins()[constants.QOS]
-        for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
-            resource_name = collection_name[:-1]
-            parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent')
-            params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get(
-                'parameters')
-
-            controller = base.create_resource(collection_name, resource_name,
-                                              plugin, params,
-                                              allow_bulk=True,
-                                              parent=parent,
-                                              allow_pagination=True,
-                                              allow_sorting=True)
-
-            resource = extensions.ResourceExtension(
-                collection_name,
-                controller, parent,
-                path_prefix=QOS_PREFIX,
-                attr_map=params)
-            resources.append(resource)
-
-        return resources
-
-    def update_attributes_map(self, attributes, extension_attrs_map=None):
-        super(Qos, self).update_attributes_map(
-            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) +
-                        list(RESOURCE_ATTRIBUTE_MAP.items()))
-        else:
-            return {}
-
-
-@six.add_metaclass(abc.ABCMeta)
-class QoSPluginBase(service_base.ServicePluginBase):
-
-    path_prefix = QOS_PREFIX
-
-    def get_plugin_description(self):
-        return "QoS Service Plugin for ports and networks"
-
-    def get_plugin_type(self):
-        return constants.QOS
-
-    @abc.abstractmethod
-    def get_policy(self, context, policy_id, fields=None):
-        pass
-
-    @abc.abstractmethod
-    def get_policies(self, context, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None,
-                     page_reverse=False):
-        pass
-
-    @abc.abstractmethod
-    def create_policy(self, context, policy):
-        pass
-
-    @abc.abstractmethod
-    def update_policy(self, context, policy_id, policy):
-        pass
-
-    @abc.abstractmethod
-    def delete_policy(self, context, policy_id):
-        pass
-
-    @abc.abstractmethod
-    def get_policy_bandwidth_limit_rule(self, context, rule_id,
-                                        policy_id, fields=None):
-        pass
-
-    @abc.abstractmethod
-    def get_policy_bandwidth_limit_rules(self, context, policy_id,
-                                         filters=None, fields=None,
-                                         sorts=None, limit=None,
-                                         marker=None, page_reverse=False):
-        pass
-
-    @abc.abstractmethod
-    def create_policy_bandwidth_limit_rule(self, context, policy_id,
-                                           bandwidth_limit_rule):
-        pass
-
-    @abc.abstractmethod
-    def update_policy_bandwidth_limit_rule(self, context, rule_id, policy_id,
-                                           bandwidth_limit_rule):
-        pass
-
-    @abc.abstractmethod
-    def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id):
-        pass
-
-    @abc.abstractmethod
-    def get_rule_types(self, context, filters=None, fields=None,
-                       sorts=None, limit=None,
-                       marker=None, page_reverse=False):
-        pass
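
For reference, the attribute maps above translate directly into REST bodies: top-level policies plus bandwidth_limit_rules nested under a policy, rooted at QOS_PREFIX. A minimal sketch of the request bodies those maps accept (identifiers are made up for illustration):

    # POST /v2.0/qos/policies -- validated against
    # RESOURCE_ATTRIBUTE_MAP['policies'].
    policy_body = {
        'policy': {
            'name': 'gold',          # type:string, defaults to ''
            'description': 'video',  # type:string, defaults to ''
            'shared': False,         # run through convert_to_boolean
            # required_by_policy; usually inferred from the request context
            'tenant_id': '00000000-0000-0000-0000-000000000000',
        }
    }

    # POST /v2.0/qos/policies/<policy_id>/bandwidth_limit_rules -- the
    # sub-resource from SUB_RESOURCE_ATTRIBUTE_MAP, parented on 'policy'.
    rule_body = {
        'bandwidth_limit_rule': {
            'max_kbps': 10000,       # type:non_negative
            'max_burst_kbps': 1000,  # defaults to 0
        }
    }
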
diff --git a/neutron/extensions/quotasv2.py b/neutron/extensions/quotasv2.py
deleted file mode 100644 (file)
index fb0abb4..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_utils import importutils
-import webob
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.api.v2 import base
-from neutron.api.v2 import resource
-from neutron.common import constants as const
-from neutron.common import exceptions as n_exc
-from neutron import manager
-from neutron.pecan_wsgi import controllers
-from neutron import quota
-from neutron.quota import resource_registry
-from neutron import wsgi
-
-RESOURCE_NAME = 'quota'
-RESOURCE_COLLECTION = RESOURCE_NAME + "s"
-QUOTAS = quota.QUOTAS
-DB_QUOTA_DRIVER = 'neutron.db.quota.driver.DbQuotaDriver'
-EXTENDED_ATTRIBUTES_2_0 = {
-    RESOURCE_COLLECTION: {}
-}
-
-
-class QuotaSetsController(wsgi.Controller):
-
-    def __init__(self, plugin):
-        self._resource_name = RESOURCE_NAME
-        self._plugin = plugin
-        self._driver = importutils.import_class(
-            cfg.CONF.QUOTAS.quota_driver
-        )
-        self._update_extended_attributes = True
-
-    def _update_attributes(self):
-        for quota_resource in resource_registry.get_all_resources().keys():
-            attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]
-            attr_dict[quota_resource] = {
-                'allow_post': False,
-                'allow_put': True,
-                'convert_to': attributes.convert_to_int,
-                'validate': {'type:range': [-1, const.DB_INTEGER_MAX_VALUE]},
-                'is_visible': True}
-        self._update_extended_attributes = False
-
-    def _get_quotas(self, request, tenant_id):
-        return self._driver.get_tenant_quotas(
-            request.context,
-            resource_registry.get_all_resources(),
-            tenant_id)
-
-    def create(self, request, body=None):
-        msg = _('POST requests are not supported on this resource.')
-        raise webob.exc.HTTPNotImplemented(msg)
-
-    def index(self, request):
-        context = request.context
-        self._check_admin(context)
-        return {self._resource_name + "s":
-                self._driver.get_all_quotas(
-                    context, resource_registry.get_all_resources())}
-
-    def tenant(self, request):
-        """Retrieve the tenant info in context."""
-        context = request.context
-        if not context.tenant_id:
-            raise n_exc.QuotaMissingTenant()
-        return {'tenant': {'tenant_id': context.tenant_id}}
-
-    def show(self, request, id):
-        if id != request.context.tenant_id:
-            self._check_admin(request.context,
-                              reason=_("Only admin is authorized "
-                                       "to access quotas for another tenant"))
-        return {self._resource_name: self._get_quotas(request, id)}
-
-    def _check_admin(self, context,
-                     reason=_("Only admin can view or configure quota")):
-        if not context.is_admin:
-            raise n_exc.AdminRequired(reason=reason)
-
-    def delete(self, request, id):
-        self._check_admin(request.context)
-        self._driver.delete_tenant_quota(request.context, id)
-
-    def update(self, request, id, body=None):
-        self._check_admin(request.context)
-        if self._update_extended_attributes:
-            self._update_attributes()
-        body = base.Controller.prepare_request_body(
-            request.context, body, False, self._resource_name,
-            EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION])
-        for key, value in body[self._resource_name].items():
-            self._driver.update_quota_limit(request.context, id, key, value)
-        return {self._resource_name: self._get_quotas(request, id)}
-
-
-class Quotasv2(extensions.ExtensionDescriptor):
-    """Quotas management support."""
-
-    @classmethod
-    def get_name(cls):
-        return "Quota management support"
-
-    @classmethod
-    def get_alias(cls):
-        return RESOURCE_COLLECTION
-
-    @classmethod
-    def get_description(cls):
-        description = 'Expose functions for quota management'
-        if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER:
-            description += ' per tenant'
-        return description
-
-    @classmethod
-    def get_updated(cls):
-        return "2012-07-29T10:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        controller = resource.Resource(
-            QuotaSetsController(manager.NeutronManager.get_plugin()),
-            faults=base.FAULT_MAP)
-        return [extensions.ResourceExtension(
-            Quotasv2.get_alias(),
-            controller,
-            collection_actions={'tenant': 'GET'})]
-
-    @classmethod
-    def get_pecan_controllers(cls):
-        return ((RESOURCE_COLLECTION, controllers.QuotasController()), )
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
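
QuotaSetsController.update() builds its attribute map lazily from the quota resource registry, validates the body against it, and then writes each limit through the configured driver. A hedged sketch of the admin-side interaction (the resource names are examples; any registered quota resource works):

    # PUT /v2.0/quotas/<tenant_id> with a body like this lands in
    # update(): each key is checked against the lazily built
    # EXTENDED_ATTRIBUTES_2_0 entry, then handed to
    # driver.update_quota_limit(context, tenant_id, key, value).
    body = {
        'quota': {
            'network': 20,  # range-checked: -1 .. DB_INTEGER_MAX_VALUE
            'port': -1,     # -1 means unlimited
        }
    }
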
diff --git a/neutron/extensions/rbac.py b/neutron/extensions/rbac.py
deleted file mode 100644 (file)
index 7a1e9cd..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import base
-from neutron.common import exceptions as n_exc
-from neutron.db import rbac_db_models
-from neutron import manager
-from neutron.quota import resource_registry
-
-
-class RbacPolicyNotFound(n_exc.NotFound):
-    message = _("RBAC policy of type %(object_type)s with ID %(id)s not found")
-
-
-class RbacPolicyInUse(n_exc.Conflict):
-    message = _("RBAC policy on object %(object_id)s cannot be removed "
-                "because other objects depend on it.\nDetails: %(details)s")
-
-
-def convert_valid_object_type(otype):
-    normalized = otype.strip().lower()
-    if normalized in rbac_db_models.get_type_model_map():
-        return normalized
-    msg = _("'%s' is not a valid RBAC object type") % otype
-    raise n_exc.InvalidInput(error_message=msg)
-
-
-RESOURCE_NAME = 'rbac_policy'
-RESOURCE_COLLECTION = 'rbac_policies'
-
-RESOURCE_ATTRIBUTE_MAP = {
-    RESOURCE_COLLECTION: {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True, 'primary_key': True},
-        'object_type': {'allow_post': True, 'allow_put': False,
-                        'convert_to': convert_valid_object_type,
-                        'is_visible': True, 'default': None,
-                        'enforce_policy': True},
-        'object_id': {'allow_post': True, 'allow_put': False,
-                      'validate': {'type:uuid': None},
-                      'is_visible': True, 'default': None,
-                      'enforce_policy': True},
-        'target_tenant': {'allow_post': True, 'allow_put': True,
-                          'is_visible': True, 'enforce_policy': True,
-                          'default': None},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True, 'is_visible': True},
-        'action': {'allow_post': True, 'allow_put': False,
-                   # action depends on type so validation has to occur in
-                   # the extension
-                   'validate': {'type:string': attr.DESCRIPTION_MAX_LEN},
-                   'is_visible': True},
-    }
-}
-
-rbac_quota_opts = [
-    cfg.IntOpt('quota_rbac_policy', default=10,
-               deprecated_name='quota_rbac_entry',
-               help=_('Default number of RBAC entries allowed per tenant. '
-                      'A negative value means unlimited.'))
-]
-cfg.CONF.register_opts(rbac_quota_opts, 'QUOTAS')
-
-
-class Rbac(extensions.ExtensionDescriptor):
-    """RBAC policy support."""
-
-    @classmethod
-    def get_name(cls):
-        return "RBAC Policies"
-
-    @classmethod
-    def get_alias(cls):
-        return 'rbac-policies'
-
-    @classmethod
-    def get_description(cls):
-        return ("Allows creation and modification of policies that control "
-                "tenant access to resources.")
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-06-17T12:15:12-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        plural_mappings = {'rbac_policies': 'rbac_policy'}
-        attr.PLURALS.update(plural_mappings)
-        plugin = manager.NeutronManager.get_plugin()
-        params = RESOURCE_ATTRIBUTE_MAP['rbac_policies']
-        collection_name = 'rbac-policies'
-        resource_name = 'rbac_policy'
-        resource_registry.register_resource_by_name(resource_name)
-        controller = base.create_resource(collection_name, resource_name,
-                                          plugin, params, allow_bulk=True,
-                                          allow_pagination=False,
-                                          allow_sorting=True)
-        return [extensions.ResourceExtension(collection_name, controller,
-                                             attr_map=params)]
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return RESOURCE_ATTRIBUTE_MAP
-        return {}
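
convert_valid_object_type() is the interesting converter here: it normalizes input before checking it against the registered type models, so ' Network ' and 'network' are equivalent. A standalone sketch of the same pattern (the type map below is a stand-in for rbac_db_models.get_type_model_map(), and ValueError stands in for InvalidInput):

    _TYPE_MAP = {'network': 'NetworkRBAC'}  # stand-in registry

    def convert_valid_object_type(otype):
        normalized = otype.strip().lower()
        if normalized in _TYPE_MAP:
            return normalized
        raise ValueError("'%s' is not a valid RBAC object type" % otype)

    assert convert_valid_object_type(' Network ') == 'network'
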
diff --git a/neutron/extensions/router_availability_zone.py b/neutron/extensions/router_availability_zone.py
deleted file mode 100644 (file)
index 3e09bb3..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import abc
-
-import six
-
-from neutron.api import extensions
-from neutron.extensions import availability_zone as az_ext
-
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'routers': {
-        az_ext.AVAILABILITY_ZONES: {'allow_post': False, 'allow_put': False,
-                                    'is_visible': True},
-        az_ext.AZ_HINTS: {
-                'allow_post': True, 'allow_put': False, 'is_visible': True,
-                'validate': {'type:availability_zone_hints': None},
-                'default': []}}
-}
-
-
-class Router_availability_zone(extensions.ExtensionDescriptor):
-    """Router availability zone extension."""
-
-    @classmethod
-    def get_name(cls):
-        return "Router Availability Zone"
-
-    @classmethod
-    def get_alias(cls):
-        return "router_availability_zone"
-
-    @classmethod
-    def get_description(cls):
-        return "Availability zone support for router."
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-01-01T10:00:00-00:00"
-
-    def get_required_extensions(self):
-        return ["router", "availability_zone"]
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
-
-
-@six.add_metaclass(abc.ABCMeta)
-class RouterAvailabilityZonePluginBase(object):
-
-    @abc.abstractmethod
-    def get_router_availability_zones(self, router):
-        """Return availability zones which a router belongs to."""
diff --git a/neutron/extensions/routerservicetype.py b/neutron/extensions/routerservicetype.py
deleted file mode 100644 (file)
index e40ffa5..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api import extensions
-
-SERVICE_TYPE_ID = 'service_type_id'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'routers': {
-        SERVICE_TYPE_ID: {'allow_post': True, 'allow_put': False,
-                          'validate': {'type:uuid_or_none': None},
-                          'default': None, 'is_visible': True},
-    }
-}
-
-
-class Routerservicetype(extensions.ExtensionDescriptor):
-    """Extension class supporting router service type."""
-
-    @classmethod
-    def get_name(cls):
-        return "Router Service Type"
-
-    @classmethod
-    def get_alias(cls):
-        return "router-service-type"
-
-    @classmethod
-    def get_description(cls):
-        return "Provides router service type"
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-01-29T00:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
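
Once loaded, the extension adds a single optional, write-once attribute to routers. An illustrative create body (the uuid is made up):

    router_body = {
        'router': {
            'name': 'edge-router',
            # uuid_or_none; immutable after creation (allow_put is False)
            'service_type_id': '11111111-2222-3333-4444-555555555555',
        }
    }
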
diff --git a/neutron/extensions/securitygroup.py b/neutron/extensions/securitygroup.py
deleted file mode 100644 (file)
index 9ae3806..0000000
+++ /dev/null
@@ -1,386 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import netaddr
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-import six
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import base
-from neutron.common import constants as const
-from neutron.common import exceptions as nexception
-from neutron import manager
-from neutron.quota import resource_registry
-
-
-# Security group Exceptions
-class SecurityGroupInvalidPortRange(nexception.InvalidInput):
-    message = _("For TCP/UDP protocols, port_range_min must be "
-                "<= port_range_max")
-
-
-class SecurityGroupInvalidPortValue(nexception.InvalidInput):
-    message = _("Invalid value for port %(port)s")
-
-
-class SecurityGroupInvalidIcmpValue(nexception.InvalidInput):
-    message = _("Invalid value for ICMP %(field)s (%(attr)s) "
-                "%(value)s. It must be 0 to 255.")
-
-
-class SecurityGroupEthertypeConflictWithProtocol(nexception.InvalidInput):
-    message = _("Invalid ethertype %(ethertype)s for protocol "
-                "%(protocol)s.")
-
-
-class SecurityGroupMissingIcmpType(nexception.InvalidInput):
-    message = _("ICMP code (port-range-max) %(value)s is provided"
-                " but ICMP type (port-range-min) is missing.")
-
-
-class SecurityGroupInUse(nexception.InUse):
-    message = _("Security Group %(id)s %(reason)s.")
-
-    def __init__(self, **kwargs):
-        if 'reason' not in kwargs:
-            kwargs['reason'] = _("in use")
-        super(SecurityGroupInUse, self).__init__(**kwargs)
-
-
-class SecurityGroupCannotRemoveDefault(nexception.InUse):
-    message = _("Insufficient rights for removing default security group.")
-
-
-class SecurityGroupCannotUpdateDefault(nexception.InUse):
-    message = _("Updating default security group not allowed.")
-
-
-class SecurityGroupDefaultAlreadyExists(nexception.InUse):
-    message = _("Default security group already exists.")
-
-
-class SecurityGroupRuleInvalidProtocol(nexception.InvalidInput):
-    message = _("Security group rule protocol %(protocol)s not supported. "
-                "Only protocol values %(values)s and integer representations "
-                "[0 to 255] are supported.")
-
-
-class SecurityGroupRulesNotSingleTenant(nexception.InvalidInput):
-    message = _("Multiple tenant_ids in bulk security group rule create"
-                " not allowed")
-
-
-class SecurityGroupRemoteGroupAndRemoteIpPrefix(nexception.InvalidInput):
-    message = _("Only remote_ip_prefix or remote_group_id may "
-                "be provided.")
-
-
-class SecurityGroupProtocolRequiredWithPorts(nexception.InvalidInput):
-    message = _("Must also specifiy protocol if port range is given.")
-
-
-class SecurityGroupNotSingleGroupRules(nexception.InvalidInput):
-    message = _("Only allowed to update rules for "
-                "one security profile at a time")
-
-
-class SecurityGroupNotFound(nexception.NotFound):
-    message = _("Security group %(id)s does not exist")
-
-
-class SecurityGroupRuleNotFound(nexception.NotFound):
-    message = _("Security group rule %(id)s does not exist")
-
-
-class DuplicateSecurityGroupRuleInPost(nexception.InUse):
-    message = _("Duplicate Security Group Rule in POST.")
-
-
-class SecurityGroupRuleExists(nexception.InUse):
-    message = _("Security group rule already exists. Rule id is %(id)s.")
-
-
-class SecurityGroupRuleInUse(nexception.InUse):
-    message = _("Security Group Rule %(id)s %(reason)s.")
-
-    def __init__(self, **kwargs):
-        if 'reason' not in kwargs:
-            kwargs['reason'] = _("in use")
-        super(SecurityGroupRuleInUse, self).__init__(**kwargs)
-
-
-class SecurityGroupRuleParameterConflict(nexception.InvalidInput):
-    message = _("Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s")
-
-
-class SecurityGroupConflict(nexception.Conflict):
-    message = _("Error %(reason)s while attempting the operation.")
-
-
-class SecurityGroupRuleInvalidEtherType(nexception.InvalidInput):
-    message = _("Security group rule for ethertype '%(ethertype)s' not "
-                "supported. Allowed values are %(values)s.")
-
-
-def convert_protocol(value):
-    if value is None:
-        return
-    try:
-        val = int(value)
-        if val >= 0 and val <= 255:
-            # Cast the protocol number to a string due to bug 1381379:
-            # PostgreSQL fails when it tries to compare an integer with
-            # a string that exists in the db.
-            return str(value)
-        raise SecurityGroupRuleInvalidProtocol(
-            protocol=value, values=sg_supported_protocols)
-    except (ValueError, TypeError):
-        # value.lower() raises AttributeError for non-string input; keep
-        # it inside this handler so the caller always sees
-        # SecurityGroupRuleInvalidProtocol (a sibling except clause
-        # cannot catch exceptions raised here).
-        try:
-            lowered = value.lower()
-        except AttributeError:
-            raise SecurityGroupRuleInvalidProtocol(
-                protocol=value, values=sg_supported_protocols)
-        if lowered in sg_supported_protocols:
-            return lowered
-        raise SecurityGroupRuleInvalidProtocol(
-            protocol=value, values=sg_supported_protocols)
-
-
-def convert_ethertype_to_case_insensitive(value):
-    if isinstance(value, six.string_types):
-        for ethertype in sg_supported_ethertypes:
-            if ethertype.lower() == value.lower():
-                return ethertype
-    raise SecurityGroupRuleInvalidEtherType(
-        ethertype=value, values=sg_supported_ethertypes)
-
-
-def convert_validate_port_value(port):
-    if port is None:
-        return port
-    try:
-        val = int(port)
-    except (ValueError, TypeError):
-        raise SecurityGroupInvalidPortValue(port=port)
-
-    if val >= 0 and val <= 65535:
-        return val
-    else:
-        raise SecurityGroupInvalidPortValue(port=port)
-
-
-def convert_to_uuid_list_or_none(value_list):
-    if value_list is None:
-        return
-    for sg_id in value_list:
-        if not uuidutils.is_uuid_like(sg_id):
-            msg = _("'%s' is not an integer or uuid") % sg_id
-            raise nexception.InvalidInput(error_message=msg)
-    return value_list
-
-
-def convert_ip_prefix_to_cidr(ip_prefix):
-    if not ip_prefix:
-        return
-    try:
-        cidr = netaddr.IPNetwork(ip_prefix)
-        return str(cidr)
-    except (ValueError, TypeError, netaddr.AddrFormatError):
-        raise nexception.InvalidCIDR(input=ip_prefix)
-
-
-def _validate_name_not_default(data, valid_values=None):
-    if data.lower() == "default":
-        raise SecurityGroupDefaultAlreadyExists()
-
-
-attr.validators['type:name_not_default'] = _validate_name_not_default
-
-sg_supported_protocols = [None, const.PROTO_NAME_TCP, const.PROTO_NAME_UDP,
-                          const.PROTO_NAME_ICMP, const.PROTO_NAME_ICMP_V6]
-sg_supported_ethertypes = ['IPv4', 'IPv6']
-
-# Attribute Map
-RESOURCE_ATTRIBUTE_MAP = {
-    'security_groups': {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'name': {'allow_post': True, 'allow_put': True,
-                 'is_visible': True, 'default': '',
-                 'validate': {'type:name_not_default': attr.NAME_MAX_LEN}},
-        'description': {'allow_post': True, 'allow_put': True,
-                        'validate': {'type:string': attr.DESCRIPTION_MAX_LEN},
-                        'is_visible': True, 'default': ''},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
-                      'is_visible': True},
-        'security_group_rules': {'allow_post': False, 'allow_put': False,
-                                 'is_visible': True},
-    },
-    'security_group_rules': {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True,
-               'primary_key': True},
-        'security_group_id': {'allow_post': True, 'allow_put': False,
-                              'is_visible': True, 'required_by_policy': True},
-        'remote_group_id': {'allow_post': True, 'allow_put': False,
-                            'default': None, 'is_visible': True},
-        'direction': {'allow_post': True, 'allow_put': False,
-                      'is_visible': True,
-                      'validate': {'type:values': ['ingress', 'egress']}},
-        'protocol': {'allow_post': True, 'allow_put': False,
-                     'is_visible': True, 'default': None,
-                     'convert_to': convert_protocol},
-        'port_range_min': {'allow_post': True, 'allow_put': False,
-                           'convert_to': convert_validate_port_value,
-                           'default': None, 'is_visible': True},
-        'port_range_max': {'allow_post': True, 'allow_put': False,
-                           'convert_to': convert_validate_port_value,
-                           'default': None, 'is_visible': True},
-        'ethertype': {'allow_post': True, 'allow_put': False,
-                      'is_visible': True, 'default': 'IPv4',
-                      'convert_to': convert_ethertype_to_case_insensitive,
-                      'validate': {'type:values': sg_supported_ethertypes}},
-        'remote_ip_prefix': {'allow_post': True, 'allow_put': False,
-                             'default': None, 'is_visible': True,
-                             'convert_to': convert_ip_prefix_to_cidr},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
-                      'is_visible': True},
-    }
-}
-
-
-SECURITYGROUPS = 'security_groups'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'ports': {SECURITYGROUPS: {'allow_post': True,
-                               'allow_put': True,
-                               'is_visible': True,
-                               'convert_to': convert_to_uuid_list_or_none,
-                               'default': attr.ATTR_NOT_SPECIFIED}}}
-security_group_quota_opts = [
-    cfg.IntOpt('quota_security_group',
-               default=10,
-               help=_('Number of security groups allowed per tenant. '
-                      'A negative value means unlimited.')),
-    cfg.IntOpt('quota_security_group_rule',
-               default=100,
-               help=_('Number of security rules allowed per tenant. '
-                      'A negative value means unlimited.')),
-]
-cfg.CONF.register_opts(security_group_quota_opts, 'QUOTAS')
-
-
-class Securitygroup(extensions.ExtensionDescriptor):
-    """Security group extension."""
-
-    @classmethod
-    def get_name(cls):
-        return "security-group"
-
-    @classmethod
-    def get_alias(cls):
-        return "security-group"
-
-    @classmethod
-    def get_description(cls):
-        return "The security groups extension."
-
-    @classmethod
-    def get_updated(cls):
-        return "2012-10-05T10:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
-        attr.PLURALS.update(dict(my_plurals))
-        exts = []
-        plugin = manager.NeutronManager.get_plugin()
-        for resource_name in ['security_group', 'security_group_rule']:
-            collection_name = resource_name.replace('_', '-') + "s"
-            params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
-            resource_registry.register_resource_by_name(resource_name)
-            controller = base.create_resource(collection_name,
-                                              resource_name,
-                                              plugin, params, allow_bulk=True,
-                                              allow_pagination=True,
-                                              allow_sorting=True)
-
-            ex = extensions.ResourceExtension(collection_name,
-                                              controller,
-                                              attr_map=params)
-            exts.append(ex)
-
-        return exts
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) +
-                        list(RESOURCE_ATTRIBUTE_MAP.items()))
-        else:
-            return {}
-
-
-@six.add_metaclass(abc.ABCMeta)
-class SecurityGroupPluginBase(object):
-
-    @abc.abstractmethod
-    def create_security_group(self, context, security_group):
-        pass
-
-    @abc.abstractmethod
-    def update_security_group(self, context, id, security_group):
-        pass
-
-    @abc.abstractmethod
-    def delete_security_group(self, context, id):
-        pass
-
-    @abc.abstractmethod
-    def get_security_groups(self, context, filters=None, fields=None,
-                            sorts=None, limit=None, marker=None,
-                            page_reverse=False):
-        pass
-
-    @abc.abstractmethod
-    def get_security_group(self, context, id, fields=None):
-        pass
-
-    @abc.abstractmethod
-    def create_security_group_rule(self, context, security_group_rule):
-        pass
-
-    @abc.abstractmethod
-    def delete_security_group_rule(self, context, id):
-        pass
-
-    @abc.abstractmethod
-    def get_security_group_rules(self, context, filters=None, fields=None,
-                                 sorts=None, limit=None, marker=None,
-                                 page_reverse=False):
-        pass
-
-    @abc.abstractmethod
-    def get_security_group_rule(self, context, id, fields=None):
-        pass
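
Most of the normalization in this extension happens in the converters, which sanitize user input before any rule reaches the database. A runnable sketch mirroring two of them (logic copied in simplified form so the snippet is self-contained; the real functions raise the extension's exception classes rather than ValueError):

    import netaddr

    def convert_validate_port_value(port):
        if port is None:
            return port
        try:
            val = int(port)
        except (ValueError, TypeError):
            raise ValueError('Invalid value for port %s' % port)
        if 0 <= val <= 65535:
            return val
        raise ValueError('Invalid value for port %s' % port)

    def convert_ip_prefix_to_cidr(ip_prefix):
        if not ip_prefix:
            return None
        return str(netaddr.IPNetwork(ip_prefix))

    assert convert_validate_port_value('443') == 443
    assert convert_ip_prefix_to_cidr('10.0.0.1') == '10.0.0.1/32'
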
diff --git a/neutron/extensions/servicetype.py b/neutron/extensions/servicetype.py
deleted file mode 100644 (file)
index b7f1871..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.api.v2 import base
-from neutron.db import servicetype_db
-
-
-RESOURCE_NAME = "service_provider"
-COLLECTION_NAME = "%ss" % RESOURCE_NAME
-SERVICE_ATTR = 'service_type'
-PLUGIN_ATTR = 'plugin'
-DRIVER_ATTR = 'driver'
-EXT_ALIAS = 'service-type'
-
-# Attribute Map for Service Provider Resource
-# Allow read-only access
-RESOURCE_ATTRIBUTE_MAP = {
-    COLLECTION_NAME: {
-        'service_type': {'allow_post': False, 'allow_put': False,
-                         'is_visible': True},
-        'name': {'allow_post': False, 'allow_put': False,
-                 'is_visible': True},
-        'default': {'allow_post': False, 'allow_put': False,
-                    'is_visible': True},
-    }
-}
-
-
-class Servicetype(extensions.ExtensionDescriptor):
-
-    @classmethod
-    def get_name(cls):
-        return _("Neutron Service Type Management")
-
-    @classmethod
-    def get_alias(cls):
-        return EXT_ALIAS
-
-    @classmethod
-    def get_description(cls):
-        return _("API for retrieving service providers for "
-                 "Neutron advanced services")
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-01-20T00:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Extended Resource for service type management."""
-        my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
-        attributes.PLURALS.update(dict(my_plurals))
-        attr_map = RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME]
-        collection_name = COLLECTION_NAME.replace('_', '-')
-        controller = base.create_resource(
-            collection_name,
-            RESOURCE_NAME,
-            servicetype_db.ServiceTypeManager.get_instance(),
-            attr_map)
-        return [extensions.ResourceExtension(collection_name,
-                                             controller,
-                                             attr_map=attr_map)]
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return RESOURCE_ATTRIBUTE_MAP
-        else:
-            return {}
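
The collection is read-only because its contents come from configuration rather than the API: providers are declared as service_type:name:driver[:default] entries. A toy parser for that entry format (illustrative only; the driver path is hypothetical and this is not the servicetype_db implementation):

    def parse_service_provider(entry):
        """Parse 'service_type:name:driver[:default]' (illustrative)."""
        parts = entry.split(':')
        return {'service_type': parts[0],
                'name': parts[1],
                'driver': parts[2],
                'default': len(parts) > 3 and parts[3] == 'default'}

    provider = parse_service_provider(
        'LOADBALANCER:Haproxy:example.drivers.HaproxyDriver:default')
    assert provider['default'] is True
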
diff --git a/neutron/extensions/subnetallocation.py b/neutron/extensions/subnetallocation.py
deleted file mode 100644 (file)
index 3a9712a..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api import extensions
-from neutron.common import constants
-
-
-class Subnetallocation(extensions.ExtensionDescriptor):
-    """Extension class supporting subnet allocation."""
-
-    @classmethod
-    def get_name(cls):
-        return "Subnet Allocation"
-
-    @classmethod
-    def get_alias(cls):
-        return constants.SUBNET_ALLOCATION_EXT_ALIAS
-
-    @classmethod
-    def get_description(cls):
-        return "Enables allocation of subnets from a subnet pool"
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-03-30T10:00:00-00:00"
-
-    def get_required_extensions(self):
-        return ["router"]
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        return []
-
-    def get_extended_resources(self, version):
-        return {}
diff --git a/neutron/extensions/vlantransparent.py b/neutron/extensions/vlantransparent.py
deleted file mode 100644 (file)
index 57a3cbb..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (c) 2015 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _, _LI
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.common import exceptions as nexception
-
-LOG = logging.getLogger(__name__)
-
-
-class VlanTransparencyDriverError(nexception.NeutronException):
-    """Vlan Transparency not supported by all mechanism drivers."""
-    message = _("Backend does not support VLAN Transparency.")
-
-
-VLANTRANSPARENT = 'vlan_transparent'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'networks': {
-        VLANTRANSPARENT: {'allow_post': True, 'allow_put': False,
-                          'convert_to': attributes.convert_to_boolean,
-                          'default': attributes.ATTR_NOT_SPECIFIED,
-                          'is_visible': True},
-    },
-}
-
-
-def disable_extension_by_config(aliases):
-    if not cfg.CONF.vlan_transparent:
-        if 'vlan-transparent' in aliases:
-            aliases.remove('vlan-transparent')
-        LOG.info(_LI('Disabled vlantransparent extension.'))
-
-
-def get_vlan_transparent(network):
-    return (network['vlan_transparent']
-            if ('vlan_transparent' in network and
-                attributes.is_attr_set(network['vlan_transparent']))
-            else False)
-
-
-class Vlantransparent(extensions.ExtensionDescriptor):
-    """Extension class supporting vlan transparent networks."""
-
-    @classmethod
-    def get_name(cls):
-        return "Vlantransparent"
-
-    @classmethod
-    def get_alias(cls):
-        return "vlan-transparent"
-
-    @classmethod
-    def get_description(cls):
-        return "Provides Vlan Transparent Networks"
-
-    @classmethod
-    def get_updated(cls):
-        return "2015-03-23T09:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
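
get_vlan_transparent() treats a missing key, None, and the not-specified sentinel all as False. A standalone sketch of that tri-state handling (_UNSET stands in for attributes.ATTR_NOT_SPECIFIED):

    _UNSET = object()  # stand-in for attributes.ATTR_NOT_SPECIFIED

    def get_vlan_transparent(network):
        value = network.get('vlan_transparent', _UNSET)
        if value is _UNSET or value is None:
            return False
        return value

    assert get_vlan_transparent({}) is False
    assert get_vlan_transparent({'vlan_transparent': True}) is True
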
diff --git a/neutron/hacking/__init__.py b/neutron/hacking/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/hacking/checks.py b/neutron/hacking/checks.py
deleted file mode 100644 (file)
index 3441b68..0000000
+++ /dev/null
@@ -1,245 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-
-import pep8
-import six
-
-# Guidelines for writing new hacking checks
-#
-#  - Use only for Neutron specific tests. OpenStack general tests
-#    should be submitted to the common 'hacking' module.
-#  - Pick numbers in the range N3xx. Find the current test with
-#    the highest allocated number and then pick the next value.
-#  - Keep the test method code in the source file ordered based
-#    on the N3xx value.
-#  - List the new rule in the top level HACKING.rst file
-#  - Add test cases for each new rule to
-#    neutron/tests/unit/hacking/test_checks.py
-
-_all_log_levels = {
-    'reserved': '_',  # this should never be used with a log unless
-                      # it is a variable used for a log message and
-                      # an exception
-    'error': '_LE',
-    'info': '_LI',
-    'warn': '_LW',
-    'warning': '_LW',
-    'critical': '_LC',
-    'exception': '_LE',
-}
-_all_hints = set(_all_log_levels.values())
-mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
-
-
-def _regex_for_level(level, hint):
-    return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
-        'level': level,
-        'wrong_hints': '|'.join(_all_hints - set([hint])),
-    }
-
-
-log_translation_hint = re.compile(
-    '|'.join('(?:%s)' % _regex_for_level(level, hint)
-             for level, hint in six.iteritems(_all_log_levels)))
-
-oslo_namespace_imports_dot = re.compile(r"import[\s]+oslo[.][^\s]+")
-oslo_namespace_imports_from_dot = re.compile(r"from[\s]+oslo[.]")
-oslo_namespace_imports_from_root = re.compile(r"from[\s]+oslo[\s]+import[\s]+")
-contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
-
-
-def validate_log_translations(logical_line, physical_line, filename):
-    # Translations are not required in the test directory
-    if "neutron/tests" in filename:
-        return
-    if pep8.noqa(physical_line):
-        return
-
-    msg = "N320: Log messages require translation hints!"
-    if log_translation_hint.match(logical_line):
-        yield (0, msg)
-
-
-def use_jsonutils(logical_line, filename):
-    msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
-
-    # Some files in the tree are not meant to be run from inside Neutron
-    # itself, so we should not complain about them not using jsonutils
-    json_check_skipped_patterns = [
-        "neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/"
-        "plugins/netwrap",
-    ]
-
-    for pattern in json_check_skipped_patterns:
-        if pattern in filename:
-            return
-
-    if "json." in logical_line:
-        json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
-        for f in json_funcs:
-            pos = logical_line.find('json.%s' % f)
-            if pos != -1:
-                yield (pos, msg % {'fun': f[:-1]})
-
-
-def no_translate_debug_logs(logical_line, filename):
-    """Check for 'LOG.debug(_(' and 'LOG.debug(_Lx('
-
-    As per our translation policy,
-    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
-    we shouldn't translate debug level logs.
-
-    * This check assumes that 'LOG' is a logger.
-    N319
-    """
-    for hint in _all_hints:
-        if logical_line.startswith("LOG.debug(%s(" % hint):
-            yield(0, "N319 Don't translate debug level logs")
-
-
-def check_assert_called_once_with(logical_line, filename):
-    # Try to detect unintended calls of nonexistent mock methods like:
-    #    assert_called_once
-    #    assertCalledOnceWith
-    #    assert_has_called
-    if 'neutron/tests/' in filename:
-        if '.assert_called_once_with(' in logical_line:
-            return
-        uncased_line = logical_line.lower().replace('_', '')
-
-        if '.assertcalledonce' in uncased_line:
-            msg = ("N322: Possible use of no-op mock method. "
-                   "please use assert_called_once_with.")
-            yield (0, msg)
-
-        if '.asserthascalled' in uncased_line:
-            msg = ("N322: Possible use of no-op mock method. "
-                   "please use assert_has_calls.")
-            yield (0, msg)
-
-
-def check_oslo_namespace_imports(logical_line):
-    if re.match(oslo_namespace_imports_from_dot, logical_line):
-        msg = ("N323: '%s' must be used instead of '%s'.") % (
-               logical_line.replace('oslo.', 'oslo_'),
-               logical_line)
-        yield(0, msg)
-    elif re.match(oslo_namespace_imports_from_root, logical_line):
-        msg = ("N323: '%s' must be used instead of '%s'.") % (
-               logical_line.replace('from oslo import ', 'import oslo_'),
-               logical_line)
-        yield(0, msg)
-    elif re.match(oslo_namespace_imports_dot, logical_line):
-        msg = ("N323: '%s' must be used instead of '%s'.") % (
-               logical_line.replace('import', 'from').replace('.', ' import '),
-               logical_line)
-        yield(0, msg)
-
-
-def check_no_contextlib_nested(logical_line, filename):
-    msg = ("N324: contextlib.nested is deprecated. With Python 2.7 and later "
-           "the with-statement supports multiple nested objects. See https://"
-           "docs.python.org/2/library/contextlib.html#contextlib.nested for "
-           "more information.")
-
-    if contextlib_nested.match(logical_line):
-        yield(0, msg)
-
-
-def check_python3_xrange(logical_line):
-    if re.search(r"\bxrange\s*\(", logical_line):
-        yield(0, "N325: Do not use xrange. Use range, or six.moves.range for "
-                 "large loops.")
-
-
-def check_no_basestring(logical_line):
-    if re.search(r"\bbasestring\b", logical_line):
-        msg = ("N326: basestring is not Python3-compatible, use "
-               "six.string_types instead.")
-        yield(0, msg)
-
-
-def check_python3_no_iteritems(logical_line):
-    if re.search(r".*\.iteritems\(\)", logical_line):
-        msg = ("N327: Use six.iteritems() instead of dict.iteritems().")
-        yield(0, msg)
-
-
-def check_asserttrue(logical_line, filename):
-    if 'neutron/tests/' in filename:
-        if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)", logical_line):
-            msg = ("N328: Use assertTrue(observed) instead of "
-                   "assertEqual(True, observed)")
-            yield (0, msg)
-        if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?\)", logical_line):
-            msg = ("N328: Use assertTrue(observed) instead of "
-                   "assertEqual(True, observed)")
-            yield (0, msg)
-
-
-def no_mutable_default_args(logical_line):
-    msg = "N329: Method's default argument shouldn't be mutable!"
-    if mutable_default_args.match(logical_line):
-        yield (0, msg)
-
-
-def check_assertfalse(logical_line, filename):
-    if 'neutron/tests/' in filename:
-        if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line):
-            msg = ("N328: Use assertFalse(observed) instead of "
-                   "assertEqual(False, observed)")
-            yield (0, msg)
-        if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line):
-            msg = ("N328: Use assertFalse(observed) instead of "
-                   "assertEqual(False, observed)")
-            yield (0, msg)
-
-
-def check_assertempty(logical_line, filename):
-    if 'neutron/tests/' in filename:
-        msg = ("N330: Use assertEqual(*empty*, observed) instead of "
-               "assertEqual(observed, *empty*). *empty* contains "
-               "{}, [], (), set(), '', \"\"")
-        empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")"
-        reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties
-        if re.search(reg, logical_line):
-            yield (0, msg)
-
-
-def check_assertisinstance(logical_line, filename):
-    if 'neutron/tests/' in filename:
-        if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)",
-                     logical_line):
-            msg = ("N331: Use assertIsInstance(observed, type) instead "
-                   "of assertTrue(isinstance(observed, type))")
-            yield (0, msg)
-
-
-def factory(register):
-    register(validate_log_translations)
-    register(use_jsonutils)
-    register(check_assert_called_once_with)
-    register(no_translate_debug_logs)
-    register(check_oslo_namespace_imports)
-    register(check_no_contextlib_nested)
-    register(check_python3_xrange)
-    register(check_no_basestring)
-    register(check_python3_no_iteritems)
-    register(check_asserttrue)
-    register(no_mutable_default_args)
-    register(check_assertfalse)
-    register(check_assertempty)
-    register(check_assertisinstance)
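
Every check follows the same protocol: hacking feeds it one logical line at a time and collects the yielded (offset, message) pairs. A self-contained sketch using the xrange rule from above:

    import re

    def check_python3_xrange(logical_line):
        # Mirrors the N325 check: flag any call-looking use of xrange.
        if re.search(r"\bxrange\s*\(", logical_line):
            yield (0, "N325: Do not use xrange.")

    assert list(check_python3_xrange("for i in xrange(10):"))
    assert not list(check_python3_xrange("for i in range(10):"))
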
diff --git a/neutron/i18n.py b/neutron/i18n.py
deleted file mode 100644 (file)
index 7c93336..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# TODO(dougwig) - remove this file at the beginning of N.
-
-from debtcollector import moves
-
-import neutron._i18n
-
-message = "moved to neutron._i18n; please migrate to local oslo_i18n " \
-    "usage, as defined in the devref and at " \
-    "http://docs.openstack.org/developer/oslo.i18n/usage.html"
-
-_ = moves.moved_function(neutron._i18n._, '_', __name__, message=message)
-_LI = moves.moved_function(neutron._i18n._LI, '_LI', __name__, message=message)
-_LW = moves.moved_function(neutron._i18n._LW, '_LW', __name__, message=message)
-_LE = moves.moved_function(neutron._i18n._LE, '_LE', __name__, message=message)
-_LC = moves.moved_function(neutron._i18n._LC, '_LC', __name__, message=message)
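
moves.moved_function() keeps the old import path working while nudging callers toward the new one: the alias still translates, but calling it emits a DeprecationWarning. A sketch of what a legacy caller observes while this shim exists (assumes neutron is importable):

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        from neutron.i18n import _ as legacy_
        legacy_("some message")  # still translates, but warns
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
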
diff --git a/neutron/ipam/__init__.py b/neutron/ipam/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/ipam/driver.py b/neutron/ipam/driver.py
deleted file mode 100644 (file)
index 3460517..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from oslo_config import cfg
-from oslo_log import log
-import six
-
-from neutron.ipam import requests as ipam_req
-from neutron import manager
-
-LOG = log.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Pool(object):
-    """Interface definition for an IPAM driver.
-
-    There should be an instance of the driver for every subnet pool.
-    """
-
-    def __init__(self, subnetpool, context):
-        """Initialize pool
-
-        :param subnetpool: SubnetPool of the address space to use.
-        :type subnetpool: dict
-        """
-        self._subnetpool = subnetpool
-        self._context = context
-
-    @classmethod
-    def get_instance(cls, subnet_pool, context):
-        """Returns an instance of the configured IPAM driver
-
-        :param subnet_pool: Subnet pool of the address space to use.
-        :type subnet_pool: dict
-        :returns: An instance of Driver for the given subnet pool
-        """
-        ipam_driver_name = cfg.CONF.ipam_driver
-        mgr = manager.NeutronManager
-        LOG.debug("Loading ipam driver: %s", ipam_driver_name)
-        driver_class = mgr.load_class_for_provider('neutron.ipam_drivers',
-                                                   ipam_driver_name)
-        return driver_class(subnet_pool, context)
-
-    @abc.abstractmethod
-    def allocate_subnet(self, request):
-        """Allocates a subnet based on the subnet request
-
-        :param request: Describes the allocation requested.
-        :type request: An instance of a sub-class of SubnetRequest
-        :returns: An instance of Subnet
-        :raises: RequestNotSupported, IPAMAlreadyAllocated
-        """
-
-    @abc.abstractmethod
-    def get_subnet(self, subnet_id):
-        """Gets the matching subnet if it has been allocated
-
-        :param subnet_id: the subnet identifier
-        :type subnet_id: str uuid
-        :returns: An instance of IPAM Subnet
-        :raises: IPAMAllocationNotFound
-        """
-
-    @abc.abstractmethod
-    def update_subnet(self, request):
-        """Updates an already allocated subnet
-
-        This is used to notify the external IPAM system of updates to a subnet.
-
-        :param request: Update the subnet to match this request
-        :type request: An instance of a sub-class of SpecificSubnetRequest
-        :returns: An instance of IPAM Subnet
-        :raises: RequestNotSupported, IPAMAllocationNotFound
-        """
-
-    @abc.abstractmethod
-    def remove_subnet(self, subnet_id):
-        """Removes an allocation
-
-        The initial reference implementation will probably do nothing.
-
-        :param subnet_id: the subnet identifier
-        :type subnet_id: str uuid
-        :raises: IPAMAllocationNotFound
-        """
-
-    def get_subnet_request_factory(self):
-        """Returns default SubnetRequestFactory
-
-        Can be overridden at the driver level to return a custom factory
-        """
-        return ipam_req.SubnetRequestFactory
-
-    def get_address_request_factory(self):
-        """Returns default AddressRequestFactory
-
-        Can be overridden at the driver level to return a custom factory
-        """
-        return ipam_req.AddressRequestFactory
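-
-
-# Editor's addition - an illustrative sketch (not part of the original
-# module) of how a caller might obtain the configured driver and allocate a
-# specific subnet. The `subnetpool` dict and `context` are assumed to come
-# from the plugin layer; the cidr is an example value.
-def _example_allocate_subnet(subnetpool, context):
-    from oslo_utils import uuidutils
-
-    pool = Pool.get_instance(subnetpool, context)
-    request = ipam_req.SpecificSubnetRequest(
-        subnetpool['tenant_id'], uuidutils.generate_uuid(), '192.0.2.0/24')
-    return pool.allocate_subnet(request)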
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Subnet(object):
-    """Interface definition for an IPAM subnet
-
-    A subnet would typically be associated with a network but may not be.  It
-    could represent a dynamically routed IP address space, in which case the
-    normal network and broadcast addresses would be usable.  It should always
-    be a routable block of addresses, representable in CIDR notation.
-    """
-
-    @abc.abstractmethod
-    def allocate(self, address_request):
-        """Allocates an IP address based on the request passed in
-
-        :param address_request: Specifies what to allocate.
-        :type address_request: An instance of a subclass of AddressRequest
-        :returns: A netaddr.IPAddress
-        :raises: AddressNotAvailable, AddressOutsideAllocationPool,
-            AddressOutsideSubnet
-        """
-
-    @abc.abstractmethod
-    def deallocate(self, address):
-        """Returns a previously allocated address to the pool
-
-        :param address: The address to give back.
-        :type address: A netaddr.IPAddress or convertible to one.
-        :returns: None
-        :raises: IPAMAllocationNotFound
-        """
-
-    @abc.abstractmethod
-    def get_details(self):
-        """Returns the details of the subnet
-
-        :returns: An instance of SpecificSubnetRequest with the subnet detail.
-        """
diff --git a/neutron/ipam/drivers/__init__.py b/neutron/ipam/drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/ipam/drivers/neutrondb_ipam/__init__.py b/neutron/ipam/drivers/neutrondb_ipam/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/ipam/drivers/neutrondb_ipam/db_api.py b/neutron/ipam/drivers/neutrondb_ipam/db_api.py
deleted file mode 100644 (file)
index 768e6ee..0000000
+++ /dev/null
@@ -1,251 +0,0 @@
-# Copyright 2015 OpenStack LLC.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_db import exception as db_exc
-from oslo_utils import uuidutils
-from sqlalchemy.orm import exc as orm_exc
-
-from neutron.ipam.drivers.neutrondb_ipam import db_models
-from neutron.ipam import exceptions as ipam_exc
-
-# Database operations for Neutron's DB-backed IPAM driver
-
-
-class IpamSubnetManager(object):
-
-    @classmethod
-    def load_by_neutron_subnet_id(cls, session, neutron_subnet_id):
-        return session.query(db_models.IpamSubnet).filter_by(
-            neutron_subnet_id=neutron_subnet_id).first()
-
-    def __init__(self, ipam_subnet_id, neutron_subnet_id):
-        self._ipam_subnet_id = ipam_subnet_id
-        self._neutron_subnet_id = neutron_subnet_id
-
-    @property
-    def neutron_id(self):
-        return self._neutron_subnet_id
-
-    def create(self, session):
-        """Create database models for an IPAM subnet.
-
-        This method creates a subnet resource for the IPAM driver and
-        associates it with its neutron identifier, if specified.
-
-        :param session: database session.
-        :returns: the identifier of the created IPAM subnet
-        """
-        if not self._ipam_subnet_id:
-            self._ipam_subnet_id = uuidutils.generate_uuid()
-        ipam_subnet = db_models.IpamSubnet(
-            id=self._ipam_subnet_id,
-            neutron_subnet_id=self._neutron_subnet_id)
-        session.add(ipam_subnet)
-        return self._ipam_subnet_id
-
-    @classmethod
-    def delete(cls, session, neutron_subnet_id):
-        """Delete IPAM subnet.
-
-        The IPAM subnet no longer has a foreign key to the neutron subnet,
-        so the delete has to be performed manually.
-
-        :param session: database session
-        :param neutron_subnet_id: neutron subnet id associated with ipam subnet
-        """
-        return session.query(db_models.IpamSubnet).filter_by(
-            neutron_subnet_id=neutron_subnet_id).delete()
-
-    def create_pool(self, session, pool_start, pool_end):
-        """Create an allocation pool and availability ranges for the subnet.
-
-        This method does not perform any validation on parameters; it simply
-        persists data in the database.
-
-        :param session: database session
-        :param pool_start: string expressing the start of the pool
-        :param pool_end: string expressing the end of the pool
-        :return: the newly created pool object.
-        """
-        ip_pool = db_models.IpamAllocationPool(
-            ipam_subnet_id=self._ipam_subnet_id,
-            first_ip=pool_start,
-            last_ip=pool_end)
-        session.add(ip_pool)
-        ip_range = db_models.IpamAvailabilityRange(
-            allocation_pool=ip_pool,
-            first_ip=pool_start,
-            last_ip=pool_end)
-        session.add(ip_range)
-        return ip_pool
-
-    def delete_allocation_pools(self, session):
-        """Remove all allocation pools for the current subnet.
-
-        :param session: database session
-        """
-        session.query(db_models.IpamAllocationPool).filter_by(
-            ipam_subnet_id=self._ipam_subnet_id).delete()
-
-    def list_pools(self, session):
-        """Return pools for the current subnet."""
-        return session.query(
-            db_models.IpamAllocationPool).filter_by(
-            ipam_subnet_id=self._ipam_subnet_id)
-
-    def _range_query(self, session):
-        return session.query(
-            db_models.IpamAvailabilityRange).join(
-            db_models.IpamAllocationPool).filter_by(
-            ipam_subnet_id=self._ipam_subnet_id)
-
-    def get_first_range(self, session):
-        """Return the first availability range for the subnet
-
-        :param session: database session
-        :return: first available range as instance of
-            neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange
-        """
-        return self._range_query(session).first()
-
-    def list_ranges_by_subnet_id(self, session):
-        """Return availability ranges for a given ipam subnet
-
-        :param session: database session
-        :return: list of availability ranges as instances of
-            neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange
-        """
-        return self._range_query(session)
-
-    def list_ranges_by_allocation_pool(self, session, allocation_pool_id):
-        """Return availability ranges for a given pool.
-
-        :param session: database session
-        :param allocation_pool_id: allocation pool identifier
-        :return: list of availability ranges as instances of
-            neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange
-        """
-        return session.query(
-            db_models.IpamAvailabilityRange).join(
-            db_models.IpamAllocationPool).filter_by(
-            id=allocation_pool_id)
-
-    def update_range(self, session, db_range, first_ip=None, last_ip=None):
-        """Updates db_range to have new first_ip and last_ip.
-
-        :param session: database session
-        :param db_range: IpamAvailabilityRange db object
-        :param first_ip: first ip address in range
-        :param last_ip: last ip address in range
-        :return: count of updated rows
-        """
-        opts = {}
-        if first_ip:
-            opts['first_ip'] = str(first_ip)
-        if last_ip:
-            opts['last_ip'] = str(last_ip)
-        if not opts:
-            raise ipam_exc.IpamAvailabilityRangeNoChanges()
-        try:
-            return session.query(
-                db_models.IpamAvailabilityRange).filter_by(
-                allocation_pool_id=db_range.allocation_pool_id).filter_by(
-                first_ip=db_range.first_ip).filter_by(
-                last_ip=db_range.last_ip).update(opts)
-        except orm_exc.ObjectDeletedError:
-            raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
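-
-    # Editor's note (illustrative): filtering on the old first_ip/last_ip
-    # values makes update_range() a compare-and-swap - if a concurrent
-    # transaction has already consumed or modified the range, update()
-    # matches zero rows and the caller retries via RetryRequest.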
-
-    def delete_range(self, session, db_range):
-        """Return count of deleted ranges
-
-        :param session: database session
-        :param db_range: IpamAvailabilityRange db object
-        """
-        try:
-            return session.query(
-                db_models.IpamAvailabilityRange).filter_by(
-                allocation_pool_id=db_range.allocation_pool_id).filter_by(
-                first_ip=db_range.first_ip).filter_by(
-                last_ip=db_range.last_ip).delete()
-        except orm_exc.ObjectDeletedError:
-            raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
-
-    def create_range(self, session, allocation_pool_id,
-                     range_start, range_end):
-        """Create an availability range for a given pool.
-
-        This method does not perform any validation on parameters; it simply
-        persists data in the database.
-
-        :param session: database session
-        :param allocation_pool_id: allocation pool identifier
-        :param range_start: first ip address in the range
-        :param range_end: last ip address in the range
-        :return: the newly created availability range as an instance of
-            neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange
-        """
-        new_ip_range = db_models.IpamAvailabilityRange(
-            allocation_pool_id=allocation_pool_id,
-            first_ip=range_start,
-            last_ip=range_end)
-        session.add(new_ip_range)
-        return new_ip_range
-
-    def check_unique_allocation(self, session, ip_address):
-        """Validate that the IP address on the subnet is not in use."""
-        iprequest = session.query(db_models.IpamAllocation).filter_by(
-            ipam_subnet_id=self._ipam_subnet_id, status='ALLOCATED',
-            ip_address=ip_address).first()
-        if iprequest:
-            return False
-        return True
-
-    def list_allocations(self, session, status='ALLOCATED'):
-        """Return current allocations for the subnet.
-
-        :param session: database session
-        :param status: IP allocation status
-        :returns: a list of IP allocation as instance of
-            neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAllocation
-        """
-        return session.query(
-            db_models.IpamAllocation).filter_by(
-            ipam_subnet_id=self._ipam_subnet_id,
-            status=status)
-
-    def create_allocation(self, session, ip_address,
-                          status='ALLOCATED'):
-        """Create an IP allocation entry.
-
-        :param session: database session
-        :param ip_address: the IP address to allocate
-        :param status: IP allocation status
-        """
-        ip_request = db_models.IpamAllocation(
-            ip_address=ip_address,
-            status=status,
-            ipam_subnet_id=self._ipam_subnet_id)
-        session.add(ip_request)
-
-    def delete_allocation(self, session, ip_address):
-        """Remove an IP allocation for this subnet.
-
-        :param session: database session
-        :param ip_address: IP address for which the allocation entry should
-            be removed.
-        """
-        return session.query(db_models.IpamAllocation).filter_by(
-            ip_address=ip_address,
-            ipam_subnet_id=self._ipam_subnet_id).delete(
-                synchronize_session=False)
diff --git a/neutron/ipam/drivers/neutrondb_ipam/db_models.py b/neutron/ipam/drivers/neutrondb_ipam/db_models.py
deleted file mode 100644 (file)
index 27fdfdd..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2015 OpenStack LLC.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import sqlalchemy as sa
-from sqlalchemy import orm as sa_orm
-
-from neutron.db import model_base
-
-# Database models used by the neutron DB IPAM driver
-
-
-# NOTE(salv-orlando): This is meant to replace the class
-# neutron.db.models_v2.IPAvailabilityRange.
-class IpamAvailabilityRange(model_base.BASEV2):
-    """Internal representation of available IPs for Neutron subnets.
-
-    Allocation - the first entry from the range will be allocated.
-    If the first entry is equal to the last entry then this row
-    will be deleted.
-    Recycling IPs involves reading the IPAllocationPool and IPAllocation
-    tables and inserting ranges representing available IPs.  This happens
-    after the final allocation is pulled from this table and a new IP
-    allocation is requested.  Any contiguous ranges of available IPs will be
-    inserted as a single range.
-    """
-
-    allocation_pool_id = sa.Column(sa.String(36),
-                                   sa.ForeignKey('ipamallocationpools.id',
-                                                 ondelete="CASCADE"),
-                                   nullable=False,
-                                   primary_key=True)
-    first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
-    last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
-    __table_args__ = (
-        sa.Index('ix_ipamavailabilityranges_first_ip_allocation_pool_id',
-                 'first_ip', 'allocation_pool_id'),
-        sa.Index('ix_ipamavailabilityranges_last_ip_allocation_pool_id',
-                 'last_ip', 'allocation_pool_id'),
-        model_base.BASEV2.__table_args__
-    )
-
-    def __repr__(self):
-        return "%s - %s" % (self.first_ip, self.last_ip)
-
-
-# NOTE(salv-orlando): The following data model creates redundancy with
-# models_v2.IPAllocationPool. This level of data redundancy can be tolerated
-# because this model is specific to the IPAM driver logic. It is an internal
-# representation of a subnet allocation pool and can therefore change in the
-# future, whereas models_v2.IPAllocationPool is the representation of IP
-# allocation pools in the management layer, whose evolution is subject to API
-# backward-compatibility policies.
-class IpamAllocationPool(model_base.BASEV2, model_base.HasId):
-    """Representation of an allocation pool in a Neutron subnet."""
-
-    ipam_subnet_id = sa.Column(sa.String(36),
-                               sa.ForeignKey('ipamsubnets.id',
-                                             ondelete="CASCADE"),
-                               nullable=False)
-    first_ip = sa.Column(sa.String(64), nullable=False)
-    last_ip = sa.Column(sa.String(64), nullable=False)
-    available_ranges = sa_orm.relationship(IpamAvailabilityRange,
-                                           backref='allocation_pool',
-                                           lazy="joined",
-                                           cascade='all, delete-orphan')
-
-    def __repr__(self):
-        return "%s - %s" % (self.first_ip, self.last_ip)
-
-
-class IpamSubnet(model_base.BASEV2, model_base.HasId):
-    """Association between IPAM entities and neutron subnets.
-
-    For subnet data persistency - such as cidr and gateway IP, the IPAM
-    driver relies on Neutron's subnet model as source of truth to limit
-    data redundancy.
-    """
-    neutron_subnet_id = sa.Column(sa.String(36),
-                                  nullable=True)
-    allocation_pools = sa_orm.relationship(IpamAllocationPool,
-                                           backref='subnet',
-                                           lazy="joined",
-                                           cascade='delete')
-
-
-class IpamAllocation(model_base.BASEV2):
-    """Model class for IP Allocation requests. """
-    ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
-    status = sa.Column(sa.String(36))
-    # The subnet identifier is redundant but comes in handy for looking up
-    # IP addresses to remove.
-    ipam_subnet_id = sa.Column(sa.String(36),
-                               sa.ForeignKey('ipamsubnets.id',
-                                             ondelete="CASCADE"),
-                               primary_key=True,
-                               nullable=False)
diff --git a/neutron/ipam/drivers/neutrondb_ipam/driver.py b/neutron/ipam/drivers/neutrondb_ipam/driver.py
deleted file mode 100644 (file)
index d820ff1..0000000
+++ /dev/null
@@ -1,455 +0,0 @@
-# Copyright 2015 OpenStack LLC.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from oslo_db import exception as db_exc
-from oslo_log import log
-from oslo_utils import uuidutils
-
-from neutron._i18n import _LE
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.db import api as db_api
-from neutron.ipam import driver as ipam_base
-from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api
-from neutron.ipam import exceptions as ipam_exc
-from neutron.ipam import requests as ipam_req
-from neutron.ipam import subnet_alloc
-from neutron.ipam import utils as ipam_utils
-from neutron import manager
-
-
-LOG = log.getLogger(__name__)
-
-
-class NeutronDbSubnet(ipam_base.Subnet):
-    """Manage IP addresses for Neutron DB IPAM driver.
-
-    This class implements the strategy for IP address allocation and
-    deallocation for the Neutron DB IPAM driver.
-    Allocation for IP addresses is based on the concept of availability
-    ranges, which were already used in Neutron's DB base class for handling
-    IPAM operations.
-    """
-
-    @classmethod
-    def create_allocation_pools(cls, subnet_manager, session, pools, cidr):
-        for pool in pools:
-            # IPv6 addresses that start with '::1', '::2', etc. cause IP
-            # version ambiguity when converted to integers by pool.first and
-            # pool.last.  Infer the IP version from the subnet cidr.
-            ip_version = cidr.version
-            subnet_manager.create_pool(
-                session,
-                netaddr.IPAddress(pool.first, ip_version).format(),
-                netaddr.IPAddress(pool.last, ip_version).format())
-
-    @classmethod
-    def create_from_subnet_request(cls, subnet_request, ctx):
-        ipam_subnet_id = uuidutils.generate_uuid()
-        subnet_manager = ipam_db_api.IpamSubnetManager(
-            ipam_subnet_id,
-            subnet_request.subnet_id)
-        # Create subnet resource
-        session = ctx.session
-        subnet_manager.create(session)
-        # If allocation pools are not specified, define them around
-        # the subnet's gateway IP
-        if not subnet_request.allocation_pools:
-            pools = ipam_utils.generate_pools(subnet_request.subnet_cidr,
-                                              subnet_request.gateway_ip)
-        else:
-            pools = subnet_request.allocation_pools
-        # Create IPAM allocation pools and availability ranges
-        cls.create_allocation_pools(subnet_manager, session, pools,
-                                    subnet_request.subnet_cidr)
-
-        return cls(ipam_subnet_id,
-                   ctx,
-                   cidr=subnet_request.subnet_cidr,
-                   allocation_pools=pools,
-                   gateway_ip=subnet_request.gateway_ip,
-                   tenant_id=subnet_request.tenant_id,
-                   subnet_id=subnet_request.subnet_id)
-
-    @classmethod
-    def load(cls, neutron_subnet_id, ctx):
-        """Load an IPAM subnet from the database given its neutron ID.
-
-        :param neutron_subnet_id: neutron subnet identifier.
-        """
-        ipam_subnet = ipam_db_api.IpamSubnetManager.load_by_neutron_subnet_id(
-            ctx.session, neutron_subnet_id)
-        if not ipam_subnet:
-            LOG.error(_LE("IPAM subnet referenced to "
-                          "Neutron subnet %s does not exist"),
-                      neutron_subnet_id)
-            raise n_exc.SubnetNotFound(subnet_id=neutron_subnet_id)
-        pools = []
-        for pool in ipam_subnet.allocation_pools:
-            pools.append(netaddr.IPRange(pool['first_ip'], pool['last_ip']))
-
-        neutron_subnet = cls._fetch_subnet(ctx, neutron_subnet_id)
-
-        return cls(ipam_subnet['id'],
-                   ctx,
-                   cidr=neutron_subnet['cidr'],
-                   allocation_pools=pools,
-                   gateway_ip=neutron_subnet['gateway_ip'],
-                   tenant_id=neutron_subnet['tenant_id'],
-                   subnet_id=neutron_subnet_id)
-
-    @classmethod
-    def _fetch_subnet(cls, context, id):
-        plugin = manager.NeutronManager.get_plugin()
-        return plugin._get_subnet(context, id)
-
-    def __init__(self, internal_id, ctx, cidr=None,
-                 allocation_pools=None, gateway_ip=None, tenant_id=None,
-                 subnet_id=None):
-        # NOTE: In theory it could have been possible to grant the IPAM
-        # driver direct access to the database. While this is possible,
-        # it would have led to duplicate code and/or non-trivial
-        # refactorings in neutron.db.db_base_plugin_v2.
-        # This is because in the Neutron V2 plugin logic DB management is
-        # encapsulated within the plugin.
-        self._cidr = cidr
-        self._pools = allocation_pools
-        self._gateway_ip = gateway_ip
-        self._tenant_id = tenant_id
-        self._subnet_id = subnet_id
-        self.subnet_manager = ipam_db_api.IpamSubnetManager(internal_id,
-                                                            self._subnet_id)
-        self._context = ctx
-
-    def _verify_ip(self, session, ip_address):
-        """Verify whether IP address can be allocated on subnet.
-
-        :param session: database session
-        :param ip_address: String representing the IP address to verify
-        :raises: InvalidInput, IpAddressAlreadyAllocated
-        """
-        # Ensure that the IP's are unique
-        if not self.subnet_manager.check_unique_allocation(session,
-                                                           ip_address):
-            raise ipam_exc.IpAddressAlreadyAllocated(
-                subnet_id=self.subnet_manager.neutron_id,
-                ip=ip_address)
-
-        # Ensure that the IP is valid on the subnet
-        if not ipam_utils.check_subnet_ip(self._cidr, ip_address):
-            raise ipam_exc.InvalidIpForSubnet(
-                subnet_id=self.subnet_manager.neutron_id,
-                ip=ip_address)
-
-    def _allocate_specific_ip(self, session, ip_address,
-                              allocation_pool_id=None,
-                              auto_generated=False):
-        """Remove an IP address from subnet's availability ranges.
-
-        This method is supposed to be called from within a database
-        transaction, otherwise atomicity and integrity might not be
-        enforced and the operation might result in inconsistent availability
-        ranges for the subnet.
-
-        :param session: database session
-        :param ip_address: ip address to mark as allocated
-        :param allocation_pool_id: identifier of the allocation pool from
-             which the ip address has been extracted. If not specified this
-             routine will scan all allocation pools.
-        :param auto_generated: indicates whether ip was auto generated
-        :returns: list of IP ranges as instances of IpamAvailabilityRange
-        """
-        # Return immediately for EUI-64 addresses. For this class of
-        # subnets, availability ranges do not apply.
-        if ipv6_utils.is_eui64_address(ip_address):
-            return
-
-        LOG.debug("Removing %(ip_address)s from availability ranges for "
-                  "subnet id:%(subnet_id)s",
-                  {'ip_address': ip_address,
-                   'subnet_id': self.subnet_manager.neutron_id})
-        # Netaddr's IPRange and IPSet objects work very well even with very
-        # large subnets, including IPv6 ones.
-        final_ranges = []
-        ip_in_pools = False
-        if allocation_pool_id:
-            av_ranges = self.subnet_manager.list_ranges_by_allocation_pool(
-                session, allocation_pool_id)
-        else:
-            av_ranges = self.subnet_manager.list_ranges_by_subnet_id(session)
-        for db_range in av_ranges:
-            initial_ip_set = netaddr.IPSet(netaddr.IPRange(
-                db_range['first_ip'], db_range['last_ip']))
-            final_ip_set = initial_ip_set - netaddr.IPSet([ip_address])
-            if not final_ip_set:
-                ip_in_pools = True
-                # Range exhausted - remove it
-                if not self.subnet_manager.delete_range(session, db_range):
-                    raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
-                continue
-            if initial_ip_set == final_ip_set:
-                # IP address does not fall within the current range, move
-                # to the next one
-                final_ranges.append(db_range)
-                continue
-            ip_in_pools = True
-            for new_range in final_ip_set.iter_ipranges():
-                # store new range in database
-                # use netaddr.IPAddress format() method which is equivalent
-                # to str(...) but also enables us to use different
-                # representation formats (if needed) for IPv6.
-                first_ip = netaddr.IPAddress(new_range.first)
-                last_ip = netaddr.IPAddress(new_range.last)
-                if (db_range['first_ip'] == first_ip.format() or
-                        db_range['last_ip'] == last_ip.format()):
-                    rows = self.subnet_manager.update_range(
-                        session, db_range, first_ip=first_ip, last_ip=last_ip)
-                    if not rows:
-                        raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
-                    LOG.debug("Adjusted availability range for pool %s",
-                              db_range['allocation_pool_id'])
-                    final_ranges.append(db_range)
-                else:
-                    new_ip_range = self.subnet_manager.create_range(
-                        session,
-                        db_range['allocation_pool_id'],
-                        first_ip.format(),
-                        last_ip.format())
-                    LOG.debug("Created availability range for pool %s",
-                              new_ip_range['allocation_pool_id'])
-                    final_ranges.append(new_ip_range)
-
-        # If the IP is auto-generated it should be present in the allocation
-        # pools, so retry if it is not there.
-        if auto_generated and not ip_in_pools:
-            raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
-        # Most callers might ignore this return value, which is however
-        # useful for testing purposes
-        LOG.debug("Availability ranges for subnet id %(subnet_id)s "
-                  "modified: %(new_ranges)s",
-                  {'subnet_id': self.subnet_manager.neutron_id,
-                   'new_ranges': ", ".join(["[%s; %s]" %
-                                            (r['first_ip'], r['last_ip']) for
-                                            r in final_ranges])})
-        return final_ranges
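-
-    # Editor's note (illustrative example of the netaddr set arithmetic used
-    # above):
-    #     initial = netaddr.IPSet(netaddr.IPRange('10.0.0.2', '10.0.0.10'))
-    #     final = initial - netaddr.IPSet(['10.0.0.5'])
-    #     list(final.iter_ipranges())
-    #     # [IPRange('10.0.0.2', '10.0.0.4'), IPRange('10.0.0.6', '10.0.0.10')]
-    # Removing an address from the middle of a range splits it in two, which
-    # is why both update_range() and create_range() may be needed.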
-
-    def _rebuild_availability_ranges(self, session):
-        """Rebuild availability ranges.
-
-        This method should be called only when the availability ranges are
-        exhausted or when the subnet's allocation pools are updated,
-        which may trigger a deletion of the availability ranges.
-
-        For this operation to complete successfully, this method uses a
-        locking query to ensure that no IP is allocated while the regeneration
-        of availability ranges is in progress.
-
-        :param session: database session
-        """
-        # List all currently allocated addresses, and prevent further
-        # allocations with a write-intent lock.
-        # NOTE: because of this driver's logic the write intent lock is
-        # probably unnecessary as this routine is called when the availability
-        # ranges for a subnet are exhausted and no further address can be
-        # allocated.
-        # TODO(salv-orlando): devise, if possible, a more efficient solution
-        # for building the IPSet to ensure decent performances even with very
-        # large subnets.
-        allocations = netaddr.IPSet(
-            [netaddr.IPAddress(allocation['ip_address']) for
-             allocation in self.subnet_manager.list_allocations(
-                 session)])
-
-        # There should be no need to set a write intent lock on the allocation
-        # pool table. Indeed it is not important for the correctness of this
-        # operation if the allocation pools are updated by another operation,
-        # which will result in the generation of new availability ranges.
-        # NOTE: it might be argued that an allocation pool update should in
-        # theory preempt rebuilding the availability range. This is an option
-        # to consider for future developments.
-        LOG.debug("Rebuilding availability ranges for subnet %s",
-                  self.subnet_manager.neutron_id)
-
-        for pool in self.subnet_manager.list_pools(session):
-            # Create a set of all addresses in the pool
-            poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'],
-                                                    pool['last_ip']))
-            # Use set difference to find free addresses in the pool
-            available = poolset - allocations
-            # Write the ranges to the db
-            for ip_range in available.iter_ipranges():
-                av_range = self.subnet_manager.create_range(
-                    session,
-                    pool['id'],
-                    netaddr.IPAddress(ip_range.first).format(),
-                    netaddr.IPAddress(ip_range.last).format())
-                session.add(av_range)
-
-    def _generate_ip(self, session):
-        try:
-            return self._try_generate_ip(session)
-        except ipam_exc.IpAddressGenerationFailure:
-            self._rebuild_availability_ranges(session)
-
-        return self._try_generate_ip(session)
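-
-    # Editor's note: the pattern above tries the fast path first and only
-    # rebuilds the availability ranges from the allocation pools when they
-    # are exhausted, then retries once. A second IpAddressGenerationFailure
-    # means the subnet is genuinely out of addresses.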
-
-    def _try_generate_ip(self, session):
-        """Generate an IP address from availability ranges."""
-        ip_range = self.subnet_manager.get_first_range(session)
-        if not ip_range:
-            LOG.debug("All IPs from subnet %(subnet_id)s allocated",
-                      {'subnet_id': self.subnet_manager.neutron_id})
-            raise ipam_exc.IpAddressGenerationFailure(
-                subnet_id=self.subnet_manager.neutron_id)
-        # A suitable range was found. Return IP address.
-        ip_address = ip_range['first_ip']
-        LOG.debug("Allocated IP - %(ip_address)s from range "
-                  "[%(first_ip)s; %(last_ip)s]",
-                  {'ip_address': ip_address,
-                   'first_ip': ip_address,
-                   'last_ip': ip_range['last_ip']})
-        return ip_address, ip_range['allocation_pool_id']
-
-    def allocate(self, address_request):
-        # NOTE(salv-orlando): Creating a new db session might be a rather
-        # dangerous thing to do if executed from within another database
-        # transaction. Therefore the IPAM driver should never be
-        # called from within a database transaction, which is also good
-        # practice since in the general case these drivers may interact
-        # with remote backends
-        session = self._context.session
-        all_pool_id = None
-        auto_generated = False
-        with db_api.autonested_transaction(session):
-            # NOTE(salv-orlando): It would probably better to have a simpler
-            # model for address requests and just check whether there is a
-            # specific IP address specified in address_request
-            if isinstance(address_request, ipam_req.SpecificAddressRequest):
-                # This handles both specific and automatic address requests
-                # Check availability of requested IP
-                ip_address = str(address_request.address)
-                self._verify_ip(session, ip_address)
-            else:
-                ip_address, all_pool_id = self._generate_ip(session)
-                auto_generated = True
-            self._allocate_specific_ip(session, ip_address, all_pool_id,
-                                       auto_generated)
-            # Create IP allocation request object
-            # The only defined status at this stage is 'ALLOCATED'.
-            # More states will be available in the future - e.g.: RECYCLABLE
-            self.subnet_manager.create_allocation(session, ip_address)
-            return ip_address
-
-    def deallocate(self, address):
-        # This is almost a no-op because the Neutron DB IPAM driver does not
-        # delete IPAllocation objects, neither rebuilds availability ranges
-        # at every deallocation. The only operation it performs is to delete
-        # an IPRequest entry.
-        session = self._context.session
-
-        count = self.subnet_manager.delete_allocation(
-            session, address)
-        # count should never be greater than 1, but it can be 0...
-        if not count:
-            raise ipam_exc.IpAddressAllocationNotFound(
-                subnet_id=self.subnet_manager.neutron_id,
-                ip_address=address)
-
-    def update_allocation_pools(self, pools, cidr):
-        # Pools have already been validated in the subnet request object which
-        # was sent to the subnet pool driver. Further validation should not be
-        # required.
-        session = db_api.get_session()
-        self.subnet_manager.delete_allocation_pools(session)
-        self.create_allocation_pools(self.subnet_manager, session, pools, cidr)
-        self._pools = pools
-
-    def get_details(self):
-        """Return subnet data as a SpecificSubnetRequest"""
-        return ipam_req.SpecificSubnetRequest(
-            self._tenant_id, self.subnet_manager.neutron_id,
-            self._cidr, self._gateway_ip, self._pools)
-
-
-class NeutronDbPool(subnet_alloc.SubnetAllocator):
-    """Subnet pools backed by Neutron Database.
-
-    As this driver does not yet implement the subnet pool concept, most
-    operations are either trivial or no-ops.
-    """
-
-    def get_subnet(self, subnet_id):
-        """Retrieve an IPAM subnet.
-
-        :param subnet_id: Neutron subnet identifier
-        :returns: a NeutronDbSubnet instance
-        """
-        return NeutronDbSubnet.load(subnet_id, self._context)
-
-    def allocate_subnet(self, subnet_request):
-        """Create an IPAMSubnet object for the provided cidr.
-
-        This method does not actually do any operation in the driver, given
-        its simplified nature.
-
-        :param cidr: subnet's CIDR
-        :returns: a NeutronDbSubnet instance
-        """
-        if self._subnetpool:
-            subnet = super(NeutronDbPool, self).allocate_subnet(subnet_request)
-            subnet_request = subnet.get_details()
-
-        # SubnetRequest must be an instance of SpecificSubnet
-        if not isinstance(subnet_request, ipam_req.SpecificSubnetRequest):
-            raise ipam_exc.InvalidSubnetRequestType(
-                subnet_type=type(subnet_request))
-        return NeutronDbSubnet.create_from_subnet_request(subnet_request,
-                                                          self._context)
-
-    def update_subnet(self, subnet_request):
-        """Update subnet info the in the IPAM driver.
-
-        The only update subnet information the driver needs to be aware of
-        are allocation pools.
-        """
-        if not subnet_request.subnet_id:
-            raise ipam_exc.InvalidSubnetRequest(
-                reason=("An identifier must be specified when updating "
-                        "a subnet"))
-        if not subnet_request.allocation_pools:
-            LOG.debug("Update subnet request for subnet %s did not specify "
-                      "new allocation pools, there is nothing to do",
-                      subnet_request.subnet_id)
-            return
-        subnet = NeutronDbSubnet.load(subnet_request.subnet_id, self._context)
-        cidr = netaddr.IPNetwork(subnet._cidr)
-        subnet.update_allocation_pools(subnet_request.allocation_pools, cidr)
-        return subnet
-
-    def remove_subnet(self, subnet_id):
-        """Remove data structures for a given subnet.
-
-        IPAM-related data has no foreign key relationships to the neutron
-        subnet, so the IPAM subnet has to be removed manually.
-        """
-        count = ipam_db_api.IpamSubnetManager.delete(self._context.session,
-                                                     subnet_id)
-        if count < 1:
-            LOG.error(_LE("IPAM subnet referenced to "
-                          "Neutron subnet %s does not exist"),
-                      subnet_id)
-            raise n_exc.SubnetNotFound(subnet_id=subnet_id)
diff --git a/neutron/ipam/exceptions.py b/neutron/ipam/exceptions.py
deleted file mode 100644 (file)
index 53d16a2..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2015 OpenStack LLC.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.common import exceptions
-
-
-class InvalidSubnetRequestType(exceptions.BadRequest):
-    message = _("Cannot handle subnet of type %(subnet_type)s")
-
-
-class AddressCalculationFailure(exceptions.NeutronException):
-    message = _("Unable to calculate %(address_type)s address because of:"
-                "%(reason)s")
-
-
-class InvalidAddressType(exceptions.NeutronException):
-    message = _("Unknown address type %(address_type)s")
-
-
-class IpAddressAllocationNotFound(exceptions.NeutronException):
-    message = _("Unable to find IP address %(ip_address)s on subnet "
-                "%(subnet_id)s")
-
-
-class IpAddressAlreadyAllocated(exceptions.Conflict):
-    message = _("IP address %(ip)s already allocated in subnet %(subnet_id)s")
-
-
-class InvalidIpForSubnet(exceptions.BadRequest):
-    message = _("IP address %(ip)s does not belong to subnet %(subnet_id)s")
-
-
-class InvalidAddressRequest(exceptions.BadRequest):
-    message = _("The address allocation request could not be satisfied "
-                "because: %(reason)s")
-
-
-class InvalidSubnetRequest(exceptions.BadRequest):
-    message = _("The subnet request could not be satisfied because: "
-                "%(reason)s")
-
-
-class AllocationOnAutoAddressSubnet(exceptions.NeutronException):
-    message = _("IPv6 address %(ip)s cannot be directly "
-                "assigned to a port on subnet %(subnet_id)s as the "
-                "subnet is configured for automatic addresses")
-
-
-class IpAddressGenerationFailure(exceptions.Conflict):
-    message = _("No more IP addresses available for subnet %(subnet_id)s.")
-
-
-class IPAllocationFailed(exceptions.NeutronException):
-    message = _("IP allocation failed. Try again later.")
-
-
-class IpamAvailabilityRangeNoChanges(exceptions.NeutronException):
-    message = _("New value for first_ip or last_ip has to be specified.")
diff --git a/neutron/ipam/requests.py b/neutron/ipam/requests.py
deleted file mode 100644 (file)
index e636758..0000000
+++ /dev/null
@@ -1,304 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import netaddr
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-import six
-
-from neutron._i18n import _
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import ipv6_utils
-from neutron.common import utils as common_utils
-from neutron.ipam import exceptions as ipam_exc
-
-
-@six.add_metaclass(abc.ABCMeta)
-class SubnetPool(object):
-    """Represents a pool of IPs available inside an address scope."""
-
-
-@six.add_metaclass(abc.ABCMeta)
-class SubnetRequest(object):
-    """Carries the data needed to make a subnet request
-
-    The data validated and carried by an instance of this class is the data
-    that is common to any type of request.  This class shouldn't be
-    instantiated on its own.  Rather, a subclass of this class should be used.
-    """
-    def __init__(self, tenant_id, subnet_id,
-                 gateway_ip=None, allocation_pools=None):
-        """Initialize and validate
-
-        :param tenant_id: The tenant id who will own the subnet
-        :type tenant_id: str uuid
-        :param subnet_id: Neutron's subnet ID
-        :type subnet_id: str uuid
-        :param gateway_ip: An IP to reserve for the subnet gateway.
-        :type gateway_ip: None or convertible to netaddr.IPAddress
-        :param allocation_pools: The pools from which IPAM should allocate
-            addresses.  The allocator *may* allow allocating addresses outside
-            of these ranges if specifically requested.
-        :type allocation_pools: A list of netaddr.IPRange.  None if not
-            specified.
-        """
-        self._tenant_id = tenant_id
-        self._subnet_id = subnet_id
-        self._gateway_ip = None
-        self._allocation_pools = None
-
-        if gateway_ip is not None:
-            self._gateway_ip = netaddr.IPAddress(gateway_ip)
-
-        if allocation_pools is not None:
-            allocation_pools = sorted(allocation_pools)
-            previous = None
-            for pool in allocation_pools:
-                if not isinstance(pool, netaddr.ip.IPRange):
-                    raise TypeError(_("Ranges must be netaddr.IPRange"))
-                if previous and pool.first <= previous.last:
-                    raise ValueError(_("Ranges must not overlap"))
-                previous = pool
-            if 1 < len(allocation_pools):
-                # Checks that all the ranges are in the same IP version.
-                # IPRange sorts first by ip version so we can get by with just
-                # checking the first and the last range having sorted them
-                # above.
-                first_version = allocation_pools[0].version
-                last_version = allocation_pools[-1].version
-                if first_version != last_version:
-                    raise ValueError(_("Ranges must be in the same IP "
-                                       "version"))
-            self._allocation_pools = allocation_pools
-
-        if self.gateway_ip and self.allocation_pools:
-            if self.gateway_ip.version != self.allocation_pools[0].version:
-                raise ValueError(_("Gateway IP version inconsistent with "
-                                   "allocation pool version"))
-
-    @property
-    def tenant_id(self):
-        return self._tenant_id
-
-    @property
-    def subnet_id(self):
-        return self._subnet_id
-
-    @property
-    def gateway_ip(self):
-        return self._gateway_ip
-
-    @property
-    def allocation_pools(self):
-        return self._allocation_pools
-
-    def _validate_with_subnet(self, subnet_cidr):
-        if self.gateway_ip and cfg.CONF.force_gateway_on_subnet:
-            gw_ip = netaddr.IPAddress(self.gateway_ip)
-            if (gw_ip.version == 4 or (gw_ip.version == 6
-                                       and not gw_ip.is_link_local())):
-                if self.gateway_ip not in subnet_cidr:
-                    raise ValueError(_("gateway_ip is not in the subnet"))
-
-        if self.allocation_pools:
-            if subnet_cidr.version != self.allocation_pools[0].version:
-                raise ValueError(_("allocation_pools use the wrong ip "
-                                   "version"))
-            for pool in self.allocation_pools:
-                if pool not in subnet_cidr:
-                    raise ValueError(_("allocation_pools are not in the "
-                                       "subnet"))
-
-
-class AnySubnetRequest(SubnetRequest):
-    """A template for allocating an unspecified subnet from IPAM
-
-    Support for this type of request in a driver is optional. For example, the
-    initial reference implementation will not support this.  The API has no way
-    of creating a subnet without a specific address until subnet-allocation is
-    implemented.
-    """
-    WILDCARDS = {constants.IPv4: '0.0.0.0',
-                 constants.IPv6: '::'}
-
-    def __init__(self, tenant_id, subnet_id, version, prefixlen,
-                 gateway_ip=None, allocation_pools=None):
-        """
-        :param version: Either constants.IPv4 or constants.IPv6
-        :param prefixlen: The prefix len requested.  Must be within the min and
-            max allowed.
-        :type prefixlen: int
-        """
-        super(AnySubnetRequest, self).__init__(
-            tenant_id=tenant_id,
-            subnet_id=subnet_id,
-            gateway_ip=gateway_ip,
-            allocation_pools=allocation_pools)
-
-        net = netaddr.IPNetwork(self.WILDCARDS[version] + '/' + str(prefixlen))
-        self._validate_with_subnet(net)
-
-        self._prefixlen = prefixlen
-
-    @property
-    def prefixlen(self):
-        return self._prefixlen
-
-
-class SpecificSubnetRequest(SubnetRequest):
-    """A template for allocating a specified subnet from IPAM
-
-    The initial reference implementation will probably just allow any
-    allocation, even overlapping ones.  This can be expanded on by future
-    blueprints.
-    """
-    def __init__(self, tenant_id, subnet_id, subnet_cidr,
-                 gateway_ip=None, allocation_pools=None):
-        """
-        :param subnet_cidr: The subnet requested.  Can be IPv4 or IPv6.
-            However, when IPAM tries to fulfill this request, the IP version
-            must match the version of the address scope being used.
-        :type subnet_cidr: netaddr.IPNetwork or convertible to one
-        """
-        super(SpecificSubnetRequest, self).__init__(
-            tenant_id=tenant_id,
-            subnet_id=subnet_id,
-            gateway_ip=gateway_ip,
-            allocation_pools=allocation_pools)
-
-        self._subnet_cidr = netaddr.IPNetwork(subnet_cidr)
-        self._validate_with_subnet(self._subnet_cidr)
-
-    @property
-    def subnet_cidr(self):
-        return self._subnet_cidr
-
-    @property
-    def prefixlen(self):
-        return self._subnet_cidr.prefixlen
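-
-
-# Editor's addition - a hypothetical example of the validation performed by
-# SubnetRequest.__init__ and _validate_with_subnet: overlapping allocation
-# pools, mixed IP versions, or pools outside the cidr all raise ValueError.
-#
-#     SpecificSubnetRequest(
-#         'tenant-id', 'subnet-id', '192.0.2.0/24',
-#         gateway_ip='192.0.2.1',
-#         allocation_pools=[netaddr.IPRange('192.0.2.10', '192.0.2.20'),
-#                           netaddr.IPRange('192.0.2.15', '192.0.2.30')])
-#     # ValueError: Ranges must not overlap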
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AddressRequest(object):
-    """Abstract base class for address requests"""
-
-
-class SpecificAddressRequest(AddressRequest):
-    """For requesting a specified address from IPAM"""
-    def __init__(self, address):
-        """
-        :param address: The address being requested
-        :type address: A netaddr.IPAddress or convertible to one.
-        """
-        super(SpecificAddressRequest, self).__init__()
-        self._address = netaddr.IPAddress(address)
-
-    @property
-    def address(self):
-        return self._address
-
-
-class AnyAddressRequest(AddressRequest):
-    """Used to request any available address from the pool."""
-
-
-class AutomaticAddressRequest(SpecificAddressRequest):
-    """Used to create auto generated addresses, such as EUI64"""
-    EUI64 = 'eui64'
-
-    def _generate_eui64_address(self, **kwargs):
-        if set(kwargs) != set(['prefix', 'mac']):
-            raise ipam_exc.AddressCalculationFailure(
-                address_type='eui-64',
-                reason=_('must provide exactly 2 arguments: prefix and mac'))
-        prefix = kwargs['prefix']
-        mac_address = kwargs['mac']
-        return ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac_address)
-
-    _address_generators = {EUI64: _generate_eui64_address}
-
-    def __init__(self, address_type=EUI64, **kwargs):
-        """
-        This constructor builds an automatic IP address. Parameters needed
-        for generating it can be passed as optional keyword arguments.
-
-        :param address_type: the type of address to generate.
-            It could be an eui-64 address, a random IPv6 address, or
-            an ipv4 link-local address.
-            For the Kilo release only eui-64 addresses will be supported.
-        """
-        address_generator = self._address_generators.get(address_type)
-        if not address_generator:
-            raise ipam_exc.InvalidAddressType(address_type=address_type)
-        address = address_generator(self, **kwargs)
-        super(AutomaticAddressRequest, self).__init__(address)
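-
-    # Editor's note (illustrative): an EUI-64 request derives the address
-    # from the subnet prefix and the MAC address, e.g.
-    #     AutomaticAddressRequest(prefix='2001:db8::/64',
-    #                             mac='00:16:3e:33:44:55').address
-    #     # IPAddress('2001:db8::216:3eff:fe33:4455')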
-
-
-class RouterGatewayAddressRequest(AddressRequest):
-    """Used to request allocating the special router gateway address."""
-
-
-class AddressRequestFactory(object):
-    """Builds request using ip info
-
-    Additional parameters(port and context) are not used in default
-    implementation, but planned to be used in sub-classes
-    provided by specific ipam driver,
-    """
-
-    @classmethod
-    def get_request(cls, context, port, ip_dict):
-        """
-        :param context: context (not used here, but can be used in sub-classes)
-        :param port: port dict (not used here, but can be used in sub-classes)
-        :param ip_dict: dict that can contain 'ip_address', 'eui64_address',
-            'mac' and 'subnet_cidr' keys. The request to generate is selected
-            depending on these keys.
-        :return: a prepared AddressRequest (specific, automatic or any)
-        """
-        if ip_dict.get('ip_address'):
-            return SpecificAddressRequest(ip_dict['ip_address'])
-        elif ip_dict.get('eui64_address'):
-            return AutomaticAddressRequest(prefix=ip_dict['subnet_cidr'],
-                                           mac=ip_dict['mac'])
-        else:
-            return AnyAddressRequest()
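-
-    # Editor's note (illustrative): the dispatch above means, for example:
-    #     get_request(ctx, port, {'ip_address': '10.0.0.5'})
-    #         -> SpecificAddressRequest
-    #     get_request(ctx, port, {'eui64_address': True,
-    #                             'subnet_cidr': '2001:db8::/64',
-    #                             'mac': '00:16:3e:33:44:55'})
-    #         -> AutomaticAddressRequest
-    #     get_request(ctx, port, {})
-    #         -> AnyAddressRequest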
-
-
-class SubnetRequestFactory(object):
-    """Builds request using subnet info"""
-
-    @classmethod
-    def get_request(cls, context, subnet, subnetpool):
-        cidr = subnet.get('cidr')
-        subnet_id = subnet.get('id', uuidutils.generate_uuid())
-        is_any_subnetpool_request = not attributes.is_attr_set(cidr)
-
-        if is_any_subnetpool_request:
-            prefixlen = subnet['prefixlen']
-            if not attributes.is_attr_set(prefixlen):
-                prefixlen = int(subnetpool['default_prefixlen'])
-
-            return AnySubnetRequest(
-                subnet['tenant_id'],
-                subnet_id,
-                common_utils.ip_version_from_int(subnetpool['ip_version']),
-                prefixlen)
-        else:
-            return SpecificSubnetRequest(subnet['tenant_id'],
-                                         subnet_id,
-                                         cidr,
-                                         subnet.get('gateway_ip'),
-                                         subnet.get('allocation_pools'))
diff --git a/neutron/ipam/subnet_alloc.py b/neutron/ipam/subnet_alloc.py
deleted file mode 100644 (file)
index 7e90cee..0000000
+++ /dev/null
@@ -1,397 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Co.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import math
-import operator
-
-import netaddr
-from oslo_db import exception as db_exc
-from oslo_utils import uuidutils
-
-from neutron._i18n import _
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.db import models_v2
-from neutron.ipam import driver
-from neutron.ipam import requests as ipam_req
-from neutron.ipam import utils as ipam_utils
-
-
-class SubnetAllocator(driver.Pool):
-    """Class for handling allocation of subnet prefixes from a subnet pool.
-
-       This class leverages the pluggable IPAM interface where possible to
-       make merging into the IPAM framework easier in future cycles.
-    """
-
-    def __init__(self, subnetpool, context):
-        super(SubnetAllocator, self).__init__(subnetpool, context)
-        self._sp_helper = SubnetPoolHelper()
-
-    def _lock_subnetpool(self):
-        """Lock subnetpool associated row.
-
-        This method disallows to allocate concurrently 2 subnets in the same
-        subnetpool, it's required to ensure non-overlapping cidrs in the same
-        subnetpool.
-        """
-
-        current_hash = (self._context.session.query(models_v2.SubnetPool.hash)
-                        .filter_by(id=self._subnetpool['id']).scalar())
-        if current_hash is None:
-            # NOTE(cbrandily): subnetpool has been deleted
-            raise n_exc.SubnetPoolNotFound(
-                subnetpool_id=self._subnetpool['id'])
-        new_hash = uuidutils.generate_uuid()
-
-        # NOTE(cbrandily): the update prevents 2 concurrent subnet allocations
-        # from succeeding: at most 1 transaction will succeed, the others
-        # will be rolled back and caught in neutron.db.v2.base
-        query = self._context.session.query(models_v2.SubnetPool).filter_by(
-            id=self._subnetpool['id'], hash=current_hash)
-        count = query.update({'hash': new_hash})
-        if not count:
-            raise db_exc.RetryRequest(n_exc.SubnetPoolInUse(
-                                      subnet_pool_id=self._subnetpool['id']))
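-
-    # Editor's note (illustrative): this is optimistic locking rather than a
-    # row lock - every allocation rewrites the pool's hash column, and the
-    # filter on the previously read hash guarantees that, of two concurrent
-    # allocations, only one UPDATE matches any row; the loser gets a
-    # RetryRequest.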
-
-    def _get_allocated_cidrs(self):
-        query = self._context.session.query(models_v2.Subnet)
-        subnets = query.filter_by(subnetpool_id=self._subnetpool['id'])
-        return (x.cidr for x in subnets)
-
-    def _get_available_prefix_list(self):
-        prefixes = (x.cidr for x in self._subnetpool.prefixes)
-        allocations = self._get_allocated_cidrs()
-        prefix_set = netaddr.IPSet(iterable=prefixes)
-        allocation_set = netaddr.IPSet(iterable=allocations)
-        available_set = prefix_set.difference(allocation_set)
-        available_set.compact()
-        return sorted(available_set.iter_cidrs(),
-                      key=operator.attrgetter('prefixlen'),
-                      reverse=True)
-
-    def _num_quota_units_in_prefixlen(self, prefixlen, quota_unit):
-        return math.pow(2, quota_unit - prefixlen)
-
-    def _allocations_used_by_tenant(self, quota_unit):
-        subnetpool_id = self._subnetpool['id']
-        tenant_id = self._subnetpool['tenant_id']
-        with self._context.session.begin(subtransactions=True):
-            qry = self._context.session.query(models_v2.Subnet)
-            allocations = qry.filter_by(subnetpool_id=subnetpool_id,
-                                        tenant_id=tenant_id)
-            value = 0
-            for allocation in allocations:
-                prefixlen = netaddr.IPNetwork(allocation.cidr).prefixlen
-                value += self._num_quota_units_in_prefixlen(prefixlen,
-                                                            quota_unit)
-            return value
-
-    def _check_subnetpool_tenant_quota(self, tenant_id, prefixlen):
-        quota_unit = self._sp_helper.ip_version_subnetpool_quota_unit(
-                                               self._subnetpool['ip_version'])
-        quota = self._subnetpool.get('default_quota')
-
-        if quota:
-            used = self._allocations_used_by_tenant(quota_unit)
-            requested_units = self._num_quota_units_in_prefixlen(prefixlen,
-                                                                 quota_unit)
-
-            if used + requested_units > quota:
-                raise n_exc.SubnetPoolQuotaExceeded()
-
-    def _allocate_any_subnet(self, request):
-        with self._context.session.begin(subtransactions=True):
-            self._lock_subnetpool()
-            self._check_subnetpool_tenant_quota(request.tenant_id,
-                                                request.prefixlen)
-            prefix_pool = self._get_available_prefix_list()
-            for prefix in prefix_pool:
-                if request.prefixlen >= prefix.prefixlen:
-                    subnet = next(prefix.subnet(request.prefixlen))
-                    gateway_ip = request.gateway_ip
-                    if not gateway_ip:
-                        gateway_ip = subnet.network + 1
-                    pools = ipam_utils.generate_pools(subnet.cidr,
-                                                      gateway_ip)
-
-                    return IpamSubnet(request.tenant_id,
-                                      request.subnet_id,
-                                      subnet.cidr,
-                                      gateway_ip=gateway_ip,
-                                      allocation_pools=pools)
-            msg = _("Insufficient prefix space to allocate subnet size /%s")
-            raise n_exc.SubnetAllocationError(reason=msg %
-                                              str(request.prefixlen))
-
-    def _allocate_specific_subnet(self, request):
-        with self._context.session.begin(subtransactions=True):
-            self._lock_subnetpool()
-            self._check_subnetpool_tenant_quota(request.tenant_id,
-                                                request.prefixlen)
-            cidr = request.subnet_cidr
-            available = self._get_available_prefix_list()
-            matched = netaddr.all_matching_cidrs(cidr, available)
-            if len(matched) == 1 and matched[0].prefixlen <= cidr.prefixlen:
-                return IpamSubnet(request.tenant_id,
-                                  request.subnet_id,
-                                  cidr,
-                                  gateway_ip=request.gateway_ip,
-                                  allocation_pools=request.allocation_pools)
-            msg = _("Cannot allocate requested subnet from the available "
-                    "set of prefixes")
-            raise n_exc.SubnetAllocationError(reason=msg)
-
-    def allocate_subnet(self, request):
-        max_prefixlen = int(self._subnetpool['max_prefixlen'])
-        min_prefixlen = int(self._subnetpool['min_prefixlen'])
-        if request.prefixlen > max_prefixlen:
-            raise n_exc.MaxPrefixSubnetAllocationError(
-                              prefixlen=request.prefixlen,
-                              max_prefixlen=max_prefixlen)
-        if request.prefixlen < min_prefixlen:
-            raise n_exc.MinPrefixSubnetAllocationError(
-                              prefixlen=request.prefixlen,
-                              min_prefixlen=min_prefixlen)
-
-        if isinstance(request, ipam_req.AnySubnetRequest):
-            return self._allocate_any_subnet(request)
-        elif isinstance(request, ipam_req.SpecificSubnetRequest):
-            return self._allocate_specific_subnet(request)
-        else:
-            msg = _("Unsupported request type")
-            raise n_exc.SubnetAllocationError(reason=msg)
-
-    def get_subnet(self, subnet_id):
-        raise NotImplementedError()
-
-    def update_subnet(self, request):
-        raise NotImplementedError()
-
-    def remove_subnet(self, subnet_id):
-        raise NotImplementedError()
-
-
-class IpamSubnet(driver.Subnet):
-
-    def __init__(self,
-                 tenant_id,
-                 subnet_id,
-                 cidr,
-                 gateway_ip=None,
-                 allocation_pools=None):
-        self._req = ipam_req.SpecificSubnetRequest(
-            tenant_id,
-            subnet_id,
-            cidr,
-            gateway_ip=gateway_ip,
-            allocation_pools=allocation_pools)
-
-    def allocate(self, address_request):
-        raise NotImplementedError()
-
-    def deallocate(self, address):
-        raise NotImplementedError()
-
-    def get_details(self):
-        return self._req
-
-
-class SubnetPoolReader(object):
-    '''Class to assist with reading a subnetpool, loading defaults, and
-       inferring the IP version from the prefix list. Provides a common
-       way of reading a stored model or a create request with default table
-       reading a stored model or a create request with default table
-       attributes.
-    '''
-    MIN_PREFIX_TYPE = 'min'
-    MAX_PREFIX_TYPE = 'max'
-    DEFAULT_PREFIX_TYPE = 'default'
-
-    _sp_helper = None
-
-    def __init__(self, subnetpool):
-        self._read_prefix_info(subnetpool)
-        self._sp_helper = SubnetPoolHelper()
-        self._read_id(subnetpool)
-        self._read_prefix_bounds(subnetpool)
-        self._read_attrs(subnetpool,
-                         ['tenant_id', 'name', 'is_default', 'shared'])
-        self._read_address_scope(subnetpool)
-        self.subnetpool = {'id': self.id,
-                           'name': self.name,
-                           'tenant_id': self.tenant_id,
-                           'prefixes': self.prefixes,
-                           'min_prefix': self.min_prefix,
-                           'min_prefixlen': self.min_prefixlen,
-                           'max_prefix': self.max_prefix,
-                           'max_prefixlen': self.max_prefixlen,
-                           'default_prefix': self.default_prefix,
-                           'default_prefixlen': self.default_prefixlen,
-                           'default_quota': self.default_quota,
-                           'address_scope_id': self.address_scope_id,
-                           'is_default': self.is_default,
-                           'shared': self.shared}
-
-    def _read_attrs(self, subnetpool, keys):
-        for key in keys:
-            setattr(self, key, subnetpool[key])
-
-    def _ip_version_from_cidr(self, cidr):
-        return netaddr.IPNetwork(cidr).version
-
-    def _prefixlen_from_cidr(self, cidr):
-        return netaddr.IPNetwork(cidr).prefixlen
-
-    def _read_id(self, subnetpool):
-        id = subnetpool.get('id', attributes.ATTR_NOT_SPECIFIED)
-        if id is attributes.ATTR_NOT_SPECIFIED:
-            id = uuidutils.generate_uuid()
-        self.id = id
-
-    def _read_prefix_bounds(self, subnetpool):
-        ip_version = self.ip_version
-        default_min = self._sp_helper.default_min_prefixlen(ip_version)
-        default_max = self._sp_helper.default_max_prefixlen(ip_version)
-
-        self._read_prefix_bound(self.MIN_PREFIX_TYPE,
-                                subnetpool,
-                                default_min)
-        self._read_prefix_bound(self.MAX_PREFIX_TYPE,
-                                subnetpool,
-                                default_max)
-        self._read_prefix_bound(self.DEFAULT_PREFIX_TYPE,
-                                subnetpool,
-                                self.min_prefixlen)
-
-        self._sp_helper.validate_min_prefixlen(self.min_prefixlen,
-                                               self.max_prefixlen)
-        self._sp_helper.validate_max_prefixlen(self.max_prefixlen,
-                                               ip_version)
-        self._sp_helper.validate_default_prefixlen(self.min_prefixlen,
-                                                   self.max_prefixlen,
-                                                   self.default_prefixlen)
-
-    def _read_prefix_bound(self, type, subnetpool, default_bound=None):
-        prefixlen_attr = type + '_prefixlen'
-        prefix_attr = type + '_prefix'
-        prefixlen = subnetpool.get(prefixlen_attr,
-                                   attributes.ATTR_NOT_SPECIFIED)
-        wildcard = self._sp_helper.wildcard(self.ip_version)
-
-        if prefixlen is attributes.ATTR_NOT_SPECIFIED and default_bound:
-            prefixlen = default_bound
-
-        if prefixlen is not attributes.ATTR_NOT_SPECIFIED:
-            prefix_cidr = '/'.join((wildcard,
-                                    str(prefixlen)))
-            setattr(self, prefix_attr, prefix_cidr)
-            setattr(self, prefixlen_attr, prefixlen)
-
-    def _read_prefix_info(self, subnetpool):
-        prefix_list = subnetpool['prefixes']
-        if not prefix_list:
-            raise n_exc.EmptySubnetPoolPrefixList()
-
-        ip_version = None
-        for prefix in prefix_list:
-            if not ip_version:
-                ip_version = netaddr.IPNetwork(prefix).version
-            elif netaddr.IPNetwork(prefix).version != ip_version:
-                raise n_exc.PrefixVersionMismatch()
-        self.default_quota = subnetpool.get('default_quota')
-
-        if self.default_quota is attributes.ATTR_NOT_SPECIFIED:
-            self.default_quota = None
-
-        self.ip_version = ip_version
-        self.prefixes = self._compact_subnetpool_prefix_list(prefix_list)
-
-    def _read_address_scope(self, subnetpool):
-        self.address_scope_id = subnetpool.get('address_scope_id',
-                                               attributes.ATTR_NOT_SPECIFIED)
-
-    def _compact_subnetpool_prefix_list(self, prefix_list):
-        """Compact any overlapping prefixes in prefix_list and return the
-           result
-        """
-        ip_set = netaddr.IPSet()
-        for prefix in prefix_list:
-            ip_set.add(netaddr.IPNetwork(prefix))
-        ip_set.compact()
-        return [str(x.cidr) for x in ip_set.iter_cidrs()]
-
-
-class SubnetPoolHelper(object):
-
-    _PREFIX_VERSION_INFO = {4: {'max_prefixlen': constants.IPv4_BITS,
-                                'wildcard': '0.0.0.0',
-                                'default_min_prefixlen': 8,
-                                # IPv4 quota measured in units of /32
-                                'quota_units': 32},
-                            6: {'max_prefixlen': constants.IPv6_BITS,
-                                'wildcard': '::',
-                                'default_min_prefixlen': 64,
-                                # IPv6 quota measured in units of /64
-                                'quota_units': 64}}
-
-    def validate_min_prefixlen(self, min_prefixlen, max_prefixlen):
-        if min_prefixlen < 0:
-            raise n_exc.UnsupportedMinSubnetPoolPrefix(prefix=min_prefixlen,
-                                                       version=4)
-        if min_prefixlen > max_prefixlen:
-            raise n_exc.IllegalSubnetPoolPrefixBounds(
-                                             prefix_type='min_prefixlen',
-                                             prefixlen=min_prefixlen,
-                                             base_prefix_type='max_prefixlen',
-                                             base_prefixlen=max_prefixlen)
-
-    def validate_max_prefixlen(self, prefixlen, ip_version):
-        max = self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen']
-        if prefixlen > max:
-            raise n_exc.IllegalSubnetPoolPrefixBounds(
-                                            prefix_type='max_prefixlen',
-                                            prefixlen=prefixlen,
-                                            base_prefix_type='ip_version_max',
-                                            base_prefixlen=max)
-
-    def validate_default_prefixlen(self,
-                                   min_prefixlen,
-                                   max_prefixlen,
-                                   default_prefixlen):
-        if default_prefixlen < min_prefixlen:
-            raise n_exc.IllegalSubnetPoolPrefixBounds(
-                                             prefix_type='default_prefixlen',
-                                             prefixlen=default_prefixlen,
-                                             base_prefix_type='min_prefixlen',
-                                             base_prefixlen=min_prefixlen)
-        if default_prefixlen > max_prefixlen:
-            raise n_exc.IllegalSubnetPoolPrefixBounds(
-                                             prefix_type='default_prefixlen',
-                                             prefixlen=default_prefixlen,
-                                             base_prefix_type='max_prefixlen',
-                                             base_prefixlen=max_prefixlen)
-
-    def wildcard(self, ip_version):
-        return self._PREFIX_VERSION_INFO[ip_version]['wildcard']
-
-    def default_max_prefixlen(self, ip_version):
-        return self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen']
-
-    def default_min_prefixlen(self, ip_version):
-        return self._PREFIX_VERSION_INFO[ip_version]['default_min_prefixlen']
-
-    def ip_version_subnetpool_quota_unit(self, ip_version):
-        return self._PREFIX_VERSION_INFO[ip_version]['quota_units']
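
Note: _lock_subnetpool above takes no row lock; it does a compare-and-swap
on the pool's hash column. A minimal sketch of that pattern against an
in-memory SQLite table (SQLAlchemy 2.x style; the table name, columns and
try_lock helper are illustrative, not part of this tree):

# Optimistic "lock" via compare-and-swap, as in _lock_subnetpool: the
# UPDATE is filtered on the hash value we read, so of any set of
# concurrent writers exactly one sees rowcount 1; the losers see 0 and
# must retry (the deleted code raises RetryRequest for them).
import uuid

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
pools = sa.Table('subnetpools', meta,
                 sa.Column('id', sa.String, primary_key=True),
                 sa.Column('hash', sa.String))
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(pools.insert().values(id='pool-1', hash='seed'))

def try_lock(conn, pool_id):
    current = conn.execute(
        sa.select(pools.c.hash).where(pools.c.id == pool_id)).scalar()
    result = conn.execute(pools.update()
                          .where(pools.c.id == pool_id)
                          .where(pools.c.hash == current)
                          .values(hash=str(uuid.uuid4())))
    return result.rowcount == 1   # False: a concurrent writer won the race

with engine.begin() as conn:
    assert try_lock(conn, 'pool-1')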
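
The allocation search itself is plain netaddr set arithmetic: subtract the
cidrs of existing subnets from the pool's prefixes, then carve a block of
the requested size out of the smallest remaining prefix, as
_get_available_prefix_list and _allocate_any_subnet do. A standalone
sketch with illustrative pool and allocation values:

# Subtract allocated cidrs from the pool, then pick the smallest block
# that can still hold the requested prefix length.
import operator

import netaddr

pool_prefixes = ['10.10.0.0/16']                # subnetpool prefixes (example)
allocated = ['10.10.0.0/24', '10.10.1.0/24']    # cidrs of existing subnets

prefix_set = netaddr.IPSet(pool_prefixes)
allocation_set = netaddr.IPSet(allocated)
available = prefix_set.difference(allocation_set)
available.compact()

# Same ordering as _get_available_prefix_list: longest prefixlen first,
# so the smallest block that fits is consumed and fragmentation stays low.
candidates = sorted(available.iter_cidrs(),
                    key=operator.attrgetter('prefixlen'),
                    reverse=True)

requested = 24
for prefix in candidates:
    if requested >= prefix.prefixlen:
        print(next(prefix.subnet(requested)))   # 10.10.2.0/24
        break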
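
Similarly, the tenant quota check counts usage in fixed units per address
family, /32s for IPv4 and /64s for IPv6 (the quota_units values in
SubnetPoolHelper). The arithmetic of _num_quota_units_in_prefixlen, worked
for a few illustrative prefix lengths:

# A prefix consumes 2 ** (quota_unit - prefixlen) quota units, where
# quota_unit is 32 for IPv4 and 64 for IPv6 (the deleted code computes
# the same value with math.pow, i.e. as a float).
def quota_units(prefixlen, quota_unit):
    return 2 ** (quota_unit - prefixlen)

assert quota_units(24, 32) == 256   # an IPv4 /24 costs 256 /32 units
assert quota_units(32, 32) == 1     # an IPv4 /32 costs a single unit
assert quota_units(56, 64) == 256   # an IPv6 /56 costs 256 /64 units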
diff --git a/neutron/ipam/utils.py b/neutron/ipam/utils.py
deleted file mode 100644 (file)
index 434cbcf..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2015 OpenStack LLC.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-
-
-def check_subnet_ip(cidr, ip_address):
-    """Validate that the IP address is on the subnet."""
-    ip = netaddr.IPAddress(ip_address)
-    net = netaddr.IPNetwork(cidr)
-    # Check that the IP is valid on the subnet: it must not be the
-    # network address or the broadcast address (which exists only in IPv4)
-    return (ip != net.network
-            and (net.version == 6 or ip != net[-1])
-            and net.netmask & ip == net.network)
-
-
-def check_gateway_in_subnet(cidr, gateway):
-    """Validate that the gateway is on the subnet."""
-    ip = netaddr.IPAddress(gateway)
-    if ip.version == 4 or (ip.version == 6 and not ip.is_link_local()):
-        return check_subnet_ip(cidr, gateway)
-    return True
-
-
-def generate_pools(cidr, gateway_ip):
-    """Create IP allocation pools for a specified subnet
-
-    The Neutron API defines a subnet's allocation pools as a list of
-    IPRange objects for defining the pool range.
-    """
-    # Auto allocate the pool around gateway_ip
-    net = netaddr.IPNetwork(cidr)
-    ip_version = net.version
-    first = netaddr.IPAddress(net.first, ip_version)
-    last = netaddr.IPAddress(net.last, ip_version)
-    if first == last:
-        # handle single address subnet case
-        return [netaddr.IPRange(first, last)]
-    first_ip = first + 1
-    # last address is broadcast in v4
-    last_ip = last - (ip_version == 4)
-    if first_ip >= last_ip:
-        # /31 lands here
-        return []
-    ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip))
-    if gateway_ip:
-        ipset.remove(netaddr.IPAddress(gateway_ip, ip_version))
-    return list(ipset.iter_ipranges())
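
check_subnet_ip above is a pure netaddr membership test; restated
standalone, with a few illustrative addresses showing what is accepted and
why the rest is rejected:

# The bitwise AND with the netmask keeps only in-subnet addresses; the
# network address and the IPv4 broadcast address are rejected explicitly.
import netaddr

def check_subnet_ip(cidr, ip_address):
    ip = netaddr.IPAddress(ip_address)
    net = netaddr.IPNetwork(cidr)
    return (ip != net.network
            and (net.version == 6 or ip != net[-1])
            and net.netmask & ip == net.network)

assert check_subnet_ip('192.168.1.0/24', '192.168.1.10')       # usable
assert not check_subnet_ip('192.168.1.0/24', '192.168.1.0')    # network addr
assert not check_subnet_ip('192.168.1.0/24', '192.168.1.255')  # broadcast
assert not check_subnet_ip('192.168.1.0/24', '192.168.2.10')   # wrong subnet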
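
And for the common case, generate_pools reduces to carving the usable
range out of the cidr and removing the gateway. The same result reproduced
with netaddr directly, for an illustrative /24 whose gateway sits on the
first usable address:

# For 192.168.1.0/24 with gateway 192.168.1.1, the single resulting pool
# spans .2-.254: network, gateway and broadcast are all excluded.
import netaddr

net = netaddr.IPNetwork('192.168.1.0/24')
first_ip = netaddr.IPAddress(net.first + 1)   # skip the network address
last_ip = netaddr.IPAddress(net.last - 1)     # skip the IPv4 broadcast
ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip))
ipset.remove(netaddr.IPAddress('192.168.1.1'))  # punch a hole for the gateway

print(list(ipset.iter_ipranges()))
# [IPRange('192.168.1.2', '192.168.1.254')]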
diff --git a/neutron/locale/de/LC_MESSAGES/neutron.po b/neutron/locale/de/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index 1d18fd7..0000000
+++ /dev/null
@@ -1,2437 +0,0 @@
-# German translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-09-06 10:15+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: de\n"
-"Language-Team: German\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"Befehl: %(cmd)s\n"
-"Beendigungscode: %(code)s\n"
-"Standardeingabe: %(stdin)s\n"
-"Standardausgabe: %(stdout)s\n"
-"Standardfehler: %(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: Interner Treiberfehler."
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s ist keine gültige ID für %(type)s"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"%(invalid_dirs)s ist ein ungültiger Wert für 'sort_dirs'; gültige Werte sind "
-"'%(asc)s' und '%(desc)s'"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(key)s untersagt für %(tunnel)s-Provider-Netz"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"%(method)s aufgerufen mit den Netzeinstellungen %(current)s (ursprüngliche "
-"Einstellungen %(original)s) und Netzsegmente %(segments)s"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"%(method)s aufgerufen mit den Teilnetzeinstellungen %(current)s "
-"(ursprüngliche Einstellungen '%(original)s')"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s fehlgeschlagen."
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr ""
-"%(name)s '%(addr)s' stimmt nicht mit 'ip_version' '%(ip_version)s' überein"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "%s kann nicht im Offlinemodus aufgerufen werden"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "%s ist ein ungültiges Attribut für 'sort_key'"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "%s ist ein ungültiges Attribut für 'sort_keys'"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s ist kein gültiger VLAN-Tag"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr ""
-"%s muss get_port_from_device oder get_ports_from_devices implementieren."
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "%s untersagt für VLAN-Provider-Netz"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "%s untersagt für einfaches Provider-Netz"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "%s untersagt für lokales Provider-Netz"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s' überschreitet die Höchstlänge von %(max_len)s"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s' befindet sich nicht in %(valid_values)s"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s' ist zu groß - darf höchstens '%(limit)d' sein"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr "'%(data)s' ist zu klein - muss mindestens '%(limit)d' sein"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr ""
-"'%(data)s' ist keine erkannte IP-Teilnetz-CIDR, '%(cidr)s' wird empfohlen"
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' ist kein gültiger Namensserver. %(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s' leere Zeichenfolgen sind nicht zulässig"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "'%s' kann nicht in boolesche Zahl umgewandelt werden"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' enthält Leerzeichen"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "%s ist kein Verzeichnis"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s' ist keine Liste"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s' ist keine gültige IP-Adresse"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s' ist kein gültiges IP-Teilnetz"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s' ist keine gültige MAC-Adresse"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s' ist keine gültige UUID"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' ist kein gültiger boolescher Wert"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "'%s' ist keine gültige Eingabe"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s' ist keine gültige Zeichenfolge"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s' ist keine Ganzzahl"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s' ist keine Ganzzahl und keine UUID"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s' ist nicht in der Form <key>=[value]"
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "'%s' sollte nicht negativ sein"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "0 ist als Länge für CIDR-Präfix nicht zulässig"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "Ein cidr muss angegeben werden, wenn kein Teilnetzpool vorhanden ist"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"Eine Liste der Zuordnungen von physischen Netzen zu MTU-Werten. Das Format "
-"der Zuordnung ist <physnet>:<mtu val>. Diese Zuordnung lässt die Angabe "
-"eines physischen Netz-MTU-Werts zu, der sich vom Standardwert für "
-"segment_mtu unterscheidet."
-
-msgid "A metering driver must be specified"
-msgstr "Ein Messungstreiber muss angegeben sein"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr "API zum Abrufen von Serviceprovidern für erweiterte Neutron-Services"
-
-msgid "Access to this resource was denied."
-msgstr "Zugriff auf diese Ressource wurde verweigert."
-
-msgid "Action to be executed when a child process dies"
-msgstr ""
-"Aktion, die ausgeführt werden soll, wenn ein untergeordneter Prozess "
-"abgebrochen wird"
-
-msgid "Adds external network attribute to network resource."
-msgstr "Fügt ein externes Netzattribut zur Netzressource hinzu."
-
-msgid "Adds test attributes to core resources."
-msgstr "Fügt Testattribute zu Kernressourcen hinzu."
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "Agent %(id)s konnte nicht gefunden werden"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "Agent %(id)s ist kein L3-Agent oder wurde inaktiviert"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "Agent %(id)s ist kein gültiger DHCP-Agent oder wurde inaktiviert"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "Agent aktualisiert: %(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr ""
-"Agent mit 'agent_type=%(agent_type)s' und 'host=%(host)s' konnte nicht "
-"gefunden werden"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "Automatische Netzzuordnung zum DHCP-Agenten zulassen."
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "Automatische Routerzuordnung zum L3-Agenten zulassen."
-
-msgid "Allow running metadata proxy."
-msgstr "Aktiven Metadaten-Proxy zulassen."
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr ""
-"Senden von Benachrichtigungen zu Ressourcenoperationen an den DHCP-Agenten "
-"zulassen"
-
-msgid "Allow the usage of the bulk API"
-msgstr "Nutzung der Massenzuweisungs-API zulassen"
-
-msgid "Allow the usage of the pagination"
-msgstr "Nutzung der Paginierung zulassen"
-
-msgid "Allow the usage of the sorting"
-msgstr "Nutzung der Sortierung zulassen"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr ""
-"Durchführung von unsicheren SSL-Anforderungen (HTTPS) an Nova-Metadaten"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair muss ip_address enthalten"
-
-msgid "An interface driver must be specified"
-msgstr "Ein Schnittstellentreiber muss angegeben sein"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"Sortierte Liste der Eingangspunkte für Netzmechanismustreiber die aus dem "
-"Namensbereich neutron.ml2.mechanism_drivers geladen werden."
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr ""
-"Ein unbekannter Fehler ist aufgetreten. Stellen Sie Ihre Anforderung erneut."
-
-msgid "An unknown exception occurred."
-msgstr "Eine unbekannte Ausnahme ist aufgetreten."
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "Attribut %s in POST nicht zulässig"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "Netze automatisch von DHCP-Agenten, die offline sind, entfernen."
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr ""
-"Automatische Neuterminierung für Router von Offline-L3-Agenten zu Online-L3-"
-"Agenten."
-
-msgid "Available commands"
-msgstr "Verfügbare Befehle"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "Backend unterstützt keine VLAN-Transparenz."
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"Falsches Präfix- oder MAC-Format für das Generieren der IPv6-Adresse durch "
-"EUI-64: %(prefix)s, %(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr ""
-"Falscher Präfixtyp für das Generieren der IPv6-Adresse durch EUI-64: %s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "Basis-MAC-Adresse: %s"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "Brücke %(bridge)s ist nicht vorhanden."
-
-msgid "Bulk operation not supported"
-msgstr "Massenoperation nicht unterstützt"
-
-msgid "CIDR to monitor"
-msgstr "Zu überwachendes CIDR"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr ""
-"Dynamische IP-Adresse kann nicht zu Port auf Teilnetz %s ohne 'gateway_ip' "
-"hinzugefügt werden"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr ""
-"Das angeforderte Teilnetz kann nicht aus der verfügbaren Gruppe mit Präfixen "
-"zugeordnet werden"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"Dynamische IP-Adresse %(floating_ip_address)s (%(fip_id)s) kann Port "
-"%(port_id)s nicht über statische IP-Adresse %(fixed_ip)s zugeordnet werden, "
-"da diese statische IP-Adresse bereits über eine dynamische IP-Adresse im "
-"externen Netz %(net_id)s verfügt."
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr ""
-"Dynamische IP-Adresse kann nicht erstellt und an Port %s gebunden werden, da "
-"dieser Port einem anderen Nutzer gehört."
-
-msgid "Cannot create resource for another tenant"
-msgstr "Erstellen von Ressource für einen weiteren Nutzer nicht möglich"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr ""
-"enable_dhcp kann nicht inaktiviert werden, wenn ipv6-Attribute gesetzt sind"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"Mehrere Routerports können nicht dieselbe Netz-ID verwenden, wenn beide IPv6-"
-"Teilnetze enthalten. Der vorhandene Port %(p)s verfügt über das IPv6-"
-"Teilnetz und die Netz-ID  %(nid)s"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"Hosten von %(router_type)s-Router %(router_id)s auf %(agent_mode)s-L3-Agent "
-"%(agent_id)s nicht möglich."
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr ""
-"Abgleichen von Priorität bei Ablauflöschung oder Änderung nicht möglich"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "Angabe sowohl von Teilnetz-ID als auch von Port-ID nicht möglich"
-
-msgid "Cannot understand JSON"
-msgstr "Kann JSON nicht verstehen"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "Schreibgeschütztes Attribut %s kann nicht aktualisiert werden"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr "Öffentliche Schlüsseldatei der Zertifizierungsstelle für SSL"
-
-msgid "Check for ARP responder support"
-msgstr "Überprüfen Sie, ob ARP-Responder unterstützt werden"
-
-msgid "Check for OVS vxlan support"
-msgstr "Überprüfen Sie, ob OVS-VXLAN-Unterstützung vorliegt"
-
-msgid "Check for VF management support"
-msgstr "Überprüfen Sie, ob VF-Management unterstützt wird"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "Überprüfen Sie, ob iproute2-VXLAN-Unterstützung vorliegt"
-
-msgid "Check for nova notification support"
-msgstr "Überprüfen Sie, ob Nova-Benachrichtigungen unterstützt werden"
-
-msgid "Check for patch port support"
-msgstr "Überprüfen Sie, ob Patch-Ports unterstützt werden"
-
-msgid "Check minimal dnsmasq version"
-msgstr "Überprüfen Sie die Mindestversion für dnsmasq"
-
-msgid "Check netns permission settings"
-msgstr "Überprüfen Sie die netns-Berechtigungseinstellungen"
-
-msgid "Check ovsdb native interface support"
-msgstr "Unterstützung für native ovsdb-Schnittstelle überprüfen"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"Überschneidungen zwischen CIDR %(subnet_cidr)s von Teilnetz %(subnet_id)s "
-"und CIDR %(cidr)s von Teilnetz %(sub_id)s"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "Clientzertifikat zu API-Server für Nova-Metadaten."
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"Durch Kommas getrennte Liste von <Tun_min>:<Tun_max> Tupeln, die Bereiche "
-"von GRE-Tunnel-IDs aufzählen, die für eine Nutzernetzzuordnung verfügbar sind"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"Durch Kommas getrennte Liste von <VNI_min>:<VNI_max> Tupeln, die Bereiche "
-"von VXLAN-VNI-IDs aufzählen, die für eine Nutzernetzzuordnung verfügbar sind"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr ""
-"Durch Kommas getrennte Liste der DNS-Server, die künftig als "
-"Weiterleitungsserver verwendet werden."
-
-msgid "Command to execute"
-msgstr "Auszuführender Befehl"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr ""
-"Konfigurationsdatei für Schnittstellentreiber (Sie können auch 'l3_agent."
-"ini' verwenden)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "Kollidierender Wert bei Ethernet-Typ %(ethertype)s für CIDR %(cidr)s"
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"Steuert, ob die Neutron-Sicherheitsgruppen-API im Server aktiviert ist. "
-"Sollte 'false' sein, wenn keine Sicherheitsgruppen verwendet werden oder "
-"wenn die Nova-Sicherheitsgruppen-API verwendet wird."
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr ""
-"Keine Bindung an %(host)s:%(port)s möglich nach Versuch über %(time)d "
-"Sekunden"
-
-msgid "Could not deserialize data"
-msgstr "Daten konnten nicht deserialisiert werden"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "Erstellung fehlgeschlagen. %(dev_name)s ist bereits vorhanden."
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr ""
-"Aktuelle Gateway-IP-Adresse %(ip_address)s wird bereits verwendet von Port "
-"%(port_id)s. Aktualisierung nicht möglich."
-
-msgid "Currently distributed HA routers are not supported."
-msgstr "Die derzeit verteilten HA-Router werden nicht unterstützt."
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"DHCP-Leasedauer (in Sekunden). Verwenden Sie -1, damit dnsmasq unbegrenzte "
-"Leasedauern verwendet."
-
-msgid "Default driver to use for quota checks"
-msgstr "Standardtreiber zur Verwendung für Quotenprüfungen"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Standardanzahl an zulässigen Ressourcen pro Nutzer. Ein negativer Wert "
-"bedeutet unbegrenzt."
-
-msgid "Default security group"
-msgstr "Standardsicherheitsgruppe"
-
-msgid "Default security group already exists."
-msgstr "Standardsicherheitsgruppe ist bereits vorhanden."
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"Definiert Provider für erweiterte Services mit dem folgenden Format: "
-"<Servicetyp>:<Name>:<Treiber>[:Standard]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-"Verzögerung, in der der Agent die vorhandenen Ports aktualisieren soll, wenn "
-"ein Neustart erfolgt"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "Löschen Sie den Namensbereich durch Entfernen aller Geräte."
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "Port %s wird gelöscht"
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "Einheit %(dev_name)s in Zuordnung %(mapping)s nicht eindeutig"
-
-msgid "Device has no virtual functions"
-msgstr "Einheit verfügt über keine virtuellen Funktionen"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr "Einheitenname %(dev_name)s fehlt in physical_device_mappings"
-
-msgid "Device not found"
-msgstr "Einheit nicht gefunden"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr ""
-"MAC-Adresse von verteiltem virtuellem Router für Host %(host)s ist nicht "
-"vorhanden."
-
-msgid "Domain to use for building the hostnames"
-msgstr "Für das Erstellen von Hostnamen zu verwendende Domäne"
-
-msgid "Downgrade no longer supported"
-msgstr "Herabstufung wird nicht mehr unterstützt"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "Treiber %s ist für Provider nicht eindeutig"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "Treiber für Sicherheitsgruppen-Firewall im L2-Agenten"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr "Zu verwendender Treiber bei Netzzuordnung zum DHCP-Agenten"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr "Zu verwendender Treiber bei Routerzuordnung zum Standard-L3-Agenten"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "Doppelte IP-Adresse '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "Doppelte Messungsregel in POST."
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "Doppelte Sicherheitsgruppenregel in POST."
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "Doppelte Hostroute '%s'"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "Doppelte Elemente in der Liste: '%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "Doppelter Namensserver '%s'"
-
-msgid "Duplicate segment entry in request."
-msgstr "Doppelter Segmenteintrag in Anforderung."
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "FEHLER: %s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"FEHLER: Konfigurationsdatei kann über die Standardsuchpfade (~/.neutron/, "
-"~/, /etc/neutron/, /etc/) und über die Option '--config-file' nicht gefunden "
-"werden!"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr ""
-"Einer der Parameter network_id und router_id muss an die Methode _get_ports "
-"übergeben werden."
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "Entweder 'subnet_id' oder 'port_id' muss angegeben sein"
-
-msgid "Empty physical network name."
-msgstr "Leerer Name für physisches Netz."
-
-msgid "Enable FWaaS"
-msgstr "FWaaS aktivieren"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "Hochverfügbarkeitsmodus für virtuelle Router aktivieren."
-
-msgid "Enable SSL on the API server"
-msgstr "SSL auf dem API-Server aktivieren"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"VXLAN auf dem Agenten aktivieren. Kann aktiviert werden, wenn der Agent vom "
-"ml2-Plug-in mithilfe eines Linuxbridge-Mechanismus-Treibers verwaltet wird"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"Aktivieren Sie den lokalen ARP-Responder, wenn dies unterstützt wird. Dies "
-"erfordert OVS 2.1 und einen ML2-l2population-Treiber. Dadurch wird es dem "
-"Switch (bei Unterstützung eines Overlay) ermöglicht, lokal auf eine ARP-"
-"Anforderung zu reagieren, ohne einen aufwändigen ARP-Broadcast in das "
-"Overlay durchzuführen."
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"Aktivieren Sie Services auf einem Agenten mit admin_state_up False. Wenn "
-"diese Option 'False' lautet und wenn admin_state_up eines Agenten auf "
-"'False' gesetzt wird, werden die Services darauf inaktiviert. Agenten mit "
-"admin_state_up False werden, unabhängig von dieser Option, nicht für die "
-"automatische Planung ausgewählt. Die manuelle Planung ist für solche Agenten "
-"jedoch verfügbar, wenn diese Option auf 'True' gesetzt ist."
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"Protokollüberwachung über Metadaten-Proxy aktivieren/inaktivieren. Sie "
-"sollte inaktiviert werden, wenn metadata_proxy_user/group keine Lese- und "
-"Schreibberechtigung für die Protokolldatei hat. Außerdem muss die Option "
-"'copytruncate logrotate' verwendet werden, wenn 'logrotate' für "
-"Protokolldateien des Metadaten-Proxy aktiviert ist. Der Standardwert für die "
-"Option wird von metadata_proxy_user abgeleitet: 'watch log' ist aktiviert, "
-"wenn metadata_proxy_user die Agent-ausführende Benutzer-ID/-Name ist."
-
-msgid "Encountered an empty component."
-msgstr "Leere Komponente gefunden."
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "Ende des VLAN-Bereichs ist kleiner als Anfang des VLAN-Bereichs"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr "Ende des Tunnelbereichs ist kleiner als Anfang des Tunnelbereichs"
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "Fehler beim Importieren von FWaas-Treiber: %s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "Fehler bei Auswertung der DNS-Adresse %s"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "Fehler beim Lesen von %s"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr "Vorhandene Präfixe müssen eine Untergruppe der neuen Präfixe sein"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"Erweiterung zur Verwendung mit dem l2population-Mechanismus-Treiber des ml2-"
-"Plug-ins. Sie ermöglicht dem Plug-in das Belegen der VXLAN-"
-"Weiterleitungstabelle."
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "Erweiterung mit Alias %s ist nicht vorhanden"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "Externe IP %s entspricht der Gateway-IP"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"Externes Netz %(external_network_id)s ist von Teilnetz %(subnet_id)s aus "
-"nicht erreichbar. Daher kann Port %(port_id)s keiner dynamischen IP-Adresse "
-"zugeordnet werden."
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr ""
-"Externes Netz %(net_id)s kann nicht so aktualisiert werden, dass es nicht "
-"mehr extern ist, da es über Gateway-Ports verfügt"
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "ExtraDhcpOpt %(id)s konnte nicht gefunden werden"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr ""
-"FWaaS-Plug-in ist auf der Serverseite konfiguriert, aber FWaaS ist für L3-"
-"Agent inaktiviert."
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-"Fehler bei Neuterminierung von Router %(router_id)s: kein auswählbarer L3-"
-"Agent gefunden."
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-"Zuordnung des Routers %(router_id)s zum L3-Agenten %(agent_id)s ist "
-"fehlgeschlagen."
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"Das Zuordnen der ID eines virtuellen Routers im Netz %(network_id)s für den "
-"Router %(router_id)s ist nach %(max_tries)s Versuchen fehlgeschlagen."
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"Port auf Netz %(network_id)s wurde nicht erstellt, da 'fixed_ips' ungültiges "
-"Teilnetz %(subnet_id)s enthielt"
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr "Anforderung wurde nicht analysiert. Parameter '%s' nicht angegeben"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr ""
-"Anforderung wurde nicht analysiert. Erforderliches Attribut %s nicht "
-"angegeben"
-
-msgid "Failed to remove supplemental groups"
-msgstr "Fehler beim Entfernen zusätzlicher Gruppen"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "Fehler beim Festlegen von GID %s"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "Fehler beim Festlegen von Benutzer-ID %s"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "Fehler bei der Konfiguration eines %(type)s-Tunnel-Ports auf %(ip)s"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "Dynamische IP-Adresse %(floatingip_id)s konnte nicht gefunden werden"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr "Für TCP/UDP-Protokolle muss 'port_range_min' '<= port_range_max' sein"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "ip_lib-Aufrufe erzwingen, um Roothilfeprogramm zu verwenden"
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-"Überschneidung bei Zuordnungspools %(pool_1)s %(pool_2)s für Teilnetz "
-"%(subnet_cidr)s gefunden."
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"Gateway kann nicht für Router %(router_id)s aktualisiert werden, da ein "
-"Gateway zum externen Netz %(net_id)s für eine oder mehrere dynamische IP-"
-"Adressen erforderlich ist. "
-
-msgid "Gateway is not valid on subnet"
-msgstr "Gateway ist auf Teilnetz nicht gültig"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr ""
-"Gruppe (Gruppen-ID oder Name), die Metadaten-Proxy nach der Initialisierung "
-"ausführt"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"Gruppe (Gruppen-ID oder Name), die Metadaten-Proxy nach der Initialisierung "
-"ausführt (falls leer: Agent-ausführende Gruppe)."
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr ""
-"Gruppe (Gruppen-ID oder Name), die diesen Prozess nach der Initialisierung "
-"ausführt"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Wie oft Neutron die MAC-Adressenerstellung erneut versuchen wird"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"ICMP-Code (port-range-max) %(value)s ist angegeben, aber ICMP-Typ (port-"
-"range-min) fehlt."
-
-msgid "ID of network"
-msgstr "Netz-ID"
-
-msgid "ID of network to probe"
-msgstr "ID von Netz, das überprüft werden soll"
-
-msgid "ID of probe port to delete"
-msgstr "ID von Überprüfungsport, der gelöscht werden soll"
-
-msgid "ID of probe port to execute command"
-msgstr "ID von Überprüfungsport zum Ausführen des Befehls"
-
-msgid "ID of the router"
-msgstr "ID des Routers"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr ""
-"IP-Adresse %(ip_address)s ist keine gültige IP für die Teilnetze im "
-"angegebenen Netz."
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr ""
-"IP-Adresse %(ip_address)s ist keine gültige IP für das angegebene Teilnetz."
-
-msgid "IP address used by Nova metadata server."
-msgstr "Von Nova-Metadatenserver verwendete IP-Adresse."
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "'subnet_id' oder 'ip_address' für IP-Zuordnung erforderlich"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply hat den folgenden Satz an iptables-Regeln nicht "
-"angewendet:\n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"IPv6-Adresse %(address)s kann einem Port im Teilnetz %(id)s nicht direkt "
-"zugeordnet werden, da das Teilnetz für automatische Adressen konfiguriert "
-"wurde"
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"IPv6-Teilnetz %s, das für den Empfang von RAs von einem externen Router "
-"konfiguriert ist, kann nicht zum Neutron-Router hinzugefügt werden."
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"Bei 'True' wird versucht, MTU-Einstellungen über Netzmethoden für VMs "
-"zugänglich zu machen (DHCP und RA-MTU-Optionen), wenn die bevorzugte MTU des "
-"Netzes bekannt ist."
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-"Bei 'True' sollen Plug-ins, die dies unterstützen, VLAN-transparente Netze "
-"erstellen dürfen."
-
-msgid "Illegal IP version number"
-msgstr "Illegale IP-Versionsnummer"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr ""
-"Unzureichender Präfixspeicherplatz für die Zuordnung von Teilnetzgröße /%s"
-
-msgid "Insufficient rights for removing default security group."
-msgstr ""
-"Berechtigungen sind für das Entfernen der Standardsicherheitsgruppe nicht "
-"ausreichend."
-
-msgid "Interface to monitor"
-msgstr "Zu überwachende Schnittstelle"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr ""
-"Intervall zwischen Überprüfungen der Aktivität von untergeordneten Prozessen "
-"(Sekunden), verwenden Sie zum Inaktivieren '0'"
-
-msgid "Interval between two metering measures"
-msgstr "Intervall zwischen zwei Messungsmaßnahmen"
-
-msgid "Interval between two metering reports"
-msgstr "Intervall zwischen zwei Messungsberichten"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"Ungültiges CIDR %s für IPv6-Adressenmodus. OpenStack verwendet das EUI-64-"
-"Adressformat, für das das Präfix /64 lauten muss."
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "Ungültige Einheit %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-"Ungültiger Authentifizierungstyp: %(auth_type)s, gültige Typen sind: "
-"%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "Ungültiges Datenformat für IP-Pool: '%s'"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "Ungültiges Datenformat für extra-dhcp-opt: %(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "Ungültiges Datenformat für statische IP: '%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "Ungültiges Datenformat für Hostroute: '%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "Ungültiges Datenformat für Namensserver: '%s'"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "Ungültiges Format für Routen: %(routes)s, %(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "Ungültiges Format: %s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "Ungültige Eingabe für %(attr)s. Grund: %(reason)s."
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "Ungültige Eingabe für Operation: %(error_message)s."
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"Ungültige Eingabe. '%(target_dict)s' muss ein Verzeichnis mit Schlüsseln "
-"sein: %(expected_keys)s"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr ""
-"Ungültiger Instanzstatus: %(state)s, gültige Status sind: %(valid_states)s"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "Ungültige Zuordnung: '%s'"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "Ungültiger PCI-Steckplatz %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr ""
-"Ungültiges Providerformat. Letzter Teil sollte 'default' oder leer sein: %s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "Ungültige Route: %s"
-
-msgid "Invalid service provider format"
-msgstr "Ungültiges Service-Provider-Format"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"Ungültiger Wert für ICMP %(field)s (%(attr)s) %(value)s. Er muss zwischen 0 "
-"und 255 liegen."
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "Ungültiger Wert für Port %(port)s"
-
-msgid "Keepalived didn't respawn"
-msgstr "Keepalived wurde nicht generiert"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "Schlüssel %(key)s in Zuordnung: '%(mapping)s' nicht eindeutig"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "Der Grenzwert muss eine Ganzzahl größer 0 sein und nicht '%d'"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "Anzahl von Leases begrenzen, um eine Dienstverweigerung zu verhindern."
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"Liste mit <physisches_Netz>:<VLAN_min>:<VLAN_max> oder <physisches_Netz>, "
-"die physical_network-Namen angeben, die für VLAN-Provider- und Nutzer-Netze "
-"verwendet werden können, wie auch als Bereiche von VLAN-Tags für jedes "
-"verfügbare Netz für die Zuordnung zu Nutzernetzen."
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"Liste der Netztypentreibereingangspunkte, die aus dem Namensbereich neutron."
-"ml2.type_drivers geladen werden."
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "Lokale IP-Adresse von VXLAN-Endpunkten."
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "Lokale IP-Adresse von Tunnelendpunkt."
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "Position für UNIX-Domänensocket von Metadaten-Proxy."
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "Position von UNIX-Domänensocket von Metadatenproxy"
-
-msgid "Location of pid file of this process."
-msgstr "Position der PID-Datei für diesen Prozess."
-
-msgid "Location to store DHCP server config files"
-msgstr "Position zum Speichern von Konfigurationsdateien des DHCP-Servers"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "Position zum Speichern von IPv6-RA-Konfigurationsdateien"
-
-msgid "Location to store child pid files"
-msgstr "Position zum Speichern von untergeordneten PID-Dateien"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr "Position zum Speichern von keepalived/conntrackd-Konfigurationsdateien"
-
-msgid "MTU setting for device."
-msgstr "MTU-Einstellung für Gerät."
-
-msgid "MTU size of veth interfaces"
-msgstr "MTU-Größe von Veth-Schnittstellen"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "L2-Agent im DVR-Modus ausführen."
-
-msgid "Malformed request body"
-msgstr "Fehlerhafter Anforderungshauptteil"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "Maximale Anzahl an zulässigen Adresspaaren"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "Maximale Anzahl an Hostroutes pro Teilnetz"
-
-msgid "Metering driver"
-msgstr "Messungstreiber"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "Messungsbezeichnung %(label_id)s ist nicht vorhanden"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "Messungsbezeichnungsregel %(rule_id)s ist nicht vorhanden"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"Messungsbezeichnungsregel mit remote_ip_prefix %(remote_ip_prefix)s weist "
-"eine Überschneidung mit einer anderen auf"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr ""
-"Abfrage minimieren durch Überwachung von ovsdb auf Schnittstellenänderungen."
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "Fehlender Schlüssel in Zuordnung: '%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "Fehlender Wert in Zuordnung: '%s'"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr ""
-"Mehrere Agenten mit 'agent_type=%(agent_type)s' und 'host=%(host)s' wurden "
-"gefunden"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "Mehrere Standardprovider für Service %s"
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "Mehrere Plug-ins für Service %s wurden konfiguriert"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "Mehrere Provider angegeben für Service %s"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr ""
-"Mehrere 'tenant_ids' bei Erstellung von Sicherheitsgruppenregel für "
-"Massenerstellung nicht zulässig"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr ""
-"Bei angegebenem Portbereich muss ebenfalls ein Protokoll angegeben werden."
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr ""
-"Angabe von einer oder mehreren Aktionen für Ablaufhinzufügung oder Änderung "
-"erforderlich"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr ""
-"Der Name '%s' muss eine Länge von 1 - 63 Zeichen haben, die nur "
-"alphanumerisch oder ein Bindestrich sein dürfen."
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "Der Name '%s' darf nicht mit einem Bindestrich beginnen oder enden."
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "Name der zu verwendenden Open vSwitch-Brücke"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr ""
-"Name der zu verwendenden Nova-Region. Nützlich, wenn Keystone mehrere "
-"Regionen verwaltet. "
-
-msgid "Name of the FWaaS Driver"
-msgstr "Name des FWaaS-Treibers"
-
-msgid "Namespace of the router"
-msgstr "Namensbereich des Routers"
-
-msgid "Native pagination depend on native sorting"
-msgstr "Die native Paginierung ist von der nativen Sortierung abhängig"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "Negatives Delta (Herabstufung) nicht unterstützt"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "Negative relative Revision (Herabstufung) nicht unterstützt"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "Netz %s ist kein gültiges externes Netz"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "Netz %s ist kein externes Netz"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"Netz der Größe %(size)s, aus IP-Bereich %(parent_range)s ausschließlich der "
-"IP-Bereiche %(excluded_ranges)s wurde nicht gefunden."
-
-msgid "Network that will have instance metadata proxied."
-msgstr "Netz, das über Proxy mit den Instanzmetadaten verbunden sein wird."
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "Netztypwert '%s' wird nicht unterstützt"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "Netztypwert für ML2-Plug-in erforderlich"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr ""
-"Netztypen, die vom Agenten unterstützt werden ('gre' und/oder 'vxlan')."
-
-msgid "Neutron Service Type Management"
-msgstr "Neutron-Servicetypverwaltung"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "Neutron-'core_plugin' nicht konfiguriert!"
-
-msgid "Neutron plugin provider module"
-msgstr "Provider-Modul für Neutron-Plug-in"
-
-msgid "Neutron quota driver class"
-msgstr "Neutron-Quotentreiberklasse"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr "Kein auswählbarer dem externen Netz %s zugeordneter L3-Agent gefunden"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "Keine weiteren IP-Adressen auf Netz %(net_id)s verfügbar."
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"Es war keine ID für virtuelle Router (VRID - Virtual Router Identifier) "
-"verfügbar beim Erstellen von Router %(router_id)s. Der Grenzwert für die "
-"Anzahl an Hochverfügbarkeitsroutern pro Nutzer ist 254."
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "Keine Provider angegeben für Service '%s', wird beendet"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"Es ist nicht zulässig, eine %(router_type)s-Router-%(router_id)s eines "
-"vorhandenen DVR-Knotens manuell einem anderen L3-Agenten %(agent_id)s "
-"zuzuordnen."
-
-msgid "Not authorized."
-msgstr "Nicht autorisiert."
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"Es sind nicht genügend L3-Agenten zum Sicherstellen der hohen Verfügbarkeit "
-"verfügbar. Die erforderliche Mindestanzahl ist %(min_agents)s, verfügbar "
-"sind %(num_agents)s."
-
-msgid "Number of RPC worker processes for service"
-msgstr "Anzahl der RPC-Verarbeitungsprozesse für den Service"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr ""
-"Anzahl der Rückstandanforderungen, mit denen der Metadatenserver-Socket "
-"konfiguriert werden soll"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr ""
-"Anzahl der Rückstandanforderungen, mit denen der Socket konfiguriert werden "
-"soll"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Anzahl an zulässigen dynamischen IPs pro Nutzer. Ein negativer Wert bedeutet "
-"unbegrenzt."
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Anzahl an zulässigen Netzen pro Nutzer. Ein negativer Wert bedeutet "
-"unbegrenzt."
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Anzahl an zulässigen Ports pro Nutzer. Ein negativer Wert bedeutet "
-"unbegrenzt."
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Anzahl an zulässigen Routern pro Nutzer. Ein negativer Wert bedeutet "
-"unbegrenzt."
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr ""
-"Anzahl der Sekunden zwischen dem Senden von Ereignissen an Nova, wenn "
-"Ereignisse zum Senden vorhanden sind. "
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr ""
-"Anzahl der Sekunden, in denen wiederholt versucht wird, empfangsbereit zu "
-"sein"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Anzahl an zulässigen Sicherheitsgruppen pro Nutzer. Ein negativer Wert "
-"bedeutet unbegrenzt."
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Anzahl an zulässigen Sicherheitsregeln pro Nutzer. Ein negativer Wert "
-"bedeutet unbegrenzt."
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr ""
-"Anzahl an zulässigen Teilnetzen pro Nutzer. Ein negativer Wert bedeutet "
-"unbegrenzt."
-
-msgid "OK"
-msgstr "OK"
-
-msgid "Only admin can view or configure quota"
-msgstr "Nur Admins können Quoten anzeigen oder konfigurieren"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr ""
-"Nur Administratoren sind dazu berechtigt, auf Quoten für andere Nutzer "
-"zuzugreifen"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr ""
-"Aktualisierung von Regeln nicht für mehrere Sicherheitsprofile gleichzeitig "
-"zulässig"
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "Nur Angabe von 'remote_ip_prefix' oder 'remote_group_id' ist zulässig."
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"Operation %(op)s wird nicht unterstützt für device_owner %(device_owner)s "
-"auf Port %(port_id)s."
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr "Standard-'dnsmasq'-Einstellungen mit dieser Datei außer Kraft setzen"
-
-msgid "Owner type of the device: network/compute"
-msgstr "Eigentümertyp des Geräts: Netz/Rechenknoten"
-
-msgid "POST requests are not supported on this resource."
-msgstr "POST-Anforderungen werden auf dieser Ressource nicht unterstützt."
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "Analysieren von 'bridge_mappings' fehlgeschlagen: %s."
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "Die Analyse von unterstützten pci_vendor_devs ist fehlgeschlagen"
-
-msgid "Path to PID file for this process"
-msgstr "Pfad zur PID-Datei für diesen Prozess"
-
-msgid "Path to the router directory"
-msgstr "Pfad zum Routerverzeichnis"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr "Peer-Patch-Port in Integrationsbrücke für Tunnelbrücke."
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr "Peer-Patch-Port in Tunnelbrücke für Integrationsbrücke."
-
-msgid "Ping timeout"
-msgstr "Ping-Zeitlimitüberschreitung"
-
-msgid "Plugin does not support updating provider attributes"
-msgstr ""
-"Aktualisieren von Provider-Attributen wird von Plug-in nicht unterstützt"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "Port %(id)s verfügt nicht über statische IP-Adresse %(address)s"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"Port %(port_id)s ist einem anderen Nutzer zugeordnet als die dynamische IP-"
-"Adresse %(floatingip_id)s und kann daher nicht gebunden werden."
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr ""
-"Portsicherheit muss aktiviert werden, damit zulässige Adresspaare für einen "
-"Port vorhanden sind."
-
-msgid "Port does not have port security binding."
-msgstr "Port verfügt nicht über Portsicherheitsbindung."
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-"Dem Port ist eine Sicherheitsgruppe zugeordnet. Inaktivieren von "
-"Portsicherheit oder IP-Adresse nur nach Entfernen der Sicherheitsgruppe "
-"möglich"
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr ""
-"Portsicherheit muss aktiviert sein und Port muss über eine IP-Adresse "
-"verfügen, damit Sicherheitsgruppen verwendet werden können."
-
-msgid "Private key of client certificate."
-msgstr "Privater Schlüssel für Clientzertifikat."
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "Stichprobe %s gelöscht"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "Stichprobe erstellt: %s "
-
-msgid "Process is already started"
-msgstr "Prozess wurde bereits gestartet"
-
-msgid "Process is not running."
-msgstr "Prozess läuft nicht."
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "Protokoll für den Zugriff auf Nova-Metadaten, HTTP oder HTTPS"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"Dauer in Sekunden, für die zufallsgeneriert beim Starten des Schedulers für "
-"regelmäßige Tasks gewartet werden soll, um die Belastung zu reduzieren. "
-"(Inaktivierung durch Festlegen auf 0)"
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr "Interner Serverfehler bei fernem Metadatenserver."
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"Darstellung des Ressourcentyps, zu dessen Arbeitslast vom Agenten Bericht "
-"erstattet wird. Dies kann \"networks\", \"subnets\" oder \"ports\" sein. Bei "
-"Angabe (Standardwert ist 'networks') extrahiert der Server bei jedem "
-"report_interval eine bestimmte Arbeitslast, die als Teil des "
-"Agentenkonfigurationsobjekts vom Agentenberichtsstatus, der der Anzahl der "
-"konsumierten Ressourcen entspricht, gesendet wird. dhcp_load_type kann in "
-"Verbindung mit network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler verwendet werden. Wenn der "
-"network_scheduler_driver WeightScheduler ist, kann dhcp_load_type so "
-"konfiguriert werden, dass die Auswahl für die Ressource mit Lastausgleich "
-"dargestellt wird. Beispiel: dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr ""
-"Anforderung fehlgeschlagen: interner Serverfehler bei Verarbeitung Ihrer "
-"Anforderung."
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"Anforderung enthält doppeltes Adresspaar: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"Angefordertes Teilnetz mit CIDR: %(cidr)s für Netz: %(network_id)s enthält "
-"Überschneidungen mit einem anderen Teilnetz"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"Ressource '%(resource_id)s' ist bereits Provider '%(provider)s' für "
-"Servicetyp '%(service_type)s' zugeordnet"
-
-msgid "Resource body required"
-msgstr "Ressourcen-Nachrichtentext erforderlich"
-
-msgid "Resource not found."
-msgstr "Ressource nicht gefunden."
-
-msgid "Resources required"
-msgstr "Ressourcen erforderlich"
-
-msgid "Root helper daemon application to use when possible."
-msgstr "Wenn möglich, Dämonanwendung für Roothilfeprogramm verwenden."
-
-msgid "Root permissions are required to drop privileges."
-msgstr "Rootberechtigungen sind zum Löschen von Berechtigungen erforderlich."
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "Router %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "Router %(router_id)s konnte nicht gefunden werden"
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr ""
-"Router %(router_id)s verfügt über keine Schnittstelle mit ID %(port_id)s"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr ""
-"Router %(router_id)s verfügt über keine Schnittstelle auf Teilnetz "
-"%(subnet_id)s"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "Router verfügt bereits über einen Port auf Teilnetz %s"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"Routerschnittstelle für Teilnetz %(subnet_id)s auf Router %(router_id)s kann "
-"nicht gelöscht werden, da sie für eine oder mehrere dynamische IP-Adressen "
-"erforderlich ist."
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"Routerschnittstelle für Teilnetz %(subnet_id)s auf Router %(router_id)s kann "
-"nicht gelöscht werden, da sie für eine oder mehrere Routen erforderlich ist."
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr ""
-"Router, mit dem die Metadaten der verbundenen Instanzen über Proxy verbunden "
-"sein werden."
-
-msgid "Run as daemon."
-msgstr "Als Dämon ausführen."
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"Sekunden zwischen Status-Berichten von Knoten an Server; sollte geringer "
-"sein als agent_down_time; am besten sollte es die Hälfte oder weniger von "
-"agent_down_time betragen."
-
-msgid "Seconds between running periodic tasks"
-msgstr "Sekunden zwischen Ausführungen regelmäßiger Tasks"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"Sekunden bis zur Annahme, dass der Agent inaktiv ist; sollte mindestens "
-"doppelt so hoch sein wie report_interval, damit sichergestellt ist, dass der "
-"Agent wirklich inaktiv ist."
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "Sicherheitsgruppe %(id)s ist nicht vorhanden"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "Sicherheitsgruppenregel %(id)s ist nicht vorhanden"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "Sicherheitsgruppenregel ist bereits vorhanden. Regel-ID ist %(id)s."
-
-msgid "Segments and provider values cannot both be set."
-msgstr ""
-"Es können nicht Segment- und Providerwerte gleichzeitig festgelegt werden."
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"Benachrichtigung an Nova senden, wenn sich die Portdaten (fixed_ips/"
-"floatingip) ändern, damit Nova den Cache aktualisieren kann. "
-
-msgid "Send notification to nova when port status changes"
-msgstr "Benachrichtigung an Nova senden, wenn sich der Portstatus ändert"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"Senden Sie diese Anzahl an Gratuitous ARPs für "
-"Hochverfügbarkeitskonfiguration; wenn der Wert kleiner oder gleich 0 ist, "
-"wird die Funktion inaktiviert "
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-"Service-Provider '%(provider)s' konnte nicht für Servicetyp %(service_type)s "
-"gefunden werden "
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr "Servicetyp %(service_type)s weist keinen Standard-Service-Provider auf"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"Neues Zeitlimit in Sekunden für neue RCP-Aufrufe festlegen, nachdem Agent "
-"SIGTERM empfängt. Wenn der Wert auf 0 gesetzt ist, wird das RPC-Zeitlimit "
-"nicht geändert"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"DF-Bit (Don't Fragment) auf GRE/VXLAN-Tunnel für abgehende IP-Pakete "
-"festlegen oder die Festlegung aufheben."
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"Einige Nutzer verfügen über mehrere Sicherheitsgruppen mit dem Namen "
-"'default': %(duplicates)s. Alle doppelten Sicherheitsgruppen mit dem Namen "
-"'default' müssen aufgelöst werden, bevor die Datenbank aktualisiert wird."
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr ""
-"Um für 'tenant_id' einen anderen Wert als die in der Anforderung "
-"authentifizierte Nutzer-ID anzugeben, sind Administratorberechtigungen "
-"erforderlich"
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr ""
-"Teilnetz für Routerschnittstelle muss über eine Gateway-IP-Adresse verfügen"
-
-msgid "Subnet pool has existing allocations"
-msgstr "Der Teilnetzpool verfügt über vorhandene Zuordnungen"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr ""
-"Teilnetz, das für das L3-Verwaltungsnetz für hohe Verfügbarkeit verwendet "
-"wird."
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr ""
-"Systemweites Flag zum Bestimmen des Routertyps, den Nutzer erstellen können. "
-"Kann nur vom Administrator überschrieben werden."
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "TCP-Port zum Empfangen von Anforderungen des Metadatenservers."
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr "Von Neutron-Metadaten-Namensbereichsproxy verwendeter TCP-Port."
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Von Nova-Metadatenserver verwendeter TCP-Port."
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "TLD '%s' darf nicht ausschließlich numerisch sein"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "TOS für VXLAN-Schnittstellenprotokollpakete."
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "TTL für VXLAN-Schnittstellenprotokollpakete."
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr "Nutzer %(tenant_id)s darf %(resource)s auf diesem Netz nicht erstellen"
-
-msgid "Tenant network creation is not enabled."
-msgstr "Erstellung von Nutzernetzen ist nicht aktiviert."
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"Die Option 'gateway_external_network_id' muss für diesen Agenten "
-"konfiguriert werden, da Neutron über mehr als ein externes Netz verfügt."
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr ""
-"Das in der Konfigurationsdatei angegebene CIDR für das "
-"Hochverfügbarkeitsnetz ist nicht gültig; %(cidr)s."
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "UDP-Port für VXLAN-Tunnel."
-
-msgid "The advertisement interval in seconds"
-msgstr "Ankündigungsintervall in Sekunden"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "Der Zuordnungspool %(pool)s ist nicht gültig."
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-"Der Zuordnungspool %(pool)s geht über das Teilnetz-CIDR %(subnet_cidr)s "
-"hinaus."
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr ""
-"Das Attribut %(attr)s ist ein Verweis auf eine andere Ressource und kann bei "
-"der Sortierung von %(resource)s nicht verwendet werden"
-
-msgid "The core plugin Neutron will use"
-msgstr "Core-Plug-in, das Neutron verwenden wird"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "Der für die Verwaltung des DHCP-Servers verwendete Treiber."
-
-msgid "The driver used to manage the virtual interface."
-msgstr ""
-"Der für die Verwaltung der virtuellen Schnittstelle verwendete Treiber."
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"Die folgende device_id %(device_id)s gehört weder Ihrem Nutzer, noch "
-"entspricht sie dem Router eines anderen Nutzers."
-
-msgid "The host IP to bind to"
-msgstr "Das Host-IP, an das gebunden werden soll"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "Die Schnittstelle zur Kommunikation mit OVSDB"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-"Maximale Anzahl an in einer einzelnen Antwort zurückgegebenen Elementen. Der "
-"Wert 'infinite' oder eine negative Ganzzahl bedeuten, dass es keine "
-"Begrenzung gibt"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr ""
-"Das Netz %(network_id)s wurde bereits vom DHCP-Agenten %(agent_id)s gehostet."
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr ""
-"Das Netz %(network_id)s wird nicht vom DHCP-Agenten %(agent_id)s gehostet."
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr ""
-"Die Anzahl an zulässigen Adresspaaren überschreitet das Maximum %(quota)s."
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr ""
-"Die Anzahl an Sekunden, die der Agent zwischen Abfragen lokaler "
-"Geräteänderungen wartet."
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-"Die Anzahl an Sekunden, die gewartet werden soll, bevor die ovsdb-"
-"Überwachung nach einer Kommunikationsunterbrechung erneut generiert wird."
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "Die Anzahl an 'sort_keys' und 'sort_dirs' muss gleich sein"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "Port '%s' wurde gelöscht"
-
-msgid "The port to bind to"
-msgstr "Der Port, an den gebunden werden soll"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "Der angeforderte Inhaltstyp %s ist ungültig."
-
-msgid "The resource could not be found."
-msgstr "Die Ressource konnte nicht gefunden werden."
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr ""
-"Der Router %(router_id)s wurde bereits vom L3-Agenten %(agent_id)s gehostet."
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr ""
-"Auf dem Server ist entweder ein Fehler aufgetreten oder der Server kann die "
-"angeforderte Operation nicht ausführen."
-
-msgid "The service plugins Neutron will use"
-msgstr "Service-Plug-ins, die Neutron verwenden wird"
-
-msgid "The type of authentication to use"
-msgstr "Der zu verwendende Authentifizierungtyp"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "Der Wert '%(value)s' für %(element)s ist ungültig."
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"Der Betriebsmodus für den Agenten. Zulässige Modi sind: 'legacy' - Hierbei "
-"wird das aktuelle Verhalten beibehalten, bei dem der Agent der Ebene 3 (L3 - "
-"Level 3) auf einem zentralisierten Netzknoten implementiert wird, um L3-"
-"Services wie DNAT und SNAT bereitzustellen. Verwenden Sie diesen Modus, wenn "
-"Sie DVR nicht annehmen möchten. 'dvr' - Mit diesem Modus wird die DVR-"
-"Funktionalität aktiviert. Er muss für L3-Agenten verwendet werden, die auf "
-"einem Rechenhost ausgeführt werden. 'dvr_snat' - Hiermit wird die "
-"zentralisierte SNAT-Unterstützung in Kombination mit DVR aktiviert.  Dieser "
-"Modus muss für L3-Agenten verwendet werden, die auf einem zentralisierten "
-"Knoten (oder in Implementierungen mit einem einzelnen Host, z. B. devstack) "
-"ausgeführt werden."
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"'True' zum Löschen aller Ports auf den OpenvSwitch-Brücken. 'False' zum "
-"Löschen von Ports, die von Neutron auf Integrationsbrücken und externen "
-"Netzbrücken erstellt wurden."
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "Tunnel-IP-Wert für ML2-Plug-in erforderlich"
-
-msgid "Tunnel bridge to use."
-msgstr "Zu verwendende Tunnelbrücke."
-
-msgid "URL to database"
-msgstr "URL an Datenbank"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "Kein Zugriff auf %s möglich"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr ""
-"Operation kann für %(router_id)s nicht abgeschlossen werden. Die Anzahl an "
-"Routen überschreitet den maximalen Wert %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"Operation kann für %(subnet_id)s nicht abgeschlossen werden. Die Anzahl an "
-"DNS-Namensservern überschreitet den Grenzwert %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr ""
-"Operation kann für %(subnet_id)s nicht abgeschlossen werden. Die Anzahl an "
-"Hostroutes überschreitet den Grenzwert %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-"Operation kann für Netz %(net_id)s nicht abgeschlossen werden. Die IP-"
-"Adresse %(ip_address)s ist belegt."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr ""
-"Operation kann für Netz %(net_id)s nicht abgeschlossen werden. Die MAC-"
-"Adresse %(mac)s ist belegt."
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"Operation auf Netz %(net_id)s kann nicht abgeschlossen werden. Ein oder "
-"mehrere Ports werden im Netz noch verwendet."
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"Operation auf Port %(port_id)s kann für Netz %(net_id)s nicht abgeschlossen "
-"werden. Port verfügt bereits über eine angeschlossene Einheit %(device_id)s."
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "Wert in %s kann nicht konvertiert werden"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "Agent-Gateway-Port kann nicht erstellt werden"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "SNAT-Schnittstellenport kann nicht erstellt werden"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr ""
-"Das einfache Netz kann nicht erstellt werden. Das physische Netz "
-"%(physical_network)s ist belegt."
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr ""
-"Das Netz kann nicht erstellt werden. Es wurde bei den maximal zulässigen "
-"Versuchen kein verfügbares Netz gefunden."
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr ""
-"Das Netz kann nicht erstellt werden. Es ist kein Nutzernetz für die "
-"Zuordnung verfügbar."
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"Das Netz kann nicht erstellt werden. Das VLAN %(vlan_id)s auf dem physischen "
-"Netz %(physical_network)s ist belegt."
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr ""
-"Das Netz kann nicht erstellt werden. Die Tunnel-ID %(tunnel_id)s ist belegt."
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "MAC-Adresse für %s kann nicht bestimmt werden"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "'%s' kann in Anforderungshauptteil nicht gefunden werden"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr ""
-"Es können keine IP-Adressen im externen Netz %(net_id)s gefunden werden."
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "Ressourcenname kann nicht in %s gefunden werden"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr ""
-"IP-Adresse kann nicht mithilfe von EUI64 mit dem IPv4-Präfix generiert werden"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr ""
-"Eindeutige DVR-MAC-Adresse for Host %(host)s kann nicht generiert werden."
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr ""
-"Eindeutige MAC-Adresse kann auf Netz %(net_id)s nicht generiert werden."
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-"Zielfeld kann nicht aus %s identifiziert werden. Übereinstimmung sollte im "
-"Format %%(<Feldname>)s vorliegen"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr ""
-"Übereinstimmung %(match)s kann nicht als übergeordnete Ressource bestätigt "
-"werden: %(res)s wurde nicht gefunden"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "Unerwarteter Antwortcode: %s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "Unerwartete Antwort: %s"
-
-msgid "Unimplemented commands"
-msgstr "Nicht implementierte Befehle"
-
-msgid "Unknown API version specified"
-msgstr "Unbekannte API-Version angegeben"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "Unbekanntes Attribut '%s'."
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "Unbekannte Kette: %r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "Unbekannte Quotenressourcen %(unknown)s."
-
-msgid "Unmapped error"
-msgstr "Nicht zugeordneter Fehler"
-
-msgid "Unrecognized action"
-msgstr "Nicht erkannte Aktion"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "Nicht erkannte(s) Attribut(e) '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "Nicht unterstützter Inhaltstyp"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "Nicht unterstützter Netztyp %(net_type)s."
-
-msgid "Unsupported request type"
-msgstr "Nicht unterstützter Anforderungstyp"
-
-msgid "Updating default security group not allowed."
-msgstr "Aktualisieren von Standardsicherheitsgruppe nicht zulässig."
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"ML2-l2population-Mechanismus-Treiber verwenden, um ferne MAC- und IP-"
-"Adressen abzurufen und die Tunnelskalierbarkeit zu verbessern."
-
-msgid "Use broadcast in DHCP replies"
-msgstr "Broadcast in DHCP-Antworten verwenden"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr ""
-"Verwenden Sie entweder --delta oder relative Revision, nicht beides gemeinsam"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr ""
-"Benutzer (Benutzer-ID oder Name), der Metadaten-Proxy nach der "
-"Initialisierung ausführt"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"Benutzer (Benutzer-ID oder Name), der Metadaten-Proxy nach der "
-"Initialisierung ausführt (falls leer: Agent-ausführender Benutzer)."
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr ""
-"Benutzer (Benutzer-ID oder Name), der diesen Prozess nach der "
-"Initialisierung ausführt"
-
-msgid "VRRP authentication password"
-msgstr "VRRP-Authentifizierungskennwort"
-
-msgid "VRRP authentication type"
-msgstr "VRRP-Authentifizierungstyp"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"Überprüfung der Schlüssel für das Verzeichnis ist fehlgeschlagen. Erwartete "
-"Schlüssel: %(expected_keys)s Angegebene Schlüssel: %(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "Der Validator '%s' ist nicht vorhanden."
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "Wert %(value)s in Zuordnung: '%(mapping)s' nicht eindeutig"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"Überwachungsdateiprotokoll. Protokollüberwachung sollte inaktiviert sein, "
-"wenn metadata_proxy_user/group über keine Lese- und Schreibberechtigung für "
-"die Protokolldatei des Metadaten-Proxys verfügt."
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr ""
-"Position zum Speichern von Neutron-Statusdateien. Dieses Verzeichnis muss "
-"für den Agenten beschreibbar sein."
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"Mit IPv6 benötigt das Netz, das für das externe Gateway verwendet wird, kein "
-"zugehöriges Teilnetz, da die automatisch zugewiesene LLA (Link-Local "
-"Address) verwendet werden kann. Eine IPv6-Gateway-Adresse ist jedoch für die "
-"Verwendung als Next-Hop für die Standardroute erforderlich. Ist hier keine "
-"IPv6-Gateway-Adresse konfiguriert (und nur dann), wird der Neutron-Router so "
-"konfiguriert, dass er die Standardroute von RAs (Router Advertisements) vom "
-"vorgeschalteten Router erhält; in diesem Fall muss der vorgeschaltete Router "
-"ebenfalls zum Senden dieser RAs konfiguriert sein. Wenn das ipv6_gateway "
-"konfiguriert ist, sollte es die LLA der Schnittstelle auf dem "
-"vorgeschalteten Router sein. Wenn ein Next-Hop benötigt wird, der eine GUA "
-"(Global Unique Address) verwendet, muss dies über ein Teilnetz geschehen, "
-"das dem Netz zugeordnet ist, nicht über diesen Parameter. "
-
-msgid "You must implement __call__"
-msgstr "Sie müssen '__call__' implementieren"
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-"Sie müssen eine Konfigurationsdatei für die Brücke angeben: entweder '--"
-"config-file' oder env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "Sie müssen eine Überarbeitung oder ein relatives Delta bereitstellen"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr ""
-"allocation_pools sind nur für bestimmte Teilnetzanforderungen zulässig."
-
-msgid "binding:profile value too large"
-msgstr "Bindung: Profilwert zu groß"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "cidr und prefixlen dürfen nicht gemeinsam angegeben werden"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr "dhcp_agents_per_network muss >= 1 sein. '%s' ist ungültig."
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "'fixed_ip_address' kann nicht ohne 'port_id' angegeben werden"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "hat Einheiteneigentümer %s"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "IP-Befehl fehlgeschlagen auf Einheit %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "IP-Link-Fähigkeit %(capability)s wird nicht unterstützt"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "IP-Link-Befehl wird nicht unterstützt: %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr ""
-"ip_version muss angegeben werden, wenn cidr und subnetpool_id nicht "
-"angegeben sind"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ipv6_address_mode ist nicht gültig, wenn ip_version 4 ist"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ipv6_ra_mode ist nicht gültig, wenn ip_version 4 ist"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"ipv6_ra_mode oder ipv6_address_mode darf nicht gesetzt sein, wenn "
-"enable_dhcp auf 'False' gesetzt ist."
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"ipv6_ra_mode kann nicht auf '%(ra_mode)s' gesetzt sein, wenn "
-"ipv6_address_mode auf '%(addr_mode)s' gesetzt ist. Sind beide Attribute "
-"gesetzt, müssen sie denselben Wert aufweisen"
-
-msgid "mac address update"
-msgstr "MAC-Adressaktualisierung"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"Der Konfigurationsparameter max_l3_agents_per_router %(max_agents)s ist "
-"ungültig. Ermuss größer-gleich min_l3_agents_per_router %(min_agents)s sein."
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"Konfigurationsparameter min_l3_agents_per_router ist nicht gültig. Der Wert "
-"muss für hohe Verfügbarkeit größer-gleich %s sein."
-
-msgid "network_type required"
-msgstr "network_type erforderlich"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "network_type-Wert '%s' wird nicht unterstützt"
-
-msgid "new subnet"
-msgstr "Neues Teilnetz"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "physical_network '%s' unbekannt für VLAN-Provider-Netz"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "physical_network '%s' unbekannt für einfaches Provider-Netz"
-
-msgid "physical_network required for flat provider network"
-msgstr "physical_network erforderlich für einfaches Provider-Netz"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "'provider:physical_network' für %s-Netz angegeben"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "respawn_interval muss >= 0 sein, falls angegeben."
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr ""
-"'segmentation_id' außerhalb des gültigen Bereichs (%(min)s bis %(max)s)"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr "segmentation_id erfordert physical_network für VLAN-Provider-Netz"
-
-msgid "the nexthop is not connected with router"
-msgstr "Der nächste Hop ist nicht mit dem Router verbunden"
-
-msgid "the nexthop is used by router"
-msgstr "Der nächste Hop wird vom Router verwendet"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"UUID von der Befehlszeile angegeben, damit external_process uns über /proc/"
-"cmdline-Schnittstelle verfolgen kann."
diff --git a/neutron/locale/es/LC_MESSAGES/neutron.po b/neutron/locale/es/LC_MESSAGES/neutron.po
deleted file mode 100644
index 43f6553..0000000
--- a/neutron/locale/es/LC_MESSAGES/neutron.po
+++ /dev/null
@@ -1,2427 +0,0 @@
-# Spanish translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-09-06 10:15+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: es\n"
-"Language-Team: Spanish\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"Mandato: %(cmd)s\n"
-"Código de salida: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Salida estándar: %(stdout)s\n"
-"Error estándar: %(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: Error de controlador interno."
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s no es un identificador %(type)s válido"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"%(invalid_dirs)s es un valor no válido para sort_dirs, los valores válidos "
-"son '%(asc)s' y '%(desc)s'"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(key)s prohibido para red de proveedor %(tunnel)s"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"%(method)s llamado con configuraciones de red %(current)s (valores "
-"originales %(original)s) y segmentos de red %(segments)s"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"%(method)s llamado con ajustes de subred %(current)s (ajustes originales "
-"%(original)s)"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s ha fallado."
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr "%(name)s '%(addr)s' no coincide con la versión de IP '%(ip_version)s'"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "%s no puede invocarse en la modalidad fuera de línea"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "%s es un atributo no válido para sort_key"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "%s es un atributo no válido para sort_keys"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s no es una etiqueta VLAN válida"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr "%s debe implementar get_port_from_device o get_ports_from_devices."
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "%s prohibido para la red de proveedor VLAN"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "%s prohibido para la red de proveedor simple"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "%s prohibido para la red de proveedor local"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s' supera la longitud máxima de %(max_len)s"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s' no está en %(valid_values)s"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s' es muy grande, no debe ser más grande que '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr "'%(data)s' es muy pequeño, debe ser al menos '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr ""
-"'%(data)s' no es un cidr de subred IP reconocido, se recomienda '%(cidr)s'"
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' no es un servidor de nombres válido. %(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "No se permiten '%s' series en blanco"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "'%s' no se puede convertir a booleano"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' contiene espacios en blanco"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "'%s' no es un diccionario"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s' no es una lista"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s' no es una dirección IP válida"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s' no es una subred IP válida"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s' no es una dirección MAC válida"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s' no es un UUID válido"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' no es un valor booleano"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "'%s' no es una entrada válida"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s' no es una serie válida"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s' no es un entero"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s' no es un entero o uuid"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s' no tiene el formato <clave>=[valor]"
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "'%s' debe ser no negativo"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "0 no está permitido como longitud del prefijo de CIDR"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "Debe especificarse un cidr en ausencia de una agrupación de subred"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"Una lista de correlaciones de redes físicas para valores MTU. El formato de "
-"la correlación es <physnet>:<mtu val>. Esta correlación permite especificar "
-"un valor de MUT de red físca que difiere del valor segment_mtu "
-"predeterminado."
-
-msgid "A metering driver must be specified"
-msgstr "Se debe especificar un controlador de medición"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr ""
-"API para recuperar los proveedores de servicio para servicios avanzados de "
-"Neutron"
-
-msgid "Access to this resource was denied."
-msgstr "Se ha denegado el acceso a este recurso."
-
-msgid "Action to be executed when a child process dies"
-msgstr "Acción para ejecutar cuando termina un proceso secundario"
-
-msgid "Adds external network attribute to network resource."
-msgstr "Añade atributo de red externa a recurso de red."
-
-msgid "Adds test attributes to core resources."
-msgstr "Añade atributos de prueba a recursos de núcleo."
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "No se ha podido encontrar el agente %(id)s."
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "El agente %(id)s no es un agente L3 válido o se ha inhabilitado"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "El agente %(id)s no es un agente DHCP válido o se ha inhabilitado"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "El agente se ha actualizado: %(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr ""
-"El agente con agent_type=%(agent_type)s y host=%(host)s no se ha podido "
-"encontrar"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "Permita la planificación automática de redes para el agente DHCP."
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "Permitir auto programación de enrutadores al agente L3."
-
-msgid "Allow running metadata proxy."
-msgstr "Permitir ejecutar el proxy de metadatos."
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr ""
-"Notificación de la operación de permitir el envío de recurso al agente DHCP"
-
-msgid "Allow the usage of the bulk API"
-msgstr "Permitir el uso de la API masiva"
-
-msgid "Allow the usage of the pagination"
-msgstr "Permitir el uso de la paginación"
-
-msgid "Allow the usage of the sorting"
-msgstr "Permitir el uso de la ordenación"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr ""
-"Permitir ejecutar solicitudes SSL (https) no seguras en los metadatos de Nova"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair debe contener ip_address"
-
-msgid "An interface driver must be specified"
-msgstr "Se debe especificar un controlador de interfaz"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"Una lista ordenada de puntos de entrada de controlador de mecanismo de red a "
-"cargar desde el espacio de nombres neutron.ml2.mechanism_drivers."
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr "Se ha producido un error desconocido. Intente la solicitud otra vez."
-
-msgid "An unknown exception occurred."
-msgstr "Se ha producido una excepción desconocida."
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "El atributo '%s' no está permitido en POST"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "Eliminar automáticamente las redes de los agentes DHCP fuera de línea."
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr ""
-"Volver a planificar automáticamente los direccionadores de los agentes L3 "
-"fuera de línea a los agentes L3 en línea."
-
-msgid "Available commands"
-msgstr "Mandatos disponibles"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "El programa de fondo no soporta la transparencia de VLAN."
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"Prefijo o formato de mac incorrecto para generar dirección IPv6 por EUI-64: "
-"%(prefix)s, %(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr "Tipo de prefijo incorrecto para generar dirección IPv6 por EUI-64: %s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "MAC base: %s"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "El puente %(bridge)s no existe."
-
-msgid "Bulk operation not supported"
-msgstr "No se soporta operación masiva"
-
-msgid "CIDR to monitor"
-msgstr "CIDR a supervisar"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr ""
-"No se puede añadir IP flotante al puerto en la subred %s que no tiene IP de "
-"pasarela"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr ""
-"No se puede asignar la subred solicitada a partir del conjunto disponible de "
-"prefijos"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"No se puede asociar la IP flotante %(floating_ip_address)s (%(fip_id)s) con "
-"el puerto %(port_id)s mediante la IP fija %(fixed_ip)s, porque esa IP fija "
-"ya tiene  una IP flotante en la red externa %(net_id)s."
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr ""
-"No se puede crear IP flotante y enlazarla al puerto %s, porque ese puerto es "
-"propiedad de un arrendatario diferente."
-
-msgid "Cannot create resource for another tenant"
-msgstr "No se puede crear el recurso para otro arrendatario"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "No se puede inhabilitar enable_dhcp con atributos ipv6 establecidos"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"No puede tener varios puertos de direccionador con el mismo ID de red si "
-"amboscontienen subredes IPv6. El puerto existente %(p)s tiene subredes IPv6 "
-"y un id de red %(nid)s"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"No se puede alojar el direccionador %(router_type)s %(router_id)s en el "
-"agente L3 %(agent_mode)s %(agent_id)s."
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr ""
-"No se puede hacer coincidir la prioridad en la supresión o modificación de "
-"flujo"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "No se puede especificar el ID de subred y el ID de puerto"
-
-msgid "Cannot understand JSON"
-msgstr "No se puede entender JSON"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "No se puede actualizar el atributo de sólo lectura %s"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr ""
-"Archivo de clave pública de entidad emisora de certificados (cert CA) para "
-"ssl"
-
-msgid "Check for ARP responder support"
-msgstr "Comprobar el soporte de encuestado de ARP"
-
-msgid "Check for OVS vxlan support"
-msgstr "Comprobar el soporte vxlan OVS"
-
-msgid "Check for VF management support"
-msgstr "Comprobar el soporte de gestión VF"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "Comprobar el soporte vxlan iproute2"
-
-msgid "Check for nova notification support"
-msgstr "Comprobar el soporte de notificación nova"
-
-msgid "Check for patch port support"
-msgstr "Comprobar el soporte de puerto de parche"
-
-msgid "Check minimal dnsmasq version"
-msgstr "Comprobar la versión mínima de dnsmasq"
-
-msgid "Check netns permission settings"
-msgstr "Comprobar los valores de permiso netns"
-
-msgid "Check ovsdb native interface support"
-msgstr "Comprobar el soporte de interfaz nativa ovsdb"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"El Cidr %(subnet_cidr)s de la subred %(subnet_id)s se solapa con el cidr "
-"%(cidr)s de la subred %(sub_id)s"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "Certificado de cliente para el servidor de la API de metadatos de Nova"
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"La lista separada por comas de conjuntos de variables <tun_min>:<tun_max> "
-"enumera los rangos de Los ID de túnel GRE que están disponibles para la "
-"asignación de red de arrendatario"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"Lista separada por comas de conjuntos de variables <vni_min>:<vni_max> que "
-"enumeran los rangos de ID de VXLAN VNI que están disponibles para la "
-"asignación de red de arrendatario"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr ""
-"Lista separada por comas de los servidores DNS que se utilizarán como "
-"reenviadores."
-
-msgid "Command to execute"
-msgstr "Mandato a ejecutar"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr ""
-"Archivo de configuración para controlador de interfaz (También puede "
-"utilizar l3_agent.ini)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "Valor ethertype %(ethertype)s en conflicto para CIDR %(cidr)s"
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"Controla si la API de grupo de seguridad neutron está habilitada en el "
-"servidor. Debe ser false cuando no hay grupos de seguridad o se utiliza la "
-"API de grupo de seguridad nova."
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr ""
-"No se puede enlazar a %(host)s:%(port)s después de intentar por %(time)d "
-"segundos"
-
-msgid "Could not deserialize data"
-msgstr "No se han podido deserializar los datos"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "La creación ha fallado. %(dev_name)s ya existe."
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr ""
-"IP de puerta de enlace actual %(ip_address)s ya está en uso por el puerto "
-"%(port_id)s. No es posible actualizar."
-
-msgid "Currently distributed HA routers are not supported."
-msgstr "No se admiten los direccionadores HA distribuidos actualmente."
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"Duración de concesión de DHCP (en segundos). Utilice -1 para indicar a "
-"dnsmasq que utilice tiempos de concesión infinitos."
-
-msgid "Default driver to use for quota checks"
-msgstr "Controlador predeterminado a utilizar para comprobaciones de cuota"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Número predeterminado de recursos permitidos por arrendatario. Un valor "
-"negativo significa ilimitados."
-
-msgid "Default security group"
-msgstr "Grupo de seguridad predeterminado"
-
-msgid "Default security group already exists."
-msgstr "El grupo de seguridad predeterminado ya existe."
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"Define proveedores para servicios avanzados con el formato: <service_type>:"
-"<name>:<driver>[:predeterminados]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-"Retardo dentro del cual se espera que el agente actualice los puertos "
-"existentes cuando reinicios"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "Suprimir el espacio de nombres eliminando todos los dispositivos. "
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "Suprimiendo el puerto %s"
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr ""
-"El dispositivo %(dev_name)s en la correlación: %(mapping)s no es exclusivo"
-
-msgid "Device has no virtual functions"
-msgstr "El dispositivo no tiene funciones virtuales"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr ""
-"Falta el nombre de dispositivo %(dev_name)s en physical_device_mappings"
-
-msgid "Device not found"
-msgstr "No se ha encontrado el dispositivo"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr ""
-"La dirección Mac del direccionador virtual distribuido para el host %(host)s "
-"no existe."
-
-msgid "Domain to use for building the hostnames"
-msgstr "Dominio a utilizar par crear los nombres de host"
-
-msgid "Downgrade no longer supported"
-msgstr "La degradación ya no está soportada"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "El controlador %s no es único entre los proveedores"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "Controlador para el cortafuegos de grupos de seguridad en el agente L2"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr ""
-"Controlador que utilizar para la planificación de la red para el agente DHCP"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr ""
-"Controlador que utilizar para la planificación del direccionador para un "
-"agente L3 predeterminado"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "Dirección IP duplicada '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "Regla de medición duplicada en POST."
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "Regla de grupo de seguridad duplicada en POST."
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "Ruta de host '%s' duplicada"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "Elementos duplicados en la lista: '%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "Servidor de nombres '%s' duplicado"
-
-msgid "Duplicate segment entry in request."
-msgstr "Entrada de segmento duplicada en la solicitud."
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "ERROR: %s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"ERROR: no se ha podido encontrar el archivo de configuración por medio de "
-"las rutas de búsqueda predeterminada (~/.neutron/, ~/, /etc/neutron/, /etc/) "
-"¡y la opción '--config-file'!"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr "Debe pasarse un parámetro network_id o router_id al método _get_ports."
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "Se debe especificar el ID de subred o el ID de puerto"
-
-msgid "Empty physical network name."
-msgstr "Nombre de red física vacío."
-
-msgid "Enable FWaaS"
-msgstr "Habilitar FWaaS"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "Habilitar modo HA para direccionadores virtuales."
-
-msgid "Enable SSL on the API server"
-msgstr "Habilitar SSL en el servidor API"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"Habilitar VXLAN en el agente. Se puede habilitar cuando el agente es "
-"gestionado por ml2 plugin usando controlador de mecanismo linuxbridge"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"Habilite el encuestado de ARP local si está soportado. Requiere OVS 2.1 y el "
-"controlador ML2 l2population. Permite que el conmutador (cuando da soporte a "
-"una superposición) responda a una solicitud ARP localmente sin realizar una "
-"difusión de ARP costosa en la superposición."
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"Habilite servicios en un agente con admin_state_up False. Si esta opción es "
-"False, cuando el valor admin_state_up de un agente se convierte en False, "
-"los servicios en élse inhabilitarán. Los agentes con admin_state_up False no "
-"se seleccionan para laplanificación automática independientemente de esta "
-"opción. No obstante, la planificación manual paraestos agentes está "
-"disponible si esta opción es True."
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"Habilitar/inhabilitar observador de registro por proxy de metadatos. Debe "
-"inhabilitarse cuandometadata_proxy_user/group no tiene permiso para leer/"
-"grabar en su archivo de registro ydebe utilizarse la opción copytruncate "
-"logrotate si logrotate se habilita en los archivos de registro de proxy de "
-"metadatos. El valor predeterminado de la opción se deduce "
-"demetadata_proxy_user: el registro de observador está habilitado si "
-"metadata_proxy_user es unid/nombre de usuario efectivo de agente."
-
-msgid "Encountered an empty component."
-msgstr "Se ha encontrado un componente vacío."
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "El final del rango VLAN es menor que el inicio del rango VLAN"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr "El final del rango de túnel es menor que el inicio del rango de túnel"
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "Error al importar controlador de dispositivo FWaaS: %s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "Error al analizar la dirección dns %s"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "Error al leer %s "
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr "Los prefijos existentes deben ser una subred de los prefijos nuevos"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"Extensión para usar unto con el controlador de mecanismo l2population del "
-"plug-in ml2. Este habilita el plugin para completar la tabla de reenvío "
-"VXLAN."
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "La ampliación con el alias %s no existe"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "El IP externo %s es el mismo que el IP de pasarela"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"No se puede alcanzar la red externa %(external_network_id)s desde la subred "
-"%(subnet_id)s. Por tanto, no se puede asociar el puerto %(port_id)s con una "
-"IP flotante."
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr ""
-"La red externa %(net_id)s no se puede actualizar para convertirla en no "
-"externa, ya que tiene puertos de pasarela existentes."
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "ExtraDhcpOpt %(id)s no se ha podido encontrar"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr ""
-"El plug-in FWaaS está configurado en el lado del servidor, pero FWasS está "
-"inhabilitado en el agente L3."
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-"No se ha podido volver a programar el direccionador %(router_id)s: no se ha "
-"encontrado ningún agente l3 elegible."
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-"Se ha encontrado un error la planificación del direccionador %(router_id)s "
-"para el agente L3 %(agent_id)s."
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"No se ha podido asignar un VRID en la red %(network_id)s para el "
-"direccionador %(router_id)s después de %(max_tries)s intentos."
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"No se ha podido Se ha encontrado un error al crear un puerto en la red "
-"%(network_id)s, porque fixed_ips incluía una subred no válida %(subnet_id)s"
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr ""
-"No se ha podido analizar la solicitud. No se ha especificado el parámetro "
-"'%s'"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr ""
-"No se ha podido analizar la solicitud. No se ha especificado el atributo "
-"necesario '%s'"
-
-msgid "Failed to remove supplemental groups"
-msgstr "No se han podido eliminar los grupos suplementarios"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "No se ha podido establecer el gid %s"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "No se ha podido establecer el uid %s"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "Ha fallado al configurar %(type)s el puerto de túnel a %(ip)s"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "No se ha podido encontrar la IP flotante %(floatingip_id)s."
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr "Para los protocolos TCP/UDP, port_range_min debe ser <= port_range_max"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "Forzar llamadas ip_lib para usar el ayudante raíz"
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-"Se ha encontrado solapamiento de agrupaciones de asignación:%(pool_1)s "
-"%(pool_2)s para subred %(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"La pasarela no se puede actualizar para el direccionador %(router_id)s, "
-"porque una o más IP flotantes necesitan una pasarela a la red externa "
-"%(net_id)s."
-
-msgid "Gateway is not valid on subnet"
-msgstr "La pasarela no es válida en la subred"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr ""
-"Grupo (gid o nombre) que ejecuta el proxy de metadatos después de su "
-"inicialización"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"Grupo (gid o nombre) que ejecuta el proxy de metadatos después de su "
-"inicialización (si está vacío: grupo efectivo del agente)."
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr ""
-"Grupo (gid o nombre) que ejecuta este proceso después de su inicialización"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Cuántas veces Neutron intentará de nuevo la generación MAC"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"Se proporciona el código ICMP (port-range-max) %(value)s, pero falta el tipo "
-"ICMP (port-range-min)."
-
-msgid "ID of network"
-msgstr "ID de red"
-
-msgid "ID of network to probe"
-msgstr "ID de red a analizar"
-
-msgid "ID of probe port to delete"
-msgstr "ID de puerto de analizador a suprimir"
-
-msgid "ID of probe port to execute command"
-msgstr "ID de puerto de analizador para ejecutar mandato"
-
-msgid "ID of the router"
-msgstr "ID del direccionador"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr ""
-"La dirección IP %(ip_address)s no es una IP válida para las subredes en la "
-"red especificada."
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr ""
-"La dirección IP %(ip_address)s no es una IP válida para la subred "
-"especificada"
-
-msgid "IP address used by Nova metadata server."
-msgstr "Dirección IP utilizada por servidor de metadatos de Nova."
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "La asignación de IP necesita subnet_id o ip_address"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply no ha podido aplicar el siguiente conjunto de reglas "
-"de iptables:\n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"La dirección IPv6 %(address)s no se puede asignar directamente a un puerto "
-"en la subred %(id)s, ya que la subred está configurada para direcciones "
-"automáticas"
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"La subred IPv6 %s configurada para recibir RA de un direccionador externo no "
-"se puede añadir al direccionador de Neutron."
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"Si es True, se realiza el esfuerzo para anunciar valores de MTU a VMs a "
-"través de métodos de red (opciones DHCP y RA MTU) cuando la MTU preferida de "
-"red es conocida."
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-"Si es True, permite a los plug-in que la soportan crear redes VLAN "
-"transparentes."
-
-msgid "Illegal IP version number"
-msgstr "Número de versión IP no permitido"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr "Espacio de prefijo insuficiente para asignar el tamaño de subred %s"
-
-msgid "Insufficient rights for removing default security group."
-msgstr ""
-"No hay derechos suficientes para eliminar el grupo de seguridad "
-"predeterminado."
-
-msgid "Interface to monitor"
-msgstr "Interfaz a supervisar"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr ""
-"Intervalo entre comprobaciones de vida de procesos secundarios (segundos), "
-"utilice 0 para inhabilitarlo"
-
-msgid "Interval between two metering measures"
-msgstr "Intervalo entre dos medidas de medición"
-
-msgid "Interval between two metering reports"
-msgstr "Intervalo entre dos informes de medición"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"CIDR %s no válido para la modalidad de dirección IPv6. OpenStack utiliza el "
-"formato de dirección EUI-64, que requiere que el prefijo sea /64."
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "Dispositivo no válido %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-"Tipo de autenticación no válida: %(auth_type)s, los tipos válidos son: "
-"%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "Formato de datos no válido para agrupación de IP: '%s'"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "Formato de datos no válido para extra-dhcp-opt: %(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "Formato de datos no válido para IP fija: '%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "Formato de datos no válido para ruta de host: '%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "Formato de datos no válido para servidor de nombres: '%s'"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "Formato no válido: %(routes)s, %(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "Formato no válido: %s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "Entrada no válida para %(attr)s. Razón: %(reason)s."
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "Entrada no válida para operación: %(error_message)s."
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"Entrada no válida. '%(target_dict)s' debe ser un diccionario con claves: "
-"%(expected_keys)s"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr ""
-"Estado de instancia no válido: %(state)s, los estados válidos son: "
-"%(valid_states)s"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "Correlación no válida: '%s'"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "Ranura pci no válida %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr ""
-"Formato de proveedor no válido. La última parte debe ser 'predeterminado' o "
-"vacío: %s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "Ruta no válida: %s"
-
-msgid "Invalid service provider format"
-msgstr "Formato de proveedor de servicio no válido"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"Valor no válido para ICMP %(field)s (%(attr)s) %(value)s. Debe ser 0 a 255."
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "Valor no válido para el puerto %(port)s"
-
-msgid "Keepalived didn't respawn"
-msgstr "Keepalived no se ha vuelto a generar"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "Clave %(key)s en correlación: '%(mapping)s' no exclusiva"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "Limit debe ser un entero mayor o igual a 0 y no '%d'"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "Límite de número de alquileres para evitar denegación de servicio."
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"Lista de <red_física>:<mín_vlan>:<máx_vlan> o <red_física> especificar "
-"nombres de physical_network utilizables para proveedor de VLAN y "
-"arrendatario redes, así como rangos de etiquetas VLAN en cada uno disponible "
-"para asignación para las redes de arrendatarios."
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"Lista de puntos de entrada del controlador de tipo de red a cargar desde el "
-"espacio de nombres neutron.ml2.type_drivers."
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "Dirección IP local de puntos finales VXLAN."
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "Dirección IP local del punto final de túnel."
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "Ubicación para socket de dominio UNIX de proxy de metadatos."
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "Ubicación de socket de dominio UNIX de proxy de metadatos"
-
-msgid "Location of pid file of this process."
-msgstr "Ubicación del archivo pid de este proceso."
-
-msgid "Location to store DHCP server config files"
-msgstr "Ubicación para almacenar archivos de configuración de servidor DHCP"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "Ubicación para almacenar archivos de configuración de IPv6 RA"
-
-msgid "Location to store child pid files"
-msgstr "Ubicación para almacenar archivos pid hijos"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr ""
-"Ubicación para almacenar los archivos de configuración keepalived/conntrackd"
-
-msgid "MTU setting for device."
-msgstr "valor de MTU para dispositivo."
-
-msgid "MTU size of veth interfaces"
-msgstr "Tamaño de MTU de la interfaz de veth"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "Hacer que el agente l2 se ejecute en modalidad DVR."
-
-msgid "Malformed request body"
-msgstr "Cuerpo de solicitud formado incorrectamente"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "Número máximo de pares de direcciones permitidos"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "Número máximo de rutas de host por subred"
-
-msgid "Metering driver"
-msgstr "Controlador de medición"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "La etiqueta de medición %(label_id)s no existe"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "La regla de etiqueta de medición %(rule_id)s no existe"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"Regla de etiqueta de medición con remote_ip_prefix %(remote_ip_prefix)s se "
-"solapa otro"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr "Minimizar sondeo supervisando ovsdb para cambios de interfaz."
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "Falta clave en correlación: '%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "Falta valor en correlación: '%s'"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr ""
-"Se han encontrado varios agentes con agent_type=%(agent_type)s y host="
-"%(host)s"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "Múltiples proveedores predeterminados para servicio %s"
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "Se han configurado varios complementos para el servicio %s"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "Múltiples proveedores especificados para servicio %s"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr ""
-"No se permiten varios Id de arrendatario en creación de regla de grupo de "
-"seguridad masiva"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr ""
-"También se debe especificar el protocolo si se proporciona el rango de "
-"puertos. "
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr ""
-"Debe especificar una o más acciones en la adición o modificación de flujo"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr ""
-"El nombre '%s' debe tener 1-63 caracteres de longitud, y sólo pueden ser "
-"alfanuméricos o guiones."
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "El nombre '%s' no debe comenzar o terminar con un guión."
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "Nombre de puente de Open vSwitch a utilizar"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr ""
-"Nombre de región de nova a utilizar. Es útil si keystone gestiona más de una "
-"región."
-
-msgid "Name of the FWaaS Driver"
-msgstr "Nombre del controlador FWasS"
-
-msgid "Namespace of the router"
-msgstr "Espacio de nombres del direccionador"
-
-msgid "Native pagination depend on native sorting"
-msgstr "La paginación nativa depende de la ordenación nativa"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "El delta negativo (degradación) no está soportado"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "La revisión relativa negativa (degradación) no está soportada"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "La red %s no es una red externa válida"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "La red %s no es una red externa"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"No se ha encontrado la red de tamaño %(size)s, de rango de IP "
-"%(parent_range)s, excluyendo los rangos %(excluded_ranges)s."
-
-msgid "Network that will have instance metadata proxied."
-msgstr "Red en la que se ejecutará un proxy en los metadatos de instancia."
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "No hay soporte para el valor de tipo de red '%s'"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "El plugin ML2 necesita el valor de tipo de red"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr "Tipos de red admitidos por el agente (gre o vxlan)."
-
-msgid "Neutron Service Type Management"
-msgstr "Administración del tipo de servicio Neutron"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "¡Neutron core_plugin no está configurado!"
-
-msgid "Neutron plugin provider module"
-msgstr "Módulo de proveedor de plugin de Neutron"
-
-msgid "Neutron quota driver class"
-msgstr "Clase de controlador de cuota Neutron"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr ""
-"No se ha encontrado ningún agente l3 elegible asociado con la red externa %s"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "No hay más direcciones IP disponibles en la red %(net_id)s. "
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"No hay ningún identificador de direccionador virtual (VRID) al crear el "
-"direccionador %(router_id)s. El límite del número de direccionadores HA por "
-"arrendatario es 254."
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "No hay proveedores especificados para '%s' servicio, salir"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"No está permitido asignar manualmente un direccionador de %(router_type)s "
-"%(router_id)s de un nodo DVR existente a otro agente L3 %(agent_id)s."
-
-msgid "Not authorized."
-msgstr "No autorizado."
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"No hay suficientes agentes 13 disponibles para garantizar HA. El mínimo "
-"necesario es %(min_agents)s, disponibles %(num_agents)s."
-
-msgid "Number of RPC worker processes for service"
-msgstr "Número de procesos de trabajador RPC para servicio"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr ""
-"Número de solicitudes de retraso para configurar el socket de servidor de "
-"metadatos con"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr ""
-"Número de solicitudes de registro de reserva para configurar el socket con"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Número de IP flotantes permitidas por arrendatario. Un valor negativo "
-"significa ilimitados."
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Número de redes permitidas por arrendatario. Un valor negativo significa "
-"ilimitado."
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Número de puertos permitidos por arrendatario. Un valor negativo significa "
-"ilimitado."
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Número de direccionadores permitidos por arrendatario. Un valor negativo "
-"significa ilimitado."
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr ""
-"Número de segundos entre en el envío de sucesos a nova si hay sucesos a "
-"enviar."
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "Número de segundos en seguir intentando escuchar"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Número de grupos de seguridad permitidos por arrendatario. Un valor negativo "
-"significa ilimitados."
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Número de reglas de seguridad permitidas por arrendatario. Un valor negativo "
-"significa ilimitados."
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr ""
-"Número de subredes permitidas por arrendatario. Un valor negativo significa "
-"ilimitado."
-
-msgid "OK"
-msgstr "OK"
-
-msgid "Only admin can view or configure quota"
-msgstr "Solo los administradores pueden ver o configurar cuotas"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr ""
-"Sólo está autorizado el administrador para acceder a cuotas para otro "
-"arrendatario"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr "Solo se permite actualizar reglas para un perfil de seguridad a la vez"
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "Solo se puede proporcionar remote_ip_prefix o remote_group_id."
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"No hay soporte para la operación %(op)s para device_owner %(device_owner)s "
-"en el puerto %(port_id)s."
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr ""
-"Alterar temporalmente los valores dnsmasq predeterminados con este archivo"
-
-msgid "Owner type of the device: network/compute"
-msgstr "Tipo de propietario del dispositivo: red/cálculo"
-
-msgid "POST requests are not supported on this resource."
-msgstr "Las solicitudes de POST no son admitidas en este recurso."
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "El análisis de bridge_mappings ha fallado: %s."
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "El análisis de los pci_vendor_devs soportados ha fallado"
-
-msgid "Path to PID file for this process"
-msgstr "Vía de acceso al archivo de PID para este proceso"
-
-msgid "Path to the router directory"
-msgstr "Vía de acceso al directorio de direccionador"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr ""
-"Puerto de parche de igual en puente de integración para puente de túnel."
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr ""
-"Puerto de parche de igual en puente de túnel para puente de integración."
-
-msgid "Ping timeout"
-msgstr "Tiempo de espera de ping"
-
-msgid "Plugin does not support updating provider attributes"
-msgstr "El plug-in no soporta la actualización de atributos de proveedor"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "El puerto %(id)s no tiene una IP fija %(address)s"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"El puerto %(port_id)s está asociado con un arrendatario diferente a la IP "
-"flotante %(floatingip_id)s y, por lo tanto, no se puede enlazar."
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr ""
-"Seguridad de puerto debe habilitar para tener pares de dirección admitida en "
-"un puerto."
-
-msgid "Port does not have port security binding."
-msgstr "El puerto no tiene enlace de seguridad de puerto."
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-"El puerto tiene asociado un grupo de seguridad. No se puede inhabilitar la "
-"seguridad de puerto o la dirección IP hasta que se elimine el grupo de "
-"seguridad."
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr ""
-"Se debe habilitar la seguridad de puerto y el puerto debe tener una "
-"dirección IP para utilizar grupos de seguridad."
-
-msgid "Private key of client certificate."
-msgstr "Clave privada del certificado de cliente."
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "Se ha suprimido el analizador %s"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "Se ha creado analizador: %s "
-
-msgid "Process is already started"
-msgstr "El proceso ya se ha iniciado"
-
-msgid "Process is not running."
-msgstr "El proceso no se está ejecutando."
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "El protocolo para acceder a los metadatos de Nova, http o https"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"Rango de segundos para retrasar aleatoriamente al iniciar la tarea periódica "
-"programador para reducir avalanchas. (Inhabilitar al establecer en 0)"
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr ""
-"El servidor de metadatos remoto ha experimentado un error de servidor "
-"interno. "
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"Representando el tipo de recurso cuya carga está notificando el agente. "
-"Puede ser \"networks\", \"subnets\" o \"ports\". Cuando se especifica (el "
-"valor predeterminado es redes), el servidor extraerá la carga particular "
-"enviada como parte del objeto de configuración de agentes desde el estado de "
-"informe del agente, que es el número de recursos que se está consumiendo, en "
-"cada report_interval.dhcp_load_type que puede utilizarse junto con "
-"network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler."
-"WeightScheduler. Cuando network_scheduler_driver es WeightScheduler, "
-"dhcp_load_type se puede configurar para representar la opción para el "
-"recurso que se está equilibrando. Ejemplo: dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr ""
-"Ha fallado la solicitar: error interno de servidor al procesar la solicitud."
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"La solicitud contiene par de dirección duplicada: mac_address "
-"%(mac_address)s ip_address %(ip_address)s."
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"La subred solicitada con cidr: %(cidr)s para la red: %(network_id)s se "
-"solapa con otra subred"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"El recurso '%(resource_id)s' ya está asociado con el proveedor "
-"'%(provider)s' para el tipo de servicio '%(service_type)s'"
-
-msgid "Resource body required"
-msgstr "Se necesita cuerpo de recurso"
-
-msgid "Resource not found."
-msgstr "Recurso no encontrado."
-
-msgid "Resources required"
-msgstr "Recursos necesarios "
-
-msgid "Root helper daemon application to use when possible."
-msgstr "Aplicación de daemon de ayudante raíz a utilizar cuando sea posible."
-
-msgid "Root permissions are required to drop privileges."
-msgstr "Se necesitan permisos de root para descartar privilegios."
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "Direccionador %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "No se ha podido encontrar el direccionador %(router_id)s."
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr ""
-"El direccionador %(router_id)s no tiene una interfaz con el id %(port_id)s"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr ""
-"El direccionador %(router_id)s no tiene ninguna interfaz en la subred "
-"%(subnet_id)s"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "El direccionador ya tiene un puerto en la subred %s"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"La interfaz de direccionador para la subred %(subnet_id)s en el "
-"direccionador %(router_id)s no se puede suprimir, porque la necesitan una o "
-"más IP flotantes."
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"La interfaz de direccionador para la subred %(subnet_id)s en el "
-"direccionador %(router_id)s no se puede suprimir, porque la necesitan una o "
-"más rutas."
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr ""
-"Direccionador en la que se ejecutará un proxy en los metadatos de las "
-"instancias conectadas."
-
-msgid "Run as daemon."
-msgstr "Ejecutar como daemon."
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"Segundos entre nodos que informan del estado al servidor; debe ser menor que "
-"agent_down_time, mejor si es la mitad o menos que agent_down_time."
-
-msgid "Seconds between running periodic tasks"
-msgstr "Segundos entre tareas periódicas en ejecución"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"Segundos para considerar que el agente está inactivo; debe ser como mínimo "
-"el doble de report_interval, para asegurarse de que el agente está inactivo "
-"definitivamente."
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "El grupo de seguridad %(id)s no existe"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "La regla de grupo de seguridad %(id)s no existe"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "La regla de grupo de seguridad ya existe. El id de regla es %(id)s."
-
-msgid "Segments and provider values cannot both be set."
-msgstr ""
-"Los valores de segmentos y proveedor no pueden estar establecidos ambos."
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"Envíe notificación a nova cuando cambien los datos de puerto (fixed_ips/"
-"floatingip) para que nova pueda actualizar la memoria caché."
-
-msgid "Send notification to nova when port status changes"
-msgstr "Envíe notificación a nova cuando cambie el estado de puerto"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"Envíe todos estos ARP innecesarios para la configuración de HA, si el número "
-"es inferior o igual a 0, la característica se inhabilita"
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-"El proveedor de servicio '%(provider)s' no se ha podido encontrar para el "
-"tipo de servicio %(service_type)s"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr ""
-"El tipo de servicio %(service_type)s no tiene un proveedor de servicio "
-"predeterminado"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"Establecer el nuevo tiempo de espera en segundos para nuevas llamadas rpc "
-"después de que el agente reciba SIGTERM. Si el valor se establece en 0, no "
-"se modificará el tiempo de espera de rpc"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"Establecer o anular el establecimiento del bit DF (don't fragment) en el "
-"paquete de IP saliente que lleva el túnel GRE/VXLAN."
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"Algunos arrendatarios tiene más de un grupo de seguridad denominado "
-"'default': %(duplicates)s. Deben resolverse todos los grupos de seguridad "
-"'default' duplicados antes de actualizar la base de datos."
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr ""
-"Para especificar un 'tenant_id' distinto del arrendatario autenticado en la "
-"solicitud requiere privilegios administrativos"
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr ""
-"La subred para la interfaz de direccionador debe tener una IP de pasarela"
-
-msgid "Subnet pool has existing allocations"
-msgstr "La agrupación de subred tiene asignaciones existentes"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr "Subred utilizada con la red de administradores HA l3."
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr ""
-"Distintivo en todo el sistema para determinar el tipo de direccionador que "
-"pueden crear los arrendatarios. Sólo el administrador puede alterarlo "
-"temporalmente."
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "Puerto TCP para escuchar solicitudes de servidor de metadatos."
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr ""
-"Puerto TCP usado por el proxy de espacio de nombre de metadatos Neutron."
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Puerto TCP utilizado por el servidor de metadatos de Nova."
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "El TLD '%s' no debe ser enteramente numérico"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "TOS para paquetes de protocolo de interfaz vxlan."
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "TTL para paquetes de protocolo de interfaz vxlan."
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr ""
-"El arrendatario %(tenant_id)s no está autorizado a crear %(resource)s en "
-"esta red"
-
-msgid "Tenant network creation is not enabled."
-msgstr "La creación de red de arrendatario no se ha habilitado."
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"La opción 'gateway_external_network_id' se debe configurar para este agente "
-"ya que Neutron tiene más de una red externa."
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr ""
-"El CIDR de red HA especificado en el archivo de configuración no es válido; "
-"%(cidr)s."
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "El puerto UDP para a usar para los túneles VXLAN."
-
-msgid "The advertisement interval in seconds"
-msgstr "Intervalo de anuncio en segundos"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "La agrupación de asignación %(pool)s no es válida. "
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-"La agrupación de asignación %(pool)s abarca más allá de cidr de subred "
-"%(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr ""
-"Otro recurso hace referencia al atributo '%(attr)s', la ordenación "
-"'%(resource)s no puede usarlo'"
-
-msgid "The core plugin Neutron will use"
-msgstr "El core plugin Neutron usará"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "El controlador utilizado para gestionar el servidor DHCP."
-
-msgid "The driver used to manage the virtual interface."
-msgstr "El controlador utilizado para gestionar la interfaz virtual."
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"El siguiente device_id %(device_id)s no es propiedad de su arrendatario o "
-"coincide con el direccionador de otros arrendatarios."
-
-msgid "The host IP to bind to"
-msgstr "El IP de host al que enlazar"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "Interfaz para la interacción con la OVSDB"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-"El número máximo de elementos devueltos en una única respuesta, el valor "
-"'infinite' o un entero negativo significa que no hay límite"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr ""
-"La red %(network_id)s ya está alojada por el agente de DHCP %(agent_id)s."
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr ""
-"La red %(network_id)s no está alojada por el agente de DHCP %(agent_id)s."
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr ""
-"El número de pares de direcciones permitidos excede el máximo de %(quota)s."
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr ""
-"El número de segundos que el agente esperará entre sondeos de cambios de "
-"dispositivo local."
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-"Número de segundos a esperar antes de volver a generar el supervisor ovsdb "
-"después de perder la comunicación con él."
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "el número de sort_keys y sort_dirs debe ser igual"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "El puerto '%s' se ha suprimido"
-
-msgid "The port to bind to"
-msgstr "El puerto al que enlazar"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "El tipo de contenido solicitado %s no es válido."
-
-msgid "The resource could not be found."
-msgstr "El recurso no se ha podido encontrar."
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr ""
-"El direccionador %(router_id)s ya está alojado por el agente L3 %(agent_id)s."
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr ""
-"El servidor tiene un error o no puede ejecutar la operación solicitada."
-
-msgid "The service plugins Neutron will use"
-msgstr "Los plug-ins de servicio que utilizará Neutron"
-
-msgid "The type of authentication to use"
-msgstr "El tipo de autenticación a utilizar"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "El valor de '%(value)s' para %(element)s no es válido."
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"La modalidad de trabajo del agente. Las modalidades permitidas son: "
-"'heredada' - conserva el comportamiento existente, donde el agente L3 se "
-"despliega en un nodo de red centralizado para proporcionar servicios de L3 "
-"como DNAT y SNAT. Utilice esta modalidad si no desea adoptar DVR. 'dvr' - "
-"esta modalidad habilita la funcionalidad DVR y debe utilizarse para un "
-"agente L3 que se ejecuta en un host de cálculo. 'dvr_snat' - habilita el "
-"soporte SNAT centralizado conjuntamente con DVR. Esta modalidad debe "
-"utilizarse para un agente L3 que se ejecuta en un nodo centralizado (o en "
-"despliegues de un solo host, por ejemplo, devstack)"
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"Verdadero para suprimir todos los puertos en todos los puentes OpenvSwitch. "
-"Falso para suprimir puertos creados por Neutron por los puentes de red "
-"externos y de integración."
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "El plugin ML2 necesita el valor de IP de túnel"
-
-msgid "Tunnel bridge to use."
-msgstr "Puente de túnel para utilizar."
-
-msgid "URL to database"
-msgstr "URL en base de datos"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "No se puede acceder a %s "
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr ""
-"No se ha podido completar la operación para %(router_id)s. El número de "
-"rutas supera el máximo de %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"No se ha podido completar la operación para %(subnet_id)s. El número de "
-"servidores de nombres de DNS supera el límite %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr ""
-"No se ha podido completar la operación para %(subnet_id)s. El número de "
-"rutas de host supera el límite %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-"No se ha podido completar la operación para la red %(net_id)s. La dirección "
-"IP %(ip_address)s está en uso."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr ""
-"No se ha podido completar la operación para la red %(net_id)s. La dirección "
-"MAC %(mac)s está en uso."
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"No se puede completar la operación en la red %(net_id)s. Hay uno o más "
-"puertos que aún se utilizan en la red."
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"No es posible completar la operación en el puerto %(port_id)s para la red "
-"%(net_id)s. El puerto ya tiene un dispositivo conectado %(device_id)s."
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "No se puede convertir el valor en %s "
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "No se puede crear el puerto de pasarela de agente"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "No se puede crear el puerto de interfaz SNAT"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr ""
-"No se ha podido crear la red plana. La red física %(physical_network)s está "
-"en uso."
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr ""
-"No se ha podido crear la red. No se ha encontrado ninguna red disponible en "
-"el máximo de intentos permitidos."
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr ""
-"No se ha podido crear la red. No hay ninguna red de arrendatario disponible "
-"para asignación."
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"No se ha podido crear la red. La VLAN %(vlan_id)s en la red física "
-"%(physical_network)s está en uso."
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr ""
-"No se puede crear la red. El ID de túnel %(tunnel_id)s se está utilizando. "
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "No se ha podido determinar la dirección mac para %s"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "No se puede encontrar '%s' en el cuerpo de solicitud "
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr ""
-"No se ha podido encontrar ninguna dirección IP en la red externa %(net_id)s."
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "No se ha podido encontrar el nombre del recurso en %s"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "No se puede generar dirección IP por EUI64 para el prefijo de IPv4"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "No se puede generar la mac DVR exclusiva para el host %(host)s."
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "No se puede generar mac exclusivo en la red %(net_id)s. "
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-"No se ha podido identificar un campo destino desde: %s. La coincidencia debe "
-"tener la forma %%(<field_name>)s"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr ""
-"No se ha podido verificar la coincidencia:%(match)s como recurso primario: "
-"%(res)s no se ha encontrado"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "Código de respuesta inesperado: %s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "Respuesta inesperada : %s "
-
-msgid "Unimplemented commands"
-msgstr "Mandatos no implementados"
-
-msgid "Unknown API version specified"
-msgstr "Versión API desconocida especificada"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "Atributo desconocido '%s'."
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "Cadena desconocida: %r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "Recursos de cuota desconocidos %(unknown)s."
-
-msgid "Unmapped error"
-msgstr "Error no correlacionado"
-
-msgid "Unrecognized action"
-msgstr "Acción no reconocida"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "Atributo(s) no reconocido(s) '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "Tipo de contenido no soportado"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "Tipo de red no soportado %(net_type)s."
-
-msgid "Unsupported request type"
-msgstr "Tipo de solicitud no soportado"
-
-msgid "Updating default security group not allowed."
-msgstr "Actualización del grupo de seguridad predeterminado no permitida."
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"Use el controlador del mecanismo ML2 l2population para aprender el uso "
-"remoto MAC e IPs y mejorar la escalabilidad del túnel."
-
-msgid "Use broadcast in DHCP replies"
-msgstr "Utilizar la difusión en respuestas DHCP"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr "Utilice --delta o la revisión relativa, pero no ambas"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr ""
-"Usuario (uid o nombre) que ejecuta el proxy de metadatos después de su "
-"inicialización"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"Usuario (uid o nombre) que ejecuta el proxy de metadatos después de su "
-"inicialización (si está vacío: usuario efectivo del agente)."
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr ""
-"Usuario (uid o nombre) que ejecuta este proceso después de su inicialización"
-
-msgid "VRRP authentication password"
-msgstr "Contraseña de autenticación de VRRP"
-
-msgid "VRRP authentication type"
-msgstr "Tipo de autenticación VRRP"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"La validación de las claves del diccionario ha fallado. Claves esperadas: "
-"%(expected_keys)s Claves proporcionadas: %(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "El validador '%s' no existe."
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "Valor %(value)s en correlación: '%(mapping)s' no exclusiva"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"Registro de archivo de observador. El observador de registro debe "
-"inhabilitarse cuandometadata_proxy_user/group no tiene permisos de lectura-"
-"grabación en el archivo de registro de proxy de metadatos."
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr ""
-"Dónde almacenar archivos de estado Neutron. Este directorio se debe poder "
-"escribir por el agente."
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"Con IPv6, la red utilizada para la pasarela externa no debetener una subred "
-"asociada, ya que puede utilizarse la dirección de enlace local(LLA) asignada "
-"automáticamente. No obstante, se necesita una dirección de pasarela IPv6 "
-"parautilizarla como siguiente salto para la ruta predeterminada. Si no se "
-"configura aquí ningunadirección de pasarela IPv6, (y sólo entonces) se "
-"configurará un direccionador de Neutronpara obtener su ruta predeterminada "
-"de los avisos de direccionador (RA) deldireccionador en sentido ascendente; "
-"en este caso, el direccionador en sentido ascendente también "
-"debeconfigurarse para enviar estos RA. ipv6_gateway, cuando se configurada, "
-"debeser la LLA de interfaz en el direccionador en sentido ascendente. Si "
-"desea un siguiente salto utilizando una dirección exclusivo global (GUA), "
-"debe hacerse utilizando una subred asignada a la red, no mediante este "
-"parámetro."
-
-msgid "You must implement __call__"
-msgstr "Debe implementar __call__"
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-"Debe proporcionar un archivo config para puente, ya sea --config-file o "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "Debe proporcionar una revisión o delta relativa"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr ""
-"allocation_pools sólo se permite para solicitudes de subred específicas."
-
-msgid "binding:profile value too large"
-msgstr "Valor de binding:profile demasiado grande"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "cidr y prefixlen no pueden proporcionarse conjuntamente"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr "dhcp_agents_per_network debe ser >= 1. '%s' no es válido."
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "fixed_ip_address no se puede especificar sin un port_id"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "tiene el propietario de dispositivo %s"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "El mandato ip ha fallado en el dispositivo %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "No hay soporte para la función de ip link %(capability)s"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "No hay soporte para el mandato ip link: %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr "ip_version debe especificarse en ausencia de cidr y subnetpool_id"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ipv6_address_mode no es válido cuando ip_version es 4"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ipv6_ra_mode no es válido cuando ip_version es 4"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"ipv6_ra_mode o ipv6_address_mode no se pueden establecer cuando enable_dhcp "
-"está establecido en False."
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"ipv6_ra_mode establecido en '%(ra_mode)s' con ipv6_address_mode establecido "
-"en '%(addr_mode)s' no es válido. Si se establecen ambos atributos, deben "
-"tener el mismo valor"
-
-msgid "mac address update"
-msgstr "Actualización de la dirección MAC"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"El parámetro de configuración max_l3_agents_per_router %(max_agents)s no es "
-"válido. Debe ser mayor o igual que min_l3_agents_per_router %(min_agents)s."
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"El parámetro de configuración min_l3_agents_per_router no es válido. Tiene "
-"que ser igual o mayor que %s para HA."
-
-msgid "network_type required"
-msgstr "network_type requerido"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "valor network_type '%s' no admitido"
-
-msgid "new subnet"
-msgstr "nueva subred"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "physical_network '%s' desconocido para la red del proveedor VLAN"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "physical_network '%s' desconocida para la red de proveedor simple"
-
-msgid "physical_network required for flat provider network"
-msgstr "se requiere physical_network para la red de proveedor simple"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "proveedor:physical_network especificado para la red %s"
-
-msgid "record"
-msgstr "registro"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "respawn_interval debe ser >= 0 si se proporciona."
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id fuera de rango (%(min)s a %(max)s)"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr ""
-"segmentation_id requiere physical_network para la red de proveedor VLAN"
-
-msgid "the nexthop is not connected with router"
-msgstr "el siguiente salto no está conectado con el direccionador"
-
-msgid "the nexthop is used by router"
-msgstr "el siguiente salto lo está utilizando el direccionador"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"uuid proporcionada desde la línea de mandatos para que external_process "
-"puede realizar un seguimiento a través de la interfaz /proc/cmdline."
diff --git a/neutron/locale/fr/LC_MESSAGES/neutron.po b/neutron/locale/fr/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index 89895ee..0000000
+++ /dev/null
@@ -1,2684 +0,0 @@
-# French translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-11-27 11:20+0000\n"
-"Last-Translator: Maxime Coquerel <max.coquerel@gmail.com>\n"
-"Language: fr\n"
-"Language-Team: French\n"
-"Plural-Forms: nplurals=2; plural=(n > 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"Commande : %(cmd)s\n"
-"Code de sortie : %(code)s\n"
-"Stdin : %(stdin)s\n"
-"Stdout : %(stdout)s\n"
-"Stderr : %(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: erreur pilote interne."
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s n'est pas un identificateur %(type)s valide"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"%(invalid_dirs)s est une valeur non valide pour sort_dirs ; les valeurs "
-"valides sont '%(asc)s' et '%(desc)s'"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(key)s interdit pour le réseau de fournisseur %(tunnel)s"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"%(method)s appelé avec les paramètres réseau %(current)s (paramètres "
-"d'origine %(original)s) et segments de réseau %(segments)s"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"%(method)s a appelé les paramètres de sous-réseau %(current)s (paramètres "
-"d'origine %(original)s)"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s a échoué."
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr "%(name)s '%(addr)s' ne correspond pas à ip_version '%(ip_version)s'"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "%s ne peut pas être appelé en mode hors ligne"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "%s est un attribut non valide pour sort_key"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "%s est un attribut non valide pour sort_keys"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s n'est pas une balise VLAN (réseau local virtuel) valide"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr "%s doit implémenter get_port_from_device ou get_ports_from_devices."
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "%s interdit pour le réseau de fournisseurs de réseau local virtuel"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "%s interdit pour le réseau de fournisseurs non hiérarchique"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "%s interdit pour le réseau de fournisseurs local"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s' dépasse la longueur maximale de %(max_len)s."
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s' n'est pas dans %(valid_values)s"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s' est trop grand - ne doit pas être supérieur à '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr "'%(data)s' est trop petit - doit être au moins '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr ""
-"'%(data)s' n'est pas un routage CIDR de sous-réseau IP reconnu, '%(cidr)s' "
-"est recommandé"
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' n'est pas un serveur de noms valide. %(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s' les chaines de caractères vides ne sont pas autorisés"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "'%s' ne peut pas être converti en valeur booléenne."
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' contient des espaces blanc"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "'%s' n'est pas un dictionnaire."
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s' n'est pas une liste."
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s' n'est pas une adresse IP valide."
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s' n'est pas un sous-réseau IP valide."
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s' n'est pas une adresse MAC valide."
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s' n'est pas un identificateur unique universel valide."
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' n'est pas une valeur booléen valide "
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "'%s' n'est pas une entrée valide."
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s' n'est pas une chaîne valide."
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s' n'est pas un entier."
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s' n'est pas un entier ou un identificateur unique universel."
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s'n'a pas la forme <key> = [value]"
-
-#, python-format
-msgid "'%s' must be a non negative decimal."
-msgstr "'%s' doit être un nombre décimal non négatif."
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "'%s' ne doit pas être négatif."
-
-msgid "'.' searches are not implemented"
-msgstr "'.' recherches ne sont pas implémentées. "
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "La longueur 0 n'est pas autorisée pour le préfixe CIDR"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr ""
-"Une valeur cidr doit être indiquée si aucun pool de sous-réseau n'est défini"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"Liste de mappages de réseaux physiques en valeurs d'unité de transmission "
-"maximale. Le format du mappage est <physnet>:<mtu val>. Ce mappage permet de "
-"définir une valeur d'unité de transmission maximale de réseau physique "
-"différente de la valeur segment_mtu par défaut."
-
-msgid "A metering driver must be specified"
-msgstr "Un pilote de mesure doit être spécifié."
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr ""
-"API d'extraction des fournisseurs de service pour les services avancés de "
-"Neutron"
-
-msgid "Access to this resource was denied."
-msgstr "L'accès a cette ressource était refusé"
-
-msgid "Action to be executed when a child process dies"
-msgstr "Action à exécuter quand un processus enfant meurt"
-
-msgid "Address not present on interface"
-msgstr "Une adresse n'est pas présente sur l'interface"
-
-msgid "Adds external network attribute to network resource."
-msgstr "Ajoute l'attribut de réseau externe à la ressource du réseau."
-
-msgid "Adds test attributes to core resources."
-msgstr "Ajoute les attributs de test aux ressources principales."
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "Agent %(id)s introuvable"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "L'agent %(id)s n'est pas un agent L3 ou a été désactivé"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "L'agent %(id)s n'est pas un agent DHCP valide ou a été désactivé"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "Mise à jour de l'agent: %(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr "Agent introuvable avec agent_type=%(agent_type)s et host=%(host)s"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "Autorise la planification automatique des réseaux de l'agent DHCP."
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "Autorise la planification automatique des routeurs vers l'agent L3."
-
-msgid "Allow running metadata proxy."
-msgstr "Autorisez le proxy de métadonnées en cours d'exécution."
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr ""
-"Autoriser l'envoi de notifications d'opérations de ressources à l'agent DHCP"
-
-msgid "Allow the usage of the bulk API"
-msgstr "Autoriser l'utilisation de l'API de traitement en bloc"
-
-msgid "Allow the usage of the pagination"
-msgstr "Autoriser l'utilisation de la mise en page"
-
-msgid "Allow the usage of the sorting"
-msgstr "Autoriser l'utilisation du tri"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr ""
-"Permet d'effectuer des requêtes (https) non sécurisées aux métadonnées de "
-"nova"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair doit contenir ip_address"
-
-msgid "An interface driver must be specified"
-msgstr "Un pilote d'interface doit être spécifié."
-
-#, python-format
-msgid "An invalid value was provided for %(opt_name)s: %(opt_value)s."
-msgstr "Une valeur non valide a été fournie pour %(opt_name)s : %(opt_value)s."
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"Liste ordonnée de points d'entrée de pilote de mécanisme à charger à partir "
-"de l'espace de nom neutron.ml2.mechanism_drivers."
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr "Une erreur inconnue s'est produite. Renouvelez votre demande."
-
-msgid "An unknown exception occurred."
-msgstr "Exception inconnue générée."
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "Attribut '%s non autorisé dans l'autotest à la mise sous tension"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "Supprime automatiquement les réseaux des agents DHCP hors ligne."
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr ""
-"Replanifier automatiquement les routeurs pour qu'ils passent d'agents L3 "
-"hors connexion aux agents L3 connectés."
-
-msgid "Availability zone of this node"
-msgstr "Zone de disponibilité du noeud"
-
-msgid "Available commands"
-msgstr "Commandes disponibles"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "Le backend ne prend pas en charge la transparence VLAN."
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"Mauvais type de préfixe ou mauvais format d'adresse mac pour générer une "
-"adresse IPv6 par EUI-64: %(prefix)s, %(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr ""
-"Type de préfixe erroné pour la génération de l'adresse IPv6 par EUI-64 : %s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "MAC de base : %s"
-
-msgid "Body contains invalid data"
-msgstr "Le corps contient des données non valides"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "Le pont %(bridge)s n'existe pas."
-
-#, python-format
-msgid "Bridge %s does not exist"
-msgstr "Le pont %s est inexistant."
-
-msgid "Bulk operation not supported"
-msgstr "Opération globale non prise en charge"
-
-msgid "CIDR to monitor"
-msgstr "CIDR à surveiller"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr ""
-"Impossible d'ajouter une adresse IP flottante au port sur le sous-réseau %s "
-"qui n'a pas d'adresse IP passerelle"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr ""
-"Impossible d'allouer le sous-réseau demandé à partir de l'ensemble de "
-"préfixes disponibles"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"Impossible d'associer l'adresse IP flottante %(floating_ip_address)s "
-"(%(fip_id)s) avec le port %(port_id)s en utilisant l'adresse IP fixe "
-"%(fixed_ip)s, car cette adresse IP fixe a déjà une adresse IP flottante sur "
-"le réseau externe %(net_id)s."
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr ""
-"Impossible de créer une adresse IP flottante et de la lier au port %s, car "
-"ce port appartient à un locataire différent."
-
-msgid "Cannot create resource for another tenant"
-msgstr "Impossible de créer une ressource pour un autre titulaire"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "Impossible de désactiver enable_dhcp avec des attributs ipv6 définis"
-
-#, python-format
-msgid "Cannot find %(table)s with %(col)s=%(match)s"
-msgstr "Impossible de trouver %(table)s avec %(col)s=%(match)s"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"Impossible d'avoir plusieurs ports de routeur avec le même ID réseau s'ils "
-"contiennent tous des sous-réseaux IPv6. Le port %(p)s existant a un ou "
-"plusieurs sous-réseaux IPv6 et l'ID réseau %(nid)s"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"Impossible d'héberger l'hôte %(router_type)s, routeur %(router_id)s sur "
-"l'agent %(agent_id)s en mode L3 %(agent_mode)s ."
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr ""
-"Impossible de mettre en correspondance la priorité lors de la suppression ou "
-"de la modification de flux"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "Impossible de spécifier l'ID sous-réseau et l'ID port"
-
-msgid "Cannot understand JSON"
-msgstr "Impossible de comprendre JSON"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "Impossible de mettre à jour l'attribut en lecture seule %s"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr ""
-"Fichier de clés publiques de l'autorité de certification (CA cert) pour SSL"
-
-msgid "Check ebtables installation"
-msgstr "Vérifier l'installation ebtables "
-
-msgid "Check for ARP responder support"
-msgstr "Vérifier le support de programme de réponse ARP"
-
-msgid "Check for OVS Geneve support"
-msgstr "Vérifier le support OVS Geneve"
-
-msgid "Check for OVS vxlan support"
-msgstr "Vérifier le support OVS vxlan"
-
-msgid "Check for VF management support"
-msgstr "Vérifier le support de gestion VF"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "Vérifier le support iproute2 vxlan"
-
-msgid "Check for nova notification support"
-msgstr "Vérifier le support de notification de Nova"
-
-msgid "Check for patch port support"
-msgstr "Vérifier le support de port de correctif"
-
-msgid "Check ipset installation"
-msgstr "Vérifier l'installation ipset"
-
-msgid "Check minimal dibbler version"
-msgstr "Vérifier la version minimale de dibbler"
-
-msgid "Check minimal dnsmasq version"
-msgstr "Vérifier la version minimale de dnsmasq"
-
-msgid "Check netns permission settings"
-msgstr "Vérifier les autorisations netns"
-
-msgid "Check ovsdb native interface support"
-msgstr "Consulter le support d'interface native ovsdb"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"Le routage CIDR %(subnet_cidr)s du sous-réseau %(subnet_id)s chevauche le "
-"routage CIDR %(cidr)s du sous-réseau %(sub_id)s"
-
-msgid "Class not found."
-msgstr "Classe non trouvé."
-
-msgid "Client certificate for nova metadata api server."
-msgstr "Certificat client pour le serveur d'API des métadonnées nova."
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"Liste d'uplets <tun_min>:<tun_max> séparés par des virgules énumérant des "
-"plages d'ID GRE disponibles pour l'allocation de réseau locataire"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"Liste d'uplets <vni_min>:<vni_max> séparés par des virgules énumérant des "
-"plages d'ID VNI VXLAN disponibles pour l'allocation de réseau locataire"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr ""
-"Liste séparée par des virgules des serveurs DNS qui seront utilisés comme "
-"réexpéditeurs."
-
-msgid "Command to execute"
-msgstr "Commande à exécuter"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr ""
-"Fichier de configuration du pilote d'interface (vous pouvez aussi utiliser "
-"l3_agent.ini)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "Valeur en conflit ethertype %(ethertype)s pour le CIDR %(cidr)s"
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"Indique si l'API de groupe de sécurité neutron est activée sur le serveur. "
-"Elle doit être false si aucun groupe de sécurité n'est utilisé ou en cas "
-"d'utilisation de l'API du groupe de sécurité neutron."
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr ""
-"Echec lors de la liaison à %(host)s:%(port)s après attente de %(time)d "
-"secondes"
-
-msgid "Could not deserialize data"
-msgstr "Impossible de désérialiser des données"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "Echec de la création. %(dev_name)s existe déjà."
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr ""
-"L'adresse IP de la passerelle en cours %(ip_address)s est déjà en cours "
-"d'utilisation par le port %(port_id)s. Impossible de mettre à jour"
-
-msgid "Currently distributed HA routers are not supported."
-msgstr ""
-"Les routeurs haute disponibilité (HA) distribués ne sont actuellement pas "
-"pris en charge."
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"Durée de bail DHCP (en secondes). Utiliser -1 pour ordonner dnsmasq pour "
-"utiliser des durées de bail illimitées."
-
-msgid "Default driver to use for quota checks"
-msgstr "Pilote par défaut à utiliser pour les vérifications de quota"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Nombre de ressources par défaut autorisées par le locataire. Une valeur "
-"négative signifie illimité."
-
-msgid "Default security group"
-msgstr "Groupe de sécurité par défaut"
-
-msgid "Default security group already exists."
-msgstr "Le groupe de sécurité par défaut existe déjà."
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"Définit des fournisseurs pour les services avancés utilisant le format : "
-"<service_type>:<name>:<driver>[:default]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-"Temps d'attente durant lequel l'agent doit mettre à jour les ports existants "
-"lors de son redémarrage"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "Supprimez l'espace de nom en supprimant toutes les unités."
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "Suppression du port %s"
-
-msgid "Destroy IPsets even if there is an iptables reference."
-msgstr "Détruire les IPsets même s'il y a une référence iptables."
-
-msgid "Destroy all IPsets."
-msgstr "Destruction de tous les IPsets."
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "Périphérique %(dev_name)s non unique dans le mappage '%(mapping)s'"
-
-msgid "Device has no virtual functions"
-msgstr "L'unité n'a aucune fonction virtuelle"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr ""
-"Le nom de périphérique %(dev_name)s est manquant dans "
-"physical_device_mappings"
-
-msgid "Device not found"
-msgstr "Equipement non trouvé"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr ""
-"L'adresse MAC DVR (routeur virtuel distribué) n'existe pas pour l'hôte "
-"%(host)s."
-
-msgid "Domain to use for building the hostnames"
-msgstr "Domaine à utiliser pour générer les noms d'hôte"
-
-msgid ""
-"Domain to use for building the hostnames.This option is deprecated. It has "
-"been moved to neutron.conf as dns_domain. It will removed from here in a "
-"future release"
-msgstr ""
-"Domaine à utiliser pour construire les noms d’hôtes. Cette option est "
-"devenue obsolète.  Elle a été remplacée par dns_domain dans neutron.conf. "
-"Elle sera  supprimée dans une version future."
-
-msgid "Downgrade no longer supported"
-msgstr "La rétromigration n'est plus prise en charge"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "Le pilote %s n'est pas unique entre les fournisseurs"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr ""
-"Pilote pour le pare-feu de groupes de sécurité dans l'agent de niveau 2"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr "Pilote à utiliser pour la planification du réseau de l'agent DHCP"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr ""
-"Pilote à utiliser pour la planification du routeur de l'agent L3 par défaut"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "Adresse IP en double '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "Règle de mesure en double dans POST."
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr ""
-"Règle de groupe de sécurité en double dans l'autotest à la mise sous tension."
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "Route hôte en double '%s'"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "Elément en double dans la liste : '%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "Serveur de noms en double '%s'"
-
-msgid "Duplicate segment entry in request."
-msgstr "Entrée de segment en double dans la demande."
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "ERREUR : %s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"ERREUR : Impossible de trouver le fichier de configuration via les chemins "
-"de recherche par défaut (~/.neutron/, ~/, /etc/neutron/, /etc/) et l'option "
-"'--config-file' !"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr ""
-"Le paramètre network_id ou le paramètre router_id doit être passé à la "
-"méthode _get_ports."
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "L'ID sous-réseau ou l'ID port doit être spécifié."
-
-msgid "Empty physical network name."
-msgstr "Nom du Réseau Physique vide."
-
-msgid "Enable FWaaS"
-msgstr "Activer FWaaS"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "Activer le mode haute disponibilité pour les routeurs virtuels."
-
-msgid "Enable SSL on the API server"
-msgstr "Active SSL sur le serveur API"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"Activer VXLAN sur l'agent. Il peut être activé lorsque l'agent est géré par "
-"le plug-in ml2 utilisant le pilote de mécanisme linuxbridge"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"Activez le canal répondeur ARP local s'il est pris en charge. Requiert le "
-"pilote l2population OVS 2.1 et ML2. Permet au commutateur (lors de la prise "
-"en charge d'une superposition) de répondre à une demande ARP locale sans "
-"effectuer de diffusion ARP coûteuse dans le réseau Overlay."
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"Activer les services sur un agent ayant admin_state_up avec une valeur "
-"False. Si cette option est False, lorsque admin_state_up d'un agent se voit "
-"attribuer la valeur False, les services qui y sont associés seront "
-"automatiquement désactivés. Les agents ayant admin_state_up avec la valeur "
-"False ne sont pas sélectionnés pour la planification automatique, quelle que "
-"soit la valeur de cette option. Toutefois, il est possible de procéder à une "
-"planification manuelle pour ces agents si cette option a pour valeur True."
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"Activer/Désactiver la surveillance de journaux par proxy de métadonnées. "
-"Elle doit être désactivée lorsque metadata_proxy_user/group n'est pas "
-"autorisé à lire/écrire son fichier journal et l'option copytruncate "
-"logrotate doit être utilisée si logrotate est activée sur les fichiers "
-"journaux de proxy de métadonnées. La valeur par défaut de l'option est "
-"déduite de metadata_proxy_user : la surveillance des journaux est activée si "
-"metadata_proxy_user correspondà l'ID/au nom de l'utilisateur effectif de "
-"l'agent."
-
-msgid "Encountered an empty component."
-msgstr "Un composant vide a été trouvé."
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "La fin de la plage de réseaux locaux virtuels est inférieure au début"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr "La fin de la plage de tunnels est inférieure au début"
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "Erreur d'importation du pilote de périphérique FWaaS : %s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "Erreur lors de l'analyse syntaxique de l'adresse DNS %s"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "Erreur lors de la lecture de %s"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr ""
-"Les préfixes existants doivent être un sous-réseau des nouveaux préfixes"
-
-#, python-format
-msgid ""
-"Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: "
-"%(stderr)s"
-msgstr ""
-"Code de sortie: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; "
-"Stderr: %(stderr)s"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"Extension à utiliser avec le pilote de mécanisme l2population du plug-in "
-"ml2. Elle permet au plug-in de remplir la table de réacheminement VXLAN."
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "L'extension avec l'alias %s n'existe pas"
-
-msgid "Extensions list to use"
-msgstr "Liste d'extensions à utiliser."
-
-#, python-format
-msgid "Extensions not found: %(extensions)s."
-msgstr "Extensions non trouvé: %(extensions)s  "
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "L'adresse IP externe %s est identique à l'adresse IP de passerelle"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"Le réseau externe %(external_network_id)s n'est pas accessible à partir du "
-"sous-réseau %(subnet_id)s. Par conséquent, il est impossible d'associer le "
-"port %(port_id)s avec une adresse IP flottante."
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr ""
-"Le réseau externe %(net_id)s ne peut pas être mis à jour pour devenir non "
-"externe car il a des ports de passerelle existants"
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "ExtraDhcpOpt %(id)s introuvable"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr ""
-"Le plug-in FWaaS est configuré côté serveur, mais FWaaS est désactivé dans "
-"l'agent L3."
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-"Echec de la replanification du routeur %(router_id)s : aucun agent l3 "
-"éligible trouvé."
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-"Echec de planification du routeur %(router_id)s vers l'agent L3 %(agent_id)s."
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"Echec de l'allocation d'un identificateur de routeur virtuel dans le réseau "
-"%(network_id)s pour le routeur %(router_id)s après %(max_tries)s tentatives."
-
-#, python-format
-msgid "Failed to check policy %(policy)s because %(reason)s."
-msgstr "Échec pour vérifier la politique %(policy)s car %(reason)s."
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"Echec de la création de port sur le réseau %(network_id)s car les adresses "
-"IP fixes incluent le sous-réseau non valide %(subnet_id)s "
-
-#, python-format
-msgid "Failed to locate source for %s."
-msgstr "Échec pour localiser la source de %s."
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr "Echec de l'analyse de la demande. Paramètre '%s' non spécifié"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr ""
-"Echec de l'analyse de la demande. Attribut obligatoire '%s' non spécifié"
-
-msgid "Failed to remove supplemental groups"
-msgstr "Echec de la suppression des groupes supplémentaires"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "Echec de la définition du GID %s"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "Echec de la définition de l'UID %s"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "Echec de la configuration du port de tunnel %(type)s sur %(ip)s"
-
-msgid "Failure applying iptables rules"
-msgstr "Échec lors de la mise à jour des règles iptables"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "L'adresse IP flottante %(floatingip_id)s est introuvable"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr "Pour les protocole TCP/UDP, port_range_min doit être <= port_range_max"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "Forcez les appels ip_lib à utiliser Root Helper"
-
-#, python-format
-msgid "Found duplicate extension: %(alias)s."
-msgstr "Extension en double trouvée : %(alias)s."
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-"Chevauchement de pools d'allocation trouvé :%(pool_1)s %(pool_2)s pour le "
-"sous-réseau %(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"La passerelle ne peut pas être mise à jour pour le routeur %(router_id)s, "
-"car une passerelle vers le réseau externe %(net_id)s est requise par une ou "
-"plusieurs adresses IP flottantes."
-
-#, python-format
-msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s."
-msgstr ""
-"Conflits de l'IP passerelle %(ip_address)s avec le pool d'allocation "
-"%(pool)s."
-
-msgid "Gateway is not valid on subnet"
-msgstr "La passerelle n'est pas valide sur le sous-réseau."
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr ""
-"Groupe (UID ou nom) exécutant le proxy de métadonnées après son "
-"initialisation"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"Groupe (UID ou nom) exécutant le proxy de métadonnées après son "
-"initialisation (si vide : groupe effectif de l'agent)."
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr "Groupe (UID ou nom) exécutant ce processus après son initialisation"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr ""
-"Nombre de nouvelles tentatives de génération MAC ultérieurement effectuées "
-"par Neutron"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"Le code ICMP (port-range-max) %(value)s est fourni mais le type ICMP (port-"
-"range-min) est manquant."
-
-msgid "ID of network"
-msgstr "ID du réseau"
-
-msgid "ID of network to probe"
-msgstr "ID du réseau à sonder"
-
-msgid "ID of probe port to delete"
-msgstr "ID du port sonde à supprimer"
-
-msgid "ID of probe port to execute command"
-msgstr "ID du port sonde pour exécuter la commande"
-
-msgid "ID of the router"
-msgstr "Identifiant du routeur"
-
-#, python-format
-msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s"
-msgstr "L'adresse IP %(ip)s est déjà dans le sous-réseaux %(subnet_id)s"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr ""
-"L'adresse IP %(ip_address)s n'est pas une adresse IP valide pour les sous-"
-"réseaux du réseau indiqué."
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr ""
-"L'adresse IP %(ip_address)s n'est pas une adresse IP valide pour le sous-"
-"réseau indiqué."
-
-msgid "IP address used by Nova metadata server."
-msgstr "Adresse IP utilisée par le serveur de métadonnées Nova"
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "L'allocation d'adresse IP requiert subnet_id ou ip_address"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply n'a pas pu appliquer l'ensemble d'iptables suivant "
-"iptables :\n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"L'adresse IPv6 %(address)s ne peut pas être directement affectée à un port "
-"du sous-réseau %(id)s car celui-ci est configuré pour l'obtention "
-"automatique d'adresses"
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"Le sous-réseau IPv6 %s configuré pour recevoir les avertissements (RA) d'un "
-"routeur externe ne peut pas être ajouté au routeur Neutron."
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"Si True, l'effort est mis sur la publication des paramètres d'unité de "
-"transmission maximale dans les machines virtuelles via les méthodes réseau "
-"(options d'unité de transmission maximale DHCP et RA) lorsque l'unité de "
-"transmission maximale préférée du réseau est connue."
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-"Si True, autorisez les plug-in qui les prennent en charge pour créer les "
-"réseaux VLAN transparents."
-
-msgid "Illegal IP version number"
-msgstr "Numéro de version IP illégal"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr ""
-"Espace préfixe insuffisant pour l'allocation de la taille de sous-réseau /%s"
-
-msgid "Insufficient rights for removing default security group."
-msgstr "Droits insuffisants pour retirer le groupe de sécurité par défaut."
-
-msgid "Interface to monitor"
-msgstr "Interface à surveiller"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr ""
-"Intervalle entre les vérifications de l'activité du processus enfant (en "
-"secondes). Utilisez 0 pour désactiver"
-
-msgid "Interval between two metering measures"
-msgstr "Intervalle entre deux mesures"
-
-msgid "Interval between two metering reports"
-msgstr "Intervalle entre deux rapports de mesures"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"CIDR %s non valide pour le mode d'adressage IPv6. OpenStack utilise le "
-"format d'adresse EUI-64 qui exige le préfixe /64."
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "Périphérique non valide %(dev_name)s : %(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-"Type d'authentification non valide : %(auth_type)s, les types valides sont : "
-"%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid content type %(content_type)s."
-msgstr "Le type de contenu %(content_type)s est invalide."
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "Format de données non valide pour le pool IP : '%s'"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "Format de données non valide pour extra-dhcp-opt : %(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "Format de données non valide pour l'adresse IP fixe : '%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "Format de données non valide pour la route hôte : '%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "Format de données non valide pour le serveur de noms : '%s'"
-
-#, python-format
-msgid "Invalid extension environment: %(reason)s."
-msgstr "Environnement d'extensions non valide : %(reason)s."
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "Format de routes non valide : %(routes)s, %(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "Format non valide : %s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "Entrée non valide pour %(attr)s. Cause : %(reason)s."
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "Entrée non valide pour l'opération : %(error_message)s."
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"Entrée non valide. '%(target_dict)s' doit être un dictionnaire avec les "
-"clés : %(expected_keys)s"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr ""
-"Etat d'instance non valide : %(state)s, les états valides sont : "
-"%(valid_states)s"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "Mappage non valide : '%s'"
-
-#, python-format
-msgid "Invalid network VXLAN port range: '%(vxlan_range)s'."
-msgstr "Réseau non valide pour le range port VXLAN: '%(vxlan_range)s'."
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "Port pci invalide %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr ""
-"Format de fournisseur non valide. La dernière partie doit être 'default' ou "
-"vide : %s"
-
-#, python-format
-msgid "Invalid resource type %(resource_type)s"
-msgstr "Ressource type %(resource_type)s non valide"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "Chemin non valide : %s"
-
-msgid "Invalid service provider format"
-msgstr "Format de fournisseur de service non valide"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"Valeur non valide pour ICMP %(field)s (%(attr)s) %(value)s. Elle doit être "
-"comprise entre 0 et 255."
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "Valeur non valide pour le port %(port)s"
-
-msgid "Keepalived didn't respawn"
-msgstr "Keepalived n'a pas été relancée"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "Clé %(key)s non unique dans le mappage '%(mapping)s'"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "La limite doit être un entier supérieur ou égal à 0, et non '%d'"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "Limiter le nombre de baux pour éviter un déni de service."
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"Liste de <physical_network>:<vlan_min>:<vlan_max> ou <physical_network> "
-"indiquant des noms physical_network utilisables pour les réseaux de "
-"fournisseurs VLAN et de locataires, ainsi que les plages d'étiquettes VLAN "
-"disponibles dans le cadre de l'allocation aux réseaux locataires."
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"Liste des points d'entrées du pilote de type de réseau à charger à partir de "
-"l'espace de nom neutron.ml2.type_drivers."
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "Adresse IP locale des points de terminaison VXLAN."
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "Adresse IP locale de noeud final de tunnel."
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "Emplacement du socket de domaine UNIX du proxy de métadonnées."
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "Emplacement du socket de domaine UNIX du proxy de métadonnées"
-
-msgid "Location of pid file of this process."
-msgstr "Emplacement du fichier pid de ce processus."
-
-msgid "Location to store DHCP server config files"
-msgstr "Emplacement de stockage des fichiers de configuration du serveur DHCP"
-
-msgid "Location to store IPv6 PD files."
-msgstr "Emplacement pour stocker les fichiers IPv6 PD"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "Emplacement de stockage des fichiers de configuration IPv6 RA"
-
-msgid "Location to store child pid files"
-msgstr "Emplacement de stockage des fichiers de PID enfant"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr ""
-"Emplacement de stockage des fichiers de configuration keepalived/conntrackd"
-
-msgid "MTU setting for device."
-msgstr "Paramètre MTU de l'unité."
-
-msgid "MTU size of veth interfaces"
-msgstr "Taille de MTU des interfaces veth"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "Exécuter l'agent l2 dans le mode DVR."
-
-msgid "Malformed request body"
-msgstr "Format de corps de demande incorrect"
-
-#, python-format
-msgid "Malformed request body: %(reason)s."
-msgstr "Format de corps de demande incorrect : %(reason)s"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "Nombre maximal de paires d'adresses autorisé"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "Nombre maximal de routes hôte par sous-réseau"
-
-msgid "Metering driver"
-msgstr "Pilote de décompte"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "L'étiquette de mesure %(label_id)s n'existe pas"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "La règle d'étiquette de mesure %(rule_id)s n'existe pas"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"La règle d'étiquette de mesure avec remote_ip_prefix %(remote_ip_prefix)s "
-"chevauche un(e) autre"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr ""
-"Réduire au minimum l'interrogation en surveillant les changements "
-"d'interface de l'ovsdb."
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "Clé manquante dans le mappage : '%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "Valeur manquante dans le mappage : '%s'"
-
-msgid "More than one external network exists."
-msgstr "Plusieurs réseaux externes existent."
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr ""
-"Plusieurs agents trouvés avec agent_type=%(agent_type)s et host=%(host)s"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "Fournisseurs multiples par défaut pour le service %s"
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "Plusieurs plug-in pour le service %s ont été configurés."
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "Fournisseurs multiples indiqués pour le service %s"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr ""
-"L'existence de plusieurs ID titulaire n'est pas autorisée lors de la "
-"création du règle de groupe de sécurité en bloc."
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr ""
-"Le protocole doit également être spécifié si une plage de ports est indiquée."
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr ""
-"Doit indiquer une ou plusieurs actions sur l'ajout ou la modification de flux"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr ""
-"Le nom '%s' doit comprendre entre 1 et 63 caractères (seuls les caractères "
-"alphanumériques et le trait d'union sont admis)."
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "Le nom '%s' ne doit pas commencer ni se terminer par un trait d'union."
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "Nom du pont Open vSwitch à utiliser"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr ""
-"Nom de la région nova à utiliser. Utile si keystone gère plusieurs régions."
-
-msgid "Name of the FWaaS Driver"
-msgstr "Nom du pilote FWaaS"
-
-msgid "Namespace of the router"
-msgstr "Espace de nom du routeur"
-
-msgid "Native pagination depend on native sorting"
-msgstr "La mise en page native dépend du tri natif"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "Delta négatif (rétromigration) non pris en charge"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "Révision relative négative (rétromigration) non prise en charge"
-
-#, python-format
-msgid "Network %(net_id)s could not be found."
-msgstr "Le réseau %(net_id)s est introuvable."
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "Le réseau %s n'est pas un réseau externe valide."
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "Réseau %s n'est pas un réseau externe"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"Le réseau de taille %(size)s, de plage IP %(parent_range)s (hors plages IP "
-"%(excluded_ranges)s) est introuvable."
-
-msgid "Network that will have instance metadata proxied."
-msgstr "Réseau dont les métadonnées d'instance seront mandatées."
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "Valeur de type de réseau '%s' non prise en charge"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "Valeur de type de réseau requise par le plug-in ML2"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr "Types de réseau pris en charge par l'agent (gre et/ou vxlan)."
-
-msgid "Neutron Service Type Management"
-msgstr "Gestion du type de service Neutron"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "Neutron core_plugin n'est pas configuré!"
-
-msgid "Neutron plugin provider module"
-msgstr "Module du fournisseur de plug-in Neutron"
-
-msgid "Neutron quota driver class"
-msgstr "Classe de pilote du quota Neutron"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr "Aucun agent l3 admissible trouvé associé au réseau %s"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "Pas d'autres adresses IP disponibles sur le réseau %(net_id)s."
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"Plus d'identificateur de routeur virtuel disponible lors de la création du "
-"routeur %(router_id)s. Le nombre maximum de routeurs haute disponibilité par "
-"locataire est 254."
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "Aucun fournisseur indiqué pour le service de '%s', sortie"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"Vous ne pouvez pas affecter manuellement le routeur %(router_type)s "
-"%(router_id)s du noeud DVR existant vers l'agent L3 %(agent_id)s."
-
-msgid "Not authorized."
-msgstr "Non autorisé."
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"Nombre d'agents L3 insuffisant pour assurer la haute disponibilité. Nombre "
-"minimum requis : %(min_agents)s, nombre disponible : %(num_agents)s."
-
-msgid "Number of RPC worker processes for service"
-msgstr "Nombre de processus RPC pour le service"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr ""
-"Nombre de demandes en attente avec lequel configurer le socket du serveur de "
-"métadonnées"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr "Nombre de demandes en attente avec lequel configurer le socket"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Nombre d'adresses IP flottantes autorisées par locataire. Une valeur "
-"négative signifie illimité."
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Nombre de réseaux autorisés par le locataire. Une valeur négative signifie "
-"illimité"
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Nombre de ports autorisés par le locataire. Une valeur négative signifie "
-"illimité"
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Nombre de routeurs autorisés par locataire. Une valeur négative signifie "
-"illimité"
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr ""
-"Nombre de secondes entre deux envois d'événements à nova s'il y a des "
-"événements à envoyer."
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "Nombre de secondes a attendre avant d'essayer d'écouter à nouveau"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Nombre de groupes de sécurité autorisés par locataire. Une valeur négative "
-"signifie illimité."
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Nombre de règles de sécurité autorisées par locataire. Une valeur négative "
-"signifie illimité."
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr ""
-"Nombre de sous-réseaux autorisés par le locataire. Une valeur négative "
-"signifie illimité"
-
-msgid "OK"
-msgstr "OK"
-
-#, python-format
-msgid "OVSDB Error: %s"
-msgstr "Erreur OVSDB: %s "
-
-#, python-format
-msgid "Object %(id)s not found."
-msgstr "Objet %(id)s non trouvé."
-
-#, python-format
-msgid "Object action %(action)s failed because: %(reason)s."
-msgstr "L'action de l'objet %(action)s a échoué car : %(reason)s"
-
-msgid "Only admin can view or configure quota"
-msgstr "Seul l'administrateur peut afficher ou configurer des quotas"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr ""
-"Seul l'administrateur est autorisé à accéder aux quotas d'un autre locataire"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr ""
-"Les règles peuvent être mises à jour pour un seul profil de sécurité à la "
-"fois."
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "Seul remote_ip_prefix ou remote_group_id peut être fourni."
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"L'opération %(op)s n'est pas prise en charge pour device_owner "
-"%(device_owner)s sur le port %(port_id)s."
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr "Remplacez les paramètres dnsmasq par défaut par ce fichier."
-
-msgid "Owner type of the device: network/compute"
-msgstr "Type de propriétaire de l'unité : réseau/ordinateur"
-
-msgid "POST requests are not supported on this resource."
-msgstr "Les requêtes POST ne sont pas prises en charge sur cette ressource."
-
-#, python-format
-msgid "Package %s not installed"
-msgstr "Le package %s n'est pas installé"
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "Echec de l'analyse syntaxique bridge_mappings : %s."
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "Echec de l'analyse syntaxique des pci_vendor_devs pris en charge"
-
-msgid "Path to PID file for this process"
-msgstr "Chemin d'accès au fichier PID pour ce processus"
-
-msgid "Path to the router directory"
-msgstr "Chemin d'accès au répertoire du routeur"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr ""
-"Port correctif homologue dans le pont d'intégration pour le pont de tunnel."
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr ""
-"Port correctif homologue dans le pont d'intégration tunnel pour le pont "
-"d'intégration."
-
-msgid "Ping timeout"
-msgstr "Délai d'expiration de la commande ping"
-
-#, python-format
-msgid "Plugin '%s' not found."
-msgstr "Le plugin '%s' n'est pas trouvé."
-
-msgid "Plugin does not support updating provider attributes"
-msgstr ""
-"Le plug-in ne prend pas en charge la mise à jour des attributs de fournisseur"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "Le port %(id)s ne dispose pas de l'adresse IP fixe %(address)s."
-
-#, python-format
-msgid "Port %(port)s does not exist on %(bridge)s!"
-msgstr "Le port %(port)s au sein du pont %(bridge)s"
-
-#, python-format
-msgid "Port %(port_id)s could not be found on network %(net_id)s."
-msgstr "Le port %(port_id)s est inexistant au sein du réseau %(net_id)s."
-
-#, python-format
-msgid "Port %(port_id)s could not be found."
-msgstr "Le port %(port_id)s est introuvable."
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"Le port %(port_id)s est associé à un titulaire différent de celui de "
-"l'adresse IP flottante %(floatingip_id)s et ne peut donc pas être lié. "
-
-#, python-format
-msgid "Port %s does not exist"
-msgstr "Le port %s est inexistant."
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr ""
-"La sécurité du port doit être activée pour avoir les paires d'adresse "
-"autorisées sur un port."
-
-msgid "Port does not have port security binding."
-msgstr "Le port ne comporte pas de liaison de sécurité."
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-"Un groupe de sécurité est associé au port. Impossible de désactiver la "
-"sécurité ou l'adresse IP du port tant que le groupe de sécurité est supprimé"
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr ""
-"La sécurité du port doit être activée et le port doit avoir une adresse IP "
-"pour utiliser les groupes de sécurité."
-
-msgid "Private key of client certificate."
-msgstr "Clé privée pour le certificat client."
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "Sonde %s supprimée"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "Sonde créée : %s "
-
-msgid "Process is already started"
-msgstr "Le processus est déjà démarré"
-
-msgid "Process is not running."
-msgstr "Le processus n'est pas en fonctionnement."
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "Protocole d'accès aux métadonnées de nova, HTTP ou https"
-
-#, python-format
-msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s."
-msgstr ""
-"Stratégie QoS %(policy_id)s est utilisée par %(object_type)s %(object_id)s."
-
-#, python-format
-msgid "QoS policy %(policy_id)s could not be found."
-msgstr "La politique de QoS %(policy_id)s est introuvable."
-
-#, python-format
-msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found."
-msgstr ""
-"La règle QoS %(rule_id)s pour la politique %(policy_id)s est inexistante."
-
-#, python-format
-msgid "Quota exceeded for resources: %(overs)s."
-msgstr "Quota dépassé pour les ressources : %(overs)s"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"Intervalle en secondes de retard au hasard lors du démarrage du "
-"planificateur de tâches périodiques de manière à réduire les encombrements "
-"(définissez ce chiffre sur 0 pour désactiver cette fonction)."
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr ""
-"Le serveur de métadonnées distant a subi une erreur de serveur interne."
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"Représentation du type de ressource dont la charge est signalée par l'agent. "
-"Il peut s'agir de \"réseaux\", \"sous-réseaux\" ou \"ports\". Lorsqu'il est "
-"spécifié (la valeur par défaut est réseaux), le serveur extrait la charge "
-"particulière envoyée en tant que composant de son objet de configuration "
-"d'agent depuis l'état de rapport d'agent, qui correspond au nombre de "
-"ressources consommées, à chaque intervalle report_interval.dhcp_load_type, "
-"et pouvant être utilisées en combinaison avec network_scheduler_driver = "
-"neutron.scheduler.dhcp_agent_scheduler.WeightScheduler Lorsque "
-"network_scheduler_driver est WeightScheduler, dhcp_load_type peut être "
-"configuré pour représenter le choix pour la ressource équilibrée. Exemple : "
-"dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr ""
-"Echec de la demande : erreur de serveur interne lors du traitement de votre "
-"demande."
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"La demande contient la paire d'adresse en double : mac_address "
-"%(mac_address)s ip_address %(ip_address)s."
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"Le sous-réseau demandé avec le routage CIDR : %(cidr)s pour le réseau : "
-"%(network_id)s chevauche un autre sous-réseau"
-
-#, python-format
-msgid "Resource %(resource_id)s of type %(resource_type)s not found"
-msgstr "Ressource %(resource_id)s de type %(resource_type)s non trouvée."
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"La ressource '%(resource_id)s' est déjà associée au fournisseur "
-"'%(provider)s' pour le type de service '%(service_type)s'"
-
-msgid "Resource body required"
-msgstr "Corps de ressource obligatoire"
-
-msgid "Resource not found."
-msgstr "Ressource non trouvé."
-
-msgid "Resources required"
-msgstr "Ressources obligatoires"
-
-msgid "Root helper daemon application to use when possible."
-msgstr ""
-"Application de démon d'assistant racine à utiliser en cas de possibilité."
-
-msgid "Root permissions are required to drop privileges."
-msgstr "Les droits root sont obligatoires pour supprimer des privilèges."
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "Routeur %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "Le routeur %(router_id)s est introuvable."
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr ""
-"Le routeur %(router_id)s ne comporte pas d'interface avec l'ID %(port_id)s."
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr ""
-"Le routeur %(router_id)s ne comporte pas d'interface sur le sous-réseau "
-"%(subnet_id)s."
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "Le routeur dispose déjà d'un port sur le sous-réseau %s."
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"L'interface de routeur du sous-réseau %(subnet_id)s sur le routeur "
-"%(router_id)s être supprimée car elle est requise par une ou plusieurs "
-"adresses IP flottantes."
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"L'interface de routeur du sous-réseau %(subnet_id)s sur le routeur "
-"%(router_id)s être supprimée car elle est requise par une ou plusieurs "
-"routes."
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr "Routeur dont les métadonnées d'instance connectées seront mandatées."
-
-msgid "Run as daemon."
-msgstr "Exécuter en tant que démon."
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"Secondes entre les noeuds signalant l'état au serveur ; cette valeur doit "
-"être inférieure à agent_down_time, et au mieux, inférieure ou égale à la "
-"moitié de agent_down_time."
-
-msgid "Seconds between running periodic tasks"
-msgstr "Temps en secondes entre deux tâches périodiques"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"Nombre de secondes avant de considérer que l'agent est arrêté ; cette valeur "
-"doit être au moins le double de report_interval, pour s'assurer que l'agent "
-"est effectivement arrêté."
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "Le groupe de sécurité %(id)s n'existe pas."
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "La règle de groupe de sécurité %(id)s n'existe pas."
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "La règle de groupe de sécurité existe déjà. L'ID règle est %(id)s."
-
-msgid "Segments and provider values cannot both be set."
-msgstr ""
-"Il n'est pas possible de définir à la fois des segments et des valeurs de "
-"fournisseur."
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"Envoyer une notification à nova lors de la modification des données de port "
-"(fixed_ips/floatingip) pour que nova puisse mettre à jour son cache."
-
-msgid "Send notification to nova when port status changes"
-msgstr ""
-"Envoyer une notification à nova lors de la modification du statut de port"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"Envoyez ces nombreux protocoles de résolution d'adresse gratuits pour la "
-"configuration haute disponibilité (HA), si la valeur est inférieure ou égale "
-"à 0, la fonction est désactivée"
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-"Fournisseur de services '%(provider)s' introuvable pour le type de service "
-"%(service_type)s"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr ""
-"Le type de service %(service_type)s ne possède pas de fournisseur de "
-"services par défaut"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"Redéfinir le délai d'attente (en secondes) des nouveaux appels RPC observé "
-"une fois que l'agent a reçu SIGTERM. Si la valeur est définie sur 0, le "
-"délai d'attente RPC reste inchangé"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"Définissez ou annulez la définition du bit de fragment sur le paquet IP "
-"sortant véhiculant le tunnel GRE/VXLAN."
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"Certains locataires possèdent plusieurs groupes de sécurité nommés "
-"'default': %(duplicates)s. Tous les groupes de sécurité 'default' en double "
-"doivent être résolus avant la mise à niveau de la base de données."
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr ""
-"Pour indiquer un 'tenant_id' autre qu'un titulaire authentifié dans la "
-"demande, vous devez disposer de droits admin "
-
-#, python-format
-msgid "Sub-project %s not installed."
-msgstr "Le sous projet %s n'est pas installé."
-
-#, python-format
-msgid "Subnet %(subnet_id)s could not be found."
-msgstr "Le sous-réseau %(subnet_id)s est introuvable."
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr ""
-"Le sous-réseau de l'interface de routeur doit avoir une adresse IP "
-"passerelle."
-
-#, python-format
-msgid "Subnet pool %(subnetpool_id)s could not be found."
-msgstr "Le pool de sous-réseaux %(subnetpool_id)s est introuvable."
-
-msgid "Subnet pool has existing allocations"
-msgstr "Le pool de sous-réseau dispose d'allocations existantes"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr ""
-"Sous-réseau utilisé pour le réseau administrateur haute disponibilité L3."
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr ""
-"Indicateur système pour déterminer le type de router que les locataires "
-"peuvent créer. Seul l'administrateur peut outrepasser cela"
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "Port TCP d'écoute des demandes du serveur de métadonnées"
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr "Port TCP utilisé par le proxy d'espace de nom de métadonnées Neutron"
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Port TCP utilisé par le serveur de métadonnées Nova"
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "TLD '%s' ne doit pas être entièrement numérique"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "TOS pour les paquets du protocole d'interface vxlan."
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "Durée de vie pour les paquets du protocole d'interface vxlan."
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr ""
-"Titulaire %(tenant_id)s non autorisé à créer %(resource)s sur ce réseau"
-
-msgid "Tenant network creation is not enabled."
-msgstr "La création de réseau titulaire n'est pas activée."
-
-msgid "Tenant-id was missing from quota request."
-msgstr "ID titulaire manquant dans la demande de quota."
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"L'option 'gateway_external_network_id' doit être configuré pour cet agent "
-"car Neutron a plus d'un réseau externe."
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr ""
-"Le routage CIDR du réseau haute disponibilité indiqué dans le fichier de "
-"configuration n'est pas valide ; %(cidr)s."
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "Port UDP a utiliser pour les tunnels VXLAN."
-
-msgid "The advertisement interval in seconds"
-msgstr "Intervalle de publication en secondes"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "Le pool d'allocation %(pool)s n'est pas valide."
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-"Le pool d'allocation %(pool)s s'étend au-delà du routage CIDR de sous-réseau "
-"%(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr ""
-"L'attribut '%(attr)s' fait référence à une autre ressource, impossible de "
-"l'utiliser pour le type '%(resource)s'"
-
-msgid "The core plugin Neutron will use"
-msgstr "Le core plugin de Neutron va etre utiliser"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "Pilote utilisé pour gérer le serveur DHCP"
-
-msgid "The driver used to manage the virtual interface."
-msgstr "Pilote utilisé pour gérer l'interface virtuelle"
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"Le device_id %(device_id)s suivant n'appartient pas à votre locataire ou "
-"correspond au routeur d'un autre locataire."
-
-msgid "The host IP to bind to"
-msgstr "Protocole IP hôte à connecter"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "Interface d'interaction avec OVSDB"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-"Nombre maximal d'éléments renvoyés dans une seule réponse, valeur définie "
-"sur 'infinite' ou sur un entier négatif qui signifie illimité"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr ""
-"Le réseau %(network_id)s est déjà hébergé par l'agent DHCP %(agent_id)s."
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr ""
-"Le réseau %(network_id)s n'est pas hébergé par l'agent DHCP %(agent_id)s."
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr "Le nombre de paires d'adreses autorisé dépasse le maximum %(quota)s."
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr ""
-"Temps en secondes pendant lequel l'agent attendra les interrogations sur les "
-"modifications de l'unité locale."
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-"Le nombre de secondes d'attente avant de régénérer le moniteur ovsdb après "
-"avoir perdu la communication avec ce dernier."
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr ""
-"Le nombre de clés de tri (sort_keys) et de répertoires de tri (sort_dirs) "
-"doit être identique"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "Le port '%s' a été supprimé"
-
-msgid "The port to bind to"
-msgstr "Port à connecter"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "Le type de contenu %s de la requete est invalide."
-
-msgid "The resource could not be found."
-msgstr "La ressource est introuvable."
-
-msgid "The resource is in use."
-msgstr "La ressource est en cours d'utilisation."
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr "Le routeur %(router_id)s est déjà hébergé par l'agent L3 %(agent_id)s."
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr ""
-"Le serveur a perdu la connexion ou est incapable d'effectuer l'opération "
-"demandée."
-
-msgid "The service is unavailable."
-msgstr "Le service n'est pas disponible."
-
-msgid "The service plugins Neutron will use"
-msgstr "Plug-in de service utilisés ultérieurement par Neutron"
-
-msgid "The type of authentication to use"
-msgstr "Type d'authentification à utiliser"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "La valeur '%(value)s' pour %(element)s n'est pas valide."
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"Mode de fonctionnement de l'agent. Les modes sont : 'legacy' - ceci préserve "
-"le comportement existant où l'agent de niveau 3 est déployé sur un noeud "
-"centralisé de mise en réseau pour fournir des services de niveau 3 comme "
-"DNAT et SNAT. Utilisez ce mode si vous ne voulez pas adopter le routeur "
-"virtuel distribué (DVR). 'dvr' - ce mode active la fonctionnalité DVR et "
-"doit être utilisé pour un agent de niveau 3 qui s'exécute sur un hôte de "
-"traitement. 'dvr_snat' - active la prise en charge SNAT centralisée "
-"conjointement avec DVR.  Ce mode doit être utilisé pour un agent de niveau 3 "
-"fonctionnant sur un noeud centralisé (ou dans des déploiements à un seul "
-"hôte, par ex. devstack)"
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"La valeur est vraie pour la suppression de tous les ports sur tous les ponts "
-"OpenvSwitch. Elle est fausse pour la suppression des ports créés par Neutron "
-"lors de l'intégration et des ponts de réseau externes."
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "Valeur IP de tunnel requise par le plug-in ML2"
-
-msgid "Tunnel bridge to use."
-msgstr "Pont de tunnel à utiliser."
-
-msgid "URL to database"
-msgstr "URL de la base de données"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "Impossible d'accéder à %s"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr ""
-"Impossible de terminer l'opération pour %(router_id)s. Le nombre de routes "
-"dépasse le maximum %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"Impossible de terminer l'opération pour le sous-réseau %(subnet_id)s. Le "
-"nombre de serveurs DNS dépasse la limite %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr ""
-"Impossible de terminer l'opération pour le sous-réseau %(subnet_id)s. Le "
-"nombre de routes hôtes dépasse la limite %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-"Impossible de terminer l'opération pour le réseau %(net_id)s. L'adresse IP "
-"%(ip_address)s est en cours d'utilisation."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr ""
-"Impossible de terminer l'opération pour le réseau %(net_id)s. L'adresse Mac "
-"%(mac)s est en cours d'utilisation."
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"Impossible de terminer l'opération sur le réseau %(net_id)s. Un ou plusieurs "
-"ports sont encore en cours d'utilisation sur le réseau."
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"Impossible de terminer l'opération sur le port %(port_id)s pour le réseau "
-"%(net_id)s. Le port a déjà une unité connectée %(device_id)s."
-
-#, python-format
-msgid "Unable to complete operation on subnet %(subnet_id)s %(reason)s."
-msgstr ""
-"Impossible de finaliser l'opération sur le sous réseau %(subnet_id)s  pour "
-"la raison %(reason)s."
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "Impossible de convertir la valeur en %s"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "Impossible de créer le port de passerelle d'agent"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "Impossible de créer l'interface du port SNAT"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr ""
-"Impossible de créer le réseau centralisé. Le réseau physique "
-"%(physical_network)s est en cours d'utilisation "
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr ""
-"Impossible de créer le réseau. Aucun réseau disponible trouvé dans le "
-"maximum de tentatives autorisées."
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr ""
-"Impossible de créer le réseau. Aucun réseau titulaire n'est disponible pour "
-"l'allocation. "
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"Impossible de créer le réseau. Le réseau local virtuel %(vlan_id)s situé sur "
-"le réseau physique %(physical_network)s est en cours d'utilisation. "
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr ""
-"Incapable de créer le réseau. L' ID tunnel %(tunnel_id)s est en cours "
-"d'utilisation."
-
-#, python-format
-msgid "Unable to delete subnet pool: %(reason)s."
-msgstr "Impossible de supprimer le pool de sous-réseau : %(reason)s."
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "Impossible de déterminer l'adresse mac pour %s"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "Impossible de trouver '%s' dans la corps de demande"
-
-#, python-format
-msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s"
-msgstr ""
-"Impossible de trouver l'adresse IP %(ip_address)s dans le sous réseau "
-"%(subnet_id)s"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr "Impossible de trouver une adresse IP sur le réseau externe %(net_id)s."
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "Impossible de trouver le nom de la ressource dans %s"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "Impossible de générer l'adresse IP par EUI64 pour le préfixe IPv4"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "Impossible de générer une adresse MAC unique pour l'hôte %(host)s."
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "Impossible de générer une adresse MAC unique sur le réseau %(net_id)s."
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-"Impossible d'identifier une zone cible à partir de : %s. La correspondance "
-"doit être au format %%(<field_name>)s"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr ""
-"Impossible de vérifier la correspondance %(match)s comme ressource parent : "
-"%(res)s n'a pas été trouvée"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "Code de réponse inattendu : %s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "Réponse inattendue : %s"
-
-msgid "Unimplemented commands"
-msgstr "Commandes non implémentées"
-
-msgid "Unknown API version specified"
-msgstr "Version de l'API spécifié inconnu"
-
-#, python-format
-msgid "Unknown address type %(address_type)s"
-msgstr "Type d'adresse inconnu %(address_type)s"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "Attribut inconnu '%s'."
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "Chaîne inconnue : %r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "Ressources de quota inconnues %(unknown)s."
-
-msgid "Unmapped error"
-msgstr "Erreur de non-correspondance"
-
-msgid "Unrecognized action"
-msgstr "Action inconnu"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "Attribut(s) non reconnu(s) '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "Type de contenu non pris en charge"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "Le type de réseau %(net_type)s n'est pas pris en charge."
-
-#, python-format
-msgid "Unsupported port state: %(port_state)s."
-msgstr "L'état du port n'est pas supporté: %(port_state)s."
-
-msgid "Unsupported request type"
-msgstr "Type de demande non pris en charge"
-
-msgid "Updating default security group not allowed."
-msgstr "Mise à jour du groupe de sécurité par défaut non autorisée"
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"Utilisez le pilote de mécanisme l2population ML2 pour connaître les adresses "
-"MAC et IP et pour améliorer l'évolutivité du tunnel."
-
-msgid "Use broadcast in DHCP replies"
-msgstr "Utilisez diffusion dans les réponses DHCP"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr ""
-"Utiliser soit un --delta, soit une révision relative, mais pas les deux"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr ""
-"Utilisateur (UID ou nom) exécutant le proxy de métadonnées après son "
-"initialisation"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"Utilisateur (UID ou nom) exécutant le proxy de métadonnées après son "
-"initialisation (si vide : utilisateur effectif de l'agent)."
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr "Utilisateur (UID ou nom) exécutant ce process après son initialisation"
-
-#, python-format
-msgid "User does not have admin privileges: %(reason)s."
-msgstr ""
-"L'utilisateur n'a pas les privilèges administrateur pour la raison: "
-"%(reason)s"
-
-msgid "VRRP authentication password"
-msgstr "Mot de passe pour l'authentification VRRP"
-
-msgid "VRRP authentication type"
-msgstr "Type d'authentification VRRP"
-
-msgid "VXLAN network unsupported."
-msgstr "Réseau VXLAN non supporté."
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"Echec de la validation des clés du dictionnaire. Clés attendues : "
-"%(expected_keys)s Clés fournies : %(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "Le validateur '%s' n'existe pas."
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "Valeur %(value)s non unique dans le mappage '%(mapping)s'"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"Surveillance des fichiers journaux. La surveillance des journaux doit être "
-"désactivée lorsque metadata_proxy_user/group ne dispose pas des droits de "
-"lecture/d'écriture sur le fichier journal du proxy de métadonnées."
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr ""
-"Où stocker des fichiers d'état de Neutron. Ce répertoire doit être "
-"accessible en écriture par l'agent."
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"Avec IPv6, le réseau utilisé pour la passerelle externe ne doit pas "
-"obligatoirement disposer d'un sous-réseau associé, étant donné que l'adresse "
-"link-local (LLA) automatiquement affectée peut être utilisée. En revanche, "
-"une adresse de passerelle IPv6 est nécessaire pour pouvoir faire un saut sur "
-"le chemin par défaut. Si aucune adresse de passerelle IPv6 n'estconfigurée "
-"dans ce cas, le routeur Neutron sera configuré pour obtenir son chemin par "
-"défaut (et uniquement dans ce but) à partir des annonces du routeur en "
-"amont ; dans cette situation, le routeur en amont doit être également "
-"configuré pour envoyer lesdites annonces. ipv6_gateway, lorsqu'il est "
-"configuré, doit constituer la LLA de l'interface du routeur en amont. Si un "
-"saut utilisantune adresse unique globale (GUA) est souhaité, il doit être "
-"effectué via un sous-réseau attribué au réseau, et non pas par "
-"l'intermédiaire de ce paramètre. "
-
-msgid "You must implement __call__"
-msgstr "Vous devez implémenter __call__"
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-"Vous devez fournir un fichier de configuration pour le pont --config-file ou "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "Vous devez fournir une révision ou un delta relatif."
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr ""
-"allocation_pools autorisé uniquement pour les requêtes de sous-réseau "
-"spécifiques."
-
-msgid "binding:profile value too large"
-msgstr "Valeur de liaison:profil excessive"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "cidr et prefixlen ne doivent pas être fournis ensemble"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr "dhcp_agents_per_network doit etre >= 1. '%s' n'est pas valide"
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "Impossible de spécifier une adresse IP fixe sans ID port"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "a le propriétaire de terminal %s"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "Echec de la commande sur le périphérique %(dev_name)s : %(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "Fonctionnalité de liaison IP %(capability)s non prise en charge"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "Commande link IP non prise en charge : %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr ""
-"ip_version doit être indiqué si cidr et subnetpool_id ne sont pas définis"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ipv6_address_mode est non valide quand ip_version est 4"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ipv6_ra_mode est non valide quand ip_version est 4"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"ipv6_ra_mode ou ipv6_address_mode ne peut pas être défini si enable_dhcp a "
-"la valeur False."
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"ipv6_ra_mode défini sur '%(ra_mode)s' avec ipv6_address_mode défini sur "
-"'%(addr_mode)s' n'est pas correct. Si les deux attributs sont définis, ils "
-"doivent avoir la même valeur"
-
-msgid "mac address update"
-msgstr "Mise à jour d'adresse MAC"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"Le paramètre de configuration max_l3_agents_per_router %(max_agents)s n'est "
-"pas valide. Il doit être supérieur ou égal à min_l3_agents_per_router "
-"%(min_agents)s."
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"Le paramètre de configuration min_l3_agents_per_router n'est pas valide. Il "
-"doit être supérieur ou égal à %s pour la haute disponibilité."
-
-msgid "network_type required"
-msgstr "network_type requis"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "Valeur network_type '%s' non prise en charge"
-
-msgid "new subnet"
-msgstr "nouveau sous-réseau"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr ""
-"physical_network '%s' inconnu pour le réseau de fournisseurs de réseau local "
-"virtuel"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr ""
-"physical_network '%s' inconnu pour le réseau de fournisseurs non hiérarchique"
-
-msgid "physical_network required for flat provider network"
-msgstr ""
-"physical_network obligatoire pour le réseau de fournisseurs non hiérarchique"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "provider:physical_network spécifié pour le réseau %s"
-
-msgid "record"
-msgstr "enregistrement "
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "respawn_interval doit être >= 0 si fourni."
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id hors plage (%(min)s à %(max)s)"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr ""
-"segmentation_id requiert physical_network pour le réseau de fournisseurs de "
-"réseau local virtuel"
-
-msgid "the nexthop is not connected with router"
-msgstr "nexthop n'est pas connecté au routeur"
-
-msgid "the nexthop is used by router"
-msgstr "nexthop est utilisé par le routeur"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"Identificateur unique universel fourni dans la ligne de commande afin de "
-"permettre à external_process d'effectuer le suivi de l'uuid via l'interface /"
-"proc/cmdline."
diff --git a/neutron/locale/it/LC_MESSAGES/neutron.po b/neutron/locale/it/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index b7e7e93..0000000
+++ /dev/null
@@ -1,2405 +0,0 @@
-# Italian translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-09-06 10:15+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: it\n"
-"Language-Team: Italian\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"Comando: %(cmd)s\n"
-"Codice di uscita: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: errore di driver interno."
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s non è un identificativo %(type)s valido"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"%(invalid_dirs)s non è un valore valido per sort_dirs, il valore valido è "
-"'%(asc)s' e '%(desc)s'"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(key)s non consentito per la rete del provider %(tunnel)s"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"%(method)s è stato chiamato con le impostazioni di rete %(current)s "
-"(impostazioni originali %(original)s) e segmenti di rete %(segments)s"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"%(method)s è stato chiamato con le impostazioni di sottorete %(current)s "
-"(impostazioni originali %(original)s)"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s non riuscito."
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr "%(name)s '%(addr)s' non corrisponde alla ip_version '%(ip_version)s'"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "%s Impossibile chiamare durante la modalità offline"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "%s è un attributo non valido per sort_key"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "%s è un attributo non valido per sort_keys"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s non un tag VLAN valido"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr "%s deve implementare get_port_from_device o get_ports_from_devices."
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "%s vietato per la rete del provider VLAN"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "%s vietato per rete flat del provider"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "%s è vietato per la rete del provider locale"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s' supera la lunghezza massima di %(max_len)s"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s' non è valido in %(valid_values)s"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s' è troppo esteso - non deve superare '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr "'%(data)s' è troppo piccolo - deve essere almeno '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr ""
-"'%(data)s' non è un cidr della sottorete IP riconosciuto, si consiglia "
-"'%(cidr)s'"
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' non è un nameserver valido. %(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s' stringhe vuote non consentite"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "'%s' impossibile convertirlo in booleano"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' contiene spazi vuoti"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "'%s' non è un dizionario"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s' non è un elenco"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s' non è un indirizzo IP valido"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s' non è una sottorete IP valida"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s' non è un'indirizzo MAC valido"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s' non è un valido UUID"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' non è un valore booleano valido"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "'%s' non è un input valido"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s' non è una stringa valida"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s' non è un numero intero"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s' non è un numero intero o uuid"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s' non è nel formato <key>=[value]"
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "'%s' non dovrebbe essere negativo"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "0 non è consentito come lunghezza del prefisso CIDR"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "È necessario specificare un cidr in assenza di un pool di sottoreti"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"Un elenco di associazioni di reti fisiche ai valori MTU. Il formato "
-"dell'associazione è <physnet>:<mtu val>. Questa associazione consente di "
-"specificare un valore MTU di rete fisica che differisce dal valore "
-"segment_mtu predefinito."
-
-msgid "A metering driver must be specified"
-msgstr "Specificare un driver di misurazione"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr ""
-"API per il richiamo dei provider del servizio per i servizi Neutron avanzati"
-
-msgid "Access to this resource was denied."
-msgstr "L'accesso a questa risorsa è stato negato."
-
-msgid "Action to be executed when a child process dies"
-msgstr "Azione da eseguire quando termina un processo child"
-
-msgid "Adds external network attribute to network resource."
-msgstr "Aggiunge l'attributo della rete esterna alla risorsa di rete."
-
-msgid "Adds test attributes to core resources."
-msgstr "Aggiunge gli attributi di test alle risorse principali."
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "Impossibile trovare l'agent %(id)s"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "L'agent %(id)s non è un agent L3 oppure è stato disabilitato"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "Agent %(id)s non è un agent DHCP valido oppure è stato disabilitato"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "Agent aggiornato: %(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr ""
-"Impossibile trovare l'agent con agent_type=%(agent_type)s e host=%(host)s"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "Consenti pianificazione automatica delle reti nell'agent DHCP."
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "Consenti pianificazione automatica dei router nell'agent L3."
-
-msgid "Allow running metadata proxy."
-msgstr "Consenti l'esecuzione del proxy di metadati."
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr "Consenti notifica operazione di invio risorse all'agent DHCP"
-
-msgid "Allow the usage of the bulk API"
-msgstr "Consenti l'utilizzo dell'API bulk"
-
-msgid "Allow the usage of the pagination"
-msgstr "Consenti utilizzo paginazione"
-
-msgid "Allow the usage of the sorting"
-msgstr "Consenti utilizzo ordinamento"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr ""
-"Consentire l'esecuzione di richieste SSL (https) non protette sui metadati "
-"nova"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair deve contenere ip_address"
-
-msgid "An interface driver must be specified"
-msgstr "È necessario specificare un driver di interfaccia"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"Un elenco ordinato dei punti di ingresso del driver del meccanismo di rete "
-"da caricare dallo spazio dei nomi neutron.ml2.mechanism_drivers."
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr "Si è verificato un errore sconosciuto. Ritentare la richiesta."
-
-msgid "An unknown exception occurred."
-msgstr "Si è verificata un'eccezione sconosciuta."
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "Attributo '%s' non consentito in POST"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "Rimuove automaticamente le reti dagli agent DHCP offline."
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr ""
-"Ripianifica automaticamente i router dagli agent L3 offline agli agent L3 "
-"online."
-
-msgid "Available commands"
-msgstr "Comandi disponibili"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "Il backend non supporta la trasparenza VLAN."
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"Prefisso errato o formato rac per la generazione dell'indirizzo IPv6 da "
-"EUI-64: %(prefix)s, %(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr ""
-"Tipo di prefisso errato per la generazione dell'indirizzo IPv6 da EUI-64: %s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "MAC base: %s"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "Il bridge %(bridge)s non esiste."
-
-msgid "Bulk operation not supported"
-msgstr "Operazione massiccia non supportata"
-
-msgid "CIDR to monitor"
-msgstr "CIDR da monitorare"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr ""
-"Impossibile aggiungere un IP mobile alla porta sulla sottorete %s che non "
-"dispone di un gateway_ip"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr ""
-"Impossibile assegnare la sottorete richiesta dall'insieme di prefissi "
-"disponibili"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"Impossibile associare un IP mobile %(floating_ip_address)s (%(fip_id)s) alla "
-"porta %(port_id)s utilizzando un IP fisso %(fixed_ip)s, in quanto quell'IP "
-"fisso ha già un IP mobile nella rete esterna %(net_id)s."
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr ""
-"Impossibile creare l'IP mobile e collegarlo alla porta %s, poiché tale porta "
-"è di proprietà di un tenant differente."
-
-msgid "Cannot create resource for another tenant"
-msgstr "Impossibile creare la risorsa per un altro tenant"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "Impossibile disabilitare enable_dhcp con gli attributi ipv6 impostati"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"Impossibile avere più porte router con lo stesso ID di rete se entrambe "
-"contengono sottoreti IPv6. La porta esistente %(p)s ha sottoreti IPv6 e ID "
-"di rete %(nid)s"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"Impossibile ospitare l'host %(router_type)s router %(router_id)s sull'agent "
-"%(agent_mode)s L3 %(agent_id)s."
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr ""
-"Impossibile seguire la priorità nell'eliminazione o modifica del flusso"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "Impossibile specificare entrambi subnet_id e port_id"
-
-msgid "Cannot understand JSON"
-msgstr "Impossibile riconoscere JSON"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "Impossibile aggiornare l'attributo di sola lettura %s"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr "File di chiave pubblica Certificate Authority (CA cert) per ssl"
-
-msgid "Check for ARP responder support"
-msgstr "Verifica il supporto responder ARP"
-
-msgid "Check for OVS vxlan support"
-msgstr "Verifica il supporto OVS vxlan"
-
-msgid "Check for VF management support"
-msgstr "Verifica il supporto di gestione VF management"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "Verifica il supporto iproute2 vxlan"
-
-msgid "Check for nova notification support"
-msgstr "Verifica il supporto di notifica nova"
-
-msgid "Check for patch port support"
-msgstr "Verifica il supporto porta patch"
-
-msgid "Check minimal dnsmasq version"
-msgstr "Verifica versione dnsmasq minima"
-
-msgid "Check netns permission settings"
-msgstr "Verifica le impostazioni di autorizzazione netns"
-
-msgid "Check ovsdb native interface support"
-msgstr "Verifica supporto interfaccia nativa ovsdb"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"Cidr %(subnet_cidr)s della sottorete %(subnet_id)s si sovrappone con il cidr "
-"%(cidr)s della sottorete %(sub_id)s"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "Certificato client per il server api dei metadati nova"
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"Elenco separato da virgole di intervalli di enumerazione tuple <tun_min>:"
-"<tun_max> ID tunnel GRE disponibili per l'assegnazione di rete tenant"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"Elenco separato da virgole di intervalli di enumerazione tuple <vni_min>:"
-"<vni_max> di VXLAN VNI ID disponibili per l'assegnazione della rete tenant"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr ""
-"Elenco separato da virgole dei server DNS che verranno utilizzati come "
-"server di inoltro."
-
-msgid "Command to execute"
-msgstr "Comando da eseguire"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr ""
-"File di configurazione per il driver di interfaccia (È possibile utilizzare "
-"anche l3_agent.ini)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "Valore ethertype %(ethertype)s in conflitto per CIDR %(cidr)s"
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"Controlla se l'API del gruppo di sicurezza neutron è abilitata sul server. "
-"Dovrebbe essere impostata su false quando non si utilizzano gruppi di "
-"sicurezza o si utilizza l'API del gruppo di sicurezza nova."
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr ""
-"Impossibile effettuare il bind a %(host)s:%(port)s dopo aver provato per "
-"%(time)d secondi"
-
-msgid "Could not deserialize data"
-msgstr "Impossibile deserializzare i dati"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "Creazione non riuscita. %(dev_name)s già esiste."
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr ""
-"L'ip gateway corrente %(ip_address)s è già in uso dalla porta %(port_id)s. "
-"Impossibile effettuare l'aggiornamento."
-
-msgid "Currently distributed HA routers are not supported."
-msgstr "I router HA attualmente distribuiti non sono supportati."
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"Durata rilascio DHCP (in secondi). Utilizzare -1 per informare dnsmasq di "
-"utilizzare infinite volte il rilascio."
-
-msgid "Default driver to use for quota checks"
-msgstr "Driver predefinito da utilizzare per i controlli di quota"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Numero predefinito di risorse consentite per tenant. Un valore negativo "
-"indica un numero illimitato."
-
-msgid "Default security group"
-msgstr "Gruppo di sicurezza predefinito"
-
-msgid "Default security group already exists."
-msgstr "Il gruppo di sicurezza predefinito già esiste."
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"Definisce i provider per i servizi avanzati utilizzando il formato: "
-"<service_type>:<name>:<driver>[:default]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-"Ritardo in cui è previsto che l'agent aggiorni le porte esistenti quando "
-"viene riavviato"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "Elimina lo spazio dei nomi rimuovendo tutti i dispositivi."
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "Eliminazione della porta %s"
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "Dispositivo %(dev_name)s nell'associazione: %(mapping)s non univoco"
-
-msgid "Device has no virtual functions"
-msgstr "Il dispositivo non ha funzioni virtuali"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr "Il nome dispositivo %(dev_name)s manca da physical_device_mappings"
-
-msgid "Device not found"
-msgstr "Dispositivo non trovato"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr ""
-"L'indirizzo MAC del router virtuale distribuito per l'host %(host)s non "
-"esiste."
-
-msgid "Domain to use for building the hostnames"
-msgstr "Dominio da utilizzare per creare i nomi host"
-
-msgid "Downgrade no longer supported"
-msgstr "Riduzione non più supportata"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "Il driver %s non è univoco tra i provider"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "Driver per il firewall dei gruppi di sicurezza nell'agent L2"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr "Driver da utilizzare per la pianificazione della rete nell'agent DHCP"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr ""
-"Driver da utilizzare per la pianificazione del router nell'agent L3 "
-"predefinito"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "Indirizzo IP duplicato '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "Regola di misurazione duplicata in POST."
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "Regola del gruppo di sicurezza duplicata in POST."
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "Hostroute duplicato '%s'"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "Elementi duplicati nell'elenco: '%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "Nameserver duplicato '%s'"
-
-msgid "Duplicate segment entry in request."
-msgstr "Voce del segmento duplicata nella richiesta."
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "ERRORE: %s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"ERRORE: Impossibile trovare il file di configurazione utilizzando i percorsi "
-"di ricerca predefiniti (~/.neutron/, ~/, /etc/neutron/, /etc/) e l'opzione "
-"'--config-file'!"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr ""
-"Uno dei parametri network_id o router_id deve essere passato al metodo "
-"_get_ports."
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "È necessario specificare subnet_id o port_id"
-
-msgid "Empty physical network name."
-msgstr "Nome rete fisica vuoto."
-
-msgid "Enable FWaaS"
-msgstr "Abilita FWaaS"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "Abilitare la modalità  HA per i router virtuali."
-
-msgid "Enable SSL on the API server"
-msgstr "Abilitazione di SSL sul server API"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"Abilitare VXLAN sull'agent. Può essere abilitata quando l'agent è gestito "
-"dal plugin ml2 utilizzando il driver del meccanismo linuxbridge"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"Abilitare il responder ARP locale se è supportato. Richiede il driver OVS "
-"2.1 e ML2 l2population. Consentire allo switch (quando supporta una "
-"sovrapposizione) di rispondere ad una richiesta ARP in locale senza eseguire "
-"un broadcast ARP oneroso nella sovrapposizione."
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"Abilitare i servizi sull'agent con admin_state_up False. Se questa opzione è "
-"False, quando admin_state_up di un agent è su False, verranno disabilitati i "
-"servizi su tale agent. Gli agent con admin_state_up False non vengono "
-"selezionati per la pianificazione automatica indipendentemente da questa "
-"opzione. Ma è disponibile la pianificazione manuale di tali agent se questa "
-"opzione è impostata su True."
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"Abilita/disabilita logwatch mediante il proxy di metadati. Dovrebbe essere "
-"disabilitato quando non è consentita l'opzione metadata_proxy_user/group per "
-"leggere/scrivere il relativo file di log e deve essere utilizzata l'opzione "
-"copytruncate logrotate se è abilitata logrotate sui file di log del proxy di "
-"metadati. Il valore predefinito dell'opzione viene dedotto da "
-"metadata_proxy_user: il logwatch è abilitato se metadata_proxy_user è l'ID/"
-"nomedell'utente operativo dell'agent."
-
-msgid "Encountered an empty component."
-msgstr "È stato rilevato un componente vuoto."
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "La fine dell'intervallo VLAN è minore dell'inizio dell'intervallo VLAN"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr ""
-"L'intervallo finale del tunnel è inferiore all'intervallo iniziale del "
-"tunnel."
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "Errore durante l'importazione del driver dell'unità FWaaS: %s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "Errore durante l'analisi dell'indirizzo dns %s"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "Errore durante le lettura di %s"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr "I prefissi esistenti devono essere un sottoinsieme dei nuovi prefissi"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"Estensione per utilizzare insieme del driver del meccanismo l2population del "
-"plugin m12. Essa abilita il plugin per popolare la tabella di inoltro VXLAN."
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "L'estensione con alias %s non esiste"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "L'IP esterno %s è uguale all'IP gateway"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"La rete esterna %(external_network_id)s non è raggiungibile dalla sottorete "
-"%(subnet_id)s.  Pertanto, non è possibile associare la porta %(port_id)s a "
-"un IP mobile."
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr ""
-"La rete esterna %(net_id)s non può essere aggiornata per diventare una rete "
-"non esterna in quanto ha già le porte gateway esistenti"
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "Impossibile trovare ExtraDhcpOpt %(id)s"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr ""
-"Il plugin FWaaS è configurato sul lato server, ma FWaaS è disabilitato "
-"nell'agent L3."
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-"Impossibile ripianificare il router  %(router_id)s: non è stato trovato "
-"nessun agent L3 adatto."
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-"Impossibile pianificare il router %(router_id)s per l'agent L3 %(agent_id)s."
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"Impossibile allocare un VRID nella rete %(network_id)s per il router "
-"%(router_id)s dopo %(max_tries)s tentativi."
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"Impossibile creare la porta nella rete %(network_id)s perché fixed_ips ha "
-"incluso una sottorete %(subnet_id)s non valida"
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr ""
-"Impossibile analizzare la richiesta. Il parametro '%s' non è specificato"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr ""
-"Impossibile analizzare la richiesta. È necessario l'attributo '%s' non "
-"specificato"
-
-msgid "Failed to remove supplemental groups"
-msgstr "Impossibile rimuovere i gruppi supplementari"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "Impossibile impostare il gid %s"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "Impossibile impostare l'uid %s"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "Impossibile impostare la porta tunnel %(type)s su %(ip)s"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "Impossibile trovare l'IP mobile %(floatingip_id)s"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr "Per i protocolli TCP/UDP, port_range_min deve essere <= port_range_max"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "Forzare le chiamate ip_lib ad utilizzare root helper"
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-"Trovati pool di allocazione di sovrapposizione:%(pool_1)s %(pool_2)s per la "
-"sottorete %(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"Non è possibile aggiornare il gateway per il router %(router_id)s, in quanto "
-"un gateway per la rete esterna %(net_id)s è richiesto da uno o più IP mobili."
-
-msgid "Gateway is not valid on subnet"
-msgstr "Il gateway non è valido sulla sottorete"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr ""
-"Gruppo (gid o nome) che esegue il proxy di metadati dopo la relativa "
-"inizializzazione"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"Gruppo (gid o nome) che esegue il proxy di metadati dopo la relativa "
-"inizializzazione (se vuoto: gruppo operativo dell'agent)."
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr ""
-"Gruppo (gid o name) che esegue questo processo dopo la relativa "
-"inizializzazione"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Quante volte Neutron richiamerà la generazione MAC"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"Il codice ICMP (port-range-max) %(value)s è stato fornito, ma il tipo ICMP "
-"(port-range-min) manca."
-
-msgid "ID of network"
-msgstr "ID della rete"
-
-msgid "ID of network to probe"
-msgstr "ID di rete per probe"
-
-msgid "ID of probe port to delete"
-msgstr "ID della porta probe da eliminare"
-
-msgid "ID of probe port to execute command"
-msgstr "ID della porta probe per eseguire il comando"
-
-msgid "ID of the router"
-msgstr "ID del router"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr ""
-"L'indirizzo IP %(ip_address)s non è un IP valido per nessuna delle sottoreti "
-"sulla rete specificata."
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr ""
-"L'indirizzo IP %(ip_address)s non è un IP valido per la sottorete "
-"specificata."
-
-msgid "IP address used by Nova metadata server."
-msgstr "Indirizzo IP utilizzato dal server di metadati Nova."
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "L'assegnazione IP richiede subnet_id o ip_address"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply non è riuscito ad applicare la seguente serie di "
-"regole iptables:\n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"L'indirizzo IPv6 %(address)s non può essere assegnato direttamente ad una "
-"porta sulla sottorete %(id)s perché la sottorete è configurata per gli "
-"indirizzi automatici"
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"La sottorete IPv6 %s configurata per ricevere RA da un router esterno non "
-"può essere aggiunta a Neutron Router."
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"Se True, l'impegno è effettuato per annunciare le impostazioni MTU alle VM "
-"tramite metodi di rete (opzioni DHCP e RA MTU) quando la MTU preferita di "
-"rete è sconosciuta."
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-"Se True, consentire ai plugin che lo supportano di creare reti VLAN "
-"trasparenti."
-
-msgid "Illegal IP version number"
-msgstr "Numero della versione IP non valido"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr ""
-"Spazio prefisso insufficiente per assegnare la dimensione della sottorete /%s"
-
-msgid "Insufficient rights for removing default security group."
-msgstr ""
-"Diritti non sufficienti per rimuovere il gruppo di sicurezza predefinito."
-
-msgid "Interface to monitor"
-msgstr "Interfaccia da monitorare"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr ""
-"Intervallo tra i controlli dell'attività del processo child (secondi), "
-"utilizzare 0 per disabilitare"
-
-msgid "Interval between two metering measures"
-msgstr "Intervallo tra due misure"
-
-msgid "Interval between two metering reports"
-msgstr "Intervallo tra due report di misurazione"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"CIDR %s non valido per la modalità indirizzi IPv6. OpenStack utilizza il "
-"formato di indirizzi EUI-64, che richiede che il prefisso sia /64."
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "Dispositivo non valido %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-"Tipo di autenticazione non valido: %(auth_type)s, i tipi validi sono: "
-"%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "Formato dati invalido per il pool IP: '%s'"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "Formato di dati non valido per extra-dhcp-opt: %(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "Formato dati invalido per l'IP fisso: '%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "Formato dati invalido per hostroute: '%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "Formato dati invalido per il nameserver: '%s'"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "Formato non valido per gli instradamenti: %(routes)s, %(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "Formato non valido: %s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "Input non valido per %(attr)s. Motivo: %(reason)s."
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "Input invalido per l'operazione: %(error_message)s."
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"Input non valido. '%(target_dict)s' deve essere un dizionario con chiavi: "
-"%(expected_keys)s"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr ""
-"Stato istanza non valido: %(state)s, gli stati validi sono: %(valid_states)s"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "Associazione non valida: '%s'"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "pci slot non valido %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr ""
-"Formato del provider non valido. L'ultima parte deve essere 'default' o "
-"vuota: %s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "Route invalido: %s"
-
-msgid "Invalid service provider format"
-msgstr "Formato del provider del servizio non valido"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"Valore non valido per ICMP %(field)s (%(attr)s) %(value)s. Deve essere "
-"compreso tra 0 e 255."
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "Valore invalido per la porta %(port)s"
-
-msgid "Keepalived didn't respawn"
-msgstr "Keepalived non ha eseguito la nuova generazione"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "Chiave %(key)s nell'associazione: '%(mapping)s' non univoca"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "Il limite deve essere un numero intero 0 o superiore e non '%d'"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "Limitare il numero di lease per evitare un denial-of-service."
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"Elenco di <physical_network>:<vlan_min>:<vlan_max> o <physical_network> che "
-"specificano nomi physical_network utilizzabili per le reti tenant e provider "
-"VLAN, come anche gli intervalli di tag VLAN su ciascuno disponibile per "
-"l'assegnazione alle reti tenant."
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"Elenco dei punti di ingresso del driver del tipo di rete da caricare dallo "
-"spazio dei nomi neutron.ml2.type_drivers."
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "Indirizzo IP locale degli endpoint VXLAN."
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "Indirizzo IP locale dell'endpoint tunnel."
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "Ubicazione per il socket del dominio UNIX del proxy di metadati."
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "Ubicazione del socket del dominio UNIX del proxy di metadati"
-
-msgid "Location of pid file of this process."
-msgstr "Ubicazione del file pid di questo processo."
-
-msgid "Location to store DHCP server config files"
-msgstr "Ubicazione per archiviare i file di configurazione del server DHCP"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "Ubicazione per memorizzare i file di configurazione IPv6 RA"
-
-msgid "Location to store child pid files"
-msgstr "Ubicazione per archiviare i file pid dell'elemento child"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr ""
-"Ubicazione per archiviare i file di configurazione keepalived/conntrackd"
-
-msgid "MTU setting for device."
-msgstr "Impostazione MTU per l'unità."
-
-msgid "MTU size of veth interfaces"
-msgstr "Dimensione MTU delle interfacce veth"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "Eseguire l'agent L2 in modalità DVR."
-
-msgid "Malformed request body"
-msgstr "Corpo richiesta non corretto"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "Numero massimo di coppie di indirizzi consentito"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "Numero massimo di route host per la sottorete"
-
-msgid "Metering driver"
-msgstr "Driver di misurazione"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "L'etichetta di misurazione %(label_id)s non esiste"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "La regola di etichetta di misurazione %(rule_id)s non esiste"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"La regola di etichetta di misurazione remote_ip_prefix %(remote_ip_prefix)s "
-"si sovrappone ad un'altra"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr ""
-"Ridurre al minimo il polling controllando ovsdb per le modifiche "
-"all'interfaccia."
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "Chiave mancante nell'associazione: '%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "Valore mancante nell'associazione: '%s'"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr "Trovati più agent con agent_type=%(agent_type)s e host=%(host)s"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "Più provider predefiniti per il servizio %s"
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "Sono stati configurati più plugin per il servizio %s"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "Più provider specificati per il servizio %s"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr ""
-"La creazione in massa di più tenant_id nella regola del gruppo di sicurezza "
-"non è consentita"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr ""
-"È necessario anche specificare il protocollo se è fornito l'intervallo di "
-"porta."
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr ""
-"È necessario specificare una o più azioni nell'aggiunta o modifica del flusso"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr ""
-"Il nome '%s' deve contenere da 1 a 63 caratteri, ciascuno dei quali può "
-"essere solo alfanumerico o un trattino."
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "Il nome '%s' non deve iniziare o terminare con un trattino."
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "Nome del bridge Open vSwitch da utilizzare"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr ""
-"Nome della regione nova da utilizzare. Utile nel caso in cui keystone "
-"gestisce più di una regione."
-
-msgid "Name of the FWaaS Driver"
-msgstr "Nome del driver FWaaS"
-
-msgid "Namespace of the router"
-msgstr "Spazio dei nomi del router"
-
-msgid "Native pagination depend on native sorting"
-msgstr "La paginazione nativa deipende dall'ordinamento nativo"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "Delta negativo (riduzione) non supportato"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "Revisione relativa negativa (riduzione) non suportata"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "La rete %s non è una rete esterna valida"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "La rete %s non è una rete esterna"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"Rete di dimensione %(size)s, dall'intervallo IP %(parent_range)s esclusi gli "
-"intervalli IP %(excluded_ranges)s non trovata."
-
-msgid "Network that will have instance metadata proxied."
-msgstr "Rete che avrà i metadati dell'istanza con proxy."
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "Valore del tipo di rete '%s' non supportato"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "Valore Tipo di rete richiesto dal plugin ML2"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr "Tipi di reti supportati dall'agent (gre e/o vxlan)."
-
-msgid "Neutron Service Type Management"
-msgstr "Gestione tipo servizio Neutron"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "Neutron core_plugin non configurato!"
-
-msgid "Neutron plugin provider module"
-msgstr "Modulo del provider di plugin Neutron"
-
-msgid "Neutron quota driver class"
-msgstr "Classe driver quota Neutron"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr ""
-"Non è stato trovato nessun agent L3 adatto associato alla rete esterna %s"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "Indirizzi IP non più disponibili nella rete %(net_id)s."
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"Nessun altro VRID (Virtual Router Identifier) disponibile durante la "
-"creazione del router %(router_id)s. Il limite del numero di router HA per "
-"tenant è 254."
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "Nessun provider specificato per il servizio '%s', uscita in corso"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"Non consentito assegnare manualmente un router %(router_type)s %(router_id)s "
-"da un nodo DVR esistente ad un altro agent L3 %(agent_id)s."
-
-msgid "Not authorized."
-msgstr "Non autorizzato."
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"Non sono presenti agent L3 sufficienti per garantire HA. Sono richiesti "
-"minimo %(min_agents)s, disponibili %(num_agents)s."
-
-msgid "Number of RPC worker processes for service"
-msgstr "Numero di processi RPC worker per servizio"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr ""
-"Numero di richieste di backlog con cui configurare il socket server dei "
-"metadati"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr "Numero di richieste di backlog per configurare il socket con"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Numero di IP mobili consentiti per tenant. Un valore negativo indica un "
-"numero illimitato."
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Numero di reti consentite per tenant. Un valore negativo indica un numero "
-"illimitato."
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Numero di porte consentite per tenant. Un valore negativo indica un numero "
-"illimitato."
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Numero di router consentiti per tenant. Un valore negativo indica un numero "
-"illimitato."
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr ""
-"Numero di secondi tra l'invio di eventi a nova se vi sono eventuali eventi "
-"da inviare."
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "Numero di secondi per trattenere i nuovi tentativi di ascolto"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Numero di gruppi di sicurezza consentiti per tenant. Un valore negativo "
-"indica un numero illimitato."
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Numero di regole di sicurezza consentite per tenant. Un valore negativo "
-"indica un numero illimitato."
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr ""
-"Numero di sottoreti consentite per tenant. Un valore negativo indica un "
-"numero illimitato."
-
-msgid "Only admin can view or configure quota"
-msgstr "Solo admin può visualizzare o configurare una quota"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr "Solo l'admin è autorizzato ad accedere alle quote per un altro tenant"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr ""
-"Al momento è consentito solo aggiornare le regole per un profilo di "
-"sicurezza."
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "È possibile fornire solo remote_ip_prefix o remote_group_id."
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"Operazione %(op)s non supportata per device_owner %(device_owner)s sulla "
-"porta %(port_id)s."
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr ""
-"Sostituisci le impostazioni dnsmasq predefinite utilizzando questo file"
-
-msgid "Owner type of the device: network/compute"
-msgstr "Tipo proprietario dell'unità: rete/compute"
-
-msgid "POST requests are not supported on this resource."
-msgstr "Le richieste POST non sono supportate su questa risorsa."
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "Analisi bridge_mappings non riuscita: %s."
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "Analisi pci_vendor_devs supportati non riuscita"
-
-msgid "Path to PID file for this process"
-msgstr "Percorso per il file PID per questo processo"
-
-msgid "Path to the router directory"
-msgstr "Percorso per la directory del router"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr "Porta patch peer nel bridge di integrazione per il bridge tunnel."
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr "Porta patch peer nel bridge tunnel per il bridge di integrazione."
-
-msgid "Ping timeout"
-msgstr "Timeout di ping"
-
-msgid "Plugin does not support updating provider attributes"
-msgstr "Il plugin non supporta l'aggiornamento degli attributi provider"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "La porta %(id)s non dispone di un ip fisso %(address)s"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"La porta %(port_id)s è associata ad un diverso tenant rispetto all'IP mobile "
-"%(floatingip_id)s e pertanto non è possibile unirlo."
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr ""
-"Abilitare la sicurezza della porta per disporre di coppie di indirizzo "
-"consentite ad una porta."
-
-msgid "Port does not have port security binding."
-msgstr "La porta non dispone di un bind di sicurezza della porta."
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-"La porta ha un gruppo sicurezza associato. Impossibile disabilitare la "
-"sicurezza della porta o l'indirizzo ip finché il gruppo sicrezza non viene "
-"rimosso"
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr ""
-"La sicurezza della porta deve essere abilitata e la porta deve avere un "
-"indirizzo IP per utilizzare i gruppi sicurezza."
-
-msgid "Private key of client certificate."
-msgstr "Chiave privata del certificato client."
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "Probe %s eliminato"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "Probe creato : %s "
-
-msgid "Process is already started"
-msgstr "Processo già avviato"
-
-msgid "Process is not running."
-msgstr "Il processo non è in esecuzione."
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "Protocollo per accedere ai metadati nova, http o https"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"Intervallo di secondi per ritardare casualmente l'avvio di attività "
-"periodiche programma di pianificazione per ridurre la modifica data/ora. "
-"(Disabilitare impostando questa opzione a 0)"
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr "Il server di metadati remoto ha rilevato un errore di server interno."
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"Rappresentazione del tipo di risorsa il cui carico è segnalato dall'agent. "
-"Può essere \"networks\", \"subnets\" o \"ports\". Quando specificato "
-"(L'impostazione predefinita è networks), il server estrarrà il carico "
-"particolare inviato come parte del relativo oggetto di configurazione agent "
-"dallo stato del report agent, il quale rappresenta il numero di risorse "
-"utilizzate, ad ogni report_interval. dhcp_load_type può essere utilizzato in "
-"combinazione con network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler Quando network_scheduler_driver è "
-"WeightScheduler, dhcp_load_type può essere configurato per rappresentare la "
-"scelta per la risorsa in fase di bilanciamento. Esempio: "
-"dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr ""
-"Richiesta non riuscita: errore server interno durante l'elaborazione della "
-"richiesta."
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"La richiesta contiene una coppia di indirizzo duplicata: mac_address "
-"%(mac_address)s ip_address %(ip_address)s."
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"Sottorete richiesta con cidr: %(cidr)s per la rete: %(network_id)s si "
-"sovrappone con un'altra sottorete"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"La risorsa '%(resource_id)s' è già associata al provider '%(provider)s' per "
-"il tipo di servizio '%(service_type)s'"
-
-msgid "Resource body required"
-msgstr "Corpo risorsa richiesto"
-
-msgid "Resource not found."
-msgstr "Risorsa non trovata."
-
-msgid "Resources required"
-msgstr "Risorse richieste"
-
-msgid "Root helper daemon application to use when possible."
-msgstr "Applicazione daemon root helper da utilizzare quando possibile."
-
-msgid "Root permissions are required to drop privileges."
-msgstr "Per rilasciare i privilegi sono necessarie le autorizzazioni root."
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "Router %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "Impossibile trovare il router %(router_id)s"
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr ""
-"Il router %(router_id)s non dispone di un interfaccia con id %(port_id)s"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr ""
-"Il router %(router_id)s non dispone di un'interfaccia sulla sottorete "
-"%(subnet_id)s"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "Il router dispone già di una porta sulla sottorete %s"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"L'interfaccia del router per la sottorete %(subnet_id)s nel router "
-"%(router_id)s non può essere eliminata, in quanto è richiesta da uno o più "
-"IP mobili."
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"L'interfaccia del router per la sottorete %(subnet_id)s nel router "
-"%(router_id)s non può essere eliminata, in quanto è richiesta da uno o più "
-"instradamenti."
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr "Router che avrà i metadati dell'istanza connessi con proxy."
-
-msgid "Run as daemon."
-msgstr "Esegui come daemon."
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"Secondi tra lo stato riportato dai nodi al server; deve essere inferiore di "
-"agent_down_time, è preferibile che sia la metà o meno di agent_down_time."
-
-msgid "Seconds between running periodic tasks"
-msgstr "Secondi tra l'esecuzione delle attività periodiche"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"Secondi per considerare che l'agent è inattivo; deve essere almeno il doppio "
-"di report_interval, per essere sicuri che l'agente è definitivamente "
-"inattivo."
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "Il gruppo di sicurezza %(id)s non esiste"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "La regola del gruppo di sicurezza %(id)s non esiste"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "La regola del gruppo di sicurezza già esiste. L'ID regola è %(id)s."
-
-msgid "Segments and provider values cannot both be set."
-msgstr "Impossibile impostare i segmenti e i valori del provider."
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"Invia una notifica a nova quando i dati porta (fixed_ips/floatingip) vengono "
-"modificati e in tal modo nova può aggiornare la propria cache."
-
-msgid "Send notification to nova when port status changes"
-msgstr "Invia una notifica a nova quando lo stato della porta cambia"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"Inviare questi ARPs gratuiti per la configurazione HA, se inferiore o uguale "
-"a 0, la funzione è disabilitata"
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-"Provider del servizio '%(provider)s' non trovato per il tipo di servizio "
-"%(service_type)s"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr ""
-"Il tipo del servizio %(service_type)s non ha un provider del servizio "
-"predefinito"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"Impostare il nuovo timeout in secondi per le nuove chiamate rpc dopo che "
-"l'agent riceve SIGTERM. Se il valore è impostato su 0, il timeout rpc non "
-"verrà modificato"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"Impostare o annullare l'impostazione del bit del frammento non DF sul "
-"pacchetto IP in uscita che trasporta il tunnel GRE/VXLAN."
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"Alcuni tenant dispongono di più di un gruppo di sicurezza denominato "
-"'default': %(duplicates)s. Tutti i gruppi di sicurezza 'default' duplicati "
-"devono essere risolti prima dell'aggiornamento del database."
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr ""
-"La specifica di 'tenant_id' diverso da quello autenticato nella richiesta, "
-"richiede i privilegi admin"
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr "La sottorete per l'interfaccia del router deve avere un IP gateway"
-
-msgid "Subnet pool has existing allocations"
-msgstr "Il pool di sottoreti ha assegnazioni esistenti"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr "Sottorete utilizzata per la rete admin HA L3"
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr ""
-"L'indicatore lato sistema per determinare il tipo di router che i tenant "
-"possono creare. Solo l'Admin può sovrascrivere."
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "Porta TCP in ascolto per le richieste del server di metadati."
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr "Porta TCP utilizzata dal proxy spazio dei nomi dei metadati Neutron."
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Porta TCP utilizzata dal server di metadati Nova."
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "TLD '%s' non deve contenere tutti caratteri numerici"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "Pacchetti del protocollo dell'interfaccia TOS per vxlan."
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "Pacchetti del protocollo dell'interfaccia TTL per vxlan."
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr ""
-"Tenant %(tenant_id)s non consentito per creare %(resource)s su questa rete"
-
-msgid "Tenant network creation is not enabled."
-msgstr "La creazione della rete tenant non è consentita."
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"L'opzione 'gateway_external_network_id' deve essere configurata per questo "
-"agent poiché Neutron ha più di una rete esterna."
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr ""
-"Il CIDR della rete HA specificato nel file di configurazione non è valido; "
-"%(cidr)s."
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "La porta UDP da utilizzare per i tunnel VXLAN."
-
-msgid "The advertisement interval in seconds"
-msgstr "L'intervallo di annuncio in secondi"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "Il pool di allocazione %(pool)s non è valido."
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-"Il pool di allocazione %(pool)s si estende oltre il cidr della sottorete "
-"%(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr ""
-"L'attributo '%(attr)s' è di riferimento ad altre risorse, non può essere "
-"utilizzato dall'ordinamento '%(resource)s'"
-
-msgid "The core plugin Neutron will use"
-msgstr "Il plugin principale che Neutron utilizzerà"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "Il driver utilizzato per gestire il server DHCP."
-
-msgid "The driver used to manage the virtual interface."
-msgstr "Il driver utilizzato per gestire l'interfaccia virtuale."
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"Il seguente device_id %(device_id)s non è posseduto dal proprio tenant o "
-"corrisponde ad un altro router tenant."
-
-msgid "The host IP to bind to"
-msgstr "IP host per collegarsi a"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "L'interfaccia per l'interazione con OVSDB"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-"Il numero massimo di elementi restituiti in una singola risposta, il valore "
-"era 'infinite' oppure un numero intero negativo che indica nessun limite"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr ""
-"La rete %(network_id)s è stata già ospitata dall'agent DHCP %(agent_id)s."
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr ""
-"La rete %(network_id)s non è stata ospitata dall'agent DHCP %(agent_id)s."
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr ""
-"Il numero di coppie di indirizzi consentite supera quello massimo %(quota)s."
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr ""
-"Il numero di secondi in l'agent attenderà tra i polling per le modifiche "
-"dell'unità locale."
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-"Il numero di secondi di attesa prima di generare nuovamente il monitor ovsdb "
-"dopo la perdita di comunicazione."
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "Il numero di sort_keys e sort_dirs deve essere uguale"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "La porta '%s' è stata eliminata"
-
-msgid "The port to bind to"
-msgstr "La porta a cui collegarsi"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "Il tipo di contenuto richiesto %s non è valido."
-
-msgid "The resource could not be found."
-msgstr "Impossibile trovare la risorsa."
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr ""
-"Il router %(router_id)s è stato già ospitato dall'agent L3 %(agent_id)s."
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr ""
-"Il server è in errore o non è capace di eseguire l'operazione richiesta."
-
-msgid "The service plugins Neutron will use"
-msgstr "Il plugin del servizio che Neutron utilizzerà"
-
-msgid "The type of authentication to use"
-msgstr "Il tipo di autenticazione da utilizzare"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "Il valore '%(value)s' per %(element)s non è valido."
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"Modalità di funzionamento per l'agent. le modalità consentite sono: 'legacy' "
-"- questa conserva il comportamento esistente in cui l'agent L3 viene "
-"distribuito in un nodo di rete centralizzato per fornire i servizi L3 come "
-"DNAT e SNAT. Utilizzare questa modalità se non si desidera adottare DVR. "
-"'dvr' - questa modalità consente la funzionalità DVR e deve essere "
-"utilizzata per un agent L3 che viene eseguito su un host di elaborazione. "
-"'dvr_snat' - questa consente il supporto SNAT centralizzato insieme a DVR.  "
-"Questa modalità deve essere utilizzata per un agent L3 in esecuzione su un "
-"nodo centralizzato (o in distribuzioni a singolo host, ad esempio devstack)"
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"True per eliminare tutte le porte su tutti i bridge OpenvSwitch. False per "
-"eliminare le porte create da Neutron nell'integrazione e i bridge di reti "
-"esterne."
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "Valore IP tunnel IP richiesto dal plugin ML2"
-
-msgid "Tunnel bridge to use."
-msgstr "Bridge del tunnel da utilizzare."
-
-msgid "URL to database"
-msgstr "URL per il database"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "Impossibile accedere a %s"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr ""
-"Impossibile completare l'operazione per %(router_id)s. Il numero di "
-"instradamenti supera la quota massima %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"Impossibile completare l'operazione per %(subnet_id)s. Il numero di server "
-"nome DNS supera il limite %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr ""
-"Impossibile completare l'operazione per %(subnet_id)s. Il numero di route "
-"host supera il limite %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-"Impossibile completare l'operazione per la rete %(net_id)s. L'indirizzo IP "
-"%(ip_address)s è in uso."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr ""
-"Impossibile completare l'operazione per la rete %(net_id)s. L'indirizzo mac "
-"%(mac)s è in uso."
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"Impossibile completare l'operazione nella rete %(net_id)s. Ci sono una o più "
-"porte ancora in uso nella rete."
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"Impossibile completare l'operazione sulla porta %(port_id)s per la rete "
-"%(net_id)s. La porta ha già un dispositivo collegato %(device_id)s."
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "Impossibile convertire il valore in %s"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "Impossibile creare la porta gateway agent"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "Impossibile creare la porta dell'interfaccia SNAT"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr ""
-"Impossibile creare la rete flat. La rete fisica %(physical_network)s è in "
-"uso."
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr ""
-"Impossibile creare la rete. Non è stata trovata alcuna rete nel numero "
-"massimo di tentativi consentiti."
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr ""
-"Impossibile creare la rete. Nessuna rete tenant è disponibile per "
-"l'allocazione."
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"Impossibile creare la rete. La VLAN %(vlan_id)s nella rete fisica "
-"%(physical_network)s è in uso."
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr "Impossibile creare la rete. l'ID tunnel %(tunnel_id)s è in uso."
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "Impossibile determinare l'indirizzo mac per %s"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "Impossibile trovare '%s' nel corpo della richiesta"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr "Impossibile trovare alcun indirizzo IP sulla rete esterna %(net_id)s."
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "Impossibile trovare il nome risorsa in %s"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "Impossibile generare l'indirizzo IP da EUI64 per il prefisso IPv4"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "Impossibile generare mac DVR univoco per l'host %(host)s."
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "Impossibile generare mac univoco sulla rete %(net_id)s."
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-"Impossibile identificare un campo di destinazione da:%s. La corrispondenza "
-"deve essere presente nel modulo %%(<field_name>)s"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr ""
-"Impossibile verificare la corrispondenza:%(match)s come risorsa parent: "
-"%(res)s non è stata trovata"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "Imprevisto codice di risposta: %s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "Risposta imprevista: %s"
-
-msgid "Unimplemented commands"
-msgstr "Comandi non implementati"
-
-msgid "Unknown API version specified"
-msgstr "Specificata versione API sconosciuta"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "Attributo sconosciuto '%s'."
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "Catena sconosciuta: %r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "Risorse quota sconosciute %(unknown)s."
-
-msgid "Unmapped error"
-msgstr "Errore non associato"
-
-msgid "Unrecognized action"
-msgstr "Azione non riconosciuta"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "Attributi non riconosciuti '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "Tipo-contenuto non supportato"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "Tipo di rete non supportato %(net_type)s."
-
-msgid "Unsupported request type"
-msgstr "Tipo di richiesta non supportato"
-
-msgid "Updating default security group not allowed."
-msgstr "L'aggiornamento del gruppo di sicurezza predefinito non è consentito."
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"utilizzare il driver del meccanismo ML2 l2population per conoscere MAC e IP "
-"remoti e migliorare la scalabilità del tunnel."
-
-msgid "Use broadcast in DHCP replies"
-msgstr "Utilizzare broadcast nelle risposte DHCP"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr "Utilizzare  --revisione delta o relativa, non entrambe"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr ""
-"Utente (uid o nome) che esegue il proxy di metadati dopo la relativa "
-"inizializzazione"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"Utente (uid o nome) che esegue il proxy di metadati dopo la relativa "
-"inizializzazione (se vuoto: utente operativo dell'agent)."
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr ""
-"Utente (uid o name) che esegue questo processo dopo la relativa "
-"inizializzazione"
-
-msgid "VRRP authentication password"
-msgstr "Password di autenticazione VRRP"
-
-msgid "VRRP authentication type"
-msgstr "Tipo di autenticazione VRRP"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"La convalida delle chiavi del dizionario non è riuscita. Chiavi previste: "
-"%(expected_keys)s Chiavi fornite: %(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "Il programma di convalida '%s' non eiste."
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "Valore %(value)s nell'associazione: '%(mapping)s' non univoco"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"Osserva log file. Logwatch deve essere disabilitato quando "
-"metadata_proxy_user/group non dispone delle autorizzazioni di lettura/"
-"scrittura sul file di logdel proxy di metadati."
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr ""
-"Dove memorizzare i file di stato Neutron. Questa directory deve essere "
-"scrivibile dall'agent."
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"Con IPv6, non è necessario che la rete utilizzata per il gateway esterno "
-"disponga di una sottorete associata, poiché verrà utilizzato il LLA (link-"
-"local address) assegnato automaticamente. Tuttavia, è necessario un "
-"indirizzo gateway IPv6  per l'utilizzo come successivo hop per "
-"l'instradamento predefinito. Se qui non è configuratonessun indirizzo "
-"gateway Ipv6 (e solo poi) verrà configurato il router Neutron per ottenere "
-"il relativo instradamento predefinito da RA (Router Advertisement) dal "
-"router upstream; in tal caso il router upstream deve essere anche "
-"configuratoper inviare questi RA. Ipv6_gateway, quando configurato, "
-"deveessere il LLA dell'interfaccia sul router upstream. Se si desidera un "
-"hop successivo che utilizzi un GUA (Global Uunique Address) è necessario "
-"ottenerlo mediante una sottorete assegnata alla rete e non attraverso questo "
-"parametro."
-
-msgid "You must implement __call__"
-msgstr "È necessario implementare __call__"
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-"È necessario fornire un file di configurazione per il bridge - --config-file "
-"o env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "È necessario fornire una revisione o delta relativo"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr ""
-"allocation_pools consentita solo per specifiche richieste della sottorete."
-
-msgid "binding:profile value too large"
-msgstr "valore binding:profile troppo esteso"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "non devono essere forniti insieme cidr e prefixlen"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr "dhcp_agents_per_network deve essere >= 1. '%s' non è valido."
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "Impossibile specificare un fixed_ip_address senza un porta_id"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "ha il proprietario del dispositivo %s"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "comando ip non riuscito sul dispositivo %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "La funzione ip link %(capability)s non è supportata"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "Il comando ip link non è supportato: %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr "è necessario specificare ip_version in assenza di cidr e subnetpool_id"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ipv6_address_mode non è valida quando ip_version è 4"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ipv6_ra_mode non è valida quando ip_version è 4"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"ipv6_ra_mode o ipv6_address_mode non possono essere impostati quando "
-"enable_dhcp è impostato su False."
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"ipv6_ra_mode impostato su '%(ra_mode)s' con ipv6_address_mode impostato su "
-"'%(addr_mode)s' non è valido. Se sono impostati entrambi gli attributi, essi "
-"devono avere lo stesso valore"
-
-msgid "mac address update"
-msgstr "aggiornamento indirizzo mac"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"Il parametro di configurazione max_l3_agents_per_router %(max_agents)s non è "
-"valido. Deve essere maggiore o uguale a min_l3_agents_per_router "
-"%(min_agents)s."
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"il parametro di configurazione min_l3_agents_per_router non è valido. Deve "
-"essere uguale o maggiore di %s per HA."
-
-msgid "network_type required"
-msgstr "network_type obbligatorio"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "Valore network_type '%s' non supportato"
-
-msgid "new subnet"
-msgstr "nuova sottorete"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "physical_network '%s' sconosciuta per la rete del provider VLAN"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "physical_network '%s' sconosciuta per rete flat del provider"
-
-msgid "physical_network required for flat provider network"
-msgstr "physical_network richiesta per rete flat del provider"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "provider:physical_network specificata per la rete %s"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "respawn_interval deve essere >= 0 se fornito."
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id fuori dall'intervallo (da %(min)s a %(max)s)"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr ""
-"segmentation_id richiede physical_network per la rete del provider VLAN"
-
-msgid "the nexthop is not connected with router"
-msgstr "l'hop successivo non è connesso al router"
-
-msgid "the nexthop is used by router"
-msgstr "l'hop successivo è utilizzato dal router"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"uuid fornito da riga comandi pertanto external_process può tenere traccia "
-"dell'utente mediante l'interfaccia /proc/cmdline."
diff --git a/neutron/locale/ja/LC_MESSAGES/neutron.po b/neutron/locale/ja/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index ca9f6fd..0000000
+++ /dev/null
@@ -1,2368 +0,0 @@
-# Japanese translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-09-28 04:52+0000\n"
-"Last-Translator: Akihiro Motoki <amotoki@gmail.com>\n"
-"Language: ja\n"
-"Language-Team: Japanese\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"コマンド: %(cmd)s\n"
-"終了コード: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: 内部ドライバーエラー。"
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s は、有効な %(type)s ID ではありません"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"%(invalid_dirs)s は sort_dirs には無効な値です。有効な値は '%(asc)s' および "
-"'%(desc)s' です"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(key)s は %(tunnel)s プロバイダーネットワークで禁止されています"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"%(method)s が、ネットワーク設定 %(current)s (元の設定 %(original)s) および"
-"ネットワークセグメント %(segments)s を使用して呼び出されました"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"サブネット設定 %(current)s を使用して %(method)s が呼び出されました (元の設"
-"定: %(original)s)"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s が失敗しました。"
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr "%(name)s '%(addr)s' が ip_version '%(ip_version)s' と一致しません"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "オフラインモードでは、%s を呼び出せません"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "%s は、sort_key には無効な属性です"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "%sは、sort_keys には無効な属性です"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s は有効な VLAN タグではありません"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr ""
-"%s は get_port_from_device または get_ports_from_devices を実装していなければ"
-"なりません。"
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "%s は VLAN プロバイダーネットワークで禁止されています"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "%s は flat プロバイダーネットワークで禁止されています"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "%s は local プロバイダーネットワークで禁止されています"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s が最大長 %(max_len)s を超えています"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s が %(valid_values)s の中にありません"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s' は大きすぎます - '%(limit)d' を超えてはなりません"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr ""
-"'%(data)s' は小さすぎます - 少なくとも '%(limit)d' でなければなりません"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr ""
-"'%(data)s' は認識される IP サブネット cidr ではありません。'%(cidr)s' が推奨"
-"されます"
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' は有効なネームサーバーではありません。%(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s' ブランクストリングは許可されていません"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "'%s' はブール値に変換できません"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' に空白が含まれています"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "'%s' はディクショナリーではありません"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s' はリストではありません"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s' が有効な IP アドレスではありません"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s' は有効な IP サブネットではありません"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s' が有効な MAC アドレスではありません"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s' は有効な UUID ではありません"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' は有効なブール値ではありません"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "'%s' は有効な入力ではありません"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s' が有効な文字列ではありません"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s' は整数ではありません"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s' は整数または UUID ではありません"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s' は <key>=[value] 形式ではありません"
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "'%s' は負以外でなければなりません"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "0 は、CIDR プレフィックスの長さとして許可されていません"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "サブネットプールがない場合、cidr の指定は必須です"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"物理ネットワークから MTU 値へのマッピングのリスト。このマッピングの形式は"
-"<physnet>:<mtu val> です。このマッピングを使用して、デフォルトのsegment_mtu "
-"値とは異なる物理ネットワーク MTU 値を指定できます。"
-
-msgid "A metering driver must be specified"
-msgstr "計測ドライバーを指定する必要があります"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr "Neutron 拡張サービス用のサービスプロバイダーを取得するための API"
-
-msgid "Access to this resource was denied."
-msgstr "このリソースへのアクセスは拒否されました"
-
-msgid "Action to be executed when a child process dies"
-msgstr "子プロセスが異常終了したときに実行されるアクション"
-
-msgid "Adds external network attribute to network resource."
-msgstr "外部ネットワーク属性がネットワークリソースに追加されます。"
-
-msgid "Adds test attributes to core resources."
-msgstr "テスト属性をコアリソースに追加します。"
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "エージェント %(id)s が見つかりませんでした"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr ""
-"エージェント %(id)s は、L3 エージェントでないか、使用不可になっています"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr ""
-"エージェント %(id)s は、有効な DHCP エージェントでないか、使用不可になってい"
-"ます"
-
-#, fuzzy, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "エージェントがアップデートされました: %(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr ""
-"agent_type=%(agent_type)s および host=%(host)s のエージェントが見つかりません"
-"でした"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "DHCP エージェントに対するネットワークの自動スケジューリングを許可"
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr ""
-"L3 エージェントに対するルーターの自動スケジューリングを許可してください。"
-
-msgid "Allow running metadata proxy."
-msgstr "メタデータプロキシーの動作許可します"
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr "DHCP エージェントへのリソース操作通知の送信を許可"
-
-msgid "Allow the usage of the bulk API"
-msgstr "Bulk API の使用を許可"
-
-msgid "Allow the usage of the pagination"
-msgstr "ページ編集の使用を許可"
-
-msgid "Allow the usage of the sorting"
-msgstr "ソートの使用を許可"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr ""
-"Nova メタデータに対する非セキュアな SSL (https) 要求を実行することを許可しま"
-"す"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair には ip_address が含まれていなければなりません"
-
-msgid "An interface driver must be specified"
-msgstr "インターフェースドライバーを指定してください"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"neutron.ml2.mechanism_drivers 名前空間からロードするネットワーキングメカニズ"
-"ムドライバーのエンドポイントの順序付きリスト。"
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr "不明なエラーが発生しました。要求を再試行してください。"
-
-msgid "An unknown exception occurred."
-msgstr "不明な例外が発生しました。"
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "属性 '%s' は POST では許可されません"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "ネットワークをオフライン DHCP エージェントから自動的に削除します。"
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr ""
-"ルーターのスケジュールをオフライン L3 エージェントからオンライン L3 エージェ"
-"ントに自動的に変更します。"
-
-msgid "Available commands"
-msgstr "使用可能なコマンド"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "バックエンドでは VLAN Transparency はサポートされていません。"
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"EUI-64 による IPv6 アドレス生成用のプレフィックスまたは mac の形式が正しくあ"
-"りません: %(prefix)s、%(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr ""
-"EUI-64 による IPv6 アドレス生成用のプレフィックスタイプが正しくありません: %s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "ベース MAC: %s"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "ブリッジ %(bridge)s は存在しません。"
-
-msgid "Bulk operation not supported"
-msgstr "バルク操作はサポートされていません"
-
-msgid "CIDR to monitor"
-msgstr "モニター対象の CIDR"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr ""
-"gateway_ip のないサブネット %s 上のポートには Floating IP を追加できません"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr ""
-"要求されたサブネットを使用可能なプレフィックスのセットから割り振ることができ"
-"ません"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"Fixed IP %(fixed_ip)s には、既に外部ネットワーク %(net_id)s 上の Floating IP "
-"があるため、その Fixed IP を使用して Floating IP %(floating_ip_address)s "
-"(%(fip_id)s) をポート %(port_id)s と関連付けることはできません。"
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr ""
-"ポート %s は別のテナントによって所有されているため、 Floating IP を作成して、"
-"そのポートにバインドすることはできません。"
-
-msgid "Cannot create resource for another tenant"
-msgstr "別のテナントのリソースを作成できません"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "ipv6 属性が設定された状態で enable_dhcp を無効にすることはできません"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"同じネットワーク ID を持つ複数のルーターポートのいずれにも IPv6 サブネットが"
-"含まれる場合、これらのポートは使用できません。既存のポート %(p)s には IPv6 サ"
-"ブネットがあり、ネットワーク ID は %(nid)s です"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"%(router_type)s ルーター %(router_id)s を %(agent_mode)s L3 エージェント "
-"%(agent_id)s でホストできません。"
-
-#, fuzzy
-msgid "Cannot match priority on flow deletion or modification"
-msgstr "flowの削除か設定の優先度が一致できません"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "subnet-id と port-id の両方を指定することはできません"
-
-msgid "Cannot understand JSON"
-msgstr "JSON を解釈できません"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "読み取り専用属性 %s を更新できません"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr "ssl の認証局公開鍵 (CA cert) ファイル"
-
-msgid "Check for ARP responder support"
-msgstr "ARP 応答側サポートを検査します"
-
-msgid "Check for OVS vxlan support"
-msgstr "OVS vxlan サポートを検査します"
-
-msgid "Check for VF management support"
-msgstr "VF 管理サポートを検査します"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "iproute2 vxlan サポートを検査します"
-
-msgid "Check for nova notification support"
-msgstr "Nova 通知サポートを検査します"
-
-msgid "Check for patch port support"
-msgstr "パッチポートのサポートを検査します"
-
-msgid "Check minimal dnsmasq version"
-msgstr "最小 dnsmasq バージョンを検査します"
-
-msgid "Check netns permission settings"
-msgstr "netns 許可設定を検査します"
-
-msgid "Check ovsdb native interface support"
-msgstr "ovsdb ネイティブインターフェースのサポートの検査"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"サブネット %(subnet_id)s の CIDR %(subnet_cidr)s がサブネット %(sub_id)s の "
-"CIDR %(cidr)s とオーバーラップしています"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "Nova メタデータ API サーバー用のクライアント証明書。"
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"テナントネットワークの割り振りに使用可能な GRE トンネル ID の範囲を列挙する "
-"<tun_min>:<tun_max> タプルのコンマ区切りリスト"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"テナントネットワークの割り振りに使用可能な VXLAN VNI ID の範囲を列挙する "
-"<vni_min>:<vni_max> タプルのコンマ区切りリスト"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr "フォワーダーとして使用される DNS サーバーのカンマ区切りのリスト。"
-
-msgid "Command to execute"
-msgstr "実行するコマンド"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr ""
-"インターフェースドライバーの構成ファイル (l3_agent.ini を使用することもできま"
-"す)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "CIDR %(cidr)s のイーサネットタイプ値 %(ethertype)s が競合しています"
-
-#, fuzzy
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"Neutron セキュリティーグループ API をサーバーで有効にするかどうかを制御しま"
-"す。セキュリティーグループを使用しない場合、または Nova セキュリティーグルー"
-"プ API を使用する場合には、False にする必要があります。"
-
-#, fuzzy, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr "%(time)d 秒間の試行後に %(host)s:%(port)s にバインドできませんでした"
-
-#, fuzzy
-msgid "Could not deserialize data"
-msgstr "データを非直列化することができませんでした"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "作成に失敗しました。%(dev_name)s は既に存在します。"
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr ""
-"現在のゲートウェイ IP %(ip_address)s はポート %(port_id)s によって既に使用さ"
-"れています。更新できません。"
-
-msgid "Currently distributed HA routers are not supported."
-msgstr "現在、分散 HA ルーターはサポートされていません。"
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"DHCP リース期間 (秒)。dnsmasq で無制限のリース時間を使用するよう指示するに"
-"は、-1 を使用します。"
-
-msgid "Default driver to use for quota checks"
-msgstr "割り当て量の検査に使用するデフォルトのドライバー"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"テナント当たりに許可されるリソースのデフォルト数。負の値は無制限を意味しま"
-"す。"
-
-msgid "Default security group"
-msgstr "デフォルトセキュリティグループ"
-
-msgid "Default security group already exists."
-msgstr "デフォルトのセキュリティーグループが既に存在します。"
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"次のフォーマットを使用して拡張サービスのプロバイダーが定義されます: "
-"<service_type>:<name>:<driver>[:default]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-"エージェントが再始動時に既存のポートを更新することが期待される遅延の期間"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "すべてのデバイスを削除して、名前空間を削除します。"
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "ポート %s を削除しています"
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "マッピング: %(mapping)s 内のデバイス %(dev_name)s が固有ではありません"
-
-msgid "Device has no virtual functions"
-msgstr "デバイスに仮想関数が含まれていません"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr "デバイス名 %(dev_name)s が physical_device_mappings にありません"
-
-msgid "Device not found"
-msgstr "デバイスが見つかりません"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr "ホスト %(host)s の分散仮想ルーター MAC アドレスが存在しません。"
-
-msgid "Domain to use for building the hostnames"
-msgstr "ホスト名の作成に使用するドメイン"
-
-msgid "Downgrade no longer supported"
-msgstr "ダウングレードは現在ではサポートされていません"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "ドライバー %s はプロバイダー全体で固有ではありません"
-
-#, fuzzy
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "L2エージェントのセキュリティグループファイアウォールのドライバ"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr ""
-"DHCP エージェントに対するネットワークのスケジューリングに使用するドライバー"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr ""
-"デフォルトの L3 エージェントに対するルーターのスケジューリングに使用するドラ"
-"イバー"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "重複 IP アドレス '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "POST で計測規則が重複しています。"
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "POST に重複するセキュリティーグループルールがあります。"
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "重複するホスト経路 '%s'"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "リスト内で重複する項目: '%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "重複するネームサーバー '%s'"
-
-msgid "Duplicate segment entry in request."
-msgstr "重複するセグメントエントリーが要求に含まれています。"
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "エラー: %s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"エラー: デフォルトの検索パス (~/.neutron/, ~/, /etc/neutron/, /etc/) および "
-"'--config-file' オプションを使用して、構成ファイルが見つかりません。"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr ""
-"パラメーター network_id または router_id のいずれかを _get_ports メソッドに渡"
-"す必要があります。"
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "subnet_id または port_id のいずれかを指定する必要があります"
-
-msgid "Empty physical network name."
-msgstr "物理ネットワーク名が空です。"
-
-msgid "Enable FWaaS"
-msgstr "FWaaS を有効にします"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "仮想ルータのためのHA mode有効化"
-
-msgid "Enable SSL on the API server"
-msgstr "API サーバー上で SSL を有効にします"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"エージェントで VXLAN を有効にしてください。linuxbridge メカニズムドライバーを"
-"使用してエージェントが ml2 プラグインによって管理されているときに、VXLAN を有"
-"効にできます"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"ローカルでの ARP 応答がサポートされている場合、これを有効にします。OVS 2.1 お"
-"よび ML2 l2population ドライバーが必要です。スイッチが、コストのかかる、オー"
-"バーレイへの ARP ブロードキャストを実行せずに、ARP 要求にローカルで応答するよ"
-"うにします (オーバーレイがサポートされている場合)。"
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"admin_state_up が False のエージェントでサービスを有効にします。このオプショ"
-"ンが False の場合、エージェントの admin_state_up が False に変更されると、そ"
-"のエージェントでのサービスは無効になります。admin_state_up が False のエー"
-"ジェントは、このオプションとは無関係に、自動スケジューリング用には選択されま"
-"せん。ただし、このオプションが True の場合、このようなエージェントに対しては"
-"手動スケジューリングが使用可能です。"
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"メタデータプロキシーによるログ監視を有効/無効にします。metadata_proxy_user/"
-"group がログファイルの読み取り/書き込みを許可されていない場合は無効にする必要"
-"があり、logrotate がメタデータプロキシーのログファイルで有効になっている場合"
-"は copytruncate logrotate オプションを使用する必要があります。オプションのデ"
-"フォルト値は metadata_proxy_user から推測されます。監視ログは、"
-"metadata_proxy_user がエージェント有効ユーザーの ID または名前である場合に有"
-"効になります。"
-
-msgid "Encountered an empty component."
-msgstr "空のコンポーネントが検出されました。"
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "VLAN 範囲の終わりが VLAN 範囲の開始より小さくなっています"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr "トンネル範囲の終わりが、トンネル範囲の開始より小さくなっています"
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "FWaaS デバイスドライバーのインポート中にエラーが発生しました: %s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "DNS アドレス %s の解析中にエラーが発生しました"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "%s の読み取り中にエラーが発生しました"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr ""
-"既存のプレフィックスは新規プレフィックスのサブセットでなければなりません"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"ml2 プラグインの l2population メカニズムドライバーとともに使用する拡張機能。"
-"これにより、このプラグインは VXLAN 転送テーブルにデータを追加できるようになり"
-"ます。"
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "エイリアス %s を持つ拡張は存在しません"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "外部 IP %s はゲートウェイ IP と同一です"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"外部ネットワーク %(external_network_id)s は、サブネット %(subnet_id)s から到"
-"達可能ではありません。そのため、ポート %(port_id)s を Floating IP と関連付け"
-"ることができません。"
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr ""
-"外部ネットワーク %(net_id)s は、既存のゲートウェイポートを保持しているため、"
-"このネットワークを外部以外にするための更新は実行できません"
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "ExtraDhcpOpt %(id)s が見つかりませんでした"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr ""
-"FWaaS プラグインがサーバー側で設定されていますが、 L3 エージェントでは FWaaS "
-"は無効になっています。"
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-"ルーター %(router_id)s のスケジュール変更に失敗しました: 適格な L3 エージェン"
-"トが見つかりません。"
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-"L3 エージェント %(agent_id)s に対するルーター %(router_id)s のスケジューリン"
-"グに失敗しました。"
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"%(max_tries)s 回の試行の後、ルーター %(router_id)s のネットワーク "
-"%(network_id)s で VRID を割り振ることができませんでした。"
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"fixed_ips が無効なサブネット %(subnet_id)s に含まれていたため、ネットワーク "
-"%(network_id)s でポートを作成できませんでした"
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr "要求を解析できません。パラメーター '%s' が指定されていません"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr "要求を解析できません。必須属性 '%s' が指定されていません"
-
-msgid "Failed to remove supplemental groups"
-msgstr "補足グループの削除に失敗しました"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "gid %s の設定に失敗しました。"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "uid %s の設定に失敗しました"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "%(ip)s に対する %(type)s トンネルポートをセットアップできませんでした"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "Floating IP %(floatingip_id)s が見つかりませんでした"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr ""
-"TCP/UDP プロトコルの場合、port_range_min は port_range_max 以下でなければなり"
-"ません"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "ip_lib 呼び出しでルートヘルパーを強制的に使用します"
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-"サブネットの重なり合った割り振りプール %(pool_1)s %(pool_2)s が見つかりまし"
-"た%(subnet_cidr)s。"
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"外部ネットワーク %(net_id)s へのゲートウェイは、1 つ以上の Floating IP で必要"
-"なため、ルーター %(router_id)s のゲートウェイを更新できません。"
-
-msgid "Gateway is not valid on subnet"
-msgstr "ゲートウェイがサブネット上で無効です"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr "メタデータプロキシーを初期化後に実行しているグループ (gid または名前)"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"初期化後にメタデータプロキシーを実行しているグループ (gid または名前) (空の場"
-"合: エージェント有効グループ)。"
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr "初期化後にこのプロセスを実行するグループ (gid または名前)"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Neutron が MAC の生成を再試行する回数"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"ICMP コード (port-range-max) %(value)s が指定されましたが、ICMP タイプ (port-"
-"range-min) がありません。"
-
-msgid "ID of network"
-msgstr "ネットワークの ID"
-
-msgid "ID of network to probe"
-msgstr "プローブするネットワークの ID"
-
-msgid "ID of probe port to delete"
-msgstr "削除するプローブポートの ID"
-
-msgid "ID of probe port to execute command"
-msgstr "コマンドを実行するプローブ ポートの ID"
-
-msgid "ID of the router"
-msgstr "ルータの ID"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr ""
-"IP アドレス %(ip_address)s は、指定されたネットワーク上のどのサブネットに対し"
-"ても有効な IP ではありません。"
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr ""
-"IP アドレス %(ip_address)s は、指定されたサブネットに対して有効な IP ではあり"
-"ません。"
-
-msgid "IP address used by Nova metadata server."
-msgstr "Nova メタデータサーバーによって使用される IP アドレス。"
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "IP 割り振りでは subnet_id または ip_address が必要です"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply が、次の一連の iptables 規則の適用に失敗しました: \n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"サブネットは自動アドレス用に構成されているため、IPv6 アドレス %(address)s を"
-"サブネット %(id)s 上のポートに直接割り当てることはできません"
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"外部ルーターから RA を受け取るように構成された IPv6 サブネット %s をNeutron "
-"ルーターに追加することはできません。"
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"True の場合、ネットワークの優先 MTU が認識されていると、ネットワーク方式 "
-"(DHCP および RA MTU オプション) 経由での MTU 設定の VM への通知が試行されま"
-"す。"
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-"True の場合、対応しているプラグインによって VLAN トランスペアレントネットワー"
-"クが作成されます。"
-
-msgid "Illegal IP version number"
-msgstr "IP バージョン番号が正しくありません"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr "サブネットサイズ /%s を割り振るためのプレフィックス空間が不十分です"
-
-msgid "Insufficient rights for removing default security group."
-msgstr "デフォルトのセキュリティーグループを削除するための権限が不十分です。"
-
-msgid "Interface to monitor"
-msgstr "モニター対象のインターフェース"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr "子プロセスの動作状況を確認する間隔 (秒)、無効にするには 0 を指定します"
-
-msgid "Interval between two metering measures"
-msgstr "2 つの計測間の間隔"
-
-msgid "Interval between two metering reports"
-msgstr "2 つの計測レポート間の間隔"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"IPv6 アドレスモードの CIDR %s が無効です。OpenStack ではプレフィックス /64 を"
-"必要とする EUI-64 アドレス形式が使用されます。"
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "無効なデバイス %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-"認証タイプ %(auth_type)s は無効です。有効なタイプは %(valid_auth_types)s です"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "IP プールに無効なデータ形式: '%s'"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "extra-dhcp-opt のデータ形式が無効です: %(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "Fixed IP に無効なデータ形式: '%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "ホスト経路に無効なデータ形式: '%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "ネームサーバーに無効なデータ形式: '%s'"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "ルートの形式が無効です: %(routes)s、%(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "無効な形式: %s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "%(attr)s に無効な入力です。理由: %(reason)s。"
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "操作に無効な入力: %(error_message)s。"
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"無効な入力です。'%(target_dict)s' は、キー %(expected_keys)s を持つディクショ"
-"ナリーでなければなりません。"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr ""
-"インスタンス状態 %(state)s は無効です。有効な状態は %(valid_states)s です"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "無効なマッピング: '%s'"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "無効な PCI スロット %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr ""
-"プロバイダーの指定形式が無効です。最後の部分は 'default' または空にしてくださ"
-"い: %s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "無効な経路: %s"
-
-msgid "Invalid service provider format"
-msgstr "サービスプロバイダーの指定形式が無効です"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"ICMP %(field)s (%(attr)s) %(value)s に無効な値です。これは 0 から 255 までで"
-"なければなりません。"
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "ポート %(port)s の無効値"
-
-msgid "Keepalived didn't respawn"
-msgstr "keepalived が再作成されませんでした"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "マッピング '%(mapping)s' 内のキー %(key)s が固有ではありません"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "limit は整数 0 以上でなければならず、'%d' にはしないようにしてください"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "Denial-of-Service を防ぐためにリースの数を制限してください。"
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"<physical_network>:<vlan_min>:<vlan_max> または <physical_network> のリスト。"
-"このリストには、VLAN プロバイダー/テナントネットワークに使用できる "
-"physical_network 名が指定されるだけでなく、テナントネットワークに割り振ること"
-"ができる各物理ネットワークの VLAN タグの範囲も指定されます。"
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"neutron.ml2.type_drivers 名前空間からロードするネットワークタイプドライバーエ"
-"ンドポイントのリスト。"
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "VXLAN エンドポイントのローカル IP アドレス。"
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "トンネルエンドポイントのローカル IP アドレス。"
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "メタデータプロキシー UNIX ドメインソケットのロケーション"
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "メタデータプロキシーの UNIX ドメインソケットの場所"
-
-msgid "Location of pid file of this process."
-msgstr "このプロセスの pid ファイルのロケーション。"
-
-msgid "Location to store DHCP server config files"
-msgstr "DHCP サーバー設定ファイルを保持する場所"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "IPv6 設定ファイルを格納する場所"
-
-msgid "Location to store child pid files"
-msgstr "子プロセスの PID ファイルを保持する場所"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr "keepalived/conntrackd 設定ファイルを保持する場所"
-
-msgid "MTU setting for device."
-msgstr "デバイスの MTU 設定"
-
-msgid "MTU size of veth interfaces"
-msgstr "veth インターフェースの MTU サイズ"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "L2 エージェントを DVR モードで実行します。"
-
-#, fuzzy
-msgid "Malformed request body"
-msgstr "誤った形式の要求本体"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "許可されたアドレスペアの最大数"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "サブネットあたりのホスト経路の最大数"
-
-msgid "Metering driver"
-msgstr "計測ドライバー"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "計測ラベル %(label_id)s は存在しません"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "計測ラベル規則 %(rule_id)s は存在しません"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"remote_ip_prefix %(remote_ip_prefix)s を持つ計測ラベル規則が他の計測ラベル規"
-"則と重なり合っています"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr ""
-"インターフェース変更の検出に関して ovsdb をモニターすることでポーリングが最小"
-"化されます。"
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "マッピングにキーがありません: '%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "マッピングに値がありません: '%s'"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr ""
-"agent_type=%(agent_type)s および host=%(host)s のエージェントが複数見つかりま"
-"した"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "サービス %s のデフォルトのプロバイダーが複数あります"
-
-#, fuzzy, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "サービス %s の複数のプラグインが構成されていました"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "複数のプロバイダーがサービス %s に対して指定されました"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr ""
-"バルクセキュリティーグループルールの作成で複数の tenant_id は許可されません"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr "ポートの範囲を指定する場合は、プロトコルも指定する必要があります。"
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr "フローの追加または変更について、1 つ以上のアクションを指定してください"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr ""
-"名前「%s」は 1 から 63 文字の長さでなければなりません。それぞれの文字には英数"
-"字またはハイフンを使用できます。"
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "名前「%s」の先頭または末尾にハイフンを使用してはなりません。"
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "使用する Open vSwitch ブリッジの名前"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr ""
-"使用する nova 領域の名前。keystone で複数の領域を管理する場合に役立ちます。"
-
-msgid "Name of the FWaaS Driver"
-msgstr "FWaaS ドライバーの名前"
-
-msgid "Namespace of the router"
-msgstr "ルーターの名前空間"
-
-msgid "Native pagination depend on native sorting"
-msgstr "ネイティブページ編集はネイティブソートに依存します"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "負のデルタ (ダウングレード) はサポートされていません"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "負の相対リビジョン (ダウングレード) はサポートされていません"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "ネットワーク %s は有効な外部ネットワークではありません"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "ネットワーク %s は外部ネットワークではありません"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"IP 範囲 %(parent_range)s (IP 範囲 %(excluded_ranges)s を除く) からのサイズ "
-"%(size)s のネットワークが見つかりませんでした。"
-
-msgid "Network that will have instance metadata proxied."
-msgstr "インスタンスメタデータがプロキシー処理されるネットワーク。"
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "ネットワークタイプ値「%s」はサポートされていません"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "ネットワークタイプ値が ML2 プラグインに必要です"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr ""
-"エージェントによってサポートされるネットワークタイプ (gre と vxlan のいずれ"
-"か、または両方)。"
-
-msgid "Neutron Service Type Management"
-msgstr "Neutron サービスタイプ管理"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "Neutron core_plugin が設定されていません。"
-
-msgid "Neutron plugin provider module"
-msgstr "Neutronプラグインプロバイダモジュール"
-
-msgid "Neutron quota driver class"
-msgstr "Neutron 割り当て量ドライバークラス"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr ""
-"外部ネットワーク %s に関連付けられる適格な L3 エージェントが見つかりません"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "ネットワーク %(net_id)s で使用可能な IP アドレスはこれ以上ありません。"
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"ルーター %(router_id)s の作成中、これ以上の仮想ルーター ID (VRID) がありませ"
-"んでした。テナントごとの HA ルーター数の制限は 254 です。"
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "'%s' サービスに対して指定されたプロバイダーはありません。終了します"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"%(router_type)s ルーター %(router_id)s を既存の DVR ノードから別のL3 エージェ"
-"ント %(agent_id)s に手動で割り当てることは許可されていません。"
-
-msgid "Not authorized."
-msgstr "許可されていません。"
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"HA を確実にするためには、l3 エージェントが十分ではありません。必要な最小数 "
-"%(min_agents)s、使用可能 %(num_agents)s。"
-
-#, fuzzy
-msgid "Number of RPC worker processes for service"
-msgstr "サービスの RPC ワーカプロセスの数"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr "メタデータサーバーソケットの構成に使用されるバックログ要求の数"
-
-#, fuzzy
-msgid "Number of backlog requests to configure the socket with"
-msgstr "ソケットを構成するためのバックログ要求の数"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr ""
-"テナント当たりに許可される Floating IP 数。負の値は無制限を意味します。 "
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr "テナント当たりに許可されるネットワーク数。負の値は無制限を意味します。"
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr "テナント当たりに許可されるポート数。負の値は無制限を意味します。"
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr "テナント当たりに許可されるルーター数。負の値は無制限を意味します。"
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr "送信するイベントがある場合の nova へのイベント送信間の秒数。"
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "listen を試行し続ける秒数"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"テナント当たりに許可されるセキュリティーグループ数。負の値は無制限を意味しま"
-"す。 "
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"テナント当たりに許可されるセキュリティールール数。負の値は無制限を意味しま"
-"す。 "
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr "テナント当たりに許可されるサブネット数。負の値は無制限を意味します。"
-
-msgid "Only admin can view or configure quota"
-msgstr "admin のみが割り当て量を表示または構成できます"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr "別のテナントの割り当て量へのアクセスが許可されているのは管理者のみです"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr "一度に 1 つのセキュリティープロファイルのルールのみを更新できます"
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "remote_ip_prefix または remote_group_id のみを指定できます。"
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"操作 %(op)s はポート %(port_id)s の device_owner %(device_owner)s ではサポー"
-"トされていません。"
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr "デフォルトの dnsmasq 設定をこのファイルで上書きします"
-
-msgid "Owner type of the device: network/compute"
-msgstr "デバイスの所有者タイプ: network/compute"
-
-msgid "POST requests are not supported on this resource."
-msgstr "POST 要求は、このリソースではサポートされていません。"
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "bridge_mappings の解析に失敗しました: %s。"
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "サポートされている pci_vendor_devs の解析に失敗しました"
-
-msgid "Path to PID file for this process"
-msgstr "このプロセスの PID ファイルのパス"
-
-msgid "Path to the router directory"
-msgstr "ルーターディレクトリーのパス"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr "トンネルブリッジの統合ブリッジ内のピアパッチポート。"
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr "統合ブリッジのトンネルブリッジ内のピアパッチポート。"
-
-msgid "Ping timeout"
-msgstr "ping タイムアウト"
-
-msgid "Plugin does not support updating provider attributes"
-msgstr "プラグインでは、プロバイダー属性の更新はサポートされていません"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "ポート %(id)s に Fixed IP %(address)s がありません"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"ポート %(port_id)s は、Floating IP %(floatingip_id)s とは異なるテナントに関連"
-"付けられているため、バインドできません。"
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr ""
-"ポートセキュリティーは、ポート上で許可されたアドレスペアを持つために有効にす"
-"る必要があります。"
-
-msgid "Port does not have port security binding."
-msgstr "ポートにポートセキュリティーバインディングがありません。"
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-"ポートにセキュリティーグループが関連付けられています。セキュリティーグループ"
-"を削除するまで、ポートセキュリティーおよび IP アドレスを使用不可にすることは"
-"できません"
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr ""
-"セキュリティーグループを使用するには、ポートセキュリティーを使用可能にする必"
-"要があり、ポートには IP アドレスが必要です。"
-
-msgid "Private key of client certificate."
-msgstr "クライアント証明書の秘密鍵。"
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "プローブ %s が削除されました"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "作成されたプローブ: %s "
-
-msgid "Process is already started"
-msgstr "プロセスが既に実行されています"
-
-msgid "Process is not running."
-msgstr "プロセスが実行されていません"
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "Nova メタデータ、http、または https にアクセスするためのプロトコル"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"集中状態を緩和するため、定期タスクスケジューラーの開始時に挿入するランダムな"
-"遅延時間 (秒) の範囲。(無効にするには 0 に設定)"
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr "リモートメタデータサーバーで内部サーバーエラーが発生しました。"
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"負荷がエージェントによって報告されているリソースタイプを表します。このタイプ"
-"には、「networks」、「subnets」、または「ports」があります。指定した場合 (デ"
-"フォルトは networks)、サーバーは、エージェントレポート状態 (report_interval "
-"ごとに消費されるリソース数) からそのエージェント構成オブジェクトの一部として"
-"送信された特定の負荷を抽出します。dhcp_load_type は network_scheduler_driver "
-"= neutron.scheduler.dhcp_agent_scheduler.WeightScheduler と組み合わせて使用で"
-"きます。network_scheduler_driver が WeightScheduler の場合、dhcp_load_type は"
-"平衡を取るリソースの選択肢を表すように構成することができます。例: "
-"dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr "要求が失敗しました。要求の処理中に内部サーバーエラーが発生しました。"
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"重複するアドレスペアが要求に含まれています: mac_address %(mac_address)s "
-"ip_address %(ip_address)s"
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"ネットワーク %(network_id)s の CIDR %(cidr)s を持つ要求されたサブネットは、別"
-"のサブネットとオーバーラップしています"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"リソース '%(resource_id)s' は既にサービスタイプ '%(service_type)s' のプロバイ"
-"ダー '%(provider)s' に関連付けられています"
-
-msgid "Resource body required"
-msgstr "リソース本体が必要です"
-
-msgid "Resource not found."
-msgstr "リソースが見つかりません。"
-
-msgid "Resources required"
-msgstr "リソースが必要です"
-
-msgid "Root helper daemon application to use when possible."
-msgstr "利用可能な場合に使用するルートヘルパーのデーモンアプリケーション。"
-
-msgid "Root permissions are required to drop privileges."
-msgstr "特権を除去するにはルート許可が必要です。"
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "ルーター %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "ルーター %(router_id)s が見つかりませんでした"
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr ""
-"ルーター %(router_id)s に、ID %(port_id)s のインターフェースがありません"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr ""
-"ルーター %(router_id)s に、サブネット %(subnet_id)s 上のインターフェースがあ"
-"りません"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "ルーターに、既にサブネット %s 上のポートがあります"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"ルーター %(router_id)s 上のサブネット %(subnet_id)s のルーターインターフェー"
-"スは、1 つ以上の Floating IP で必要なため削除できません。"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"ルーター %(router_id)s 上のサブネット %(subnet_id)s のルーターインターフェー"
-"スは、1 つ以上のルートで必要なため削除できません。"
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr "接続済みインスタンスのメタデータがプロキシー処理されるルーター。"
-
-msgid "Run as daemon."
-msgstr "デーモンとして実行します。"
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"サーバーにノードが状態を報告する間隔 (秒)。agent_down_time 未満であるべきで"
-"す。agent_down_time の半分以下であれば最良です。"
-
-msgid "Seconds between running periodic tasks"
-msgstr "定期タスクの実行間隔 (秒)"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"エージェントがダウンしていると見なすまでの時間 (秒)。エージェントが完全にダウ"
-"ンしていることを確実にするには、この値を少なくとも report_interval の 2 倍に"
-"してください。"
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "セキュリティーグループ %(id)s は存在しません"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "セキュリティーグループルール %(id)s は存在しません"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr ""
-"セキュリティーグループルールが既に存在します。ルール ID は %(id)s です。"
-
-msgid "Segments and provider values cannot both be set."
-msgstr "セグメントとプロバイダーの両方を設定することはできません。"
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"nova がそのキャッシュを更新できるように、ポートデータ (fixed_ips/floatingip) "
-"が変更されたときに通知を nova に送信します。"
-
-msgid "Send notification to nova when port status changes"
-msgstr "ポート状況の変更時に通知を nova に送信"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"HA セットアップ用に指定数の Gratuitous ARP を送信します。値が 0 以下の場合、"
-"この機能は無効です"
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-"サービスタイプ %(service_type)s のサービスプロバイダー '%(provider)s' は見つ"
-"かりませんでした"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr ""
-"サービスタイプ %(service_type)s にはデフォルトのサービスプロバイダーがありま"
-"せん"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"エージェントによる SIGTERM 受信後の新規 rpc 呼び出しの新規タイムアウト(秒) を"
-"設定します。値を 0 に設定すると、rpc タイムアウトは変更されません"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"GRE/VXLAN トンネルを構成した発信 IP パケットで、フラグメント禁止 (DF) ビット"
-"を設定または設定解除します。"
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"一部のテナントには、名前が「default」の複数のセキュリティーグループがありま"
-"す: %(duplicates)s。データベースをアップグレードする前に、重複する「default」"
-"セキュリティーグループをすべて解決する必要があります。"
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr ""
-"認証されているテナント以外の 'tenant_id' を要求で指定するには、管理者特権が必"
-"要です"
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr "ルーターインターフェースのサブネットにはゲートウェイ IP が必要です"
-
-msgid "Subnet pool has existing allocations"
-msgstr "サブネットプールに既存の割り振りがあります"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr "l3 HA 管理ネットワークに使用されるサブネット。"
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr ""
-"テナントで作成可能なルーターのタイプを判別するためのシステム全体のフラグ。管"
-"理者のみがオーバーライドできます。"
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "メタデータサーバー要求を listen するための TCP ポート。"
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr "Neutron メタデータ名前空間プロキシーが使用する TCP Port"
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Nova メタデータサーバーによって使用される TCP ポート。"
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "TLD「%s」をすべて数値にすることはできません"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "vxlan インターフェースプロトコルパケットの TOS。"
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "vxlan インターフェースプロトコルパケットの TTL。"
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr ""
-"テナント %(tenant_id)s は、このネットワークでの %(resource)s の作成を許可され"
-"ていません"
-
-msgid "Tenant network creation is not enabled."
-msgstr "テナントネットワークの作成は使用できません。"
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"Neutron に複数の外部ネットワークがあるため、このエージェントに対して "
-"'gateway_external_network_id' オプションを設定する必要があります。"
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr "構成ファイルに指定されている HA ネットワーク CIDR が無効です: %(cidr)s"
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "VXLAN トンネルで使用する UDP ポート。"
-
-msgid "The advertisement interval in seconds"
-msgstr "通知間隔 (秒)"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "割り振りプール %(pool)s が無効です。"
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-"割り振りプール %(pool)s がサブネット CIDR %(subnet_cidr)s を越えています。"
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr ""
-"属性 '%(attr)s' は他のリソースへの参照であり、ソート '%(resource)s' によって"
-"使用できません"
-
-msgid "The core plugin Neutron will use"
-msgstr "Neutron が使用するコアプラグイン"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "DHCP サーバーの管理に使用されるドライバー。"
-
-msgid "The driver used to manage the virtual interface."
-msgstr "仮想インターフェースの管理に使用されるドライバー。"
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"次の device_id %(device_id)s はテナントによって所有されていないか、または別の"
-"テナントルーターと一致します。"
-
-msgid "The host IP to bind to"
-msgstr "バインド先のホスト IP"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "OVSDB と相互作用するためのインターフェース"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-"1 回の応答で最大数の項目が返されました。値は 'infinite' または (無制限を意味"
-"する) 負の整数でした"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr ""
-"ネットワーク %(network_id)s は、既に DHCP エージェント %(agent_id)s によって"
-"ホストされています。"
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr ""
-"ネットワーク %(network_id)s は DHCP エージェント %(agent_id)s によってホスト"
-"されていません。"
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr "許可されたアドレスペアの数が最大の %(quota)s を超えています。"
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr ""
-"ローカルデバイスの変更のポーリング間にエージェントが待機する間隔 (秒)。"
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-"ovsdb モニターとの通信が途絶えた後で ovsdb モニターを再作成する前に待機する時"
-"間 (秒)"
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "sort_keys と sort_dirs の数は同じでなければなりません"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "ポート '%s' が削除されました"
-
-msgid "The port to bind to"
-msgstr "バインド先のポート"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "要求されたコンテンツタイプ %s は無効です"
-
-msgid "The resource could not be found."
-msgstr "リソースが見つかりませんでした。"
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr ""
-"ルーター %(router_id)s は、既に L3 エージェント %(agent_id)s によってホストさ"
-"れています。"
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr ""
-"サーバーに誤りがあるか、または要求された操作を実行することができません。"
-
-msgid "The service plugins Neutron will use"
-msgstr "Neutron が使用するサービスプラグイン"
-
-msgid "The type of authentication to use"
-msgstr "使用する認証のタイプ"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "%(element)s の値「%(value)s」は無効です。"
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"エージェントの処理モード。許可されるモードは次のとおりです。「既存」 - この"
-"モードは、L3 エージェントを中央ネットワーキングノードにデプロイして L3 サービ"
-"ス (DNAT や SNAT など) を提供する、既存の動作を保持します。DVR を採用しない場"
-"合、このモードを使用します。「dvr」- このモードは、DVR 機能を有効にします。計"
-"算ホスト上で実行される L3 エージェントの場合、このモードを使用する必要があり"
-"ます。「dvr_snat」- このモードは、DVR とともに中央 SNAT サポートを有効にしま"
-"す。中央ノード (または devstack などの単一ホストでのデプロイメント) 上で実行"
-"中の L3 の場合、このモードを使用する必要があります。"
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"すべての OpenvSwitch ブリッジですべてのポートを削除する場合は True。統合およ"
-"び外部ネットワークブリッジで Neutron によって作成されたポートを削除する場合"
-"は False。"
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "トンネル IP 値が ML2 プラグインに必要です"
-
-msgid "Tunnel bridge to use."
-msgstr "使用するトンネルブリッジ。"
-
-msgid "URL to database"
-msgstr "データベースへの URL"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "%s にアクセスできません"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr ""
-"%(router_id)s の操作を完了できません。ルートの数が最大数 %(quota)s を超えてい"
-"ます。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"%(subnet_id)s の操作を完了できません。DNS ネームサーバーの数が制限 %(quota)s "
-"を超えています。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr ""
-"%(subnet_id)s の操作を完了できません。ホスト経路の数が制限 %(quota)s を超えて"
-"います。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-"ネットワーク %(net_id)s の操作を完了できません。IP アドレス %(ip_address)s は"
-"使用中です。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr ""
-"ネットワーク %(net_id)s の操作を完了できません。MAC アドレス %(mac)s は使用中"
-"です。"
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"ネットワーク %(net_id)s で操作を完了できません。ネットワークでまだ使用中の"
-"ポートが 1 つ以上あります。"
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"ネットワーク %(net_id)s のポート %(port_id)s で操作を完了できません。ポートに"
-"は既に接続されたデバイス %(device_id)s があります。"
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "%s で値を変換できません"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "エージェントゲートウェイポートの作成ができません"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "SNATインターフェースポートの作成ができません"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr ""
-"フラットネットワークを作成できません。物理ネットワーク %(physical_network)s "
-"は使用中です。"
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr ""
-"ネットワークを作成できません。許可される最大試行回数で、使用可能なネットワー"
-"クが見つかりません。"
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr ""
-"ネットワークを作成できません。テナントネットワークは割り振りに使用できませ"
-"ん。"
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"ネットワークを作成できません。物理ネットワーク %(physical_network)s 上の "
-"VLAN %(vlan_id)s は使用中です。"
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr "ネットワークを作成できません。トンネル ID %(tunnel_id)s は使用中です。"
-
-#, fuzzy, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "%s のMACアドレス収集ができません"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "要求本体で '%s' が見つかりません"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr "外部ネットワーク %(net_id)s 上で IP アドレスが見つかりません"
-
-#, fuzzy, python-format
-msgid "Unable to find resource name in %s"
-msgstr "%sの中のリソース名を見つけることはできません"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "EUI64 によって IPv4 プレフィックスの IP アドレスを生成できません"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "ホスト %(host)s に固有の DVR MAC を生成できません。"
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "ネットワーク %(net_id)s で固有の MAC を生成できません。"
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-"%s からターゲットフィールドを特定できません。一致の形式は %%(<field_name>)s "
-"でなければなりません"
-
-#, fuzzy, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr ""
-"一致 %(match)s を親リソースとして検査できません: %(res)s が見つかりませんでし"
-"た"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "予期しない応答コード: %s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "予期しない応答: %s"
-
-msgid "Unimplemented commands"
-msgstr "実装されていないコマンド"
-
-msgid "Unknown API version specified"
-msgstr "不明な API バージョンが指定されました"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "属性「%s」が不明です。"
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "不明なチェーン: %r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "不明な割り当て量リソース %(unknown)s。"
-
-msgid "Unmapped error"
-msgstr "マップ解除エラー"
-
-msgid "Unrecognized action"
-msgstr "認識されないアクション"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "認識されない属性 '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "サポートされない Content-Type"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "サポートされないネットワークタイプ %(net_type)s"
-
-msgid "Unsupported request type"
-msgstr "サポートされない要求タイプです"
-
-msgid "Updating default security group not allowed."
-msgstr "デフォルトのセキュリティーグループの更新は許可されません。"
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"リモート MAC および IP を認識してトンネルのスケーラビリティーを向上させるに"
-"は、ML2 l2population メカニズムドライバーを使用してください。"
-
-msgid "Use broadcast in DHCP replies"
-msgstr "DHCPリプライでブロードキャストを使う"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr "--delta と相対リビジョンの両方ではなく一方を使用してください"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr "メタデータプロキシーを初期化後に実行しているユーザー (uid または名前)"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"初期化後にメタデータプロキシーを実行しているユーザー (uid または名前) (空の場"
-"合: エージェント有効ユーザー)。"
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr "初期化後にこのプロセスを実行するユーザー (uid または名前)"
-
-msgid "VRRP authentication password"
-msgstr "VRRP 認証パスワード"
-
-msgid "VRRP authentication type"
-msgstr "VRRP 認証タイプ"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"ディクショナリーのキーの検証に失敗しました。予期されたキー: "
-"%(expected_keys)s、指定されたキー: %(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "バリデーター '%s' は存在しません。"
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "マッピング '%(mapping)s' 内の値 %(value)s が固有ではありません"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"ファイルログを監視します。metadata_proxy_user/group にメタデータプロキシーの"
-"ログファイルに対する読み取り/書き込み許可がない場合は、ログ監視を無効にする必"
-"要があります。"
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr ""
-"Neutron 状態ファイルの保管場所。このディレクトリーは、エージェントが書き込み"
-"を行える場所でなければなりません。"
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"IPv6 では、自動的に割り当てられたリンクローカルアドレス (LLA) を使用できるた"
-"め、外部ゲートウェイに使用するネットワークにはサブネットを関連付ける必要はあ"
-"りません。ただし、IPv6 ゲートウェイアドレスはデフォルト経路のネクストホップと"
-"して使用するために必要です。IPv6 ゲートウェイアドレスをここで構成しない場合に"
-"のみ、上流ルーターのルーター広告 (RA) からデフォルト経路を取得するように "
-"Neutron ルーターが構成されます。この場合、これらの RA を送信するように上流"
-"ルーターを構成することも必要です。ipv6_gateway を構成する場合、これは上流ルー"
-"ター上のインターフェースの LLA でなければなりません。グローバルユニークアドレ"
-"ス (GUA) を使用したネクストホップが必要な場合は、このパラメーターを使用するの"
-"ではなく、ネットワークに割り振られたサブネットを介してこれを行う必要がありま"
-"す。"
-
-msgid "You must implement __call__"
-msgstr "__call__ を実装する必要があります"
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-"ブリッジの構成ファイルとして --config-file または "
-"env[NEUTRON_TEST_CONFIG_FILE] のいずれかを指定する必要があります"
-
-msgid "You must provide a revision or relative delta"
-msgstr "改訂または相対デルタを指定する必要があります"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr "allocation_pools は特定のサブネット要求にのみ許可されます。"
-
-msgid "binding:profile value too large"
-msgstr "binding:profile 値が大きすぎます"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "cidr と prefixlen を同時に指定してはなりません"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr ""
-"dhcp_agents_per_network は 1 以上でなければなりません。'%s' は無効です。"
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "fixed_ip_address は、port_id なしでは指定できません"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "デバイス所有者 %s"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "ip コマンドがデバイス %(dev_name)s で失敗しました: %(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "ip リンク機能 %(capability)s はサポートされていません"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "ip リンクコマンドはサポートされていません: %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr "cidr および subnetpool_id がない場合、ip_version の指定は必須です"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ip_version が 4 の場合、ipv6_address_mode は無効です"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ip_version が 4 の場合、ipv6_ra_mode は無効です"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"enable_dhcp が False に設定されている場合、ipv6_ra_mode または "
-"ipv6_address_mode を設定することはできません。"
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"ipv6_ra_mode が '%(ra_mode)s' に、ipv6_address_mode が '%(addr_mode)s' に設定"
-"されていますが、これは無効です。両方の属性を設定する場合、これらは同じ値でな"
-"ければなりません"
-
-msgid "mac address update"
-msgstr "mac アドレス更新"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"max_l3_agents_per_router %(max_agents)s 構成パラメーターが無効です。"
-"min_l3_agents_per_router %(min_agents)s 以上でなければなりません。"
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"min_l3_agents_per_router 構成パラメーターが無効です。HA では、%s 以上でなけれ"
-"ばなりません。"
-
-msgid "network_type required"
-msgstr "network_type が必要です"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "network_type 値 '%s' はサポートされていません"
-
-msgid "new subnet"
-msgstr "新規サブネット"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "VLAN プロバイダーネットワークの physical_network '%s' が不明です"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "flat プロバイダーネットワークの physical_network '%s' が不明です"
-
-msgid "physical_network required for flat provider network"
-msgstr "flat プロバイダーネットワークには physical_network が必要です"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "%s ネットワークに provider:physical_network が指定されました"
-
-msgid "record"
-msgstr "レコード"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "respawn_interval は、指定する場合は 0 以上にする必要があります。"
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id が範囲 (%(min)s から %(max)s) 外です"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr ""
-"segmentation_id には、VLAN プロバイダーネットワークの physical_network が必要"
-"です"
-
-msgid "the nexthop is not connected with router"
-msgstr "ルーターによってネクストホップが接続されていません"
-
-msgid "the nexthop is used by router"
-msgstr "ネクストホップがルーターによって使用されています"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"UUID がコマンドラインに指定されたため、external_process で /proc/cmdline イン"
-"ターフェースを追跡できます。"
diff --git a/neutron/locale/ko_KR/LC_MESSAGES/neutron.po b/neutron/locale/ko_KR/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index da489ce..0000000
+++ /dev/null
@@ -1,2293 +0,0 @@
-# Korean (South Korea) translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-10-31 07:48+0000\n"
-"Last-Translator: ChungYoung Cho <openstack.cho@gmail.com>\n"
-"Language: ko_KR\n"
-"Language-Team: Korean (South Korea)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"명령: %(cmd)s\n"
-"종료 코드: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: 내부 드라이버 오류."
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s이(가) 올바른 %(type)s ID가 아님"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"%(invalid_dirs)s은(는) sort_dirs에 대해 올바르지 않은 값이며, 올바른 값은 "
-"'%(asc)s' 및 '%(desc)s'입니다. "
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(tunnel)s 제공자 네트워크에 대해 %(key)s이(가) 금지됨"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"네트워크 설정 %(current)s과(와) 함께 %(method)s이(가) 호출됨(원래 설정 "
-"%(original)s) 및 네트워크 세그먼트 %(segments)s"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"%(method)s이(가) 서브넷 설정 %(current)s과(와) 함께 호출됨(원래 설정 "
-"%(original)s)"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s이(가) 실패함"
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr ""
-"%(name)s '%(addr)s'이(가) ip_version '%(ip_version)s'과(와) 일치하지 않음"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "%s은(는) 오프라인 모드 중 호출할 수 없습니다. "
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "%s이(는) sort_keys에 대해 올바르지 않은 속성입니다. "
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "%s이(는) sort_keys에 대해 올바르지 않은 속성입니다. "
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s이(가) 올바른 VLAN 태그가 아님"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr ""
-"%s은(는) get_port_from_device 또는 get_ports_from_devices를 구현해야 합니다."
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "VLAN 제공자 네트워크에 대해 %s이(가) 금지됨"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "플랫 제공자 네트워크에 대해 %s이(가) 금지됨"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "로컬 제공자 네트워크에 대해 %s이(가) 금지됨"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s'이(가) %(max_len)s의 최대 길이를 초과함"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s'이(가) %(valid_values)s에 없음"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s'이(가) 너무 큼 - '%(limit)d' 이하여야 함"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr "'%(data)s'이(가) 너무 작음 - 최소 '%(limit)d'이어야 함 "
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr ""
-"'%(data)s'이(가) 인식된 IP 서브넷 cidr이 아닙니다. '%(cidr)s'이(가) 권장됩니"
-"다. "
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s'은(는) 올바른 이름 서버가 아닙니다. %(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s' 공백 문자열이 허용되지 않음"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "'%s'은(는) 부울로 변환될 수 없음"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s'에 공백 문자가 있습니다"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "'%s'이(가) 사전이 아님"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s'이(가) 목록이 아님"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s'이(가) 올바른 IP 주소가 아님"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s'이(가) 올바른 IP 서브넷이 아님"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s'이(가) 올바른 MAC 주소가 아님"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s'이(가) 올바른 UUID가 아님"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s'은(는) 올바른 부울린 값이 아닙니다"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "'%s'이(가) 올바른 입력이 아님"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s'이(가) 올바른 문자열이 아님"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s'이(가) 정수가 아님"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s'이(가) 정수 또는 uuid가 아님"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s'의 양식이 <key>=[value]가 아님"
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "'%s'은(는) 음수가 아니어야 함"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "0은 CIDR 접두부 길이로 허용되지 않음"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "서브넷 풀이 없는 경우 cidr을 지정해야 함"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"MTU 값에 대한 실제 네트워크의 맵핑 목록입니다. 맵핑 형식은 <physnet>:<mtu "
-"val>입니다. 이 맵핑을 사용하면 기본 segment_mtu 값과 다른 실제 네트워크 MTU "
-"값을 지정할 수 있습니다."
-
-msgid "A metering driver must be specified"
-msgstr "측정 드라이버를 지정해야 함"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr "Neutron 고급 서비스에 대한 서비스 제공자를 검색하기 위한 API"
-
-msgid "Access to this resource was denied."
-msgstr "이 자원에 대한 액세스가 거부되었습니다."
-
-msgid "Action to be executed when a child process dies"
-msgstr "하위 프로세스가 정지될 때 조치가 실행됨"
-
-msgid "Adds external network attribute to network resource."
-msgstr "외부 네트워크 속성을 네트워크 자원에 추가합니다."
-
-msgid "Adds test attributes to core resources."
-msgstr "코어 자원에 테스트 속성을 추가합니다."
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "%(id)s 에이전트를 찾을 수 없음"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "%(id)s 에이전트가 L3 에이전트가 아니거나 사용 안함 상태임"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "%(id)s 에이전트가 올바른 DHCP 에이전트가 아니거나 사용 안함 상태임"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "업데이트된 에이전트: %(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr "agent_type=%(agent_type)s 및 host=%(host)s인 에이전트를 찾을 수 없음"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "DHCP 에이전트에 대한 네트워크 자동 스케줄링을 허용합니다. "
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "L3 에이전트에 대한 라우터 자동 스케줄링을 허용합니다."
-
-msgid "Allow running metadata proxy."
-msgstr "메타데이터 프록시 실행을 허용합니다."
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr "DHCP 에이전트에 자원 조작 알림 전송 허용"
-
-msgid "Allow the usage of the bulk API"
-msgstr "벌크 API 사용 허용"
-
-msgid "Allow the usage of the pagination"
-msgstr "페이지 번호 매기기 사용 허용"
-
-msgid "Allow the usage of the sorting"
-msgstr "정렬 사용 허용"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr "nova 메타데이터에 대한 비보안 SSL(https) 요청 수행 허용"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair에 ip_address가 포함되어야 함"
-
-msgid "An interface driver must be specified"
-msgstr "인터페이스 드라이버가 지정되어야 함"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"neutron.ml2.mechanism_drivers 네임스페이스로부터 로드할 네트워킹 메커니즘 드"
-"라이버 시작점의 정렬된 목록입니다."
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr "알 수 없는 오류가 발생했습니다. 요청을 다시 시도하십시오. "
-
-msgid "An unknown exception occurred."
-msgstr "알 수 없는 예외가 발생했습니다. "
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "'%s' 속성은 POST에서 허용되지 않음"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "오프라인 DHCP 에이전트에서 네트워크를 자동으로 제거합니다."
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr ""
-"오프라인 L3 에이전트부터 온라인 L3 에이전트까지 라우트를 자동으로 다시 스케줄"
-"합니다."
-
-msgid "Available commands"
-msgstr "사용 가능한 명령"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "백엔드는 VLAN 투명도를 지원하지 않습니다."
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"EUI-64에 의해 IPv6 주소를 생성하기 위한 접두부 또는 mac 형식이 잘못되었습니"
-"다. %(prefix)s, %(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr "EUI-64에 의해 IPv6 주소를 생성하기 위한 접두부 유형이 잘못됨: %s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "기본 MAC: %s"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "%(bridge)s 브릿지가 존재하지 않습니다. "
-
-msgid "Bulk operation not supported"
-msgstr "벌크 오퍼레이션은 지원되지 않음"
-
-msgid "CIDR to monitor"
-msgstr "모니터할 CIDR"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr "gateway_ip를 갖지 않는 %s 서브넷의 포트에 부동 IP를 추가할 수 없음"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr "사용 가능한 접두부 세트에서 요청한 서브넷을 할당할 수 없음"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"고정 IP는 외부 네트워크 %(net_id)s에서 부동 IP를 가지고 있기 때문에 고정 IP "
-"%(fixed_ip)s을(를) 사용하여 부동 IP %(floating_ip_address)s(%(fip_id)s)을"
-"(를) 포트 %(port_id)s과(와) 연관시킬 수 없습니다. "
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr ""
-"부동 IP를 작성하여 포트 %s에 바인드할 수 없습니다. 해당 포트를 다른 테넌트가 "
-"소유하기 때문입니다. "
-
-msgid "Cannot create resource for another tenant"
-msgstr "다른 테넌트에 대한 자원을 작성할 수 없음"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "ipv6 속성이 설정된 enable_dhcp를 사용할 수 없음"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"모두 IPv6 서브넷이 있는 경우 같은 네트워크 ID를 사용하는 라우터 포트를 여러 "
-"개 사용할 수 없음. 기존 포트 %(p)s에 IPv6 서브넷 및 네트워크 ID %(nid)s이"
-"(가) 있음."
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"%(agent_mode)s L3 에이전트 %(agent_id)s에서 %(router_type)s 라우터 "
-"%(router_id)s을(를) 호스팅할 수 없습니다."
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr "플로우 삭제 또는 수정 시 우선순위와 일치할 수 없음"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "subnet-id와 port-id를 둘 다 지정할 수 없음"
-
-msgid "Cannot understand JSON"
-msgstr "JSON을 이해할 수 없음"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "읽기 전용 속성 %s을(를) 업데이트할 수 없음"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr "ssl용 인증 기관 공개 키(CA cert) 파일 "
-
-msgid "Check for ARP responder support"
-msgstr "ARP 응답기 지원 확인"
-
-msgid "Check for OVS vxlan support"
-msgstr "OVS vxlan 지원 확인"
-
-msgid "Check for VF management support"
-msgstr "VF 관리 지원 확인"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "iproute2 vxlan 지원 확인"
-
-msgid "Check for nova notification support"
-msgstr "nova 알림 지원 확인"
-
-msgid "Check for patch port support"
-msgstr "패치 포트 지원 확인"
-
-msgid "Check minimal dnsmasq version"
-msgstr "최소 dnsmasq 버전 확인"
-
-msgid "Check netns permission settings"
-msgstr "netns 권한 설정 확인"
-
-msgid "Check ovsdb native interface support"
-msgstr "ovsdb 네이티브 인터페이스 지원 확인"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"서브넷 %(subnet_id)s의 cidr %(subnet_cidr)s이(가) 서브넷 %(sub_id)s의 cidr "
-"%(cidr)s과(와) 겹침"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "nova 메타데이터 api 서버에 대한 클라이언트 인증서입니다."
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"테넌트 네트워크 할당에 사용 가능한 GRE 터널 ID의 범위를 열거한 <tun_min>:"
-"<tun_max> 튜플을 쉼표로 구분한 목록입니다."
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"테넌트 네트워크 할당에 사용 가능한 VXLAN VNI ID의 범위를 열거한  <vni_min>:"
-"<vni_max> 튜플의 쉼표로 구분된 목록입니다. "
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr "쉼표로 분리된 DNS 서버의 목록이며 전달자로 사용됩니다."
-
-msgid "Command to execute"
-msgstr "실행할 명령"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr "인터페이스 드라이버에 대한 구성 파일(l3_agent.ini도 사용할 수 있음)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "CIDR %(cidr)s에 대한 충돌하는 값 ethertype %(ethertype)s"
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"서버에서 neutron 보안 그룹 API가 사용되는지 여부를 제어합니다.보안 그룹을 사"
-"용하지 않거나 nova 보안 그룹 API를 사용할 때는 false이어야 합니다."
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr "%(time)d후 시도한 다음 %(host)s:%(port)s에 바인딩할 수 없습니다"
-
-msgid "Could not deserialize data"
-msgstr "데이터를 직렬화 해제할 수 없음"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "작성 실패. %(dev_name)s이(가) 이미 존재합니다. "
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr ""
-"현재 게이트웨이 ip %(ip_address)s을(를) 포트 %(port_id)s에서 이미 사용하고 있"
-"습니다.업데이트할 수 없습니다."
-
-msgid "Currently distributed HA routers are not supported."
-msgstr "현재 분배된 HA 라우터는 지원되지 않습니다. "
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"DHCP 리스 기간(초)입니다. dnsmasq에 무한 리스 시간을 사용하도록 지시하려면 -1"
-"을 사용하십시오."
-
-msgid "Default driver to use for quota checks"
-msgstr "할당량 검사에 사용할 기본 드라이버"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr "테넌트당 허용되는 기본 자원 수입니다. 음수 값은 무제한을 의미합니다."
-
-msgid "Default security group"
-msgstr "기본 보안 그룹"
-
-msgid "Default security group already exists."
-msgstr "기본 보안 그룹이 이미 존재합니다. "
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"다음 형식을 사용하여 고급 서비스에 대한 제공자 정의: <service_type>:<name>:"
-"<driver>[:default]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-"에이전트를 다시 시작할 경우 에이전트가 기존 포트를 업데이트할 것으로 예상되"
-"는 지연 시간"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "모든 디바이스를 제거하여 네임스페이스를 삭제하십시오. "
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "포트 %s 삭제 중"
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "%(mapping)s 맵핑의 %(dev_name)s 디바이스가 고유하지 않음"
-
-msgid "Device has no virtual functions"
-msgstr "디바이스에 가상 기능이 없음"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr "physical_device_mappings에서 디바이스 이름 %(dev_name)s이(가) 누락됨"
-
-msgid "Device not found"
-msgstr "디바이스를 찾을 수 없음"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr "%(host)s 호스트의 분산 가상 라우터 Mac 주소가 없습니다."
-
-msgid "Domain to use for building the hostnames"
-msgstr "호스트 이름 빌드에 사용할 도메인"
-
-msgid "Downgrade no longer supported"
-msgstr "다운그레이드는 현재 지원하지 않음"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "%s 드라이버가 제공자에서 고유하지 않음"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "L2 에이전트의 보안 그룹 방화벽에 대한 드라이버"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr "DHCP 에이전트에 대한 네트워크 스케줄링에 사용할 드라이버"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr "기본 L3 에이전트에 대한 라우터 스케줄링에 사용할 드라이버"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "중복 IP 주소 '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "POST에 중복 측정 규칙이 있음."
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "POST에 중복 보안 그룹 규칙이 있습니다. "
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "중복 호스트 라우트 '%s'"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "목록의 중복 항목: '%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "중복 이름 서버 '%s'"
-
-msgid "Duplicate segment entry in request."
-msgstr "요청에 중복되는 세그먼트 항목이 있음."
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "오류: %s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"오류: 기본 검색 경로(~/.quantum/, ~/, /etc/quantum/, /etc/) 및 '--config-"
-"file' (~/.neutron/, ~/, /etc/neutron/, /etc/) 및 '--config-file' 옵션!"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr ""
-"매개변수 network_id 및 router_id 중 하나를 _get_ports 메소드에 전달해야 합니"
-"다."
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "subnet_id 또는 port_id 중 하나를 지정해야 함"
-
-msgid "Empty physical network name."
-msgstr "실제 네트워크 이름이 비어 있습니다."
-
-msgid "Enable FWaaS"
-msgstr "FWaaS 사용"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "가상 라우터에 대해 HA 모드를 사용합니다."
-
-msgid "Enable SSL on the API server"
-msgstr "API 서버에서 SSL 연결 활성화"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"에이전트에서 VXLAN을 사용 가능하게 설정하십시오. linuxbridge 메커니즘 드라이"
-"버를 사용하여 ml2 플러그인이 에이전트를 관리할 경우 사용할 수 있습니다."
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"로컬 ARP 응답기가 지원되는 경우 이를 사용합니다. OVS 2.1 및 ML2 l2population "
-"드라이버가 필요합니다. 스위치(오버레이를 지원하는 경우)가 오버레이로 비용이 "
-"많이 드는 ARP 브로드캐스트를 수행하지 않고 로컬로 ARP 요청에 응답할 수 있도"
-"록 합니다."
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"admin_state_up False인 에이전트의 서비스 사용. 이 옵션이 False이면 에이전트"
-"의 admin_state_up이 False가 될 때 해당 서비스가 사용 안함으로 설정됩니다. "
-"admin_state_up False인 에이전트는 이 옵션과 관계 없이 자동 스케줄링에 사용하"
-"도록 선택하지 않습니다. 그러나 이 옵션이 True이면 이러한 에이전트에 수동 스케"
-"줄링을 사용할 수 있습니다."
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"메타데이터 프록시별로 로그 감시 사용/사용 안함. metadata_proxy_user/group에"
-"서 해당 로그 파일을 읽거나 쓸 수 없는 경우 사용 안함으로 설정해야 하며 메타데"
-"이터 프록시 로그 파일에서 logrotate를 사용하는 경우 copytruncate logrotate 옵"
-"션을 사용해야 합니다. 옵션 기본값은 metadata_proxy_user에서 도출된 값입니다. "
-"감시 로그는 metadata_proxy_user가 에이전트 유효 사용자 ID/이름인 경우 사용 설"
-"정됩니다."
-
-msgid "Encountered an empty component."
-msgstr "비어 있는 컴포넌트가 발생했습니다."
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "VLAN 범위의 끝이 VLAN 범위의 시작보다 작습니다. "
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr "터널 범위의 끝이 터널 범위의 시작보다 작음"
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "FWaaS 디바이스 드라이버를 가져오는 중에 오류 발생: %s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "DNS 주소 %s 구문 분석 오류"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "%s을(를) 읽는 중에 오류 발생"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr "기존 접두부는 새 접두부의 서브넷이어야 함"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"ml2 플러그인의 l2population 메커니즘 드라이버와 함께 사용할 확장기능. 이를 통"
-"해플러그인이 VXLAN 전달 테이블을 채울 수 있습니다."
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "별명이 %s인 확장이 존재하지 않음"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "외부 IP %s이(가) 게이트웨이 IP와 같음"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"서브넷 %(subnet_id)s에서 외부 네트워크 %(external_network_id)s에 도달할 수 없"
-"습니다. 따라서 포트 %(port_id)s을(를) 부동 IP와 연관시킬 수 없습니다. "
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr ""
-"기존 게이트웨이 포트가 있어서 기존 네트워크 %(net_id)s이(가) 비외부 상태가 되"
-"도록 업데이트할 수 없습니다. "
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "ExtraDhcpOpt %(id)s을(를) 찾을 수 없음"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr ""
-"서버측에 FWaaS 플러그인이 구성되어 있지만 L3-agent에서 FWaaS가 사용되지 않습"
-"니다."
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-"%(router_id)s 라우터를 다시 스케줄하지 못함: 적합한 l3 에이전트를 찾을 수 없"
-"습니다."
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-"L3 에이전트 %(agent_id)s에 대한 %(router_id)s 라우터를 스케줄링하지 못했습니"
-"다. "
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"%(max_tries)s 번 시도한 후에 %(router_id)s 라우터의 %(network_id)s 네트워크에"
-"서 VRID를 할당하는 데 실패했습니다."
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"fixed_ips에 올바르지 않은 서브넷 %(subnet_id)s이(가) 포함되어 있어서 네트워"
-"크 %(network_id)s에서 포트를 작성하지 못했습니다. "
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr "요청을 구문 분석하지 못했습니다. '%s' 매개변수가 지정되지 않았음"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr "요청을 구문 분석하지 못했습니다. 필수 속성 '%s'이(가) 지정되지 않음"
-
-msgid "Failed to remove supplemental groups"
-msgstr "보조 그룹을 제거하지 못함"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "gid %s을(를) 설정하지 못함"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "uid %s을(를) 설정하지 못함"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "%(type)s 터널 포트를 %(ip)s(으)로 설정하지 못함"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "%(floatingip_id)s 부동 IP를 찾을 수 없음"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr "TCP/UDP 프로토콜의 경우 port_range_min은 port_range_max 이하여야 함"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "루트 헬퍼를 사용하는 ip_lib 호출을 강제합니다"
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"외부 네트워크 %(net_id)s에 대한 게이트웨이가 하나 이상의 부동 IP에서 필요로 "
-"하기 때문에 라우터 %(router_id)s에 대한 게이트웨이를 업데이트할 수 없습니다. "
-
-msgid "Gateway is not valid on subnet"
-msgstr "게이트웨이가 서브넷에서 올바르지 않음"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr "초기화 후에 메타데이터 프록시를 실행하는 그룹(gid 또는 이름)"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"초기화 후에 메타데이터 프록시를 실행하는 그룹(gid 또는 이름)(비어 있는 경우: "
-"에이전트 유효 그룹)."
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr "초기화 이후 이 프로세스를 실행하는 그룹(gid 또는 이름)"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Neutron이 MAC 생성을 재시도할 횟수"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"ICMP 코드 (port-range-max) %(value)s이(가) 제공되지만 ICMP 유형(port-range-"
-"min)이 누락되었습니다."
-
-msgid "ID of network"
-msgstr "네트워크의 ID"
-
-msgid "ID of network to probe"
-msgstr "프로브할 네트워크의 ID"
-
-msgid "ID of probe port to delete"
-msgstr "삭제할 프로브 포트의 ID"
-
-msgid "ID of probe port to execute command"
-msgstr "명령을 실행할 프로브 포트의 ID"
-
-msgid "ID of the router"
-msgstr "라우터의 ID"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr ""
-"IP 주소 %(ip_address)s이(가) 지정된 네트워크의 서브넷에 대해 올바른 IP가아닙"
-"니다. "
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr ""
-"IP 주소 %(ip_address)s이(가) 지정된 서브넷에 대해 올바른 IP가 아닙니다."
-
-msgid "IP address used by Nova metadata server."
-msgstr "Nova 메타데이터 서버가 사용한 IP 주소입니다. "
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "IP 할당은 subnet_id 또는 ip_address가 필요함"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply가 다음 iptables 규칙 세트를 적용하지 못함:\n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"서브넷을 자동 주소용으로 구성했으므로 IPv6 주소 %(address)s은(는) 서브넷 "
-"%(id)s의 포트에 직접 지정할 수 없습니다."
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"외부 라우터에서 RA를 수신하도록 구성된 IPv6 서브넷 %s을(를) Neutron 라우터에 "
-"추가할 수 없습니다."
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"True인 경우 네트워크 선호 MTU가 알려져 있으면 네트워크 메소드 (DHCP 및 RA "
-"MTU 옵션)를 통해 MTU 설정을 VM에 광고하려고 합니다. "
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-"True인 경우 이를 지원하는 플러그인을 사용하여 VLAN 투명 네트워크를 작성할 수 "
-"있습니다."
-
-msgid "Illegal IP version number"
-msgstr "올바르지 않은 IP 버전 번호"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr "접두부 공간이 부족하여 서브넷 크기 /%s을(를) 할당할 수 없음"
-
-msgid "Insufficient rights for removing default security group."
-msgstr "기본 보안 그룹을 제거할 수 있는 권한이 없습니다."
-
-msgid "Interface to monitor"
-msgstr "모니터할 인터페이스"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr ""
-"하위 프로세스 활동 확인 간격(초), 사용 안함으로 설정하려면 0을 지정하십시오."
-
-msgid "Interval between two metering measures"
-msgstr "2개의 측정 조치 간의 간격"
-
-msgid "Interval between two metering reports"
-msgstr "2개의 측정 보고서 간의 간격"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"CIDR %s은(는) IPv6 주소 모드에 올바르지 않습니다. OpenStack에서는 접두부 /64"
-"를 사용하는 EUI-64 주소 형식을 사용합니다."
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "올바르지 않은 디바이스 %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-"올바르지 않은 인증 유형임: %(auth_type)s, 올바른 유형은 다음과 같음: "
-"%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "IP 풀에 대한 올바르지 않은 데이터 형식: '%s'"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "extra-dhcp-opt의 올바르지 않은 데이터 형식: %(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "고정 IP에 대한 올바르지 않은 데이터 형식: '%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "호스트 라우트에 대한 올바르지 않은 데이터 형식: '%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "이름 서버에 대한 올바르지 않은 데이터 형식: '%s'"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "라우터의 형식이 올바르지 않음: %(routes)s, %(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "올바르지 않은 형식: %s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "%(attr)s에 대한 올바르지 않은 입력입니다. 이유: %(reason)s."
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "오퍼레이션에 대한 올바르지 않은 입력: %(error_message)s."
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"올바르지 않은 입력. '%(target_dict)s'은(는) %(expected_keys)s 키가 있는 사전"
-"이어야 함"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr ""
-"올바르지 않은 인스턴스 상태: %(state)s, 올바른 상태는 %(valid_states)s임"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "올바르지 않은 맵핑: '%s'"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "올바르지 않은 pci 슬롯 %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr ""
-"올바르지 않은 제공자 형식. 마지막 부분이 '기본값'이거나 비어 있어야 함: %s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "올바르지 않은 라우트: %s"
-
-msgid "Invalid service provider format"
-msgstr "올바르지 않은 서비스 제공자 형식"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"ICMP %(field)s (%(attr)s) %(value)s의 값이 올바르지 않음. 이 값은 0에서 255 "
-"사이여야 합니다. "
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "%(port)s 포트에 대한 올바르지 않은 값"
-
-msgid "Keepalived didn't respawn"
-msgstr "유휴되면 다시 파생되지 않음"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "'%(mapping)s' 맵핑의 %(key)s 키가 고유하지 않음"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "한계는 정수 0이상 및 '%d'이(가) 아닌 수여야 함"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "서비스 거부(DoS)를 막기 위해 리스 수를 제한합니다."
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"테넌트 네트워크에 대한 할당에 사용할 수 있는 각 VLAN 태그의 범위 및VLAN 제공"
-"자와 테넌트 네트워크에 사용할 수 있는 실제 네트워크 이름을 지정하는  "
-"<physical_network>:<vlan_min>:<vlan_max> 또는 <physical_network>의 목록입니"
-"다."
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"neutron.ml2.type_drivers 네임스페이스에서 로드할네트워크 유형 드라이버 시작점"
-"의 목록입니다. "
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "VXLAN 엔드포인트의 로컬 IP 주소."
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "터널 엔드포인트의 로컬 IP 주소입니다."
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "메타데이터 프록시 UNIX 도메인 소켓의 위치입니다."
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "메타데이터 프록시 UNIX 도메인 소켓의 위치"
-
-msgid "Location of pid file of this process."
-msgstr "이 프로세스의 pid 파일 위치입니다."
-
-msgid "Location to store DHCP server config files"
-msgstr "DHCP 서버 구성 파일을 저장할 위치"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "IPv6 RA 구성 파일을 저장할 위치"
-
-msgid "Location to store child pid files"
-msgstr "하위 pid 파일을 저장할 위치"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr "keepalived/conntrackd 구성 파일을 저장할 위치"
-
-msgid "MTU setting for device."
-msgstr "디바이스의 MTU 설정입니다. "
-
-msgid "MTU size of veth interfaces"
-msgstr "veth 인터페이스의 MTU 크기"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "l2 에이전트를 DVR 모드에서 실행하십시오."
-
-msgid "Malformed request body"
-msgstr "형식이 틀린 요청 본문"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "허용되는 주소 쌍 최대 수"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "서브넷당 호스트 라우트의 최대 수"
-
-msgid "Metering driver"
-msgstr "측정 드라이버"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "측정 레이블 %(label_id)s이(가) 존재하지 않음"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "측정 레이블 규칙 %(rule_id)s이(가) 존재하지 않음"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"remote_ip_prefix %(remote_ip_prefix)s을(를) 가진 측정 레이블 규칙이 다른 항목"
-"과 겹침"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr "인터페이스 변경사항에 대한 ovsdb를 모니터링하여 폴링을 최소화합니다."
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "맵핑에서 키 누락: '%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "맵핑에서 값 누락: '%s'"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr "agent_type=%(agent_type)s 및 host=%(host)s인 에이전트를 여러 개 찾음"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "%s 서비스에 대한 다중 기본 제공자 "
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "%s 서비스에 대한 다중 플러그인이 구성되었음"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "%s 서비스에 대해 다중 제공자가 지정됨"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr "벌크 보안 그룹 규칙 작성의 다중 tenant_id는 허용되지 않음"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr "포트 범위가 제공되는 경우 프로토콜도 지정해야 합니다. "
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr "플로우 추가 또는 수정 시 하나 이상의 조치를 지정해야 함"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr "이름 '%s'의 길이는 1-63자 여야 하며 각각 영숫자나 하이픈이어야 합니다."
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "이름 '%s'은(는) 하이픈으로 시작하거나 끝날 수 없습니다."
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "사용할 열린 vSwitch 브릿지의 이름"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr ""
-"사용할 nova 리젼의 이름입니다. 키스톤이 둘 이상의 리젼을 관리할 경우 유용합니"
-"다."
-
-msgid "Name of the FWaaS Driver"
-msgstr "FWaaS 드라이버 이름"
-
-msgid "Namespace of the router"
-msgstr "라우터의 네임스페이스"
-
-msgid "Native pagination depend on native sorting"
-msgstr "네이티브 페이지 번호 매기기는 네이티브 정렬에 따라 다름"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "음수의 델타(다운그레이드)는 지원하지 않음"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "음수의 상대적 개정판(다운그레이드)은 지원하지 않음"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "%s 네트워크가 올바른 외부 네트워크가 아님"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "%s 네트워크가 외부 네트워크가 아님"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"IP 범위가 %(parent_range)s이고 크기가 %(size)s인(IP 범위 %(excluded_ranges)s "
-"제외) 네트워크를 발견하지 못했습니다."
-
-msgid "Network that will have instance metadata proxied."
-msgstr "인스턴스 메타데이터가 프록시되는 네트워크입니다."
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "네트워크 유형 값 '%s'이(가) 지원되지 않음"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "ML2 플러그인에 네트워크 유형 값이 필요함"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr "에이전트에서 지원하는 네트워크 유형(gre 및/또는 vxlan)입니다."
-
-msgid "Neutron Service Type Management"
-msgstr "Neutron 서비스 유형 관리"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "Neutron core_plugin이 구성되지 않았습니다!"
-
-msgid "Neutron plugin provider module"
-msgstr "Neutron 플러그인 제공자 모듈"
-
-msgid "Neutron quota driver class"
-msgstr "Neutron 할당량 드라이버 클래스"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr "외부 네트워크 %s과(와) 연관된 적합한 l3 에이전트를 찾을 수 없음"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "%(net_id)s 네트워크에서 추가 IP 주소를 사용할 수 없습니다. "
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"%(router_id)s 라우터 작성 시 VRID(Virtual Router Identifier)를 더 이상 사용"
-"할 수 없습니다.테넌트당 HA 라우터 수의 한계는 254입니다."
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "'%s' 서비스에 대해 제공자가 지정되지 않음, 종료하는 중"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"%(router_type)s 라우터 %(router_id)s을(를) 기존 DVR 노드에서 다른 L3 에이전"
-"트 %(agent_id)s(으)로 수동으로 지정할 수 없습니다."
-
-msgid "Not authorized."
-msgstr "권한이 없습니다. "
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"HA를 확인하기 위해 사용 가능한 13개의 에이전트가 충분하지 않습니다. 최소 "
-"%(min_agents)s, 사용 가능한 %(num_agents)s이(가) 필요합니다."
-
-msgid "Number of RPC worker processes for service"
-msgstr "서비스에 대한 RPC 작업자 프로세스 수"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr "메타데이터 서버 소켓을 구성하기 위한 백로그 요청 수"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr "소켓을 설정하려는 백로그 요청 횟수"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr "테넌트당 허용된 부동 IP 수입니다. 음수 값은 무제한을 의미합니다."
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr "테넌트당 허용되는 네트워크 수입니다. 음수 값은 무제한을 의미합니다."
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr "테넌트당 허용되는 포트 수입니다. 음수 값은 무제한을 의미합니다."
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr "테넌트당 허용된 라우터 수입니다. 음수 값은 무제한을 의미합니다."
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr "보낼 이벤트가 있는 경우 nova에 전송하는 이벤트 간의 시간(초)입니다."
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "감청 재시도 계속할 시간"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr "테넌트당 허용된 보안 그룹 수입니다. 음수 값은 무제한을 의미합니다."
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr "테넌트당 허용된 보안 규칙 수입니다. 음수 값은 무제한을 의미합니다."
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr "테넌트당 허용되는 서브넷 수입니다. 음수 값은 무제한을 의미합니다."
-
-msgid "OK"
-msgstr "OK"
-
-msgid "Only admin can view or configure quota"
-msgstr "관리자만이 할당량을 보거나 구성할 수 있습니다. "
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr "관리자만 다른 테넌트의 할당량에 액세스할 수 있는 권한이 있음"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr "한 번에 하나의 보안 프로파일에 대한 규칙만 업데이트하도록 허용됨"
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "remote_ip_prefix 또는 remote_group_id만이 제공될 수 있습니다. "
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"다음 포트의 device_owner %(device_owner)s에 대해 조작 %(op)s이(가) 지원되지 "
-"않음. 포트: %(port_id)s."
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr "기본 dnsmasq 설정을 이 파일로 대체합니다."
-
-msgid "Owner type of the device: network/compute"
-msgstr "디바이스의 소유자 유형: network/compute"
-
-msgid "POST requests are not supported on this resource."
-msgstr "이 자원에서 POST 요청이 지원되지 않습니다."
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "bridge_mappings 구문 분석 실패: %s."
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "지원되는 pci_vendor_devs 구문 분석 실패"
-
-msgid "Path to PID file for this process"
-msgstr "이 프로세스에 대한 PID 파일의 경로"
-
-msgid "Path to the router directory"
-msgstr "라우터 디렉토리의 경로"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr "터널 브릿지에 대한 통합 브릿지에 있는 피어 패치 포트입니다."
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr "통합 브릿지에 대한 터널 브릿지에 있는 피어 패치 포트입니다."
-
-msgid "Ping timeout"
-msgstr "Ping 제한시간 초과"
-
-#, python-format
-msgid "Plugin '%s' not found."
-msgstr "플러그인 '%s'를 찾을 수 없습니다."
-
-msgid "Plugin does not support updating provider attributes"
-msgstr "플러그인이 제공자 속성 업데이트를 지원하지 않음"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "%(id)s 포트가 고정 IP %(address)s을(를) 갖지 않음"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"포트 %(port_id)s이(가) 부동 IP %(floatingip_id)s과(와) 다른 테넌트와 연관되"
-"어 있어서 바운드할 수 없습니다. "
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr "포트에서 주소 쌍을 허용하려면 포트 보안을 사용 가능하게 해야 합니다."
-
-msgid "Port does not have port security binding."
-msgstr "포트에 포트 보안 바인딩이 없습니다. "
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-"포트에 보안 그룹이 연관되어 있습니다. 보안 그룹이 제거될 때까지 포트 보안 또"
-"는 IP 주소를 사용 안함으로 설정할 수 없습니다. "
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr ""
-"보안 그룹을 사용하려면 포트 보안이 사용으로 설정되고 포트에 IP 주소가 있어야 "
-"합니다. "
-
-msgid "Private key of client certificate."
-msgstr "클라이언트 인증서의 개인 키입니다."
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "%s 프로브가 삭제되었음"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "프로브 작성: %s "
-
-msgid "Process is already started"
-msgstr "프로세스가 이미 시작됨"
-
-msgid "Process is not running."
-msgstr "프로세스가 실행 중이지 않습니다."
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "nova 메타데이터에 액세스하기 위한 프로토콜, http 또는 https"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"몰리지 않도록 주기적 태스크 스케줄러를 시작할 때 무작위로 지연할 시간의 범위"
-"(초)입니다. (0으로 설정하여 사용 안함) "
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr "원격 메타데이터 서버에서 내부 서버 오류가 발생했습니다. "
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"에이전트에서 로드를 보고하는 자원 유형을 나타냅니다. 이는 \"네트워크\", \"서"
-"브넷\" 또는 \"포트\"입니다. 이를 지정하는 경우 (기본값은 네트워크임) 서버는 "
-"network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler."
-"WeightScheduler와의 조합에서 report_interval.dhcp_load_type을 사용할 수 있을 "
-"때마다 에이전트 보고 상태에서 에이전트 구성 오브젝트의 일부로 보낸 특정 로드"
-"를 추출하는데, 이는 이용 중인 자원 수입니다. network_scheduler_driver가 "
-"WeightScheduler인 경우 dhcp_load_type을 구성하여 밸런스 조정 중인 자원에 대"
-"한 선택을 표시할 수 있습니다. 예: dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr "요청 실패: 요청을 처리하는 중에 내부 서버 오류가 발생했습니다. "
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"요청에 중복되는 주소 쌍이 포함됨: mac_address %(mac_address)s ip_address "
-"%(ip_address)s."
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"요청된 서브넷(%(network_id)s 네트워크의 cidr: %(cidr)s)이 다른 서브넷과 겹침"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"'%(resource_id)s' 자원이 이미 '%(service_type)s' 서비스 유형에 대한 "
-"'%(provider)s' 제공자와 연관되어 있음"
-
-msgid "Resource body required"
-msgstr "자원 본문 필수"
-
-msgid "Resource not found."
-msgstr "자원을 찾을 수 없습니다."
-
-msgid "Resources required"
-msgstr "자원 필수"
-
-msgid "Root helper daemon application to use when possible."
-msgstr "가능한 경우 사용할 루트 헬퍼 디먼 애플리케이션."
-
-msgid "Root permissions are required to drop privileges."
-msgstr "권한을 삭제하려면 루트 권한이 필요합니다."
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "라우터 %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "%(router_id)s 라우터를 찾을 수 없음"
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr "%(router_id)s 라우터에 ID가 %(port_id)s인 인터페이스가 없음"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr "%(router_id)s 라우터에 %(subnet_id)s 서브넷의 인터페이스가 없음"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "라우터가 이미 %s 서브넷에 포트를 갖고 있음"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"하나 이상의 부동 IP에서 필요로 하므로 %(router_id)s 라우터의 %(subnet_id)s 서"
-"브넷에 대한 라우터 인터페이스를 삭제할 수 없습니다. "
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"하나 이상의 라우터에서 필요로 하므로 %(router_id)s 라우터의 %(subnet_id)s 서"
-"브넷에 대한 라우터 인터페이스를 삭제할 수 없습니다. "
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr "연결된 인스턴스의 메타데이터가 프록시되는 라우터입니다."
-
-msgid "Run as daemon."
-msgstr "디먼으로 실행됩니다."
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"서버에 대한 상태를 보고하는 노드 사이의 시간(초)이며 agent_down_time보다 짧아"
-"야 하며 절반이거나 agent_down_time보다 짧은 경우 최적입니다."
-
-msgid "Seconds between running periodic tasks"
-msgstr "주기적 태스크 실행 사이의 시간(초)"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"에이전트가 작동 중지되었다고 간주되는 시간(초)이며 에이전트가 계속 작동 중지 "
-"상태인지 확인할 수 있도록 report_interval의 두 배 이상이어야 합니다."
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "%(id)s 보안 그룹이 존재하지 않음"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "보안 그룹 규칙 %(id)s이(가) 존재하지 않음"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "보안 그룹 규칙이 이미 있습니다. 규칙 ID는 %(id)s입니다."
-
-msgid "Segments and provider values cannot both be set."
-msgstr "세그먼트 및 제공자 값을 모두 설정할 수 없습니다."
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"포트 데이터(fixed_ips/floatingip)가 변경되면 알림을 nova에 보냅니다. 이에 따"
-"라 nova는 해당 캐시를 업데이트할 수 있습니다."
-
-msgid "Send notification to nova when port status changes"
-msgstr "포트 상태가 변경되면 알림을 nova에 보냅니다."
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"HA 설정을 위해 불필요한 다수의 ARP를 전송합니다. 0 이하인 경우 기능이 사용 안"
-"함으로 설정됩니다."
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-"서비스 유형에 대한 '%(provider)s' 서비스 제공자를 찾을 수 없음: "
-"%(service_type)s"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr "%(service_type)s 서비스 유형에 기본 서비스 제공자가 없음"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"에이전트에서 SIGTERM을 수신한 후에 새 rpc 호출에 대한 새 제한시간(초)을 설정"
-"합니다. 값을 0으로 설정하면 rpc 제한시간이 변경되지 않습니다."
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"GRE/VXLAN 터널을 전송하는 발신 IP 패킷에 DF(Don't Fragment) 비트를 설정하거"
-"나 설정 해제하십시오."
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"일부 테넌트에 이름이 'default'인 보안 그룹이 두 개 이상 있음: "
-"%(duplicates)s. 데이터베이스를 업그레이드하기 전에 중복된 모든 'default' 보"
-"안 그룹을 해결해야 합니다."
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr ""
-"요청에서 인증된 테넌트가 아닌 'tenant_id'를 지정하려면 admin 권한이 필요함"
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr "라우터 인터페이스에 대한 서브넷은 게이트웨이 IP를 가져야 함"
-
-msgid "Subnet pool has existing allocations"
-msgstr "서브넷 풀에 기존 할당이 있음"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr "l3 HA 관리 네트워크에 사용된 서브넷입니다."
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr ""
-"테넌트가 작성할 수 있는 라우터 유형을 판별하는 시스템 범위 플래그입니다. 관리"
-"자만 대체할 수 있습니다."
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "메타데이터 서버 요청을 청취할 TCP 포트입니다. "
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr "Neutron 메타데이터 네임스페이스 프록시가 사용하는 TCP 포트입니다. "
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Nova 메타데이터 서버가 사용한 TCP 포트입니다. "
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "TLD '%s'에 숫자만 사용할 수 없음"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "vxlan 인터페이스 프로토콜 패킷용 TOS."
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "vxlan 인터페이스 프로토콜 패킷용 TTL."
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr ""
-"%(tenant_id)s 테넌트는 이 네트워크에 %(resource)s을(를) 작성하도록 허용되지 "
-"않음"
-
-msgid "Tenant network creation is not enabled."
-msgstr "테넌트 네트워크 작성은 사용되지 않습니다. "
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"'gateway_external_network_id' 옵션은 Neutron이 두 개 이상의 외부 네트워크를 "
-"가지므로 이 에이전트에 대해구성되어야 합니다. "
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr ""
-"구성 파일에 지정된 HA 네트워크 CIDR이 올바르지 않습니다.%(cidr)s과(와) 연관되"
-"어 있습니다."
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "VXLAN 터널에 사용하는 UDP 포트"
-
-msgid "The advertisement interval in seconds"
-msgstr "광고 간격(초)"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "할당 풀 %(pool)s이(가) 올바르지 않습니다. "
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-"할당 풀 %(pool)s이(가) 서브넷 cidr %(subnet_cidr)s 이상으로 확장합니다. "
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr ""
-"속성 '%(attr)s'은(는) 다른 자원에 대한 참조이지만 정렬 '%(resource)s'에서 사"
-"용될 수는 없습니다."
-
-msgid "The core plugin Neutron will use"
-msgstr "Neutron이 사용할 코어 플러그인"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "DHCP 서버를 관리하는 데 사용되는 드라이버입니다. "
-
-msgid "The driver used to manage the virtual interface."
-msgstr "가상 인터페이스를 관리하는 데 사용되는 드라이버입니다. "
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"device_id %(device_id)s이(가) 사용자 테넌트의 소유가 아니거나 다른 테넌트 라"
-"우터와 일치합니다."
-
-msgid "The host IP to bind to"
-msgstr "바인드할 호스트 IP"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "OVSDB와 상호작용하는 데 필요한 인터페이스"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-"단일 응답으로 최대 항목 수가 리턴되었습니다. 값이 'infinite' 또는 음수인 경"
-"우 제한이 없다는 의미입니다. "
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr ""
-"DHCP 에이전트 %(agent_id)s에서 %(network_id)s 네트워크를 이미 호스트하고 있습"
-"니다. "
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr ""
-"DHCP 에이전트 %(agent_id)s에서 %(network_id)s 네트워크를 호스트하지 않습니"
-"다. "
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr "허용되는 주소 쌍 수가 최대값 %(quota)s을(를) 초과합니다."
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr "에이전트가 로컬 디바이스 변경을 폴링하는 사이에 대기하는 시간(초). "
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-"통신이 유실된 후에 ovsdb 모니터를 재파생하기 전에 대기할 시간(초)입니다."
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "sort_keys 및 sort_dirs의 수가 같아야 함"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "포트 '%s'이(가) 삭제됨"
-
-msgid "The port to bind to"
-msgstr "바인드할 포트"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "요청한 컨텐츠 유형 %s이(가) 올바르지 않습니다."
-
-msgid "The resource could not be found."
-msgstr "자원을 찾을 수 없습니다. "
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr ""
-"L3 에이전트 %(agent_id)s에서 %(router_id)s 라우터를 이미 호스트하고 있습니"
-"다. "
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr "서버에 오류가 있거나 서버가 요청된 조작을 수행할 수 없습니다."
-
-msgid "The service plugins Neutron will use"
-msgstr "Neutron이 사용할 서비스 플러그인"
-
-msgid "The type of authentication to use"
-msgstr "사용할 인증 유형"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "%(element)s의 '%(value)s' 값이 올바르지 않습니다."
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"에이전트에 대한 작업 모드입니다. 허용되는 모드는 다음과 같습니다. '레거시' - "
-"이 모드는 L3 에이전트가 중앙 네트워킹 노드에 배치되어 SNAT와 DNAT 같은 L3 서"
-"비스를 제공하는 기존 동작을 유지합니다. DVR을 채택하지 않으려면 이 모드를 사"
-"용하십시오. 'dvr' - 이 모드는 DVR 기능을 사용하며 컴퓨터 호스트에서 실행되는 "
-"L3 에이전트에 사용해야 합니다. 'dvr_snat' - 이 모드는 DVR과 함께 중앙 SNAT 지"
-"원을 사용합니다. 중앙 노드에서(또는 devstack과 같은 단일 호스트 배치에서) 실"
-"행 중인 L3 에이전트에는 이 모드를 사용해야 합니다."
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"모든 OpenvSwitch 브릿지의 모든 포트를 삭제하려면 true입니다. 통합 및 외부 네"
-"트워크 브릿지에 Neutron이 작성한 포트를 삭제하려면 false입니다. "
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "ML2 플러그인에 터널 IP 값이 필요함"
-
-msgid "Tunnel bridge to use."
-msgstr "사용할 터널 브릿지입니다."
-
-msgid "URL to database"
-msgstr "데이터베이스에 대한 URL"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "%s에 액세스할 수 없음"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr ""
-"%(router_id)s에 대한 조작을 완료할 수 없습니다. 라우트 수가 최대 %(quota)s을"
-"(를) 초과했습니다. "
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"%(subnet_id)s에 대한 조작을 완료할 수 없습니다. DNS 네임서버 수가 %(quota)s "
-"한계를 초과했습니다. "
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr ""
-"%(subnet_id)s에 대한 조작을 완료할 수 없습니다. 호스트 라우트 수가 %(quota)s "
-"한계를 초과했습니다. "
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-"%(net_id)s 네트워크에 대한 조작을 완료할 수 없습니다. IP 주소 %(ip_address)s"
-"이(가) 사용 중입니다. "
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr ""
-"%(net_id)s 네트워크에 대한 조작을 완료할 수 없습니다. MAC 주소 %(mac)s이(가) "
-"사용 중입니다. "
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"%(net_id)s네트워크에서 조작을 완료할 수 없습니다. 네트워크에 여전히 사용 중"
-"인 하나 이상의 포트가 있습니다. "
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"%(net_id)s 네트워크에 대한 %(port_id)s 포트의 오퍼레이션을 완료할 수 없습니"
-"다. 포트에 이미 접속된 디바이스 %(device_id)s이(가) 있습니다. "
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "%s의 값을 변환할 수 없음"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "에이전트 게이트웨이 포트를 작성할 수 없음"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "SNAT 인터페이스 포트를 작성할 수 없음"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr ""
-"일반 네트워크를 작성할 수 없습니다. 실제 네트워크 %(physical_network)s이(가) "
-"사용 중입니다. "
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr ""
-"네트워크를 작성할 수 없습니다. 허용되는 최대 시도 수만큼 시도한 후 사용 가능"
-"한 네트워크를 찾을 수 없습니다."
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr ""
-"네트워크를 작성할 수 없습니다. 테넌트 네트워크를 할당에 사용할 수 없습니다. "
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"네트워크를 작성할 수 없습니다. 실제 네트워크 %(physical_network)s의 VLAN "
-"%(vlan_id)s이(가) 사용 중입니다. "
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr ""
-"네트워크를 작성할 수 없습니다. 터널 ID %(tunnel_id)s이(가) 사용 중입니다. "
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "%s의 맥 주소를 확인할 수 없습니다"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "요청 본문에서 '%s'을(를) 찾을 수 없음"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr "외부 네트워크 %(net_id)s에서 IP 주소를 찾을 수 없음"
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "%s에서 자원 이름을 찾을 수 없음"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "IPv4 접두부에 대해 EUI64에 의해 IP 주소를 생성할 수 없습니다."
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "%(host)s 호스트에 대한 고유 DVR mac을 생성할 수 없습니다."
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "%(net_id)s 네트워크에 고유 MAC을 생성할 수 없습니다. "
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-"%s에서 대상 필드를 식별할 수 없음. 일치가 다음 양식이어야 함."
-"%%(<field_name>)s"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr ""
-"상위 소스로서 일치 %(match)s을(를) 확인할 수 없음. %(res)s을(를) 찾을 수 없음"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "예기치 않은 응답 코드: %s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "예상치 않은 응답: %s"
-
-msgid "Unimplemented commands"
-msgstr "구현되지 않은 명령"
-
-msgid "Unknown API version specified"
-msgstr "알 수 없는 API 버전이 지정됨"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "알 수 없는 속성 '%s'입니다."
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "알 수 없는 체인: %r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "알 수 없는 할당량 자원 %(unknown)s."
-
-msgid "Unmapped error"
-msgstr "맵핑되지 않은 오류"
-
-msgid "Unrecognized action"
-msgstr "인식되지 않는 조치"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "인식되지 않는 속성 '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "지원되지 않는 Content-Type"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "지원되지 않는 네트워크 유형 %(net_type)s입니다."
-
-msgid "Unsupported request type"
-msgstr "지원되지 않는 요청 유형"
-
-msgid "Updating default security group not allowed."
-msgstr "기본 보안 그룹 업데이트가 허용되지 않습니다. "
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"원격 MAC 및 IP를 학습하고 터널 확장성을 개선하려면 ML2 l2population 메커니즘 "
-"드라이버를 사용하십시오."
-
-msgid "Use broadcast in DHCP replies"
-msgstr "DHCP 응답에 브로드캐스트 사용"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr "--델타 또는 상대적 개정판 중 하나 사용"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr "초기화 후에 메타데이터 프록시를 실행하는 사용자(uid 또는 이름)"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"초기화 후에 메타데이터 프록시를 실행하는 사용자(uid 또는 이름)(비어 있는 경"
-"우: 에이전트 유효 사용자)."
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr "초기화 이후 이 프로세스를 실행하는 사용자(uid 또는 이름)"
-
-msgid "VRRP authentication password"
-msgstr "VRRP 인증 비밀번호"
-
-msgid "VRRP authentication type"
-msgstr "VRRP 인증 유형"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"사전의 키를 유효성 검증하지 못했습니다. 예상 키: %(expected_keys)s 제공된 "
-"키: %(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "유효성 검증기 '%s'이(가) 없습니다. "
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "'%(mapping)s' 맵핑의 %(value)s 값이 고유하지 않음"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"감시 파일 로그. metadata_proxy_user/group에 메타데이터 프록시 로그 파일에 대"
-"한 읽기/쓰기 권한이 없는 경우 로그 감시를 사용 안함으로 설정해야 합니다. "
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr ""
-"Neutron 상태 파일을 저장할 위치. 에이전트가 이 디렉토리에 쓸 수 있어야 합니"
-"다."
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"IPv6를 사용하면 자동으로 지정되는 링크 로컬 주소(LLA)를 사용할 수 있으므로 외"
-"부 게이트웨이에 사용한 네트워크에 연관 서브넷이 필요하지 않습니다. 그러나 기"
-"본 라우트의 다음 홉으로 사용할 IPv6 게이트웨이 주소가 필요합니다. 여기서 "
-"IPv6 게이트웨이 주소를 구성하지 않으면(또한 이 경우에만)상위 라우터의 "
-"RA(Router Advertisement)에서 해당 기본 라우트를 가져오도록 Neutron 라우터를 "
-"구성할 수 있습니다. 이 경우 이러한 RA를 보내도록 상위 라우터를 구성할 수도 있"
-"습니다. ipv6_gateway를 구성한 경우, 이 게이트웨이가 상위 라우터의 인터페이스"
-"에 대한 LLA여야 합니다. 글로벌 고유 주소(GUA)를 사용하는 다음 합이 필요한 경"
-"우, 이 매개변수가 아닌 네트워크에 할당된 서브넷을 통해 수행해야 합니다. "
-
-msgid "You must implement __call__"
-msgstr "__call__을 구현해야 합니다. "
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-"브릿지에 대한 구성 파일, 즉 --config-file 또는 env[QUANTUM_TEST_CONFIG_FILE] "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "개정판 또는 상대적 델타를 제공해야 함"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr "allocation_pools는 특정 서브넷 요청에만 사용할 수 있습니다."
-
-msgid "binding:profile value too large"
-msgstr "바인딩:프로파일 값이 너무 김"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "cidr 및 prefixlen을 함께 입력하지 않아야 함"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr ""
-"dhcp_agents_per_network는 1 이상이어야 합니다. '%s'은(는) 올바르지 않습니다. "
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "fixed_ip_address는 port_id 없이 지정할 수 없음"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "디바이스 소유자 %s이(가) 있음"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "%(dev_name)s 디바이스에 대한 ip 명령 실패: %(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "ip 링크 기능 %(capability)s이(가) 지원되지 않음"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "ip 링크 명령이 지원되지 않음: %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr "cidr 및 subnetpool_id가 없는 경우 ip_version을 지정해야 함"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ip_version이 4인 경우 ipv6_address_mode가 올바르지 않음"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ip_version이 4인 경우 ipv6_ra_mode가 올바르지 않음"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"enable_dhcp가 False로 설정된 경우 ipv6_ra_mode 또는 ipv6_address_mode를 설정"
-"할 수 없습니다."
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"'%(ra_mode)s'(으)로 설정된 ipv6_ra_mode('%(addr_mode)s'(으)로 설정된 "
-"ipv6_address_mode 포함)가 올바르지 않습니다. 두 설정 다 설정된 경우 동일한 값"
-"이어야 합니다."
-
-msgid "mac address update"
-msgstr "mac 주소 업데이트"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"max_l3_agents_per_router %(max_agents)s 구성 매개변수가 올바르지 않습니다. "
-"min_l3_agents_per_router와 같거나 이보다 커야 합니다.%(min_agents)s과(와) 연"
-"관되어 있습니다."
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"min_l3_agents_per_router 구성 매개변수가 올바르지 않습니다.HA의 %s과(와) 동일"
-"하거나 이상이어야 합니다."
-
-msgid "network_type required"
-msgstr "network_type이 필요함"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "network_type에서 '%s' 값을 지원하지 않습니다"
-
-msgid "new subnet"
-msgstr "새 서브넷"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "VLAN 제공자 네트워크에 대해 실제 네트워크 '%s'을(를) 알 수 없음. "
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "플랫 제공자 네트워크에 대해 실제 네트워크 '%s'을(를) 알 수 없음. "
-
-msgid "physical_network required for flat provider network"
-msgstr "플랫 제공자 네트워크에 실제 네트워크 필요"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "%s 네트워크에 대해 지정된 provider:physical_network 입니다"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "respawn_interval은 >= 0이어야 합니다(제공된 경우)."
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id가 범위(%(min)s -  %(max)s)를 벗어남"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr "segmentation_id는 VLAN 제공자 네트워크의 physical_network가 필요함"
-
-msgid "the nexthop is not connected with router"
-msgstr "nexthop이 라우터와 연결되지 않음"
-
-msgid "the nexthop is used by router"
-msgstr "라우터가 nexthop을 사용함"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"external_process가 /proc/cmdline 인터페이스를 통해 추적할 수 있도록 명령행에"
-"서 제공된 uuid입니다."
diff --git a/neutron/locale/neutron-log-error.pot b/neutron/locale/neutron-log-error.pot
deleted file mode 100644 (file)
index f917ac4..0000000
+++ /dev/null
@@ -1,1209 +0,0 @@
-# Translations template for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#: neutron/policy.py:262
-#, python-format
-msgid "Policy check error while calling %s!"
-msgstr ""
-
-#: neutron/service.py:110 neutron/service.py:219
-msgid "Unrecoverable error: please check log for details."
-msgstr ""
-
-#: neutron/service.py:149
-msgid "done with wait"
-msgstr ""
-
-#: neutron/service.py:193
-#, python-format
-msgid "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented."
-msgstr ""
-
-#: neutron/service.py:233
-msgid "No known API applications configured."
-msgstr ""
-
-#: neutron/service.py:338
-msgid "Exception occurs when timer stops"
-msgstr ""
-
-#: neutron/service.py:347
-msgid "Exception occurs when waiting for timer"
-msgstr ""
-
-#: neutron/wsgi.py:142
-#, python-format
-msgid "Unable to listen on %(host)s:%(port)s"
-msgstr ""
-
-#: neutron/wsgi.py:601
-#, python-format
-msgid "InvalidContentType: %s"
-msgstr ""
-
-#: neutron/wsgi.py:605
-#, python-format
-msgid "MalformedRequestBody: %s"
-msgstr ""
-
-#: neutron/wsgi.py:614
-msgid "Internal error"
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:252 neutron/agent/common/ovs_lib.py:362
-#: neutron/agent/ovsdb/impl_vsctl.py:67
-#, python-format
-msgid "Unable to execute %(cmd)s. Exception: %(exception)s"
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:273
-#, python-format
-msgid "Timed out retrieving ofport on port %s."
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:643
-#, python-format
-msgid "OVS flows could not be applied on bridge %s"
-msgstr ""
-
-#: neutron/agent/common/utils.py:53
-#, python-format
-msgid "Error loading interface driver '%s'"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:132
-#, python-format
-msgid "Unable to %(action)s dhcp for %(net_id)s."
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:159
-#, python-format
-msgid "Unable to sync network state on deleted network %s"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:176
-msgid "Unable to sync network state."
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:207
-#, python-format
-msgid "Network %s info call failed."
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:572 neutron/agent/l3/agent.py:656
-#: neutron/agent/metadata/agent.py:275
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:845
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:155
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:323
-#: neutron/services/metering/agents/metering_agent.py:284
-msgid "Failed reporting state!"
-msgstr ""
-
-#: neutron/agent/l2/extensions/manager.py:68
-#, python-format
-msgid "Agent Extension '%(name)s' failed while handling port update"
-msgstr ""
-
-#: neutron/agent/l2/extensions/manager.py:82
-#, python-format
-msgid "Agent Extension '%(name)s' failed while handling port deletion"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:251
-msgid "An interface driver must be specified"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:258
-#, python-format
-msgid "%s used in config as ipv6_gateway is not a valid IPv6 link-local address."
-msgstr ""
-
-#: neutron/agent/l3/agent.py:345
-#, python-format
-msgid "Error while deleting router %s"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:400
-#, python-format
-msgid "The external network bridge '%s' does not exist"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:463
-#, python-format
-msgid "Failed to fetch router information for '%s'"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:490
-#, python-format
-msgid "Removing incompatible router '%s'"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:494
-#, python-format
-msgid "Failed to process compatible router '%s'"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:544
-msgid "Failed synchronizing routers due to RPC error"
-msgstr ""
-
-#: neutron/agent/l3/dvr_local_router.py:224
-msgid "DVR: Failed updating arp entry"
-msgstr ""
-
-#: neutron/agent/l3/dvr_local_router.py:309
-msgid "DVR: error adding redirection logic"
-msgstr ""
-
-#: neutron/agent/l3/dvr_local_router.py:311
-msgid "DVR: snat remove failed to clear the rule and device"
-msgstr ""
-
-#: neutron/agent/l3/dvr_local_router.py:441
-#, python-format
-msgid "No FloatingIP agent gateway port returned from server for 'network-id': %s"
-msgstr ""
-
-#: neutron/agent/l3/dvr_local_router.py:446
-msgid "Missing subnet/agent_gateway_port"
-msgstr ""
-
-#: neutron/agent/l3/dvr_router_base.py:49
-#, python-format
-msgid ""
-"DVR: SNAT port not found in the list %(snat_list)s for the given router  "
-"internal port %(int_p)s"
-msgstr ""
-
-#: neutron/agent/l3/ha_router.py:70
-#, python-format
-msgid "Error while writing HA state for %s"
-msgstr ""
-
-#: neutron/agent/l3/ha_router.py:81
-#, python-format
-msgid "Unable to process HA router %s without HA port"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:76
-#, python-format
-msgid "Failed to process or handle event for line %s"
-msgstr ""
-
-#: neutron/agent/l3/namespace_manager.py:122
-msgid "RuntimeError in obtaining namespace list for namespace cleanup."
-msgstr ""
-
-#: neutron/agent/l3/namespace_manager.py:143
-#, python-format
-msgid "Failed to destroy stale namespace %s"
-msgstr ""
-
-#: neutron/agent/l3/namespaces.py:82
-#, python-format
-msgid "Failed trying to delete namespace: %s"
-msgstr ""
-
-#: neutron/agent/l3/router_info.py:671
-msgid "Failed to process floating IPs."
-msgstr ""
-
-#: neutron/agent/linux/async_process.py:182
-#, python-format
-msgid "An error occurred while killing [%s]."
-msgstr ""
-
-#: neutron/agent/linux/async_process.py:211
-#, python-format
-msgid "An error occurred while communicating with async process [%s]."
-msgstr ""
-
-#: neutron/agent/linux/async_process.py:242
-#, python-format
-msgid "Error received from [%(cmd)s]: %(err)s"
-msgstr ""
-
-#: neutron/agent/linux/async_process.py:246
-#, python-format
-msgid "Process [%(cmd)s] dies due to the error: %(err)s"
-msgstr ""
-
-#: neutron/agent/linux/bridge_lib.py:74
-#, python-format
-msgid "Failed running %s"
-msgstr ""
-
-#: neutron/agent/linux/daemon.py:128
-#, python-format
-msgid "Error while handling pidfile: %s"
-msgstr ""
-
-#: neutron/agent/linux/daemon.py:190
-msgid "Fork failed"
-msgstr ""
-
-#: neutron/agent/linux/daemon.py:243
-#, python-format
-msgid "Pidfile %s already exist. Daemon already running?"
-msgstr ""
-
-#: neutron/agent/linux/dhcp.py:409
-#, python-format
-msgid "Error while create dnsmasq base log dir: %s"
-msgstr ""
-
-#: neutron/agent/linux/dhcp.py:1239
-msgid "Exception during stale dhcp device cleanup"
-msgstr ""
-
-#: neutron/agent/linux/external_process.py:230
-#, python-format
-msgid ""
-"%(service)s for %(resource_type)s with uuid %(uuid)s not found. The "
-"process should not have died"
-msgstr ""
-
-#: neutron/agent/linux/external_process.py:256
-msgid "Exiting agent as programmed in check_child_processes_actions"
-msgstr ""
-
-#: neutron/agent/linux/external_process.py:267
-#, python-format
-msgid ""
-"Exiting agent because of a malfunction with the %(service)s process "
-"identified by uuid %(uuid)s"
-msgstr ""
-
-#: neutron/agent/linux/interface.py:64
-#, python-format
-msgid ""
-"IPv6 protocol requires a minimum MTU of %(min_mtu)s, while the configured"
-" value is %(current_mtu)s"
-msgstr ""
-
-#: neutron/agent/linux/interface.py:360 neutron/agent/linux/interface.py:416
-#: neutron/agent/linux/interface.py:452
-#, python-format
-msgid "Failed unplugging interface '%s'"
-msgstr ""
-
-#: neutron/agent/linux/ip_conntrack.py:75
-#, python-format
-msgid "Failed execute conntrack command %s"
-msgstr ""
-
-#: neutron/agent/linux/ip_lib.py:295
-#, python-format
-msgid "Failed deleting ingress connection state of floatingip %s"
-msgstr ""
-
-#: neutron/agent/linux/ip_lib.py:304
-#, python-format
-msgid "Failed deleting egress connection state of floatingip %s"
-msgstr ""
-
-#: neutron/agent/linux/ip_lib.py:993
-#, python-format
-msgid "Failed sending gratuitous ARP to %(addr)s on %(iface)s in namespace %(ns)s"
-msgstr ""
-
-#: neutron/agent/linux/ip_link_support.py:105
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:83
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:106
-msgid "Failed executing ip command"
-msgstr ""
-
-#: neutron/agent/linux/ip_monitor.py:44 neutron/agent/linux/ip_monitor.py:55
-#, python-format
-msgid "Unable to parse route \"%s\""
-msgstr ""
-
-#: neutron/agent/linux/iptables_manager.py:506
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables "
-"rules:\n"
-"%s"
-msgstr ""
-
-#: neutron/agent/linux/ovsdb_monitor.py:74
-msgid "Interface monitor is not active"
-msgstr ""
-
-#: neutron/agent/linux/utils.py:223
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr ""
-
-#: neutron/agent/metadata/agent.py:97
-#: neutron/agent/metadata/namespace_proxy.py:59
-msgid "Unexpected error."
-msgstr ""
-
-#: neutron/agent/ovsdb/impl_vsctl.py:127
-#, python-format
-msgid "Could not parse: %(raw_result)s. Exception: %(exception)s"
-msgstr ""
-
-#: neutron/agent/ovsdb/native/commands.py:40
-msgid "Error executing command"
-msgstr ""
-
-#: neutron/api/extensions.py:457
-#, python-format
-msgid "Error fetching extended attributes for extension '%s'"
-msgstr ""
-
-#: neutron/api/extensions.py:466
-#, python-format
-msgid ""
-"It was impossible to process the following extensions: %s because of "
-"missing requirements."
-msgstr ""
-
-#: neutron/api/extensions.py:482
-msgid "Exception loading extension"
-msgstr ""
-
-#: neutron/api/extensions.py:501
-#, python-format
-msgid "Extension path '%s' doesn't exist!"
-msgstr ""
-
-#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:102
-#, python-format
-msgid ""
-"Will not send event %(method)s for network %(net_id)s: no agent "
-"available. Payload: %(payload)s"
-msgstr ""
-
-#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:110
-#, python-format
-msgid ""
-"No plugin for L3 routing registered. Cannot notify agents with the "
-"message %s"
-msgstr ""
-
-#: neutron/api/v2/base.py:395
-#, python-format
-msgid "Unable to undo add for %(resource)s %(id)s"
-msgstr ""
-
-#: neutron/api/v2/resource.py:97 neutron/api/v2/resource.py:109
-#: neutron/api/v2/resource.py:129
-#, python-format
-msgid "%s failed"
-msgstr ""
-
-#: neutron/callbacks/manager.py:143
-#, python-format
-msgid "Error during notification for %(callback)s %(resource)s, %(event)s"
-msgstr ""
-
-#: neutron/cmd/ipset_cleanup.py:69
-#, python-format
-msgid "Error, unable to remove iptables rule for IPset: %s"
-msgstr ""
-
-#: neutron/cmd/ipset_cleanup.py:84
-#, python-format
-msgid "Error, unable to destroy IPset: %s"
-msgstr ""
-
-#: neutron/cmd/linuxbridge_cleanup.py:33
-#, python-format
-msgid "Parsing physical_interface_mappings failed: %s."
-msgstr ""
-
-#: neutron/cmd/linuxbridge_cleanup.py:41
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr ""
-
-#: neutron/cmd/linuxbridge_cleanup.py:57
-#, python-format
-msgid "Linux bridge %s delete failed"
-msgstr ""
-
-#: neutron/cmd/netns_cleanup.py:153
-#, python-format
-msgid "Error unable to destroy namespace: %s"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:58
-msgid ""
-"Check for Open vSwitch VXLAN support failed. Please ensure that the "
-"version of openvswitch being used has VXLAN support."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:67
-msgid ""
-"Check for Open vSwitch Geneve support failed. Please ensure that the "
-"version of openvswitch and kernel being used has Geneve support."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:76
-msgid ""
-"Check for iproute2 VXLAN support failed. Please ensure that the iproute2 "
-"has VXLAN support."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:84
-msgid ""
-"Check for Open vSwitch patch port support failed. Please ensure that the "
-"version of openvswitch being used has patch port support or disable "
-"features requiring patch ports (gre/vxlan, etc.)."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:101
-msgid ""
-"The user that is executing neutron does not have permissions to read the "
-"namespaces. Enable the use_helper_for_ns_read configuration option."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:118
-#, python-format
-msgid ""
-"The installed version of dnsmasq is too old. Please update to at least "
-"version %s."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:127
-msgid ""
-"The installed version of keepalived does not support IPv6. Please update "
-"to at least version 1.2.10 for IPv6 support."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:136
-#, python-format
-msgid ""
-"The installed version of dibbler-client is too old. Please update to at "
-"least version %s."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:145
-msgid ""
-"Nova notifications are enabled, but novaclient is not installed. Either "
-"disable nova notifications or install python-novaclient."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:154
-msgid ""
-"Check for Open vSwitch ARP responder support failed. Please ensure that "
-"the version of openvswitch being used has ARP flows support."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:163
-msgid ""
-"Check for Open vSwitch support of ARP header matching failed. ARP "
-"spoofing suppression will not work. A newer version of OVS is required."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:172
-msgid ""
-"Check for Open vSwitch support of ICMPv6 header matching failed. ICMPv6 "
-"Neighbor Advt spoofing (part of arp spoofing) suppression will not work. "
-"A newer version of OVS is required."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:182
-msgid ""
-"Check for VF management support failed. Please ensure that the version of"
-" ip link being used has VF support."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:192
-msgid "Check for native OVSDB support failed."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:199
-msgid "Cannot run ebtables. Please ensure that it is installed."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:207
-msgid "Cannot run ipset. Please ensure that it is installed."
-msgstr ""
-
-#: neutron/cmd/sanity/checks.py:105
-#, python-format
-msgid "Unexpected exception while checking supported feature via command: %s"
-msgstr ""
-
-#: neutron/cmd/sanity/checks.py:162
-msgid "Unexpected exception while checking supported ip link command"
-msgstr ""
-
-#: neutron/cmd/sanity/checks.py:326
-#, python-format
-msgid ""
-"Failed to import required modules. Ensure that the python-openvswitch "
-"package is installed. Error: %s"
-msgstr ""
-
-#: neutron/cmd/sanity/checks.py:330
-msgid "Unexpected exception occurred."
-msgstr ""
-
-#: neutron/common/utils.py:509
-msgid "Alias or class name is not set"
-msgstr ""
-
-#: neutron/common/utils.py:521
-msgid "Error loading class by alias"
-msgstr ""
-
-#: neutron/common/utils.py:523
-msgid "Error loading class by class name"
-msgstr ""
-
-#: neutron/db/agents_db.py:421
-#, python-format
-msgid ""
-"Message received from the host: %(host)s during the registration of "
-"%(agent_name)s has a timestamp: %(agent_time)s. This differs from the "
-"current server timestamp: %(serv_time)s by %(diff)s seconds, which is "
-"more than the threshold agent downtime: %(threshold)s."
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:230
-#, python-format
-msgid "Failed to schedule network %s"
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:315
-#, python-format
-msgid ""
-"Unexpected exception occurred while removing network %(net)s from agent "
-"%(agent)s"
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:326
-msgid "Exception encountered during network rescheduling"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:304 neutron/plugins/ml2/plugin.py:595
-#, python-format
-msgid "An exception occurred while creating the %(resource)s:%(item)s"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:1158
-#, python-format
-msgid "Unable to generate mac address after %s attempts"
-msgstr ""
-
-#: neutron/db/dvr_mac_db.py:105
-#, python-format
-msgid "MAC generation error after %s attempts"
-msgstr ""
-
-#: neutron/db/dvr_mac_db.py:181
-#, python-format
-msgid "Could not retrieve gateway port for subnet %s"
-msgstr ""
-
-#: neutron/db/ipam_pluggable_backend.py:72
-#, python-format
-msgid "IP deallocation failed on external system for %s"
-msgstr ""
-
-#: neutron/db/ipam_pluggable_backend.py:134
-#, python-format
-msgid "IP allocation failed on external system for %s"
-msgstr ""
-
-#: neutron/db/ipam_pluggable_backend.py:366
-msgid ""
-"An exception occurred during subnet update. Reverting allocation pool "
-"changes"
-msgstr ""
-
-#: neutron/db/l3_agentschedulers_db.py:144
-#, python-format
-msgid "Failed to reschedule router %s"
-msgstr ""
-
-#: neutron/db/l3_agentschedulers_db.py:149
-msgid "Exception encountered during router rescheduling."
-msgstr ""
-
-#: neutron/db/metering/metering_rpc.py:47
-#, python-format
-msgid "Unable to find agent %s."
-msgstr ""
-
-#: neutron/extensions/l3agentscheduler.py:49
-#: neutron/extensions/l3agentscheduler.py:92
-msgid "No plugin for L3 routing registered to handle router scheduling"
-msgstr ""
-
-#: neutron/ipam/drivers/neutrondb_ipam/driver.py:96
-#: neutron/ipam/drivers/neutrondb_ipam/driver.py:452
-#, python-format
-msgid "IPAM subnet referenced to Neutron subnet %s does not exist"
-msgstr ""
-
-#: neutron/notifiers/nova.py:214
-#, python-format
-msgid "Failed to notify nova on events: %s"
-msgstr ""
-
-#: neutron/notifiers/nova.py:218 neutron/notifiers/nova.py:234
-#, python-format
-msgid "Error response returned from nova: %s"
-msgstr ""
-
-#: neutron/pecan_wsgi/hooks/translation.py:38
-#, python-format
-msgid "An unexpected exception was caught: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/db.py:249 neutron/plugins/ml2/db.py:333
-#: neutron/plugins/ml2/plugin.py:1422
-#, python-format
-msgid "Multiple ports have port_id starting with %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:62
-#, python-format
-msgid ""
-"Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s'"
-" is already registered for type '%(type)s'"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:78
-#, python-format
-msgid "No type driver for tenant network_type: %s. Service terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:85
-#, python-format
-msgid "No type driver for external network_type: %s. Service terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:164
-#, python-format
-msgid "Network %s has no segments"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:263 neutron/plugins/ml2/managers.py:290
-#, python-format
-msgid "Failed to release segment '%s' because network type is not supported."
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:404
-#, python-format
-msgid "Mechanism driver '%(name)s' failed in %(method)s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:690 neutron/plugins/ml2/managers.py:751
-#, python-format
-msgid "Failed to bind port %(port)s on host %(host)s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:705
-#, python-format
-msgid ""
-"Exceeded maximum binding levels attempting to bind port %(port)s on host "
-"%(host)s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:748
-#, python-format
-msgid "Mechanism driver %s failed in bind_port"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:864
-#, python-format
-msgid "Extension driver '%(name)s' failed in %(method)s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:299
-#, python-format
-msgid "Failed to commit binding results for %(port)s after %(max)s tries"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:475
-#, python-format
-msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:486
-#, python-format
-msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:572
-#, python-format
-msgid "Could not find %s to delete."
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:575
-#, python-format
-msgid "Could not delete %(res)s %(id)s."
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:608
-#, python-format
-msgid ""
-"mechanism_manager.create_%(res)s_postcommit failed for %(res)s: "
-"'%(failed_id)s'. Deleting %(res)ss %(resource_ids)s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:664
-#, python-format
-msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:741
-#, python-format
-msgid "Exception auto-deleting port %s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:753
-#, python-format
-msgid "Exception auto-deleting subnet %s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:837
-msgid "mechanism_manager.delete_network_postcommit failed"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:860
-#, python-format
-msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:987
-#, python-format
-msgid "Exception deleting fixed_ip from port %s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:996
-msgid "mechanism_manager.delete_subnet_postcommit failed"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:1059
-#, python-format
-msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:1071
-#, python-format
-msgid "_bind_port_if_needed failed, deleting port '%s'"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:1100
-#, python-format
-msgid "_bind_port_if_needed failed. Deleting all ports from create bulk '%s'"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:1239
-#, python-format
-msgid "mechanism_manager.update_port_postcommit failed for port %s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:1286
-#, python-format
-msgid "No Host supplied to bind DVR Port %s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:1403
-#, python-format
-msgid "mechanism_manager.delete_port_postcommit failed for port %s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:1435
-#, python-format
-msgid "Binding info for DVR port %s not found"
-msgstr ""
-
-#: neutron/plugins/ml2/rpc.py:161
-#, python-format
-msgid "Failed to get details for device %s"
-msgstr ""
-
-#: neutron/plugins/ml2/rpc.py:249
-#, python-format
-msgid "Failed to update device %s up"
-msgstr ""
-
-#: neutron/plugins/ml2/rpc.py:263
-#, python-format
-msgid "Failed to update device %s down"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_geneve.py:90
-#: neutron/plugins/ml2/drivers/type_vxlan.py:85
-msgid "Failed to parse vni_ranges. Service terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_gre.py:79
-msgid "Failed to parse tunnel_id_ranges. Service terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vlan.py:94
-msgid "Failed to parse network_vlan_ranges. Service terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:88
-#, python-format
-msgid ""
-"Interface %(intf)s for physical network %(net)s does not exist. Agent "
-"terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:96
-#, python-format
-msgid ""
-"Bridge %(brq)s for physical network %(net)s does not exist. Agent "
-"terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:105
-#, python-format
-msgid ""
-"Tunneling cannot be enabled without the local_ip bound to an interface on"
-" the host. Please configure local_ip %s on the host interface to be used "
-"for tunneling and restart the agent."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:188
-#, python-format
-msgid "Failed creating vxlan interface for %(segmentation_id)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:270
-#, python-format
-msgid ""
-"Unable to create VXLAN interface for VNI %s because it is in use by "
-"another interface."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:361
-#, python-format
-msgid "Unable to add %(interface)s to %(bridge_name)s! Exception: %(e)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:374
-#, python-format
-msgid "Unable to add vxlan interface for network %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:383
-#, python-format
-msgid "No bridge or interface mappings for physical network %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:395
-#, python-format
-msgid "Unknown network_type %(network_type)s for network %(network_id)s."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:554
-msgid "No valid Segmentation ID to perform UCAST test."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:678
-#, python-format
-msgid "Network %s is not available."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:855
-msgid "Unable to obtain MAC address for unique ID. Agent terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:918
-#, python-format
-msgid "Unable to get port details for %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1009
-#, python-format
-msgid "Error occurred while removing port %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1094
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:342
-#, python-format
-msgid "Error in agent loop. Devices info: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1122
-#, python-format
-msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1131
-#, python-format
-msgid "Parsing bridge_mappings failed: %s. Agent terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:51
-#, python-format
-msgid "Failed to get devices for %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:384
-#, python-format
-msgid "PCI slot %(pci_slot)s has no mapping to Embedded Switch; skipping"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:402
-msgid "Failed on Agent configuration parse. Agent terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:414
-msgid "Agent Initialization Failed"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py:63
-#, python-format
-msgid "Failed to set device %s max rate"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:88
-msgid "Failed to parse supported PCI vendor devices"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:173
-msgid ""
-"DVR: Failed to obtain a valid local DVR MAC address - L2 Agent operating "
-"in Non-DVR Mode"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:374
-#, python-format
-msgid "DVR: Duplicate DVR router interface detected for subnet %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:382
-#, python-format
-msgid "DVR: Unable to retrieve subnet information for subnet_id %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:517
-#, python-format
-msgid ""
-"Centralized-SNAT port %(port)s on subnet %(port_subnet)s already seen on "
-"a different subnet %(orig_subnet)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:456
-msgid "No tunnel_type specified, cannot create tunnels"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:459
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:482
-#, python-format
-msgid "tunnel_type %s not supported by agent"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:475
-msgid "No tunnel_ip specified, cannot delete tunnels"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:479
-msgid "No tunnel_type specified, cannot delete tunnels"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:627
-#, python-format
-msgid "No local VLAN available for net-id=%s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:658
-#, python-format
-msgid ""
-"Cannot provision %(network_type)s network for net-id=%(net_uuid)s - "
-"tunneling disabled"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:666
-#, python-format
-msgid ""
-"Cannot provision flat network for net-id=%(net_uuid)s - no bridge for "
-"physical_network %(physical_network)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:676
-#, python-format
-msgid ""
-"Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for "
-"physical_network %(physical_network)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:685
-#, python-format
-msgid ""
-"Cannot provision unknown network type %(network_type)s for net-"
-"id=%(net_uuid)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:745
-#, python-format
-msgid ""
-"Cannot reclaim unknown network type %(network_type)s for net-"
-"id=%(net_uuid)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:783
-#, python-format
-msgid "Expected port %s not found"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:847
-#, python-format
-msgid "Configuration for devices %s failed!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1016
-msgid ""
-"Failed to create OVS patch port. Cannot have tunneling enabled on this "
-"agent, since this version of OVS does not support tunnels or patch ports."
-" Agent terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1053
-#, python-format
-msgid ""
-"Bridge %(bridge)s for physical network %(physical_network)s does not "
-"exist. Agent terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1351
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1564
-#, python-format
-msgid ""
-"process_network_ports - iteration:%d - failure while retrieving port "
-"details from server"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1605
-#, python-format
-msgid ""
-"process_ancillary_network_ports - iteration:%d - failure while retrieving"
-" port details from server"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1828
-msgid "Error while synchronizing tunnels"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1898
-msgid "Error while processing VIF ports"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1956
-#, python-format
-msgid ""
-"Tunneling can't be enabled with invalid local_ip '%s'. IP couldn't be "
-"found on this host's interfaces."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1970
-#, python-format
-msgid "Invalid tunnel type specified: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1990
-#, python-format
-msgid "%s Agent terminated!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py:52
-msgid "Failed to communicate with the switch"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py:60
-msgid "Switch connection timeout"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py:73
-#, python-format
-msgid "ofctl request %(request)s error %(error)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py:84
-#, python-format
-msgid "ofctl request %(request)s timed out"
-msgstr ""
-
-#: neutron/quota/resource.py:205
-#, python-format
-msgid "Model class %s does not have a tenant_id attribute"
-msgstr ""
-
-#: neutron/scheduler/l3_agent_scheduler.py:286
-#, python-format
-msgid "Not enough candidates, a HA router needs at least %s agents"
-msgstr ""
-
-#: neutron/services/service_base.py:83
-#, python-format
-msgid "Error loading provider '%(provider)s' for service %(service_type)s"
-msgstr ""
-
-#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:139
-#, python-format
-msgid "Firewall Driver Error for %(func_name)s for fw: %(fwid)s"
-msgstr ""
-
-#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:154
-#, python-format
-msgid "FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"
-msgstr ""
-
-#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:174
-#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:193
-#, python-format
-msgid "Firewall Driver Error on fw state %(fwmsg)s for fw: %(fwid)s"
-msgstr ""
-
-#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:233
-#, python-format
-msgid "FWaaS RPC info call failed for '%s'."
-msgstr ""
-
-#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:277
-msgid "Failed fwaas process services sync"
-msgstr ""
-
-#: neutron/services/metering/agents/metering_agent.py:60
-msgid "Failed synchronizing routers"
-msgstr ""
-
-#: neutron/services/metering/agents/metering_agent.py:178
-#, python-format
-msgid "Driver %(driver)s does not implement %(func)s"
-msgstr ""
-
-#: neutron/services/metering/agents/metering_agent.py:182
-#, python-format
-msgid "Driver %(driver)s:%(func)s runtime error"
-msgstr ""
-
-#: neutron/services/metering/drivers/iptables/iptables_driver.py:356
-#, python-format
-msgid "Failed to get traffic counters, router: %s"
-msgstr ""
-
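One pattern worth noting before the next catalog: whenever a msgid interpolates more than one value, it uses named %-placeholders such as %(port)s and %(host)s rather than positional %s. This lets a translator reorder the arguments for their language without breaking the formatting. A minimal, self-contained illustration (plain Python, no neutron imports; the German string is a hypothetical translation, not one shipped by the project):

# Named placeholders survive reordering; positional ones do not.
msgid = "Failed to bind port %(port)s on host %(host)s"
# A translator may legally move %(host)s in front of %(port)s:
translated = "Host %(host)s: Port %(port)s konnte nicht gebunden werden"

values = {"port": "p-1234", "host": "compute-01"}
print(msgid % values)       # original ordering
print(translated % values)  # reordered, still formats correctly

This is also why translators must keep the placeholder names exactly as they appear in the msgid.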
diff --git a/neutron/locale/neutron-log-info.pot b/neutron/locale/neutron-log-info.pot
deleted file mode 100644 (file)
index cc3c8cf..0000000
+++ /dev/null
@@ -1,873 +0,0 @@
-# Translations template for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#: neutron/manager.py:114
-#, python-format
-msgid "Loading core plugin: %s"
-msgstr ""
-
-#: neutron/manager.py:161
-#, python-format
-msgid "Service %s is supported by the core plugin"
-msgstr ""
-
-#: neutron/manager.py:179
-#, python-format
-msgid "Loading Plugin: %s"
-msgstr ""
-
-#: neutron/service.py:238
-#, python-format
-msgid "Neutron service started, listening on %(host)s:%(port)s"
-msgstr ""
-
-#: neutron/wsgi.py:594
-#, python-format
-msgid "%(method)s %(url)s"
-msgstr ""
-
-#: neutron/wsgi.py:611
-#, python-format
-msgid "HTTP exception thrown: %s"
-msgstr ""
-
-#: neutron/wsgi.py:627
-#, python-format
-msgid "%(url)s returned with HTTP %(status)d"
-msgstr ""
-
-#: neutron/wsgi.py:630
-#, python-format
-msgid "%(url)s returned a fault: %(exception)s"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:80
-msgid "Disabled security-group extension."
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:82
-msgid "Disabled allowed-address-pairs extension."
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:139
-#, python-format
-msgid ""
-"Skipping method %s as firewall is disabled or configured as "
-"NoopFirewallDriver."
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:151
-#, python-format
-msgid "Preparing filters for devices %s"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:181
-#, python-format
-msgid "Security group rule updated %r"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:189
-#, python-format
-msgid "Security group member updated %r"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:213
-msgid "Provider rule updated"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:225
-#, python-format
-msgid "Remove device filter for %r"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:235
-msgid "Refresh firewall rules"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:239
-msgid "No ports here to refresh firewall"
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:470 neutron/agent/common/ovs_lib.py:503
-#, python-format
-msgid "Port %(port_id)s not present in bridge %(br_name)s"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:91 neutron/agent/dhcp/agent.py:584
-msgid "DHCP agent started"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:147
-msgid "Synchronizing state"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:168
-msgid "Synchronizing state complete"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:325
-#, python-format
-msgid "Trigger reload_allocations for port %s"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:561
-msgid "Agent has just been revived. Scheduling full sync"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:581 neutron/agent/l3/agent.py:669
-#: neutron/services/metering/agents/metering_agent.py:287
-#, python-format
-msgid "agent_updated by server side %s!"
-msgstr ""
-
-#: neutron/agent/l2/extensions/manager.py:44
-#, python-format
-msgid "Loaded agent extensions: %s"
-msgstr ""
-
-#: neutron/agent/l2/extensions/manager.py:57
-#, python-format
-msgid "Initializing agent extension '%s'"
-msgstr ""
-
-#: neutron/agent/l2/extensions/qos.py:224
-#, python-format
-msgid ""
-"QoS policy %(qos_policy_id)s applied to port %(port_id)s is not available"
-" on server, it has been deleted. Skipping."
-msgstr ""
-
-#: neutron/agent/l2/extensions/qos.py:261
-#, python-format
-msgid ""
-"QoS extension did have no information about the port %s that we were "
-"trying to reset"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:582 neutron/agent/l3/agent.py:660
-msgid "L3 agent started"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:645
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:840
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:318
-msgid "Agent has just been revived. Doing a full sync."
-msgstr ""
-
-#: neutron/agent/l3/ha.py:114
-#, python-format
-msgid "Router %(router_id)s transitioned to %(state)s"
-msgstr ""
-
-#: neutron/agent/l3/ha.py:121
-#, python-format
-msgid ""
-"Router %s is not managed by this agent. It was possibly deleted "
-"concurrently."
-msgstr ""
-
-#: neutron/agent/linux/daemon.py:115
-#, python-format
-msgid "Process runs with uid/gid: %(uid)s/%(gid)s"
-msgstr ""
-
-#: neutron/agent/linux/dhcp.py:855
-#, python-format
-msgid ""
-"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is "
-"not in port's address IP versions"
-msgstr ""
-
-#: neutron/agent/linux/dhcp.py:1108
-#, python-format
-msgid "Skipping DHCP port %s as it is already in use"
-msgstr ""
-
-#: neutron/agent/linux/interface.py:245
-#, python-format
-msgid "Device %s already exists"
-msgstr ""
-
-#: neutron/agent/linux/iptables_firewall.py:176
-#, python-format
-msgid "Attempted to update port filter which is not filtered %s"
-msgstr ""
-
-#: neutron/agent/linux/iptables_firewall.py:187
-#, python-format
-msgid "Attempted to remove port filter which is not filtered %r"
-msgstr ""
-
-#: neutron/api/extensions.py:381
-msgid "Initializing extension manager."
-msgstr ""
-
-#: neutron/api/extensions.py:535
-#, python-format
-msgid "Loaded extension: %s"
-msgstr ""
-
-#: neutron/api/v2/base.py:97
-msgid "Allow sorting is enabled because native pagination requires native sorting"
-msgstr ""
-
-#: neutron/api/v2/resource.py:94 neutron/api/v2/resource.py:106
-#, python-format
-msgid "%(action)s failed (client error): %(exc)s"
-msgstr ""
-
-#: neutron/cmd/ipset_cleanup.py:60
-#, python-format
-msgid "Removing iptables rule for IPset: %s"
-msgstr ""
-
-#: neutron/cmd/ipset_cleanup.py:79
-#, python-format
-msgid "Destroying IPset: %s"
-msgstr ""
-
-#: neutron/cmd/ipset_cleanup.py:89
-#, python-format
-msgid "Destroying IPsets with prefix: %s"
-msgstr ""
-
-#: neutron/cmd/ipset_cleanup.py:97
-msgid "IPset cleanup completed successfully"
-msgstr ""
-
-#: neutron/cmd/linuxbridge_cleanup.py:35
-#, python-format
-msgid "Interface mappings: %s."
-msgstr ""
-
-#: neutron/cmd/linuxbridge_cleanup.py:43
-#, python-format
-msgid "Bridge mappings: %s."
-msgstr ""
-
-#: neutron/cmd/linuxbridge_cleanup.py:55
-#, python-format
-msgid "Linux bridge %s deleted"
-msgstr ""
-
-#: neutron/cmd/linuxbridge_cleanup.py:58
-msgid "Linux bridge cleanup completed successfully"
-msgstr ""
-
-#: neutron/cmd/ovs_cleanup.py:72
-#, python-format
-msgid "Deleting port: %s"
-msgstr ""
-
-#: neutron/cmd/ovs_cleanup.py:102
-#, python-format
-msgid "Cleaning bridge: %s"
-msgstr ""
-
-#: neutron/cmd/ovs_cleanup.py:109
-msgid "OVS cleanup completed successfully"
-msgstr ""
-
-#: neutron/common/config.py:241
-msgid "Logging enabled!"
-msgstr ""
-
-#: neutron/common/config.py:242
-#, python-format
-msgid "%(prog)s version %(version)s"
-msgstr ""
-
-#: neutron/common/ipv6_utils.py:63
-msgid "IPv6 is not enabled on this system."
-msgstr ""
-
-#: neutron/db/agents_db.py:294
-#, python-format
-msgid ""
-"Heartbeat received from %(type)s agent on host %(host)s, uuid %(uuid)s "
-"after %(delta)s"
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:169
-msgid ""
-"Skipping periodic DHCP agent status check because automatic network "
-"rescheduling is disabled."
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:204
-#, python-format
-msgid "Scheduling unhosted network %s"
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:211
-#, python-format
-msgid ""
-"Failed to schedule network %s, no eligible agents or it might be already "
-"scheduled by another server"
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:219
-#, python-format
-msgid "Adding network %(net)s to agent %(agent)s on host %(host)s"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:840 neutron/plugins/ml2/plugin.py:936
-#, python-format
-msgid ""
-"Found port (%(port_id)s, %(ip)s) having IP allocation on subnet "
-"%(subnet)s, cannot delete"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:65
-#, python-format
-msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:230
-#, python-format
-msgid ""
-"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet "
-"%(subnet_id)s (CIDR: %(cidr)s)"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:268
-msgid "Specified IP addresses do not match the subnet IP version"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:272
-#, python-format
-msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:293
-#, python-format
-msgid "Found overlapping ranges: %(l_range)s and %(r_range)s"
-msgstr ""
-
-#: neutron/db/l3_agentschedulers_db.py:87
-msgid ""
-"Skipping period L3 agent status check because automatic router "
-"rescheduling is disabled."
-msgstr ""
-
-#: neutron/db/l3_db.py:1255
-#, python-format
-msgid "Skipping port %s as no IP is configure on it"
-msgstr ""
-
-#: neutron/db/l3_dvr_db.py:81
-#, python-format
-msgid "Centralizing distributed router %s is not supported"
-msgstr ""
-
-#: neutron/db/l3_dvr_db.py:603
-#, python-format
-msgid "Agent Gateway port does not exist, so create one: %s"
-msgstr ""
-
-#: neutron/db/l3_dvr_db.py:683
-#, python-format
-msgid "SNAT interface port list does not exist, so create one: %s"
-msgstr ""
-
-#: neutron/db/l3_dvrscheduler_db.py:386
-msgid "SNAT already bound to a service node."
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:211
-#, python-format
-msgid ""
-"Attempt %(count)s to allocate a VRID in the network %(network)s for the "
-"router %(router)s"
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:300
-#, python-format
-msgid ""
-"Number of active agents lower than max_l3_agents_per_router. L3 agents "
-"available: %s"
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:510
-#, python-format
-msgid "HA network %s can not be deleted."
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:516
-#, python-format
-msgid ""
-"HA network %(network)s was deleted as no HA routers are present in tenant"
-" %(tenant)s."
-msgstr ""
-
-#: neutron/debug/commands.py:107
-#, python-format
-msgid "%d probe(s) deleted"
-msgstr ""
-
-#: neutron/extensions/vlantransparent.py:46
-msgid "Disabled vlantransparent extension."
-msgstr ""
-
-#: neutron/notifiers/nova.py:232
-#, python-format
-msgid "Nova event response: %s"
-msgstr ""
-
-#: neutron/pecan_wsgi/startup.py:96
-#, python-format
-msgid ""
-"Added controller for resource %(resource)s via URI path "
-"segment:%(collection)s"
-msgstr ""
-
-#: neutron/plugins/common/utils.py:178
-#, python-format
-msgid ""
-"The requested interface name %(requested_name)s exceeds the %(limit)d "
-"character limitation. It was shortened to %(new_name)s to fit."
-msgstr ""
-
-#: neutron/plugins/ml2/db.py:60
-#, python-format
-msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:48
-#, python-format
-msgid "Configured type driver names: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:53
-#, python-format
-msgid "Loaded type driver names: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:70
-#, python-format
-msgid "Registered types: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:81
-#, python-format
-msgid "Tenant network_types: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:181
-#, python-format
-msgid "Initializing driver for type '%s'"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:306
-#, python-format
-msgid "Configured mechanism driver names: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:312
-#, python-format
-msgid "Loaded mechanism driver names: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:324
-#, python-format
-msgid "Registered mechanism drivers: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:347
-#, python-format
-msgid ""
-"%(rule_types)s rule types disabled for ml2 because %(driver)s does not "
-"support them"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:371
-#, python-format
-msgid "Initializing mechanism driver '%s'"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:782
-#, python-format
-msgid "Configured extension driver names: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:788
-#, python-format
-msgid "Loaded extension driver names: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:799
-#, python-format
-msgid "Registered extension drivers: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:805
-#, python-format
-msgid "Initializing extension driver '%s'"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:814
-#, python-format
-msgid "Got %(alias)s extension from driver '%(drv)s'"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:825
-#, python-format
-msgid "Extension driver '%(name)s' failed in %(method)s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:155
-msgid "Modular L2 Plugin initialization complete"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:290
-#, python-format
-msgid "Attempt %(count)s to bind port %(port)s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:738
-#, python-format
-msgid "Port %s was deleted concurrently"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:749
-#, python-format
-msgid "Subnet %s was deleted concurrently"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:1448
-#, python-format
-msgid ""
-"Binding info for port %s was not found, it might have been deleted "
-"already."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:74
-msgid "Arbitrary flat physical_network names allowed"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:77
-msgid "Flat networks are disabled"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:79
-#, python-format
-msgid "Allowable flat physical_network names: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:86
-msgid "ML2 FlatTypeDriver initialization complete"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_local.py:38
-msgid "ML2 LocalTypeDriver initialization complete"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_tunnel.py:135
-#, python-format
-msgid "%(type)s ID ranges: %(range)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_tunnel.py:362
-#, python-format
-msgid ""
-"Tunnel IP %(ip)s was used by host %(host)s and will be assigned to "
-"%(new_host)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vlan.py:97
-#, python-format
-msgid "Network VLAN ranges: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vlan.py:164
-msgid "VlanTypeDriver initialization complete"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py:33
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:859
-#, python-format
-msgid ""
-"Skipping ARP spoofing rules for port '%s' because it has port security "
-"disabled"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py:90
-#, python-format
-msgid "Clearing orphaned ARP spoofing entries for devices %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:673
-#, python-format
-msgid "Physical network %s is defined in bridge_mappings and cannot be deleted."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:824
-msgid "Stopping linuxbridge agent."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:859
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:124
-#, python-format
-msgid "RPC agent_id: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:927
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:256
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1427
-#, python-format
-msgid "Port %(device)s updated. Details: %(details)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:994
-#, python-format
-msgid "Device %s not defined on plugin"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1001
-#, python-format
-msgid "Attachment %s removed"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1013
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1517
-#, python-format
-msgid "Port %s updated."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1071
-msgid "LinuxBridge Agent RPC Daemon Started!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1083
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:323
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1724
-msgid "Agent out of sync with plugin!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1125
-#, python-format
-msgid "Interface mappings: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1134
-#, python-format
-msgid "Bridge mappings: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1142
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:417
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1907
-msgid "Agent initialized successfully, now running... "
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:213
-#, python-format
-msgid "Device %(device)s spoofcheck %(spoofcheck)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:237
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py:65
-#, python-format
-msgid "No device with MAC %s defined on agent."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:268
-#, python-format
-msgid "Device with MAC %s not defined on plugin"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:276
-#, python-format
-msgid "Removing device with MAC address %(mac)s and PCI slot %(pci_slot)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:303
-#, python-format
-msgid "Port with MAC %(mac)s and PCI slot %(pci_slot)s updated."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:316
-msgid "SRIOV NIC Agent RPC Daemon Started!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:405
-#, python-format
-msgid "Physical Devices mappings: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:406
-#, python-format
-msgid "Exclude Devices: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:210
-#, python-format
-msgid "L2 Agent operating in DVR Mode with MAC %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:636
-#, python-format
-msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:700
-#, python-format
-msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:852
-#, python-format
-msgid "Configuration for devices up %(up)s and devices down %(down)s completed."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:918
-#, python-format
-msgid "port_unbound(): net_uuid %s not in local_vlan_map"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:985
-#, python-format
-msgid "Adding %s to list of bridges."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1047
-#, python-format
-msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1310
-#, python-format
-msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1421
-#, python-format
-msgid ""
-"Port %s was not found on the integration bridge and will therefore not be"
-" processed"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1472
-#, python-format
-msgid "Ancillary Ports %s added"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1489
-#, python-format
-msgid "Ports %s removed"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1505
-#, python-format
-msgid "Ancillary ports %s removed"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1712
-#, python-format
-msgid "Cleaning stale %s flows"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1783
-msgid "rpc_loop doing a full sync."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1824
-msgid "Agent tunnel out of sync with plugin!"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1927
-msgid "Agent caught SIGTERM, quitting daemon loop."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1931
-msgid "Agent caught SIGHUP, resetting."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py:43
-#, python-format
-msgid "Bridge %(br_name)s has datapath-ID %(dpid)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py:57
-#, python-format
-msgid "Bridge %(br_name)s changed its datapath-ID from %(old)s to %(new)s"
-msgstr ""
-
-#: neutron/plugins/ml2/extensions/port_security.py:34
-msgid "PortSecurityExtensionDriver initialization complete"
-msgstr ""
-
-#: neutron/quota/__init__.py:208
-msgid ""
-"ConfDriver is used as quota_driver because the loaded plugin does not "
-"support 'quotas' table."
-msgstr ""
-
-#: neutron/quota/__init__.py:219
-#, python-format
-msgid "Loaded quota_driver: %s."
-msgstr ""
-
-#: neutron/quota/resource_registry.py:168
-#, python-format
-msgid "Creating instance of CountableResource for resource:%s"
-msgstr ""
-
-#: neutron/quota/resource_registry.py:174
-#, python-format
-msgid "Creating instance of TrackedResource for resource:%s"
-msgstr ""
-
-#: neutron/scheduler/dhcp_agent_scheduler.py:160
-#, python-format
-msgid "Agent %s already present"
-msgstr ""
-
-#: neutron/server/rpc_eventlet.py:33
-msgid "Eventlet based AMQP RPC server starting..."
-msgstr ""
-
-#: neutron/server/rpc_eventlet.py:37 neutron/server/wsgi_eventlet.py:34
-msgid "RPC was already started in parent process by plugin."
-msgstr ""
-
-#: neutron/server/wsgi_pecan.py:35
-msgid "Pecan WSGI server starting..."
-msgstr ""
-
-#: neutron/services/service_base.py:94
-#, python-format
-msgid "Default provider is not specified for service type %s"
-msgstr ""
-
-#: neutron/services/metering/agents/metering_agent.py:95
-#, python-format
-msgid "Loading Metering driver %s"
-msgstr ""
-
-#: neutron/services/metering/drivers/iptables/iptables_driver.py:89
-#, python-format
-msgid "Loading interface driver %s"
-msgstr ""
-
-#: neutron/services/qos/notification_drivers/manager.py:70
-#, python-format
-msgid "Loading %(name)s (%(description)s) notification driver for QoS plugin"
-msgstr ""
-
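At runtime, the catalogs compiled from these templates are resolved through gettext (via oslo.i18n), so removing the .pot files from the packaging tree affects only the translation pipeline, not the Python code paths. A self-contained sketch of the lookup, using only the standard library; the domain name and locale directory are hypothetical, and with fallback=True the msgid is returned untranslated when no compiled .mo catalog is installed:

# Hedged sketch: loading a compiled catalog built from a template like
# neutron-log-warning.pot. Domain and localedir are assumptions.
import gettext

t = gettext.translation("neutron-log-warning",
                        localedir="/usr/share/locale",
                        languages=["ko"],
                        fallback=True)
_ = t.gettext
# Prints the Korean translation if installed, else the msgid itself.
print(_("DVR functionality requires a server upgrade."))

The warning-level template that this would serve follows below.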
diff --git a/neutron/locale/neutron-log-warning.pot b/neutron/locale/neutron-log-warning.pot
deleted file mode 100644 (file)
index c8a3c66..0000000
+++ /dev/null
@@ -1,634 +0,0 @@
-# Translations template for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#: neutron/policy.py:111
-#, python-format
-msgid "Unable to find data type descriptor for attribute %s"
-msgstr ""
-
-#: neutron/agent/rpc.py:121
-msgid "DVR functionality requires a server upgrade."
-msgstr ""
-
-#: neutron/agent/rpc.py:199
-msgid "Tunnel synchronization requires a server upgrade."
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:67
-#: neutron/agent/securitygroups_rpc.py:100
-msgid "Driver configuration doesn't match with enable_security_group"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:127
-msgid ""
-"security_group_info_for_devices rpc call not supported by the server, "
-"falling back to old security_group_rules_for_devices which scales worse."
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:423
-#, python-format
-msgid "Found not yet ready openvswitch port: %s"
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:426
-#, python-format
-msgid "Found failed openvswitch port: %s"
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:485
-#, python-format
-msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:115
-#, python-format
-msgid ""
-"Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its "
-"current state; please check that the network and/or its subnet(s) still "
-"exist."
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:130 neutron/agent/dhcp/agent.py:203
-#, python-format
-msgid "Network %s has been deleted."
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:220
-#, python-format
-msgid ""
-"Network %s may have been deleted and its resources may have already been "
-"disposed."
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:370
-#, python-format
-msgid ""
-"%(port_num)d router ports found on the metadata access network. Only the "
-"port %(port_id)s, for router %(router_id)s will be considered"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:566 neutron/agent/l3/agent.py:651
-#: neutron/agent/metadata/agent.py:270
-#: neutron/services/metering/agents/metering_agent.py:279
-msgid ""
-"Neutron server does not support state report. State report for this agent"
-" will be disabled."
-msgstr ""
-
-#: neutron/agent/l2/extensions/qos.py:96
-#, python-format
-msgid "Unsupported QoS rule type for %(rule_id)s: %(rule_type)s; skipping"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:204
-#, python-format
-msgid ""
-"l3-agent cannot check service plugins enabled at the neutron server when "
-"startup due to RPC error. It happens when the server does not support "
-"this RPC API. If the error is UnsupportedVersion you can ignore this "
-"warning. Detail message: %s"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:216
-#, python-format
-msgid ""
-"l3-agent cannot check service plugins enabled on the neutron server. "
-"Retrying. Detail message: %s"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:353
-#, python-format
-msgid "Info for router %s was not found. Performing router cleanup"
-msgstr ""
-
-#: neutron/agent/l3/dvr_local_router.py:216
-#, python-format
-msgid ""
-"Device %s does not exist so ARP entry cannot be updated, will cache "
-"information to be applied later when the device exists"
-msgstr ""
-
-#: neutron/agent/l3/router_info.py:187
-#, python-format
-msgid "Unable to configure IP address for floating IP: %s"
-msgstr ""
-
-#: neutron/agent/linux/dhcp.py:231
-#, python-format
-msgid "Failed trying to delete interface: %s"
-msgstr ""
-
-#: neutron/agent/linux/dhcp.py:238
-#, python-format
-msgid "Failed trying to delete namespace: %s"
-msgstr ""
-
-#: neutron/agent/linux/external_process.py:250
-#, python-format
-msgid "Respawning %(service)s for uuid %(uuid)s"
-msgstr ""
-
-#: neutron/agent/linux/iptables_manager.py:253
-#, python-format
-msgid ""
-"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
-"%(top)r"
-msgstr ""
-
-#: neutron/agent/linux/iptables_manager.py:609
-#, python-format
-msgid ""
-"Duplicate iptables %(thing)s detected. This may indicate a bug in the the"
-" iptables %(thing)s generation code. Line: %(line)s"
-msgstr ""
-
-#: neutron/agent/linux/iptables_manager.py:648
-#, python-format
-msgid "Attempted to get traffic counters of chain %s which does not exist"
-msgstr ""
-
-#: neutron/agent/metadata/agent.py:203
-msgid ""
-"The remote metadata server responded with Forbidden. This response "
-"usually occurs when shared secrets do not match."
-msgstr ""
-
-#: neutron/api/api_common.py:105
-#, python-format
-msgid ""
-"Invalid value for pagination_max_limit: %s. It should be an integer "
-"greater to 0"
-msgstr ""
-
-#: neutron/api/extensions.py:517
-#, python-format
-msgid "Did not find expected name \"%(ext_name)s\" in %(file)s"
-msgstr ""
-
-#: neutron/api/extensions.py:525
-#, python-format
-msgid "Extension file %(f)s wasn't loaded due to %(exception)s"
-msgstr ""
-
-#: neutron/api/extensions.py:563
-#, python-format
-msgid "Extension %s not supported by any of loaded plugins"
-msgstr ""
-
-#: neutron/api/extensions.py:575
-#, python-format
-msgid "Loaded plugins do not implement extension %s interface"
-msgstr ""
-
-#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:73
-#, python-format
-msgid ""
-"Unable to schedule network %s: no agents available; will retry on "
-"subsequent port and subnet creation events."
-msgstr ""
-
-#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:90
-#, python-format
-msgid ""
-"Only %(active)d of %(total)d DHCP agents associated with network "
-"'%(net_id)s' are marked as active, so notifications may be sent to "
-"inactive agents."
-msgstr ""
-
-#: neutron/api/rpc/handlers/dhcp_rpc.py:107
-#, python-format
-msgid ""
-"Action %(action)s for network %(net_id)s could not complete successfully:"
-" %(reason)s"
-msgstr ""
-
-#: neutron/api/rpc/handlers/dhcp_rpc.py:159
-#, python-format
-msgid "Network %s could not be found, it might have been deleted concurrently."
-msgstr ""
-
-#: neutron/api/rpc/handlers/dhcp_rpc.py:187
-#, python-format
-msgid "Updating lease expiration is now deprecated. Issued  from host %s."
-msgstr ""
-
-#: neutron/api/rpc/handlers/securitygroups_rpc.py:180
-msgid ""
-"Security group agent binding currently not set. This should be set by the"
-" end of the init process."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:94
-msgid ""
-"The user that is executing neutron can read the namespaces without using "
-"the root_helper. Disable the use_helper_for_ns_read option to avoid a "
-"performance impact."
-msgstr ""
-
-#: neutron/db/agents_db.py:186
-#, python-format
-msgid "%(agent_type)s agent %(agent_id)s is not active"
-msgstr ""
-
-#: neutron/db/agents_db.py:199
-#, python-format
-msgid "Configuration for agent %(agent_type)s on host %(host)s is invalid."
-msgstr ""
-
-#: neutron/db/agents_db.py:265
-#, python-format
-msgid ""
-"Agent healthcheck: found %(count)s dead agents out of %(total)s:\n"
-"%(data)s"
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:146
-#, python-format
-msgid ""
-"Time since last %s agent reschedule check has exceeded the interval "
-"between checks. Waiting before check to allow agents to send a heartbeat "
-"in case there was a clock adjustment."
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:285
-msgid "No DHCP agents available, skipping rescheduling"
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:289
-#, python-format
-msgid ""
-"Removing network %(network)s from agent %(agent)s because the agent did "
-"not report to the server in the last %(dead_time)s seconds."
-msgstr ""
-
-#: neutron/db/l3_agentschedulers_db.py:127
-#, python-format
-msgid ""
-"L3 DVR agent on node %(host)s is down. Not rescheduling from agent in "
-"'dvr' mode."
-msgstr ""
-
-#: neutron/db/l3_agentschedulers_db.py:131
-#, python-format
-msgid ""
-"Rescheduling router %(router)s from agent %(agent)s because the agent did"
-" not report to the server in the last %(dead_time)s seconds."
-msgstr ""
-
-#: neutron/db/l3_agentschedulers_db.py:340
-#, python-format
-msgid ""
-"Failed to notify L3 agent on host %(host)s about added router. Attempt "
-"%(attempt)d out of %(max_attempts)d"
-msgstr ""
-
-#: neutron/db/l3_dvr_db.py:778
-#, python-format
-msgid "Router %s was not found. Skipping agent notification."
-msgstr ""
-
-#: neutron/db/l3_dvrscheduler_db.py:374
-msgid "No active L3 agents found for SNAT"
-msgstr ""
-
-#: neutron/db/l3_dvrscheduler_db.py:379
-msgid "No candidates found for SNAT"
-msgstr ""
-
-#: neutron/db/securitygroups_rpc_base.py:378
-#, python-format
-msgid "No valid gateway port on subnet %s is found for IPv6 RA"
-msgstr ""
-
-#: neutron/debug/debug_agent.py:111
-#, python-format
-msgid "Failed to delete namespace %s"
-msgstr ""
-
-#: neutron/notifiers/nova.py:161
-msgid "Port ID not set! Nova will not be notified of port status change."
-msgstr ""
-
-#: neutron/notifiers/nova.py:211
-#, python-format
-msgid "Nova returned NotFound for event: %s"
-msgstr ""
-
-#: neutron/notifiers/nova.py:229
-#, python-format
-msgid "Nova event: %s returned with failed status"
-msgstr ""
-
-#: neutron/pecan_wsgi/startup.py:53
-#, python-format
-msgid "No plugin found for:%s"
-msgstr ""
-
-#: neutron/pecan_wsgi/controllers/root.py:110
-#, python-format
-msgid "No controller found for: %s - returning response code 404"
-msgstr ""
-
-#: neutron/plugins/hyperv/agent/security_groups_driver.py:27
-#, python-format
-msgid ""
-"You are using the deprecated firewall driver: %(deprecated)s. Use the "
-"recommended driver %(new)s instead."
-msgstr ""
-
-#: neutron/plugins/ml2/driver_context.py:198
-#, python-format
-msgid "Could not expand segment %s"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:356
-#, python-format
-msgid "%s does not support QoS; no rule types available"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:556
-#, python-format
-msgid ""
-"In _notify_port_updated(), no bound segment for port %(port_id)s on "
-"network %(network_id)s"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:825
-msgid "A concurrent port creation has occurred"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:1508
-#, python-format
-msgid "Port %s not found during update"
-msgstr ""
-
-#: neutron/plugins/ml2/rpc.py:79
-#, python-format
-msgid "Device %(device)s requested by agent %(agent_id)s not found in database"
-msgstr ""
-
-#: neutron/plugins/ml2/rpc.py:93
-#, python-format
-msgid ""
-"Device %(device)s requested by agent %(agent_id)s on network "
-"%(network_id)s not bound, vif_type: %(vif_type)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_agent.py:76
-#, python-format
-msgid "Refusing to bind port %(pid)s to dead agent: %(agent)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:138
-#, python-format
-msgid "No flat network found on physical network %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_tunnel.py:238
-#, python-format
-msgid "%(type)s tunnel %(id)s not found"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_tunnel.py:304
-#, python-format
-msgid "Endpoint with ip %s already exists"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vlan.py:257
-#, python-format
-msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:108
-#, python-format
-msgid "unable to modify mac_address of ACTIVE port %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:162
-#, python-format
-msgid "Unable to retrieve active L2 agent on host %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:120
-msgid "Invalid Network ID, will lead to incorrect bridge name"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:127
-msgid "Invalid VLAN ID, will lead to incorrect subinterface name"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:134
-msgid "Invalid Interface ID, will lead to incorrect tap device name"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:143
-#, python-format
-msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:156
-#, python-format
-msgid ""
-"Invalid VXLAN Group: %s, must be an address or network (in CIDR notation)"
-" in a multicast range"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:539
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:576
-#, python-format
-msgid ""
-"Option \"%(option)s\" must be supported by command \"%(command)s\" to "
-"enable %(mode)s mode"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:570
-msgid ""
-"VXLAN muticast group(s) must be provided in vxlan_group option to enable "
-"VXLAN MCAST mode"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:200
-#, python-format
-msgid "Cannot find vf index for pci slot %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:356
-#, python-format
-msgid "device pci mismatch: %(device_mac)s - %(pci_slot)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:380
-#, python-format
-msgid ""
-"VF with PCI slot %(pci_slot)s is already assigned; skipping reset maximum"
-" rate"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:164
-#, python-format
-msgid "Cannot find vfs %(vfs)s in device %(dev_name)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:180
-#, python-format
-msgid "failed to parse vf link show line %(line)s: for %(device)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:211
-#, python-format
-msgid "Failed to set spoofcheck for device %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:220
-#, python-format
-msgid "Device %s does not support state change"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:223
-#, python-format
-msgid "Failed to set device %s state"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:289
-#, python-format
-msgid "port_id to device with MAC %s not found"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:124
-#, python-format
-msgid "Attempting to bind with dead agent: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:168
-#, python-format
-msgid ""
-"L2 agent could not get DVR MAC address at startup due to RPC error.  It "
-"happens when the server does not support this RPC API.  Detailed message:"
-" %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:195
-#, python-format
-msgid ""
-"L2 agent could not get DVR MAC address from server. Retrying. Detailed "
-"message: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:581
-#, python-format
-msgid "Action %s not supported"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1327
-#, python-format
-msgid "VIF port: %s has no ofport configured, and might not be able to transmit"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1450
-#, python-format
-msgid "Device %s not defined on plugin"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1626
-#, python-format
-msgid "Invalid remote IP: %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1670
-msgid "OVS is restarted. OVSNeutronAgent will reset bridges and recover ports."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1673
-msgid ""
-"OVS is dead. OVSNeutronAgent will keep running and checking OVS status "
-"periodically."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1728
-#, python-format
-msgid "Clearing cache of registered ports, retries to resync were > %s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py:142
-#, python-format
-msgid "Deleting flow with cookie 0x%(cookie)x"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py:109
-#, python-format
-msgid "Deleting flow %s"
-msgstr ""
-
-#: neutron/quota/__init__.py:214
-msgid ""
-"The quota driver neutron.quota.ConfDriver is deprecated as of Liberty. "
-"neutron.db.quota.driver.DbQuotaDriver should be used in its place"
-msgstr ""
-
-#: neutron/quota/resource.py:310
-#, python-format
-msgid "No sqlalchemy event for resource %s found"
-msgstr ""
-
-#: neutron/quota/resource_registry.py:215
-#, python-format
-msgid "%s is already registered"
-msgstr ""
-
-#: neutron/scheduler/dhcp_agent_scheduler.py:62
-#, python-format
-msgid "DHCP agent %s is not active"
-msgstr ""
-
-#: neutron/scheduler/dhcp_agent_scheduler.py:210
-msgid "No more DHCP agents"
-msgstr ""
-
-#: neutron/scheduler/l3_agent_scheduler.py:159
-#, python-format
-msgid "No routers compatible with L3 agent configuration on host %s"
-msgstr ""
-
-#: neutron/scheduler/l3_agent_scheduler.py:185
-msgid "No active L3 agents"
-msgstr ""
-
-#: neutron/scheduler/l3_agent_scheduler.py:194
-#, python-format
-msgid "No L3 agents can host the router %s"
-msgstr ""
-
-#: neutron/server/wsgi_pecan.py:52
-#, python-format
-msgid "Development Server Serving on http://%(host)s:%(port)s"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:125
-#, python-format
-msgid ""
-"The configured driver %(driver)s has been moved, automatically using "
-"%(new_driver)s instead. Please update your config files, as this "
-"automatic fixup will be removed in a future release."
-msgstr ""
-
-#: neutron/services/qos/notification_drivers/message_queue.py:30
-#, python-format
-msgid "Received %(resource)s %(policy_id)s without context"
-msgstr ""
-
diff --git a/neutron/locale/neutron.pot b/neutron/locale/neutron.pot
deleted file mode 100644
index df3ac79..0000000
--- a/neutron/locale/neutron.pot
+++ /dev/null
@@ -1,4259 +0,0 @@
-# Translations template for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#: neutron/manager.py:73
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr ""
-
-#: neutron/manager.py:85
-msgid "Neutron core_plugin not configured!"
-msgstr ""
-
-#: neutron/manager.py:145
-#, python-format
-msgid "Plugin '%s' not found."
-msgstr ""
-
-#: neutron/manager.py:187
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr ""
-
-#: neutron/policy.py:197
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-
-#: neutron/policy.py:227
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr ""
-
-#: neutron/policy.py:236
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr ""
-
-#: neutron/service.py:42
-msgid "Seconds between running periodic tasks"
-msgstr ""
-
-#: neutron/service.py:44
-msgid ""
-"Number of separate API worker processes for service. If not specified, "
-"the default is equal to the number of CPUs available for best "
-"performance."
-msgstr ""
-
-#: neutron/service.py:49
-msgid "Number of RPC worker processes for service"
-msgstr ""
-
-#: neutron/service.py:52
-msgid "Number of RPC worker processes dedicated to state reports queue"
-msgstr ""
-
-#: neutron/service.py:56
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task "
-"scheduler to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-
-#: neutron/wsgi.py:50
-msgid "Number of backlog requests to configure the socket with"
-msgstr ""
-
-#: neutron/wsgi.py:54
-msgid "Number of seconds to keep retrying to listen"
-msgstr ""
-
-#: neutron/wsgi.py:57
-msgid "Enable SSL on the API server"
-msgstr ""
-
-#: neutron/wsgi.py:159
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr ""
-
-#: neutron/wsgi.py:389
-msgid "Cannot understand JSON"
-msgstr ""
-
-#: neutron/wsgi.py:555
-msgid "You must implement __call__"
-msgstr ""
-
-#: neutron/wsgi.py:600
-msgid "Unsupported Content-Type"
-msgstr ""
-
-#: neutron/wsgi.py:604
-msgid "Malformed request body"
-msgstr ""
-
-#: neutron/wsgi.py:741
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr ""
-
-#: neutron/wsgi.py:794
-msgid "Could not deserialize data"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:34
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:38
-msgid ""
-"Controls whether the neutron security group API is enabled in the server."
-" It should be false when using no security groups or using the nova "
-"security group API."
-msgstr ""
-
-#: neutron/agent/securitygroups_rpc.py:45
-msgid ""
-"Use ipset to speed-up the iptables based security groups. Enabling ipset "
-"support requires that ipset is installed on L2 agent node."
-msgstr ""
-
-#: neutron/agent/common/config.py:26
-msgid ""
-"Root helper application. Use 'sudo neutron-rootwrap "
-"/etc/neutron/rootwrap.conf' to use the real root filter facility. Change "
-"to 'sudo' to skip the filtering and just run the command directly."
-msgstr ""
-
-#: neutron/agent/common/config.py:33
-msgid ""
-"Use the root helper when listing the namespaces on a system. This may not"
-" be required depending on the security configuration. If the root helper "
-"is not required, set this to False for a performance improvement."
-msgstr ""
-
-#: neutron/agent/common/config.py:43
-msgid "Root helper daemon application to use when possible."
-msgstr ""
-
-#: neutron/agent/common/config.py:48
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-
-#: neutron/agent/common/config.py:52
-msgid "Log agent heartbeats"
-msgstr ""
-
-#: neutron/agent/common/config.py:57
-msgid "The driver used to manage the virtual interface."
-msgstr ""
-
-#: neutron/agent/common/config.py:62
-msgid ""
-"Add comments to iptables rules. Set to false to disallow the addition of "
-"comments to generated iptables rules that describe each rule's purpose. "
-"System must support the iptables comments module for addition of "
-"comments."
-msgstr ""
-
-#: neutron/agent/common/config.py:72
-msgid "Action to be executed when a child process dies"
-msgstr ""
-
-#: neutron/agent/common/config.py:74
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to "
-"disable"
-msgstr ""
-
-#: neutron/agent/common/config.py:82
-msgid "Availability zone of this node"
-msgstr ""
-
-#: neutron/agent/common/config.py:88
-msgid ""
-"Name of bridge used for external network traffic. This should be set to "
-"an empty value for the Linux Bridge. When this parameter is set, each L3 "
-"agent can be associated with no more than one external network. This "
-"option is deprecated and will be removed in the M release."
-msgstr ""
-
-#: neutron/agent/common/config.py:158 neutron/common/config.py:174
-msgid ""
-"Where to store Neutron state files. This directory must be writable by "
-"the agent."
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:50
-msgid ""
-"Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs "
-"commands will fail with ALARMCLOCK error."
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:521
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:659
-msgid "Cannot match priority on flow deletion or modification"
-msgstr ""
-
-#: neutron/agent/common/ovs_lib.py:664
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr ""
-
-#: neutron/agent/dhcp/agent.py:579
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:23
-msgid ""
-"The DHCP agent will resync its state with Neutron to recover from any "
-"transient notification or RPC errors. The interval is number of seconds "
-"between attempts."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:28
-msgid "The driver used to manage the DHCP server."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:30
-msgid ""
-"The DHCP server can assist with providing metadata support on isolated "
-"networks. Setting this value to True will cause the DHCP server to append"
-" specific host routes to the DHCP request. The metadata service will only"
-" be activated when the subnet does not contain any router port. The guest"
-" instance must be configured to request host routes via DHCP (Option "
-"121). This option doesn't have any effect when force_metadata is set to "
-"True."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:40
-msgid ""
-"In some cases the Neutron router is not present to provide the metadata "
-"IP but the DHCP server can be used to provide this info. Setting this "
-"value will force the DHCP server to append specific host routes to the "
-"DHCP request. If this option is set, then the metadata service will be "
-"activated for all the networks."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:48
-msgid ""
-"Allows for serving metadata requests coming from a dedicated metadata "
-"access network whose CIDR is 169.254.169.254/16 (or larger prefix), and "
-"is connected to a Neutron router from which the VMs send metadata:1 "
-"request. In this case DHCP Option 121 will not be injected in VMs, as "
-"they will be able to reach 169.254.169.254 through a router. This option "
-"requires enable_isolated_metadata = True."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:57
-msgid ""
-"Number of threads to use during sync process. Should not exceed "
-"connection pool size configured on server."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:65
-msgid "Location to store DHCP server config files"
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:68
-msgid ""
-"Domain to use for building the hostnames.This option is deprecated. It "
-"has been moved to neutron.conf as dns_domain. It will removed from here "
-"in a future release"
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:78
-msgid "Override the default dnsmasq settings with this file"
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:80
-msgid "Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:84
-msgid ""
-"Base log dir for dnsmasq logging. The log contains DHCP and DNS log "
-"information and is useful for debugging issues with either DHCP or DNS. "
-"If this section is null, disable dnsmasq log."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:89
-msgid ""
-"Enables the dnsmasq service to provide name resolution for instances via "
-"DNS resolvers on the host running the DHCP agent. Effectively removes the"
-" '--no-resolv' option from the dnsmasq process arguments. Adding custom "
-"DNS resolvers to the 'dnsmasq_dns_servers' option disables this feature."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:98
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr ""
-
-#: neutron/agent/dhcp/config.py:100
-msgid "Use broadcast in DHCP replies"
-msgstr ""
-
-#: neutron/agent/l2/extensions/manager.py:29
-msgid "Extensions list to use"
-msgstr ""
-
-#: neutron/agent/l3/agent.py:290
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this "
-"agent as Neutron has more than one external network."
-msgstr ""
-
-#: neutron/agent/l3/config.py:29
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this "
-"preserves the existing behavior where the L3 agent is deployed on a "
-"centralized networking node to provide L3 services like DNAT, and SNAT. "
-"Use this mode if you do not want to adopt DVR. 'dvr' - this mode enables "
-"DVR functionality and must be used for an L3 agent that runs on a compute"
-" host. 'dvr_snat' - this enables centralized SNAT support in conjunction "
-"with DVR.  This mode must be used for an L3 agent running on a "
-"centralized node (or in single-host deployments, e.g. devstack)"
-msgstr ""
-
-#: neutron/agent/l3/config.py:43
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr ""
-
-#: neutron/agent/l3/config.py:46
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, "
-"the feature is disabled"
-msgstr ""
-
-#: neutron/agent/l3/config.py:50
-msgid ""
-"If non-empty, the l3 agent can only configure a router that has the "
-"matching router ID."
-msgstr ""
-
-#: neutron/agent/l3/config.py:54
-msgid ""
-"Indicates that this L3 agent should also handle routers that do not have "
-"an external network gateway configured. This option should be True only "
-"for a single agent in a Neutron deployment, and may be False for all "
-"agents if all routers must have an external network gateway."
-msgstr ""
-
-#: neutron/agent/l3/config.py:61
-msgid ""
-"When external_network_bridge is set, each L3 agent can be associated with"
-" no more than one external network. This value should be set to the UUID "
-"of that external network. To allow L3 agent support multiple external "
-"networks, both the external_network_bridge and "
-"gateway_external_network_id must be left empty."
-msgstr ""
-
-#: neutron/agent/l3/config.py:68
-msgid ""
-"With IPv6, the network used for the external gateway does not need to "
-"have an associated subnet, since the automatically assigned link-local "
-"address (LLA) can be used. However, an IPv6 gateway address is needed for"
-" use as the next-hop for the default route. If no IPv6 gateway address is"
-" configured here, (and only then) the neutron router will be configured "
-"to get its default route from router advertisements (RAs) from the "
-"upstream router; in which case the upstream router must also be "
-"configured to send these RAs. The ipv6_gateway, when configured, should "
-"be the LLA of the interface on the upstream router. If a next-hop using a"
-" global unique address (GUA) is desired, it needs to be done via a subnet"
-" allocated to the network and not through this parameter. "
-msgstr ""
-
-#: neutron/agent/l3/config.py:86
-msgid ""
-"Driver used for ipv6 prefix delegation. This needs to be an entry point "
-"defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg "
-"for entry points included with the neutron source."
-msgstr ""
-
-#: neutron/agent/l3/config.py:92
-msgid "Allow running metadata proxy."
-msgstr ""
-
-#: neutron/agent/l3/config.py:95
-msgid ""
-"Iptables mangle mark used to mark metadata valid requests. This mark will"
-" be masked with 0xffff so that only the lower 16 bits will be used."
-msgstr ""
-
-#: neutron/agent/l3/config.py:100
-msgid ""
-"Iptables mangle mark used to mark ingress from external network. This "
-"mark will be masked with 0xffff so that only the lower 16 bits will be "
-"used."
-msgstr ""
-
-#: neutron/agent/l3/ha.py:36
-msgid "Location to store keepalived/conntrackd config files"
-msgstr ""
-
-#: neutron/agent/l3/ha.py:41
-msgid "VRRP authentication type"
-msgstr ""
-
-#: neutron/agent/l3/ha.py:43
-msgid "VRRP authentication password"
-msgstr ""
-
-#: neutron/agent/l3/ha.py:47
-msgid "The advertisement interval in seconds"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:95
-#, python-format
-msgid "Unexpected response: %s"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:102
-msgid "ID of the router"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:104
-msgid "Namespace of the router"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:106
-msgid "Path to the router directory"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:108
-msgid "Interface to monitor"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:110
-msgid "CIDR to monitor"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:112
-msgid "Path to PID file for this process"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:114
-msgid "User (uid or name) running this process after its initialization"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:117
-msgid "Group (gid or name) running this process after its initialization"
-msgstr ""
-
-#: neutron/agent/l3/keepalived_state_change.py:122
-#: neutron/agent/metadata/namespace_proxy.py:158
-#: neutron/tests/functional/agent/l3/test_keepalived_state_change.py:34
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr ""
-
-#: neutron/agent/linux/async_process.py:77
-msgid "respawn_interval must be >= 0 if provided."
-msgstr ""
-
-#: neutron/agent/linux/async_process.py:111
-msgid "Process is already started"
-msgstr ""
-
-#: neutron/agent/linux/async_process.py:131
-msgid "Process is not running."
-msgstr ""
-
-#: neutron/agent/linux/daemon.py:54
-#, python-format
-msgid "Failed to set uid %s"
-msgstr ""
-
-#: neutron/agent/linux/daemon.py:68
-#, python-format
-msgid "Failed to set gid %s"
-msgstr ""
-
-#: neutron/agent/linux/daemon.py:99
-msgid "Root permissions are required to drop privileges."
-msgstr ""
-
-#: neutron/agent/linux/daemon.py:107
-msgid "Failed to remove supplemental groups"
-msgstr ""
-
-#: neutron/agent/linux/dhcp.py:244
-#, python-format
-msgid "Error while reading %s"
-msgstr ""
-
-#: neutron/agent/linux/dhcp.py:251
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr ""
-
-#: neutron/agent/linux/dhcp.py:253
-#, python-format
-msgid "Unable to access %s"
-msgstr ""
-
-#: neutron/agent/linux/external_process.py:38
-msgid "Location to store child pid files"
-msgstr ""
-
-#: neutron/agent/linux/interface.py:37
-msgid "Name of Open vSwitch bridge to use"
-msgstr ""
-
-#: neutron/agent/linux/interface.py:40
-msgid ""
-"Uses veth for an OVS interface or not. Support kernels with limited "
-"namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True."
-msgstr ""
-
-#: neutron/agent/linux/interface.py:45
-msgid "MTU setting for device."
-msgstr ""
-
-#: neutron/agent/linux/ip_lib.py:36
-msgid "Force ip_lib calls to use the root helper"
-msgstr ""
-
-#: neutron/agent/linux/ip_lib.py:61
-#, python-format
-msgid "Failure waiting for address %(address)s to become ready: %(reason)s"
-msgstr ""
-
-#: neutron/agent/linux/ip_lib.py:608
-msgid "Address not present on interface"
-msgstr ""
-
-#: neutron/agent/linux/ip_lib.py:613
-msgid "Duplicate address detected"
-msgstr ""
-
-#: neutron/agent/linux/ip_lib.py:614
-#, python-format
-msgid "Exceeded %s second limit waiting for address to leave the tentative state."
-msgstr ""
-
-#: neutron/agent/linux/ip_link_support.py:33
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr ""
-
-#: neutron/agent/linux/ip_link_support.py:37
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr ""
-
-#: neutron/agent/linux/iptables_manager.py:216
-#, python-format
-msgid "Unknown chain: %r"
-msgstr ""
-
-#: neutron/agent/linux/iptables_manager.py:408
-msgid "Failure applying iptables rules"
-msgstr ""
-
-#: neutron/agent/linux/keepalived.py:54
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP "
-"ranges %(excluded_ranges)s was not found."
-msgstr ""
-
-#: neutron/agent/linux/keepalived.py:63
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr ""
-
-#: neutron/agent/linux/keepalived.py:73
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-
-#: neutron/agent/linux/pd.py:40
-msgid "Service to handle DHCPv6 Prefix delegation."
-msgstr ""
-
-#: neutron/agent/linux/pd_driver.py:26
-msgid "Location to store IPv6 PD files."
-msgstr ""
-
-#: neutron/agent/linux/pd_driver.py:29
-msgid ""
-"A decimal value as Vendor's Registered Private Enterprise Number as "
-"required by RFC3315 DUID-EN."
-msgstr ""
-
-#: neutron/agent/linux/ra.py:40
-msgid "Location to store IPv6 RA config files"
-msgstr ""
-
-#: neutron/agent/linux/utils.py:128
-#, python-format
-msgid ""
-"Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: "
-"%(stderr)s"
-msgstr ""
-
-#: neutron/agent/metadata/agent.py:98
-#: neutron/agent/metadata/namespace_proxy.py:60
-msgid "An unknown error has occurred. Please try your request again."
-msgstr ""
-
-#: neutron/agent/metadata/agent.py:153
-msgid ""
-"Either one of parameter network_id or router_id must be passed to "
-"_get_ports method."
-msgstr ""
-
-#: neutron/agent/metadata/agent.py:215
-#: neutron/agent/metadata/namespace_proxy.py:106
-msgid "Remote metadata server experienced an internal server error."
-msgstr ""
-
-#: neutron/agent/metadata/agent.py:222
-#: neutron/agent/metadata/namespace_proxy.py:113
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr ""
-
-#: neutron/agent/metadata/config.py:24
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:27
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:32
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:40
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent"
-" effective user id/name."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:54
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr ""
-
-#: neutron/agent/metadata/config.py:57
-msgid "IP address used by Nova metadata server."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:60
-msgid "TCP Port used by Nova metadata server."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:63
-msgid ""
-"When proxying metadata requests, Neutron signs the Instance-ID header "
-"with a shared secret to prevent spoofing. You may select any string for a"
-" secret, but it must match here and in the configuration used by the Nova"
-" Metadata Server. NOTE: Nova uses the same config key, but in [neutron] "
-"section."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:73
-msgid "Protocol to access nova metadata, http or https"
-msgstr ""
-
-#: neutron/agent/metadata/config.py:75
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr ""
-
-#: neutron/agent/metadata/config.py:79
-msgid "Client certificate for nova metadata api server."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:82
-msgid "Private key of client certificate."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:96
-msgid ""
-"Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': "
-"deduce mode from metadata_proxy_user/group values, 'user': set metadata "
-"proxy socket mode to 0o644, to use when metadata_proxy_user is agent "
-"effective user or root, 'group': set metadata proxy socket mode to 0o664,"
-" to use when metadata_proxy_group is agent effective group or root, "
-"'all': set metadata proxy socket mode to 0o666, to use otherwise."
-msgstr ""
-
-#: neutron/agent/metadata/config.py:110
-msgid ""
-"Number of separate worker processes for metadata server (defaults to half"
-" of the number of CPUs)"
-msgstr ""
-
-#: neutron/agent/metadata/config.py:114
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr ""
-
-#: neutron/agent/metadata/namespace_proxy.py:142
-msgid "Network that will have instance metadata proxied."
-msgstr ""
-
-#: neutron/agent/metadata/namespace_proxy.py:145
-msgid "Router that will have connected instances' metadata proxied."
-msgstr ""
-
-#: neutron/agent/metadata/namespace_proxy.py:148
-#: neutron/tests/functional/agent/linux/simple_daemon.py:43
-msgid "Location of pid file of this process."
-msgstr ""
-
-#: neutron/agent/metadata/namespace_proxy.py:151
-msgid "Run as daemon."
-msgstr ""
-
-#: neutron/agent/metadata/namespace_proxy.py:154
-msgid "TCP Port to listen for metadata server requests."
-msgstr ""
-
-#: neutron/agent/metadata/namespace_proxy.py:161
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr ""
-
-#: neutron/agent/metadata/namespace_proxy.py:164
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr ""
-
-#: neutron/agent/metadata/namespace_proxy.py:168
-msgid ""
-"Watch file log. Log watch should be disabled when "
-"metadata_proxy_user/group has no read/write permissions on metadata proxy"
-" log file."
-msgstr ""
-
-#: neutron/agent/ovsdb/api.py:34
-msgid "The interface for interacting with the OVSDB"
-msgstr ""
-
-#: neutron/agent/ovsdb/api.py:37
-msgid ""
-"The connection string for the native OVSDB backend. Requires the native "
-"ovsdb_interface to be enabled."
-msgstr ""
-
-#: neutron/agent/ovsdb/impl_idl.py:94
-#, python-format
-msgid "OVSDB Error: %s"
-msgstr ""
-
-#: neutron/agent/ovsdb/native/commands.py:95
-#, python-format
-msgid "Bridge %s does not exist"
-msgstr ""
-
-#: neutron/agent/ovsdb/native/commands.py:323
-#, python-format
-msgid "Port %s does not exist"
-msgstr ""
-
-#: neutron/agent/ovsdb/native/commands.py:334
-#, python-format
-msgid "Port %(port)s does not exist on %(bridge)s!"
-msgstr ""
-
-#: neutron/agent/ovsdb/native/commands.py:428
-#, python-format
-msgid ""
-"Row doesn't exist in the DB. Request info: Table=%(table)s. "
-"Columns=%(columns)s. Records=%(records)s."
-msgstr ""
-
-#: neutron/agent/ovsdb/native/idlutils.py:49
-#, python-format
-msgid "Cannot find %(table)s with %(col)s=%(match)s"
-msgstr ""
-
-#: neutron/agent/ovsdb/native/idlutils.py:79
-#, python-format
-msgid "Table %s can only be queried by UUID"
-msgstr ""
-
-#: neutron/agent/ovsdb/native/idlutils.py:81
-msgid "'.' searches are not implemented"
-msgstr ""
-
-#: neutron/agent/ovsdb/native/idlutils.py:86
-msgid "record"
-msgstr ""
-
-#: neutron/agent/windows/utils.py:64
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-
-#: neutron/api/api_common.py:119
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr ""
-
-#: neutron/api/api_common.py:136
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr ""
-
-#: neutron/api/api_common.py:141
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr ""
-
-#: neutron/api/api_common.py:145
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s'"
-" and '%(desc)s'"
-msgstr ""
-
-#: neutron/api/api_common.py:329 neutron/api/v2/base.py:665
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr ""
-
-#: neutron/api/api_common.py:336
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr ""
-
-#: neutron/api/extensions.py:231 neutron/pecan_wsgi/controllers/root.py:135
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr ""
-
-#: neutron/api/extensions.py:235 neutron/api/extensions.py:239
-msgid "Resource not found."
-msgstr ""
-
-#: neutron/api/versions.py:42
-msgid "Unknown API version specified"
-msgstr ""
-
-#: neutron/api/rpc/callbacks/exceptions.py:18
-#, python-format
-msgid "Callback for %(resource_type)s returned wrong resource type"
-msgstr ""
-
-#: neutron/api/rpc/callbacks/exceptions.py:22
-#, python-format
-msgid "Callback for %(resource_type)s not found"
-msgstr ""
-
-#: neutron/api/rpc/callbacks/exceptions.py:26
-#, python-format
-msgid "Cannot add multiple callbacks for %(resource_type)s"
-msgstr ""
-
-#: neutron/api/rpc/handlers/dhcp_rpc.py:90
-msgid "Unrecognized action"
-msgstr ""
-
-#: neutron/api/rpc/handlers/resources_rpc.py:39
-#, python-format
-msgid "Invalid resource type %(resource_type)s"
-msgstr ""
-
-#: neutron/api/rpc/handlers/resources_rpc.py:43
-#, python-format
-msgid "Resource %(resource_id)s of type %(resource_type)s not found"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:56
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:68
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:83
-#, python-format
-msgid "'%s' is not a list"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:87
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:98
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:114
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:126
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:131
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:145
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:164 neutron/api/v2/attributes.py:490
-#: neutron/api/v2/attributes.py:527
-#, python-format
-msgid "'%s' is not an integer"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:168
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:173
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:182
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:201
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:232 neutron/api/v2/attributes.py:241
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:238
-#, python-format
-msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:253
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:270 neutron/api/v2/attributes.py:277
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:285
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:300
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:309
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:314
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:322
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:339
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:355
-#: neutron/tests/unit/api/v2/test_attributes.py:545
-#: neutron/tests/unit/api/v2/test_attributes.py:553
-#: neutron/tests/unit/api/v2/test_attributes.py:567
-#: neutron/tests/unit/api/v2/test_attributes.py:575
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:361
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:383
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:405
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:434
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr ""
-
-#: neutron/api/v2/attributes.py:446
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:495
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:514
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:550
-#, python-format
-msgid "'%s' must be a non negative decimal."
-msgstr ""
-
-#: neutron/api/v2/attributes.py:564
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:914
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:921
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:940
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr ""
-
-#: neutron/api/v2/attributes.py:949
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request "
-"requires admin privileges"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:957
-msgid "Running without keystone AuthN requires that tenant_id is specified"
-msgstr ""
-
-#: neutron/api/v2/attributes.py:965
-#: neutron/extensions/allowedaddresspairs.py:81
-#: neutron/extensions/multiprovidernet.py:46
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr ""
-
-#: neutron/api/v2/base.py:94
-msgid "Native pagination depend on native sorting"
-msgstr ""
-
-#: neutron/api/v2/base.py:205 neutron/api/v2/base.py:364
-#: neutron/api/v2/base.py:542 neutron/api/v2/base.py:606
-#: neutron/extensions/l3agentscheduler.py:51
-#: neutron/extensions/l3agentscheduler.py:94
-#: neutron/pecan_wsgi/hooks/policy_enforcement.py:64
-msgid "The resource could not be found."
-msgstr ""
-
-#: neutron/api/v2/base.py:567
-#, python-format
-msgid "Invalid format: %s"
-msgstr ""
-
-#: neutron/api/v2/base.py:643
-msgid "Resource body required"
-msgstr ""
-
-#: neutron/api/v2/base.py:649
-msgid "Bulk operation not supported"
-msgstr ""
-
-#: neutron/api/v2/base.py:652
-msgid "Resources required"
-msgstr ""
-
-#: neutron/api/v2/base.py:662
-msgid "Body contains invalid data"
-msgstr ""
-
-#: neutron/api/v2/base.py:677
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr ""
-
-#: neutron/api/v2/base.py:699
-#: neutron/pecan_wsgi/hooks/ownership_validation.py:52
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr ""
-
-#: neutron/api/v2/resource.py:131
-#: neutron/tests/unit/api/v2/test_resource.py:249
-msgid "Request Failed: internal server error while processing your request."
-msgstr ""
-
-#: neutron/callbacks/exceptions.py:18
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr ""
-
-#: neutron/cmd/ipset_cleanup.py:38
-msgid "Destroy all IPsets."
-msgstr ""
-
-#: neutron/cmd/ipset_cleanup.py:41
-msgid "Destroy IPsets even if there is an iptables reference."
-msgstr ""
-
-#: neutron/cmd/ipset_cleanup.py:45
-msgid "String prefix used to match IPset names."
-msgstr ""
-
-#: neutron/cmd/netns_cleanup.py:66
-msgid "Delete the namespace by removing all devices."
-msgstr ""
-
-#: neutron/cmd/ovs_cleanup.py:40
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:215
-msgid "Check for OVS vxlan support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:217
-msgid "Check for OVS Geneve support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:219
-msgid "Check for iproute2 vxlan support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:221
-msgid "Check for patch port support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:223
-msgid "Check for nova notification support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:225
-msgid "Check for ARP responder support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:227
-msgid "Check for ARP header match support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:229
-msgid "Check for ICMPv6 header match support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:231
-msgid "Check for VF management support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:233
-msgid "Check netns permission settings"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:235
-msgid "Check minimal dnsmasq version"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:237
-msgid "Check ovsdb native interface support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:239
-msgid "Check ebtables installation"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:241
-msgid "Check keepalived IPv6 support"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:243
-msgid "Check minimal dibbler version"
-msgstr ""
-
-#: neutron/cmd/sanity_check.py:245
-msgid "Check ipset installation"
-msgstr ""
-
-#: neutron/common/config.py:40
-msgid "The host IP to bind to"
-msgstr ""
-
-#: neutron/common/config.py:42
-msgid "The port to bind to"
-msgstr ""
-
-#: neutron/common/config.py:44
-msgid ""
-"The path for API extensions. Note that this can be a colon-separated list"
-" of paths. For example: api_extensions_path = "
-"extensions:/path/to/more/exts:/even/more/exts. The __path__ of "
-"neutron.extensions is appended to this, so if your extensions are in "
-"there you don't need to specify them here."
-msgstr ""
-
-#: neutron/common/config.py:52
-msgid "The type of authentication to use"
-msgstr ""
-
-#: neutron/common/config.py:54
-msgid "The core plugin Neutron will use"
-msgstr ""
-
-#: neutron/common/config.py:56
-msgid "The service plugins Neutron will use"
-msgstr ""
-
-#: neutron/common/config.py:58
-msgid ""
-"The base MAC address Neutron will use for VIFs. The first 3 octets will "
-"remain unchanged. If the 4th octet is not 00, it will also be used. The "
-"others will be randomly generated."
-msgstr ""
-
-#: neutron/common/config.py:63
-msgid "How many times Neutron will retry MAC generation"
-msgstr ""
-
-#: neutron/common/config.py:65
-msgid "Allow the usage of the bulk API"
-msgstr ""
-
-#: neutron/common/config.py:67
-msgid "Allow the usage of the pagination"
-msgstr ""
-
-#: neutron/common/config.py:69
-msgid "Allow the usage of the sorting"
-msgstr ""
-
-#: neutron/common/config.py:71
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-
-#: neutron/common/config.py:75
-msgid ""
-"Default value of availability zone hints. The availability zone aware "
-"schedulers use this when the resources availability_zone_hints is empty. "
-"Multiple availability zones can be specified by a comma separated string."
-" This value can be empty. In this case, even if availability_zone_hints "
-"for a resource is empty, availability zone is considered for high "
-"availability while scheduling the resource."
-msgstr ""
-
-#: neutron/common/config.py:85
-msgid "Maximum number of DNS nameservers per subnet"
-msgstr ""
-
-#: neutron/common/config.py:87
-msgid "Maximum number of host routes per subnet"
-msgstr ""
-
-#: neutron/common/config.py:90
-msgid ""
-"Maximum number of fixed ips per port. This option is deprecated and will "
-"be removed in the N release."
-msgstr ""
-
-#: neutron/common/config.py:94
-msgid ""
-"Default IPv4 subnet pool to be used for automatic subnet CIDR allocation."
-" Specifies by UUID the pool to be used in case where creation of a subnet"
-" is being called without a subnet pool ID. If not set then no pool will "
-"be used unless passed explicitly to the subnet create. If no pool is "
-"used, then a CIDR must be passed to create a subnet and that subnet will "
-"not be allocated from any pool; it will be considered part of the "
-"tenant's private address space. This option is deprecated for removal in "
-"the N release."
-msgstr ""
-
-#: neutron/common/config.py:106
-msgid ""
-"Default IPv6 subnet pool to be used for automatic subnet CIDR allocation."
-" Specifies by UUID the pool to be used in case where creation of a subnet"
-" is being called without a subnet pool ID. See the description for "
-"default_ipv4_subnet_pool for more information. This option is deprecated "
-"for removal in the N release."
-msgstr ""
-
-#: neutron/common/config.py:114
-msgid ""
-"Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set "
-"to True to enable IPv6 Prefix Delegation for subnet allocation in a PD-"
-"capable environment. Users making subnet creation requests for IPv6 "
-"subnets without providing a CIDR or subnetpool ID will be given a CIDR "
-"via the Prefix Delegation mechanism. Note that enabling PD will override "
-"the behavior of the default IPv6 subnetpool."
-msgstr ""
-
-#: neutron/common/config.py:125
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-
-#: neutron/common/config.py:129
-msgid "Domain to use for building the hostnames"
-msgstr ""
-
-#: neutron/common/config.py:131
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr ""
-
-#: neutron/common/config.py:134
-msgid ""
-"Allow overlapping IP support in Neutron. Attention: the following "
-"parameter MUST be set to False if Neutron is being used in conjunction "
-"with Nova security groups."
-msgstr ""
-
-#: neutron/common/config.py:140
-msgid ""
-"Hostname to be used by the Neutron server, agents and services running on"
-" this machine. All the agents and services running on this machine must "
-"use the same host value."
-msgstr ""
-
-#: neutron/common/config.py:145
-msgid ""
-"Ensure that configured gateway is on subnet. For IPv6, validate only if "
-"gateway is not a link local address."
-msgstr ""
-
-#: neutron/common/config.py:149
-msgid "Send notification to nova when port status changes"
-msgstr ""
-
-#: neutron/common/config.py:151
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes "
-"so nova can update its cache."
-msgstr ""
-
-#: neutron/common/config.py:154
-msgid ""
-"Number of seconds between sending events to nova if there are any events "
-"to send."
-msgstr ""
-
-#: neutron/common/config.py:157
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network "
-"methods (DHCP and RA MTU options) when the network's preferred MTU is "
-"known."
-msgstr ""
-
-#: neutron/common/config.py:161
-msgid ""
-"Neutron IPAM (IP address management) driver to use. If ipam_driver is not"
-" set (default behavior), no IPAM driver is used. In order to use the "
-"reference implementation of Neutron IPAM driver, use 'internal'."
-msgstr ""
-
-#: neutron/common/config.py:167
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-
-#: neutron/common/config.py:205
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one "
-"region."
-msgstr ""
-
-#: neutron/common/config.py:210
-msgid ""
-"Type of the nova endpoint to use.  This endpoint will be looked up in the"
-" keystone catalog and should be one of public, internal or admin."
-msgstr ""
-
-#: neutron/common/config.py:233
-#, python-format
-msgid "Base MAC: %s"
-msgstr ""
-
-#: neutron/common/exceptions.py:33
-msgid "An unknown exception occurred."
-msgstr ""
-
-#: neutron/common/exceptions.py:58
-#, python-format
-msgid "Bad %(resource)s request: %(msg)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:70
-msgid "Not authorized."
-msgstr ""
-
-#: neutron/common/exceptions.py:74
-msgid "The service is unavailable."
-msgstr ""
-
-#: neutron/common/exceptions.py:78
-#, python-format
-msgid "User does not have admin privileges: %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:82
-#, python-format
-msgid "Object %(id)s not found."
-msgstr ""
-
-#: neutron/common/exceptions.py:86
-#, python-format
-msgid "Network %(net_id)s could not be found."
-msgstr ""
-
-#: neutron/common/exceptions.py:90
-#, python-format
-msgid "Subnet %(subnet_id)s could not be found."
-msgstr ""
-
-#: neutron/common/exceptions.py:94
-#, python-format
-msgid "Subnet pool %(subnetpool_id)s could not be found."
-msgstr ""
-
-#: neutron/common/exceptions.py:98
-#, python-format
-msgid "Port %(port_id)s could not be found."
-msgstr ""
-
-#: neutron/common/exceptions.py:102
-#, python-format
-msgid "QoS policy %(policy_id)s could not be found."
-msgstr ""
-
-#: neutron/common/exceptions.py:106
-#, python-format
-msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found."
-msgstr ""
-
-#: neutron/common/exceptions.py:111
-#, python-format
-msgid "Port %(port_id)s could not be found on network %(net_id)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:116
-#, python-format
-msgid ""
-"QoS binding for port %(port_id)s and policy %(policy_id)s could not be "
-"found."
-msgstr ""
-
-#: neutron/common/exceptions.py:121
-#, python-format
-msgid ""
-"QoS binding for network %(net_id)s and policy %(policy_id)s could not be "
-"found."
-msgstr ""
-
-#: neutron/common/exceptions.py:126
-msgid "Policy configuration policy.json could not be found."
-msgstr ""
-
-#: neutron/common/exceptions.py:130
-#, python-format
-msgid "Failed to init policy %(policy)s because %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:134
-#, python-format
-msgid "Failed to check policy %(policy)s because %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:138
-#, python-format
-msgid "Unsupported port state: %(port_state)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:142
-msgid "The resource is in use."
-msgstr ""
-
-#: neutron/common/exceptions.py:146
-#, python-format
-msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:151
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more"
-" ports still in use on the network."
-msgstr ""
-
-#: neutron/common/exceptions.py:156
-#, python-format
-msgid "Unable to complete operation on subnet %(subnet_id)s %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:161
-msgid "One or more ports have an IP allocation from this subnet."
-msgstr ""
-
-#: neutron/common/exceptions.py:167
-#, python-format
-msgid ""
-"Unable to complete operation on subnet pool %(subnet_pool_id)s. "
-"%(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:172
-msgid "Two or more concurrent subnets allocated."
-msgstr ""
-
-#: neutron/common/exceptions.py:177
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:183
-#, python-format
-msgid "Port %(port_id)s cannot be deleted directly via the port API: %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:188
-#, python-format
-msgid "Port %(port_id)s is already acquired by another DHCP agent"
-msgstr ""
-
-#: neutron/common/exceptions.py:192
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s, port is already bound, "
-"port type: %(vif_type)s, old_mac %(old_mac)s, new_mac %(new_mac)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:198
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address "
-"%(mac)s is in use."
-msgstr ""
-
-#: neutron/common/exceptions.py:204
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes"
-" exceeds the limit %(quota)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:210
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:215
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the"
-" specified network."
-msgstr ""
-
-#: neutron/common/exceptions.py:220
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr ""
-
-#: neutron/common/exceptions.py:225
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-
-#: neutron/common/exceptions.py:230
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-
-#: neutron/common/exceptions.py:236
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s "
-"is in use."
-msgstr ""
-
-#: neutron/common/exceptions.py:241
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr ""
-
-#: neutron/common/exceptions.py:246
-msgid "Tenant network creation is not enabled."
-msgstr ""
-
-#: neutron/common/exceptions.py:254
-msgid ""
-"Unable to create the network. No tenant network is available for "
-"allocation."
-msgstr ""
-
-#: neutron/common/exceptions.py:259
-msgid ""
-"Unable to create the network. No available network found in maximum "
-"allowed attempts."
-msgstr ""
-
-#: neutron/common/exceptions.py:264
-#, python-format
-msgid ""
-"Subnet on port %(port_id)s does not match the requested subnet "
-"%(subnet_id)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:269
-#, python-format
-msgid "Malformed request body: %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:279
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:283
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr ""
-
-#: neutron/common/exceptions.py:287
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on "
-"port %(port_id)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:292
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:297
-#, python-format
-msgid "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:302
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:306
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:310
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr ""
-
-#: neutron/common/exceptions.py:314
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr ""
-
-#: neutron/common/exceptions.py:318
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:322
-#, python-format
-msgid "Quota exceeded for resources: %(overs)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:326
-msgid "Tenant-id was missing from quota request."
-msgstr ""
-
-#: neutron/common/exceptions.py:330
-#, python-format
-msgid ""
-"Change would make usage less than 0 for the following resources: "
-"%(unders)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:335
-#, python-format
-msgid ""
-"Unable to reconfigure sharing settings for network %(network)s. Multiple "
-"tenants are using it."
-msgstr ""
-
-#: neutron/common/exceptions.py:340
-#, python-format
-msgid "Invalid extension environment: %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:344
-#, python-format
-msgid "Extensions not found: %(extensions)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:348
-#, python-format
-msgid "Invalid content type %(content_type)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:352
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:357
-msgid "More than one external network exists."
-msgstr ""
-
-#: neutron/common/exceptions.py:361
-#, python-format
-msgid "An invalid value was provided for %(opt_name)s: %(opt_value)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:366
-#, python-format
-msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:371
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. "
-"Unable to update."
-msgstr ""
-
-#: neutron/common/exceptions.py:376
-#, python-format
-msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'."
-msgstr ""
-
-#: neutron/common/exceptions.py:386
-msgid "Empty physical network name."
-msgstr ""
-
-#: neutron/common/exceptions.py:390
-#, python-format
-msgid "Invalid network tunnel range: '%(tunnel_range)s' - %(error)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:401
-#, python-format
-msgid "Invalid network VXLAN port range: '%(vxlan_range)s'."
-msgstr ""
-
-#: neutron/common/exceptions.py:405
-msgid "VXLAN network unsupported."
-msgstr ""
-
-#: neutron/common/exceptions.py:409
-#, python-format
-msgid "Found duplicate extension: %(alias)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:413
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or "
-"matches another tenants router."
-msgstr ""
-
-#: neutron/common/exceptions.py:418
-#, python-format
-msgid "Invalid CIDR %(input)s given as IP prefix."
-msgstr ""
-
-#: neutron/common/exceptions.py:422
-#, python-format
-msgid "Router '%(router_id)s' is not compatible with this agent."
-msgstr ""
-
-#: neutron/common/exceptions.py:426
-#, python-format
-msgid "Router '%(router_id)s' cannot be both DVR and HA."
-msgstr ""
-
-#: neutron/common/exceptions.py:447
-msgid "Both network_id and router_id are None. One must be provided."
-msgstr ""
-
-#: neutron/common/exceptions.py:452
-msgid "Aborting periodic_sync_routers_task due to an error."
-msgstr ""
-
-#: neutron/common/exceptions.py:464
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr ""
-
-#: neutron/common/exceptions.py:468
-msgid "Unspecified minimum subnet pool prefix."
-msgstr ""
-
-#: neutron/common/exceptions.py:472
-msgid "Empty subnet pool prefix list."
-msgstr ""
-
-#: neutron/common/exceptions.py:476
-msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool."
-msgstr ""
-
-#: neutron/common/exceptions.py:480
-#, python-format
-msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool."
-msgstr ""
-
-#: neutron/common/exceptions.py:484
-#, python-format
-msgid ""
-"Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, "
-"%(base_prefix_type)s=%(base_prefixlen)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:489
-#, python-format
-msgid "Illegal update to prefixes: %(msg)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:493
-#, python-format
-msgid "Failed to allocate subnet: %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:497
-msgid ""
-"Failed to associate address scope: subnetpools within an address scope "
-"must have unique prefixes."
-msgstr ""
-
-#: neutron/common/exceptions.py:502
-#, python-format
-msgid ""
-"Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be "
-"associated with address scope %(address_scope_id)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:508
-#, python-format
-msgid ""
-"Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot "
-"associate with address scope %(address_scope_id)s because subnetpool "
-"ip_version is not %(ip_version)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:514
-#, python-format
-msgid "Illegal subnetpool update : %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:518
-#, python-format
-msgid ""
-"Unable to allocate subnet with prefix length %(prefixlen)s, minimum "
-"allowed prefix is %(min_prefixlen)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:523
-#, python-format
-msgid ""
-"Unable to allocate subnet with prefix length %(prefixlen)s, maximum "
-"allowed prefix is %(max_prefixlen)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:528
-#, python-format
-msgid "Unable to delete subnet pool: %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:532
-msgid "Per-tenant subnet pool prefix quota exceeded."
-msgstr ""
-
-#: neutron/common/exceptions.py:536
-#, python-format
-msgid "Device '%(device_name)s' does not exist."
-msgstr ""
-
-#: neutron/common/exceptions.py:540
-msgid ""
-"Subnets hosted on the same network must be allocated from the same subnet"
-" pool."
-msgstr ""
-
-#: neutron/common/exceptions.py:545
-#, python-format
-msgid "Object action %(action)s failed because: %(reason)s."
-msgstr ""
-
-#: neutron/common/exceptions.py:549
-msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied."
-msgstr ""
-
-#: neutron/common/ipv6_utils.py:36
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr ""
-
-#: neutron/common/ipv6_utils.py:43
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: "
-"%(prefix)s, %(mac)s:"
-msgstr ""
-
-#: neutron/common/ipv6_utils.py:47
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr ""
-
-#: neutron/common/utils.py:223
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py:37
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr ""
-
-#: neutron/common/utils.py:226
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py:40
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr ""
-
-#: neutron/common/utils.py:229
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr ""
-
-#: neutron/common/utils.py:231
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr ""
-
-#: neutron/common/utils.py:234
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr ""
-
-#: neutron/common/utils.py:442
-msgid "Illegal IP version number"
-msgstr ""
-
-#: neutron/common/utils.py:510 neutron/common/utils.py:525
-msgid "Class not found."
-msgstr ""
-
-#: neutron/db/address_scope_db.py:95
-msgid "Shared address scope can't be unshared"
-msgstr ""
-
-#: neutron/db/agents_db.py:44
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-
-#: neutron/db/agents_db.py:49
-msgid ""
-"Representing the resource type whose load is being reported by the agent."
-" This can be \"networks\", \"subnets\" or \"ports\". When specified "
-"(Default is networks), the server will extract particular load sent as "
-"part of its agent configuration object from the agent report state, which"
-" is the number of resources being consumed, at every "
-"report_interval.dhcp_load_type can be used in combination with "
-"network_scheduler_driver = "
-"neutron.scheduler.dhcp_agent_scheduler.WeightScheduler When the "
-"network_scheduler_driver is WeightScheduler, dhcp_load_type can be "
-"configured to represent the choice for the resource being balanced. "
-"Example: dhcp_load_type=networks"
-msgstr ""
-
-#: neutron/db/agents_db.py:65
-msgid ""
-"Agent starts with admin_state_up=False when enable_new_agents=False. In "
-"the case, user's resources will not be scheduled automatically to the "
-"agent until admin changes admin_state_up to True."
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:45
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:47
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:49
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:52
-msgid ""
-"Number of DHCP agents scheduled to host a tenant network. If this number "
-"is greater than 1, the scheduler automatically assigns multiple DHCP "
-"agents for a given tenant network, providing high availability for DHCP "
-"service."
-msgstr ""
-
-#: neutron/db/agentschedulers_db.py:59
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it "
-"will be disabled. Agents with admin_state_up False are not selected for "
-"automatic scheduling regardless of this option. But manual scheduling to "
-"such agents is available if this option is True."
-msgstr ""
-
-#: neutron/db/common_db_mixin.py:177
-msgid "Cannot create resource for another tenant"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:136
-msgid "Only admins can manipulate policies on networks they do not own."
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:195 neutron/db/db_base_plugin_v2.py:199
-#, python-format
-msgid "Invalid route: %s"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:251
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:259
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be "
-"the same value"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:267
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set "
-"to False."
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:273
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:416
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:443
-msgid "Subnet has a prefix length that is incompatible with DHCP service enabled."
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:451
-msgid "Multicast IP subnet is not supported if enable_dhcp is True."
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:455
-msgid "Loopback IP subnet is not supported if enable_dhcp is True."
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:464
-msgid "Gateway is not valid on subnet"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:484 neutron/db/db_base_plugin_v2.py:498
-msgid "new subnet"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:491
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:507
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:511
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:519
-msgid "Prefix Delegation can only be used with IPv6 subnets."
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:529
-msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation."
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:535
-msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation."
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:614
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:641
-msgid "cidr and prefixlen must not be supplied together"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:666
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:927
-#, python-format
-msgid ""
-"subnetpool %(subnetpool_id)s cannot be updated when associated with "
-"shared address scope %(address_scope_id)s"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:941
-msgid ""
-"A default subnetpool for this IP family has already been set. Only one "
-"default may exist per IP family"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:998
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:1083
-msgid "Subnet pool has existing allocations"
-msgstr ""
-
-#: neutron/db/db_base_plugin_v2.py:1091
-msgid "mac address update"
-msgstr ""
-
-#: neutron/db/dvr_mac_db.py:38
-msgid ""
-"The base mac address used for unique DVR instances by Neutron. The first "
-"3 octets will remain unchanged. If the 4th octet is not 00, it will also "
-"be used. The others will be randomly generated. The 'dvr_base_mac' *must*"
-" be different from 'base_mac' to avoid mixing them up with MAC's "
-"allocated for tenant ports. A 4 octet example would be dvr_base_mac = "
-"fa:16:3f:4f:00:00. The default is 3 octet"
-msgstr ""
-
-#: neutron/db/extraroute_db.py:37
-msgid "Maximum number of routes per router"
-msgstr ""
-
-#: neutron/db/extraroute_db.py:92
-msgid "the nexthop is not connected with router"
-msgstr ""
-
-#: neutron/db/extraroute_db.py:97
-msgid "the nexthop is used by router"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:83
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:94
-#, python-format
-msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:214
-msgid "0 is not allowed as CIDR prefix length"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:225
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps"
-" with another subnet"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:306
-msgid "Exceeded maximum amount of fixed ips per port."
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:313
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips "
-"included invalid subnet %(subnet_id)s"
-msgstr ""
-
-#: neutron/db/ipam_backend_mixin.py:327
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr ""
-
-#: neutron/db/ipam_non_pluggable_backend.py:260
-#: neutron/db/ipam_pluggable_backend.py:249
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet"
-" %(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-
-#: neutron/db/l3_agentschedulers_db.py:49
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr ""
-
-#: neutron/db/l3_agentschedulers_db.py:52
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr ""
-
-#: neutron/db/l3_agentschedulers_db.py:54
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 "
-"agents."
-msgstr ""
-
-#: neutron/db/l3_db.py:284
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr ""
-
-#: neutron/db/l3_db.py:322
-#, python-format
-msgid "Network %s is not an external network"
-msgstr ""
-
-#: neutron/db/l3_db.py:332
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr ""
-
-#: neutron/db/l3_db.py:482
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr ""
-
-#: neutron/db/l3_db.py:499
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s "
-"of subnet %(sub_id)s"
-msgstr ""
-
-#: neutron/db/l3_db.py:515
-msgid "Either subnet_id or port_id must be specified"
-msgstr ""
-
-#: neutron/db/l3_db.py:519
-msgid "Cannot specify both subnet-id and port-id"
-msgstr ""
-
-#: neutron/db/l3_db.py:530
-msgid "Router port must have at least one fixed IP"
-msgstr ""
-
-#: neutron/db/l3_db.py:552
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both "
-"contain IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network "
-"id %(nid)s"
-msgstr ""
-
-#: neutron/db/l3_db.py:573
-msgid "Cannot have multiple IPv4 subnets on router port"
-msgstr ""
-
-#: neutron/db/l3_db.py:592
-msgid "Subnet for router interface must have a gateway IP"
-msgstr ""
-
-#: neutron/db/l3_db.py:596
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot "
-"be added to Neutron Router."
-msgstr ""
-
-#: neutron/db/l3_db.py:807
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr ""
-
-#: neutron/db/l3_db.py:852
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-
-#: neutron/db/l3_db.py:856
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is "
-"owned by a different tenant."
-msgstr ""
-
-#: neutron/db/l3_db.py:868
-#, python-format
-msgid ""
-"Floating IP %(floatingip_id)s is associated with non-IPv4 address "
-"%s(internal_ip)s and therefore cannot be bound."
-msgstr ""
-
-#: neutron/db/l3_db.py:872
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to %s, since that is not an IPv4 "
-"address."
-msgstr ""
-
-#: neutron/db/l3_db.py:880
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr ""
-
-#: neutron/db/l3_db.py:887
-#, python-format
-msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses"
-msgstr ""
-
-#: neutron/db/l3_db.py:891
-#, python-format
-msgid ""
-"Port %s has multiple fixed IPv4 addresses.  Must provide a specific IPv4 "
-"address when assigning a floating IP"
-msgstr ""
-
-#: neutron/db/l3_db.py:920
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr ""
-
-#: neutron/db/l3_db.py:964
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr ""
-
-#: neutron/db/l3_db.py:968
-#, python-format
-msgid "Network %s does not contain any IPv4 subnet"
-msgstr ""
-
-#: neutron/db/l3_db.py:1120
-#, python-format
-msgid "has device owner %s"
-msgstr ""
-
-#: neutron/db/l3_dvr_db.py:45
-msgid ""
-"System-wide flag to determine the type of router that tenants can create."
-" Only admin can override."
-msgstr ""
-
-#: neutron/db/l3_dvr_db.py:85
-msgid "Migration from distributed router to centralized is not supported"
-msgstr ""
-
-#: neutron/db/l3_dvr_db.py:91
-msgid ""
-"Cannot upgrade active router to distributed. Please set router "
-"admin_state_up to False prior to upgrade."
-msgstr ""
-
-#: neutron/db/l3_dvr_db.py:617
-msgid "Unable to create the Agent Gateway Port"
-msgstr ""
-
-#: neutron/db/l3_dvr_db.py:648
-msgid "Unable to create the SNAT Interface Port"
-msgstr ""
-
-#: neutron/db/l3_gwmode_db.py:28
-msgid ""
-"Define the default value of enable_snat if not provided in "
-"external_gateway_info."
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:51
-msgid "Enable HA mode for virtual routers."
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:54
-msgid ""
-"Maximum number of L3 agents which a HA router will be scheduled on. If it"
-" is set to 0 then the router will be scheduled on every agent."
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:59
-msgid ""
-"Minimum number of L3 agents which a HA router will be scheduled on. If it"
-" is set to 0 then the router will be scheduled on every agent."
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:64
-msgid "Subnet used for the l3 HA admin network."
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:66
-msgid ""
-"The network type to use when creating the HA network for an HA router. By"
-" default or if empty, the first 'tenant_network_types' is used. This is "
-"helpful when the VRRP traffic should use a specific network which is not "
-"the default one."
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:72
-msgid "The physical network name with which the HA network can be created."
-msgstr ""
-
-#: neutron/db/l3_hamode_db.py:438
-msgid ""
-"Cannot change HA attribute of active routers. Please set router "
-"admin_state_up to False prior to upgrade."
-msgstr ""
-
-#: neutron/db/rbac_db_models.py:28
-#, python-format
-msgid ""
-"Invalid action '%(action)s' for object type '%(object_type)s'. Valid "
-"actions: %(valid_actions)s"
-msgstr ""
-
-#: neutron/db/securitygroups_db.py:274 neutron/db/securitygroups_db.py:624
-#, python-format
-msgid "cannot be deleted due to %s"
-msgstr ""
-
-#: neutron/db/securitygroups_db.py:689
-msgid "Default security group"
-msgstr ""
-
-#: neutron/db/securitygroups_rpc_base.py:58
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr ""
-
-#: neutron/db/sqlalchemyutils.py:70
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr ""
-
-#: neutron/db/sqlalchemyutils.py:73
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by "
-"sort '%(resource)s'"
-msgstr ""
-
-#: neutron/db/migration/__init__.py:62
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr ""
-
-#: neutron/db/migration/cli.py:69
-msgid "Neutron plugin provider module"
-msgstr ""
-
-#: neutron/db/migration/cli.py:73
-msgid ""
-"(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced "
-"service to execute the command against."
-msgstr ""
-
-#: neutron/db/migration/cli.py:79
-#, python-format
-msgid "The subproject to execute the command against. Can be one of: '%s'."
-msgstr ""
-
-#: neutron/db/migration/cli.py:84
-msgid "Enforce using split branches file structure."
-msgstr ""
-
-#: neutron/db/migration/cli.py:90
-msgid "Neutron quota driver class"
-msgstr ""
-
-#: neutron/db/migration/cli.py:99
-msgid "URL to database"
-msgstr ""
-
-#: neutron/db/migration/cli.py:102
-msgid ""
-"Database engine for which script will be generated when using offline "
-"migration."
-msgstr ""
-
-#: neutron/db/migration/cli.py:119
-#, python-format
-msgid "Running %(cmd)s (%(desc)s) for %(project)s ..."
-msgstr ""
-
-#: neutron/db/migration/cli.py:122
-#, python-format
-msgid "Running %(cmd)s for %(project)s ..."
-msgstr ""
-
-#: neutron/db/migration/cli.py:128
-msgid "OK"
-msgstr ""
-
-#: neutron/db/migration/cli.py:133
-#, python-format
-msgid "Sub-project %s not installed."
-msgstr ""
-
-#: neutron/db/migration/cli.py:175
-msgid "Phase upgrade options do not accept revision specification"
-msgstr ""
-
-#: neutron/db/migration/cli.py:187
-msgid "You must provide a revision or relative delta"
-msgstr ""
-
-#: neutron/db/migration/cli.py:192
-msgid "Negative relative revision (downgrade) not supported"
-msgstr ""
-
-#: neutron/db/migration/cli.py:198
-msgid "Use either --delta or relative revision, not both"
-msgstr ""
-
-#: neutron/db/migration/cli.py:201
-msgid "Negative delta (downgrade) not supported"
-msgstr ""
-
-#: neutron/db/migration/cli.py:222
-msgid "Downgrade no longer supported"
-msgstr ""
-
-#: neutron/db/migration/cli.py:283
-#, python-format
-msgid ""
-"Release aware branch labels (%s) are deprecated. Please switch to expand@"
-" and contract@ labels."
-msgstr ""
-
-#: neutron/db/migration/cli.py:290
-#, python-format
-msgid "Unexpected label for script %(script_name)s: %(labels)s"
-msgstr ""
-
-#: neutron/db/migration/cli.py:332
-#, python-format
-msgid "Unexpected number of alembic branch points: %(branchpoints)s"
-msgstr ""
-
-#: neutron/db/migration/cli.py:371
-#, python-format
-msgid "HEAD file does not match migration timeline head, expected: %s"
-msgstr ""
-
-#: neutron/db/migration/cli.py:396
-#, python-format
-msgid ""
-"%(branch)s HEAD file does not match migration timeline head, expected: "
-"%(head)s"
-msgstr ""
-
-#: neutron/db/migration/cli.py:406
-msgid "Repository does not contain HEAD files for contract and expand branches."
-msgstr ""
-
-#: neutron/db/migration/cli.py:483
-msgid "Available commands"
-msgstr ""
-
-#: neutron/db/migration/cli.py:499
-#, python-format
-msgid "Failed to locate source for %s."
-msgstr ""
-
-#: neutron/db/migration/cli.py:592
-#, python-format
-msgid "Package %s not installed"
-msgstr ""
-
-#: neutron/db/migration/cli.py:681
-msgid "Cannot specify both --service and --subproject."
-msgstr ""
-
-#: neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py:46
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-
-#: neutron/debug/commands.py:33
-msgid "Unimplemented commands"
-msgstr ""
-
-#: neutron/debug/commands.py:45
-msgid "ID of network to probe"
-msgstr ""
-
-#: neutron/debug/commands.py:49
-msgid "Owner type of the device: network/compute"
-msgstr ""
-
-#: neutron/debug/commands.py:57
-#, python-format
-msgid "Probe created : %s "
-msgstr ""
-
-#: neutron/debug/commands.py:69
-msgid "ID of probe port to delete"
-msgstr ""
-
-#: neutron/debug/commands.py:76
-#, python-format
-msgid "Probe %s deleted"
-msgstr ""
-
-#: neutron/debug/commands.py:119
-msgid "ID of probe port to execute command"
-msgstr ""
-
-#: neutron/debug/commands.py:124
-msgid "Command to execute"
-msgstr ""
-
-#: neutron/debug/commands.py:144
-msgid "Ping timeout"
-msgstr ""
-
-#: neutron/debug/commands.py:148
-msgid "ID of network"
-msgstr ""
-
-#: neutron/debug/shell.py:63
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr ""
-
-#: neutron/debug/shell.py:71
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-
-#: neutron/extensions/address_scope.py:70
-#, python-format
-msgid "Address scope %(address_scope_id)s could not be found"
-msgstr ""
-
-#: neutron/extensions/address_scope.py:74
-#, python-format
-msgid ""
-"Unable to complete operation on address scope %(address_scope_id)s. There"
-" are one or more subnet pools in use on the address scope"
-msgstr ""
-
-#: neutron/extensions/address_scope.py:80
-#, python-format
-msgid "Unable to update address scope %(address_scope_id)s : %(reason)s"
-msgstr ""
-
-#: neutron/extensions/agent.py:63
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr ""
-
-#: neutron/extensions/agent.py:67
-#, python-format
-msgid "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr ""
-
-#: neutron/extensions/agent.py:72
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr ""
-
-#: neutron/extensions/allowedaddresspairs.py:26
-msgid "Maximum number of allowed address pairs"
-msgstr ""
-
-#: neutron/extensions/allowedaddresspairs.py:33
-msgid "AllowedAddressPair must contain ip_address"
-msgstr ""
-
-#: neutron/extensions/allowedaddresspairs.py:37
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a"
-" port."
-msgstr ""
-
-#: neutron/extensions/allowedaddresspairs.py:42
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-
-#: neutron/extensions/allowedaddresspairs.py:47
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr ""
-
-#: neutron/extensions/allowedaddresspairs.py:59
-msgid "Allowed address pairs must be a list."
-msgstr ""
-
-#: neutron/extensions/availability_zone.py:46
-msgid "Too many availability_zone_hints specified"
-msgstr ""
-
-#: neutron/extensions/availability_zone.py:78
-#, python-format
-msgid "AvailabilityZone %(availability_zone)s could not be found."
-msgstr ""
-
-#: neutron/extensions/dhcpagentscheduler.py:124
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr ""
-
-#: neutron/extensions/dhcpagentscheduler.py:128
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr ""
-
-#: neutron/extensions/dhcpagentscheduler.py:133
-#, python-format
-msgid "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr ""
-
-#: neutron/extensions/dns.py:55
-#, python-format
-msgid "'%s' exceeds the 255 character FQDN limit"
-msgstr ""
-
-#: neutron/extensions/dns.py:59
-msgid "Encountered an empty component."
-msgstr ""
-
-#: neutron/extensions/dns.py:62
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr ""
-
-#: neutron/extensions/dns.py:65
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr ""
-
-#: neutron/extensions/dns.py:70
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr ""
-
-#: neutron/extensions/dns.py:72
-#, python-format
-msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s"
-msgstr ""
-
-#: neutron/extensions/dns.py:88
-#, python-format
-msgid ""
-"The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The "
-"dns_domain option in neutron.conf is set to %(dns_domain)s, with a length"
-" of '%(higher_labels_len)s'. When the two are concatenated to form a FQDN"
-" (with a '.' at the end), the resulting length exceeds the maximum size "
-"of '%(fqdn_max_len)s'"
-msgstr ""
-
-#: neutron/extensions/dns.py:105
-#, python-format
-msgid ""
-"The dns_name passed is a FQDN. Its higher level labels must be equal to "
-"the dns_domain option in neutron.conf, that has been set to "
-"'%(dns_domain)s'. It must also include one or more valid DNS labels to "
-"the left of '%(dns_domain)s'"
-msgstr ""
-
-#: neutron/extensions/dns.py:132
-#, python-format
-msgid "'%s' cannot be converted to lowercase string"
-msgstr ""
-
-#: neutron/extensions/dvr.py:39
-#, python-format
-msgid "Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr ""
-
-#: neutron/extensions/dvr.py:44
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr ""
-
-#: neutron/extensions/external_net.py:23
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, "
-"since it has existing gateway ports"
-msgstr ""
-
-#: neutron/extensions/external_net.py:51
-msgid "Adds external network attribute to network resource."
-msgstr ""
-
-#: neutron/extensions/extra_dhcp_opt.py:24
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr ""
-
-#: neutron/extensions/extra_dhcp_opt.py:28
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr ""
-
-#: neutron/extensions/extraroute.py:24
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr ""
-
-#: neutron/extensions/extraroute.py:28
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot "
-"be deleted, as it is required by one or more routes."
-msgstr ""
-
-#: neutron/extensions/extraroute.py:34
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes "
-"exceeds the maximum %(quota)s."
-msgstr ""
-
-#: neutron/extensions/flavors.py:27
-#, python-format
-msgid "Flavor %(flavor_id)s could not be found."
-msgstr ""
-
-#: neutron/extensions/flavors.py:31
-#, python-format
-msgid "Flavor %(flavor_id)s is used by some service instance."
-msgstr ""
-
-#: neutron/extensions/flavors.py:35
-#, python-format
-msgid "Service Profile %(sp_id)s could not be found."
-msgstr ""
-
-#: neutron/extensions/flavors.py:39
-#, python-format
-msgid "Service Profile %(sp_id)s is used by some service instance."
-msgstr ""
-
-#: neutron/extensions/flavors.py:43
-#, python-format
-msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s."
-msgstr ""
-
-#: neutron/extensions/flavors.py:48
-#, python-format
-msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s."
-msgstr ""
-
-#: neutron/extensions/flavors.py:53
-#, python-format
-msgid "Service Profile driver %(driver)s could not be found."
-msgstr ""
-
-#: neutron/extensions/flavors.py:57
-msgid "Service Profile needs either a driver or metainfo."
-msgstr ""
-
-#: neutron/extensions/flavors.py:61
-msgid "Flavor is not enabled."
-msgstr ""
-
-#: neutron/extensions/flavors.py:65
-msgid "Service Profile is not enabled."
-msgstr ""
-
-#: neutron/extensions/flavors.py:69
-#, python-format
-msgid "Invalid service type %(service_type)s."
-msgstr ""
-
-#: neutron/extensions/l3.py:30
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr ""
-
-#: neutron/extensions/l3.py:34
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr ""
-
-#: neutron/extensions/l3.py:43
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr ""
-
-#: neutron/extensions/l3.py:48
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr ""
-
-#: neutron/extensions/l3.py:53
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot "
-"be deleted, as it is required by one or more floating IPs."
-msgstr ""
-
-#: neutron/extensions/l3.py:59
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr ""
-
-#: neutron/extensions/l3.py:63
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a "
-"Floating IP."
-msgstr ""
-
-#: neutron/extensions/l3.py:69
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with "
-"port %(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already "
-"has a floating IP on external network %(net_id)s."
-msgstr ""
-
-#: neutron/extensions/l3.py:76
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-
-#: neutron/extensions/l3.py:158
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr ""
-
-#: neutron/extensions/l3.py:162
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-
-#: neutron/extensions/l3_ext_ha_mode.py:34
-msgid "Currently distributed HA routers are not supported."
-msgstr ""
-
-#: neutron/extensions/l3_ext_ha_mode.py:39
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-
-#: neutron/extensions/l3_ext_ha_mode.py:44
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-
-#: neutron/extensions/l3_ext_ha_mode.py:50
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr ""
-
-#: neutron/extensions/l3_ext_ha_mode.py:55
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-
-#: neutron/extensions/l3_ext_ha_mode.py:60
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It"
-" has to be greater than or equal to min_l3_agents_per_router "
-"%(min_agents)s."
-msgstr ""
-
-#: neutron/extensions/l3_ext_ha_mode.py:66
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be "
-"equal to or more than %s for HA."
-msgstr ""
-
-#: neutron/extensions/l3agentscheduler.py:153
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr ""
-
-#: neutron/extensions/l3agentscheduler.py:157
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr ""
-
-#: neutron/extensions/l3agentscheduler.py:162
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-
-#: neutron/extensions/l3agentscheduler.py:167
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-
-#: neutron/extensions/l3agentscheduler.py:172
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 "
-"agent %(agent_id)s."
-msgstr ""
-
-#: neutron/extensions/l3agentscheduler.py:177
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s "
-"from an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-
-#: neutron/extensions/metering.py:29
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr ""
-
-#: neutron/extensions/metering.py:33
-msgid "Duplicate Metering Rule in POST."
-msgstr ""
-
-#: neutron/extensions/metering.py:37
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr ""
-
-#: neutron/extensions/metering.py:41
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-
-#: neutron/extensions/multiprovidernet.py:28
-msgid "Segments and provider values cannot both be set."
-msgstr ""
-
-#: neutron/extensions/multiprovidernet.py:32
-msgid "Duplicate segment entry in request."
-msgstr ""
-
-#: neutron/extensions/portsecurity.py:22
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-
-#: neutron/extensions/portsecurity.py:27
-msgid ""
-"Port security must be enabled and port must have an IP address in order "
-"to use security groups."
-msgstr ""
-
-#: neutron/extensions/portsecurity.py:32
-msgid "Port does not have port security binding."
-msgstr ""
-
-#: neutron/extensions/providernet.py:60
-msgid "Plugin does not support updating provider attributes"
-msgstr ""
-
-#: neutron/extensions/quotasv2.py:70
-msgid "POST requests are not supported on this resource."
-msgstr ""
-
-#: neutron/extensions/quotasv2.py:90
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr ""
-
-#: neutron/extensions/quotasv2.py:95
-msgid "Only admin can view or configure quota"
-msgstr ""
-
-#: neutron/extensions/rbac.py:28
-#, python-format
-msgid "RBAC policy of type %(object_type)s with ID %(id)s not found"
-msgstr ""
-
-#: neutron/extensions/rbac.py:32
-#, python-format
-msgid ""
-"RBAC policy on object %(object_id)s cannot be removed because other "
-"objects depend on it.\n"
-"Details: %(details)s"
-msgstr ""
-
-#: neutron/extensions/rbac.py:40
-#, python-format
-msgid "'%s' is not a valid RBAC object type"
-msgstr ""
-
-#: neutron/extensions/rbac.py:76
-msgid ""
-"Default number of RBAC entries allowed per tenant. A negative value means"
-" unlimited."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:35
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:40
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:44
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to "
-"255."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:49
-#, python-format
-msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:54
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-"
-"range-min) is missing."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:59
-#, python-format
-msgid "Security Group %(id)s %(reason)s."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:63
-#: neutron/extensions/securitygroup.py:125
-msgid "in use"
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:68
-msgid "Insufficient rights for removing default security group."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:72
-msgid "Updating default security group not allowed."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:76
-msgid "Default security group already exists."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:80
-#, python-format
-msgid ""
-"Security group rule protocol %(protocol)s not supported. Only protocol "
-"values %(values)s and integer representations [0 to 255] are supported."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:86
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:91
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:96
-msgid "Must also specifiy protocol if port range is given."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:100
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:105
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:109
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:113
-msgid "Duplicate Security Group Rule in POST."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:117
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:121
-#, python-format
-msgid "Security Group Rule %(id)s %(reason)s."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:130
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:134
-#, python-format
-msgid "Error %(reason)s while attempting the operation."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:138
-#, python-format
-msgid ""
-"Security group rule for ethertype '%(ethertype)s' not supported. Allowed "
-"values are %(values)s."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:192
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:284
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-
-#: neutron/extensions/securitygroup.py:288
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-
-#: neutron/extensions/servicetype.py:48
-msgid "Neutron Service Type Management"
-msgstr ""
-
-#: neutron/extensions/servicetype.py:56
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr ""
-
-#: neutron/extensions/vlantransparent.py:28
-msgid "Backend does not support VLAN Transparency."
-msgstr ""
-
-#: neutron/ipam/exceptions.py:21
-#, python-format
-msgid "Cannot handle subnet of type %(subnet_type)s"
-msgstr ""
-
-#: neutron/ipam/exceptions.py:25
-#, python-format
-msgid "Unable to calculate %(address_type)s address because of:%(reason)s"
-msgstr ""
-
-#: neutron/ipam/exceptions.py:30
-#, python-format
-msgid "Unknown address type %(address_type)s"
-msgstr ""
-
-#: neutron/ipam/exceptions.py:34
-#, python-format
-msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s"
-msgstr ""
-
-#: neutron/ipam/exceptions.py:39
-#, python-format
-msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s"
-msgstr ""
-
-#: neutron/ipam/exceptions.py:43
-#, python-format
-msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s"
-msgstr ""
-
-#: neutron/ipam/exceptions.py:47
-#, python-format
-msgid "The address allocation request could not be satisfied because: %(reason)s"
-msgstr ""
-
-#: neutron/ipam/exceptions.py:52
-#, python-format
-msgid "The subnet request could not be satisfied because: %(reason)s"
-msgstr ""
-
-#: neutron/ipam/exceptions.py:57
-#, python-format
-msgid ""
-"IPv6 address %(ip)s cannot be directly assigned to a port on subnet "
-"%(subnet_id)s as the subnet is configured for automatic addresses"
-msgstr ""
-
-#: neutron/ipam/exceptions.py:63
-#, python-format
-msgid "No more IP addresses available for subnet %(subnet_id)s."
-msgstr ""
-
-#: neutron/ipam/exceptions.py:67
-msgid "IP allocation failed. Try again later."
-msgstr ""
-
-#: neutron/ipam/exceptions.py:71
-msgid "New value for first_ip or last_ip has to be specified."
-msgstr ""
-
-#: neutron/ipam/requests.py:70
-msgid "Ranges must be netaddr.IPRange"
-msgstr ""
-
-#: neutron/ipam/requests.py:72
-msgid "Ranges must not overlap"
-msgstr ""
-
-#: neutron/ipam/requests.py:82
-msgid "Ranges must be in the same IP version"
-msgstr ""
-
-#: neutron/ipam/requests.py:88
-msgid "Gateway IP version inconsistent with allocation pool version"
-msgstr ""
-
-#: neutron/ipam/requests.py:113
-msgid "gateway_ip is not in the subnet"
-msgstr ""
-
-#: neutron/ipam/requests.py:117
-msgid "allocation_pools use the wrong ip version"
-msgstr ""
-
-#: neutron/ipam/requests.py:121
-msgid "allocation_pools are not in the subnet"
-msgstr ""
-
-#: neutron/ipam/requests.py:225
-msgid "must provide exactly 2 arguments - cidr and MAC"
-msgstr ""
-
-#: neutron/ipam/subnet_alloc.py:136
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr ""
-
-#: neutron/ipam/subnet_alloc.py:154
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr ""
-
-#: neutron/ipam/subnet_alloc.py:175
-msgid "Unsupported request type"
-msgstr ""
-
-#: neutron/objects/base.py:26
-#, python-format
-msgid "Unable to update the following object fields: %(fields)s"
-msgstr ""
-
-#: neutron/objects/base.py:30
-#, python-format
-msgid ""
-"Failed to create a duplicate %(object_type)s: for attribute(s) "
-"%(attributes)s with value(s) %(values)s"
-msgstr ""
-
-#: neutron/objects/base.py:79
-#, python-format
-msgid "'%s' is not supported for filtering"
-msgstr ""
-
-#: neutron/objects/qos/policy.py:59
-#, python-format
-msgid "unable to load %s"
-msgstr ""
-
-#: neutron/pecan_wsgi/hooks/translation.py:40
-msgid "An unexpected internal error occurred."
-msgstr ""
-
-#: neutron/plugins/common/utils.py:61
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr ""
-
-#: neutron/plugins/common/utils.py:66
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr ""
-
-#: neutron/plugins/common/utils.py:76
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr ""
-
-#: neutron/plugins/common/utils.py:80
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr ""
-
-#: neutron/plugins/common/utils.py:167
-msgid ""
-"Too long prefix provided. New name would exceed given length for an "
-"interface name."
-msgstr ""
-
-#: neutron/plugins/ml2/config.py:23
-msgid ""
-"List of network type driver entrypoints to be loaded from the "
-"neutron.ml2.type_drivers namespace."
-msgstr ""
-
-#: neutron/plugins/ml2/config.py:27
-msgid ""
-"Ordered list of network_types to allocate as tenant networks. The default"
-" value 'local' is useful for single-box testing but provides no "
-"connectivity between hosts."
-msgstr ""
-
-#: neutron/plugins/ml2/config.py:33
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded "
-"from the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-
-#: neutron/plugins/ml2/config.py:38
-msgid ""
-"An ordered list of extension driver entrypoints to be loaded from the "
-"neutron.ml2.extension_drivers namespace. For example: extension_drivers ="
-" port_security,qos"
-msgstr ""
-
-#: neutron/plugins/ml2/config.py:43
-msgid ""
-"The maximum permissible size of an unfragmented packet travelling from "
-"and to addresses where encapsulated Neutron traffic is sent. Drivers "
-"calculate maximum viable MTU for validating tenant requests based on this"
-" value (typically, path_mtu - maxmum encapsulation header size). If <= 0,"
-" the path MTU is indeterminate and no calculation takes place."
-msgstr ""
-
-#: neutron/plugins/ml2/config.py:52
-msgid ""
-"The maximum permissible size of an unfragmented packet travelling a L2 "
-"network segment.  If <= 0, the segment MTU is indeterminate and no "
-"calculation takes place."
-msgstr ""
-
-#: neutron/plugins/ml2/config.py:58
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical"
-" network MTU value that differs from the default segment_mtu value."
-msgstr ""
-
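For illustration, the MTU options described above might be combined in ml2_conf.ini as follows (a sketch only; the [ml2] section name is assumed and the physnet names are hypothetical):

    [ml2]
    path_mtu = 1500
    segment_mtu = 1500
    physical_network_mtus = physnet1:1550,physnet2:1450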
-#: neutron/plugins/ml2/config.py:64
-msgid ""
-"Default network type for external networks when no provider attributes "
-"are specified. By default it is None, which means that if provider "
-"attributes are not specified while creating external networks then they "
-"will have the same type as tenant networks. Allowed values for "
-"external_network_type config option depend on the network type values "
-"configured in type_drivers config option."
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:101
-msgid "network_type required"
-msgstr ""
-
-#: neutron/plugins/ml2/managers.py:218 neutron/plugins/ml2/managers.py:227
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr ""
-
-#: neutron/plugins/ml2/plugin.py:259
-msgid "binding:profile value too large"
-msgstr ""
-
-#: neutron/plugins/ml2/common/exceptions.py:24
-#, python-format
-msgid "%(method)s failed."
-msgstr ""
-
-#: neutron/plugins/ml2/common/exceptions.py:29
-#, python-format
-msgid "Extension %(driver)s failed."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:34
-msgid ""
-"List of physical_network names with which flat networks can be created. "
-"Use default '*' to allow flat networks with arbitrary physical_network "
-"names. Use an empty list to disable flat networks."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:94
-msgid "physical_network required for flat provider network"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:97
-msgid "Flat provider networks are disabled"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:100
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_flat.py:107
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_geneve.py:32
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"Geneve VNI IDs that are available for tenant network allocation"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_geneve.py:37
-msgid ""
-"Geneve encapsulation header size is dynamic, this value is used to "
-"calculate the maximum MTU for the driver. This is the sum of the sizes of"
-" the outer ETH + IP + UDP + GENEVE header sizes. The default size for "
-"this field is 50, which is the size of the Geneve header without any "
-"additional option headers."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_gre.py:32
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of "
-"GRE tunnel IDs that are available for tenant network allocation"
-msgstr ""
-
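As a sketch of the <tun_min>:<tun_max> tuple format described above (the [ml2_type_gre] section name is assumed and the range values are hypothetical):

    [ml2_type_gre]
    tunnel_id_ranges = 1:1000,2000:2999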
-#: neutron/plugins/ml2/drivers/type_local.py:52
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_tunnel.py:181
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_tunnel.py:188
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_tunnel.py:322
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_tunnel.py:327
-msgid "Network type value needed by the ML2 plugin"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_tunnel.py:384
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vlan.py:37
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation"
-" to tenant networks."
-msgstr ""
-
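For example, the list format described above might be written as (a sketch; the [ml2_type_vlan] section name is assumed and the physnet names and ranges are hypothetical):

    [ml2_type_vlan]
    network_vlan_ranges = physnet1:1000:2999,physnet2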
-#: neutron/plugins/ml2/drivers/type_vlan.py:174
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vlan.py:179
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vlan.py:185
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vlan.py:193
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vxlan.py:32
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/type_vxlan.py:36
-msgid ""
-"Multicast group for VXLAN. When configured, will enable sending all "
-"broadcast traffic to this multicast group. When left unconfigured, will "
-"disable multicast VXLAN mode."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/l2pop/config.py:23
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:27
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:31
-msgid "TTL for vxlan interface protocol packets."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:33
-msgid "TOS for vxlan interface protocol packets."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:35
-msgid ""
-"Multicast group(s) for vxlan interface. A range of group addresses may be"
-" specified by using CIDR notation. Specifying a range allows different "
-"VNIs to use different group addresses, reducing or eliminating spurious "
-"broadcast traffic to the tunnel endpoints. To reserve a unique group for "
-"each possible (24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting "
-"must be the same on all the agents."
-msgstr ""
-
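A minimal sketch of the multicast range described above, in linuxbridge_agent.ini (the [vxlan] section name is assumed; the /8 value comes from the text itself and the local IP is a placeholder from the documentation range):

    [vxlan]
    enable_vxlan = true
    vxlan_group = 239.0.0.0/8
    local_ip = 192.0.2.10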
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:44
-msgid "Local IP address of the VXLAN endpoints."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:46
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It"
-" enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:54
-msgid ""
-"Comma-separated list of <physical_network>:<physical_interface> tuples "
-"mapping physical network names to the agent's node-specific physical "
-"network interfaces to be used for flat and VLAN networks. All physical "
-"networks listed in network_vlan_ranges on the server should have mappings"
-" to appropriate interfaces on each agent."
-msgstr ""
-
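For example, the tuple list described above might look like this (a sketch; the [linux_bridge] section name is assumed and the interface names are hypothetical):

    [linux_bridge]
    physical_interface_mappings = physnet1:eth1,physnet2:eth2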
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:64
-msgid "List of <physical_network>:<physical_bridge>"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:69
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py:59
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:98
-msgid ""
-"The number of seconds the agent will wait between polling for local "
-"device changes."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:72
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:142
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives "
-"SIGTERM. If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py:80
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:125
-msgid ""
-"Enable suppression of ARP responses that don't match an IP address that "
-"belongs to the port from which they originate. Note: This prevents the "
-"VMs attached to this agent from spoofing, it doesn't protect them from "
-"other devices which have the capability to spoof (e.g. bare metal or VMs "
-"attached to agents without this flag set to True). Spoofing rules will "
-"not be added to any ports that have port security disabled. For "
-"LinuxBridge, this requires ebtables. For OVS, it requires a version that "
-"supports matching ARP headers."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:53
-msgid "Device not found"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:67
-msgid "Device has no virtual functions"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:386
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py:42
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py:66
-msgid ""
-"Comma-separated list of <physical_network>:<network_device> tuples "
-"mapping physical network names to the agent's node-specific physical "
-"network device interfaces of SR-IOV physical function to be used for VLAN"
-" networks. All physical networks listed in network_vlan_ranges on the "
-"server should have mappings to appropriate interfaces on each agent."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py:76
-msgid ""
-"Comma-separated list of <network_device>:<vfs_to_exclude> tuples, mapping"
-" network_device to the agent's node-specific list of virtual functions "
-"that should not be used for virtual networking. vfs_to_exclude is a "
-"semicolon-separated list of virtual functions to exclude from "
-"network_device. The network_device in the mapping should appear in the "
-"physical_device_mappings list."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py:25
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py:29
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py:33
-#, python-format
-msgid "Operation not supported on device %(dev_name)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py:37
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py:24
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:37
-msgid ""
-"Comma-separated list of supported PCI vendor devices, as defined by "
-"vendor_id:product_id according to the PCI ID Repository. Default enables "
-"support for Intel and Mellanox SR-IOV capable NICs."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:89
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:194
-#, python-format
-msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:73
-#, python-format
-msgid "Unable to retrieve port details for devices: %(devices)s "
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:304
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1948
-msgid ""
-"DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be "
-"enabled, in both the Agent and Server side."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:32
-msgid ""
-"Integration bridge to use. Do not change this parameter unless you have a"
-" good reason to. This is the name of the OVS integration bridge. There is"
-" one per hypervisor. The integration bridge acts as a virtual 'patch "
-"bay'. All VM VIFs are attached to this bridge and then 'patched' "
-"according to their network connectivity."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:40
-msgid "Tunnel bridge to use."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:42
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:45
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:48
-msgid "Local IP address of tunnel endpoint."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:51
-msgid ""
-"Comma-separated list of <physical_network>:<bridge> tuples mapping "
-"physical network names to the agent's node-specific Open vSwitch bridge "
-"names to be used for flat and VLAN networks. The length of bridge names "
-"should be no more than 11. Each bridge must exist, and should have a "
-"physical network interface configured as a port. All physical networks "
-"configured on the server should have mappings to appropriate bridges on "
-"each agent. Note: If you remove a bridge from this mapping, make sure to "
-"disconnect it from the integration bridge as it won't be managed by the "
-"agent anymore. Deprecated for ofagent."
-msgstr ""
-
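A sketch of the mapping described above, in the OVS agent configuration (the [ovs] section name is assumed; the bridge and physnet names are hypothetical):

    [ovs]
    bridge_mappings = physnet1:br-eth1,physnet2:br-ex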
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:65
-msgid ""
-"Use veths instead of patch ports to interconnect the integration bridge "
-"to physical networks. Support kernel without Open vSwitch patch port "
-"support so long as it is set to True."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:71
-msgid "OpenFlow interface to use."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:75
-msgid ""
-"OVS datapath to use. 'system' is the default value and corresponds to the"
-" kernel datapath. To enable the userspace datapath set this value to "
-"'netdev'."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:79
-msgid "OVS vhost-user socket directory."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:81
-msgid ""
-"Address to listen on for OpenFlow connections. Used only for 'native' "
-"driver."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:84
-msgid "Port to listen on for OpenFlow connections. Used only for 'native' driver."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:87
-msgid ""
-"Timeout in seconds to wait for the local switch connecting the "
-"controller. Used only for 'native' driver."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:91
-msgid ""
-"Timeout in seconds to wait for a single OpenFlow request. Used only for "
-"'native' driver."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:102
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:106
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:109
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:112
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:114
-msgid "MTU size of veth interfaces"
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:116
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:119
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP "
-"broadcast into the overlay."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:137
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying "
-"GRE/VXLAN tunnel."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:140
-msgid "Make the l2 agent run in DVR mode."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:146
-msgid ""
-"Reset flow table on start. Setting this to True will cause brief traffic "
-"interruption."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:149
-msgid ""
-"Set or un-set the tunnel header checksum  on outgoing IP packet carrying "
-"GRE/VXLAN tunnel."
-msgstr ""
-
-#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:153
-msgid "Selects the Agent Type reported"
-msgstr ""
-
-#: neutron/quota/__init__.py:43
-msgid ""
-"Resource name(s) that are supported in quota features. This option is now"
-" deprecated for removal."
-msgstr ""
-
-#: neutron/quota/__init__.py:48
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-
-#: neutron/quota/__init__.py:52
-msgid "Number of networks allowed per tenant. A negative value means unlimited."
-msgstr ""
-
-#: neutron/quota/__init__.py:56
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr ""
-
-#: neutron/quota/__init__.py:60
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr ""
-
-#: neutron/quota/__init__.py:64
-msgid "Default driver to use for quota checks"
-msgstr ""
-
-#: neutron/quota/__init__.py:67
-msgid ""
-"Keep in track in the database of current resourcequota usage. Plugins "
-"which do not leverage the neutron database should set this flag to False"
-msgstr ""
-
-#: neutron/quota/__init__.py:148 neutron/quota/__init__.py:153
-msgid "Access to this resource was denied."
-msgstr ""
-
-#: neutron/server/__init__.py:32
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths "
-"(~/.neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-
-#: neutron/server/__init__.py:40
-#, python-format
-msgid "ERROR: %s"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:33
-msgid ""
-"Defines providers for advanced services using the format: "
-"<service_type>:<name>:<driver>[:default]"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:139
-#, python-format
-msgid "Provider name %(name)s is limited by %(len)s characters"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:153
-msgid "Invalid service provider format"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:161
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:176
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:181
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:186
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:200
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:210
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr ""
-
-#: neutron/services/provider_configuration.py:221
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr ""
-
-#: neutron/services/service_base.py:66
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr ""
-
-#: neutron/services/firewall/agents/firewall_agent_api.py:27
-msgid "Name of the FWaaS Driver"
-msgstr ""
-
-#: neutron/services/firewall/agents/firewall_agent_api.py:31
-msgid "Enable FWaaS"
-msgstr ""
-
-#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:68
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in "
-"L3-agent."
-msgstr ""
-
-#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:80
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr ""
-
-#: neutron/services/metering/agents/metering_agent.py:69
-msgid "Metering driver"
-msgstr ""
-
-#: neutron/services/metering/agents/metering_agent.py:71
-msgid "Interval between two metering measures"
-msgstr ""
-
-#: neutron/services/metering/agents/metering_agent.py:73
-msgid "Interval between two metering reports"
-msgstr ""
-
-#: neutron/services/metering/agents/metering_agent.py:97
-msgid "A metering driver must be specified"
-msgstr ""
-
-#: neutron/services/metering/drivers/iptables/iptables_driver.py:88
-msgid "An interface driver must be specified"
-msgstr ""
-
-#: neutron/services/qos/notification_drivers/manager.py:22
-msgid "Drivers list to use to send the update notification"
-msgstr ""
-
-#: neutron/services/qos/notification_drivers/manager.py:54
-msgid "A QoS driver must be specified"
-msgstr ""
-
-#: neutron/tests/base.py:122
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr ""
-
-#: neutron/tests/common/agents/l3_agent.py:65
-msgid "Suffix to append to all namespace names."
-msgstr ""
-
-#: neutron/tests/functional/agent/linux/simple_daemon.py:38
-msgid ""
-"uuid provided from the command line so external_process can track us via "
-"/proc/cmdline interface."
-msgstr ""
-
-#: neutron/tests/functional/agent/linux/test_async_process.py:79
-msgid "Async process didn't respawn"
-msgstr ""
-
-#: neutron/tests/functional/agent/linux/test_keepalived.py:61
-msgid "Keepalived didn't spawn"
-msgstr ""
-
-#: neutron/tests/functional/agent/linux/test_keepalived.py:72
-msgid "Keepalived didn't respawn"
-msgstr ""
-
-#: neutron/tests/unit/agent/linux/test_iptables_manager.py:893
-#: neutron/tests/unit/agent/linux/test_iptables_manager.py:927
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables "
-"rules:\n"
-"%s"
-msgstr ""
-
-#: neutron/tests/unit/api/v2/test_resource.py:158
-#: neutron/tests/unit/api/v2/test_resource.py:203
-msgid "Unmapped error"
-msgstr ""
-
-#: neutron/tests/unit/api/v2/test_resource.py:262
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr ""
-
-#: neutron/tests/unit/plugins/ml2/test_plugin.py:547
-#, python-format
-msgid "Deleting port %s"
-msgstr ""
-
-#: neutron/tests/unit/plugins/ml2/test_plugin.py:548
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr ""
-
-#: neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py:34
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-
-#: neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py:61
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-
-#: neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py:87
-#, python-format
-msgid ""
-"%(method)s called with port settings %(current)s (original settings "
-"%(original)s) host %(host)s (original host %(original_host)s) vif type "
-"%(vif_type)s (original vif type %(original_vif_type)s) vif details "
-"%(vif_details)s (original vif details %(original_vif_details)s) binding "
-"levels %(levels)s (original binding levels %(original_levels)s) on "
-"network %(network)s with segments to bind %(segments_to_bind)s"
-msgstr ""
-
-#: neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py:1368
-#: neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py:1387
-#: neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py:1406
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr ""
-
-#: neutron/tests/unit/plugins/ml2/extensions/fake_extension.py:55
-msgid "Adds test attributes to core resources."
-msgstr ""
-
diff --git a/neutron/locale/pt_BR/LC_MESSAGES/neutron.po b/neutron/locale/pt_BR/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index 5ea502b..0000000
+++ /dev/null
@@ -1,2381 +0,0 @@
-# Portuguese (Brazil) translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-09-06 10:15+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: pt_BR\n"
-"Language-Team: Portuguese (Brazil)\n"
-"Plural-Forms: nplurals=2; plural=(n > 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"Comando: %(cmd)s\n"
-"Código de saída: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: erro interno de driver."
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s não é um identificador %(type)s válido"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"%(invalid_dirs)s é um valor inválido para sort_dirs, o valor válido é "
-"'%(asc)s' e '%(desc)s'"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(key)s proibida para rede de provedor %(tunnel)s"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s falhou."
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr "%(name)s '%(addr)s' não corresponde à ip_version '%(ip_version)s'"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "%s não pode ser chamado durante o modo offline"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "%s é um atributo inválido para sort_key"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "%s é um atributo inválido para sort_keys"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s não é um tag de VLAN válido"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr "%s deve implementar get_port_from_device ou get_ports_from_devices."
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "%s proibido para rede de provedor VLAN"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "%s proibido para rede de provedor flat"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "%s proibido para rede de provedor local"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s' excede o comprimento máximo de %(max_len)s"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s' não está em %(valid_values)s"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s' é muito grande - não deve ser maior que '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr "'%(data)s' é muito pequeno - deve ser pelo menos '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr "'%(data)s' não é um IP do CIDR da sub-rede,  '%(cidr)s' é recomendado"
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' não é um nameserver válido. %(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s' Sequências em branco não são permitidas"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "'%s' não pode ser convertido em booleano"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' contém espaços em branco"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "'%s' não é um dicionário"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s' não é uma lista"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s' não é um endereço IP válido"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s' não é uma sub-rede de IP válida"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s' não é um endereço MAC válido"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s' não é um UUID válido"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' não é um booleano válido"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "'%s' não é uma entrada válida"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s' não é uma sequência válida"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s' não é um número inteiro"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s' não é um número inteiro ou um uuid"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s' não do formato <key>=[value]"
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "'%s' deve ser não negativo"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "0 não é permitido como um comprimento do prefixo CIDR"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "Um cidr deve ser especificado na ausência de um conjunto de sub-rede"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"Uma lista de mapeamentos de redes físicas para valores da MTU. O formato do "
-"mapeamento é <physnet>:<mtu val>. Esse mapeamento permite especificar um "
-"valor de MTU de rede física que difere do valor segment_mtu padrão."
-
-msgid "A metering driver must be specified"
-msgstr "Um driver de medição deve ser especificado"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr ""
-"API para recuperação de provedores de serviço para serviços avançados do "
-"Neutron"
-
-msgid "Access to this resource was denied."
-msgstr "Acesso à este recurso foi negado."
-
-msgid "Action to be executed when a child process dies"
-msgstr "Ação a ser executada quando um processo-filho morre"
-
-msgid "Adds external network attribute to network resource."
-msgstr "Inclui atributo de rede externo no recurso de rede."
-
-msgid "Adds test attributes to core resources."
-msgstr "Inclui atributos de teste aos recursos principais."
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "O agente %(id)s não pôde ser localizado"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "O agente %(id)s não é um agente L3 ou foi desativado"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "O agente %(id)s não é um Agente DHCP válido ou foi desativado"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "Agente atualizado:%(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr ""
-"O agente com agent_type=%(agent_type)s e host=%(host)s não pôde ser "
-"localizado"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "Permitir o planejamento automático de redes para o agente DHCP."
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "Permitir planejamento automático de roteadores para agente L3."
-
-msgid "Allow running metadata proxy."
-msgstr "Permite executar proxy de metadados."
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr ""
-"Permitir envio de notificação de operação de recurso para o agente DHCP"
-
-msgid "Allow the usage of the bulk API"
-msgstr "Permitir o uso da API em massa"
-
-msgid "Allow the usage of the pagination"
-msgstr "Permitir o uso da paginação"
-
-msgid "Allow the usage of the sorting"
-msgstr "Permitir o uso da classificação"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr ""
-"Permita executar solicitações (https) de SSL inseguras para metadados nova"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair deve conter ip_address"
-
-msgid "An interface driver must be specified"
-msgstr "Um driver de interface deve ser especificado"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"Lista ordenada de pontos de entrada do driver  de mecanismo de rede que será "
-"carregada do namespace neutron.ml2.mechanism_drivers."
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr "Ocorreu um erro desconhecido. Tente a solicitação novamente."
-
-msgid "An unknown exception occurred."
-msgstr "Ocorreu uma exceção desconhecida."
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "Atributo '%s' não permitido no autoteste inicial"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "Remover automaticamente as redes de agentes DHCP offline."
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr ""
-"Reagende roteadores automaticamente de agentes L3 offline para agentes L3 "
-"online."
-
-msgid "Available commands"
-msgstr "Comandos disponíveis"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "O backend não suporta a Transparência da VLAN."
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"Prefixo ou formato mac inválido para gerar endereço IPv6 por EUI-64: "
-"%(prefix)s, %(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr "Tipo de prefixo inválido para gerar endereço IPv6 por EUI-64: %s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "MAC Base: %s"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "A ponte %(bridge)s não existe."
-
-msgid "Bulk operation not supported"
-msgstr "Operação em massa não suportada"
-
-msgid "CIDR to monitor"
-msgstr "CIDR para monitorar"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr ""
-"Não é possível incluir IP flutuante na porta da sub-rede %s que não possui "
-"gateway_ip"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr ""
-"Não é possível alocar a sub-rede solicitada a partir do conjunto disponível "
-"de prefixos"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"Não é possível associar o IP flutuante %(floating_ip_address)s (%(fip_id)s) "
-"com a porta %(port_id)s usando IP fixo %(fixed_ip)s, pois esse IP fixo já "
-"possui um IP flutuante em uma rede externa %(net_id)s."
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr ""
-"Não é possível criar IP flutuante e ligá-lo à porta %s, uma vez que a porta "
-"é de propriedade de um locatário diferente."
-
-msgid "Cannot create resource for another tenant"
-msgstr "Não é possível criar recurso para outro arrendatário"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "Não é possível desativar enable_dhcp com conjunto de atributos ipv6"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"Não é possível ter várias portas de roteador com o mesmo ID de rede se ambas "
-"contiverem sub-redes IPv6. A porta existente %(p)s possui sub-rede(s) IPv6 e "
-"o ID de rede %(nid)s"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"Não é possível hospedar %(router_type)s o roteador %(router_id)s no "
-"%(agent_mode)s agente L3 %(agent_id)s."
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr ""
-"Não é possível corresponder a prioridade na exclusão ou modificação do fluxo"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "Não é possível especificar subnet-id e port-id"
-
-msgid "Cannot understand JSON"
-msgstr "Não é possível entender JSON"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "Não é possível atualizar o atributo de leitura %s"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr ""
-"Arquivo de chave pública da autoridade de certificação (certificado CA) para "
-"ssl"
-
-msgid "Check for ARP responder support"
-msgstr "Verifique se há suporte respondente para ARP"
-
-msgid "Check for OVS vxlan support"
-msgstr "Verifique o suporte do vxlan do OVS"
-
-msgid "Check for VF management support"
-msgstr "Verifique o suporte de gerenciamento de VF"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "Verifique o suporte do vxlan do iproute2"
-
-msgid "Check for nova notification support"
-msgstr "Verifique suporte para nova notificação"
-
-msgid "Check for patch port support"
-msgstr "Verifique o suporte para a porta de correção"
-
-msgid "Check minimal dnsmasq version"
-msgstr "Verifique a versão dnsmasq mínima"
-
-msgid "Check netns permission settings"
-msgstr "Verifique as configurações de permissão netns"
-
-msgid "Check ovsdb native interface support"
-msgstr "Verifique o suporte da interface nativa ovsdb"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"O cidr %(subnet_cidr)s de sub-rede %(subnet_id)s se sobrepõe com o cidr "
-"%(cidr)s da sub-rede %(sub_id)s"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "Certificado do cliente para o servidor da API de metadados nova."
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"Lista separada por vírgula de tuplas <tun_min>:<tun_max> enumerando as "
-"faixas de IDs de túnel GRE que estão disponíveis para alocação de redes de "
-"tenant"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"Lista de valores separados por vírgula de tuplas <vni_min>:<vni_max> "
-"enumerando faixas de VXLAN VNI IDs que estão disponíveis para alocação de "
-"redes de tenant"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr ""
-"Lista separada por vírgula dos servidores DNS que será utilizada como "
-"encaminhadores."
-
-msgid "Command to execute"
-msgstr "Comando a ser executado"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr ""
-"Arquivo de configuração para driver de interface (também é possível usar "
-"l3_agent.ini)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "O valor conflitante ethertype %(ethertype)s para CIDR %(cidr)s"
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"Controla se a API do grupo de segurança neutron está ativada no servidor. "
-"Ele deve ser false quando não usa nenhum grupo de segurança ou usa a API do "
-"grupo de segurança nova."
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr ""
-"Não foi possível associar-se à %(host)s:%(port)s após tentar por %(time)d "
-"segundos"
-
-msgid "Could not deserialize data"
-msgstr "Não foi possível desserializar dados"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "Falha na criação. %(dev_name)s já existe."
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr ""
-"IP atual do gateway %(ip_address)s já está em uso pela porta %(port_id)s. "
-"Não é possível atualizar."
-
-msgid "Currently distributed HA routers are not supported."
-msgstr ""
-"Roteadores de alta disponibilidade distribuídos atualmente não são "
-"suportados."
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"Duração de lease de DHCP (em segundos). Use -1 para dizer ao dnsmasq para "
-"usar lease infinitas vezes."
-
-msgid "Default driver to use for quota checks"
-msgstr "Driver padrão para uso por verificações de cota"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Número padrão de recurso permitido por locatário. Um valor negativo "
-"significa ilimitado."
-
-msgid "Default security group"
-msgstr "Grupo de segurança padrão"
-
-msgid "Default security group already exists."
-msgstr "O grupo de segurança padrão já existe."
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"Define provedores para serviços avançados usando o formato:<service_type>:"
-"<name>:<driver>[:default]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-"Atraso dentro do qual o agente é esperado atualizar as portas existentes "
-"quando ele reinicia"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "Excluir o namespace removendo todos os dispositivos."
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "Excluindo porta %s"
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "Dispositivo %(dev_name)s no mapeamento: %(mapping)s não exclusivo"
-
-msgid "Device has no virtual functions"
-msgstr "O dispositivo não possui funções virtuais"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr ""
-"Nome do dispositivo %(dev_name)s está ausente no physical_device_mappings"
-
-msgid "Device not found"
-msgstr "Dispositivo não localizado."
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr ""
-"O endereço Mac do Roteador Virtual Distribuído para o host %(host)s não "
-"existe."
-
-msgid "Domain to use for building the hostnames"
-msgstr "Domínio a ser usado para construir os nomes dos hosts"
-
-msgid "Downgrade no longer supported"
-msgstr "O downgrade não é mais suportado"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "Driver %s não é único em todos provedores"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "Driver para firewall para grupos de segurança no agente L2"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr "Driver a ser usado para planejar a rede para o agente DHCP"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr "Driver a ser usado para planejar o roteador para um agente L3 padrão"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "Endereço IP duplicado '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "Regra de marcação duplicada em POST."
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "Regra do Grupo de Segurança Duplicada no Autoteste Inicial."
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "Hostroute duplicado '%s'"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "Itens duplicados na lista: '%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "Servidor de nomes duplicado '%s'"
-
-msgid "Duplicate segment entry in request."
-msgstr "Entrada duplicada de segmento na requisição."
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "ERRO: %s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"ERRO: Não é possível localizar o arquivo de configuração através dos "
-"caminhos de procura padrão (~/.neutron/, ~/, /etc/neutron/, /etc/) e a opção "
-"'--config-file'!"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr ""
-"Um dos parâmetros network_id ou router_id deve ser transmitido para o método "
-"_get_ports."
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "subnet_id ou port_id deve ser especificado"
-
-msgid "Empty physical network name."
-msgstr "Nome da rede física vazio."
-
-msgid "Enable FWaaS"
-msgstr "Habilitar FWaaS"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "Ative o modo de alta disponibilidade para roteadores virtuais."
-
-msgid "Enable SSL on the API server"
-msgstr "Habilite SSL no servidor de API"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"Ative o VXLAN no agente. Pode ser ativado quando o agente é gerenciado pelo "
-"plug-in ml2 usando o driver do mecanismo linuxbridge"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"Ative respondente ARP local se ele for suportado. Requer OVS 2.1 e driver "
-"ML2 l2population. Permite que o comutador (ao suportar uma sobreposição) "
-"para responder a uma solicitação de ARP localmente, sem executar uma "
-"transmissão dispendiosa de ARP na sobreposição."
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"Ativar os serviços em um agente com admin_state_up False. Se essa opção for "
-"False, quando admin_state_up de um agente tornar-se False, os serviços nele "
-"serão desativados. Os agentes com admin_state_up False não são selecionados "
-"para planejamento automático, independentemente dessa opção. Mas o "
-"planejamento manual para tais agentes estará disponível se essa opção for "
-"True."
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"Ativar/desativar a inspeção do log por proxy de metaddos. Deve ser "
-"desativado quando metadata_proxy_user/group não é permitido ler/gravar seu "
-"arquivo de log e a opção copytruncate logrotate deve ser usada se logrotate "
-"for ativado nos arquivos de log de proxy de metadados. O valor padrão da "
-"opção é deduzido de metadata_proxy_user: o log de inspeção é ativado se "
-"metadata_proxy_user for o ID/nome do usuário efetivo do agente."
-
-msgid "Encountered an empty component."
-msgstr "Foi encontrado um componente vazio."
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "Final da faixa de VLAN é menor que o início da faixa de VLAN"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr ""
-"O término do intervalo do túnel é inferior ao início do intervalo do túnel"
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "Erro importando driver de dispositivo FWaaS: %s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "Erro ao analisar endereço dns %s"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "Erro ao ler %s"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr "Prefixos existentes devem ser um subconjunto dos novos prefixos"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"Extensão a ser usada ao lado do driver do mecanismo l2population do plug-in "
-"ml2. Ela permite que o plug-in preencha a tabela de encaminhamento de VXLAN."
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "A extensão com %s não existe"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "O IP externo %s é o mesmo que o IP de gateway"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"A rede externa %(external_network_id)s não é atingível a partir da sub-rede "
-"%(subnet_id)s. Portanto, não é possível associar a porta %(port_id)s com um "
-"IP Flutuante."
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr ""
-"A rede externa %(net_id)s não pode ser atualizada para tornar-se não "
-"externa, pois ela possui portas de gateway existentes"
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "ExtraDhcpOpt %(id)s não pôde ser encontrado"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr ""
-"Plug-in FWaaS está configurado no lado do servidor, mas está desativado em "
-"FWaaS L3-agent."
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-"Falha ao reagendar o roteador %(router_id)s: nenhum agente l3 elegível "
-"encontrado."
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-"Falha ao planejar o roteador %(router_id)s para o Agente L3 %(agent_id)s."
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"Falha ao alocar um VRID na rede %(network_id)s para o roteador %(router_id)s "
-"após %(max_tries)s tentativas."
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"Falha ao criar a porta na rede %(network_id)s, porque fixed_ips incluía uma "
-"sub-rede inválida %(subnet_id)s"
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr "Falha ao analisar solicitação. Parâmetro '%s' não especificado"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr ""
-"Falha ao analisar solicitação. Atributo necessário '%s' não especificado"
-
-msgid "Failed to remove supplemental groups"
-msgstr "Falha ao remover grupos suplementares"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "Falha ao configurar gid %s"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "Falha ao configurar uid %s"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "Falha ao setar porta do túnel %(type)s para %(ip)s"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "O IP flutuante %(floatingip_id)s não pôde ser localizado"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr "Para protocolos TCP/UDP, port_range_min deve ser <= port_range_max"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "Força chamadas ip_lib para utilizar o ajudante raiz"
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-"Conjuntos de alocação de sobreposição localizados:%(pool_1)s %(pool_2)s para "
-"a sub-rede %(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"O gateway não pode ser atualizado para o roteador %(router_id)s, pois um "
-"gateway para rede externa %(net_id)s é requerido por um ou mais IPs "
-"flutuantes."
-
-msgid "Gateway is not valid on subnet"
-msgstr "O gateway não é válido na sub-rede"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr ""
-"Grupo (gid ou nome) executando proxy de metadados após sua inicialização"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"Grupo (gid ou nome) executando proxy de metadados após sua inicialização (se "
-"vazio: grupo efetivo do agente)."
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr "Grupo (gid ou nome) executando esse processo após sua inicialização"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Quantas vezes o Neutron tentará novamente a geração MAC"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"O código do ICMP (port-range-max) %(value)s é fornecido, mas o tipo do ICMP "
-"(port-range-min) está ausente."
-
-msgid "ID of network"
-msgstr "ID da rede"
-
-msgid "ID of network to probe"
-msgstr "ID da rede para análise"
-
-msgid "ID of probe port to delete"
-msgstr "ID da porta da análise a ser excluída"
-
-msgid "ID of probe port to execute command"
-msgstr "ID da porta da análise para executar comando"
-
-msgid "ID of the router"
-msgstr "ID do roteador"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr ""
-"O endereço IP %(ip_address)s não é um IP válido para nenhuma das sub-redes "
-"na rede especificada."
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr ""
-"O endereço IP %(ip_address)s não é um IP válido para a sub-rede especificada."
-
-msgid "IP address used by Nova metadata server."
-msgstr "Endereço IP usado pelo servidor de metadados Nova."
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "A alocação de IP requer subnet_id ou ip_address"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply falhou ao aplicar o seguinte conjunto de regras de "
-"tabelas de IP: \n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"O endereço IPv6 %(address)s não pode ser diretamente designado a uma porta "
-"na sub-rede %(id)s, pois a sub-rede é configurada para endereços automáticos"
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"A sub-rede IPv6 %s configurada para receber RAs de um roteador externo não "
-"pode ser incluída ao Neutron Router."
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"Se True, é feito um esforço para informar as definições de MTU para VMs "
-"através de métodos da rede (opções DHCP e RA MTU) quando a MTU preferencial "
-"da rede for conhecida."
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-"Se True, então permita que plug-ins que suportam-no criem redes "
-"transparentes da VLAN."
-
-msgid "Illegal IP version number"
-msgstr "Número de versão de IP ilegal"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr "Espaço de prefixo insuficiente para alocar o tamanho da sub-rede /%s"
-
-msgid "Insufficient rights for removing default security group."
-msgstr "Direitos insuficientes para remover o grupo de segurança padrão."
-
-msgid "Interface to monitor"
-msgstr "Interface para monitorar"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr ""
-"Intervalo entre verificações de um processo-filho em tempo real (segundos), "
-"use 0 para desativar"
-
-msgid "Interval between two metering measures"
-msgstr "Intervalo entre duas medidas de medição"
-
-msgid "Interval between two metering reports"
-msgstr "Intervalo entre dois relatórios de medição"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"CIDR inválido %s para o modo de endereço IPv6. O OpenStack usa o formato de "
-"endereço EUI-64, que requer que o prefixo seja /64."
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "Dispositivo Inválido %(dev_name)s:%(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-"Tipo de autenticação inválido: %(auth_type)s, os tipos válidos são: "
-"%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "Formato de dados inválido para o pool de IPs: '%s'"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "Formato de dados inválido para extra-dhcp-opt: %(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "Formato de dados inválido para o IP fixo: '%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "Formato de dados inválido para hostroute: '%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "Formato de dados inválido para servidor de nomes: '%s'"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "Formato inválido para rotas: %(routes)s, %(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "Formato inválido: %s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "Entrada inválida para %(attr)s. Motivo: %(reason)s."
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "Entrada inválida para a operação: %(error_message)s."
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"Entrada inválida. '%(target_dict)s' deve ser um dicionário com chaves: "
-"%(expected_keys)s"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr ""
-"Estado da instância inválido: %(state)s, os estados válidos são: "
-"%(valid_states)s"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "Mapeamento inválido: '%s'"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "Slot pci inválido %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr ""
-"Formato de provedor inválido. Última parte deve ser 'default' ou vazia: %s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "Rota inválida: %s"
-
-msgid "Invalid service provider format"
-msgstr "Formato inválido de provedor de serviço"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"Valor inválido para ICMP %(field)s (%(attr)s) %(value)s. Deve ser de 0 a 255."
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "Valor inválido para a porta %(port)s"
-
-msgid "Keepalived didn't respawn"
-msgstr "Keepalived não sofreu spawn novamente"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "Chave %(key)s no mapeamento: '%(mapping)s' não exclusivo"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "O limite deve ser um número inteiro de 0 ou superior e não '%d'"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "Limitar o número de concessões para impedir uma negação de serviço."
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"Lista de <physical_network>:<vlan_min>:<vlan_max> ou <physical_network> "
-"especificando nomes physical_network utilizáveis para provedores VLAN e "
-"redes de tenant, bem como faixas de tags de VLAN em cada um disponível para "
-"alocação pelas redes de tenant."
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"Lista de pontos de entrada do driver de tipo de rede que será carregado do "
-"namespace neutron.ml2.type_drivers namespace."
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "Endereço IP local dos terminais VXLAN."
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "Endereço IP local do terminal de túnel."
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "Local para soquete de domínio UNIX de Proxy de Metadados."
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "Local de soquete de domínio UNIX de Proxy de Metadados"
-
-msgid "Location of pid file of this process."
-msgstr "Local do arquivo pid deste processo."
-
-msgid "Location to store DHCP server config files"
-msgstr "Local para armazenar arquivos de configuração do servidor DHCP"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "Local para armazenar arquivos de configuração RA IPv6"
-
-msgid "Location to store child pid files"
-msgstr "Local para armazenar arquivos pid filhos"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr "Local para armazenar os arquivos de configuração keepalived/conntrackd"
-
-msgid "MTU setting for device."
-msgstr "Configuração de MTU para o dispositivo."
-
-msgid "MTU size of veth interfaces"
-msgstr "Tamanho MTU de interfaces vEth"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "Faça com que o agente l2 seja executado no modo DVR."
-
-msgid "Malformed request body"
-msgstr "Corpo da solicitação malformado"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "Número máximo de pares de endereço permitido"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "Número máximo de rotas do host por sub-rede"
-
-msgid "Metering driver"
-msgstr "Driver de medição"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "Rótulo de marcação %(label_id)s não existe"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "Regra para rótulo de marcação %(rule_id)s não existe"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"Regra de marcação com remote_ip_prefix %(remote_ip_prefix)s sobrepõe outra"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr "Minimizar pesquisa monitorando ovsdb para alterações da interface."
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "Chave ausente no mapeamento: '%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "Valor ausente no mapeamento: '%s'"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr ""
-"Vários agentes com agent_type=%(agent_type)s e host=%(host)s localizados"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "Mútliplos provedores padrão para o serviço %s"
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "Vários plug-ins para o serviço %s foram configurados"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "Mútliplos provedores especificados para o serviço %s"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr ""
-"Vários tenant_ids na criação da regra do grupo de segurança em massa não "
-"permitido"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr ""
-"Deverá especificar também o protocolo se o intervalo de portas for fornecido."
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr "Deve especificar uma ou mais ações na adição ou modificação do fluxo"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr ""
-"O nome '%s' deve ter 1-63 caracteres de comprimento, cada um dos quais pode "
-"ser apenas alfanumérico ou possuir um hífen."
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "O nome '%s' não deve começar nem terminar com um hífen."
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "Nome da ponte Open vSwitch a ser usado"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr ""
-"Nome da região do nova para utilização. Útil se keystone gerencia mais de "
-"uma região."
-
-msgid "Name of the FWaaS Driver"
-msgstr "Nome do driver FWaaS"
-
-msgid "Namespace of the router"
-msgstr "Namespace do roteador"
-
-msgid "Native pagination depend on native sorting"
-msgstr "A paginação nativa depende da classificação nativa"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "Delta negativo (downgrade) não suportado"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "Revisão relativa negativa (downgrade) não suportada"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "A rede %s não é uma rede externa válida"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "A rede %s não é uma rede externa"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"Rede de tamanho %(size)s, do intervalo de IP %(parent_range)s, excluindo "
-"intervalos de IP %(excluded_ranges)s não foi localizada."
-
-msgid "Network that will have instance metadata proxied."
-msgstr "Rede que terá os metadados da instância de proxy."
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "Valor do tipo de rede '%s' não suportado"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "Valor de tipo de rede necessário pelo plug-in ML2"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr "Tipos de rede suportados pelo agente (gre e/ou vxlan)."
-
-msgid "Neutron Service Type Management"
-msgstr "Gerenciamento do Tipo de Serviço Neuron"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "Neutron core_plugin não configurado!"
-
-msgid "Neutron plugin provider module"
-msgstr "Módulo do provedor de plug-in Neutron"
-
-msgid "Neutron quota driver class"
-msgstr "Classe do driver de cota Neutron"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr "Nenhum agente l3 elegível associado com a rede externa %s localizado"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "Nenhum outro endereço IP disponível na rede %(net_id)s."
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"Nenhum outro Identificador de Roteador Virtual (VRID) disponível ao criar o "
-"roteador %(router_id)s. O limite do número de Roteadores de alta "
-"disponibilidade por locatário é de 254."
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "Nenhum provedor especificado para o serviço '%s', saindo"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"Não é permitido designar manualmente um %(router_type)s roteador "
-"%(router_id)s de um nó DVR existente para outro agente L3 %(agent_id)s."
-
-msgid "Not authorized."
-msgstr "Não autorizado."
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"Não há l3 agentes disponíveis suficientes para assegurar a alta "
-"disponibilidade. Mínimo necessário %(min_agents)s, disponível %(num_agents)s."
-
-msgid "Number of RPC worker processes for service"
-msgstr "Número de processos do trabalhador RPC para o serviço"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr ""
-"Número de solicitações de lista não processada para configurar o soquete do "
-"servidor de metadados com"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr "Número de requisições de backlog para configurar no socket"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Número de IPs flutuantes permitido por locatário. Um valor negativo "
-"significa ilimitado."
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Número de redes permitidas por locatário. Um valor negativo significa "
-"ilimitado."
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Número de portas permitidas por locatário. Um valor negativo significa "
-"ilimitado."
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Número de roteadores permitidos por locatário. Um valor negativo significa "
-"ilimitado."
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr ""
-"Número de segundos entre o envio de eventos para nova se houver qualquer "
-"evento a enviar."
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "Número de segundos para continuar tentando escutar"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Número de grupos de segurança permitidos por locatário. Um valor negativo "
-"significa ilimitado."
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Número de regras de segurança permitidas por locatário. Um valor negativo "
-"significa ilimitado."
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr ""
-"Número de sub-redes permitidas por locatário. Um valor negativo significa "
-"ilimitado."
-
-msgid "OK"
-msgstr "OK"
-
-msgid "Only admin can view or configure quota"
-msgstr "Somente admin pode visualizar ou configurar cota"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr ""
-"Somente o administrador está autorizado a acessar as cotas para outro "
-"locatário"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr "Permitido apenas atualizar regras para um perfil de segurança por vez"
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "Apenas remote_ip_prefix ou remote_group_id pode ser fornecido."
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"A operação %(op)s não é suportada para device_owner %(device_owner)s na "
-"porta %(port_id)s."
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr "Substituir as configurações padrão dnsmasq por este arquivo"
-
-msgid "Owner type of the device: network/compute"
-msgstr "Tipo de proprietário do dispositivo: rede/cálculo"
-
-msgid "POST requests are not supported on this resource."
-msgstr "Requisições POST não são suportadas neste recurso."
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "Falha na análise de bridge_mappings: %s."
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "Análise de pci_vendor_devs suportada com falha"
-
-msgid "Path to PID file for this process"
-msgstr "Caminho para o arquivo PID para este processo"
-
-msgid "Path to the router directory"
-msgstr "Caminho para o diretório do roteador"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr ""
-"Porta de correção do peer na ponte de integração para a ponte do túnel."
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr ""
-"Porta da correção do peer na ponte do túnel para a ponte de integração."
-
-msgid "Ping timeout"
-msgstr "Tempo Limite de Ping"
-
-msgid "Plugin does not support updating provider attributes"
-msgstr "O plug-in não suporta atualização de atributos do provedor"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "A porta %(id)s não possui IP fixo %(address)s"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"A porta %(port_id)s está associada a um arrendatário diferente de IP "
-"Flutuante %(floatingip_id)s e, portanto, não pode ser ligada."
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr ""
-"A Segurança da Porta deve ser ativada para ter pares de endereços permitidos "
-"em uma porta."
-
-msgid "Port does not have port security binding."
-msgstr "A porta não possui uma ligação de segurança da porta."
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-"A porta possui grupo de segurança associado. Não é possível desativar a "
-"porta de segurança ou o endereço IP até que o grupo de segurança seja "
-"removido"
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr ""
-"A segurança da porta deve estar ativada e a porta deve ter um endereço IP "
-"para usar grupos de segurança."
-
-msgid "Private key of client certificate."
-msgstr "Chave privada de certificado do cliente."
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "Análise %s excluída"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "Análise criada: %s "
-
-msgid "Process is already started"
-msgstr "O processo já está iniciado"
-
-msgid "Process is not running."
-msgstr "O processo não está em execução."
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "Protocolo para acessar os metadados de nova, http ou https"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"Intervalo de segundos para atrasar aleatoriamente quando iniciar o "
-"planejador de tarefas periódicas para reduzir registro de data e hora. "
-"(Desativar configurando como 0)"
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr ""
-"O servidor de metadados remoto experimentou um erro de servidor interno."
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"Representando o tipo de recurso cujo carregamento está sendo relatado pelo "
-"agente. Isso pode ser \"redes\", \"sub-redes\" ou \"portas\". Quando "
-"especificado (o padrão é redes), o servidor irá extrair carregamento "
-"particular enviado como parte do seu objeto de configuração do agente do "
-"relatório de estado do agente, que é o número de recursos sendo consumido, "
-"em cada report_interval.dhcp_load_type pode ser usado em combinação com "
-"network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler."
-"WeightScheduler Quando o network_scheduler_driver é WeightScheduler, "
-"dhcp_load_type pode ser configurado para representar a opção para o recurso "
-"que está sendo balanceado. Exemplo: dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr ""
-"Falha de solicitação: erro do servidor interno ao processar sua solicitação."
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"A solicitação contém um par de endereços duplicado: mac_address "
-"%(mac_address)s ip_address %(ip_address)s."
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"Sub-rede solicitada com cidr: %(cidr)s para rede: %(network_id)s se sobrepõe "
-"com outra sub-rede"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"Recurso '%(resource_id)s' já está associado com o provedor '%(provider)s' "
-"para o tipo de serviço '%(service_type)s'"
-
-msgid "Resource body required"
-msgstr "Corpo do recurso necessário"
-
-msgid "Resource not found."
-msgstr "Recurso não encontrado."
-
-msgid "Resources required"
-msgstr "Recursos necessários"
-
-msgid "Root helper daemon application to use when possible."
-msgstr "Aplicativo do daemon auxiliar raiz para usar quando possível."
-
-msgid "Root permissions are required to drop privileges."
-msgstr "As permissões de raiz são necessárias para descartar privilégios."
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "Roteador %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "O roteador %(router_id)s não pôde ser localizado"
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr "O roteador %(router_id)s não possui uma interface com o id %(port_id)s"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr ""
-"O roteador %(router_id)s não possui uma interface na sub-rede %(subnet_id)s"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "O roteador já possui uma porta na sub-rede %s"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"A interface do roteador para a sub-rede %(subnet_id)s no roteador "
-"%(router_id)s não pode ser excluída, pois ela é requerida por um ou mais IPs "
-"flutuantes."
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"A interface do roteador para a sub-rede %(subnet_id)s no roteador "
-"%(router_id)s não pode ser excluída, pois ela é requerida por uma ou mais "
-"rotas."
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr "Roteador que terá conectado metadados de instâncias de proxy."
-
-msgid "Run as daemon."
-msgstr "Execute como daemon."
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"Segundos entre os nós que relatam o estado para o servidor; deve ser menor "
-"que agent_down_time, melhor se for metade ou menos do que agent_down_time."
-
-msgid "Seconds between running periodic tasks"
-msgstr "Segundos entre execução de tarefas periódicas"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"Segundos para considerar que o agente está inativo; deve ser no mínimo duas "
-"vezes report_interval, para ter certeza de que o agente está inativo."
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "O grupo de segurança %(id)s não existe"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "A regra do grupo de segurança %(id)s não existe"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "A regra do grupo de segurança já existe. ID de regra é %(id)s."
-
-msgid "Segments and provider values cannot both be set."
-msgstr "Valores, de segmento e provedor não podem ser ambos setados."
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"Enviar notificação para nova quando dados da porta (fixed_ips/floatingip) "
-"muda de modo que nova possa atualizar seu cache."
-
-msgid "Send notification to nova when port status changes"
-msgstr "Enviar notificação para nova quando o status da porta muda"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"Enviar esses vários ARPs gratuitos para a configuração de HA, se for menor "
-"ou igual a 0, o recurso está desativado"
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-"Provedor de serviço '%(provider)s' não pôde ser encontrado para o tipo de "
-"serviço %(service_type)s"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr ""
-"Tipo de serviço %(service_type)s não possui um provedor de serviço padrão"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"Configure novo tempo limite em segundos para novas chamadas rpc depois que o "
-"agente receber SIGTERM. Se o valor for configurado como 0, o tempo limite de "
-"rpc não será alterado"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"Configure ou desconfigure o bit don't fragment (DF) no pacote IP de saída "
-"que transporta o túnel GRE/VXLAN."
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"Alguns locatários possuem mais de um grupo de segurança chamado ‘padrão': "
-"%(duplicates)s. Todos os grupos de segurança 'padrão' devem ser duplicados "
-"antes de fazer upgrade do banco de dados."
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr ""
-"Especificando 'tenant_id' diferente do arrendatário autenticado na "
-"solicitação requer privilégios do administrador"
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr "A sub-rede para a interface do roteador deve ter um IP de gateway"
-
-msgid "Subnet pool has existing allocations"
-msgstr "O conjunto de sub-rede possui alocações existentes"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr "Sub-rede usada para a rede administrativa de alta disponibilidade l3."
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr ""
-"Sinalizador do Sistema Inteiro para determinar o tipo de roteador que "
-"locatários podem criar. Somente administrador pode substituir."
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "Porta TCP para atender a solicitações do servidor de metadados."
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr "Porta TCP usada pelo proxy de namespace de metadados Neutron."
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Porta TCP usada pelo servidor de metadados Nova."
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "TLD '%s' não deve ser todo numérico"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "TOS para pacotes de protocolo da interface vxlan."
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "TTL para pacotes de protocolo da interface vxlan."
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr ""
-"Arrendatário %(tenant_id)s não permitido para criar %(resource)s nesta rede"
-
-msgid "Tenant network creation is not enabled."
-msgstr "A criação da rede do arrendatário não está ativada."
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"A opção 'gateway_external_network_id' deve estar configurada para este "
-"agente pois o Neutron possui mais de uma rede externa."
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr ""
-"O CIDR da Rede de alta disponibilidade especificado no arquivo de "
-"configuração não é válido; %(cidr)s."
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "A porta UDP utilizada para túneis VXLAN."
-
-msgid "The advertisement interval in seconds"
-msgstr "O intervalo de propaganda em segundos"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "O pool de alocação %(pool)s não é válido."
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-"O pool de alocações %(pool)s abrange além da sub-rede CIDR %(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr ""
-"O atributo '%(attr)s' é referência a outro recurso, não pode ser usado pela "
-"classificação '%(resource)s'"
-
-msgid "The core plugin Neutron will use"
-msgstr "O plug-in principal que o Neutron irá utilizar."
-
-msgid "The driver used to manage the DHCP server."
-msgstr "O driver usado para gerenciar o servidor DHCP."
-
-msgid "The driver used to manage the virtual interface."
-msgstr "O driver usado para gerenciar a interface virtual."
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"O seguinte device_id %(device_id)s não pertence ao seu locatário ou "
-"corresponde a outro roteador de locatários."
-
-msgid "The host IP to bind to"
-msgstr "O IP do host para ligar a"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "A interface para interação com o OVSDB"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-"O número máximo de itens retornados em uma única resposta, o valor era "
-"'infinito' ou um número inteiro negativo significa que não há limite"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr "A rede %(network_id)s já foi hospedada pelo Agente DHCP %(agent_id)s."
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr ""
-"A rede %(network_id)s não está hospedada pelo agente DHCP %(agent_id)s."
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr "O número de par de endereços permitidos excede o máximo de %(quota)s."
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr ""
-"O número de segundos que o agente aguardará entre as pesquisas para mudanças "
-"do dispositivo local."
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-"O número de segundos a aguardar antes de reiniciar o monitor ovsdb após "
-"perder comunicação com ele."
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "Os números de sort_keys e sort_dirs devem ser os mesmos"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "A porta '%s' foi excluída"
-
-msgid "The port to bind to"
-msgstr "A porta para ligar a"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "O tipo de conteúdo requisitado %s é inválido."
-
-msgid "The resource could not be found."
-msgstr "O recurso não pôde ser encontrado."
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr "O roteador %(router_id)s já foi hospedado pelo Agente L3 %(agent_id)s."
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr ""
-"O servidor possui um erro ou é incapaz de executar a operação solicitada."
-
-msgid "The service plugins Neutron will use"
-msgstr "Os plugins de serviço que o Neutron irá utilizar"
-
-msgid "The type of authentication to use"
-msgstr "O tipo de autenticação a ser usado"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "O valor ‘%(value)s' para %(element)s não é válido."
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"O modo de trabalho para o agente. Os modos permitidos são: 'legacy' - isso "
-"preserva o comportamento existente em que o agente L3 é implementado em um "
-"nó de rede centralizada para fornecer serviços L3 como DNAT e SNAT. Use este "
-"modo se você não desejar adotar DVR. 'dvr' - este modo permite a "
-"funcionalidade de DVR e deve ser usado para um agente L3 que é executado em "
-"um host de cálculo. 'dvr_snat'- isto permite suporte SNAT centralizado em "
-"conjunto com DVR. Este modo deve ser usado para um agente L3 em execução em "
-"um nó centralizado (ou em implementações de host único, por exemplo, "
-"devstack)"
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"True para excluir todas as portas em todas as pontes OpenvSwitch. False para "
-"excluir portas criadas pelo Neutron na integração e pontes de rede externa."
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "Valor do IP do túnel necessário pelo plug-in ML2"
-
-msgid "Tunnel bridge to use."
-msgstr "Ponte do túnel a ser utilizada."
-
-msgid "URL to database"
-msgstr "URL para banco de dados"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "Não é possível acessar %s"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr ""
-"Não é possível concluir a operação para %(router_id)s. O número de rotas "
-"excede o máximo de %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"Não é possível concluir a operação para %(subnet_id)s. O número de "
-"servidores de nomes DNS excede o limite %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr ""
-"Não é possível concluir a operação para %(subnet_id)s. O número de rotas do "
-"host excede o limite %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-"Não é possível concluir a operação para a rede %(net_id)s. O endereço IP "
-"%(ip_address)s está em uso."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr ""
-"Não é possível concluir a operação para a rede %(net_id)s. O endereço mac "
-"%(mac)s está em uso."
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"Não é possível concluir a operação na rede %(net_id)s. Há uma ou mais portas "
-"ainda em uso na rede."
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"Não é possível concluir a operação na porta %(port_id)s para a rede "
-"%(net_id)s. A porta já possui um dispositivo conectado %(device_id)s."
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "Não é possível converter valor em %s"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "Não é possível criar a porta do Gateway do Agente"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "Não é possível criar a Porta da Interface SNAT"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr ""
-"Não é possível criar a rede simples. A rede física %(physical_network)s está "
-"em uso."
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr ""
-"Não é possível criar a rede. Nenhuma rede disponível encontrada no máximo de "
-"tentativas permitidas."
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr ""
-"Não é possível criar a rede. Nenhuma rede de arrendatário está disponível "
-"para alocação."
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"Não é possível criar a rede. A VLAN %(vlan_id)s na rede física "
-"%(physical_network)s está em uso."
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr "Não é possível criar a rede. O ID do túnel %(tunnel_id)s está em uso."
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "Não foi possível determinar o endereço MAC para %s"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "Não foi possível localizar '%s' no corpo da solicitação"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr "Nenhum enderço IP encontrado na rede externa %(net_id)s."
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "Não foi possível encontrar nome de recurso em %s"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "Não é possível gerar endereço IP pelo prefixo EUI64 para IPv4"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "Não é possível gerar MAC de DVR exclusivo para o host %(host)s."
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "Não é possível gerar um mac exclusivo na rede %(net_id)s."
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-"Não é possível identificar um campo de destino de: %s. A correspondência "
-"deve estar no formato %%(<field_name>)s"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr ""
-"Não foi possível verificar resultados:%(match)s pois o recurso pai: %(res)s "
-"não foi encontrado"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "Código de resposta inesperado: %s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "Resposta inesperada: %s"
-
-msgid "Unimplemented commands"
-msgstr "Comandos não implementados"
-
-msgid "Unknown API version specified"
-msgstr "Versão de API especificada é desconhecida"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "Atributo desconhecido '%s'."
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "Cadeia desconhecida: %r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "Recursos da cota desconhecidos %(unknown)s."
-
-msgid "Unmapped error"
-msgstr "Erro não mapeado"
-
-msgid "Unrecognized action"
-msgstr "Ação não reconhecida"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "Atributo(s) não reconhecido(s) '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "Tipo de Conteúdo Não Suportado"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "Tipo de rede não suportado %(net_type)s."
-
-msgid "Unsupported request type"
-msgstr "Tipo de solicitação não suportado"
-
-msgid "Updating default security group not allowed."
-msgstr "Não permitido atualizar o grupo de segurança padrão."
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"Utilize o driver de mecanismo de população ML2 l2 para aprender sobre MAC e "
-"IPs remotos e melhorar a escalabilidade do túnel."
-
-msgid "Use broadcast in DHCP replies"
-msgstr "Use a transmissão em respostas do DHCP"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr "Use --delta ou revisão relativa, não ambos"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr ""
-"Usuário (uid ou nome) executando proxy de metadados após sua inicialização"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"Usuário (uid ou nome) executando proxy de metadados após sua inicialização "
-"(se vazio: usuário efetivo do agente)."
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr "Usuário (uid ou nome) executando esse processo após sua inicialização"
-
-msgid "VRRP authentication password"
-msgstr "Senha de autenticação do VRRP"
-
-msgid "VRRP authentication type"
-msgstr "Tipo de autenticação do VRRP"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"A validação de chaves do dicionário falhou. Chaves esperadas: "
-"%(expected_keys)s Chaves fornecidas: %(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "O validador '%s' não existe."
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "Valor %(value)s no mapeamento: '%(mapping)s' não exclusivo"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"Inspecionar log de arquivo. A inspeção do log deve ser desativada quando "
-"metadata_proxy_user/group não possui permissões de leitura/gravação no "
-"arquivo de log de proxy de metadados."
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr ""
-"Onde armazenar arquivos de estado Neutron.  O agente deve ter permissão de "
-"escrita neste diretório."
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"Com IPv6, a rede usada para o gateway externo não precisa ter um sub-rede "
-"associada, pois o Link-local Address (LLA) designado automaticamente pode "
-"ser usado. No entanto, um endereço do gateway IPv6 é necessário para ser "
-"usado como o próximo hop para a rota padrão. Se nenhum endereço do gateway "
-"IPv6 for configurado aqui, (somente então) o roteador neutron será "
-"configurado para obter sua rota padrão de router advertisements (RAs) do "
-"roteador de envio de dados; em cujo caso o roteador de envio de dados também "
-"deve ser configurado para enviar esses RAs. O ipv6_gateway, quando "
-"configurado, deve ser o LLA da interface no roteador de envio de dados. Se "
-"um próximo hop usando um global unique address (GUA) for desejado, isso "
-"precisará ser feito por meio de uma sub-rede alocada para a rede e não por "
-"meio desse parâmetro. "
-
-msgid "You must implement __call__"
-msgstr "Você deve implementar __call__"
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-"Você deve fornecer um arquivo de configuração para a ponte - --config-file "
-"ou env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "Você deve fornecer uma revisão ou um delta relativo"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr ""
-"allocation_pools permitido somente para solicitações de sub-rede específicas."
-
-msgid "binding:profile value too large"
-msgstr "ligação: valor de perfil muito grande"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "cidr e prefixlen não devem ser fornecidos juntos"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr "dhcp_agents_per_network deve ser >= 1. '%s' é inválido."
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "fixed_ip_address não pode ser especificado sem um port_id"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "possui o proprietário do dispositivo %s"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "comando ip falhou no dispositivo %(dev_name)s:%(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "a capacidade %(capability)s de link de IP não é suportada"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "o comando de link do IP não é suportado: %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr ""
-"ip_version deve ser especificado na ausência de cidr e de subnetpool_id"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ipv6_address_mode não é válido quando ip_version for 4"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ipv6_ra_mode não será válido quando ip_version for 4"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"ipv6_ra_mode ou ipv6_address_mode não pode ser configurado quando "
-"enable_dhcp está configurado para False."
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"ipv6_ra_mode configurado para '%(ra_mode)s' com ipv6_address_mode "
-"configurado para '%(addr_mode)s' não é válido. Se ambos os atributos forem "
-"configurados, eles devem ter o mesmo valor"
-
-msgid "mac address update"
-msgstr "atualização do endereço mac"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"max_l3_agents_per_router %(max_agents)s de parâmetro de configuração não é "
-"válido. Ele deve ser maior ou igual a min_l3_agents_per_router "
-"%(min_agents)s."
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"O parâmetro de configuração min_l3_agents_per_router não é válido. Ele deve "
-"ser igual ou superior a %s para alta disponibilidade."
-
-msgid "network_type required"
-msgstr "network_type necessário"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "Valor de network_type '%s' não suportado"
-
-msgid "new subnet"
-msgstr "nova sub-rede"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "physical_network '%s' desconhecida para rede de provedor VLAN"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "physical_network '%s' desconhecida para rede de provedor flat"
-
-msgid "physical_network required for flat provider network"
-msgstr "physical_network requerida para rede de provedor flat"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "provider:physical_network especificado para a rede %s"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "respawn_interval deve ser >= 0 se fornecida."
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id fora da faixa (%(min)s até %(max)s)"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr "segmentation_id requer physical_network para rede de provedor VLAN"
-
-msgid "the nexthop is not connected with router"
-msgstr "o nexthop não está conectado com o roteador"
-
-msgid "the nexthop is used by router"
-msgstr "o nexthop é usado pelo roteador"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"uuid fornecido a partir da linha de comandos para que external_process possa "
-"nos monitorar via interface /proc/cmdline."
diff --git a/neutron/locale/ru/LC_MESSAGES/neutron.po b/neutron/locale/ru/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index 8cd03d9..0000000
+++ /dev/null
@@ -1,2376 +0,0 @@
-# Russian translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-09-06 10:15+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: ru\n"
-"Language-Team: Russian\n"
-"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
-"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
-"%100>=11 && n%100<=14)? 2 : 3)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
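
The Plural-Forms header above is a C-style expression that gettext evaluates
for each count n to pick one of the four Russian plural variants (nplurals=4).
A Python rendering of the same rule, as a sketch for readers unfamiliar with
the syntax:

    def ru_plural_index(n: int) -> int:
        """Sketch of the Russian Plural-Forms rule from the header above."""
        if n % 10 == 1 and n % 100 != 11:
            return 0  # 1, 21, 31, ...: singular form
        if 2 <= n % 10 <= 4 and not 12 <= n % 100 <= 14:
            return 1  # 2-4, 22-24, ...: paucal form
        if n % 10 == 0 or 5 <= n % 10 <= 9 or 11 <= n % 100 <= 14:
            return 2  # 0, 5-20, 25-30, ...: plural form
        return 3  # remaining (fractional) cases

    # Spot-check against the C expression:
    assert [ru_plural_index(n) for n in (1, 2, 5, 11, 21)] == [0, 1, 2, 2, 0]
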
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"Команда: %(cmd)s\n"
-"Код выхода: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: Внутренняя ошибка драйвера."
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s не является допустимым идентификатором %(type)s"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"Значение %(invalid_dirs)s недопустимо для sort_dirs; допустимое значение: "
-"'%(asc)s' и '%(desc)s'"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(key)s запрещен для сети поставщика %(tunnel)s"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"Метод %(method)s вызывался с параметрами сети %(current)s (исходные "
-"параметры %(original)s) и сетевыми сегментами %(segments)s"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"Метод %(method)s вызывался с параметрами подсети %(current)s (исходные "
-"параметры %(original)s)"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "Не удалось выполнить %(method)s."
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr "%(name)s '%(addr)s' не соответствует версии IP '%(ip_version)s'"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "%s нельзя вызывать в режиме без подключения"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "Атрибут %s недопустим для sort_key"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "Атрибут %s недопустим для sort_keys"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s не является допустимым тегом VLAN"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr "%s должен реализовать get_port_from_device или get_ports_from_devices."
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "%s запрещено для сети VLAN провайдера"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "%s запрещено для одноуровневой сети провайдера"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "%s запрещено для локальной сети провайдера"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s' превышает максимальную длину %(max_len)s"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s' отсутствует в %(valid_values)s"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr ""
-"Слишком большое значение '%(data)s' - требуется значение не больше "
-"'%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr ""
-"Слишком низкое значение '%(data)s' - требуется значение не меньше '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr ""
-"Данные '%(data)s' не распознаются как cidr IP-подсети; рекомендуется "
-"'%(cidr)s'"
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' не является допустимым сервером имен. %(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s' - Пустые строки недопустимы"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "'%s' невозможно преобразовать в булевский тип"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' содержит пробел, символ табуляции или пустой строки"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "'%s' не является словарем"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s' не является списком"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s' не является допустимым IP-адресом"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s' не является допустимой IP-подсетью"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s' не является допустимым MAC-адресом"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s' не является допустимым UUID"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' не является допустимым булевским значением"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "Недопустимые входные параметры: '%s'"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s' не является допустимой строкой"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s' не является целым"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s' не является целым или uuid"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s' не в форме <ключ>=[значение]"
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "Значение '%s' должно быть неотрицательным"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "Нулевое значение запрещено в качестве длины префикса CIDR"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "Значение cidr должно быть указано при отсутствии пула подсетей"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"Списки карт связей физических сетей со значениями MTU. Формат карты связей: "
-"<физическая-сеть>:<значение-MTU>. Эта карта связей позволяет, указать "
-"значение MTU физической сети, отличное от значения segment_mtu по умолчанию."
-
-msgid "A metering driver must be specified"
-msgstr "Необходимо указать драйвер измерений"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr "API для получения поставщиков служб для расширенных служб Neutron"
-
-msgid "Access to this resource was denied."
-msgstr "Доступ к этому ресурсу запрещен."
-
-msgid "Action to be executed when a child process dies"
-msgstr "Действие, выполняемое при завершении дочернего процесса"
-
-msgid "Adds external network attribute to network resource."
-msgstr "Добавляет атрибут внешней сети к сетевому ресурсу."
-
-msgid "Adds test attributes to core resources."
-msgstr "Добавляет атрибуты теста в базовые ресурсы."
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "Не найден агент %(id)s"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "Агент %(id)s выключен или не является агентом L3"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "Агент %(id)s выключен или не является допустимым агентом DHCP"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "Агент обновлен: %(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr "Не найден агент с agent_type=%(agent_type)s и host=%(host)s"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "Разрешить автоматическое планирование сетей для агента DHCP."
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "Разрешить автоматическое планирование маршрутизаторов для агента L3."
-
-msgid "Allow running metadata proxy."
-msgstr "Разрешить выполнение прокси метаданных."
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr "Разрешить отправку уведомления об операции ресурса агенту DHCP"
-
-msgid "Allow the usage of the bulk API"
-msgstr "Разрешить использование Bulk API"
-
-msgid "Allow the usage of the pagination"
-msgstr "Разрешить использование разбиения на страницы"
-
-msgid "Allow the usage of the sorting"
-msgstr "Разрешить использование сортировки"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr "Разрешить незащищенные запросы SSL (https) метаданных nova"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair должен содержать атрибут ip_address"
-
-msgid "An interface driver must be specified"
-msgstr "Не указан драйвер интерфейса"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"Упорядоченный список конечных точек драйверов механизмов создания сетей, "
-"загружаемых из пространства имен neutron.ml2.mechanism_drivers."
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr ""
-"Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш запрос."
-
-msgid "An unknown exception occurred."
-msgstr "Обнаружено неизвестное исключение."
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "Атрибут '%s' недопустим в POST"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "Автоматически удалять сети из отключенных агентов DHCP."
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr ""
-"Автоматически перепланировать маршрутизаторы с отключенных агентов L3 на "
-"включенные агенты L3 ."
-
-msgid "Available commands"
-msgstr "Доступные команды"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "Базовый сервер не поддерживает прозрачный режим VLAN."
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"Неверный префикс или формат mac для генерации адреса IPv6 с помощью EUI-64: "
-"%(prefix)s, %(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr "Неверный тип префикса для генерации адреса IPv6 с помощью EUI-64: %s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "Базовый MAC: %s"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "Мост %(bridge)s не существует."
-
-msgid "Bulk operation not supported"
-msgstr "Групповая операция не поддерживается"
-
-msgid "CIDR to monitor"
-msgstr "CIDR для монитора"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr ""
-"Невозможно добавить нефиксированный IP-адрес для порта в подсети %s, для "
-"которой не указан gateway_ip"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr "Невозможно выделить запрошенную подсеть из доступного набора префиксов"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"Нефиксированный IP-адрес  %(floating_ip_address)s (%(fip_id)s) невозможно "
-"связать с портом %(port_id)s, который использует фиксированный IP-адрес "
-"%(fixed_ip)s, так как для этого фиксированного IP-адреса уже есть "
-"нефиксированный IP-адрес во внешней сети %(net_id)s."
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr ""
-"Невозможно создать нефиксированный IP-адрес и связать его с портом %s, так "
-"как этот порт принадлежит другому арендатору."
-
-msgid "Cannot create resource for another tenant"
-msgstr "Невозможно создать ресурс для другого арендатора"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "Невозможно отключить enable_dhcp, если заданы атрибуты ipv6"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"Невозможно иметь несколько портов маршрутизатора с одинаковыми ИД сети, если "
-"обасодержат подсети IPv6. Существующий порт %(p)s имеет ИД сети и подсетей "
-"IPv6 %(nid)s"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"Не удалось разместить маршрутизатор %(router_type)s %(router_id)s на агенте "
-"L3 %(agent_mode)s %(agent_id)s."
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr "Невозможно сравнить приоритет при удалении или изменении потока"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "subnet-id и port-id нельзя указывать одновременно"
-
-msgid "Cannot understand JSON"
-msgstr "Невозможно распознать JSON"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "Невозможно обновить атрибут %s, доступный только для чтения"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr "Файл общего ключа CA (CA cert) для ssl"
-
-msgid "Check for ARP responder support"
-msgstr "Проверка наличия поддержки промежуточного клиента ARP"
-
-msgid "Check for OVS vxlan support"
-msgstr "Проверить на наличие поддержки OVS vxlan"
-
-msgid "Check for VF management support"
-msgstr "Проверить наличия поддержки управления VF"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "Проверка наличия поддержки iproute2 vxlan"
-
-msgid "Check for nova notification support"
-msgstr "Проверка наличия поддержки уведомлений nova"
-
-msgid "Check for patch port support"
-msgstr "Проверка наличия поддержки портов исправлений"
-
-msgid "Check minimal dnsmasq version"
-msgstr "Проверить минимальную версию dnsmasq"
-
-msgid "Check netns permission settings"
-msgstr "Проверить параметры прав доступа netns"
-
-msgid "Check ovsdb native interface support"
-msgstr "Проверить поддержку собственного интерфейса ovsdb"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"Cidr %(subnet_cidr)s подсети %(subnet_id)s перекрывается с cidr %(cidr)s "
-"подсети %(sub_id)s"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "Сертификат клиента для сервера API метаданных nova."
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"Разделенный запятой список кортежей <tun_min>:<tun_max>, в котором "
-"перечислены диапазоны ИД туннелей GRE, доступные для выделения сети "
-"арендатора"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"Разделенный запятой список кортежей <vni_min>:<vni_max>, в котором "
-"перечислены идентификаторы VNI VXLAN, доступные для выделения сети арендатора"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr ""
-"Разделенный запятыми список серверов DNS, которые будут использоваться для "
-"пересылки."
-
-msgid "Command to execute"
-msgstr "Выполняемая команда"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr ""
-"Файл конфигурации для драйвера интерфейса (Можно также использовать l3_agent."
-"ini)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "Конфликтующее значение ethertype %(ethertype)s для CIDR %(cidr)s"
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"Контролирует, включен ли API групп защиты neutron на сервере. Значение "
-"должно быть false, когда группы защиты не используются или используется API "
-"групп защиты nova."
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr ""
-"Не удалось подключиться к порту %(host)s:%(port)s по истечении %(time)d "
-"секунд"
-
-msgid "Could not deserialize data"
-msgstr "Не удалось десериализовать данные"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "Создание не выполнено. %(dev_name)s уже существует."
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr ""
-"Текущий ip-адрес шлюза %(ip_address)s уже используется портом %(port_id)s. "
-"Обновление невозможно."
-
-msgid "Currently distributed HA routers are not supported."
-msgstr ""
-"В настоящее время распределенные маршрутизаторы высокой готовности не "
-"поддерживаются."
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"Продолжительность выделения адреса DHCP (в секундах). Укажите -1, чтобы "
-"dnsmasq использовала бесконечное время выделения."
-
-msgid "Default driver to use for quota checks"
-msgstr "Драйвер по умолчанию, применяемый для проверки квоты"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Количество ресурсов по умолчанию на одного арендатора. Отрицательное "
-"значение - не ограничено."
-
-msgid "Default security group"
-msgstr "Группа защиты по умолчанию"
-
-msgid "Default security group already exists."
-msgstr "Группа защиты по умолчанию уже существует."
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"Определяет поставщиков для расширенных служб в формате: <service_type>:"
-"<name>:<driver>[:default]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-"Задержка, в течение которой агент, предположительно, должен обновить "
-"существующие порты при перезапуске"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "Удалите пространство имен, удалив все устройства."
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "Удаление порта %s"
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "Устройство %(dev_name)s в карте связей %(mapping)s неуникально"
-
-msgid "Device has no virtual functions"
-msgstr "У устройства нет виртуальных функций"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr "Имя устройства %(dev_name)s не указано в physical_device_mappings"
-
-msgid "Device not found"
-msgstr "Устройство не найдено"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr ""
-"MAC-адрес распределенного виртуального маршрутизатора для хоста %(host)s не "
-"существует."
-
-msgid "Domain to use for building the hostnames"
-msgstr "Домен, используемый для компоновки имен хостов"
-
-msgid "Downgrade no longer supported"
-msgstr "Понижение больше не поддерживается"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "Драйвер %s не является уникальным среди поставщиков"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "Драйвер для брандмауэра групп защиты в агенте L2"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr "Драйвер, используемый для планирования сети для агента DHCP"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr ""
-"Драйвер, используемый для планирования маршрутизатора для агента L3 по "
-"умолчанию"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "Одинаковые IP-адреса: '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "Дубликат правила измерения в POST."
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "Совпадающие правила группы защиты в POST."
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "Одинаковые маршруты к хосту: '%s'"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "Список содержит одинаковые элементы: '%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "Одинаковые серверы имен: '%s'"
-
-msgid "Duplicate segment entry in request."
-msgstr "Дубликат записи сегмента в запросе."
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "Ошибка: %s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"ОШИБКА: Не удалось найти файл конфигурации с использованием путей поиска по "
-"умолчанию (~/.neutron/, ~/, /etc/neutron/, /etc/) и опция '--config-file'!"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr ""
-"Либо один из параметров network_id, либо router_id должен быть передан в "
-"метод _get_ports."
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "Необходимо указать или subnet_id, или port_id"
-
-msgid "Empty physical network name."
-msgstr "Пустое имя физической сети."
-
-msgid "Enable FWaaS"
-msgstr "Включить FWaaS"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "Включить режим высокой готовности для виртуальных маршрутизаторов."
-
-msgid "Enable SSL on the API server"
-msgstr "Разрешить применение SSL на сервере API"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"Активируйте VXLAN на агенте. Активация возможна, если агентом управляет "
-"модуль ml2, использующий драйвер механизма linuxbridge"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"Включить локальный промежуточный клиент ARP, если он поддерживается. "
-"Требуется OVS 2.1 и драйвер ML2 l2population. Позволяет коммутатору (когда "
-"поддерживается перекрытие) отвечать на запрос ARP локально, без выполнения "
-"дорогостоящего оповещения ARP в перекрытии."
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"Включить службы на агенте с admin_state_up False. Если эта опция равна "
-"False, когда admin_state_up агента устанавливается False, службы на нем "
-"будут выключены. Агенты с admin_state_up False не выбраны для "
-"автоматического планирования независимо от этой опции. Но ручное "
-"планирование для таких агентов доступно, если опция равна True."
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"Включить/выключить отслеживание протокола посредством прокси метаданных. Оно "
-"должно быть выключено, когда metadata_proxy_user/group не разрешено читать/"
-"писать в файл протокола, и должна использоваться опция copytruncate "
-"logrotate, если опция logrotate включена для файлов протокола прокси "
-"метаданных. Значение опции по умолчанию выводится из metadata_proxy_user: "
-"протокол отслеживания включен, если metadata_proxy_user является  "
-"действующим ИД/именем пользователя агента. "
-
-msgid "Encountered an empty component."
-msgstr "Обнаружен пустой компонент."
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "Конечное значение диапазона VLAN меньше его начального значения"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr "Конечное значение диапазона туннелей меньше его начального значения"
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "Ошибка при импорте драйвера устройства FWaaS: %s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "Ошибка при анализе адреса dns %s"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "Ошибка при чтении %s"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr "Существующие префиксы должны быть подмножеством новых префиксов"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"Расширение для использования наряду с драйвером механизма l2population "
-"модуля ml2. Оно обеспечивает заполнение модулем таблицы пересылки VXLAN."
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "Расширение с псевдонимом %s не существует"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "Внешний IP-адрес %s совпадает с IP-адресом шлюза"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"Внешняя сеть %(external_network_id)s недостижима из подсети %(subnet_id)s. "
-"Поэтому порт %(port_id)s невозможно связать с нефиксированным IP-адресом."
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr ""
-"Невозможно изменить внешнюю сеть %(net_id)s, сделав ее не внешней, так как в "
-"ней существуют порты шлюза"
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "Не удалось найти ExtraDhcpOpt %(id)s"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr ""
-"Модуль FWaaS настроен на стороне сервера, но FWaaS выключен в агенте L3."
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-"Не удалось перепланировать маршрутизатор %(router_id)s: не найден допустимый "
-"агент L3."
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-"Не удалось запланировать маршрутизатор %(router_id)s для агента L3 "
-"%(agent_id)s."
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"Не удалось выделить VRID в сети %(network_id)s для маршрутизатора "
-"%(router_id)s за %(max_tries)s попыток."
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"Не удалось создать порт в сети %(network_id)s, так как fixed_ips содержат "
-"недопустимую подсеть %(subnet_id)s"
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr "Не удалось проанализировать запрос. Не указан параметр '%s'"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr ""
-"Не удалось проанализировать запрос. Не указан обязательный атрибут '%s'"
-
-msgid "Failed to remove supplemental groups"
-msgstr "Не удалось удалить дополнительные группы"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "Не удалось получить gid %s"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "Не удалось задать uid %s"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "Не удалось настроить порт туннеля %(type)s на %(ip)s"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "Не найден нефиксированный IP-адрес %(floatingip_id)s"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr ""
-"Для протоколов TCP/UDP значение port_range_min должно быть <= port_range_max"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr ""
-"Использовать в вызовах ip_lib вспомогательную программу для получения прав "
-"доступа root"
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-"Обнаружено перекрытие пулов выделения %(pool_1)s %(pool_2)s для подсети "
-"%(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"Невозможно обновить шлюз для маршрутизатора %(router_id)s, так как шлюз к "
-"внешней сети %(net_id)s требуется одному или нескольким нефиксированным IP-"
-"адресам."
-
-msgid "Gateway is not valid on subnet"
-msgstr "Шлюз недопустим в подсети"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr "Группа (gid или имя) использует proxy метаданных после инициализации"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"Группа (gid или имя) использует proxy метаданных после инициализации (если "
-"пустое, используется группа агента). "
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr "Группа (gid или имя) запускает этот процесс после инициализации"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Число повторов генерации MAC для Neutron"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"Код ICMP (port-range-max) %(value)s указан, но тип ICMP (port-range-min) "
-"отсутствует."
-
-msgid "ID of network"
-msgstr "ИД сети"
-
-msgid "ID of network to probe"
-msgstr "ИД сети для тестирования"
-
-msgid "ID of probe port to delete"
-msgstr "ИД удаляемого тестового порта"
-
-msgid "ID of probe port to execute command"
-msgstr "ИД тестового порта для выполнения команды"
-
-msgid "ID of the router"
-msgstr "ИД маршрутизатора"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr ""
-"IP-адрес %(ip_address)s не является допустимым IP-адресом ни для одной "
-"подсети в указанной сети."
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr ""
-"IP-адрес %(ip_address)s не является допустимым IP-адресом для указанной "
-"подсети."
-
-msgid "IP address used by Nova metadata server."
-msgstr "IP-адрес, используемый сервером метаданных Nova."
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "Для выделения IP-адреса требуется subnet_id или ip_address"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"Функции IPTablesManager.apply не удалось применить следующий набор правил "
-"iptables :\n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"Адрес IPv6 %(address)s не может быть напрямую связан с портом в подсети "
-"%(id)s, так как подсеть настроена для автоматических адресов"
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"Подсеть IPv6 %s, настроенная для приема RA из внешнего маршрутизатора, не "
-"может быть добавлена в маршрутизатор Neutron."
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"Если True, предпринимается попытка передачи параметров MTU виртуальным "
-"машинам сетевыми средствами (DHCP и параметры RA MTU), когда "
-"предпочтительный MTU сети известен."
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-"Если True, разрешаются модули, поддерживающие создание прозрачных сетей VLAN."
-
-msgid "Illegal IP version number"
-msgstr "Запрещенный номер версии IP"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr "Недостаточное пространство префиксов для выделения размера сети /%s"
-
-msgid "Insufficient rights for removing default security group."
-msgstr ""
-"Отсутствуют требуемые права доступа для удаления группы защиты по умолчанию."
-
-msgid "Interface to monitor"
-msgstr "Интерфейс для монитора"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr ""
-"Интервал между проверками работы дочернего процесса (в секундах), 0 для "
-"отключения"
-
-msgid "Interval between two metering measures"
-msgstr "Интервал между двумя показателями измерений"
-
-msgid "Interval between two metering reports"
-msgstr "Интервал между двумя отчетами измерений"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"Недопустимый CIDR %s для режима адресации IPv6. OpenStack использует формат "
-"адреса EUI-64, для которого требуется префикс /64."
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "Недопустимое устройство %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-"Недопустимый тип идентификации: %(auth_type)s. Допустимые типы: "
-"%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "Недопустимый формат данных для пула IP: '%s'"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "Недопустимый формат данных для extra-dhcp-opt: %(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "Недопустимый формат данных для фиксированного IP: '%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "Недопустимый формат данных для маршрута к хосту: '%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "Недопустимый формат данных сервера имен: '%s'"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "Недопустимый формат маршрутизаторов: %(routes)s, %(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "Неправильный формат: %s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "Недопустимые входные данные для %(attr)s. Причина: %(reason)s."
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "Недопустимые входные данные для операции: %(error_message)s."
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"Недопустимые входные параметры. '%(target_dict)s' должен быть словарем с "
-"ключами %(expected_keys)s"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr ""
-"Недопустимое состояние экземпляра: %(state)s. Допустимые состояния: "
-"%(valid_states)s"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "Недопустимое отображение: '%s'"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "Недопустимый разъем pci %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr ""
-"Недопустимый формат поставщика. Последняя часть должна иметь вид 'default' "
-"или быть пустой: %s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "Недопустимый маршрут: %s"
-
-msgid "Invalid service provider format"
-msgstr "Недопустимый формат поставщика службы"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"Недопустимое значение для ICMP %(field)s (%(attr)s) %(value)s. Значение "
-"должно лежать в диапазоне от 0 до 255."
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "Недопустимое значение для порта %(port)s"
-
-msgid "Keepalived didn't respawn"
-msgstr "Демон keepalived не выполнил повторное порождение"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "Ключ %(key)s в отображении '%(mapping)s' не уникален"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "Ограничение должно быть неотрицательным целым и не равно '%d'"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "Ограничить число выделений во избежание отказа в обслуживании."
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"Список <physical_network>:<vlan_min>:<vlan_max> или <physical_network>, "
-"содержащий имена физических сетей, которые могут использоваться для сетей "
-"VLAN провайдера и арендатора, а также диапазоны тегов VLAN для каждой сети, "
-"доступной для выделения арендаторам."
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"Список конечных точек драйвера типа сети, загружаемых из пространства имен "
-"neutron.ml2.type_drivers."
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "Локальный IP-адрес конечных точек VXLAN."
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "Локальный IP-адрес конечной точки туннеля."
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "Расположение сокета домена UNIX прокси метаданных. "
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "Расположение сокета домена UNIX прокси метаданных"
-
-msgid "Location of pid file of this process."
-msgstr "Расположение файла pid этого процесса."
-
-msgid "Location to store DHCP server config files"
-msgstr "Расположение для хранения файлов конфигурации сервера"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "Расположение для хранения файлов конфигурации RA IPv6"
-
-msgid "Location to store child pid files"
-msgstr "Расположение для хранения дочерних файлов pid"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr "Расположение для хранения файлов конфигурации keepalived/conntrackd"
-
-msgid "MTU setting for device."
-msgstr "Параметр MTU для устройства."
-
-msgid "MTU size of veth interfaces"
-msgstr "Размер MTU интерфейсов veth"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "Создать агент L2, выполняемый в режиме DVR."
-
-msgid "Malformed request body"
-msgstr "Неправильное тело запроса"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "Максимальное число разрешенных пар адресов"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "Максимальное количество маршрутов хоста на подсеть"
-
-msgid "Metering driver"
-msgstr "Драйвер измерения"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "Метка измерения %(label_id)s не существует"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "Правило метки измерения %(rule_id)s не существует"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"Правило метки измерения с remote_ip_prefix %(remote_ip_prefix)s "
-"перекрывается другим правилом"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr ""
-"Минимизировать опрос путем мониторинга ovsdb на предмет изменений интерфейса."
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "Отсутствует ключ в отображении: '%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "Отсутствует значение в отображении: '%s'"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr "Найдено несколько агентов с agent_type=%(agent_type)s и host=%(host)s"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "Несколько поставщиков по умолчанию для службы %s"
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "Для службы %s настроено несколько модулей"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "Несколько поставщиков задано для службы %s"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr ""
-"Групповая операция создания нескольких tenant_ids в правиле группы защиты не "
-"разрешена"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr "При указании диапазона портов необходимо задать протокол."
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr ""
-"Необходимо указать одно или несколько действий добавления или изменения "
-"потока"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr ""
-"Длина имени '%s' должна находиться в диапазоне от 1 до 63 алфавитно-цифровых "
-"символов или дефисов."
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "Имя '%s' не должно начинаться дефисом или оканчиваться им."
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "Имя используемого моста Open vSwitch"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr ""
-"Имя используемого региона nova. Необходимо, если keystone управляет "
-"несколькими регионами."
-
-msgid "Name of the FWaaS Driver"
-msgstr "Имя драйвера FWaaS"
-
-msgid "Namespace of the router"
-msgstr "Пространство имен маршрутизатора"
-
-msgid "Native pagination depend on native sorting"
-msgstr "Внутреннее разбиение на страницы зависит от внутренней сортировки"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "Отрицательная дельта (понижение) не поддерживается"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "Отрицательная относительная ревизия (понижение) не поддерживается"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "Сеть %s не является допустимой внешней сетью"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "Сеть %s не является внешней"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"Сеть размера %(size)s из диапазона IP-адресов %(parent_range)s, кроме "
-"диапазонов IP-адресов %(excluded_ranges)s, не найдена."
-
-msgid "Network that will have instance metadata proxied."
-msgstr "Сеть, у которой метаданные экземпляра будут доступны через посредника."
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "Значение типа сети '%s' не поддерживается"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "Для модуля ML2 требуется значение типа сети"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr "Типы сетей, поддерживаемые агентом (gre или vxlan)."
-
-msgid "Neutron Service Type Management"
-msgstr "Управление типами служб Neutron"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "Не настроен core_plugin Neutron!"
-
-msgid "Neutron plugin provider module"
-msgstr "Модуль провайдера модулей Neutron"
-
-msgid "Neutron quota driver class"
-msgstr "Класс драйвера квоты Neutron"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr "Не найдены допустимые агенты l3, связанные с внешней сетью %s"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "В сети %(net_id)s больше нет доступных IP-адресов."
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"Не осталось доступных ИД виртуального маршрутизатора (VRID) при создании "
-"маршрутизатора %(router_id)s. Ограничение числа маршрутизаторов высокой "
-"готовности на арендатора составляет 254."
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "Не заданы поставщики для службы '%s', выход"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"Присвоение вручную маршрутизатора %(router_type)s %(router_id)s из "
-"существующего узла DVR другому агенту L3 %(agent_id)s не разрешено."
-
-msgid "Not authorized."
-msgstr "Не авторизировано."
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"Недостаточно агентов L3 для обеспечения высокой готовности. Требуется "
-"минимум %(min_agents)s, доступно %(num_agents)s."
-
-msgid "Number of RPC worker processes for service"
-msgstr "Количество процессов обработчика RPC для службы"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr ""
-"Количество непереданных запросов для настройки сокета сервера метаданных"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr "Количество непереданных запросов для настройки сокета"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Количество нефиксированных IP-адресов на одного арендатора. Отрицательное "
-"значение - не ограничено."
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Число разрешенных сетей на одного арендатора. Отрицательное значение "
-"означает отсутствие ограничений."
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Количество портов на одного арендатора. Отрицательное значение - не "
-"ограничено."
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Количество маршрутизаторов на одного арендатора. Отрицательное значение - не "
-"ограничено."
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr ""
-"Интервал, в секундах, между отправкой событий nova, если имеются события, "
-"требующие отправки."
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "Интервал (в секундах) для продолжения попыток приема"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Количество групп защиты на одного арендатора. Отрицательное значение - не "
-"ограничено."
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Количество правил защиты на одного арендатора. Отрицательное значение - не "
-"ограничено."
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr ""
-"Количество подсетей на одного арендатора. Отрицательное значение - не "
-"ограничено."
-
-msgid "Only admin can view or configure quota"
-msgstr "Только администратор может просматривать и настраивать квоту"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr "Только администратор имеет доступ к квотам других арендаторов"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr ""
-"Разрешено обновлять правила одновременно только для одного профайла защиты"
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "Можно задать только remote_ip_prefix или remote_group_id."
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"Операция %(op)s не поддерживается для device_owner %(device_owner)s, порт: "
-"%(port_id)s."
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr ""
-"Переопределите параметры по умолчанию для dnsmasq с помощью этого файла"
-
-msgid "Owner type of the device: network/compute"
-msgstr "Тип владельца устройства: network/compute"
-
-msgid "POST requests are not supported on this resource."
-msgstr "Запросы POST не поддерживаются этим ресурсом."
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "Синтаксический анализ bridge_mappings не выполнен: %s."
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "Ошибка анализа поддерживаемых pci_vendor_devs"
-
-msgid "Path to PID file for this process"
-msgstr "Путь к файлу PID для этого процесса"
-
-msgid "Path to the router directory"
-msgstr "Путь к каталогу маршрутизатора"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr "Равноправный порт исправлений в мосте интеграции для моста туннеля."
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr "Равноправный порт исправлений в мосте туннеля для моста интеграции."
-
-msgid "Ping timeout"
-msgstr "Тайм-аут проверки связи"
-
-msgid "Plugin does not support updating provider attributes"
-msgstr "Модуль не поддерживает обновление атрибутов поставщика"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "Порт %(id)s не имеет фиксированного IP-адреса %(address)s"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"Порт %(port_id)s ассоциируется с арендатором, отличным от нефиксированного "
-"IP %(floatingip_id)s, поэтому его нельзя связать."
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr ""
-"Необходимо включить защиту порта для получения разрешенных пар адресов на "
-"порту."
-
-msgid "Port does not have port security binding."
-msgstr "Для порта отсутствует привязка защиты порта."
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-"С портом связана группа защиты. Пока группа защиты не удалена, невозможно "
-"выключить защиту порта или IP-адрес"
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr ""
-"Для использования групп защиты необходимо включить защиту порта и присвоить "
-"ему IP-адрес."
-
-msgid "Private key of client certificate."
-msgstr "Личный ключ сертификата клиента."
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "Тест %s удален"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "Создан тест %s "
-
-msgid "Process is already started"
-msgstr "Процесс уже запущен"
-
-msgid "Process is not running."
-msgstr "Процесс не запущен."
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "Протокол для доступа к метаданным nova (http или https)"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"Диапазон случайных задержек (в секундах) при запуске планировщика "
-"периодических задач во избежание взрывного запуска. (Для выключения задайте "
-"0)"
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr "Внутренняя ошибка удаленного сервера метаданных."
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"Представление типа ресурса, о чьей загрузке сообщает агент. Это может быть "
-"\"networks\", \"subnets\" или \"ports\". Когда указано (по умолчанию "
-"networks), сервер извлекает определенную загрузку, отправленную как часть "
-"его объекта конфигурации агента из состояния отчета агента, который содержит "
-"количество потребленных ресурсов за каждый интервал report_interval. "
-"dhcp_load_type можно использовать в сочетании с network_scheduler_driver = "
-"neutron.scheduler.dhcp_agent_scheduler.WeightScheduler Когда "
-"network_scheduler_driver - WeightScheduler, dhcp_load_type можно настроить "
-"для представления выбора балансируемого ресурса. Пример: "
-"dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr ""
-"Запрос не выполнен: при обработке запроса произошла внутренняя ошибка "
-"сервера."
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"В запросе содержится копия пары адресов: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"Запрошенная подсеть с cidr %(cidr)s для сети %(network_id)s перекрывается с "
-"другой сетью"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"Ресурс '%(resource_id)s' уже связан с поставщиком '%(provider)s' для типа "
-"службы '%(service_type)s'"
-
-msgid "Resource body required"
-msgstr "Требуется тело ресурса"
-
-msgid "Resource not found."
-msgstr "Ресурс не найден."
-
-msgid "Resources required"
-msgstr "Требуются ресурсы"
-
-msgid "Root helper daemon application to use when possible."
-msgstr ""
-"Приложение вспомогательного демона для получения прав доступа root "
-"(используется, когда это возможно)."
-
-msgid "Root permissions are required to drop privileges."
-msgstr "Для сброса прав доступа требуются права доступа пользователя Root."
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "Маршрутизатор %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "Не найден маршрутизатор %(router_id)s"
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr "У маршрутизатора %(router_id)s нет интерфейса с ИД %(port_id)s"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr "У маршрутизатора %(router_id)s нет интерфейса в подсети %(subnet_id)s"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "У маршрутизатора уже есть порт в подсети %s"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"Невозможно удалить интерфейс маршрутизатора для подсети %(subnet_id)s для "
-"маршрутизатора %(router_id)s, так как он требуется одному или нескольким "
-"нефиксированным IP-адресам."
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"Невозможно удалить интерфейс маршрутизатора для подсети %(subnet_id)s для "
-"маршрутизатора %(router_id)s, так как он требуется одному или нескольким "
-"маршрутизаторам."
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr ""
-"Маршрутизатор, у которого метаданные подключенных экземпляров будут доступны "
-"через посредника."
-
-msgid "Run as daemon."
-msgstr "Выполнить как демон."
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"Интервал отправки сообщений о состоянии узлов на сервер (в секундах). "
-"Значение должно быть меньше, чем agent_down_time, оптимально - не больше "
-"половины значения agent_down_time."
-
-msgid "Seconds between running periodic tasks"
-msgstr "Интервал запуска периодических задач (в секундах)"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"Интервал (в секундах), в течение которого агент считается выключенным; "
-"должен по меньшей мере вдвое превышать значение report_interval, чтобы "
-"убедиться в том, что агент выключен навсегда."
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "Группа защиты %(id)s не существует"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "Правило группы защиты %(id)s не существует"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "Правило группы защиты уже существует. ИД правила: %(id)s."
-
-msgid "Segments and provider values cannot both be set."
-msgstr "Нельзя одновременно задавать значения сегментов и поставщика."
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"Отправить уведомление nova в случае изменения данных порта (fixed_ips/"
-"floatingip), чтобы обеспечить обновление кэша nova."
-
-msgid "Send notification to nova when port status changes"
-msgstr "Отправить уведомление nova в случае изменения состояния порта"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"Отправить указанное количество уведомлений ARP для настройки высокой "
-"готовности. Нулевое или отрицательное значение выключает эту функцию"
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-"Поставщик службы '%(provider)s' не найден для типа службы %(service_type)s"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr "Тип службы %(service_type)s не содержит поставщика службы по умолчанию"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"Задать новый тайм-аут (в секундах) для новых вызовов rpc после получения "
-"агентом сигнала SIGTERM. При значении 0 тайм-аут rpc не может быть изменен"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"Установка/сброс бита Не разбивать на фрагменты (DF) в исходящем пакете IP, "
-"несущем туннель GRE/VXLAN."
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"Некоторые арендаторы имеют несколько групп защиты с именем 'по умолчанию': "
-"%(duplicates)s. Перед обновлением базы данных необходимо определить все "
-"группы защиты 'по умолчанию'."
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr ""
-"Указание 'tenant_id', отличного от идентифицированного арендатора в запросе, "
-"требует прав доступа администратора"
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr ""
-"Маска подсети для интерфейса маршрутизатора должна иметь IP-адрес шлюза"
-
-msgid "Subnet pool has existing allocations"
-msgstr "Пул подсетей имеет существующие выделения"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr ""
-"Подсеть, используемая для сети администрирования высокой готовности L3."
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr ""
-"Общесистемный флаг для определения типа маршрутизаторов, которые арендаторы "
-"могут создавать. Может быть переопределен только администратором."
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "Порт TCP для приема запросов сервера метаданных."
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr "Порт TCP, применяемый прокси пространства имен метаданных."
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Порт TCP, используемый сервером метаданных Nova."
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "TLD '%s' не должен быть полностью числовым"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "TOS для пакетов протокола интерфейса vxlan."
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "TTL для пакетов протокола интерфейса vxlan."
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr ""
-"Арендатору %(tenant_id)s не разрешено создание ресурса %(resource)s в этой "
-"сети"
-
-msgid "Tenant network creation is not enabled."
-msgstr "Создание сети арендатора не разрешено."
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"Для этого агента необходимо настроить опцию 'gateway_external_network_id', "
-"так как Neutron имеет несколько внешних сетей."
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr ""
-"В файле конфигурации указан недопустимый адрес CIDR сети высокой готовности. "
-"%(cidr)s."
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "Порт UDP, применяемый для туннелей VXLAN."
-
-msgid "The advertisement interval in seconds"
-msgstr "Интервал объявления в секундах"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "Пул выделения %(pool)s недопустим."
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-"Пул выделения %(pool)s выходит за пределы cidr подсети %(subnet_cidr)s."
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr ""
-"Атрибут '%(attr)s' является ссылкой на другой ресурс и не может "
-"использоваться для сортировки '%(resource)s'"
-
-msgid "The core plugin Neutron will use"
-msgstr "Будет использоваться базовый модуль Neutron"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "драйвер, используемый для управления сервером DHCP."
-
-msgid "The driver used to manage the virtual interface."
-msgstr "Драйвер, используемый для управления виртуальным интерфейсом."
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"Следующий device_id %(device_id)s не принадлежит вашему арендатору или "
-"соответствует маршрутизатору другого арендатора."
-
-msgid "The host IP to bind to"
-msgstr "IP-адрес хоста для подключения к"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "Интерфейс для взаимодействия с OVSDB"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-"Максимальное количество элементов, возвращаемых в одном ответе; значение "
-"было 'infinite' или отрицательным целым, что означает бесконечное число"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr "Сеть %(network_id)s уже была размещена агентом DHCP %(agent_id)s."
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr "Сеть %(network_id)s не размещена агентом DHCP %(agent_id)s."
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr "Число разрешенных пар адресов превышает максимальное %(quota)s."
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr ""
-"Интервал опроса агентом локальных устройств на предмет наличия изменений."
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-"Время ожидания, в секундах, повторного порождения монитора ovsdb после "
-"потери соединения с ним."
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "Количество sort_keys и sort_dirs должно быть одинаковым"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "Порт '%s' был удален"
-
-msgid "The port to bind to"
-msgstr "Порт для подключения к"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "Запрашиваемый тип содержимого %s является недопустимым."
-
-msgid "The resource could not be found."
-msgstr "Ресурс не найден."
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr "Маршрутизатор %(router_id)s уже был размещен агентом L3 %(agent_id)s."
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr ""
-"На сервере возникла ошибка, или он не поддерживает выполнение запрошенной "
-"операции."
-
-msgid "The service plugins Neutron will use"
-msgstr "Будут использоваться модули служб Neutron"
-
-msgid "The type of authentication to use"
-msgstr "Применяемый тип идентификации"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "Значение %(value)s для %(element)s недопустимо."
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"Режим работы агента. Допустимые режимы: 'legacy' - сохраняет поведение, при "
-"котором агент L3 развернут на централизованном сетевом узле для "
-"предоставления служб L3, таких как DNAT и SNAT. Этот режим используется, "
-"если внедрять DVR не целесообразно. 'dvr' - этот режим включает "
-"функциональность DVR и должен использоваться для агентов L3, работающих на "
-"вычислительном хосте. 'dvr_snat' - этот режим включает поддержку "
-"централизованного SNAT в дополнение к DVR. Данный режим должен "
-"использоваться для агентов L3, работающих на централизованном узле (или в "
-"однохостовых развертываниях, таких как devstack)"
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"True - удалить все порты для всех мостов OpenvSwitch. False - удалить порты, "
-"созданные Neutron для мостов интеграции и внешних сетей."
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "Для модуля ML2 требуется значение IP-адреса туннеля"
-
-msgid "Tunnel bridge to use."
-msgstr "Используемый мост туннеля."
-
-msgid "URL to database"
-msgstr "URL базы данных"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "Ошибка доступа к %s"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr ""
-"Не удалось выполнить операцию для %(router_id)s. Число маршрутизаторов "
-"превышает допустимый максимум, равный %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"Невозможно выполнить операцию для %(subnet_id)s. Число серверов имен DNS "
-"превышает допустимый максимум %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr ""
-"Невозможно выполнить операцию для %(subnet_id)s. Число маршрутов хоста "
-"превышает допустимый максимум %(quota)s."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-"Невозможно выполнить операцию для сети %(net_id)s. IP-адрес %(ip_address)s "
-"занят."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr ""
-"Невозможно выполнить операцию для сети %(net_id)s. MAC-адрес %(mac)s занят."
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"Не удалось выполнить операцию в сети %(net_id)s. Один или несколько портов "
-"по-прежнему используются в этой сети."
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"Невозможно выполнить операцию над портом %(port_id)s для сети %(net_id)s. К "
-"порту уже подключено устройство %(device_id)s."
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "Невозможно преобразовать значение в %s"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "Не удалось создать порт шлюза агента"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "Не удалось создать порт интерфейса SNAT"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr ""
-"Невозможно создать одноуровневую сеть. Физическая сеть %(physical_network)s "
-"занята."
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr ""
-"Не удалось создать сеть. Не найдена доступная сеть за максимальное число "
-"попыток."
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr "Невозможно создать сеть. Нет доступной сети арендатора для выделения."
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"Невозможно создать сеть. VLAN %(vlan_id)s в физической сети "
-"%(physical_network)s занята."
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr "Невозможно создать сеть. ИД туннеля %(tunnel_id)s занят."
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "Невозможно определить mac-адрес для %s"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "Отсутствует '%s' в теле запроса"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr "Во внешней сети %(net_id)s не найден ни один IP-адрес."
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "В %s не найдено имя ресурса"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "Невозможно сгенерировать IP-адрес с помощью EUI64 для префикса IPv4"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "Не удалось создать уникальный MAC-адрес DVR для хоста %(host)s."
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "Невозможно сгенерировать уникальный mac в сети %(net_id)s."
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-"Невозможно идентифицировать целевое поле из %s. Совпадение должно быть в "
-"форме %%(<field_name>)s"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr ""
-"Невозможно проверить совпадение %(match)s, так как родительский ресурс "
-"%(res)s не найдено"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "Непредвиденный код ответа: %s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "Непредвиденный ответ: %s"
-
-msgid "Unimplemented commands"
-msgstr "Нереализованные команды"
-
-msgid "Unknown API version specified"
-msgstr "Указана неизвестная версия API"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "Неизвестный атрибут '%s'."
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "Неизвестная цепочка: %r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "Неизвестные ресурсы квоты: %(unknown)s."
-
-msgid "Unmapped error"
-msgstr "Ошибка без преобразования"
-
-msgid "Unrecognized action"
-msgstr "Неизвестное действие"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "Нераспознаваемые атрибуты '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "Не поддерживаемый  тип содержимого"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "Неподдерживаемый тип сети %(net_type)s."
-
-msgid "Unsupported request type"
-msgstr "Неподдерживаемый тип запроса"
-
-msgid "Updating default security group not allowed."
-msgstr "Обновление группы защиты по умолчанию не разрешено."
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"Использовать драйвер механизма ML2 l2population для определения удаленных "
-"MAC- и IP-адресов и улучшения масштабируемости туннеля."
-
-msgid "Use broadcast in DHCP replies"
-msgstr "Использовать широковещательные пакеты в ответах DHCP"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr "Используйте или --delta, или относительную ревизию, но не оба"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr ""
-"Пользователь (uid или имя) использует proxy метаданных после инициализации"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"Пользователь (uid или имя) использует proxy метаданных после инициализации "
-"(если пустое, используется пользователь агента). "
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr "Пользователь (uid или имя) запускает этот процесс после инициализации"
-
-msgid "VRRP authentication password"
-msgstr "Пароль идентификации VRRP"
-
-msgid "VRRP authentication type"
-msgstr "Тип идентификации VRRP"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"Проверка ключей словаря не выполнена. Ожидаемые ключи: %(expected_keys)s "
-"Заданные ключи: %(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "Валидатор '%s' не существует."
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "Значение %(value)s в отображении '%(mapping)s' не уникально"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"Отслеживать протокол. Отслеживание протокола должно быть выключено, когда "
-"metadata_proxy_user/group не имеет прав доступа на чтение/запись файла "
-"протокола  прокси метаданных. "
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr ""
-"Расположение хранения файлов состояния Neutron. Этот каталог должен быть "
-"доступен для записи агентом."
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"При использовании IPv6 применяемой для внешнего шлюза сети не обязательно "
-"иметь связанную подсеть, так как может быть использован автоматически "
-"назначаемый адрес link-local (LLA). Однако, адрес шлюза IPv6 необходим в "
-"качестве следующего узла для маршрута по умолчанию. Если адрес шлюза IPv6 не "
-"указан здесь, (и только в этом случае) будет настроен маршрутизатор Neutron "
-"для получения маршрута по умолчанию из объявлений маршрутизатора (RA) от "
-"маршрутизатора выше по течению. В этом случае маршрутизатор выше по течению "
-"должен быть также настроен для отправки этих RA. Когда указано значение "
-"ipv6_gateway, оно должно указывать на LLA интерфейса маршрутизатора выше по "
-"течению. Если следующий узел, использующийглобальный уникальный адрес (GUA), "
-"является предпочитаем, это необходимо обеспечить посредством подсети, "
-"выделенной для сети, а не с помощью этого параметра. "
-
-msgid "You must implement __call__"
-msgstr "Отсутствует реализация __call__"
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-"Необходимо задать файл конфигурации для моста, или --config-file, или "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "Необходимо указать ревизию или относительную дельта"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr "allocation_pools разрешено только для определенных запросов подсетей. "
-
-msgid "binding:profile value too large"
-msgstr "Слишком большое значение binding:profile"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "cidr и prefixlen не должны быть указаны вместе"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr ""
-"Значение dhcp_agents_per_network должно быть >= 1. Значение '%s' недопустимо."
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "fixed_ip_address должен указываться вместе с port_id"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "имеет владельца устройства %s"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "Не удалось выполнить команду ip на устройстве %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "Функция ip link %(capability)s не поддерживается"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "Команда ip link не поддерживается: %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr "ip_version должно быть указано при отсутствии cidr and subnetpool_id"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ipv6_address_mode недопустим, когда ip_version - 4"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ipv6_ra_mode недопустим, когда ip_version - 4"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"Невозможно задать ipv6_ra_mode или ipv6_address_mode, если для enable_dhcp "
-"задано значение False."
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"Для ipv6_ra_mode задано значение '%(ra_mode)s', а значение "
-"ipv6_address_mode, заданное как '%(addr_mode)s', является недопустимым. Если "
-"указаны оба атрибута, их значения должны совпадать"
-
-msgid "mac address update"
-msgstr "Обновление mac-адреса"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"Недопустимый параметр конфигурации max_l3_agents_per_router %(max_agents)s. "
-"Он должен быть больше либо равен min_l3_agents_per_router %(min_agents)s."
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"Недопустимый параметр конфигурации min_l3_agents_per_router. Он должен быть "
-"не меньше %s для высокой готовности."
-
-msgid "network_type required"
-msgstr "Требуется network_type"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "Для network_type не поддерживается значение '%s'"
-
-msgid "new subnet"
-msgstr "новая подсеть"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "Неизвестная физическая сеть '%s' для сети VLAN провайдера"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "Неизвестная физическая сеть '%s' для одноуровневой сети провайдера"
-
-msgid "physical_network required for flat provider network"
-msgstr "Для одноуровневой сети провайдера требуется physical_network"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "Для сети %s указан provider:physical_network"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "Значение respawn_interval, если оно указано, должно быть >= 0."
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id вне диапазона (%(min)s - %(max)s)"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr ""
-"Для segmentation_id требуется physical_network для сети VLAN провайдера"
-
-msgid "the nexthop is not connected with router"
-msgstr "следующий узел не соединен с маршрутизатором"
-
-msgid "the nexthop is used by router"
-msgstr "следующий узел используется маршрутизатором"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"UUID передан из командной строки. Это позволяет внешнему процессу "
-"отслеживать через интерфейс /proc/cmdline."
diff --git a/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po b/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po
deleted file mode 100644 (file)
index 985d582..0000000
+++ /dev/null
@@ -1,827 +0,0 @@
-# Translations template for neutron.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-#
-# Translators:
-# ADİL REŞİT DURSUN <ardursun@deltanoc.com>, 2015
-# Alper Çiftçi <alprciftci@gmail.com>, 2015
-# Zana iLHAN <zanailhan@gmail.com>, 2015
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-08-20 03:49+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Plural-Forms: nplurals=1; plural=0;\n"
-"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.3\n"
-
-#, python-format
-msgid ""
-"%(service)s for %(resource_type)s with uuid %(uuid)s not found. The process "
-"should not have died"
-msgstr ""
-"uuid %(uuid)s ile %(resource_type)s için %(service)s bulunamadı!, İşlem "
-"sonlanmamış olmalı."
-
-#, python-format
-msgid "%s Agent terminated!"
-msgstr "%s Ajanı sonlandırıldı!"
-
-#, python-format
-msgid "%s failed"
-msgstr "%s başarısız"
-
-#, python-format
-msgid ""
-"%s used in config as ipv6_gateway is not a valid IPv6 link-local address."
-msgstr ""
-"ipv6_gateway geçerli bir IPv6 link-local adresi olmadığından yapılandırmada "
-"%s kullanıldı."
-
-#, python-format
-msgid ""
-"'rpc_workers = %d' ignored because start_rpc_listeners is not implemented."
-msgstr ""
-"henüz start_rpc_listeners implemente edilmediği için 'rpc_workers = %d' göz "
-"ardı edildi."
-
-msgid "Agent Initialization Failed"
-msgstr "Ajan İlklendirme Başarısız"
-
-#, python-format
-msgid "An error occurred while communicating with async process [%s]."
-msgstr "[%s] asenkron işlem ile haberleşirken bir hata oluştu."
-
-#, python-format
-msgid "An error occurred while killing [%s]."
-msgstr "[%s] sonlandırılırken bir hata oluştu."
-
-#, python-format
-msgid "An exception occurred while creating the %(resource)s:%(item)s"
-msgstr "%(resource)s:%(item)s oluşturulurken bir istisna oluştu"
-
-msgid "An interface driver must be specified"
-msgstr "Bir arayüz sürücüsü belirtmeniz gerekmektedir"
-
-#, python-format
-msgid "Binding info for DVR port %s not found"
-msgstr "DVR bağlantı noktası %s için bağlama bilgisi bulunamadı"
-
-#, python-format
-msgid ""
-"Bridge %(bridge)s for physical network %(physical_network)s does not exist. "
-"Agent terminated!"
-msgstr ""
-"%(physical_network)s fiziksel ağı için %(bridge)s köprüsü mevcut değil. Ajan "
-"sonlandırıldı!"
-
-#, python-format
-msgid ""
-"Cannot provision %(network_type)s network for net-id=%(net_uuid)s - "
-"tunneling disabled"
-msgstr ""
-"net-id=%(net_uuid)s için %(network_type)s ağı hazırlanamıyor - tünelleme "
-"kapalı"
-
-#, python-format
-msgid ""
-"Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for "
-"physical_network %(physical_network)s"
-msgstr ""
-"net-id=%(net_uuid)s için VLAN ağı hazırlanamıyor - physical_network "
-"%(physical_network)s için köprü yok"
-
-#, python-format
-msgid ""
-"Cannot provision flat network for net-id=%(net_uuid)s - no bridge for "
-"physical_network %(physical_network)s"
-msgstr ""
-"net-id=%(net_uuid)s için düz ağ hazırlanamıyor - physical_network "
-"%(physical_network)s için köprü yok"
-
-#, python-format
-msgid ""
-"Cannot provision unknown network type %(network_type)s for net-id="
-"%(net_uuid)s"
-msgstr ""
-"net-id=%(net_uuid)s için %(network_type)s bilinmeyen ağ türü hazırlanamıyor"
-
-#, python-format
-msgid ""
-"Cannot reclaim unknown network type %(network_type)s for net-id=%(net_uuid)s"
-msgstr ""
-"net-id=%(net_uuid)s için bilinmeyen ağ türü %(network_type)s iadesi "
-"istenemiyor"
-
-msgid "Cannot run ebtables. Please ensure that it is installed."
-msgstr "ebtables çalıştırılamadı. Lütfen kurulu olduğundan emin olun."
-
-#, python-format
-msgid ""
-"Centralized-SNAT port %(port)s on subnet %(port_subnet)s already seen on a "
-"different subnet %(orig_subnet)s"
-msgstr ""
-"%(port_subnet)s alt ağındaki merkezi-SNAT %(port)s bağlantı noktası başka "
-"bir alt ağda %(orig_subnet)s görüldü"
-
-msgid ""
-"Check for Open vSwitch ARP responder support failed. Please ensure that the "
-"version of openvswitch being used has ARP flows support."
-msgstr ""
-"Open vSwitch ARP yanıtlayıcısı desteği kontrolü başarısız. Lütfen kullanılan "
-"openvswitch sürümünün ARP akışı desteği olduğundan emin olun."
-
-msgid ""
-"Check for Open vSwitch VXLAN support failed. Please ensure that the version "
-"of openvswitch being used has VXLAN support."
-msgstr ""
-"Open vSwitch VXLAN desteği kontrolü başarısız. Lütfen kullanılan openvswitch "
-"sürümünün VXLAN desteği olduğundan emin olun."
-
-msgid ""
-"Check for Open vSwitch patch port support failed. Please ensure that the "
-"version of openvswitch being used has patch port support or disable features "
-"requiring patch ports (gre/vxlan, etc.)."
-msgstr ""
-"Open vSwitch yama bağlantı noktası desteği kontrolü başarısız. Lütfen "
-"kullanılan openvswitch sürümünün yama bağlantı noktası desteği olduğundan "
-"emin olun ya da yama bağlantı noktalarına ihtiyaç duyan özellikleri kapatın "
-"(gre/vxlan, vs.)."
-
-msgid ""
-"Check for Open vSwitch support of ARP header matching failed. ARP spoofing "
-"suppression will not work. A newer version of OVS is required."
-msgstr ""
-"Open vSwitch ARP başlığı eşleşme desteği kontrolü başarısız. ARP yanıltma "
-"önleme çalışmayacak. Daha yeni sürüm OVS gerekiyor."
-
-msgid ""
-"Check for VF management support failed. Please ensure that the version of ip "
-"link being used has VF support."
-msgstr ""
-"VF yönetim desteği kontrolü başarısız. Lütfen kullanılan ip bağlantısı "
-"sürümünün VF desteği olduğundan emin olun."
-
-msgid ""
-"Check for iproute2 VXLAN support failed. Please ensure that the iproute2 has "
-"VXLAN support."
-msgstr ""
-"Iproute2 VXLAN desteği kontrolü başarısız. iproute2'nin VXLAN desteği "
-"olduğundan emin olun."
-
-msgid "Check for native OVSDB support failed."
-msgstr "Doğal OVSDB desteği kontrolü başarısız."
-
-#, python-format
-msgid "Could not delete %(res)s %(id)s."
-msgstr "%(res)s %(id)s silinemedi."
-
-#, python-format
-msgid "Could not find %s to delete."
-msgstr "%s silmek için bulunamadı."
-
-#, python-format
-msgid "Could not retrieve gateway port for subnet %s"
-msgstr "Alt ağ %s için geçit bağlantı noktası alınamadı"
-
-#, python-format
-msgid "DVR: Duplicate DVR router interface detected for subnet %s"
-msgstr "DVR: %s alt ağı için çift DVR yönlendirici arayüzü algılandı"
-
-msgid ""
-"DVR: Failed to obtain a valid local DVR MAC address - L2 Agent operating in "
-"Non-DVR Mode"
-msgstr ""
-"DVR: Geçerli yerel DVR MAC adresi elde etme başarısız - L2 Ajan Non-DVR "
-"kipinde işletiliyor"
-
-msgid "DVR: Failed updating arp entry"
-msgstr "DVR: arp kayıt güncelleme hatası"
-
-#, python-format
-msgid "DVR: Unable to retrieve subnet information for subnet_id %s"
-msgstr "DVR: %s subnet_id için alt ağ bilgisi getirilemedi"
-
-msgid "DVR: error adding redirection logic"
-msgstr "DVR: yönlendirme mantığı ekleme hatası"
-
-#, python-format
-msgid "Driver %(driver)s does not implement %(func)s"
-msgstr "Sürücü %(driver)s %(func)s yi uygulamıyor"
-
-#, python-format
-msgid "Driver %(driver)s:%(func)s runtime error"
-msgstr "Sürücü %(driver)s:%(func)s çalışma zamanı hatası"
-
-#, python-format
-msgid "Error during notification for %(callback)s %(resource)s, %(event)s"
-msgstr "%(callback)s %(resource)s için bilgilendirme sırasında hata, %(event)s"
-
-msgid "Error executing command"
-msgstr "Komut çalıştırırken hata"
-
-#, python-format
-msgid "Error fetching extended attributes for extension '%s'"
-msgstr " '%s' uzantısına dair özellikler getirilirken hata oluştu."
-
-#, python-format
-msgid "Error in agent loop. Devices info: %s"
-msgstr "Ajan döngüsünde hata. Aygıt bilgisi: %s"
-
-#, python-format
-msgid "Error loading provider '%(provider)s' for service %(service_type)s"
-msgstr ""
-"%(service_type)s servisi için '%(provider)s' sağlayıcısını yüklemede hata"
-
-#, python-format
-msgid "Error response returned from nova: %s"
-msgstr "Nova'dan hata yanıtı döndü: %s"
-
-#, python-format
-msgid "Error unable to destroy namespace: %s"
-msgstr "Hata, isim uzayı: %s silinemedi"
-
-#, python-format
-msgid "Error while deleting router %s"
-msgstr "Yönlendirici %s silinirken hata"
-
-#, python-format
-msgid "Error while handling pidfile: %s"
-msgstr "%s pid dosyası işlenirken bir hata oluştu"
-
-msgid "Error while processing VIF ports"
-msgstr "VIF bağlantı noktaları işlenirken hata"
-
-msgid "Error while synchronizing tunnels"
-msgstr "Tüneller eş zamanlanırken hata"
-
-#, python-format
-msgid "Error while writing HA state for %s"
-msgstr "%s için HA durumu yazılırken hata"
-
-#, python-format
-msgid "Error, unable to destroy IPset: %s"
-msgstr "Hata, IPset: %s silinemedi"
-
-#, python-format
-msgid "Error, unable to remove iptables rule for IPset: %s"
-msgstr "Hata, IPset: %s için iptables kuralı kaldırılamıyor"
-
-#, python-format
-msgid ""
-"Exceeded maximum binding levels attempting to bind port %(port)s on host "
-"%(host)s"
-msgstr ""
-"%(host)s istemcisi üzerinde %(port)s bağlantı noktasına bağlanma girişiminde "
-"azami bağlanma seviyesi aşıldı"
-
-#, python-format
-msgid "Exception auto-deleting port %s"
-msgstr "%s bağlanı noktasını otomatik silme sırasında istisna"
-
-#, python-format
-msgid "Exception auto-deleting subnet %s"
-msgstr "%s alt ağını otomatik silme sırasında istisna"
-
-#, python-format
-msgid "Exception deleting fixed_ip from port %s"
-msgstr "%s bağlantı noktasından fixed_ip silinirken istisna"
-
-msgid "Exception encountered during network rescheduling"
-msgstr "Ağ yeniden zamanlama sırasında istisna oluştu"
-
-msgid "Exception encountered during router rescheduling."
-msgstr "Yönlendirici yeniden zamanlama sırasında istisna oluştu."
-
-msgid "Exception occurs when timer stops"
-msgstr "Zamanlayıcı durmaya çalışırken hata oluşur."
-
-msgid "Exception occurs when waiting for timer"
-msgstr "Zamanlayıcıyı beklerken hata oluşur"
-
-msgid "Exiting agent as programmed in check_child_processes_actions"
-msgstr ""
-"check_child_processes_actions deki programlanan ajan/işlevden çıkılıyor "
-
-#, python-format
-msgid ""
-"Exiting agent because of a malfunction with the %(service)s process "
-"identified by uuid %(uuid)s"
-msgstr ""
-"%(uuid)s ile tanımlanan %(service)s işlemlerden bir uyumsuzluk hatasından "
-"dolayı çıkılıyor"
-
-#, python-format
-msgid "Extension driver '%(name)s' failed in %(method)s"
-msgstr "Eklenti sürücüsü '%(name)s' %(method)s içerisinde başarısız"
-
-#, python-format
-msgid "Extension path '%s' doesn't exist!"
-msgstr "'%s' Uzantı dizini bulunamıyor."
-
-#, python-format
-msgid "FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"
-msgstr "fw: %(fwid)s için %(func_name)s içinde FWaaS RPC hatası"
-
-#, python-format
-msgid "FWaaS RPC info call failed for '%s'."
-msgstr "'%s' için FWaaS RPC bilgi çağrısı başarısız"
-
-#, python-format
-msgid "Failed creating vxlan interface for %(segmentation_id)s"
-msgstr "%(segmentation_id)s için vxlan arayüzü oluşturma başarısız"
-
-#, python-format
-msgid "Failed deleting egress connection state of floatingip %s"
-msgstr ""
-"%s floatingip bağlantısının çıkış sevye durumu silinmeye çalışılırken bir "
-"hata ile karşılaştı."
-
-#, python-format
-msgid "Failed deleting ingress connection state of floatingip %s"
-msgstr ""
-"%s floatingip bağlantısının giris sevye durumu silinmeye çalışılırken bir "
-"hata ile karşılaştı."
-
-msgid "Failed executing ip command"
-msgstr "IP comutu çalıştırılamadı"
-
-msgid "Failed fwaas process services sync"
-msgstr "fwaas süreç servisleri eş zamanlama başarısız"
-
-msgid "Failed on Agent configuration parse. Agent terminated!"
-msgstr "Ajan yapılandırma aşamasında başarısız olundu. Ajan sonlandırıldı!"
-
-msgid "Failed reporting state!"
-msgstr "Raporlama durumu sağlanamıyor."
-
-#, python-format
-msgid ""
-"Failed sending gratuitous ARP to %(addr)s on %(iface)s in namespace %(ns)s"
-msgstr ""
-"%(ns)s bilinirlik alanında bulunan %(iface)s deki %(addr)s ne gereksiz/ ARP "
-"gönderilemedi."
-
-msgid "Failed synchronizing routers"
-msgstr "Yönlendiricileri eş zamanlama başarısız"
-
-msgid "Failed synchronizing routers due to RPC error"
-msgstr "RPC hatasından dolayı yönlendirici senkronizasyonu sağlanamıyor"
-
-#, python-format
-msgid "Failed to bind port %(port)s on host %(host)s"
-msgstr ""
-"%(host)s istemcisi üzerindeki %(port)s bağlantı noktasına bağlanılamadı"
-
-#, python-format
-msgid "Failed to commit binding results for %(port)s after %(max)s tries"
-msgstr ""
-"%(port)s için bağlama sonuçlarını gönderme %(max)s denemeden sonra başarısız "
-"oldu"
-
-msgid ""
-"Failed to create OVS patch port. Cannot have tunneling enabled on this "
-"agent, since this version of OVS does not support tunnels or patch ports. "
-"Agent terminated!"
-msgstr ""
-"OVS yama bağlantı noktası oluşturma başarısız. Bu ajanda tünelleme "
-"etkinleştirilemez, çünkü bu OVS sürümü tünelleri ya da yama bağlantı "
-"noktalarını desteklemiyor. Ajan sonlandırıldı!"
-
-#, python-format
-msgid "Failed to destroy stale namespace %s"
-msgstr "Vadesi geçmiş isim uzayı %s silinemedi"
-
-#, python-format
-msgid "Failed to fetch router information for '%s'"
-msgstr "%s icin yönlendirici bilgisine erisilemiyor"
-
-#, python-format
-msgid "Failed to get devices for %s"
-msgstr "%s için aygıtları alma başarısız"
-
-#, python-format
-msgid "Failed to get traffic counters, router: %s"
-msgstr "Trafik sayaçları alınamadı, yönlendirici: %s"
-
-#, python-format
-msgid ""
-"Failed to import required modules. Ensure that the python-openvswitch "
-"package is installed. Error: %s"
-msgstr ""
-"Gerekli modülleri içe aktarma başarısız. python-openvswitch paketinin kurulu "
-"olduğuna emin olun. Hata: %s"
-
-#, python-format
-msgid "Failed to notify nova on events: %s"
-msgstr "Nova şu olaylar üzerine bilgilendirilemiyor: %s"
-
-msgid "Failed to parse network_vlan_ranges. Service terminated!"
-msgstr "network_vlan_ranges ayrıştırma başarısız. Servis sonlandırıldı!"
-
-msgid "Failed to parse supported PCI vendor devices"
-msgstr "Desteklenen PCI satıcı aygıtları ayrıştırma başarısız"
-
-msgid "Failed to parse tunnel_id_ranges. Service terminated!"
-msgstr "tunnel_id_ranges ayrıştırma başarısız. Servis sonlandırıldı!"
-
-msgid "Failed to parse vni_ranges. Service terminated!"
-msgstr "vni_ranges ayrıştırma başarısız. Servis sonlandırıldı!"
-
-#, python-format
-msgid "Failed to process compatible router '%s'"
-msgstr "Uyumlu '%s' yönlendirici bilgisi işlenemiyor"
-
-#, python-format
-msgid "Failed to process or handle event for line %s"
-msgstr "%s satırı için olay ele alınamıyor ya da işlenemiyor"
-
-#, python-format
-msgid "Failed to release segment '%s' because network type is not supported."
-msgstr "'%s' dilimi bırakılamadı çünkü ağ türü desteklenmiyor."
-
-#, python-format
-msgid "Failed to reschedule router %s"
-msgstr "Yönlendirici %s yeniden zamanlama başarısız"
-
-#, python-format
-msgid "Failed to schedule network %s"
-msgstr "Ağ %s zamanlama başarısız"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "%(ip)s'ye %(type)s tünel bağlantı noktası kurulumu başarısız"
-
-#, python-format
-msgid "Failed trying to delete namespace: %s"
-msgstr "Bilinirlik alanı silme hatası: %s"
-
-#, python-format
-msgid "Failed unplugging interface '%s'"
-msgstr "%s arayuzu devre dışı bırakılamadı."
-
-#, python-format
-msgid "Firewall Driver Error for %(func_name)s for fw: %(fwid)s"
-msgstr "fw: %(fwid)s için %(func_name)s için Güvenlik Duvarı Hatası"
-
-#, python-format
-msgid "Firewall Driver Error on fw state %(fwmsg)s for fw: %(fwid)s"
-msgstr ""
-"fw: %(fwid)s için %(fwmsg)s fw durumunda Güvenlik Duvarı Sürücüsü Hatası"
-
-msgid "Fork failed"
-msgstr "Fork yapılırken hata ile karşılaşıldı."
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply aşağıdakı iptables bilgileri uygulanamadı\n"
-"%s"
-
-msgid "Interface monitor is not active"
-msgstr "Arayüz izleme etkin değil"
-
-msgid "Internal error"
-msgstr "İçsel hata"
-
-#, python-format
-msgid "InvalidContentType: %s"
-msgstr "UyumsuzİçerikTipi: %s"
-
-#, python-format
-msgid ""
-"It was impossible to process the following extensions: %s because of missing "
-"requirements."
-msgstr ""
-"Belirtilen uzantılar çalıştırılması mümkün olamıyor: %s dair eksik "
-"ihtiyaclardan dolayı."
-
-#, python-format
-msgid "MAC generation error after %s attempts"
-msgstr "%s denemeden sonra MAC üretme hatası"
-
-#, python-format
-msgid "MalformedRequestBody: %s"
-msgstr "BozukİstekGövdesi: %s"
-
-#, python-format
-msgid "Mechanism driver %s failed in bind_port"
-msgstr "Mekanizma sürücüsü %s bind_port başarısız"
-
-#, python-format
-msgid "Mechanism driver '%(name)s' failed in %(method)s"
-msgstr "Mekanizma sürücüsü '%(name)s' %(method)s içinde başarısız oldu"
-
-#, python-format
-msgid ""
-"Message received from the host: %(host)s during the registration of "
-"%(agent_name)s has a timestamp: %(agent_time)s. This differs from the "
-"current server timestamp: %(serv_time)s by %(diff)s seconds, which is more "
-"than the threshold agent downtime: %(threshold)s."
-msgstr ""
-"%(agent_name)s kaydı sırasında %(host)s istemcisinden alınan iletinin "
-"%(agent_time)s zaman damgası var. Bu mevcut sunucu zaman damgası: "
-"%(serv_time)s ile %(diff)s saniye farklı, ki bu %(threshold)s eşik ajan "
-"aksama süresinden fazla."
-
-msgid "Missing subnet/agent_gateway_port"
-msgstr "Eksik subnet/agent_gateway_port"
-
-#, python-format
-msgid "Multiple ports have port_id starting with %s"
-msgstr "Birden çok bağlantı noktası %s port_id ile başlıyor"
-
-#, python-format
-msgid "Network %s has no segments"
-msgstr "%s ağının dilimi yok"
-
-#, python-format
-msgid "Network %s info call failed."
-msgstr " %s ağ bilgi çağırısı yapılamıyor."
-
-#, python-format
-msgid ""
-"No FloatingIP agent gateway port returned from server for 'network-id': %s"
-msgstr ""
-"Sunucudan 'network-id': %s için DeğişkenIP ajan geçit bağlantı noktası "
-"dönmedi"
-
-#, python-format
-msgid "No Host supplied to bind DVR Port %s"
-msgstr "%s DVR Bağlantı noktasına bağlanma için istemci sağlanmadı"
-
-msgid "No known API applications configured."
-msgstr "Hiçi bir tanımlı API uygulaması konfigüre edilmedi."
-
-#, python-format
-msgid "No local VLAN available for net-id=%s"
-msgstr "net-id=%s için uygun yerel VLAN yok"
-
-msgid "No plugin for L3 routing registered to handle router scheduling"
-msgstr ""
-"Yönlendirici zamanlamayı işlemesi için L3 yönlendirme için kaydedilmiş "
-"eklenti yok"
-
-#, python-format
-msgid ""
-"No plugin for L3 routing registered. Cannot notify agents with the message %s"
-msgstr ""
-"L3 yönlendirme için eklenti kaydedilmemiş. Ajanlar %s iletisiyle "
-"bilgilendirilemiyor"
-
-msgid "No tunnel_ip specified, cannot delete tunnels"
-msgstr "tunnel_ip belirtilmemiş, tüneller silinemiyor"
-
-msgid "No tunnel_type specified, cannot create tunnels"
-msgstr "tunnel_type belirtilmemiş, tünel oluşturulamıyor"
-
-msgid "No tunnel_type specified, cannot delete tunnels"
-msgstr "tunnel_type belirtilmemiş, tüneller silinemiyor"
-
-#, python-format
-msgid "No type driver for external network_type: %s. Service terminated!"
-msgstr "Harici network_type: %s için tür sürücüsü yok. Servis sonlandırıldı!"
-
-#, python-format
-msgid "No type driver for tenant network_type: %s. Service terminated!"
-msgstr "Kiracı network_type: %s için tür sürücüsü yok. Servis sonlandırıldı!"
-
-msgid "No valid Segmentation ID to perform UCAST test."
-msgstr "UCAST testi yapmak için geçerli Dilimlendirme ID'si yok."
-
-#, python-format
-msgid "Not enough candidates, a HA router needs at least %s agents"
-msgstr "Yeterli aday yok, bir HA yönlendirici en az %s ajana ihtiyaç duyar"
-
-msgid ""
-"Nova notifications are enabled, but novaclient is not installed. Either "
-"disable nova notifications or install python-novaclient."
-msgstr ""
-"Nova iletileri etkin, ama novaclient kurulu değil. Ya nova iletilerini "
-"kapatın ya da python-novaclient kurun."
-
-#, python-format
-msgid "OVS flows could not be applied on bridge %s"
-msgstr "OVS akışları  %s köprüsüne uygulanamıyor."
-
-#, python-format
-msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!"
-msgstr ""
-"physical_interface_mappings ayrıştırma başarısız: %s. Ajan sonlandırıldı!"
-
-#, python-format
-msgid "Pidfile %s already exist. Daemon already running?"
-msgstr "%s Pid zaten mevcut. Servis zaten calisiyor?"
-
-#, python-format
-msgid "Policy check error while calling %s!"
-msgstr "%s cağrılırken politika doğrulama hatası oluştu!"
-
-#, python-format
-msgid "Removing incompatible router '%s'"
-msgstr "Uygunsuz '%s' yönlendirici bilgisi kaldırılıyor"
-
-msgid "RuntimeError in obtaining namespace list for namespace cleanup."
-msgstr ""
-"İsim uzayı temizliği için isim uzayı listesi elde edilirken RuntimeError."
-
-#, python-format
-msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid"
-msgstr ""
-"%(port)s bağlantı noktası için serileştirilmiş profil DB değeri '%(value)s' "
-"geçersiz"
-
-#, python-format
-msgid ""
-"Serialized vif_details DB value '%(value)s' for port %(port)s is invalid"
-msgstr ""
-"%(port)s bağlantı noktası için serileştirilmiş vif_details DB değeri "
-"'%(value)s' geçersiz"
-
-#, python-format
-msgid "The external network bridge '%s' does not exist"
-msgstr "%s harici ağ geçidi mevcut degil"
-
-#, python-format
-msgid ""
-"The installed version of dnsmasq is too old. Please update to at least "
-"version %s."
-msgstr "Yüklü dnsmasq sürümü çok eski. Lütfen en az %s sürümüne güncelleyin."
-
-msgid ""
-"The user that is executing neutron does not have permissions to read the "
-"namespaces. Enable the use_helper_for_ns_read configuration option."
-msgstr ""
-"Neutron'u çalıştıran kullanıcının isim uzaylarını okuma yetkisi yok. "
-"use_helper_for_ns_read yapılandırma seçeneğini etkinleştirin."
-
-#, python-format
-msgid ""
-"Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s' is "
-"already registered for type '%(type)s'"
-msgstr ""
-"Tür sürücüsü '%(new_driver)s' atlandı çünkü tür sürücüsü '%(old_driver)s' "
-"'%(type)s' türü için zaten kaydedilmiş"
-
-#, python-format
-msgid "Unable to %(action)s dhcp for %(net_id)s."
-msgstr "%(net_id)s için  %(action)s  dhcp de yapılamıyor.  "
-
-#, python-format
-msgid "Unable to add %(interface)s to %(bridge_name)s! Exception: %(e)s"
-msgstr "%(interface)s %(bridge_name)s e eklenemedi. İstisna: %(e)s"
-
-#, python-format
-msgid "Unable to add vxlan interface for network %s"
-msgstr "%s ağı için vxlan arayüzü eklenemedi"
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "%s degeri dönüştürülemiyor"
-
-#, python-format
-msgid "Unable to execute %(cmd)s. Exception: %(exception)s"
-msgstr "%(cmd)s çalıştırılamadı. Hata: %(exception)s"
-
-#, python-format
-msgid "Unable to find agent %s."
-msgstr "%s ajanı bulunamıyor."
-
-#, python-format
-msgid "Unable to generate mac address after %s attempts"
-msgstr "%s denemeden sonra mac adresi üretilemedi"
-
-#, python-format
-msgid "Unable to listen on %(host)s:%(port)s"
-msgstr "%(host)s:%(port)s dinlenemiyor"
-
-msgid "Unable to obtain MAC address for unique ID. Agent terminated!"
-msgstr "Benzersiz ID için MAC adresi elde edilemedi. Ajan sonlandırıldı!"
-
-#, python-format
-msgid "Unable to parse route \"%s\""
-msgstr "\"%s\" rotası ayrıştırılamadı"
-
-#, python-format
-msgid "Unable to process HA router %s without HA port"
-msgstr "HA bağlantısı olmadan HA yönlendiricisi %s işlenemiyor"
-
-#, python-format
-msgid "Unable to sync network state on deleted network %s"
-msgstr "Silinmiş %s ağları için senkronizasyon sağlanamıyor"
-
-msgid "Unable to sync network state."
-msgstr "Ağ durumu senkronize edilemiyor."
-
-#, python-format
-msgid "Unable to undo add for %(resource)s %(id)s"
-msgstr "%(resource)s %(id)s için ekleme geri alınamıyor"
-
-msgid "Unexpected error."
-msgstr "Beklenmeyen hata."
-
-#, python-format
-msgid ""
-"Unexpected exception occurred while removing network %(net)s from agent "
-"%(agent)s"
-msgstr ""
-"%(net)s ağı %(agent)s ajanından kaldırılırken beklenmedik istisna oluştu"
-
-#, python-format
-msgid "Unexpected exception while checking supported feature via command: %s"
-msgstr ""
-"Şu komutla desteklenen özellik kontrolü yapılırken beklenmedik istisna: %s"
-
-msgid "Unexpected exception while checking supported ip link command"
-msgstr "Desteklenen ip bağlantısı komutu kontrol edilirken beklenmedik istisna"
-
-#, python-format
-msgid "Unknown network_type %(network_type)s for network %(network_id)s."
-msgstr "%(network_id)s ağı için bilinmeyen network_type %(network_type)s."
-
-msgid "Unrecoverable error: please check log for details."
-msgstr "Düzeltilemeyen hata: Lütfen detaylar için loglara bakınız."
-
-#, python-format
-msgid ""
-"Will not send event %(method)s for network %(net_id)s: no agent available. "
-"Payload: %(payload)s"
-msgstr ""
-"%(net_id)s ağı için %(method)s oalyı gönderilmeyecek: uygun ajan yok. "
-"Fayadalı yük: %(payload)s"
-
-#, python-format
-msgid "_bind_port_if_needed failed, deleting port '%s'"
-msgstr "_bind_port_if_needed başarısız, '%s' bağlantı noktası siliniyor"
-
-#, python-format
-msgid "_bind_port_if_needed failed. Deleting all ports from create bulk '%s'"
-msgstr ""
-"_bind_port_if_needed başarısız. '%s' toplu oluşturmasından tüm bağlantı "
-"noktaları siliniyor"
-
-#, python-format
-msgid ""
-"mechanism_manager.create_%(res)s_postcommit failed for %(res)s: "
-"'%(failed_id)s'. Deleting %(res)ss %(resource_ids)s"
-msgstr ""
-"mechanism_manager.create_%(res)s_postcommit %(res)s: '%(failed_id)s' için "
-"başarısız. %(res)ss %(resource_ids)s siliniyor"
-
-#, python-format
-msgid ""
-"mechanism_manager.create_network_postcommit failed, deleting network '%s'"
-msgstr ""
-"mechanism_manager.create_network_postcommit başarısız, '%s' ağı siliniyor"
-
-#, python-format
-msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'"
-msgstr ""
-"mechanism_manager.create_port_postcommit başarısız, '%s' bağlantı noktası "
-"siliniyor"
-
-#, python-format
-msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'"
-msgstr ""
-"mechanism_manager.create_subnet_postcommit başarısız, alt ağ '%s' siliniyor"
-
-msgid "mechanism_manager.delete_network_postcommit failed"
-msgstr "mechanism_manager.delete_network_postcommit başarısız"
-
-#, python-format
-msgid "mechanism_manager.delete_port_postcommit failed for port %s"
-msgstr ""
-"mechanism_manager.delete_port_postcommit %s bağlantı noktası için başarısız"
-
-msgid "mechanism_manager.delete_subnet_postcommit failed"
-msgstr "mechanism_manager.delete_subnet_postcommit başarısız"
-
-#, python-format
-msgid ""
-"process_ancillary_network_ports - iteration:%d - failure while retrieving "
-"port details from server"
-msgstr ""
-"process_ancillary_network_ports - yineleme:%d - sunucudan bağlantı noktası "
-"detaylarını alma başarısız"
-
-#, python-format
-msgid ""
-"process_network_ports - iteration:%d - failure while retrieving port details "
-"from server"
-msgstr ""
-"process_network_ports - yineleme:%d - sunucudan bağlantı noktası detaylarını "
-"alma başarısız"
-
-#, python-format
-msgid "tunnel_type %s not supported by agent"
-msgstr "tunnel_type %s ajan tarafından desteklenmiyor"
diff --git a/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po
deleted file mode 100644 (file)
index 5cf93e1..0000000
+++ /dev/null
@@ -1,575 +0,0 @@
-# Translations template for neutron.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-#
-# Translators:
-# ADİL REŞİT DURSUN <ardursun@deltanoc.com>, 2015
-# Alper Çiftçi <alprciftci@gmail.com>, 2015
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-08-21 01:06+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Plural-Forms: nplurals=1; plural=0;\n"
-"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.3\n"
-
-#, python-format
-msgid "%(action)s failed (client error): %(exc)s"
-msgstr "%(action)s başarısız (istemci hatası): %(exc)s"
-
-#, python-format
-msgid "%(method)s %(url)s"
-msgstr "%(method)s %(url)s"
-
-#, python-format
-msgid "%(prog)s version %(version)s"
-msgstr "%(prog)s sürüm %(version)s"
-
-#, python-format
-msgid "%(type)s ID ranges: %(range)s"
-msgstr "%(type)s ID aralığı: %(range)s"
-
-#, python-format
-msgid "%(url)s returned a fault: %(exception)s"
-msgstr "%(url)s hata döndürdü: %(exception)s"
-
-#, python-format
-msgid "%(url)s returned with HTTP %(status)d"
-msgstr "%(url)s HTTP %(status)d ile geri döndü"
-
-#, python-format
-msgid "%d probe(s) deleted"
-msgstr "%d sonda silindi"
-
-#, python-format
-msgid ""
-"Added segment %(id)s of type %(network_type)s for network %(network_id)s"
-msgstr "%(network_id)s ağı için %(network_type)s türünde %(id)s dilimi eklendi"
-
-#, python-format
-msgid "Adding %s to list of bridges."
-msgstr "%s köprü listesine ekleniyor."
-
-#, python-format
-msgid "Adding network %(net)s to agent %(agent)s on host %(host)s"
-msgstr "Ağ %(net)s %(host)s istemcisi üzerinde %(agent)s ajanına ekleniyor"
-
-#, python-format
-msgid "Agent %s already present"
-msgstr "Ajan %s zaten mevcut"
-
-#, python-format
-msgid "Agent Gateway port does not exist, so create one: %s"
-msgstr "Ajan geçit bağlantı noktası mevcut değil, bir tane oluştur: %s"
-
-msgid "Agent caught SIGHUP, resetting."
-msgstr "Ajan SIGHUP yakaladı, sıfırlanıyor."
-
-msgid "Agent caught SIGTERM, quitting daemon loop."
-msgstr "Ajan SIGTERM yakaladı, artalan işlemi döngüsünden çıkılıyor."
-
-msgid "Agent initialized successfully, now running... "
-msgstr "Ajan başarıyla ilklendirildi, şimdi çalıştırılıyor... "
-
-msgid "Agent out of sync with plugin!"
-msgstr "Ajan ve eklenti uyumsuz!"
-
-msgid "Agent tunnel out of sync with plugin!"
-msgstr "Ajan tüneli eklentiyle uyumsuz!"
-
-msgid ""
-"Allow sorting is enabled because native pagination requires native sorting"
-msgstr ""
-"Sıralamaya izin verme etkin çünkü doğal sayfalama doğal sıralamaya ihtiyaç "
-"duyar"
-
-#, python-format
-msgid "Allowable flat physical_network names: %s"
-msgstr "İzin verilebilecek düz fiziksel ağ isimleri: %s"
-
-msgid "Arbitrary flat physical_network names allowed"
-msgstr "Rastgele seçilmiş düz fiziksel ağ isimlerine izin verilmez"
-
-#, python-format
-msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s"
-msgstr "%(vlan_id)s net-id=%(net_uuid)s için yerel olarak atanıyor"
-
-#, python-format
-msgid "Attachment %s removed"
-msgstr "Eklenti %s kaldırıldı"
-
-#, python-format
-msgid ""
-"Attempt %(count)s to allocate a VRID in the network %(network)s for the "
-"router %(router)s"
-msgstr ""
-"%(network)s ağında %(router)s yönlendiricisi için VRID ayırmak için girişim "
-"%(count)s"
-
-#, python-format
-msgid "Attempt %(count)s to bind port %(port)s"
-msgstr "%(port)s bağlantı noktası bağlama için girişim %(count)s"
-
-#, python-format
-msgid "Attempted to remove port filter which is not filtered %r"
-msgstr ""
-"Filtrelenmiş %r olmayan bağlantı noktası filtresi kaldırılmaya çalışıldı"
-
-#, python-format
-msgid "Attempted to update port filter which is not filtered %s"
-msgstr "%s filtrelenmemiş bağlantı noktası filtresi güncellenmeye çalışıldı"
-
-#, python-format
-msgid ""
-"Binding info for port %s was not found, it might have been deleted already."
-msgstr ""
-"Bağlantı noktası %s için bağlama bilgisi bulunamadı, zaten silinmiş olabilir."
-
-#, python-format
-msgid ""
-"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not "
-"in port's address IP versions"
-msgstr ""
-"Dhcp seçeneği %(opt)s uygulanamıyor çünkü ip_version %(version)d bağlantı "
-"noktasının adres IP sürümleri içinde değil"
-
-#, python-format
-msgid "Centralizing distributed router %s is not supported"
-msgstr "Dağıtık yönlendirici %s merkezileştirme desteklenmiyor"
-
-#, python-format
-msgid "Cleaning bridge: %s"
-msgstr "Köprü temizleniyor: %s"
-
-#, python-format
-msgid "Clearing orphaned ARP spoofing entries for devices %s"
-msgstr "Aygıtlar %s için sahipsiz ARP aldatma girdileri temizleniyor"
-
-msgid ""
-"ConfDriver is used as quota_driver because the loaded plugin does not "
-"support 'quotas' table."
-msgstr ""
-"Yüklenen eklenti 'quotas' tablosunu desteklemediğinden ConfDriver "
-"quota_driver olarak kullanılıyor."
-
-#, python-format
-msgid "Configured extension driver names: %s"
-msgstr "Yapılandırılan eklenti sürücü isimleri: %s"
-
-#, python-format
-msgid "Configured mechanism driver names: %s"
-msgstr "Yapılandırılan mekanizma sürücü isimleri: %s"
-
-#, python-format
-msgid "Configured type driver names: %s"
-msgstr "Tür sürücü isimleri yapılandırıldı: %s"
-
-msgid "DHCP agent started"
-msgstr "DHCP ajanı başlatıldı"
-
-#, python-format
-msgid "Default provider is not specified for service type %s"
-msgstr "%s servis türü için varsayılan sağlayıcı belirtilmemiş"
-
-#, python-format
-msgid "Deleting port: %s"
-msgstr "Bağlantı noktası siliniyor: %s"
-
-#, python-format
-msgid "Destroying IPset: %s"
-msgstr "IPset siliniyor: %s"
-
-#, python-format
-msgid "Destroying IPsets with prefix: %s"
-msgstr "Şu öneke sahip IPset'ler siliniyor: %s"
-
-#, python-format
-msgid "Device %s already exists"
-msgstr "Aygıt %s zaten mevcut"
-
-#, python-format
-msgid "Device %s not defined on plugin"
-msgstr "Aygıt %s eklentide tanımlanmamış"
-
-#, python-format
-msgid "Device with MAC %s not defined on plugin"
-msgstr "%s MAC'ine sahip aygıt eklentide tanımlanmadı"
-
-msgid "Disabled allowed-address-pairs extension."
-msgstr "allowed-address-pairs eklentisi kapatıldı."
-
-msgid "Disabled security-group extension."
-msgstr "Güvenlik grubu eklentisi kapatıldı."
-
-msgid "Disabled vlantransparent extension."
-msgstr "vlantransparent eklentisi kapalı."
-
-#, python-format
-msgid "Exclude Devices: %s"
-msgstr "Aygıtları Hariç Tut: %s"
-
-#, python-format
-msgid ""
-"Failed to schedule network %s, no eligible agents or it might be already "
-"scheduled by another server"
-msgstr ""
-"%s ağı zamanlanamadı, uygun ajan yok veya başka bir sunucu tarafından zaten "
-"zamanlanmış olabilir"
-
-#, python-format
-msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
-msgstr "Havuzda geçersiz IP adresi bulundu: %(start)s - %(end)s:"
-
-#, python-format
-msgid "Found overlapping ranges: %(l_range)s and %(r_range)s"
-msgstr "Kesişen aralıklar bulundu: %(l_range)s and %(r_range)s"
-
-#, python-format
-msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s"
-msgstr "Alt ağ CIDR'den büyük havuz bulundu:%(start)s - %(end)s"
-
-#, python-format
-msgid ""
-"Found port (%(port_id)s, %(ip)s) having IP allocation on subnet %(subnet)s, "
-"cannot delete"
-msgstr ""
-"%(subnet)s alt ağında IP ayrılmış bağlantı noktası (%(port_id)s, %(ip)s) "
-"bulundu, silinemez"
-
-#, python-format
-msgid "Got %(alias)s extension from driver '%(drv)s'"
-msgstr "'%(drv)s' sürücüsünden %(alias)s eklentisi alındı"
-
-#, python-format
-msgid "HTTP exception thrown: %s"
-msgstr "HTTP istisnası fırlatıldı: %s"
-
-#, python-format
-msgid ""
-"Heartbeat received from %(type)s agent on host %(host)s, uuid %(uuid)s after "
-"%(delta)s"
-msgstr ""
-"%(host)s istemcisi, uuid %(uuid)s üstündeki %(type)s ajandan %(delta)s sonra "
-"kalp atışı alındı"
-
-msgid "IPset cleanup completed successfully"
-msgstr "IPset temizliği başarıyla tamamlandı"
-
-msgid "IPv6 is not enabled on this system."
-msgstr "IPv6 bu sistemde etkin değil."
-
-#, python-format
-msgid "Initializing driver for type '%s'"
-msgstr "'%s' türü için sürücü ilklendiriliyor"
-
-#, python-format
-msgid "Initializing extension driver '%s'"
-msgstr "Eklenti sürücüsü ilklendiriliyor '%s'"
-
-msgid "Initializing extension manager."
-msgstr "Genişletme yöneticisi başlatılıyor"
-
-#, python-format
-msgid "Initializing mechanism driver '%s'"
-msgstr "Mekanizma sürücüsü ilklendiriliyor '%s'"
-
-#, python-format
-msgid "Interface mappings: %s"
-msgstr "Arayüz eşleştirmeleri: %s"
-
-#, python-format
-msgid "L2 Agent operating in DVR Mode with MAC %s"
-msgstr "L2 Ajanı %s MAC'i ile DVR Kipinde çalışıyor"
-
-msgid "L3 agent started"
-msgstr "L3 ajanı başlatıldı"
-
-msgid "LinuxBridge Agent RPC Daemon Started!"
-msgstr "LinuxBridge Ajanı RPC Artalan İşlemleri Başlatıldı!"
-
-#, python-format
-msgid "Loaded extension driver names: %s"
-msgstr "Yüklenen eklenti sürücü isimleri: %s"
-
-#, python-format
-msgid "Loaded extension: %s"
-msgstr "Yüklenen bölüm: %s"
-
-#, python-format
-msgid "Loaded mechanism driver names: %s"
-msgstr "Yüklenen mekanizma sürücü isimleri: %s"
-
-#, python-format
-msgid "Loaded quota_driver: %s."
-msgstr "quota_driver yüklendi: %s."
-
-#, python-format
-msgid "Loaded type driver names: %s"
-msgstr "Tür sürücü isimleri yüklendi: %s"
-
-#, python-format
-msgid "Loading Metering driver %s"
-msgstr "Ölçme sürücüsü %s yükleniyor"
-
-#, python-format
-msgid "Loading Plugin: %s"
-msgstr "Eklenti Yükleniyor: %s"
-
-#, python-format
-msgid "Loading core plugin: %s"
-msgstr "Çekirdek eklenti yükleniyor: %s"
-
-#, python-format
-msgid "Loading interface driver %s"
-msgstr "Arayüz sürücüsü %s yükleniyor"
-
-msgid "Logging enabled!"
-msgstr "Günlükleme etkin!"
-
-msgid "ML2 FlatTypeDriver initialization complete"
-msgstr "ML2 FlatTypeDriver ilklendirmesi tamamlandı"
-
-msgid "ML2 LocalTypeDriver initialization complete"
-msgstr "ML2 LocalTypeDriver ilklendirmesi tamamlandı"
-
-#, python-format
-msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s"
-msgstr "Fiziksel ağ %(physical_network)s %(bridge)s köprüsüne eşleştiriliyor"
-
-msgid "Modular L2 Plugin initialization complete"
-msgstr "Modüler L2 Eklentisi ilklendirme tamamlandı"
-
-#, python-format
-msgid "Network VLAN ranges: %s"
-msgstr "Ağ VLAN aralığı: %s"
-
-#, python-format
-msgid "Neutron service started, listening on %(host)s:%(port)s"
-msgstr "Neutron servisi başlatıldı, %(host)s:%(port)s üzerinde dinliyor"
-
-#, python-format
-msgid "No device with MAC %s defined on agent."
-msgstr "Ajanda %s MAC'ine sahip bir aygıt tanımlanmamış."
-
-msgid "No ports here to refresh firewall"
-msgstr "Burda güvenlik duvarını tazelemek için bağlantı noktası yok"
-
-#, python-format
-msgid "Nova event response: %s"
-msgstr "Nova olay yanıtı: %s"
-
-#, python-format
-msgid ""
-"Number of active agents lower than max_l3_agents_per_router. L3 agents "
-"available: %s"
-msgstr ""
-"Etkin ajan sayısı max_l3_agents_per_router'den küçük. Kullanılabilir L3 "
-"ajanları: %s"
-
-msgid "OVS cleanup completed successfully"
-msgstr "OVS temizliği başarıyla tamamlandı"
-
-#, python-format
-msgid "Physical Devices mappings: %s"
-msgstr "Fiziksel Aygıtların eşleştirmeleri: %s"
-
-#, python-format
-msgid "Port %(device)s updated. Details: %(details)s"
-msgstr "Bağlantı noktası %(device)s güncellendi. Detaylar: %(details)s"
-
-#, python-format
-msgid "Port %(port_id)s not present in bridge %(br_name)s"
-msgstr "Bağlantı noktası %(port_id)s %(br_name)s köprüsünde mevcut değil"
-
-#, python-format
-msgid "Port %s updated."
-msgstr "Bağlantı noktası %s güncellendi."
-
-#, python-format
-msgid "Port %s was deleted concurrently"
-msgstr "Bağlantı noktası %s eş zamanlı olarak silindi"
-
-#, python-format
-msgid ""
-"Port %s was not found on the integration bridge and will therefore not be "
-"processed"
-msgstr ""
-"Bağlantı noktası %s tümleştirme köprüsünde bulunamadı ve bu yüzden "
-"işlenmeyecek"
-
-#, python-format
-msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!"
-msgstr ""
-"'%(port_name)s' bağlantı noktası '%(vlan_tag)d' vlan etiketini kaybetti!"
-
-msgid "PortSecurityExtensionDriver initialization complete"
-msgstr "PortSecurityExtensionDriver ilklendirme tamamlandı"
-
-#, python-format
-msgid "Ports %s removed"
-msgstr "Portlar %s  silindi"
-
-#, python-format
-msgid "Preparing filters for devices %s"
-msgstr "Aygıtlar için filtreler hazırlanıyor %s"
-
-#, python-format
-msgid "Process runs with uid/gid: %(uid)s/%(gid)s"
-msgstr "Süreç şu uid/gid ile çalışıyor: %(uid)s/%(gid)s"
-
-msgid "Provider rule updated"
-msgstr "Sağlayıcı kuralı güncellendi"
-
-#, python-format
-msgid "RPC agent_id: %s"
-msgstr "RPC agent_id: %s"
-
-msgid "RPC was already started in parent process by plugin."
-msgstr "RPC üst süreçte eklenti tarafından zaten başlatılmıştı."
-
-#, python-format
-msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"
-msgstr "vlan = %(vlan_id)s'in net-id = %(net_uuid)s'den iades isteniyor"
-
-msgid "Refresh firewall rules"
-msgstr "Güvenlik duvarı kurallarını tazele"
-
-#, python-format
-msgid "Registered extension drivers: %s"
-msgstr "Eklenti sürücüleri kaydedildi: %s"
-
-#, python-format
-msgid "Registered mechanism drivers: %s"
-msgstr "Kaydedilen mekanizma sürücüleri: %s"
-
-#, python-format
-msgid "Registered types: %s"
-msgstr "Kaydedilen türler: %s"
-
-#, python-format
-msgid "Remove device filter for %r"
-msgstr "%r için aygıt filtresini kaldır"
-
-#, python-format
-msgid "Removing iptables rule for IPset: %s"
-msgstr "IPset için iptables kuralı siliniyor: %s"
-
-#, python-format
-msgid "Router %(router_id)s transitioned to %(state)s"
-msgstr "Yönlendirici %(router_id)s %(state)s durumuna geçti"
-
-#, python-format
-msgid ""
-"Router %s is not managed by this agent. It was possibly deleted concurrently."
-msgstr ""
-"%s yönlendiricisi bu ajan tarafından yönetilmiyor. Muhtemelen eş zamanlı "
-"olarak silindi."
-
-msgid "SNAT already bound to a service node."
-msgstr "SNAT zaten bir servis düğümüne bağlı."
-
-#, python-format
-msgid "SNAT interface port list does not exist, so create one: %s"
-msgstr ""
-"SNAT arayüzü bağlantı noktası listesi mevcut değil, bir tane oluştur: %s"
-
-msgid "SRIOV NIC Agent RPC Daemon Started!"
-msgstr "SRIOV NIC Ajanı RPC Artalan İşlemleri Başlatıldı!"
-
-#, python-format
-msgid "Scheduling unhosted network %s"
-msgstr "Sunulmamış ağ %s zamanlanıyor"
-
-#, python-format
-msgid "Security group member updated %r"
-msgstr "Güvenlik grubu üyesi güncellendi %r"
-
-#, python-format
-msgid "Security group rule updated %r"
-msgstr "Güvenlik grubu kuralı güncellendi %r"
-
-#, python-format
-msgid "Service %s is supported by the core plugin"
-msgstr "Servis %s çekirdek eklenti tarafından destekleniyor"
-
-#, python-format
-msgid ""
-"Skipping ARP spoofing rules for port '%s' because it has port security "
-"disabled"
-msgstr ""
-"'%s' bağlantı noktası için ARP aldatma kuralları atlanıyor çünkü bağlanı "
-"noktası güvenliği kapalı"
-
-#, python-format
-msgid ""
-"Skipping method %s as firewall is disabled or configured as "
-"NoopFirewallDriver."
-msgstr ""
-"Güvenlik duvarı kapalı ya da NoopFirewallDriver olarak yapılandırıldığından "
-"%s metodu atlanıyor."
-
-msgid ""
-"Skipping period L3 agent status check because automatic router rescheduling "
-"is disabled."
-msgstr ""
-"Devre L3 ajan durum kontrolü atlanıyor çünkü otomatik yönlendirici yeniden "
-"zamanlama kapalı."
-
-msgid ""
-"Skipping periodic DHCP agent status check because automatic network "
-"rescheduling is disabled."
-msgstr ""
-"Aralıklı DHCP ajan durum kontrolü atlanıyor çünkü otomatik ağ yeniden "
-"zamanlama kapalı."
-
-#, python-format
-msgid "Skipping port %s as no IP is configure on it"
-msgstr "Bağlantı noktası %s atlanıyor çünkü üzerinde yapılandırılmış IP yok"
-
-msgid "Specified IP addresses do not match the subnet IP version"
-msgstr "Belirtilen IP adresleri alt ağ IP sürümüyle eşleşmiyor"
-
-msgid "Stopping linuxbridge agent."
-msgstr "Linuxbridge ajanı durduruluyor."
-
-#, python-format
-msgid "Subnet %s was deleted concurrently"
-msgstr "Alt ağ %s eş zamanlı olarak silindi"
-
-msgid "Synchronizing state"
-msgstr "Durum eşzamanlandırılıyor"
-
-msgid "Synchronizing state complete"
-msgstr "Durum eş zamanlandırma tamamlandı"
-
-#, python-format
-msgid "Tenant network_types: %s"
-msgstr "Kiracı network_types: %s"
-
-#, python-format
-msgid ""
-"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet "
-"%(subnet_id)s (CIDR: %(cidr)s)"
-msgstr ""
-"CIDR: %(new_cidr)s için doğrulama başarısız - %(subnet_id)s (CIDR: %(cidr)s) "
-"ile çakışıyor"
-
-msgid "VlanTypeDriver initialization complete"
-msgstr "VlanTypeDriver ilklendirme tamamlandı"
-
-#, python-format
-msgid "agent_updated by server side %s!"
-msgstr "ajan sunucu tarafında güncellendi %s!"
-
-#, python-format
-msgid "port_unbound(): net_uuid %s not in local_vlan_map"
-msgstr "port_unbound(): net_uuid %s local_vlan_map içinde değil"
diff --git a/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po b/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po
deleted file mode 100644 (file)
index d09236f..0000000
+++ /dev/null
@@ -1,450 +0,0 @@
-# Translations template for neutron.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-#
-# Translators:
-# ADİL REŞİT DURSUN <ardursun@deltanoc.com>, 2015
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.dev524\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-11-29 06:23+0000\n"
-"PO-Revision-Date: 2015-08-21 01:06+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Plural-Forms: nplurals=1; plural=0;\n"
-"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.3\n"
-
-#, python-format
-msgid "%(agent_type)s agent %(agent_id)s is not active"
-msgstr "%(agent_type)s ajanı %(agent_id)s etkin değil"
-
-#, python-format
-msgid ""
-"%(port_num)d router ports found on the metadata access network. Only the "
-"port %(port_id)s, for router %(router_id)s will be considered"
-msgstr ""
-"Metadata erişim ağında %(port_num)d yönlendirici bağlantı noktası bulundu. "
-"Yalnızca %(port_id)s bağlantı noktası, %(router_id)s yönlendiricisi için "
-"değerlendirilecek"
-
-#, python-format
-msgid "%(type)s tunnel %(id)s not found"
-msgstr "%(type)s tünel %(id)s bulunamadı"
-
-msgid "A concurrent port creation has occurred"
-msgstr "Eş zamanlı bağlantı noktası oluşturma meydana geldi"
-
-#, python-format
-msgid ""
-"Action %(action)s for network %(net_id)s could not complete successfully: "
-"%(reason)s"
-msgstr ""
-"%(net_id)s ağı için %(action)s eylemi başarıyla tamamlanamadı: %(reason)s"
-
-#, python-format
-msgid "Action %s not supported"
-msgstr "%s eylemi desteklenmiyor"
-
-#, python-format
-msgid "Attempted to get traffic counters of chain %s which does not exist"
-msgstr "%s zincirinin mevcut olmayan trafik sayaçları alınmaya çalışıldı"
-
-#, python-format
-msgid "Attempting to bind with dead agent: %s"
-msgstr "Ölü ajanla bağlama deneniyor: %s"
-
-#, python-format
-msgid "Cannot find vf index for pci slot %s"
-msgstr "%s pci yuvası için vf indisi bulunamıyor"
-
-#, python-format
-msgid "Cannot find vfs %(vfs)s in device %(dev_name)s"
-msgstr "%(dev_name)s aygıtında vfs %(vfs)s bulunamıyor"
-
-#, python-format
-msgid "Configuration for agent %(agent_type)s on host %(host)s is invalid."
-msgstr ""
-"%(host)s istemcisi üstündeki %(agent_type)s ajanı için yapılandırma geçersiz."
-
-#, python-format
-msgid "Could not expand segment %s"
-msgstr "Dilim %s genişletilemedi"
-
-#, python-format
-msgid "DHCP agent %s is not active"
-msgstr "DHCP ajanı %s etkin değil"
-
-msgid "DVR functionality requires a server upgrade."
-msgstr "DVR işlevselliği sunucu yükseltmesi gerektiriyor."
-
-#, python-format
-msgid "Device %(device)s requested by agent %(agent_id)s not found in database"
-msgstr ""
-"%(agent_id)s ajanı tarafından istenen %(device)s aygıtı veri tabanında "
-"bulunamadı"
-
-#, python-format
-msgid ""
-"Device %(device)s requested by agent %(agent_id)s on network %(network_id)s "
-"not bound, vif_type: %(vif_type)s"
-msgstr ""
-"%(network_id)s ağı üstündeki %(agent_id)s ajanı tarafından istenen "
-"%(device)s aygıtı bağlı değil, vif_type: %(vif_type)s"
-
-#, python-format
-msgid "Device %s not defined on plugin"
-msgstr "Aygıt %s eklenti üzerinde tanımlanmamış"
-
-#, python-format
-msgid "Did not find expected name \"%(ext_name)s\" in %(file)s"
-msgstr "%(file)s içinde beklenen isim \"%(ext_name)s\" bulunamadı"
-
-msgid "Driver configuration doesn't match with enable_security_group"
-msgstr "Sürücü yapılandırması enable_security_group ile eşleşmiyor"
-
-#, python-format
-msgid "Endpoint with ip %s already exists"
-msgstr "%s ip'sine sahip son uç zaten mevcut"
-
-#, python-format
-msgid "Extension %s not supported by any of loaded plugins"
-msgstr "Eklenti %s yüklenen hiçbir eklenti tarafından desteklenmiyor"
-
-#, python-format
-msgid "Extension file %(f)s wasn't loaded due to %(exception)s"
-msgstr "Eklenti dosyası %(f)s %(exception)s sebebiyle yüklenmedi"
-
-#, python-format
-msgid "Failed to delete namespace %s"
-msgstr "%s isim uzayı silme başarısız"
-
-#, python-format
-msgid "Failed trying to delete interface: %s"
-msgstr "Arayüzü silme denemesi başarısız: %s"
-
-#, python-format
-msgid "Failed trying to delete namespace: %s"
-msgstr "Bilinirlik alanı silme hatası: %s"
-
-#, python-format
-msgid "Found failed openvswitch port: %s"
-msgstr "Başarısız olmuş openvswitch bağlantı noktası bulundu: %s"
-
-#, python-format
-msgid "Found not yet ready openvswitch port: %s"
-msgstr "Henüz hazır olmayan openvswitch bağlantı noktası bulundu: %s"
-
-#, python-format
-msgid ""
-"In _notify_port_updated(), no bound segment for port %(port_id)s on network "
-"%(network_id)s"
-msgstr ""
-"_notify_port_updated() içinde, %(network_id)s ağı üzerindeki %(port_id)s "
-"bağlantı noktası için bağlı dilim yok"
-
-#, python-format
-msgid "Info for router %s was not found. Performing router cleanup"
-msgstr ""
-"%s yönlendiricisi için bilgi bulunamadı. Yönlendirici temizliği "
-"gerçekleştiriliyor"
-
-msgid "Invalid Interface ID, will lead to incorrect tap device name"
-msgstr "Geçersiz arayüz kimliği, geçersiz tap aygıt ismine yol açacak"
-
-msgid "Invalid Network ID, will lead to incorrect bridge name"
-msgstr "Geçersiz Ağ ID'si, geçersiz köprü ismine yol açacak"
-
-#, python-format
-msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name"
-msgstr ""
-"Geçersiz Dilimlendirme kimliği: %s, geçersiz vxlan aygıt ismine sebep olacak"
-
-msgid "Invalid VLAN ID, will lead to incorrect subinterface name"
-msgstr "Geçersiz VLAN ID'si, geçersiz alt arayüz ismine yol açacak"
-
-#, python-format
-msgid "Invalid remote IP: %s"
-msgstr "Geçersiz uzak IP: %s"
-
-#, python-format
-msgid ""
-"Invalid value for pagination_max_limit: %s. It should be an integer greater "
-"to 0"
-msgstr ""
-"pagination_max_limit: %s için geçersiz değer. 0'dan büyük bir tam sayı olmalı"
-
-#, python-format
-msgid ""
-"L2 agent could not get DVR MAC address from server. Retrying. Detailed "
-"message: %s"
-msgstr ""
-"L2 ajanı sunucudan DVR MAC adresini alamadı. Tekrar deneniyor. Detaylı "
-"ileti: %s"
-
-#, python-format
-msgid "Loaded plugins do not implement extension %s interface"
-msgstr "Yüklü eklentiler eklenti %s arayüzünü uygulamıyor"
-
-#, python-format
-msgid "Network %s could not be found, it might have been deleted concurrently."
-msgstr "%s ağı bulunamadı, eş zamanlı olarak silinmiş olabilir."
-
-#, python-format
-msgid "Network %s has been deleted."
-msgstr "Ağ %s silindi."
-
-#, python-format
-msgid ""
-"Network %s may have been deleted and its resources may have already been "
-"disposed."
-msgstr "Ağ %s silinmiş ve kaynakları ortadan kaldırılmış olabilir."
-
-msgid ""
-"Neutron server does not support state report. State report for this agent "
-"will be disabled."
-msgstr ""
-"Neutron sunucusu durum raporu desteklemiyor. Bu ajan için durum raporu "
-"kapatılacak."
-
-msgid "No DHCP agents available, skipping rescheduling"
-msgstr "Uygun DHCP ajanı yok, yeniden zamanlama atlanıyor"
-
-#, python-format
-msgid "No L3 agents can host the router %s"
-msgstr "Hiçbir L3 ajanı %s yönlendiricisini sunamaz"
-
-msgid "No active L3 agents"
-msgstr "Etkin L3 ajanı yok"
-
-msgid "No active L3 agents found for SNAT"
-msgstr "SNAT için etkin L3 ajanı bulunamadı"
-
-#, python-format
-msgid "No flat network found on physical network %s"
-msgstr "Fiziksel ağ %s üzerinde düz ağ bulunamadı"
-
-msgid "No more DHCP agents"
-msgstr "Daha fazla DHCP ajanı yok"
-
-#, python-format
-msgid "No routers compatible with L3 agent configuration on host %s"
-msgstr ""
-"Hiçbir yönlendirici %s istemcisi üzerindeki L3 ajanı yapılandırmasıyla "
-"uyumlu değil"
-
-#, python-format
-msgid "No valid gateway port on subnet %s is found for IPv6 RA"
-msgstr ""
-"IPv6 RA için %s alt ağı üzerinde geçerli ağ geçidi bağlantı noktası "
-"bulunamadı"
-
-#, python-format
-msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s"
-msgstr "%(physical_network)s fiziksel ağında vlan_id %(vlan_id)s bulunamadı"
-
-#, python-format
-msgid "Nova event: %s returned with failed status"
-msgstr "Nova olayı: %s başarısız durum döndürdü"
-
-#, python-format
-msgid "Nova returned NotFound for event: %s"
-msgstr "Nova %s eylemi için NotFound döndürdü"
-
-msgid ""
-"OVS is dead. OVSNeutronAgent will keep running and checking OVS status "
-"periodically."
-msgstr ""
-"OVS ölü. OVSNeutronAgent çalışmaya devam edip OVS durumunu aralıklarla "
-"kontrol edecek."
-
-msgid "OVS is restarted. OVSNeutronAgent will reset bridges and recover ports."
-msgstr ""
-"OVS yeniden başlatıldı. OVSNeutronAgent köprüleri sıfırlayacak ve bağlantı "
-"noktalarını kurtaracak."
-
-#, python-format
-msgid ""
-"Only %(active)d of %(total)d DHCP agents associated with network "
-"'%(net_id)s' are marked as active, so notifications may be sent to inactive "
-"agents."
-msgstr ""
-"'%(net_id)s' ağıyla ilişkilendirilmiş %(total)d DHCP ajanından yalnızca "
-"%(active)d kadarı etkin olarak işaretlenmiş, yani iletiler etkin olmayan "
-"ajanlara gönderilebilir."
-
-#, python-format
-msgid ""
-"Option \"%(option)s\" must be supported by command \"%(command)s\" to enable "
-"%(mode)s mode"
-msgstr ""
-"\"%(option)s\" seçeneği %(mode)s kipini etkinleştirmek için \"%(command)s\" "
-"komutuyla desteklenmeli"
-
-#, python-format
-msgid "Port %s not found during update"
-msgstr "%s bağlantı noktası güncelleme sırasında bulunamadı"
-
-msgid "Port ID not set! Nova will not be notified of port status change."
-msgstr ""
-"Bağlantı noktası kimliği ayarlanmamış! Nova bağlantı noktası durumu "
-"değişikliğinde bilgilendirilmeyecek."
-
-#, python-format
-msgid ""
-"Removing network %(network)s from agent %(agent)s because the agent did not "
-"report to the server in the last %(dead_time)s seconds."
-msgstr ""
-"%(network)s ağı %(agent)s ajanından çıkarılıyor çünkü ajan sunucuya son "
-"%(dead_time)s saniye rapor vermedi."
-
-#, python-format
-msgid ""
-"Rescheduling router %(router)s from agent %(agent)s because the agent did "
-"not report to the server in the last %(dead_time)s seconds."
-msgstr ""
-"Yönlendirici %(router)s %(agent)s ajanından yeniden zamanlanıyor çünkü ajan "
-"sunucuya son %(dead_time)s saniye rapor vermedi."
-
-msgid ""
-"Security group agent binding currently not set. This should be set by the "
-"end of the init process."
-msgstr ""
-"Güvenlik grubu ajan bağlama şu an ayarlanmış değil. Bu init sürecinin "
-"sonunda ayarlanmış olmalı."
-
-#, python-format
-msgid ""
-"The configured driver %(driver)s has been moved, automatically using "
-"%(new_driver)s instead. Please update your config files, as this automatic "
-"fixup will be removed in a future release."
-msgstr ""
-"Yapılandırılan sürücü %(driver)s taşınnmış, yerine otomatik olarak "
-"%(new_driver)s kullanılıyor. Lütfen yapılandırma dosyalarınızı güncelleyin, "
-"çünkü bu otomatik düzeltme ileri sürümlerde kaldırılacak."
-
-msgid ""
-"The remote metadata server responded with Forbidden. This response usually "
-"occurs when shared secrets do not match."
-msgstr ""
-"Uzak metadata sunucu Yasaklı yanıtı döndü. Bu yanıt genellikle paylaşılan "
-"gizler eşleşmediğinde oluşur."
-
-msgid ""
-"The user that is executing neutron can read the namespaces without using the "
-"root_helper. Disable the use_helper_for_ns_read option to avoid a "
-"performance impact."
-msgstr ""
-"Neutron'u çalıştıran kullanıcı root_helper kullanmadan isim uzaylarını "
-"okuyabilir. Performansı etkilememesi için use_helper_for_ns_read seçeneğini "
-"kapatın."
-
-#, python-format
-msgid ""
-"Time since last %s agent reschedule check has exceeded the interval between "
-"checks. Waiting before check to allow agents to send a heartbeat in case "
-"there was a clock adjustment."
-msgstr ""
-"Son %s ajan yeniden zamanlama kontrolünden sonra geçen zaman kontroller "
-"arası zaman aralığını aştı. Bir saat ayarlama yapılmış olması durumunu "
-"hesaba katmak için ajanların kalp atışı gönderebilmesi için kontrolden önce "
-"bekleniyor."
-
-#, python-format
-msgid ""
-"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r %(top)r"
-msgstr ""
-"Mevcut olmayan kural silinmeye çalışıldı: %(chain)r %(rule)r %(wrap)r %(top)r"
-
-msgid "Tunnel synchronization requires a server upgrade."
-msgstr "Tünel eş zamanlama sunucu yükseltmesi gerektiriyor."
-
-#, python-format
-msgid ""
-"Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its "
-"current state; please check that the network and/or its subnet(s) still "
-"exist."
-msgstr ""
-"%(net_id)s için %(action)s dhcp yapılamadı: mevcut durumuyla ilgili bir "
-"çatışma var; lütfen ağ ve/veya alt ağ(lar)ının hala mevcut olduğunu kontrol "
-"edin."
-
-#, python-format
-msgid "Unable to configure IP address for floating IP: %s"
-msgstr "Değişken IP için IP adresi yapılandırılamıyor: %s"
-
-#, python-format
-msgid "Unable to find data type descriptor for attribute %s"
-msgstr "%s özniteliği için veri türü tanımlayıcısı bulunamadı"
-
-#, python-format
-msgid ""
-"Unable to schedule network %s: no agents available; will retry on subsequent "
-"port and subnet creation events."
-msgstr ""
-"Ağ %s zamanlanamadı: hiçbir ajan uygun değil; sonraki bağlantı noktası "
-"üzerinden ve alt ağ oluşturma olayları tekrar denenecek."
-
-#, python-format
-msgid "Updating lease expiration is now deprecated. Issued  from host %s."
-msgstr ""
-"Kira sona erme tarihlerini güncelleme artık kullanılmıyor. %s istemcisinden  "
-"yayınlandı."
-
-#, python-format
-msgid ""
-"VIF port: %s has no ofport configured, and might not be able to transmit"
-msgstr ""
-"VIF bağlantı noktası: %s'in yapılandırılmış bir ofport'u yok, aktarım "
-"yapamayabilir"
-
-#, python-format
-msgid "device pci mismatch: %(device_mac)s - %(pci_slot)s"
-msgstr "aygıt pci uyuşmazlığı: %(device_mac)s - %(pci_slot)s"
-
-#, python-format
-msgid "failed to parse vf link show line %(line)s: for %(device)s"
-msgstr ""
-"vf bağlantısı gösteri satırı %(line)s: %(device)s için ayrıştırma başarısız"
-
-#, python-format
-msgid ""
-"l3-agent cannot check service plugins enabled at the neutron server when "
-"startup due to RPC error. It happens when the server does not support this "
-"RPC API. If the error is UnsupportedVersion you can ignore this warning. "
-"Detail message: %s"
-msgstr ""
-"RPC hatası sebebiyle l3-agent açılışta neutron sunucusundaki neutron servis "
-"eklentilerinin etkinliğini kontrol edemiyor. Bu durum sunucu bu RPC API'sini "
-"desteklemediğinde olabilir. Hata UnsupportedVersion ise bu uyarıyı göz ardı "
-"edebilirsiniz. Detaylı ileti: %s"
-
-#, python-format
-msgid ""
-"l3-agent cannot check service plugins enabled on the neutron server. "
-"Retrying. Detail message: %s"
-msgstr ""
-"l3-agent neutron sunucusunda etkin servis eklentilerini kontrol edemiyor. "
-"Tekrar deneniyor. Detaylı ileti: %s"
-
-#, python-format
-msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer"
-msgstr "VIF: %(vif)s için ofport: %(ofport)s pozitif tam sayı değil"
-
-msgid ""
-"security_group_info_for_devices rpc call not supported by the server, "
-"falling back to old security_group_rules_for_devices which scales worse."
-msgstr ""
-"security_group_info_for_devices rpc çağrısı sunucu tarafından "
-"desteklenmiyor, daha kötü ölçeklenen eski security_group_rules_for_devices'e "
-"dönülüyor."
-
-#, python-format
-msgid "unable to modify mac_address of ACTIVE port %s"
-msgstr "%s ETKİN bağlantı noktasının mac_address'i değiştirilemiyor"
diff --git a/neutron/locale/tr_TR/LC_MESSAGES/neutron.po b/neutron/locale/tr_TR/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index 3f1139f..0000000
+++ /dev/null
@@ -1,2549 +0,0 @@
-# Turkish (Turkey) translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-09-16 08:28+0000\n"
-"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
-"Language: tr_TR\n"
-"Language-Team: Turkish (Turkey)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"Komut: %(cmd)s\n"
-"Çıkış kodu: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: Dahili sürücü hatası."
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s geçerli bir %(type)s tanımlayıcı değil"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"%(invalid_dirs)s sort_dirs için geçersiz değer, geçerli değer '%(asc)s' ve "
-"'%(desc)s'"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(key)s %(tunnel)s sağlayıcı ağı için yasaklanmış"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"%(method)s ağ ayarları %(current)s (asıl ayarlar %(original)s) ve "
-"%(segments)s ağ dilimleri ile çağrıldı"
-
-#, python-format
-msgid ""
-"%(method)s called with port settings %(current)s (original settings "
-"%(original)s) host %(host)s (original host %(original_host)s) vif type "
-"%(vif_type)s (original vif type %(original_vif_type)s) vif details "
-"%(vif_details)s (original vif details %(original_vif_details)s) binding "
-"levels %(levels)s (original binding levels %(original_levels)s) on network "
-"%(network)s with segments to bind %(segments_to_bind)s"
-msgstr ""
-"%(method)s bağlantı noktası ayarları %(current)s (asıl ayarlar %(original)s) "
-"istemci %(host)s (asıl istemci %(original_host)s) vif türü %(vif_type)s "
-"(asıl vif türü %(original_vif_type)s) vif detayları %(vif_details)s (asıl "
-"vif detayları %(original_vif_details)s) bağlama seviyeleri %(levels)s (asıl "
-"bağlama seviyeleri %(original_levels)s) %(network)s ağı üzerinde bağlanacak "
-"%(segments_to_bind)s dilimleriyle çağrıldı"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"%(method)s alt ağ ayarları %(current)s (asıl ayarlar %(original)s) ile "
-"çağrıldı"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s başarısız."
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr "%(name)s '%(addr)s' ip_version '%(ip_version)s' ile eşleşmiyor"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "%s çevrim dışı kipte çağrılamaz"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "%s sort_key için geçersiz özniteliktir"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "%s sort_keys için geçersiz öznitelik"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s geçerli bir VLAN etiketi değil"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr "%s get_port_from_device veya get_ports_from_devices uygulamalıdır."
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "%s VLAN sağlayıcı ağı için yasaklanmış"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "%s düz sağlayıcı ağı için yasaklanmış"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "%s yerel sağlayıcı ağı için yasaklanmış"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s' %(max_len)s azami uzunluğunu aşıyor"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s' %(valid_values)s içinde değil"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s' çok büyük - en çok '%(limit)d' olmalı"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr "'%(data)s' çok küçük - en az '%(limit)d' olmalı"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr "'%(data)s' tanınan bir IP alt ağı cidr'i değil, '%(cidr)s' öneriliyor"
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' geçerli bir isim sunucu değil. %(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s' boş karakter dizilerine izin verilmez"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "'%s' bool değere çevrilemez"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' boşluk içeriyor"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "'%s' bir dizin değil"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s' liste değil"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s' geçerli bir IP adresi değil"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s' geçerli IP alt ağ değil"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s' geçerli bir MAC adresi değil"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s' geçerli bir UUID değil"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' geçerli bir bool değer değil"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "'%s' geçerli bir girdi değil"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s' geçerli karakter dizisi değil"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s' tam sayı değil"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s' bir tam sayı ya da uuid değil"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s' <anahtar>=[değer] biçiminde olmalı"
-
-#, python-format
-msgid "'%s' must be a non negative decimal."
-msgstr "'%s' negatif olmayan ondalık olmalı."
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "'%s' negatif olmamalı"
-
-msgid "'.' searches are not implemented"
-msgstr "'.' aramaları uygulanmamış"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "0 CIDR önek uzunluğuna izin verilmez"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "Alt ağ havuzu olmadığında bir cidr belirtilmelidir"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"Fiziksel ağların MTU değerlerine eşleştirilme listesi. Eşleştirme biçimi "
-"<physnet>:<mtu val>. Bu eşleştirme varsayılan segment_mtu değerinden farklı "
-"bir fiziksel ağ MTU değeri belirtmeye izin verir."
-
-msgid "A metering driver must be specified"
-msgstr "Bir ölçme sürücüsü belirtilmeli"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr "Neutron gelişmiş servisleri için servis sağlayıcıları alma API'si"
-
-msgid "Access to this resource was denied."
-msgstr "Bu kaynağa erişime izin verilmiyor."
-
-msgid "Action to be executed when a child process dies"
-msgstr "Alt süreç öldüğünde çalıştırılacak eylem"
-
-#, python-format
-msgid "Address scope %(address_scope_id)s could not be found"
-msgstr "Adres kapsamı %(address_scope_id)s bulunamadı"
-
-msgid "Adds external network attribute to network resource."
-msgstr "Ek ağ özniteliğini ağ kaynağına ekler."
-
-msgid "Adds test attributes to core resources."
-msgstr "Çekirdek kaynaklara test özniteliklerini ekler."
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "Ajan %(id)s bulunamadı"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "Ajan %(id)s bir L3 Ajanı değil ya da kapalı"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "Ajan %(id)s geçerli bir DHCP Ajanı değil veya kapalı"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "Ajan güncellendi: %(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr "agent_type=%(agent_type)s ve istemci=%(host)s olan ajan bulunamadı"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "Ağların DHCP ajanlarına otomatik zamanlanmasına izin ver."
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "Yönlendiricilerin L3 ajanına otomatik zamanlanmasına izin ver."
-
-msgid "Allow running metadata proxy."
-msgstr "Metadata vekili çalıştırmaya izin ver."
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr "DHCP ajanına kaynak işlem bildirimi göndermeye izin ver"
-
-msgid "Allow the usage of the bulk API"
-msgstr "Toplu API'nin kullanımına izin ver"
-
-msgid "Allow the usage of the pagination"
-msgstr "Sayfalama kullanımına izin ver"
-
-msgid "Allow the usage of the sorting"
-msgstr "Sıralama kullanımına izin ver"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr "Nova metadata'ya güvensiz SSL (https) istekleri yapmaya izin ver"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair ip_address içermeli"
-
-msgid "An interface driver must be specified"
-msgstr "Bir arayüz sürücüsü belirtmeniz gerekmektedir"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"neutron.ml2.mechanism_drivers isim uzayından yüklenecek ağ mekanizması "
-"sürücü giriş noktalarının sıralı listesi."
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr "Bilinmeyen bir hata oluştu. Lütfen tekrar deneyin."
-
-msgid "An unknown exception occurred."
-msgstr "Bilinmeyen bir istisna oluştu."
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "'%s' özniteliğine POST içinde izin verilmez"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "Ağları çevrimdışı DHCP ajanlarından otomatik olarak çıkar."
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr ""
-"Yönlendiricileri çevrimdışı L3 ajanlarından çevrimiçi L3 ajanlarına otomatik "
-"olarak yeniden zamanla."
-
-msgid "Available commands"
-msgstr "Kullanılabilir komutlar"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "Arka uç VLAN şeffaflığını desteklemiyor."
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"EUI-64 ile IPv6 adresi üretmek için kötü önek ya da mac biçimi: %(prefix)s, "
-"%(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr "EUI-64 ile IPv6 adresi üretmek için kötü önek türü: %s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "Taban MAC: %s"
-
-msgid "Body contains invalid data"
-msgstr "Gövde geçersiz veri içeriyor"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "Köprü %(bridge)s mevcut değil."
-
-msgid "Bulk operation not supported"
-msgstr "Toplu işlem desteklenmiyor"
-
-msgid "CIDR to monitor"
-msgstr "İzlenecek CIDR"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr ""
-"gateway_ip'ye sahip olmayan %s alt ağının bağlantı noktasına değişken IP "
-"eklenemiyor"
-
-#, python-format
-msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool"
-msgstr "IPv%(pool_ver)s alt ağ havuzundan IPv%(req_ver)s alt ağı ayrılamaz"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr "İstenen alt ağ kullanılabilir önek kümesinden ayrılamıyor"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"Değişken IP %(floating_ip_address)s (%(fip_id)s) sabit IP %(fixed_ip)s "
-"kullanılarak %(port_id)s bağlantı noktasıyla ilişkilendirilemiyor, çünkü bu "
-"sabit IP %(net_id)s harici ağında zaten bir değişken IP'ye sahip."
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr ""
-"Değişken IP oluşturup %s bağlantı noktasına bağlanamıyor, çünkü bu bağlantı "
-"noktası başka bir kiracıya ait."
-
-msgid "Cannot create resource for another tenant"
-msgstr "Başka bir kiracı için kaynak oluşturulamıyor"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "ipv6 öznitelikleri ayarlıyken enable_dhcp kapatılamaz"
-
-#, python-format
-msgid "Cannot find %(table)s with %(col)s=%(match)s"
-msgstr "%(col)s=%(match)s olan %(table)s bulunamadı"
-
-#, python-format
-msgid "Cannot handle subnet of type %(subnet_type)s"
-msgstr "%(subnet_type)s türünde alt ağ işlenemiyor"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"İkisi de IPv6 alt ağı içeriyorsa aynı ağ id'si ile birden fazla yönlendirici "
-"bağlantı noktası olamaz. Mevcut bağlantı noktası %(p)s IPv6 alt ağ(lar)ına "
-"ve %(nid)s ağ kimliğine sahip"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"%(router_type)s yönlendiricisi %(router_id)s %(agent_mode)s L3 ajanı "
-"%(agent_id)s üzerinde sunulamaz."
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr "Akış silme veya değiştirmede öncelik eşleştirilemedi"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "Hem subnet-id hem port-id belirtilemez"
-
-msgid "Cannot understand JSON"
-msgstr "JSON anlaşılamıyor"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "Yalnızca okunabilir öznitelik %s güncellenemez"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr "Ssl için Sertifika Yetkilisi açık anahtarı (CA cert)"
-
-msgid "Check ebtables installation"
-msgstr "Ebtables kurulumunu kontrol et"
-
-msgid "Check for ARP header match support"
-msgstr "ARP başlık eşleştirme desteğini kontrol et"
-
-msgid "Check for ARP responder support"
-msgstr "ARP yanıtlayıcısı desteğini kontrol et"
-
-msgid "Check for OVS vxlan support"
-msgstr "OVS vxlan desteğini kontrol et"
-
-msgid "Check for VF management support"
-msgstr "VF yönetim desteğini kontrol et"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "Iproute2 vxlan desteğini kontrol et"
-
-msgid "Check for nova notification support"
-msgstr "Nova bildirim desteğini kontrol et"
-
-msgid "Check for patch port support"
-msgstr "Yama bağlantı noktası desteğini kontrol et"
-
-msgid "Check minimal dnsmasq version"
-msgstr "Asgari dnsmasq sürümünü kontrol et"
-
-msgid "Check netns permission settings"
-msgstr "Netns izin ayarlarını kontrol et"
-
-msgid "Check ovsdb native interface support"
-msgstr "Ovsdb doğal arayüz desteğini kontrol et"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"%(subnet_id)s alt ağının %(subnet_cidr)s cidr'i %(sub_id)s alt ağının "
-"%(cidr)s cidr'i ile çakışıyor"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "Nova metadata api sunucusu için istemci sertifikası."
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"Kiracı ağ ayırma için kullanılabilir GRE tünel kimliklerinin aralığını "
-"numaralandıran <tun_min>:<tun_max> demetlerinin virgülle ayrılmış listesi"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"Kiracı ağı ayırmaları için kullanılabilir VXLAN VNI ID'lerinin aralıklarını "
-"numaralandıran <vni_min>:<vni_max> demetlerinin virgülle ayrılmış listesi"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr ""
-"Yönlendirici olarak kullanılacak DNS sunucularının virgülle ayrılmış listesi."
-
-msgid "Command to execute"
-msgstr "Çalıştırılacak komut"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr ""
-"Arayüz sürücüsü için yapılandırma dosyası (l3_agent.ini de kullanabilirsiniz)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "CIDR %(cidr)s için çatışan değer ethertype %(ethertype)s"
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"Neutron güvenlik grubu API'sinin sunucuda etkin olup olmadığını kontrol "
-"eder. Güvenlik grubu kullanılmadığında veeya nova güvenlik grubu API'si "
-"kullanıldığında false olmalıdır."
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr "%(time)d  saniye denedikten sonra %(host)s:%(port)s'a bağlanamadı"
-
-msgid "Could not deserialize data"
-msgstr "Veri serisi çözülemedi"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "Oluşturma başarısız. %(dev_name)s zaten mevcut."
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr ""
-"Mevcut geçit ip'si %(ip_address)s %(port_id)s bağlantı noktası tarafından "
-"zaten kullanılıyor. Güncellenemiyor."
-
-msgid "Currently distributed HA routers are not supported."
-msgstr "Şu anda dağıtık HA yönlendiriciler desteklenmiyor."
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"DHCP kira süresi (saniye olarak). Dnsmasq'a süresiz kira zamanları "
-"kullanmasını söylemek için -1 kullanın."
-
-msgid "Default driver to use for quota checks"
-msgstr "Kota kontrolleri için kullanılacak varsayılan sürücü"
-
-msgid ""
-"Default network type for external networks when no provider attributes are "
-"specified. By default it is None, which means that if provider attributes "
-"are not specified while creating external networks then they will have the "
-"same type as tenant networks. Allowed values for external_network_type "
-"config option depend on the network type values configured in type_drivers "
-"config option."
-msgstr ""
-"Sağlayıcı öznitelikleri belirtilmediğinde harici ağlar için varsayılan ağ "
-"türü. Varsayılan olarak None'dir, bunun anlamı harici ağ oluştururken "
-"sağlayıcı öznitelikleri belirtilmemişse kiracı ağlarla aynı türe sahip "
-"olacaklarıdır. external_network_type yapılandırma seçeneği için izin verilen "
-"değerler type_drivers yapılandırma seçeneğinde yapılandırılan ağ türü "
-"değerlerine bağlıdır."
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Kiracı başına izin verilecek varsayılan kaynak sayısı. Negatif değer "
-"sınırsız anlamına gelir."
-
-msgid "Default security group"
-msgstr "Varsayılan güvenlik grubu"
-
-msgid "Default security group already exists."
-msgstr "Varsayılan güvenlik grubu zaten mevcut."
-
-msgid ""
-"Define the default value of enable_snat if not provided in "
-"external_gateway_info."
-msgstr ""
-"external_gateway_info'da sağlanmamışsa enable_snat'ın varsayılan değerini "
-"tanımla."
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"Şu biçimi kullanarak gleişmiş servisler için sağlayıcılar tanımlar: "
-"<service_type>:<name>:<driver>[:default]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr ""
-"Yeniden başlatıldığında mevcut bağlantı noktalarını güncellemesi beklenen "
-"ajan içindeki gecikme"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "İsim uzayını tüm aygıtları kaldırarak sil."
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "Bağlantı noktası %s siliniyor"
-
-msgid "Destroy IPsets even if there is an iptables reference."
-msgstr "Iptables referansı olsa bile IPset'leri sil."
-
-msgid "Destroy all IPsets."
-msgstr "Tüm IPset'leri sil."
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "%(mapping)s eşleştirmesindeki aygıt %(dev_name)s benzersiz"
-
-msgid "Device has no virtual functions"
-msgstr "Aygıt sanal fonksiyonlara sahip değil"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr "Aygıt ismi %(dev_name)s physical_device_mappings'de eksik"
-
-msgid "Device not found"
-msgstr "Aygıt bulunamadı"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr ""
-"%(host)s istemcisi için Dağıtık Sanal Yönlendirici Mac Adresi mevcut değil."
-
-msgid "Domain to use for building the hostnames"
-msgstr "Makine adlarını inşa için kullanılacak alan"
-
-msgid "Downgrade no longer supported"
-msgstr "Alçaltma artık desteklenmiyor"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "%s sürücüsü sağlayıcılar arasında benzersiz değil"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "L2 ajanındaki güvenlik grubunun güvenlik duvarı için sürücü"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr "Ağın DHCP ajanlarına zamanlanması için kullanılacak sürücü"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr "Yönlendiriciyi bir L3 ajanına zamanlamak için gerekli sürücü"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "Kopya IP adresi '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "POST'da kopya ölçme kuralı."
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "POST'da Kopya Güvenlik Grubu Kuralı."
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "Kopya istemci rotası '%s'"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "Listede kopya öğeler: '%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "Kopya isim sunucu '%s'"
-
-msgid "Duplicate segment entry in request."
-msgstr "İstekte kopya dilim girdisi."
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "HATA: %s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"HATA: Varsayılan arama yollarıyla ve (~/.neutron/, ~/, /etc/neutron/, /etc/) "
-"ve '--config-file' seçeneğiyle yapılandırma dosyası bulunamadı!"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr ""
-"_get_ports metoduna network_id veya router_id parametrelerinden biri "
-"verilmelidir."
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "subnet_id veya port_id belirtilmeli"
-
-msgid "Empty physical network name."
-msgstr "Boş fiziksel ağ ismi."
-
-msgid "Enable FWaaS"
-msgstr "FWaaS'ı etkinleştir"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "Sanal yönlendiriciler için HA kipini etkinleştir."
-
-msgid "Enable SSL on the API server"
-msgstr "API sunucuda SSL etkinleştir"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"Ajanda VXLAN etkinleştir. Ajan linuxbridge mekanizma sürücüsünü kullanan ml2 "
-"eklentisi ile yönetildiğinde etkinleştirilebilir"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"Destekleniyorsa yerel ARP yanıtlayıcıyı etkinleştir. OVS 2.1 ve ML2 "
-"I2population sürücüsüne ihtiyaç duyar. Anahtarın (kaplama desteklediğinde) "
-"bir ARP isteğine yerel olarak, kaplamaya maliyetli ARP yayını yapmadan yanıt "
-"vermesini sağlar."
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"admin_state_up False olan bir ajan üzerinde servisleri etkinleştir. Bu "
-"seçenek False ise, bir ajanın admin_state_up'u False yapıldığında, "
-"üzerindeki servisler kapatılacaktır. admin_state_up False olan ajanlar bu "
-"seçeneğe bakılmaksızın otomatik zamanlama için seçilmezler. Ama bu seçenek "
-"True ise bu tür ajanlara elle zamanlama yapılabilir."
-
-msgid ""
-"Enable suppression of ARP responses that don't match an IP address that "
-"belongs to the port from which they originate. Note: This prevents the VMs "
-"attached to this agent from spoofing, it doesn't protect them from other "
-"devices which have the capability to spoof (e.g. bare metal or VMs attached "
-"to agents without this flag set to True). Spoofing rules will not be added "
-"to any ports that have port security disabled. For LinuxBridge, this "
-"requires ebtables. For OVS, it requires a version that supports matching ARP "
-"headers."
-msgstr ""
-"Aslen geldikleri bağlantı noktasına ait IP adresiyle eşleşmeyen ARP "
-"yanıtlarının bastırılmasını etkinleştir. Not: Bu, bu ajana bağlı VM'lerin "
-"yanıltma yapmasını önler, yanıltma yeteneği olan başka aygıtlardan korumaz "
-"(örn. çıplak metal veya bu bayrağı True olarak ayarlanmamış ajanlara bağlı "
-"VM'ler). Yanıltma kuralları bağlantı noktası güvenliği kapalı bağlantı "
-"noktalarına eklenmeyecektir. LinuxBridge için, bu ebtables gerektirir. OVS "
-"için, ARP başlıklarını eşleştirmeyi destekleyen bir sürüm gerektirir."
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"Metadata vekili tarafından kayıt etkinleştir/kapat. metadata_proxy_user/"
-"group kayıt dosyasına okuma/yazma iznine sahip olmadığında, kapatılmalıdır "
-"ve metadata vekil kayıt dosyaları üzerinde logrotate etkinse copytruncate "
-"logrotate seçeneği kullanılmalıdır. Seçeneğin varsayılan değeri "
-"metadata_proxy_user'den anlaşılır: metadata_proxy_user ajan etkin kullanıcı "
-"id/ismi ise izleme kaydı etkinleştirilir."
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "VLAN aralığı sonu VLAN aralığı başından daha küçük"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr "Tünel aralığı sonu tünel aralığı başından daha küçük"
-
-#, python-format
-msgid "Error %(reason)s while attempting the operation."
-msgstr "İşlem denenirken %(reason)s hatası."
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "FWaaS aygıt sürücüsünü içe aktarmada hata: %s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "%s dns adresinin ayrıştırılmasında hata"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "%s okunurken hata"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr "Mevcut önekler yeni öneklerin alt kümesi olmalıdır"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"ml2 eklentisinin l2population mekanizma sürücüsünün yanında kullanılacak "
-"eklenti. Eklentiyi VXLAN iletim tablosunu doldurması için etkinleştirir."
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "%s rumuzlu eklenti mevcut değil"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "Harici IP %s geçit IP ile aynı"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"Harici ağ %(external_network_id)s %(subnet_id)s alt ağından erişilebilir "
-"değil.  Bu yüzden, %(port_id)s bağlantı noktası bir Değişken IP ile "
-"ilişkilendirilemiyor."
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr ""
-"Harici ağ %(net_id)s harici-olmayan şekilde olması için güncellenemez, çünkü "
-"mevcut geçit bağlantı noktaları var"
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "ExtraDhcpOpt %(id)s bulunamadı"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr ""
-"FWaaS eklentisi sunucu tarafında yapılandırılmış, ama FWaaS L3-agent'de "
-"kapalı."
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr ""
-"%(router_id)s yönlendiricisini yeniden zamanlama başarısız: seçilebilir l3 "
-"ajanı bulunamadı."
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr ""
-"%(router_id)s yönlendiricisinin %(agent_id)s L3 Ajanına zamanlanması "
-"başarısız."
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"%(max_tries)s denemeden sonra %(router_id)s yönlendiricisi için "
-"%(network_id)s ağında VRID ayırma başarısız."
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"%(network_id)s ağı üzerinde bağlantı noktası oluşturma başarısız, çünkü "
-"fixed_ips geçersiz %(subnet_id)s alt ağını içeriyor"
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr "İstek ayrıştırılamadı. '%s' parametresi belirtilmemiş"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr "İstek ayrıştırılamıyor. Gerekli öznitelik '%s' belirtilmemiş"
-
-msgid "Failed to remove supplemental groups"
-msgstr "Destekleyici gruplar kaldırılamadı"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "Gid %s ayarlanamadı"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "Uid %s ayarlanamadı"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "%(ip)s'ye %(type)s tünel bağlantı noktası kurulumu başarısız"
-
-#, python-format
-msgid "Failure waiting for address %(address)s to become ready: %(reason)s"
-msgstr "%(address)s adresinin hazır olmasını bekleme başarısız: %(reason)s"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "Değişken IP %(floatingip_id)s bulunamadı"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr ""
-"TCP/UDP iletişim kuralları için, port_range_min <= port_range_max olmalı"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "ip_lib çağrılarını kök yardımcıyı kullanmaya zorla"
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-"%(subnet_cidr)s alt ağı için çakışan ayırma havuzları: %(pool_1)s %(pool_2)s "
-"bulundu."
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"Geçit %(router_id)s yönlendiricisi için güncellenemedi, çünkü bir ya da "
-"fazla değişken IP tarafından %(net_id)s harici ağına bir geçit gerekli."
-
-msgid "Gateway is not valid on subnet"
-msgstr "Geçit alt ağda geçerli değil"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr ""
-"İlklendirilmesinden sonra metadata vekilini çalıştıran grup (gid veya isim)"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"İlklendirilmesinden sonra metadata vekilini çalıştıran grup (gid veya isim) "
-"(boşsa: ajan etkin grup)."
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr "İlklendirilmesinden sonra bu süreci çalıştıran grup (gid veya isim)"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Neutron kaç kere MAC üretmeyi deneyecek"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"ICMP kodu (port-range-max) %(value)s sağlanmış ama ICMP türü (port-range-"
-"min) eksik."
-
-msgid "ID of network"
-msgstr "Ağ kimliği"
-
-msgid "ID of network to probe"
-msgstr "Sorgulanacak ağ ID'si"
-
-msgid "ID of probe port to delete"
-msgstr "Silinecek deneme bağlantı noktasının kimliği"
-
-msgid "ID of probe port to execute command"
-msgstr "Komutun çalıştırılacağı deneme bağlantı noktası kimliği"
-
-msgid "ID of the router"
-msgstr "Yönetici kimliği"
-
-#, python-format
-msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s"
-msgstr "%(ip)s IP adresi %(subnet_id)s alt ağında zaten ayrılmış"
-
-#, python-format
-msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s"
-msgstr "%(ip)s IP adresi %(subnet_id)s alt ağına ait değil"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr ""
-"%(ip_address)s IP adresi belirtilen ağdaki alt ağlardan hiçbiri için geçerli "
-"bir IP değil."
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr "%(ip_address)s IP adresi belirtilen alt ağ için geçerli bir IP değil."
-
-msgid "IP address used by Nova metadata server."
-msgstr "Nova metadata sunucusu tarafından kullanılan IP adresi."
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "IP ayırma subnet_id veya ip_address gerektirir"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply aşağıdakı iptables bilgileri uygulanamadı\n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"Alt ağ otomatik adresler için yapılandırıldığından %(address)s IPv6 adresi "
-"%(id)s alt ağı üzerinde bir bağlantı noktasına doğrudan atanamaz"
-
-#, python-format
-msgid ""
-"IPv6 address %(ip)s cannot be directly assigned to a port on subnet "
-"%(subnet_id)s as the subnet is configured for automatic addresses"
-msgstr ""
-"Alt ağ otomatik adres olarak yapıladırıldığı için %(ip)s IPv6 adresi "
-"doğrudan %(subnet_id)s alt ağındaki bir bağlantı noktasına atanamaz."
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"Harici bir yönlendiriciden RA almak için yapılandırılmış %s IPv6 alt ağı "
-"Neutron Yönlendiriciye eklenemez."
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"True olduğunda, ağın tercih edilen MTU'su bilindiğinde MTU ayarları VM'lere "
-"ağ yöntemleriyle (DHCP ve RA MTU seçenekleri) dağıtılmaya çalışılır."
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr ""
-"True ise, destekleyen eklentilerin VLAN şeffaf ağlar oluşturmasına izin ver."
-
-msgid "Illegal IP version number"
-msgstr "Kuraldışı IP sürüm numarası"
-
-#, python-format
-msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id"
-msgstr "Geçersiz pci_vendor_info: \"%s\", vendor_id:product_id çifti olmalı"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr "/%s boyutunda alt ağ ayırmak için yetersiz önek alanı"
-
-msgid "Insufficient rights for removing default security group."
-msgstr "Varsayılan güvenlik grubunu silmek için yeterli izin yok."
-
-msgid "Interface to monitor"
-msgstr "İzlenecek arayüz"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr ""
-"Alt süreç canlılığı kontrolleri aralığı (saniye), kapatmak için 0 kullanın"
-
-msgid "Interval between two metering measures"
-msgstr "İki ölçüm arasındaki aralık"
-
-msgid "Interval between two metering reports"
-msgstr "İki ölçme raporu arasındaki aralık"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"IPv6 adres kipi için geçersiz CIDR %s. OpenStack önekin /64 olmasına ihtiyaç "
-"duyan EUI-64 adres biçimini kullanır."
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "Geçersiz Aygıt %(dev_name)s: %(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr ""
-"Geçersiz kimlik doğrulama türü: %(auth_type)s, geçerli türler: "
-"%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "IP havuzu: '%s' için geçersiz veri biçimi"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "extra-dhcp-opt için geçersiz veri biçimi: %(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "Sabit IP için geçersiz veri biçimi: '%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "İstemci rotası için geçersiz veri biçimi: '%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "İsim sunucu için geçersiz veri biçimi: '%s'"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "Rotalar için geçersiz biçim: %(routes)s, %(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "Geçersiz biçim: %s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "%(attr)s için geçersiz girdi. Sebep: %(reason)s."
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "İşlem için geçersiz girdi: %(error_message)s"
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"Geçersiz girdi. '%(target_dict)s' şu anahtarları içeren bir sözlük olmalı: "
-"%(expected_keys)s"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr "Geçersiz sunucu durumu: %(state)s, geçerli durumlar: %(valid_states)s"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "Geçersiz eşleştirme: '%s'"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "Geçersiz pci yuvası %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr "Geçersiz sağlayıcı biçimi. Son kısım 'default' ya da boş olmalı: %s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "Geçersiz rota: %s"
-
-msgid "Invalid service provider format"
-msgstr "Geçersiz servis sağlayıcı biçimi"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"ICMP %(field)s (%(attr)s) %(value)s için geçersiz değer. 0 dan 255'e kadar "
-"olmalı."
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "%(port)s bağlantı noktası için geçersiz değer"
-
-msgid "Keepalived didn't respawn"
-msgstr "Keepalived yeniden başlamadı"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "'%(mapping)s' eşleştirmesindeki %(key)s anahtarı benzersiz değil"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "Sınır tam sayı 0 ya da daha büyüğü olmalı '%d' değil"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "Servis engellemeyi önlemek için kiralama sayısını sınırla."
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"VLAN sağlayıcı ve kiracı ağlar için kullanılabilir physical_network "
-"isimlerini belirten <physical_network>:<vlan_min>:<vlan_max> veya "
-"<physical_network> listesi, aynı zamanda her birinde kiracı ağlara ayırma "
-"için VLAN etiketleri aralıkları."
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"neutron.ml2.type_drivers isim uzayından yüklenecek ağ türü sürücü giriş "
-"noktaları listesi."
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "VXLAN son uçlarının yerel IP adresi."
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "Tünel uç noktasının yerel IP adresi."
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "Metadata Vekil UNIX alan soketi için konum."
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "Metadata Vekil UNIX alan soketi konumu"
-
-msgid "Location of pid file of this process."
-msgstr "Bu sürecin pid dosyasının konumu."
-
-msgid "Location to store DHCP server config files"
-msgstr "DHCP sunucu yapılandırma dosyalarının depolanacağı konum"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "IPv6 RA yapılandırma dosyalarının kaydedileceği konum"
-
-msgid "Location to store child pid files"
-msgstr "Alt süreç dosyalarının kaydedileceği konum"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr "Keepalived/conntrackd yapılandırma dosyalarının tutulacağı konum"
-
-msgid "Log agent heartbeats"
-msgstr "Ajan kalp atışlarını kaydet"
-
-msgid "MTU setting for device."
-msgstr "Aygıt için MTU ayarı."
-
-msgid "MTU size of veth interfaces"
-msgstr "veth arayüzlerinin MTU boyutu"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "L2 ajanın DVR kipinde çalışmasını sağla."
-
-msgid "Malformed request body"
-msgstr "Kusurlu istek gövdesi"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "İzin verilen adres çiftlerinin azami sayısı"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "Alt ağ başına azami istemci sayısı"
-
-msgid "Metering driver"
-msgstr "Ölçme sürücüsü"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "Ölçme etiketi %(label_id)s mevcut değil"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "Ölçme etiketi kuralı %(rule_id)s mevcut değil"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"remote_ip_prefix %(remote_ip_prefix)s sahip ölçme etiket kuralı başka bir "
-"tanesiyle çatışıyor"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr ""
-"Sorgulamayı ovsdb arayüzünü değişiklikler için izleyerek olabildiğince azalt."
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "Eşleştirmede anahtar eksik: '%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "Eşleştirmede değer eksik: '%s'"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr ""
-"agent_type=%(agent_type)s ve istemci=%(host)s olan birden fazla ajan bulundu"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "%s servisi için birden fazla varsayılan sağlayıcı"
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "%s servisi için birden fazla eklenti yapılandırılmış"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "%s servisi için birden fazla sağlayıcı belirtilmiş"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr ""
-"Toplu güvenlik grubu kuralı oluşturmada birden çok tenant_id'ye izin "
-"verilmiyor"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr ""
-"Bağlantı noktası aralığı verilmişse aynı zamanda iletişim kuralı sağlanmalı."
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr "Akış ekleme ya da değiştirmede bir ya da fazla eylem belirtilmeli"
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "Kullanılacak Open vSwitch köprüsünün ismi"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr ""
-"Kullanılacak nova gölgesinin ismi. Keystone birden fazla bölgeyi yönetiyorsa "
-"kullanışlıdır."
-
-msgid "Name of the FWaaS Driver"
-msgstr "FWaaS Sürücüsü ismi"
-
-msgid "Namespace of the router"
-msgstr "Yönetici isim uzayı"
-
-msgid "Native pagination depend on native sorting"
-msgstr "Doğal sayfalama doğal sıralamaya bağlıdır"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "Negatif fark (alt sürüm) desteklenmiyor"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "Negatif ilişkili sürüm (alt sürüm) desteklenmiyor"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "%s ağı geçerli bir harici ağ değil"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "Ağ %s harici bir ağ değil"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"%(excluded_ranges)s IP aralıkları hariç %(parent_range)s IP aralığından "
-"%(size)s botyutunda ağ bulunamadı."
-
-msgid "Network that will have instance metadata proxied."
-msgstr "Sunucu metadata'sı vekillenecek ağ."
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "Ağ türü değeri '%s' desteklenmiyor"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "ML2 eklentisi ağ türü değerine ihtiyaç duyuyor"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr "Ajan tarafından desteklenen ağ türleri (gre ve/veya vxlan)."
-
-msgid "Neutron Service Type Management"
-msgstr "Neutron Servis Türü Yönetimi"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "Neutron core_plugin yapılandırılmamış!"
-
-msgid "Neutron plugin provider module"
-msgstr "Neutron eklenti sağlayıcı modülü"
-
-msgid "Neutron quota driver class"
-msgstr "Neutron kota sürücü sınıf"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr "%s harici ağıyla ilişkilendirilmiş uygun l3 ajanı bulunamadı"
-
-#, python-format
-msgid "No more IP addresses available for subnet %(subnet_id)s."
-msgstr "%(subnet_id)s alt ağı için kullanılabilir başka IP adresi yok."
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "%(net_id)s ağında başka kullanılabilir IP adresi yok."
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"%(router_id)s yönlendiricisi oluşturulurken kullanılabilir Sanal "
-"Yönlendirici Tanımlayıcı (VRID) yok. Kiracı başına HA Yönlendirici sayısı "
-"sınırı 254."
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "'%s' servisi için sağlayıcı belirtilmemiş, çıkılıyor"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"Mevcut bir DVR düğümünden başka bir %(agent_id)s L3 ajanına elle "
-"%(router_type)s yönlendirici %(router_id)s atamaya izin verilmiyor."
-
-msgid "Not authorized."
-msgstr "Yetkiniz yok."
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"HA'dan emin olmak için yeterli l3 ajanı yok. Asgari %(min_agents)s gerekli, "
-"kullanılabilir %(num_agents)s var."
-
-msgid ""
-"Number of DHCP agents scheduled to host a tenant network. If this number is "
-"greater than 1, the scheduler automatically assigns multiple DHCP agents for "
-"a given tenant network, providing high availability for DHCP service."
-msgstr ""
-"Bir kiracı ağı sunmak için zamanlanan DHCP ajanları sayısı. Bu sayı 1'den "
-"büyükse, zamanlayıcı verilen bir kiracı ağa otomatik olarak birden çok DHCP "
-"ajanı atar, ve DHCP servisi için yüksek kullanılabilirlik sağlar."
-
-msgid "Number of RPC worker processes for service"
-msgstr "Servis için RPC işçi süreçlerinin sayısı"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr "Metadata sunucu soketinin yapılandırılacağı birikmiş isteklerin sayısı"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr "Soketin birlikte yapılandırılacağı backlog isteklerinin sayısı"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Kiracı başına izin verilen değişken IP sayısı. Negatif değer sınırsız "
-"demektir."
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Kiracı başına izin verilen ağ sayısı. Negatif değer sınırsız anlamına gelir."
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Kiracı başına izin verilen bağlantı noktası sayısı. Negatif değer sınırsız "
-"anlamına gelir."
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr ""
-"Kiracı başına izin verilen yönlendirici sayısı. Negatif değer sınırsız "
-"anlamına gelir."
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr ""
-"Gönderilecek olay varsa olayların nova'ya gönderilmesi arasında beklenecek "
-"saniye sayısı."
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "Dinlemeye devam etmek için saniye sayısı"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Kiracı başına izin verilen güvenlik grubu sayısı. Negatif değer sınırsız "
-"anlamına gelir."
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr ""
-"Kiracı başına izin verilen güvenlik kuralı sayısı. Negatif bir değer "
-"sınırsız demektir."
-
-msgid ""
-"Number of separate API worker processes for service. If not specified, the "
-"default is equal to the number of CPUs available for best performance."
-msgstr ""
-"Servis için ayrı API işçi süreçlerinin sayısı. Belirtilmezse, varsayılan "
-"olarak en iyi performans için CPU sayısına eşit değerdir."
-
-msgid ""
-"Number of separate worker processes for metadata server (defaults to half of "
-"the number of CPUs)"
-msgstr ""
-"Metadata sunucu için ayrı işçi süreçleri sayısı (CPU sayısının yarısı "
-"varsayılır)"
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr ""
-"Kiracı başına izin verilen alt ağ sayısı, negatif değer sınırsız anlamına "
-"gelir."
-
-msgid "OK"
-msgstr "Tamam"
-
-msgid "One or more ports have an IP allocation from this subnet."
-msgstr "Bir ya da fazla bağlantı noktasının bu alt ağdan ayrılmış IP'si var."
-
-msgid "Only admin can view or configure quota"
-msgstr "Yalnızca yönetici kotaları görüntüleyebilir ya da yapılandırabilir"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr "Yalnızca yönetici başka bir kiracı için kotalara erişebilir"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr ""
-"Tek seferde bir güvenlik profili için kuralların güncellenmesine izin verilir"
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "Yalnızca remote_ip_prefix veya remote_group_id sağlanabilir."
-
-msgid "OpenFlow interface to use."
-msgstr "Kullanılacak OpenFlow arayüzü."
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"İşlem %(op)s %(port_id)s bağlantı noktası üzerinde %(device_owner)s "
-"device_owner için  desteklenmiyor."
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr "Bu dosyayla varsayılan dnsmasq ayarlarının üzerine yazın"
-
-msgid "Owner type of the device: network/compute"
-msgstr "Aygıt sahip türü: ağ/hesap"
-
-msgid "POST requests are not supported on this resource."
-msgstr "POST istekleri bu kaynakta desteklenmiyor."
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "bridge_mappins ayrıştırma başarısız: %s."
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "Desteklenen pci_vendor_devs ayrıştırması başarısız"
-
-msgid "Path to PID file for this process"
-msgstr "Bu sürecin PID dosyasının yolu"
-
-msgid "Path to the router directory"
-msgstr "Yönlendirici dizininin yolu"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr "Tünel köprüsü için tümleştirme köprüsündeki eş yama bağlantı noktası."
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr "Tümleştirme köprüsü için tünel köprüsündeki eş yama bağlantı noktası."
-
-msgid "Ping timeout"
-msgstr "Ping zaman aşımı"
-
-msgid "Plugin does not support updating provider attributes"
-msgstr "Eklenti sağlayıcı özniteliklerini güncellemeyi desteklemiyor"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "%(id)s bağlantı noktası %(address)s sabit ip'sine sahip değil"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"%(port_id)s bağlantı noktası %(floatingip_id)s değişken IP'sinden başka bir "
-"kiracı ile ilişkilendirilmiş bu yüzden bağlanamaz."
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr ""
-"Bağlantı noktasında izin verilen adres çiftlerine sahip olmak için bağlantı "
-"noktası güvenliği etkin olmalı."
-
-msgid "Port does not have port security binding."
-msgstr "Bağlantı noktası bağlantı noktası güvenlik bağına sahip değil."
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr ""
-"Bağlantı noktasıyla ilişkili güvenlik grubu var. Güvenlik grubu kaldırılana "
-"kadar bağlantı noktası güvenliği ya da ip adresi kapatılamaz"
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr ""
-"Güvenlik gruplarını kullanmak için bağlantı noktası güvenliği etkin olmalı "
-"ve bağlantı noktası bir IP adresine sahip olmalı."
-
-msgid "Private key of client certificate."
-msgstr "İstemci sertifikasının özel anahtarı."
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "Deneme %s silindi"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "Deneme oluşturuldu: %s "
-
-msgid "Process is already started"
-msgstr "Süreç zaten başlamış"
-
-msgid "Process is not running."
-msgstr "Süreç çalışmıyor."
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "Nova metadata'ya erişmek için iletişim kuralı, http veya https"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"Devresel görev zamanlayıcıyı başlatırken izdiham yaratmayı engellemek için "
-"beklenecek rastgele saniye aralığı. (0 olarak ayarlayıp kapatabilirsiniz)"
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr "Uzak metadata sunucu dahil sunucu hatası yaşadı."
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"Ajan tarafından yükü rapor edilen kaynak türünü temsil eder. Bu \"ağlar\", "
-"\"alt ağlar\", veya \"bağlantı noktaları\" olabilir. Belirtildiğinde "
-"(varsayılanı ağlardır), sunucu ajan yapılandırma nesnesinin parçası olarak "
-"gönderilen belirli yükü ajan rapor durumundan çıkartır, ki bu her "
-"report_interval'da tüketilen kaynak sayısıdır. dhcp_load_type "
-"network_scheduler_driver WeightScheduler olduğunda network_scheduler_driver "
-"= neutron.scheduler.WeightScheduler ile birlikte kullanılabilir, "
-"dhcp_load_type dengelenen kaynak için seçimi temsil edecek şekilde "
-"yapılandırılabilir. Örneğin: dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr "İstek Başarısız: isteğiniz işlenirken dahili sunucu hatası oluştu."
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"İstek kopya adres çifti içeriyor: mac_address %(mac_address)s ip_address "
-"%(ip_address)s."
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"%(network_id)s ağı için istenen %(cidr)s cidr'e sahip alt ağ başka bir alt "
-"ağla çatışıyor"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"'%(resource_id)s' kaynağı '%(service_type)s' servis türü için zaten "
-"'%(provider)s' sağlayıcısıyla ilişkilendirilmiş"
-
-msgid "Resource body required"
-msgstr "Kaynak gövdesi gerekiyor"
-
-msgid ""
-"Resource name(s) that are supported in quota features. This option is now "
-"deprecated for removal."
-msgstr ""
-"Kota özelliklerinde desteklenen kaynak isim(ler)i. Bu seçenek artık "
-"kaldırılmak üzere kullanılmıyor."
-
-msgid "Resource not found."
-msgstr "Kaynak bulunamadı."
-
-msgid "Resources required"
-msgstr "Kaynaklar gerekiyor"
-
-msgid "Root helper daemon application to use when possible."
-msgstr "Mümkün olduğunda kullanılacak kök yardımcı artalan işlemi uygulaması."
-
-msgid "Root permissions are required to drop privileges."
-msgstr "İzinlerin düşürülmesi için Root izinleri gerekli."
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "Yönlendirici %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "Yönlendirici %(router_id)s bulunamadı"
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr ""
-"%(router_id)s yönlendiricisi %(port_id)s kimliğine sahip bir arayüze sahip "
-"değil"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr ""
-"%(router_id)s yönlendiricisi %(subnet_id)s alt ağı üzerinde arayüze sahip "
-"değil"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "Yönlendirici zaten %s alt ağında bir bağlantı noktasına sahip"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"%(router_id)s yönlendiricisi üstündeki %(subnet_id)s alt ağı için "
-"yönlendirici arayüzü silinemez, bir ya da fazla değişken IP tarafından "
-"ihtiyaç duyuluyor."
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"%(router_id)s yönlendiricisi üzerindeki %(subnet_id)s alt ağı için rota "
-"arayüzü silinemiyor, çünkü bir ya da fazla rota tarafından ihtiyaç duyuluyor."
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr "Bağlı sunucuların metadata'larının vekilleneceği yönlendirici."
-
-msgid "Run as daemon."
-msgstr "Artalan işlemi olarak çalış."
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"Düğümlerin sunucuya durum raporu yapması arasında geçen saniye; "
-"agent_down_time'dan az olmalı, en iyisi agent_down_time'ın yarısı ya da daha "
-"azı olmasıdır."
-
-msgid "Seconds between running periodic tasks"
-msgstr "Devresel görevleri çalıştırma arasındaki saniye"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"Ajanın çalışmıyor olduğuna karar vermek için geçmesi gereken saniye; ajanın "
-"gerçekten kapalı olduğundan emin olmak için report_interval değerinin en az "
-"iki katı olmalı."
-
-#, python-format
-msgid "Security Group %(id)s %(reason)s."
-msgstr "Güvenlik Grubu %(id)s %(reason)s."
-
-#, python-format
-msgid "Security Group Rule %(id)s %(reason)s."
-msgstr "Güvenlik Grubu Kuralı %(id)s %(reason)s."
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "%(id)s güvenlik grubu mevcut değil"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "Güvenlik grubu kuralı %(id)s mevcut değil"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "Güvenlik grubu kuralı zaten mevcut. Kural kimliği %(id)s."
-
-#, python-format
-msgid ""
-"Security group rule protocol %(protocol)s not supported. Only protocol "
-"values %(values)s and integer representations [0 to 255] are supported."
-msgstr ""
-"Güvenlik grubu kuralı iletişim kuralı %(protocol)s desteklenmiyor. Yalnızca "
-"iletişim kuralı değerleri %(values)s ve tam sayı temsilleri [0 dan 255 e]  "
-"destekleniyor."
-
-msgid "Segments and provider values cannot both be set."
-msgstr "Dilimler ve sağlayıcı değerleri aynı anda ayarlanamaz."
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"Bağlantı noktası verisi (sabit_ipler/değişkenip) değiştiğinde nova'ya "
-"bildirim gönder ki nova zulasını güncelleyebilsin."
-
-msgid "Send notification to nova when port status changes"
-msgstr "Bağlantı noktası durumu değiştiğinde nova'ya bildirim gönder"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"HA kurulumu için bu kadar karşılıksız ARP gönder, 0'a eşit ya da küçükse, "
-"özellik kapatılır"
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr ""
-"%(service_type)s servis türü için '%(provider)s' servis sağlayıcı bulunamadı"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr "%(service_type)s servis türü varsayılan servis sağlayıcıya sahip değil"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"Ajan SIGTERM aldıktan sonra yeni rpc çağrıları için saniye olarak yeni zaman "
-"aşımı ayarla. Değer 0 olarak ayarlanırsa, rpc zaman aşımı değiştirilmeyecek"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"Dışa giden IP paketi taşıyan GRE/VXLAN tünelinde bölümlenme yapma (DF) "
-"bitini ayarla ya da ayarlama."
-
-msgid "Shared address scope can't be unshared"
-msgstr "Paylaşılan adres kapsamının paylaştırılması durdurulamaz"
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"Bazı kiracılar birden fazla 'default' isimli güvenlik grubuna sahip: "
-"%(duplicates)s. Veri tabanı yükseltilmeden önce tüm 'default' güvenlik "
-"grupları çözülmeli."
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr ""
-"İstekte doğrulanmış kiracıdan başka 'tenant_id' belirtme yönetici yetkileri "
-"gerektirir"
-
-msgid "String prefix used to match IPset names."
-msgstr "IPset isimleriyle eşleştirme için kullanılan karakter dizisi önekleri."
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr "Yönlendirici arayüzü için alt ağ bir geçit IP'ye sahip olmalı"
-
-msgid ""
-"Subnet has a prefix length that is incompatible with DHCP service enabled."
-msgstr "Alt ağ etkin DHCP servisiyle uyumsz bir önek uzunluğuna sahip."
-
-msgid "Subnet pool has existing allocations"
-msgstr "Alt ağ havuzunun mevcut ayırmaları var"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr "L3 HA yönetici ağı için kullanılan alt ağ."
-
-msgid "Suffix to append to all namespace names."
-msgstr "Tüm isim uzaylarına eklenecek son ek."
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr ""
-"Kiracıların oluşturabileceği yönlendirici türünü belirlemek için sistem "
-"genelinde bayrak. Yalnızca yönetici üzerine yazabilir."
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "Metadata sunucu istekleri için dinlenecek TCP bağlantı noktası."
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr ""
-"Neutron metadata isim uzayı vekili tarafından kullanılan TCP bağlantı "
-"noktası."
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Nova metadata sunucusu tarafından kullanılan TCP Bağlantı noktası."
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "Vxlan arayüz iletişim paketleri için TOS."
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "Vxlan arayüz iletişim kuralı paketleri için TTL."
-
-#, python-format
-msgid "Table %s can only be queried by UUID"
-msgstr "Tablo %s yalnızca UUID ile sorgulanabilir"
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr ""
-"Kiracı %(tenant_id)s'in bu ağda %(resource)s oluşturmasına izin verilmiyor"
-
-msgid "Tenant network creation is not enabled."
-msgstr "Kiracı ağ oluşturma etkin değil."
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"Neutron birden fazla harici ağa sahip olduğundan "
-"'gateway_external_network_id' seçeneği bu ajan için yapılandırılmalıdır."
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr ""
-"Yapılandırma dosyasında belirtilen HA Ağ CIDR'i geçerli değil; %(cidr)s."
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "VXLAN tünelleri için kullanılacak UDP bağlantı noktası."
-
-#, python-format
-msgid ""
-"The address allocation request could not be satisfied because: %(reason)s"
-msgstr "Adres ayırma isteği sağlanamadı çünkü: %(reason)s"
-
-msgid "The advertisement interval in seconds"
-msgstr "Saniye cinsinden duyuru aralığı"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "Ayırma havuzu %(pool)s geçerli değil."
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr ""
-"Ayırma havuzu %(pool)s %(subnet_cidr)s alt ağ cidr'inin ötesine uzanıyor."
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr ""
-"'%(attr)s' özniteliği başka kaynağa referans, '%(resource)s' sıralama "
-"tarafından kullanılamaz"
-
-msgid "The core plugin Neutron will use"
-msgstr "Neutron'un kullanacağı çekirdek eklenti"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "DHCP sunucusunu yönetmek için kullanılan sürücü."
-
-msgid "The driver used to manage the virtual interface."
-msgstr "Sanal arayüzü yönetmek için kullanılan sürücü."
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"device_id %(device_id)s sizin kiracınıza ait değil veya başka bir kiracının "
-"yönlendiricisiyle eşleşiyor."
-
-msgid "The host IP to bind to"
-msgstr "Bağlanılacak istemci IP'si"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "OVSDB ile etkileşim için arayüz"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr ""
-"Tek bir yanıtta döndürülecek azami öğe sayısı, 'infinite' değeri ya da "
-"negatif tam sayı sınır yok demektir"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr "Ağ %(network_id)s zaten %(agent_id)s DHCP Ajanı tarafından sunuluyor."
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr "Ağ %(network_id)s %(agent_id)s DHCP ajanı tarafından sunulmuyor."
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr "İzin verilen adres çifti sayısı %(quota)s azami değerini aşıyor."
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr ""
-"Ajanın yerel aygıt değişiklikleri için sorgulama yapma aralığında "
-"bekleyeceği saniye sayısı."
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr ""
-"İletişim koptuktan sonra ovsdb izleyiciyi yeniden başlatmak için beklenecek "
-"saniye sayısı."
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "sort_keys ile sort_dirs sayıları aynı olmalı"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "Bağlantı noktası '%s' silinmiş"
-
-msgid "The port to bind to"
-msgstr "Bağlanılacak bağlantı noktası"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "İstenen içerik türü %s geçersiz."
-
-msgid "The resource could not be found."
-msgstr "Kaynak bulunamadı."
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr ""
-"%(router_id)s yönlendiricisi zaten %(agent_id)s L3 Ajanı tarafından "
-"sunuluyor."
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr ""
-"Sunucu ya hata verdi ya da istenen işlemi yapabilecek yeterlilikte değil."
-
-msgid "The service plugins Neutron will use"
-msgstr "Neutron'un kullanacağı servis eklentileri"
-
-#, python-format
-msgid "The subnet request could not be satisfied because: %(reason)s"
-msgstr "Alt ağ isteği sağlanamadı çünkü: %(reason)s"
-
-msgid "The type of authentication to use"
-msgstr "Kullanılacak kimlik doğrulama türü"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "%(element)s için '%(value)s' değeri geçerli değil."
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"Ajanın çalışma kipi. İzin verilen kipler: 'legacy' - Bu L3 ajanının DNAT, ve "
-"SNAT gibi L3 servisleri sağlamak için merkezi ağ düğümüne kurulduğu mevcut "
-"davranışı korur. DVR'a geçmek istemiyorsanız bu kipi kullanın. 'dvr' - bu "
-"kip DVR işlevini etkinleştirir ve bir hesap istemcisi üzerinde çalışan L3 "
-"ajanı için kullanılmalıdır. 'dvr_snat' - bu DVR ile beraber merkezi SNAT "
-"desteğini etkinleştirir.  Bu kip merkezi bir düğümde çalışan L3 ajanı için "
-"kullanılmalıdır (veya tek-istemcili kurulumlarda, örn. devstack)"
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"Tüm OpenvSwitch köprülerinde tüm bağlantı noktalarını silmek için True. "
-"Neutron tarafından tümleştirme ve harici ağ köprüleri üzerinde oluşturulan "
-"bağlantı noktalarını silmek için False."
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "Tünel IP değerine ML2 eklentisi tarafından ihtiyaç duyuluyor"
-
-msgid "Tunnel bridge to use."
-msgstr "Kullanılacak tünel köprüsü."
-
-msgid "URL to database"
-msgstr "Veri tabanı URL'si"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "%s'e erişilemiyor"
-
-#, python-format
-msgid "Unable to calculate %(address_type)s address because of:%(reason)s"
-msgstr "%(reason)s sebebiyle %(address_type)s adresi hesaplanamıyor"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr ""
-"%(router_id)s için işlem tamamlanamıyor. Rota sayısı %(quota)s azami "
-"sayısını aşıyor."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"%(subnet_id)s için işlem tamamlanamadı. DNS isim sunucuları sayısı %(quota)s "
-"sayısını aşıyor."
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr ""
-"%(subnet_id)s için işlem tamamlanamadı. İstemci rotaları sayısı %(quota)s "
-"sınırını aşıyor."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr ""
-"%(net_id)s ağı için işlem tamamlanamadı. %(ip_address)s IP adresi kullanımda."
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr ""
-"%(net_id)s ağı için işlem tamamlanamadı. Mac adresi %(mac)s kullanımda."
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"%(net_id)s ağı üzerinde işlem tamamlanamadı. Ağda hala kullanımda olan bir "
-"ya da daha fazla bağlantı noktası var."
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"%(net_id)s ağı için %(port_id)s bağlantı noktası üzerinde işlem "
-"tamamlanamadı. Bağlantı noktasına eklenmiş aygıt %(device_id)s zaten var."
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "%s degeri dönüştürülemiyor"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "Ajan Geçit Bağlantı Noktası oluşturulamıyor"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "SNAT Arayüz Bağlantı Noktası oluşturulamıyor"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr "Düz ağ oluşturulamıyor. Fiziksel ağ %(physical_network)s kullanımda."
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr ""
-"Ağ oluşturulamıyor. İzin verilen azami deneme içinde kullanılabilir ağ "
-"bulunamadı."
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr "Ağ oluşturulamıyor. Ayırma için hiçbir kiracı ağ kullanılabilir değil."
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"Ağ oluşturulamıyor. %(physical_network)s fiziksel ağındaki %(vlan_id)s VLAN "
-"kullanımda."
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr "Ağ oluşturulamıyor. Tünel ID'si %(tunnel_id)s kullanımda."
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "%s içim mac adresi tanımlanamadı"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "İstek gövdesinde '%s' bulunamadı"
-
-#, python-format
-msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s"
-msgstr "%(subnet_id)s alt ağında %(ip_address)s IP adresi bulunamıyor"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr "%(net_id)s harici ağında IP adresi bulunamadı."
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "%s içinde kaynak ismi bulunamadı"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "IPv4 öneki için EUI64 ile IP adresi üretilemedi"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "%(host)s istemcisi için benzersiz DVR mac üretilemedi."
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "%(net_id)s ağı üzerinde benzersiz mac üretilemedi."
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr ""
-"%s den bir hedef alan tanımlanamadı. Eşleşme %%(<field_name>)s biçiminde "
-"olmalı"
-
-#, python-format
-msgid "Unable to update address scope %(address_scope_id)s : %(reason)s"
-msgstr "Adres kapsamı %(address_scope_id)s güncellenemiyor: %(reason)s"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr "Eşleşme:%(match)s doğrulanamadı çünkü üst kaynak: %(res)s bulunamadı"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "Beklenmedik yanıt kodu: %s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "Beklenmeyen yanıt: %s"
-
-msgid "Unimplemented commands"
-msgstr "Uygulanmayan komutlar"
-
-msgid "Unknown API version specified"
-msgstr "Bilinmeyen API sürümü belirtildi"
-
-#, python-format
-msgid "Unknown address type %(address_type)s"
-msgstr "Bilinmeyen adres türü %(address_type)s"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "Bilinmeyen öznitelik '%s'."
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "Tanınmayan zincir: %r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "Bilinmeyen kota kaynakları %(unknown)s."
-
-msgid "Unmapped error"
-msgstr "Unmapped hata"
-
-msgid "Unrecognized action"
-msgstr "Tanınmayan eylem"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "Tanınmayan öznitelik(ler) '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "Desteklenmeyen içerik türü"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "Desteklenmeyen ağ türü %(net_type)s."
-
-msgid "Unsupported request type"
-msgstr "Desteklenmeyen istek türü"
-
-msgid "Updating default security group not allowed."
-msgstr "Varsayılan güvenlik grubunu güncellemeye izin verilmiyor."
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"Uzak MAC ve IP'leri öğrenmek ve tünel ölçeklenebilirliğini artırmak için ML2 "
-"l2population mekanizması sürücüsünü kullan."
-
-msgid "Use broadcast in DHCP replies"
-msgstr "DHCP yanıtlarında yayın kullan"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr "Ya --delta ya ilişkili sürüm kullanın, ikisini birden değil"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr ""
-"İlklendirilmesinden sonra metadata vekili çalıştıran kullanıcı (uid veya "
-"isim)"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"İlklendirilmesinden sonra metadata vekilini çalıştıran kullanıcı (uid veya "
-"isim) (boşsa: ajan etkin kullanıcı)."
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr ""
-"İlklendirilmesinden sonra bu süreci çalıştıran kullanıcı (uid veya isim)"
-
-msgid "VRRP authentication password"
-msgstr "VRRP kimlik doğrulama parolası"
-
-msgid "VRRP authentication type"
-msgstr "VRRP kimlik doğrulama türü"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"Sözlük anahtarlarının doğrulanması başarısız. Beklenen anahtarlar: "
-"%(expected_keys)s Sağlanan anahtarlar: %(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "Onaylayan '%s' mevcut değil."
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "'%(mapping)s' eşleştirmesindeki %(value)s değeri benzersiz değil"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"Dosya kaydını izle. Kayıt izleme metadata_proxy_user/group metadata vekil "
-"kayıt dosyasına okuyamıyorsa/yazamıyorsa kapatılmalıdır."
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr ""
-"Neutron durum dosyalarının nerede depolanacağı. Bu dizin ajan tarafından "
-"yazılabilir olmalıdır."
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"IPv6 ile, harici geçit için kullanılan ağ ilişkili bir alt ağa sahip olmak "
-"zorunda değildir, çünkü otomatik olarak atanan bağlantı-yerel adres (LLA) "
-"kullanılabilir. Ancak bir IPv6 geçit adresine varsayılan rota için sonraki-"
-"nokta olarak kullanılmak üzere ihtiyaç vardır. Burada bir IPv6 geçit adresi "
-"yapılandırılmazsa, (ve yalnızca bu durumda) neutron yönlendirici, varsayılan "
-"rotasını üst seviye yönlendirici duyurularından (RA) alacak şekilde "
-"yapılandırılır; ki bu durumda üst seviye yönlendirici bu RA'ları gönderecek "
-"şekilde yapılandırılmalıdır. ipv6_gateway, yapılandırıldığında, üst seviye "
-"yönlendirici üzerindeki arayüzün LLA'sı olmalıdır. Eğer genel benzersiz "
-"adres (GUA) kullanan bir sonraki-nokta isteniyorsa, bu ağa ayrılmış bir alt "
-"ağ vasıtasıyla yapılmalıdır, bu parametre ile değil. "
-
-msgid "You must implement __call__"
-msgstr "__call__ fonksiyonunu uygulamalısınız."
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr ""
-"Köprü için bir yapılandırma dosyası sağlamalısınız - ya --config-file ya da "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "Bir sürüm ya da ilişkili fark sağlamalısınız"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr "allocation_pools yalnızca belirli alt ağ istekleri için izinli."
-
-msgid "binding:profile value too large"
-msgstr "bağ:profil değeri çok büyük"
-
-#, python-format
-msgid "cannot be deleted due to %s"
-msgstr "%s sebebiyle silinemez"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "cidr ve prefixlen birlikte verilmemelidir"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr "dhcp_agents_per_network >= 1 olmalı. '%s' geçersiz."
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "fixed_ip_addres port_id olmadan belirtilemez"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "%s aygıt sahibine sahip"
-
-msgid "in use"
-msgstr "kullanımda"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "ip komutu %(dev_name)s aygıtı üzerinde başarısız: %(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "ip bağlantı yeteneği %(capability)s desteklenmiyor"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "ip bağlantı komutu desteklenmiyor: %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr "subnetpool_id ve cidr olmadığında ip_version belirtilmelidir"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ip_version 4 olduğunda ipv6_address_mode geçerli değildir"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ip_version 4 olduğunda ipv6_ra_mode geçerli değildir"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"enable_dhcp False olarak ayarlıyken ipv6_ra_mode veya ipv6_address_mode "
-"ayarlanamaz."
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"ipv6_ra_mode kipi '%(ra_mode)s' olarak ipv6_address_mode '%(addr_mode)s' "
-"olarak ayarlanması geçersizdir. İki öznitelik de ayarlanıyorsa, aynı değerde "
-"olmalılar"
-
-msgid "mac address update"
-msgstr "mac adres güncellemesi"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"max_l3_agents_per_router %(max_agents)s yapılandırma parametresi geçerli "
-"değil. min_l3_agents_per_router %(min_agents)s den büyük ya da ona eşit "
-"olmalı."
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"min_l3_agents_per_router yapılandırma parametresi geçerli değil. HA için %s "
-"den büyük ya da eşit olmalı."
-
-msgid "network_type required"
-msgstr "network_type gerekli"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "network_type değeri '%s' desteklenmiyor"
-
-msgid "new subnet"
-msgstr "yeni alt ağ"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "physical_network '%s' VLAN sağlayıcı ağı için bilinmiyor"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "physical_network '%s' düz sağlayıcı ağı için bilinmiyor"
-
-msgid "physical_network required for flat provider network"
-msgstr "Düz sağlayıcı ağı için physical_network gerekir"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "sağlayıcı:physical_network %s ağı için belirtildi"
-
-msgid "record"
-msgstr "kayıt"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "eğer sağlanmışsa respawn_interval >= 0 olmalı."
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id aralık dışında (%(min)s %(max)s arasında)"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr ""
-"segmentation_id VLAN sağlayıcı ağı için physical_network'e ihtiyaç duyar"
-
-msgid "the nexthop is not connected with router"
-msgstr "Sonraki nokta yönlendiriciyle bağlı değil"
-
-msgid "the nexthop is used by router"
-msgstr "sonraki nokta yönlendirici tarafından kullanılıyor"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"external_process bizi /proc/cmdline arayüzünden takip edebilsin diye komut "
-"satırından sağlanan uuid."
diff --git a/neutron/locale/zh_CN/LC_MESSAGES/neutron.po b/neutron/locale/zh_CN/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index 46cd005..0000000
+++ /dev/null
@@ -1,2181 +0,0 @@
-# Chinese (Simplified, China) translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-09-06 10:15+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: zh_Hans_CN\n"
-"Language-Team: Chinese (China)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"命令:%(cmd)s\n"
-"退出代码:%(code)s\n"
-"标准输入:%(stdin)s\n"
-"标准输出:%(stdout)s\n"
-"标准错误:%(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s: 内部驱动错误。"
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s 不是有效的 %(type)s 标识"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"%(invalid_dirs)s 对于 sort_dirs 是无效值,有效值是“%(asc)s”和“%(desc)s”"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "对于 %(tunnel)s 提供程序网络,已禁止 %(key)s"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"已使用网络设置 %(current)s(原始设置 %(original)s)和网络段 %(segments)s 调"
-"用 %(method)s"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr "已使用子网设置 %(current)s 调用 %(method)s(原始设置 %(original)s)"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s 失败。"
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr "%(name)s“%(addr)s”与 ip_version“%(ip_version)s”不匹配"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "在 %s 处于脱机方式时,无法对其进行调用"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "%s 对于 sort_key 是无效属性"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "%s 对于 sort_keys 是无效属性"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s 不是一个有效的标签"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr "%s 必须实现 get_port_from_device 或 get_ports_from_devices。"
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "VLAN提供者网络中禁止%s"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "在平面供应商网络中禁止%s"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "在本地供应商网络中禁止%s"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "“%(data)s”超过最大长度 %(max_len)s"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "“%(data)s”没有在 %(valid_values)s 中"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s' 太大 - 必须不能大于 '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr "'%(data)s' 太小 - 必须至少 '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr "'%(data)s' 不是一个可识别的IP子网CIDR, 建议'%(cidr)s' "
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' 不是合法的nameserver %(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s' 不允许空白字符串"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "无法将“%s”转换为布尔值"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' 包含空格"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "“%s”不是字典"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "“%s”不是列表"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "“%s”是无效 IP 地址"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "“%s”是无效 IP 子网"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "“%s”是无效 MAC 地址"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "“%s”是无效 UUID"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' 不是一个有效的布尔值"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "“%s”不是有效的输入"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "“%s”是无效字符串"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "“%s”不是整数"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "“%s”不是整数或 uuid"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "“%s”没有采用格式 <键>=[值]"
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "“%s”应该为非负"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "0不允许作为CIDR前缀长度"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "在缺少子网池的情况下,必须指定 cidr"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"物理网络至 MTU 值的映射的列表。映射的格式为 <physnet>:<mtu val>。此映射允许指"
-"定不同于缺省 segment_mtu 值的物理网络 MTU 值。"
-
-msgid "A metering driver must be specified"
-msgstr "必须指定测量驱动程序"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr "用于为 Neutron 高级服务检索服务提供程序的 API"
-
-msgid "Access to this resource was denied."
-msgstr "访问该资源被拒绝。"
-
-msgid "Action to be executed when a child process dies"
-msgstr "当子进程终止时要执行的操作"
-
-msgid "Adds external network attribute to network resource."
-msgstr "请对网络资源添加外部网络属性。"
-
-msgid "Adds test attributes to core resources."
-msgstr "将测试属性添加至核心资源。"
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "找不到代理 %(id)s"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "代理 %(id)s 不是 L3 代理或已禁用"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "代理 %(id)s 是无效 DHCP 代理或已禁用"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "进程更新: %(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr "找不到符合以下条件的代理:agent_type=%(agent_type)s 且 host=%(host)s"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "允许自动对 DHCP 代理调度网络。"
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "允许自动对 L3 代理调度路由器。"
-
-msgid "Allow running metadata proxy."
-msgstr "允许运行 metadata代理"
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr "允许将资源操作通知发送至 DHCP 代理"
-
-msgid "Allow the usage of the bulk API"
-msgstr "允许使用成批 API"
-
-msgid "Allow the usage of the pagination"
-msgstr "允许使用分页"
-
-msgid "Allow the usage of the sorting"
-msgstr "允许使用排序"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr "允许对 nova 元数据执行非安全 SSL (HTTPS) 请求"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair 必须包含 ip_address"
-
-msgid "An interface driver must be specified"
-msgstr "必须指定接口驱动程序"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"要从 neutron.ml2.mechanism_drivers 名称空间装入的联网机制驱动程序入口点的已排"
-"序列表。"
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr "发生未知错误。请再次尝试您的请求。"
-
-msgid "An unknown exception occurred."
-msgstr "发生未知异常。"
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "在 POST 中,不允许属性“%s”"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "自动从脱机 DHCP 代理移除网络。"
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr "将路由器从脱机 L3 代理自动重新安排至联机 L3 代理程序。"
-
-msgid "Available commands"
-msgstr "可用的命令"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "后端不支持 VLAN 透明。"
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"以下前缀或 mac 的格式不正确,无法通过 EUI-64 生成 IPv6 地址:%(prefix)s 和 "
-"%(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr "前缀类型不正确,无法通过 EUI-64 生成 IPv6 地址:%s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "基本 MAC:%s"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "网桥 %(bridge)s 不存在。"
-
-msgid "Bulk operation not supported"
-msgstr "成批操作不受支持"
-
-msgid "CIDR to monitor"
-msgstr "要监视的 CIDR"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr "无法将浮动 IP 添加至子网 %s(不具有任何 gateway_ip)上的端口"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr "无法从可用的一组前缀分配所请求的子网"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"无法使浮动 IP %(floating_ip_address)s (%(fip_id)s) 与使用固定 IP "
-"%(fixed_ip)s 的端口 %(port_id)s 关联,因为该固定 IP 已具有外部网络 "
-"%(net_id)s 上的浮动 IP。"
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr "无法创建浮动 IP 并将它绑定至端口 %s,因为该端口由另一租户拥有。"
-
-msgid "Cannot create resource for another tenant"
-msgstr "无法为另一租户创建资源"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "在设置了 ipv6 属性的情况下,无法禁用 enable_dhcp"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"不得存在多个具有相同网络标识的路由器端口(如果它们都包含 IPv6 子网)。现有端"
-"口 %(p)s 具有 IPv6 子网和网络标识 %(nid)s"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"无法在 %(agent_mode)s L3 代理 %(agent_id)s 上主管 %(router_type)s 路由器 "
-"%(router_id)s。"
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr "无法匹配删除或修改流时的优先级"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "无法同时指定 subnet-id 和 port-id"
-
-msgid "Cannot understand JSON"
-msgstr "无法理解 JSON"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "无法更新只读属性 %s"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr "用于 SSL 的认证中心公用密钥(CA 证书)文件"
-
-msgid "Check for ARP responder support"
-msgstr "检查 ARP 响应程序支持"
-
-msgid "Check for OVS vxlan support"
-msgstr "检查OVS vxlan支持"
-
-msgid "Check for VF management support"
-msgstr "检查 VF 管理支持"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "检查 iproute2 vxlan 支持"
-
-msgid "Check for nova notification support"
-msgstr "检查 nova 通知支持"
-
-msgid "Check for patch port support"
-msgstr "检查补丁端口支持"
-
-msgid "Check minimal dnsmasq version"
-msgstr "检查最小 dnsmasq 版本"
-
-msgid "Check netns permission settings"
-msgstr "检查 netns 许可权设置"
-
-msgid "Check ovsdb native interface support"
-msgstr "检查 ovsdb 本机接口支持"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"子网 %(subnet_id)s 的 cidr %(subnet_cidr)s 与子网 %(sub_id)s 的 cidr "
-"%(cidr)s 重叠"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "nova 元数据 API 服务器的客户机证书。"
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"可用于租户网络分配的 GRE 隧道标识的 <tun_min>:<tun_max> 元组枚举范围的逗号分"
-"隔列表"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"可用于租户网络分配的 VXLAN VNI 标识的 <vni_min>:<vni_max> 元组枚举范围的逗号"
-"分隔列表"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr "将用作转发器的 DNS 服务器的逗号分隔列表。"
-
-msgid "Command to execute"
-msgstr "要执行的命令"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr "用于接口驱动程序的配置文件(还可使用 l3_agent.ini)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "CIDR %(cidr)s 具有冲突值 ethertype %(ethertype)s "
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"控制是否在服务器中启用了 neutron 安全组 API。未使用安全组或使用 nova安全组 "
-"API 时,它应该为 false。"
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr "在尝试%(time)d 秒之后不能绑定 %(host)s:%(port)s "
-
-msgid "Could not deserialize data"
-msgstr "未能对数据进行反序列化"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "创建失败。%(dev_name)s 已存在。"
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr "当前网关 IP %(ip_address)s 已由端口 %(port_id)s 使用。无法更新。"
-
-msgid "Currently distributed HA routers are not supported."
-msgstr "当前,分布式 HA 路由器不受支持。"
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr "DHCP 租赁持续时间(以秒计)。使用 -1 告诉 dnsmasq 使用无限租赁时间。"
-
-msgid "Default driver to use for quota checks"
-msgstr "存在要用于配额检查的缺省驱动程序"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr "每个租户允许的缺省资源数。负值表示无限。"
-
-msgid "Default security group"
-msgstr "缺省安全组"
-
-msgid "Default security group already exists."
-msgstr "缺省安全组已存在。"
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"会使用以下格式为高级服务定义提供程序:<service_type>:<name>:<driver>[:"
-"default]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr "延迟时间,当代理重新启动时,在该段时间内,代理应该更新现有端口"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "请通过除去所有设备来删除名称空间。"
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "正在删除端口 %s"
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "映射 %(mapping)s 中的设备 %(dev_name)s 并非唯一"
-
-msgid "Device has no virtual functions"
-msgstr "设备没有虚拟功能"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr "physical_device_mappings 中缺少设备名称 %(dev_name)s"
-
-msgid "Device not found"
-msgstr "找不到设备"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr "主机 %(host)s 的分布式虚拟路由器 MAC 地址不存在。"
-
-msgid "Domain to use for building the hostnames"
-msgstr "要用于构建主机名的域"
-
-msgid "Downgrade no longer supported"
-msgstr "降级不再支持"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "驱动程序 %s 在提供程序中不唯一"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "L2 代理程序中的安全组防火墙的驱动程序"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr "要用于对 DHCP 代理调度网络的驱动程序"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr "要用于对缺省 L3 代理调度路由器的驱动程序"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "IP 地址“%s”重复"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "POST 中的测量规则重复。"
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "POST 中的安全组规则重复。"
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "主机路由“%s”重复"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "列表“%s”中的项重复"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "名称服务器“%s”重复"
-
-msgid "Duplicate segment entry in request."
-msgstr "请求中的段条目重复。"
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "错误:%s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"错误:无法通过缺省搜索路径(~/.neutron/、~/、/etc/neutron/ 和 /etc/)以及“--"
-"config-file”选项找到配置文件!"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr "参数 network_id 或 router_id 的其中之一必须传递至_get_ports 方法。"
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "必须指定 subnet_id 或 port_id"
-
-msgid "Empty physical network name."
-msgstr "空的物理网络名。"
-
-msgid "Enable FWaaS"
-msgstr "请启用 FWaaS"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "为虚拟路由器启用HA模式。"
-
-msgid "Enable SSL on the API server"
-msgstr "在API 服务器上打开SSL"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"请在代理上启用 VXLAN。可在通过使用 linuxbridge 机制驱动程序由 ml2 插件管理代"
-"理时启用"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"启用本地 ARP 响应程序(如果它受支持)。需要 OVS 2.1 和 ML2 l2population 驱动"
-"程序。允许交换机(支持 Overlay 时)在本地响应ARP 请求而不必执行成本高昂的 ARP"
-"广播到 Overlay 中。"
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"在 admin_state_up 为 False 的代理上启用服务。如果此选项为 False,那么当代理"
-"的 admin_state_up 变为 False 时,将禁用该代理上的服务。无论此选项如何,都不会"
-"选择 admin_state_up 为 False 的代理进行自动调度。但是,如果此选项为 True,那"
-"么可以手动调度这样的代理。"
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"由元数据代理启用/禁用日志监控。当不允许 metadata_proxy_user/group 读/写其日志"
-"文件时,应当禁用日志监控,如果对元数据代理日志文件启用了 logrotate,那么必须"
-"使用 copytruncate logrotate 选项。从 metadata_proxy_user 推论出选项缺省值:如"
-"果 metadata_proxy_user 是代理有效用户标识/名称,那么启用了监控日志。"
-
-msgid "Encountered an empty component."
-msgstr "遇到空的组件。"
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "VLAN范围结束值比开始值小"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr "隧道范围的结束小于隧道范围的起始"
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "导入 FWaaS 设备驱动程序时出错:%s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "解析 dns 地址 %s 时出错"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "读取 %s 时出错"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr "现有前缀必须是新前缀的子集"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"要与 ml2 插件的 l2population 机制驱动程序一起使用的扩展。它使该插件能够填充 "
-"VXLAN 转发表。"
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "具有别名 %s 的扩展不存在"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "外部 IP %s 和网关IP相同"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"无法从子网 %(subnet_id)s 访问外部网络 %(external_network_id)s。因此,无法使端"
-"口 %(port_id)s 与浮动 IP 关联。"
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr "无法将外部网络 %(net_id)s 更新为非外部网络,因为它包含现有的网关端口"
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "找不到 ExtraDhcpOpt %(id)s"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr "FWaaS 插件是在服务器端配置的,但 FWaaS 在L3 代理中被禁用。"
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr "重新安排路由器 %(router_id)s 失败:找不到合格 L3 代理。"
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr "将路由器 %(router_id)s 调度到 L3 代理 %(agent_id)s 失败。"
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"在 %(max_tries)s 次尝试之后,未能在网络 %(network_id)s 中为路由器 "
-"%(router_id)s 分配 VRID。"
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"未能在网络 %(network_id)s 上创建端口,因为 fixed_ips 包括了无效子网 "
-"%(subnet_id)s"
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr "未能解析请求。未指定参数“%s”"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr "未能解析请求。未指定必需属性“%s”"
-
-msgid "Failed to remove supplemental groups"
-msgstr "未能移除补充组"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "设置gid %s 失败"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "设置uid %s 失败"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "未能将 %(type)s 隧道端口设置为 %(ip)s"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "找不到浮动 IP %(floatingip_id)s"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr "对于 TCP/UDP 协议,port_range_min 必须小于等于 port_range_max"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "强制ip_lib呼叫使用root helper"
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr "对于子网 %(subnet_cidr)s,找到重叠的分配池:%(pool_1)s %(pool_2)s"
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"无法为路由器 %(router_id)s 更新网关,因为一个或多个浮动 IP 需要指向外部网络 "
-"%(net_id)s 的网关。"
-
-msgid "Gateway is not valid on subnet"
-msgstr "网关在子网上无效"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr "在元数据代理的初始化之后,运行该代理的组(gid 或名称)"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"在元数据代理的初始化之后,运行该代理的组(gid 或名称),(如果此组为空,那么"
-"这是代理有效组)。"
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr "在此进程的初始化之后,运行此进程的组(gid 或名称)"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Neutron 将重试 MAC 生成的次数"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"已提供 ICMP 代码 (port-range-max) %(value)s,但缺少 ICMP 类型 (port-range-"
-"min)。"
-
-msgid "ID of network"
-msgstr "网络的标识"
-
-msgid "ID of network to probe"
-msgstr "要探测的网络的标识"
-
-msgid "ID of probe port to delete"
-msgstr "要删除的探测器端口的标识"
-
-msgid "ID of probe port to execute command"
-msgstr "用于执行命令的探测器端口的标识"
-
-msgid "ID of the router"
-msgstr "路由器ID"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr "对于所指定网络上的任何子网,IP 地址 %(ip_address)s 不是有效 IP。"
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr "对于所指定子网,IP 地址 %(ip_address)s 是无效 IP。"
-
-msgid "IP address used by Nova metadata server."
-msgstr "Nova 元数据服务器使用的 IP 地址。"
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "IP 分配需要 subnet_id 或 ip_address"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply 无法应用以下 iptables规则集:\n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"IPv6 地址 %(address)s 无法直接分配给子网 %(id)s 上的端口,因为该子网是针对自"
-"动地址配置的"
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"无法将已配置为从外部路由器接收 RA 的 IPv6 子网 %s 添加至 Neutron 路由器。"
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"如果为 True,那么当网络的首选 MTU 已知时,会进行工作,以通过网络方法(DHCP "
-"和 RA MTU 选项)向 VM 通告 MTU 设置。"
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr "如果为 True,那么允许那些支持它的插件创建 VLAN 透明网络。"
-
-msgid "Illegal IP version number"
-msgstr "IP 版本号不合法"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr "没有足够的前缀空间来分配子网大小 /%s"
-
-msgid "Insufficient rights for removing default security group."
-msgstr "权利不足,无法移除缺省安全组。"
-
-msgid "Interface to monitor"
-msgstr "要监视的接口"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr "子进程活性检查之间的时间间隔(秒),使用 0 来进行禁用"
-
-msgid "Interval between two metering measures"
-msgstr "在采取两种测量措施之间的时间间隔"
-
-msgid "Interval between two metering reports"
-msgstr "在生成两个测量报告之间的时间间隔"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"CIDR %s 对于 IPv6 地址方式无效。OpenStack 使用 EUI-64 地址格式,该格式要求前"
-"缀为 /64。"
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "无效设备 %(dev_name)s:%(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr "认证类型 %(auth_type)s 无效,以下是有效类型:%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "IP 池的数据格式无效:“%s”"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "extra-dhcp-opt 的数据格式无效:%(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "固定 IP 的数据格式无效:“%s”"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "主机路由“%s”的数据格式无效"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "名称服务器“%s”的数据格式无效"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "路由 %(routes)s 的格式无效,%(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "格式无效:%s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "输入对于 %(attr)s 无效。原因:%(reason)s。"
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "针对操作的输入无效:%(error_message)s。"
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr "输入无效。“%(target_dict)s”必须是具有以下键的字典:%(expected_keys)s"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr "实例状态 %(state)s 无效,以下是有效状态:%(valid_states)s"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "映射无效:“%s”"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "无效 PCI 插槽 %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr "提供程序格式无效。最后部分应该为“default”或空:%s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "路由无效:%s"
-
-msgid "Invalid service provider format"
-msgstr "服务提供程序格式无效"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr "ICMP %(field)s (%(attr)s) 的值 %(value)s 无效。它必须为 0 到 255。"
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "端口 %(port)s 的值无效"
-
-msgid "Keepalived didn't respawn"
-msgstr "保持活动的未重新衍生"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "映射“%(mapping)s”中的键 %(key)s 不唯一"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "限制必须是整数 0 或更大整数,而不是“%d”"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "请对租赁数进行限制,以防止拒绝服务。"
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"为VLAN提供商和租户网络提供<physical_network>:<vlan_min>:<vlan_max> 或 "
-"<physical_network>专属物理网络名称,从事实现每个租户网络可以分配到相应的VLAN"
-"标识。"
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"要从 neutron.ml2.type_drivers 名称空间装入的网络类型驱动程序入口点的列表。"
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "VXLAN 端点的本地 IP 地址。"
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "隧道端点的本地 IP 地址。"
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "元数据代理 UNIX 域套接字的位置。"
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "元数据代理 UNIX 域套接字的位置"
-
-msgid "Location of pid file of this process."
-msgstr "此进程的 pid 文件的位置。"
-
-msgid "Location to store DHCP server config files"
-msgstr "用于存储 DHCP 服务器配置文件的位置"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "用于存储 IPv6 RA 配置文件的位置"
-
-msgid "Location to store child pid files"
-msgstr "用于存储子 pid 文件的位置"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr "用于存储保持活动的/连接跟踪的配置文件的位置"
-
-msgid "MTU setting for device."
-msgstr "存在设备的 MTU 设置。"
-
-msgid "MTU size of veth interfaces"
-msgstr "veth 接口的 MTU 大小"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "使 l2 代理在 DVR 方式下运行。"
-
-msgid "Malformed request body"
-msgstr "请求主体的格式不正确"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "允许的最大地址对数"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "每个子网的最大主机路由数"
-
-msgid "Metering driver"
-msgstr "测量驱动程序"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "测量标签\t%(label_id)s 不存在"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "测量标签规则 %(rule_id)s 不存在"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"带有 remote_ip_prefix %(remote_ip_prefix)s 的测量标签规则与另一测量标签规则重"
-"叠"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr "请通过监视 ovsdb 以获取接口更改来最大程度地减少轮询。"
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "映射中缺少键:“%s”"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "映射中缺少值:“%s”"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr "找到多个符合以下条件的代理:agent_type=%(agent_type)s 且 host=%(host)s"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "对于服务 %s,存在多个缺省提供程序"
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "已配置多个用于服务 %s 的插件"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "对于服务 %s,已指定多个提供程序"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr "不允许在成批安全组规则创建中使用多个 tenant_id"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr "还必须指定协议(如果给定了端口范围)。"
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr "必须在添加或删除流时指定一个或多个操作"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr ""
-"名称“%s”的长度必须是 1 至 63 个字符,其中每个字符只能是字母数字或连字符。"
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "名称“%s”不能以连字符开头或结尾。"
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "要使用的已打开 vSwitch 网桥的名称"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr "要使用的 nova 区域的名称。如果 keystone 管理多个区域,那么这很有用。"
-
-msgid "Name of the FWaaS Driver"
-msgstr "FWaaS 驱动程序的名称"
-
-msgid "Namespace of the router"
-msgstr "路由器名字空间"
-
-msgid "Native pagination depend on native sorting"
-msgstr "本机分页依赖于本机排序"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "不支持为负数的增量修订版(降级)"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "不支持为负数的相关修订版(降级)"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "网络 %s 是无效外部网络"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "网络 %s 不是外部网络"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"从 IP 范围 %(parent_range)s(排除 IP 范围%(excluded_ranges)s)中找不到大小为 "
-"%(size)s 的网络。"
-
-msgid "Network that will have instance metadata proxied."
-msgstr "将代理实例元数据的网络。"
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "网络类型值“%s”不受支持"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "ML2 插件需要网络类型值"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr "代理支持的网络类型(gre 和/或 vxlan)。"
-
-msgid "Neutron Service Type Management"
-msgstr "Neutron 服务类型管理"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "未配置 Neutron core_plugin!"
-
-msgid "Neutron plugin provider module"
-msgstr "Neutron 插件提供程序模块"
-
-msgid "Neutron quota driver class"
-msgstr "Neutron 配额驱动程序类"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr "找不到合格的与外部网络 %s 关联的 L3 代理"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "在网络 %(net_id)s 上,没有更多 IP 地址可用。"
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"当创建路由器 %(router_id)s 时,没有更多虚拟路由器标识 (VRID) 可用。每个租户"
-"的 HA 路由器数的限制为 254。"
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "没有为“%s”服务指定任何提供程序,正在退出"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"不允许手动将 %(router_type)s 路由器 %(router_id)s 从现有 DVR 节点分配给另一 "
-"L3 代理 %(agent_id)s。"
-
-msgid "Not authorized."
-msgstr "未授权。"
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"l3 个代理并非足够可用于确保 HA。需要的最小数目为 %(min_agents)s,可用的数目"
-"为 %(num_agents)s。"
-
-msgid "Number of RPC worker processes for service"
-msgstr "针对服务的RPC执行程序编号。"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr "关于配置元数据服务器套接字的储备请求数"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr "积压许多配置socket的请求"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr "每个租户允许的浮动 IP 数。负值表示无限。"
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr "每个租户允许的网络数。负值表示无限。"
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr "每个租户允许的端口数。负值表示无限。"
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr "每个租户允许的路由器数。负值表示无限。"
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr "前后两次将事件发送至 nova 的间隔秒数(如果有事件要发送)。"
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "若干秒保持重试监听"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr "每个租户允许的安全组数。负值表示无限。"
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr "每个租户允许的安全性规则数。负值表示无限。"
-
-msgid ""
-"Number of separate API worker processes for service. If not specified, the "
-"default is equal to the number of CPUs available for best performance."
-msgstr ""
-"针对服务的不同API执行程序的编号。如果没有指定,默认等于最佳性能下的可用CPU的"
-"个数值"
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr "每个租户允许的子网数。负值表示无限。"
-
-msgid "OK"
-msgstr "确定"
-
-msgid "Only admin can view or configure quota"
-msgstr "只有管理员才能查看或配置配额"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr "只有管理员才有权访问另一租户的配额"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr "一次仅允许为一个安全概要文件更新规则"
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "只能提供 remote_ip_prefix 或 remote_group_id。"
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr ""
-"端口 %(port_id)s 上的 device_owner %(device_owner)s 不支持操作 %(op)s。"
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr "请使用此文件来覆盖缺省 dnsmasq 设置"
-
-msgid "Owner type of the device: network/compute"
-msgstr "设备的所有者类型如下:网络/计算"
-
-msgid "POST requests are not supported on this resource."
-msgstr "POST 请求在此资源上不受支持。"
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "解析 bridge_mappings 失败:%s。"
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "解析受支持的 pci_vendor_devs 失败"
-
-msgid "Path to PID file for this process"
-msgstr "此进程的 PID 文件的路径"
-
-msgid "Path to the router directory"
-msgstr "直连路由器的路径"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr "集成网桥中的同级补丁端口(对于隧道网桥)。"
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr "隧道网桥中的同级补丁端口(对于集成网桥)。"
-
-msgid "Ping timeout"
-msgstr "Ping 超时"
-
-msgid "Plugin does not support updating provider attributes"
-msgstr "插件不支持更新提供程序属性"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "端口 %(id)s 没有固定 ip %(address)s"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"端口 %(port_id)s 和浮动 IP %(floatingip_id)s 不是与同一租户关联,因此找不到该"
-"端口。"
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr "必须启用端口安全性,以便在端口上具有所允许的地址对。"
-
-msgid "Port does not have port security binding."
-msgstr "端口没有端口安全性绑定。"
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr "端口已使安全组关联。直到除去安全组,才能禁用端口安全性或 IP 地址"
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr "必须启用端口安全性,并且端口必须具有 IP 地址,以便使用安全组。"
-
-msgid "Private key of client certificate."
-msgstr "客户机证书的专用密钥。"
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "已删除探测器 %s"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "已创建探测器:%s "
-
-msgid "Process is already started"
-msgstr "进程已经启动"
-
-msgid "Process is not running."
-msgstr "进程未运行"
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "用于访问 nova 元数据的协议(HTTP 或 HTTPS)"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"当启动定期任务调度程序以减少拥堵时要随机延迟的秒数范围.(通过设置为 0 来禁用)"
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr "远程元数据服务器遇到内部服务器错误。"
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"表示其负载要由代理报告的资源类型。这可以是“网络”、“子网”或“端口”。如果已指定"
-"(缺省值为“网络”),那么服务器将根据代理报告状态抽取特定负载(作为其代理配置"
-"对象的一部分发送),这是在每个 report_interval 要消耗的资源数。"
-"dhcp_load_type 可与 network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler 一起使用。当 network_scheduler_driver "
-"为 WeightScheduler 时,dhcp_load_type 可配置为表示针对要均衡的资源的选择。示"
-"例:dhcp_load_type=networks"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr "请求失败:在处理请求时,发生内部服务器错误。"
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"请求包含重复地址对:mac_address %(mac_address)s ip_address %(ip_address)s。"
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr ""
-"所请求子网(具有 cidr %(cidr)s,对于网络 %(network_id)s)与另一子网重叠"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"对于服务类型“%(service_type)s”,资源“%(resource_id)s”已经与提供程"
-"序“%(provider)s”关联"
-
-msgid "Resource body required"
-msgstr "需要资源主体"
-
-msgid "Resource not found."
-msgstr "找不到资源。"
-
-msgid "Resources required"
-msgstr "需要资源"
-
-msgid "Root helper daemon application to use when possible."
-msgstr "在可能的情况下,要使用的 Root Helper 守护程序应用程序。"
-
-msgid "Root permissions are required to drop privileges."
-msgstr "删除特权需要 root 用户许可权。"
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "路由器 %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "找不到路由器 %(router_id)s"
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr "路由器 %(router_id)s 没有具有标识 %(port_id)s 的接口"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr "路由器 %(router_id)s 在子网 %(subnet_id)s 上不具有任何接口"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "路由器已在子网 %s 上具有端口"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"无法删除路由器 %(router_id)s 上用于子网 %(subnet_id)s 的路由器接口,因为一个"
-"或多个浮动 IP 需要该接口。"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"无法删除路由器 %(router_id)s 上用于子网 %(subnet_id)s 的路由器接口,因为一个"
-"或多个路由需要该接口。"
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr "将代理相连实例元数据的路由器。"
-
-msgid "Run as daemon."
-msgstr "作为守护程序运行。"
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"节点向服务器报告状态的间隔秒数;应该小于 agent_down_time,最好小于 "
-"agent_down_time 或是它的一半。"
-
-msgid "Seconds between running periodic tasks"
-msgstr "运行定期任务之间的秒数"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"认为代理已关闭的秒数;应该至少为 report_interval 的两倍,以确保代理已正常关"
-"闭。"
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "安全组 %(id)s 不存在"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "安全组规则 %(id)s 不存在"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "安全组规则已存在。规则标识为 %(id)s。"
-
-msgid "Segments and provider values cannot both be set."
-msgstr "无法同时设置段和提供程序值。"
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"当端口数据(固定 IP/floatingip)更改时,将通知发送至 nova,以便 nova 可更新其"
-"高速缓存。"
-
-msgid "Send notification to nova when port status changes"
-msgstr "当端口状态更改时,将通知发送至 nova"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr "针对 HA 设置,发送此大量免费 ARP,如果小于或等于 0,那么会禁用该功能"
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr "对于以下服务类型,找不到服务提供程序“%(provider)s”:%(service_type)s"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr "服务类型 %(service_type)s 没有缺省服务提供程序"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"在代理接收到 SIGTERM 之后,为新的 RPC 调用设置新超时(以秒计)。如果值设置为 "
-"0,那么 RPC 超时将不更改"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr "在承载 GRE/VXLAN 隧道的出局 IP 包上设置或取消设置不分段 (DF) 位。"
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"一些租户具有多个名为“default”的安全组:%(duplicates)s。在升级数据库之前,必须"
-"解析所有重复“default”安全组。"
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr "在请求中指定除了已认证租户之外的“tenant_id”需要管理特权"
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr "路由器接口的子网必须具有网关 IP"
-
-msgid "Subnet pool has existing allocations"
-msgstr "子网池具有现有分配"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr "用于 l3 HA 管理网络的子网。"
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr "系统范围标记,用于确定租户可创建的路由器类型。仅管理员可以覆盖。"
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "用于侦听元数据服务器请求的 TCP 端口。"
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr "TCP 端口已由 Neutron 元数据名称空间代理使用。"
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Nova 元数据服务器使用的 TCP 端口。"
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "TLD“%s”不能全部为数字"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "用于 vxlan 接口协议包的 TOS。"
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "用于 vxlan 接口协议包的 TTL。"
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr "不允许租户 %(tenant_id)s 在此网络上创建 %(resource)s"
-
-msgid "Tenant network creation is not enabled."
-msgstr "未启用租户网络创建。"
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"必须为此代理配置“gateway_external_network_id”选项,因为 Neutron 具有多个外部"
-"网络。"
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr "配置文件中指定的 HA 网络 CIDR 无效;%(cidr)s。"
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "UDP端口用于VXLAN隧道"
-
-msgid "The advertisement interval in seconds"
-msgstr "通告间隔(秒)"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "分配池 %(pool)s 无效。"
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr "分配池 %(pool)s 范围超出子网 cidr %(subnet_cidr)s。"
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr "属性“%(attr)s”是对其他资源的引用,无法由排序“%(resource)s”使用"
-
-msgid "The core plugin Neutron will use"
-msgstr "Neutron 将使用的核心插件"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "用于管理 DHCP 服务器的驱动程序。"
-
-msgid "The driver used to manage the virtual interface."
-msgstr "用于管理虚拟接口的驱动程序。"
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr "以下 device_id %(device_id)s 不属于您的租户或与另一租户路由器 匹配。"
-
-msgid "The host IP to bind to"
-msgstr "主机 IP 要绑定至"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "用于与 OVSDB 进行交互的接口"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr "在单个响应中返回的最大项数,值为“无限”或负整数表示无限制"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr "网络 %(network_id)s 已由 DHCP 代理 %(agent_id)s 主管。"
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr "网络 %(network_id)s 未由 DHCP 代理 %(agent_id)s 主管。"
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr "允许的地址对数超过最大值 %(quota)s。"
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr "在轮询本地设备更改之间,代理将等待的秒数。"
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr "在与 ovsdb 监视器失去通信联系之后重新衍生该监视器之前要等待的秒数。"
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "sort_keys 的数字与 sort_dirs 的数字必须相同"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "已删除端口“%s”"
-
-msgid "The port to bind to"
-msgstr "端口要绑定至"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "请求的内容类型%s非法。"
-
-msgid "The resource could not be found."
-msgstr "找不到该资源。"
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr "路由器 %(router_id)s 已由 L3 代理 %(agent_id)s 主管。"
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr "服务器已出错或无法执行所请求操作。"
-
-msgid "The service plugins Neutron will use"
-msgstr "Neutron 将使用的服务插件"
-
-msgid "The type of authentication to use"
-msgstr "要使用的认证的类型"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "%(element)s 的值“%(value)s”无效。"
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"代理程序的工作方式。允许的方式为:“legacy”- 它会保留现有行为,即,L3 代理部署"
-"在中央联网节点上,以提供 DNAT 和 SNAT 之类的 L3 服务。如果不想采用 DVR,请使"
-"用此方式。“dvr”- 此方法启用 DVR 功能,并且必须用于计算主机上运行的 L3 代"
-"理。“dvr_snat”- 它允许中央 SNAT 支持与 DVR 配合使用。此方法必须用于中央节点或"
-"单主机部署(例如,devstack)上运行的 L3代理程序"
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"True 表示删除所有 OpenvSwitch 网桥上的所有端口。False 表示删除集成和外部网络"
-"网桥上由 Neutron 创建的端口。"
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "ML2 插件需要隧道 IP 值"
-
-msgid "Tunnel bridge to use."
-msgstr "要使用的隧道网桥。"
-
-msgid "URL to database"
-msgstr "指向数据库的 URL"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "无法访问 %s"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr "对于 %(router_id)s,无法完成操作。路由数超过最大值 %(quota)s。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr "对于 %(subnet_id)s,无法完成操作。DNS 名称服务器数超过限制 %(quota)s。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr "对于 %(subnet_id)s,无法完成操作。主机路由数超过限制 %(quota)s。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr "对于网络 %(net_id)s,无法完成操作。IP 地址 %(ip_address)s 在使用中。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr "对于网络 %(net_id)s,无法完成操作。MAC 地址 %(mac)s 在使用中。"
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr ""
-"无法在网络 %(net_id)s 上完成操作。在该网络上,一个或多个端口仍然在使用中。"
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"对于网络 %(net_id)s,无法在端口 %(port_id)s 上完成操作。端口已具有连接的设备 "
-"%(device_id)s。"
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "无法转换 %s 中的值"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "无法创建代理网关端口"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "无法创建 SNAT 接口端口"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr "无法创建该平面网络。物理网络 %(physical_network)s 在使用中。"
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr "无法创建网络。未在最大允许尝试次数中发现任何可用网络。"
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr "无法创建该网络。没有任何租户网络可用于分配。"
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"无法创建该网络。物理网络 %(physical_network)s 上的 VLAN %(vlan_id)s 在使用"
-"中。"
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr "无法创建该网络。隧道标识 %(tunnel_id)s 在使用中。"
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "无法为 %s 确定网卡地址"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "在请求主体中找不到“%s”"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr "在外部网络上找不到任何 IP 地址%(net_id)s."
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "在%s中找不到源的名称"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "对于 IPv4 前缀,无法通过 EUI64 生成 IP 地址"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "无法为主机 %(host)s 生成唯一 DVR MAC。"
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "无法在网络 %(net_id)s 上生成唯一 MAC。"
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr "无法从:%s中匹配目标域. 匹配应该使用以下形式%%(域名)s"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr "无法验证该匹配%(match)s为父资源:未找到%(res)s"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "意外响应代码:%s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "意外响应:%s"
-
-msgid "Unimplemented commands"
-msgstr "未实现的命令"
-
-msgid "Unknown API version specified"
-msgstr "指定的 API 版本未知"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "属性“%s”未知。"
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "链未知:%r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "配额资源 %(unknown)s 未知。"
-
-msgid "Unmapped error"
-msgstr "已取消映射错误"
-
-msgid "Unrecognized action"
-msgstr "无法识别动作"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "无法识别属性“%s”"
-
-msgid "Unsupported Content-Type"
-msgstr "Content-Type 不受支持"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "网络类型 %(net_type)s 不受支持。"
-
-msgid "Unsupported request type"
-msgstr "未支持请求类型"
-
-msgid "Updating default security group not allowed."
-msgstr "正在更新的默认安全组内容不合法"
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"请使用 ML2 l2population 机制驱动程序以了解远程 MAC 和 IP 并提高隧道可伸缩性。"
-
-msgid "Use broadcast in DHCP replies"
-msgstr "在DHCP应答中使用广播"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr "请使用 --delta 或者相关修订版,但是不能同时指定这两者"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr "在元数据代理的初始化之后,运行该代理的用户(uid 或名称)"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"在元数据代理的初始化之后,运行该代理的用户(uid 或名称),(如果此用户为空,"
-"那么这是代理有效用户)。"
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr "在此进程的初始化之后,运行此进程的用户(uid 或名称)"
-
-msgid "VRRP authentication password"
-msgstr "VRRP认证密码"
-
-msgid "VRRP authentication type"
-msgstr "VRRP认证类型"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"对字典的键进行的验证失败。期望的键是 %(expected_keys)s,提供的键是 "
-"%(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "验证器“%s”不存在。"
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "映射“%(mapping)s”中的值 %(value)s 不唯一"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"监控文件日志。当 metadata_proxy_user/group 对元数据代理日志文件不具备读/写许"
-"可权时,应当禁用日志监控。"
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr "用于存储 Neutron 状态文件的位置。此目录对于代理必须为可写。"
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"对于 IPv6,用于外部网关的网络不需要具备相关联的子网,因为可以使用自动指定的链"
-"路本地地址 (LLA)。但是,需要 IPv6 网关地址用作缺省路由的下一个路由器。如果此"
-"处未配置 IPv6 网关地址,那么将配置 Neutron 路由器,以从上游的路由器中获取路由"
-"器广告(RA)中的缺省路由;在这种情况下,还必须配置上游路由器以发送这些 RA。配"
-"置了 ipv6_gateway 时,ipv6_gateway 应为上游路由器上的接口的 LLA。如果需要下一"
-"个使用全局唯一地址 (GUA) 的路由器,那么它需要通过分配给该网络的子网来完成,而"
-"不是通过此参数来完成。"
-
-msgid "You must implement __call__"
-msgstr "必须实现 __call__"
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr "必须为网桥提供配置文件 - --config-file 或env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "必须提供修订或相对变化量"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr "仅允许将 allocation_pools 用于特定子网请求。"
-
-msgid "binding:profile value too large"
-msgstr "binding:profile 值太大"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "不得同时指定 cidr 和 prefixlen"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr "dhcp_agents_per_network 必须是>= 1. '%s' 是不合法的、"
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "在没有 port_id 的情况下,无法指定 fixed_ip_address"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "具有设备所有者 %s"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "对设备 %(dev_name)s 执行 IP 命令失败:%(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "IP 链接功能 %(capability)s 不受支持"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "ip 链路命令未支持: %(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr "在缺少 cidr 和 subnetpool_id 的情况下,必须指定 ip_version"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "ip_version 为 4 时,ipv6_address_mode 无效"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "ip_version 为 4 时,ipv6_ra_mode 无效"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"当 enable_dhcp 设置为 False 时,无法设置 ipv6_ra_mode 或 ipv6_address_mode。"
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"设置为“%(ra_mode)s”的 ipv6_ra_mode(在 ipv6_address_mode 设置"
-"为“%(addr_mode)s”的情况下)无效。如果设置了这两个属性,那么它们必须为同一个值"
-
-msgid "mac address update"
-msgstr "MAC 地址更新"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"max_l3_agents_per_router %(max_agents)s 配置参数无效。它必须大于或等于 "
-"min_l3_agents_per_router %(min_agents)s。"
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"min_l3_agents_per_router 配置参数无效。它必须等于或大于 %s,才能确保 HA。"
-
-msgid "network_type required"
-msgstr "需要 network_type"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "不支持的网络类型值 '%s'"
-
-msgid "new subnet"
-msgstr "新子网"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "对于 VLAN 提供程序网络,physical_network“%s”未知"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "平面供应商网络的物理网络 '%s'为未知状态"
-
-msgid "physical_network required for flat provider network"
-msgstr "平面供应商网络需要的物理网络"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "提供程序:已为%s 网络指定 physical_network"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "respawn_interval 必须不小于 0(如果已提供此项)。"
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id 超出范围,从(%(min)s 到 %(max)s)"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr "segmentation_id 需要 VLAN 提供程序网络的 physical_network"
-
-msgid "the nexthop is not connected with router"
-msgstr "下一中继段未与路由器连接"
-
-msgid "the nexthop is used by router"
-msgstr "路由器已使用下一中继段"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"从命令行中提供了 uuid,以便 external_process 可通过 /proc/cmdline 接口跟踪我"
-"们。"
diff --git a/neutron/locale/zh_TW/LC_MESSAGES/neutron.po b/neutron/locale/zh_TW/LC_MESSAGES/neutron.po
deleted file mode 100644 (file)
index ba23f79..0000000
+++ /dev/null
@@ -1,2182 +0,0 @@
-# Chinese (Traditional, Taiwan) translations for neutron.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the neutron project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: neutron 8.0.0.0b2.dev248\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-06 06:29+0000\n"
-"PO-Revision-Date: 2015-09-06 10:15+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: zh_Hant_TW\n"
-"Language-Team: Chinese (Taiwan)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#, python-format
-msgid ""
-"\n"
-"Command: %(cmd)s\n"
-"Exit code: %(code)s\n"
-"Stdin: %(stdin)s\n"
-"Stdout: %(stdout)s\n"
-"Stderr: %(stderr)s"
-msgstr ""
-"\n"
-"指令:%(cmd)s\n"
-"結束碼:%(code)s\n"
-"標準輸入:%(stdin)s\n"
-"標準輸出:%(stdout)s\n"
-"標準錯誤:%(stderr)s"
-
-#, python-format
-msgid "%(driver)s: Internal driver error."
-msgstr "%(driver)s:內部驅動程式錯誤。"
-
-#, python-format
-msgid "%(id)s is not a valid %(type)s identifier"
-msgstr "%(id)s 不是有效的 %(type)s ID"
-
-#, python-format
-msgid ""
-"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' "
-"and '%(desc)s'"
-msgstr ""
-"對於 sort_dir 來說,%(invalid_dirs)s 是無效值,有效值為 '%(asc)s' 及 "
-"'%(desc)s'"
-
-#, python-format
-msgid "%(key)s prohibited for %(tunnel)s provider network"
-msgstr "%(tunnel)s 提供者網路已禁止 %(key)s"
-
-#, python-format
-msgid ""
-"%(method)s called with network settings %(current)s (original settings "
-"%(original)s) and network segments %(segments)s"
-msgstr ""
-"已使用網路設定 %(current)s(原始設定%(original)s)及網路區段 %(segments)s 來"
-"呼叫了 %(method)s"
-
-#, python-format
-msgid ""
-"%(method)s called with subnet settings %(current)s (original settings "
-"%(original)s)"
-msgstr ""
-"已使用子網路設定 %(current)s(原始設定%(original)s)來呼叫了 %(method)s"
-
-#, python-format
-msgid "%(method)s failed."
-msgstr "%(method)s 失敗。"
-
-#, python-format
-msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'"
-msgstr "%(name)s '%(addr)s' 與 ip_version '%(ip_version)s' 不符"
-
-#, python-format
-msgid "%s cannot be called while in offline mode"
-msgstr "當 %s 處於離線模式時,無法對其進行呼叫"
-
-#, python-format
-msgid "%s is invalid attribute for sort_key"
-msgstr "對於 sort_key 來說,%s 是無效的屬性"
-
-#, python-format
-msgid "%s is invalid attribute for sort_keys"
-msgstr "對於 sort_key 來說,%s 是無效的屬性"
-
-#, python-format
-msgid "%s is not a valid VLAN tag"
-msgstr "%s 不是有效的 VLAN 標籤"
-
-#, python-format
-msgid "%s must implement get_port_from_device or get_ports_from_devices."
-msgstr "%s 必須實作 get_port_from_device 或 get_ports_from_devices。"
-
-#, python-format
-msgid "%s prohibited for VLAN provider network"
-msgstr "VLAN 提供者網路已禁止 %s"
-
-#, python-format
-msgid "%s prohibited for flat provider network"
-msgstr "平面提供者網路已禁止 %s"
-
-#, python-format
-msgid "%s prohibited for local provider network"
-msgstr "本端提供者網路已禁止 %s"
-
-#, python-format
-msgid "'%(data)s' exceeds maximum length of %(max_len)s"
-msgstr "'%(data)s' 超出 %(max_len)s 的長度上限"
-
-#, python-format
-msgid "'%(data)s' is not in %(valid_values)s"
-msgstr "'%(data)s' 不在 %(valid_values)s 中"
-
-#, python-format
-msgid "'%(data)s' is too large - must be no larger than '%(limit)d'"
-msgstr "'%(data)s' 太大 - 不得大於 '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' is too small - must be at least '%(limit)d'"
-msgstr "'%(data)s' 太小 - 必須至少為 '%(limit)d'"
-
-#, python-format
-msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended"
-msgstr "'%(data)s' 不是可以辨識的 IP 子網路 CIDR,建議使用 '%(cidr)s'"
-
-#, python-format
-msgid "'%(host)s' is not a valid nameserver. %(msg)s"
-msgstr "'%(host)s' 不是有效的命名伺服器。%(msg)s"
-
-#, python-format
-msgid "'%s' Blank strings are not permitted"
-msgstr "'%s',不允許空白字串"
-
-#, python-format
-msgid "'%s' cannot be converted to boolean"
-msgstr "無法將 '%s' 轉換為布林值"
-
-#, python-format
-msgid "'%s' contains whitespace"
-msgstr "'%s' 包含空格"
-
-#, python-format
-msgid "'%s' is not a dictionary"
-msgstr "'%s' 不是字典"
-
-#, python-format
-msgid "'%s' is not a list"
-msgstr "'%s' 不是清單"
-
-#, python-format
-msgid "'%s' is not a valid IP address"
-msgstr "'%s' 不是有效的 IP 位址"
-
-#, python-format
-msgid "'%s' is not a valid IP subnet"
-msgstr "'%s' 不是有效的 IP 子網路"
-
-#, python-format
-msgid "'%s' is not a valid MAC address"
-msgstr "'%s' 不是有效的 MAC 位址"
-
-#, python-format
-msgid "'%s' is not a valid UUID"
-msgstr "'%s' 不是有效的 UUID"
-
-#, python-format
-msgid "'%s' is not a valid boolean value"
-msgstr "'%s' 不是有效的布林值"
-
-#, python-format
-msgid "'%s' is not a valid input"
-msgstr "'%s' 不是有效的輸入"
-
-#, python-format
-msgid "'%s' is not a valid string"
-msgstr "'%s' 不是有效字串"
-
-#, python-format
-msgid "'%s' is not an integer"
-msgstr "'%s' 不是整數"
-
-#, python-format
-msgid "'%s' is not an integer or uuid"
-msgstr "'%s' 不是整數或 UUID"
-
-#, python-format
-msgid "'%s' is not of the form <key>=[value]"
-msgstr "'%s' 的格式不是 <key>=[value]"
-
-#, python-format
-msgid "'%s' should be non-negative"
-msgstr "'%s' 應該為非負數"
-
-msgid "0 is not allowed as CIDR prefix length"
-msgstr "不接受 0 作為 CIDR 字首長度"
-
-msgid "A cidr must be specified in the absence of a subnet pool"
-msgstr "如果未指定子網路儲存區,則必須指定 cidr"
-
-msgid ""
-"A list of mappings of physical networks to MTU values. The format of the "
-"mapping is <physnet>:<mtu val>. This mapping allows specifying a physical "
-"network MTU value that differs from the default segment_mtu value."
-msgstr ""
-"實體網路與 MTU 值的對映清單。對映格式為<physnet>:<mtu val>。此對映容許指定不"
-"同於預設 segment_mtu 值的實體網路 MTU 值。"
-
-msgid "A metering driver must be specified"
-msgstr "必須指定計量驅動程式"
-
-msgid "API for retrieving service providers for Neutron advanced services"
-msgstr "此 API 用於擷取 Neutron 進階服務的服務提供者"
-
-msgid "Access to this resource was denied."
-msgstr "拒絕存取此資源。"
-
-msgid "Action to be executed when a child process dies"
-msgstr "子程序當掉時要執行的動作"
-
-msgid "Adds external network attribute to network resource."
-msgstr "將外部網路屬性新增至網路資源。"
-
-msgid "Adds test attributes to core resources."
-msgstr "將測試屬性新增至核心資源。"
-
-#, python-format
-msgid "Agent %(id)s could not be found"
-msgstr "找不到代理程式 %(id)s"
-
-#, python-format
-msgid "Agent %(id)s is not a L3 Agent or has been disabled"
-msgstr "代理程式 %(id)s 不是 L3 代理程式或者已停用"
-
-#, python-format
-msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled"
-msgstr "代理程式 %(id)s 不是有效的 DHCP 代理程式或者已停用"
-
-#, python-format
-msgid "Agent updated: %(payload)s"
-msgstr "已更新代理程式:%(payload)s"
-
-#, python-format
-msgid ""
-"Agent with agent_type=%(agent_type)s and host=%(host)s could not be found"
-msgstr "找不到 agent_type = %(agent_type)s 且主機 = %(host)s 的代理程式"
-
-msgid "Allow auto scheduling networks to DHCP agent."
-msgstr "容許自動將網路排程到 DHCP 代理程式。"
-
-msgid "Allow auto scheduling of routers to L3 agent."
-msgstr "容許自動將路由器排定到 L3 代理程式。"
-
-msgid "Allow running metadata proxy."
-msgstr "容許執行 meta 資料 Proxy。"
-
-msgid "Allow sending resource operation notification to DHCP agent"
-msgstr "容許將資源作業通知傳送給 DHCP 代理程式"
-
-msgid "Allow the usage of the bulk API"
-msgstr "容許使用主體 API"
-
-msgid "Allow the usage of the pagination"
-msgstr "容許使用分頁"
-
-msgid "Allow the usage of the sorting"
-msgstr "容許使用排序"
-
-msgid "Allow to perform insecure SSL (https) requests to nova metadata"
-msgstr "容許對 Nova meta 資料執行不安全的 SSL (HTTPS) 要求"
-
-msgid "AllowedAddressPair must contain ip_address"
-msgstr "AllowedAddressPair 必須包含 ip_address"
-
-msgid "An interface driver must be specified"
-msgstr "必須指定介面驅動程式"
-
-msgid ""
-"An ordered list of networking mechanism driver entrypoints to be loaded from "
-"the neutron.ml2.mechanism_drivers namespace."
-msgstr ""
-"要從 neutron.ml2.mechanism_drivers 名稱空間載入的網路機制驅動程式進入點有序清"
-"單。"
-
-msgid "An unknown error has occurred. Please try your request again."
-msgstr "發生不明錯誤。請重試要求。"
-
-msgid "An unknown exception occurred."
-msgstr "發生不明異常狀況。"
-
-#, python-format
-msgid "Attribute '%s' not allowed in POST"
-msgstr "POST 中不接受屬性 '%s'"
-
-msgid "Automatically remove networks from offline DHCP agents."
-msgstr "從離線 DHCP 代理程式自動移除網路。"
-
-msgid ""
-"Automatically reschedule routers from offline L3 agents to online L3 agents."
-msgstr "自動將路由器從離線 L3 代理程式重新排定至線上 L3代理程式。"
-
-msgid "Available commands"
-msgstr "可用的指令"
-
-msgid "Backend does not support VLAN Transparency."
-msgstr "後端不支援 VLAN 透通性。"
-
-#, python-format
-msgid ""
-"Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, "
-"%(mac)s:"
-msgstr ""
-"依 EUI-64 產生 IPv6 位址時使用的字首或 MAC 格式錯誤:%(prefix)s 及 %(mac)s:"
-
-#, python-format
-msgid "Bad prefix type for generate IPv6 address by EUI-64: %s"
-msgstr "依 EUI-64 產生 IPv6 位址時使用的字首類型錯誤:%s"
-
-#, python-format
-msgid "Base MAC: %s"
-msgstr "基本 MAC:%s"
-
-#, python-format
-msgid "Bridge %(bridge)s does not exist."
-msgstr "橋接器 %(bridge)s 不存在。"
-
-msgid "Bulk operation not supported"
-msgstr "不支援主體作業"
-
-msgid "CIDR to monitor"
-msgstr "要監視的 CIDR"
-
-#, python-format
-msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip"
-msgstr "無法將浮動 IP 新增至子網路 %s 上沒有 gateway_ip 的埠"
-
-msgid "Cannot allocate requested subnet from the available set of prefixes"
-msgstr "無法配置可用字首集中的所要求子網路"
-
-#, python-format
-msgid ""
-"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port "
-"%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a "
-"floating IP on external network %(net_id)s."
-msgstr ""
-"無法使浮動 IP %(floating_ip_address)s (%(fip_id)s) 與使用固定 IP "
-"%(fixed_ip)s 的埠 %(port_id)s 產生關聯,因為該固定 IP 在外部網路 %(net_id)s "
-"上已經有浮動 IP。"
-
-#, python-format
-msgid ""
-"Cannot create floating IP and bind it to Port %s, since that port is owned "
-"by a different tenant."
-msgstr "無法建立浮動 IP 並將其連結至埠 %s,因為該埠是由其他承租人擁有。"
-
-msgid "Cannot create resource for another tenant"
-msgstr "無法給另一個 Tenant 建立資源"
-
-msgid "Cannot disable enable_dhcp with ipv6 attributes set"
-msgstr "在設定了 ipv6 屬性的情況下,無法停用 enable_dhcp"
-
-#, python-format
-msgid ""
-"Cannot have multiple router ports with the same network id if both contain "
-"IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s"
-msgstr ""
-"不能具有多個包含相同網路 ID 的路由器埠(如果兩者都包含 IPv6 子網路)。現有埠 "
-"%(p)s 具有 IPv6 子網路和網路 ID %(nid)s"
-
-#, python-format
-msgid ""
-"Cannot host %(router_type)s router %(router_id)s on %(agent_mode)s L3 agent "
-"%(agent_id)s."
-msgstr ""
-"無法在下列 %(agent_mode)s L3 代理程式上管理 %(router_type)s 路由器 "
-"%(router_id)s:%(agent_id)s。"
-
-msgid "Cannot match priority on flow deletion or modification"
-msgstr "無法符合流程刪除作業或修改作業上的優先順序"
-
-msgid "Cannot specify both subnet-id and port-id"
-msgstr "無法同時指定 subnet-id 及 port-id"
-
-msgid "Cannot understand JSON"
-msgstr "無法理解 JSON"
-
-#, python-format
-msgid "Cannot update read-only attribute %s"
-msgstr "無法更新唯讀屬性 %s"
-
-msgid "Certificate Authority public key (CA cert) file for ssl"
-msgstr "用於 SSL 的「憑證管理中心」公開金鑰(CA 憑證)檔案"
-
-msgid "Check for ARP responder support"
-msgstr "檢查 ARP 回應者支援"
-
-msgid "Check for OVS vxlan support"
-msgstr "檢查 OVS vxlan 支援"
-
-msgid "Check for VF management support"
-msgstr "檢查 VF 管理支援"
-
-msgid "Check for iproute2 vxlan support"
-msgstr "檢查 iproute2 vxlan 支援"
-
-msgid "Check for nova notification support"
-msgstr "檢查 Nova 通知支援"
-
-msgid "Check for patch port support"
-msgstr "檢查修補程式埠支援"
-
-msgid "Check minimal dnsmasq version"
-msgstr "檢查最低 dnsmasq 版本"
-
-msgid "Check netns permission settings"
-msgstr "檢查 netns 權限設定"
-
-msgid "Check ovsdb native interface support"
-msgstr "檢查 OVSDB 原生介面支援"
-
-#, python-format
-msgid ""
-"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of "
-"subnet %(sub_id)s"
-msgstr ""
-"子網路 %(subnet_id)s 的 CIDR %(subnet_cidr)s 與子網路 %(sub_id)s 的 CIDR "
-"%(cidr)s 重疊"
-
-msgid "Client certificate for nova metadata api server."
-msgstr "Nova meta 資料 API 伺服器的用戶端憑證。"
-
-msgid ""
-"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE "
-"tunnel IDs that are available for tenant network allocation"
-msgstr ""
-"<tun_min>:<tun_max> 值組的逗點區隔清單,用於列舉可用於承租人網路配置的 GRE 通"
-"道 ID 範圍"
-
-msgid ""
-"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of "
-"VXLAN VNI IDs that are available for tenant network allocation"
-msgstr ""
-"<vni_min>:<vni_max> 值組的逗點區隔清單,用於列舉可用於承租人網路配置的 VXLAN "
-"VNI ID 範圍"
-
-msgid ""
-"Comma-separated list of the DNS servers which will be used as forwarders."
-msgstr "將用來作為轉遞程式的 DNS 伺服器逗點區隔清單。"
-
-msgid "Command to execute"
-msgstr "要執行的指令"
-
-msgid "Config file for interface driver (You may also use l3_agent.ini)"
-msgstr "介面驅動程式的配置檔(您也可使用 l3_agent.ini)"
-
-#, python-format
-msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s"
-msgstr "CIDR %(cidr)s 的乙太網路類型 %(ethertype)s 值有衝突"
-
-msgid ""
-"Controls whether the neutron security group API is enabled in the server. It "
-"should be false when using no security groups or using the nova security "
-"group API."
-msgstr ""
-"控制是否在伺服器中啟用 Neutron 安全群組 API。當不使用安全群組時或者使用 Nova "
-"安全群組 API 時,它應該是 false。"
-
-#, python-format
-msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds"
-msgstr "嘗試 %(time)d 秒後仍無法連結至 %(host)s:%(port)s"
-
-msgid "Could not deserialize data"
-msgstr "無法解除序列化資料"
-
-#, python-format
-msgid "Creation failed. %(dev_name)s already exists."
-msgstr "建立失敗。%(dev_name)s 已存在。"
-
-#, python-format
-msgid ""
-"Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable "
-"to update."
-msgstr "埠 %(port_id)s 已在使用現行閘道 IP %(ip_address)s。無法更新。"
-
-msgid "Currently distributed HA routers are not supported."
-msgstr "目前不支援分散式 HA 路由器。"
-
-msgid ""
-"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite "
-"lease times."
-msgstr ""
-"DHCP 租賃期限(以秒為單位)。使用 -1 可告知 dnsmasq 使用無限的租賃時間。"
-
-msgid "Default driver to use for quota checks"
-msgstr "要用於配額檢查的預設驅動程式"
-
-msgid ""
-"Default number of resource allowed per tenant. A negative value means "
-"unlimited."
-msgstr "每個承租人所容許的預設資源數目。負數值表示無限制。"
-
-msgid "Default security group"
-msgstr "預設安全群組"
-
-msgid "Default security group already exists."
-msgstr "預設安全群組已存在。"
-
-msgid ""
-"Defines providers for advanced services using the format: <service_type>:"
-"<name>:<driver>[:default]"
-msgstr ""
-"使用下列格式,給進階服務定義提供者:<service_type>:<name>:<driver>[:default]"
-
-msgid ""
-"Delay within which agent is expected to update existing ports whent it "
-"restarts"
-msgstr "延遲時間,代理程式在重新啟動時,應該在此時間內更新現有埠"
-
-msgid "Delete the namespace by removing all devices."
-msgstr "透過移除所有裝置來刪除名稱空間。"
-
-#, python-format
-msgid "Deleting port %s"
-msgstr "正在刪除埠 %s"
-
-#, python-format
-msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
-msgstr "對映 %(mapping)s 中的裝置 %(dev_name)s 不是唯一的"
-
-msgid "Device has no virtual functions"
-msgstr "裝置沒有虛擬函數"
-
-#, python-format
-msgid "Device name %(dev_name)s is missing from physical_device_mappings"
-msgstr "physical_device_mappings 中遺漏了裝置名稱 %(dev_name)s"
-
-msgid "Device not found"
-msgstr "找不到裝置"
-
-#, python-format
-msgid ""
-"Distributed Virtual Router Mac Address for host %(host)s does not exist."
-msgstr "主機 %(host)s 的分散式虛擬路由器 MAC 位址不存在。"
-
-msgid "Domain to use for building the hostnames"
-msgstr "用於建置主機名稱的網域"
-
-msgid "Downgrade no longer supported"
-msgstr "不再支援降級"
-
-#, python-format
-msgid "Driver %s is not unique across providers"
-msgstr "驅動程式 %s 在提供者之間不是唯一的"
-
-msgid "Driver for security groups firewall in the L2 agent"
-msgstr "L2 代理程式中安全群組防火牆的驅動程式"
-
-msgid "Driver to use for scheduling network to DHCP agent"
-msgstr "用於將網路排程到 DHCP 代理程式的驅動程式"
-
-msgid "Driver to use for scheduling router to a default L3 agent"
-msgstr "用於將路由器排程到預設 L3 代理程式的驅動程式"
-
-#, python-format
-msgid "Duplicate IP address '%s'"
-msgstr "重複的 IP 位址 '%s'"
-
-msgid "Duplicate Metering Rule in POST."
-msgstr "POST 中的計量規則重複。"
-
-msgid "Duplicate Security Group Rule in POST."
-msgstr "POST 中的安全群組規則重複。"
-
-#, python-format
-msgid "Duplicate hostroute '%s'"
-msgstr "重複的主機路徑 '%s'"
-
-#, python-format
-msgid "Duplicate items in the list: '%s'"
-msgstr "清單中的重複項目:'%s'"
-
-#, python-format
-msgid "Duplicate nameserver '%s'"
-msgstr "重複的名稱伺服器 '%s'"
-
-msgid "Duplicate segment entry in request."
-msgstr "要求中的區段項目重複。"
-
-#, python-format
-msgid "ERROR: %s"
-msgstr "錯誤:%s"
-
-msgid ""
-"ERROR: Unable to find configuration file via the default search paths (~/."
-"neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!"
-msgstr ""
-"錯誤:無法透過預設搜尋路徑(~/.neutron/、~/、/etc/neutron/及 /etc/)與 '--"
-"config-file' 選項來找到配置檔!"
-
-msgid ""
-"Either one of parameter network_id or router_id must be passed to _get_ports "
-"method."
-msgstr "必須將 network_id 或 router_id 中的一個參數傳遞至_get_ports 方法。"
-
-msgid "Either subnet_id or port_id must be specified"
-msgstr "必須指定 subnet_id 或 port_id"
-
-msgid "Empty physical network name."
-msgstr "空的實體網路名稱。"
-
-msgid "Enable FWaaS"
-msgstr "啟用 FWaaS"
-
-msgid "Enable HA mode for virtual routers."
-msgstr "啟用虛擬路由器的 HA 模式。"
-
-msgid "Enable SSL on the API server"
-msgstr "在 API 伺服器上啟用 SSL"
-
-msgid ""
-"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 "
-"plugin using linuxbridge mechanism driver"
-msgstr ""
-"在代理程式上啟用 VXLAN。代理程式是由 ML2 外掛程式(使用 LinuxBridge 機制驅動"
-"程式)管理時,可以啟用 VXLAN"
-
-msgid ""
-"Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 "
-"l2population driver. Allows the switch (when supporting an overlay) to "
-"respond to an ARP request locally without performing a costly ARP broadcast "
-"into the overlay."
-msgstr ""
-"如果支援本端 ARP 回應者,請將其啟用。需要 OVS 2.1 及 ML2 l2population 驅動程"
-"式。容許交換器(當支援套版時)在本端對 ARP 要求做出回應,但不執行高成本的 "
-"ARP 播送至套版。"
-
-msgid ""
-"Enable services on an agent with admin_state_up False. If this option is "
-"False, when admin_state_up of an agent is turned False, services on it will "
-"be disabled. Agents with admin_state_up False are not selected for automatic "
-"scheduling regardless of this option. But manual scheduling to such agents "
-"is available if this option is True."
-msgstr ""
-"對 admin_state_up 為 False 的代理程式啟用服務。如果此選項為 False,則當代理程"
-"式的 admin_state_up 變為 False 時,將停用其上的服務。無論此選項為何,都不會選"
-"取 admin_state_up 為 False的代理程式以進行自動排程。但如果此選項為 True,則可"
-"以使用此類代理程式的手動排程。"
-
-msgid ""
-"Enable/Disable log watch by metadata proxy. It should be disabled when "
-"metadata_proxy_user/group is not allowed to read/write its log file and "
-"copytruncate logrotate option must be used if logrotate is enabled on "
-"metadata proxy log files. Option default value is deduced from "
-"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
-"effective user id/name."
-msgstr ""
-"按 meta 資料 Proxy 啟用/停用日誌監看。當不容許 metadata_proxy_user/group 讀"
-"取/寫入其日誌檔時,應該停用日誌監看,且如果已對 meta 資料 Proxy 日誌檔啟用 "
-"logrotate,則必須使用copytruncate logrotate 選項。選項預設值是從"
-"metadata_proxy_user 推斷得出的:如果 metadata_proxy_user 為代理程式有效使用"
-"者 ID/名稱,則已啟用日誌監看。"
-
-msgid "Encountered an empty component."
-msgstr "發現空元件。"
-
-msgid "End of VLAN range is less than start of VLAN range"
-msgstr "VLAN 範圍的終止值小於 VLAN 範圍的起始值"
-
-msgid "End of tunnel range is less than start of tunnel range"
-msgstr "通道範圍的終止值小於通道範圍的起始值"
-
-#, python-format
-msgid "Error importing FWaaS device driver: %s"
-msgstr "匯入 FWaaS 裝置驅動程式時發生錯誤:%s"
-
-#, python-format
-msgid "Error parsing dns address %s"
-msgstr "剖析 DNS 位址 %s 時發生錯誤"
-
-#, python-format
-msgid "Error while reading %s"
-msgstr "讀取 %s 時發生錯誤"
-
-msgid "Existing prefixes must be a subset of the new prefixes"
-msgstr "現有字首必須是新字首的子集"
-
-msgid ""
-"Extension to use alongside ml2 plugin's l2population mechanism driver. It "
-"enables the plugin to populate VXLAN forwarding table."
-msgstr ""
-"與 ML2 外掛程式的 l2population 機制驅動程式一起使用的延伸。它支援該外掛程式將"
-"資料移入 VXLAN 轉遞表格。"
-
-#, python-format
-msgid "Extension with alias %s does not exist"
-msgstr "別名為 %s 的延伸不存在"
-
-#, python-format
-msgid "External IP %s is the same as the gateway IP"
-msgstr "外部 IP %s 與閘道 IP 相同"
-
-#, python-format
-msgid ""
-"External network %(external_network_id)s is not reachable from subnet "
-"%(subnet_id)s.  Therefore, cannot associate Port %(port_id)s with a Floating "
-"IP."
-msgstr ""
-"無法從子網路 %(subnet_id)s 抵達外部網路 %(external_network_id)s。因此,無法使"
-"埠 %(port_id)s 與浮動 IP 產生關聯。"
-
-#, python-format
-msgid ""
-"External network %(net_id)s cannot be updated to be made non-external, since "
-"it has existing gateway ports"
-msgstr "無法將外部網路 %(net_id)s 更新成非外部網路,因為它具有現存的閘道埠"
-
-#, python-format
-msgid "ExtraDhcpOpt %(id)s could not be found"
-msgstr "找不到 ExtraDhcpOpt %(id)s"
-
-msgid ""
-"FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-"
-"agent."
-msgstr "FWaaS 外掛程式已在伺服器端進行配置,但在 L3 代理程式中已停用 FWaaS。"
-
-#, python-format
-msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
-msgstr "無法重新排定路由器 %(router_id)s:找不到適用的 L3 代理程式。"
-
-#, python-format
-msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s."
-msgstr "無法將路由器 %(router_id)s 排程到 L3 代理程式 %(agent_id)s。"
-
-#, python-format
-msgid ""
-"Failed to allocate a VRID in the network %(network_id)s for the router "
-"%(router_id)s after %(max_tries)s tries."
-msgstr ""
-"在嘗試 %(max_tries)s 次之後,無法為路由器 %(router_id)s配置網路 "
-"%(network_id)s 中的 VRID。"
-
-#, python-format
-msgid ""
-"Failed to create port on network %(network_id)s, because fixed_ips included "
-"invalid subnet %(subnet_id)s"
-msgstr ""
-"無法在網路 %(network_id)s 上建立埠,因為 fixed_ips 包含無效的子網路 "
-"%(subnet_id)s"
-
-#, python-format
-msgid "Failed to parse request. Parameter '%s' not specified"
-msgstr "無法剖析要求。未指定參數 '%s'"
-
-#, python-format
-msgid "Failed to parse request. Required attribute '%s' not specified"
-msgstr "無法剖析要求。未指定必要屬性 '%s'"
-
-msgid "Failed to remove supplemental groups"
-msgstr "無法移除增補群組"
-
-#, python-format
-msgid "Failed to set gid %s"
-msgstr "無法設定 GID %s"
-
-#, python-format
-msgid "Failed to set uid %s"
-msgstr "無法設定 UID %s"
-
-#, python-format
-msgid "Failed to set-up %(type)s tunnel port to %(ip)s"
-msgstr "無法將 %(type)s 通道埠設為 %(ip)s"
-
-#, python-format
-msgid "Floating IP %(floatingip_id)s could not be found"
-msgstr "找不到浮動 IP %(floatingip_id)s"
-
-msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max"
-msgstr "對於 TCP/UDP 通訊協定,port_range_min 必須 <= port_range_max"
-
-msgid "Force ip_lib calls to use the root helper"
-msgstr "強制 ip_lib 呼叫使用根說明程式"
-
-#, python-format
-msgid ""
-"Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet "
-"%(subnet_cidr)s."
-msgstr ""
-"發現以下子網路的配置儲存區 %(pool_1)s %(pool_2)s重疊:%(subnet_cidr)s。"
-
-#, python-format
-msgid ""
-"Gateway cannot be updated for router %(router_id)s, since a gateway to "
-"external network %(net_id)s is required by one or more floating IPs."
-msgstr ""
-"無法更新路由器 %(router_id)s 的閘道,因為一個以上的浮動 IP 需要外部網路 "
-"%(net_id)s 的閘道。"
-
-msgid "Gateway is not valid on subnet"
-msgstr "閘道在子網路上無效"
-
-msgid "Group (gid or name) running metadata proxy after its initialization"
-msgstr "在 meta 資料 Proxy 起始設定之後執行該 Proxy 的群組(GID 或名稱)"
-
-msgid ""
-"Group (gid or name) running metadata proxy after its initialization (if "
-"empty: agent effective group)."
-msgstr ""
-"在 meta 資料 Proxy 起始設定之後執行該 Proxy 的群組(GID 或名稱)(如果為空:"
-"則為代理程式有效群組)。"
-
-msgid "Group (gid or name) running this process after its initialization"
-msgstr "在此程序起始設定之後執行此程序的群組(GID 或名稱)"
-
-msgid "How many times Neutron will retry MAC generation"
-msgstr "Neutron 將重試 MAC 產生作業的次數"
-
-#, python-format
-msgid ""
-"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-"
-"min) is missing."
-msgstr ""
-"提供了 ICMP 代碼 (port-range-max) %(value)s,但遺漏了 ICMP 類型(port-range-"
-"min)。"
-
-msgid "ID of network"
-msgstr "網路的 ID"
-
-msgid "ID of network to probe"
-msgstr "要探測的網路 ID"
-
-msgid "ID of probe port to delete"
-msgstr "要刪除的探針埠 ID"
-
-msgid "ID of probe port to execute command"
-msgstr "要執行指令的探針埠 ID"
-
-msgid "ID of the router"
-msgstr "路由器 ID"
-
-#, python-format
-msgid ""
-"IP address %(ip_address)s is not a valid IP for any of the subnets on the "
-"specified network."
-msgstr "IP 位址 %(ip_address)s 不是所指定網路上任何子網路的有效 IP。"
-
-#, python-format
-msgid "IP address %(ip_address)s is not a valid IP for the specified subnet."
-msgstr "IP 位址 %(ip_address)s 不是所指定子網路的有效 IP。"
-
-msgid "IP address used by Nova metadata server."
-msgstr "Nova meta 資料伺服器所使用的 IP 位址。"
-
-msgid "IP allocation requires subnet_id or ip_address"
-msgstr "IP 配置需要 subnet_id 或 ip_address"
-
-#, python-format
-msgid ""
-"IPTablesManager.apply failed to apply the following set of iptables rules:\n"
-"%s"
-msgstr ""
-"IPTablesManager.apply 無法套用下列 iptables 規則集:\n"
-"%s"
-
-#, python-format
-msgid ""
-"IPv6 address %(address)s can not be directly assigned to a port on subnet "
-"%(id)s since the subnet is configured for automatic addresses"
-msgstr ""
-"無法直接將 IPv6 位址 %(address)s 指派給子網路 %(id)s 上的埠,因為該子網路配置"
-"為用於自動位址"
-
-#, python-format
-msgid ""
-"IPv6 subnet %s configured to receive RAs from an external router cannot be "
-"added to Neutron Router."
-msgstr ""
-"無法將配置為從外部路由器接收 RA 的 IPv6 子網路 %s 新增至 Neutron 路由器。"
-
-msgid ""
-"If True, effort is made to advertise MTU settings to VMs via network methods "
-"(DHCP and RA MTU options) when the network's preferred MTU is known."
-msgstr ""
-"如果為 True,則在網路的偏好 MTU 已知時,會盡量嘗試透過網路方法(DHCP 及 RA "
-"MTU 選項)將 MTU 設定公佈給 VM。"
-
-msgid ""
-"If True, then allow plugins that support it to create VLAN transparent "
-"networks."
-msgstr "如果為 True,則容許支援它的外掛程式建立 VLAN 透通網路。"
-
-msgid "Illegal IP version number"
-msgstr "無效的 IP 版本號碼"
-
-#, python-format
-msgid "Insufficient prefix space to allocate subnet size /%s"
-msgstr "字首空間不足,無法配置子網路大小 /%s"
-
-msgid "Insufficient rights for removing default security group."
-msgstr "權限不足,無法移除預設安全群組。"
-
-msgid "Interface to monitor"
-msgstr "要監視的介面"
-
-msgid ""
-"Interval between checks of child process liveness (seconds), use 0 to disable"
-msgstr "子程序存活檢查之間的間隔(秒),使用 0 以停用"
-
-msgid "Interval between two metering measures"
-msgstr "兩次計量測量之間的間隔"
-
-msgid "Interval between two metering reports"
-msgstr "兩次計量報告之間的間隔"
-
-#, python-format
-msgid ""
-"Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address "
-"format, which requires the prefix to be /64."
-msgstr ""
-"IPv6 位址模式的 CIDR %s 無效。OpenStack 使用 EUI-64 位址格式,其需要字首"
-"為 /64。"
-
-#, python-format
-msgid "Invalid Device %(dev_name)s: %(reason)s"
-msgstr "無效的裝置 %(dev_name)s:%(reason)s"
-
-#, python-format
-msgid ""
-"Invalid authentication type: %(auth_type)s, valid types are: "
-"%(valid_auth_types)s"
-msgstr "無效的鑑別類型:%(auth_type)s,有效的類型為:%(valid_auth_types)s"
-
-#, python-format
-msgid "Invalid data format for IP pool: '%s'"
-msgstr "IP 儲存區的資料格式無效:'%s'"
-
-#, python-format
-msgid "Invalid data format for extra-dhcp-opt: %(data)s"
-msgstr "extra-dhcp-opt 的資料格式無效:%(data)s"
-
-#, python-format
-msgid "Invalid data format for fixed IP: '%s'"
-msgstr "固定 IP 的資料格式無效:'%s'"
-
-#, python-format
-msgid "Invalid data format for hostroute: '%s'"
-msgstr "主機路徑的資料格式無效:'%s'"
-
-#, python-format
-msgid "Invalid data format for nameserver: '%s'"
-msgstr "名稱伺服器的資料格式無效:'%s'"
-
-#, python-format
-msgid "Invalid format for routes: %(routes)s, %(reason)s"
-msgstr "無效的路徑格式:%(routes)s,%(reason)s"
-
-#, python-format
-msgid "Invalid format: %s"
-msgstr "無效的格式:%s"
-
-#, python-format
-msgid "Invalid input for %(attr)s. Reason: %(reason)s."
-msgstr "%(attr)s 的輸入無效。原因:%(reason)s。"
-
-#, python-format
-msgid "Invalid input for operation: %(error_message)s."
-msgstr "作業的輸入無效:%(error_message)s。"
-
-#, python-format
-msgid ""
-"Invalid input. '%(target_dict)s' must be a dictionary with keys: "
-"%(expected_keys)s"
-msgstr ""
-"無效的輸入。'%(target_dict)s' 必須是含有下列索引鍵的字典:%(expected_keys)s"
-
-#, python-format
-msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s"
-msgstr "無效的實例狀態:%(state)s,有效的狀態為:%(valid_states)s"
-
-#, python-format
-msgid "Invalid mapping: '%s'"
-msgstr "無效的對映:'%s'"
-
-#, python-format
-msgid "Invalid pci slot %(pci_slot)s"
-msgstr "無效的 PCI 插槽 %(pci_slot)s"
-
-#, python-format
-msgid "Invalid provider format. Last part should be 'default' or empty: %s"
-msgstr "無效的提供者格式。最後一個部分應該是 'default' 或空白:%s"
-
-#, python-format
-msgid "Invalid route: %s"
-msgstr "無效的路徑:%s"
-
-msgid "Invalid service provider format"
-msgstr "無效的服務提供者格式"
-
-#, python-format
-msgid ""
-"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255."
-msgstr ""
-"ICMP %(field)s (%(attr)s) 的值 %(value)s 無效。該值必須在 0 到255 之間。"
-
-#, python-format
-msgid "Invalid value for port %(port)s"
-msgstr "埠 %(port)s 的值無效"
-
-msgid "Keepalived didn't respawn"
-msgstr "Keepalived 未再次大量產生"
-
-#, python-format
-msgid "Key %(key)s in mapping: '%(mapping)s' not unique"
-msgstr "對映 '%(mapping)s' 中的索引鍵 %(key)s 不是唯一的"
-
-#, python-format
-msgid "Limit must be an integer 0 or greater and not '%d'"
-msgstr "限制值必須是大於或等於 0 的整數,而不是 '%d'"
-
-msgid "Limit number of leases to prevent a denial-of-service."
-msgstr "限制租賃次數以防止阻斷服務攻擊。"
-
-msgid ""
-"List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> "
-"specifying physical_network names usable for VLAN provider and tenant "
-"networks, as well as ranges of VLAN tags on each available for allocation to "
-"tenant networks."
-msgstr ""
-"<physical_network>:<vlan_min>:<vlan_max> 或 <physical_network> 的清單,指定可"
-"用於 VLAN 提供者及承租人網路的 physical_network 名稱,以及在每個可用於配置給"
-"承租人網路的 physical_network 上指定 VLAN標籤範圍。"
-
-msgid ""
-"List of network type driver entrypoints to be loaded from the neutron.ml2."
-"type_drivers namespace."
-msgstr ""
-"要從 neutron.ml2.type_drivers 名稱空間中載入的網路類型驅動程式進入點清單。"
-
-msgid "Local IP address of the VXLAN endpoints."
-msgstr "VXLAN 端點的本端 IP 位址。"
-
-msgid "Local IP address of tunnel endpoint."
-msgstr "通道端點的本端 IP 位址。"
-
-msgid "Location for Metadata Proxy UNIX domain socket."
-msgstr "meta 資料 Proxy UNIX 網域 Socket 的位置"
-
-msgid "Location of Metadata Proxy UNIX domain socket"
-msgstr "meta 資料 Proxy UNIX 網域 Socket 的位置"
-
-msgid "Location of pid file of this process."
-msgstr "此程序的 PID 檔位置。"
-
-msgid "Location to store DHCP server config files"
-msgstr "DHCP 伺服器配置檔的儲存位置"
-
-msgid "Location to store IPv6 RA config files"
-msgstr "用於儲存 IPv6 RA 配置檔的位置"
-
-msgid "Location to store child pid files"
-msgstr "子項 PID 檔案的儲存位置"
-
-msgid "Location to store keepalived/conntrackd config files"
-msgstr "用於儲存 keepalived/conntrackd 配置檔的位置"
-
-msgid "MTU setting for device."
-msgstr "裝置的 MTU 設定。"
-
-msgid "MTU size of veth interfaces"
-msgstr "veth 介面的 MTU 大小"
-
-msgid "Make the l2 agent run in DVR mode."
-msgstr "讓 L2 代理程式在 DVR 模式下執行。"
-
-msgid "Malformed request body"
-msgstr "要求內文的格式不正確"
-
-msgid "Maximum number of allowed address pairs"
-msgstr "所容許的位址配對數目上限"
-
-msgid "Maximum number of host routes per subnet"
-msgstr "每個子網路的主機路徑數目上限"
-
-msgid "Metering driver"
-msgstr "計量驅動程式"
-
-#, python-format
-msgid "Metering label %(label_id)s does not exist"
-msgstr "計量標籤 %(label_id)s 不存在"
-
-#, python-format
-msgid "Metering label rule %(rule_id)s does not exist"
-msgstr "計量標籤規則 %(rule_id)s 不存在"
-
-#, python-format
-msgid ""
-"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps "
-"another"
-msgstr ""
-"計量標籤規則 (remote_ip_prefix = %(remote_ip_prefix)s),與另一個計量標籤規則"
-"重疊"
-
-msgid "Minimize polling by monitoring ovsdb for interface changes."
-msgstr "透過監視 OVSDB 是否有介面變更,將輪詢減至最少。"
-
-#, python-format
-msgid "Missing key in mapping: '%s'"
-msgstr "對映中遺漏了索引鍵:'%s'"
-
-#, python-format
-msgid "Missing value in mapping: '%s'"
-msgstr "對映中遺漏了值:'%s'"
-
-#, python-format
-msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found"
-msgstr "找到多個 agent_type = %(agent_type)s 且主機 = %(host)s 的代理程式"
-
-#, python-format
-msgid "Multiple default providers for service %s"
-msgstr "服務 %s 的多個預設提供者"
-
-#, python-format
-msgid "Multiple plugins for service %s were configured"
-msgstr "已給服務 %s 配置多個外掛程式"
-
-#, python-format
-msgid "Multiple providers specified for service %s"
-msgstr "給服務 %s 指定了多個提供者"
-
-msgid "Multiple tenant_ids in bulk security group rule create not allowed"
-msgstr "不容許主體安全群組規則建立作業中存在多個 tenant_id"
-
-msgid "Must also specifiy protocol if port range is given."
-msgstr "如果給定埠範圍,則也必須指定通訊協定。"
-
-msgid "Must specify one or more actions on flow addition or modification"
-msgstr "必須對流程新增作業或修改作業指定一個以上的動作"
-
-#, python-format
-msgid ""
-"Name '%s' must be 1-63 characters long, each of which can only be "
-"alphanumeric or a hyphen."
-msgstr "名稱 '%s' 的長度必須為 1-63 個字元,每個字元只能是英數字元或連字號。"
-
-#, python-format
-msgid "Name '%s' must not start or end with a hyphen."
-msgstr "名稱 '%s' 不得以連字號開頭或結尾。"
-
-msgid "Name of Open vSwitch bridge to use"
-msgstr "要使用的 Open vSwitch 橋接器名稱"
-
-msgid ""
-"Name of nova region to use. Useful if keystone manages more than one region."
-msgstr "要使用的 Nova 區域名稱。如果 Keystone 管理多個區域,則很有用。"
-
-msgid "Name of the FWaaS Driver"
-msgstr "FWaaS 驅動程式的名稱"
-
-msgid "Namespace of the router"
-msgstr "路由器名稱空間"
-
-msgid "Native pagination depend on native sorting"
-msgstr "原生分頁相依於原生排序"
-
-msgid "Negative delta (downgrade) not supported"
-msgstr "不支援負數差異(降級)"
-
-msgid "Negative relative revision (downgrade) not supported"
-msgstr "不支援負面的相對修訂(降級)"
-
-#, python-format
-msgid "Network %s is not a valid external network"
-msgstr "網路 %s 不是有效的外部網路"
-
-#, python-format
-msgid "Network %s is not an external network"
-msgstr "網路 %s 不是外部網路"
-
-#, python-format
-msgid ""
-"Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges "
-"%(excluded_ranges)s was not found."
-msgstr ""
-"在 IP 範圍 %(parent_range)s(排除 IP 範圍 %(excluded_ranges)s)中找不到大小"
-"為 %(size)s 的網路。"
-
-msgid "Network that will have instance metadata proxied."
-msgstr "其實例 meta 資料將被代理的網路。"
-
-#, python-format
-msgid "Network type value '%s' not supported"
-msgstr "不支援網路類型值 '%s'"
-
-msgid "Network type value needed by the ML2 plugin"
-msgstr "ML2 外掛程式需要的網路類型值"
-
-msgid "Network types supported by the agent (gre and/or vxlan)."
-msgstr "代理程式支援的網路類型(GRE 及/或 VXLAN)。"
-
-msgid "Neutron Service Type Management"
-msgstr "Neutron 服務類型管理"
-
-msgid "Neutron core_plugin not configured!"
-msgstr "未配置 Neutron core_plugin!"
-
-msgid "Neutron plugin provider module"
-msgstr "Neutron 外掛程式提供者模組"
-
-msgid "Neutron quota driver class"
-msgstr "Neutron 配額驅動程式類別"
-
-#, python-format
-msgid "No eligible l3 agent associated with external network %s found"
-msgstr "找不到與外部網路 %s 相關聯的適用 L3 代理程式"
-
-#, python-format
-msgid "No more IP addresses available on network %(net_id)s."
-msgstr "網路 %(net_id)s 上沒有更多的可用 IP 位址。"
-
-#, python-format
-msgid ""
-"No more Virtual Router Identifier (VRID) available when creating router "
-"%(router_id)s. The limit of number of HA Routers per tenant is 254."
-msgstr ""
-"建立路由器 %(router_id)s 時,沒有其他「虛擬路由器 ID (VRID)」可用。每個承租人"
-"的 HA 路由器數目限制為 254 個。"
-
-#, python-format
-msgid "No providers specified for '%s' service, exiting"
-msgstr "未給 '%s' 服務指定提供者,正在結束"
-
-#, python-format
-msgid ""
-"Not allowed to manually assign a %(router_type)s router %(router_id)s from "
-"an existing DVR node to another L3 agent %(agent_id)s."
-msgstr ""
-"不容許手動將 %(router_type)s 路由器 %(router_id)s 從現有 DVR 節點指派給另一"
-"個 L3 代理程式 %(agent_id)s。"
-
-msgid "Not authorized."
-msgstr "未獲授權。"
-
-#, python-format
-msgid ""
-"Not enough l3 agents available to ensure HA. Minimum required "
-"%(min_agents)s, available %(num_agents)s."
-msgstr ""
-"沒有足夠的 L3 代理程式可用,無法確保 HA。所需的數目下限為%(min_agents)s,可用"
-"數目為 %(num_agents)s。"
-
-msgid "Number of RPC worker processes for service"
-msgstr "服務的 RPC 工作者處理程序數目"
-
-msgid "Number of backlog requests to configure the metadata server socket with"
-msgstr "要配置給 meta 資料伺服器 Socket 的待辦事項要求數目"
-
-msgid "Number of backlog requests to configure the socket with"
-msgstr "要配置給 Socket 的待辦事項要求數目"
-
-msgid ""
-"Number of floating IPs allowed per tenant. A negative value means unlimited."
-msgstr "每個承租人所容許的浮動 IP 數目。負數值表示無限制。"
-
-msgid ""
-"Number of networks allowed per tenant. A negative value means unlimited."
-msgstr "每個承租人所容許的網路數目。負數值表示無限制。"
-
-msgid "Number of ports allowed per tenant. A negative value means unlimited."
-msgstr "每個承租人所容許的埠數目。負數值表示無限制。"
-
-msgid "Number of routers allowed per tenant. A negative value means unlimited."
-msgstr "每個承租人所容許的路由器數目。負數值表示無限制。"
-
-msgid ""
-"Number of seconds between sending events to nova if there are any events to "
-"send."
-msgstr "兩次將事件傳送至 Nova 之間的秒數(如果有任何事件要傳送)。"
-
-msgid "Number of seconds to keep retrying to listen"
-msgstr "不斷重試接聽的秒數"
-
-msgid ""
-"Number of security groups allowed per tenant. A negative value means "
-"unlimited."
-msgstr "每個承租人所容許的安全群組數目。負數值表示無限制。"
-
-msgid ""
-"Number of security rules allowed per tenant. A negative value means "
-"unlimited."
-msgstr "每個承租人所容許的安全規則數目。負數值表示無限制。"
-
-msgid "Number of subnets allowed per tenant, A negative value means unlimited."
-msgstr "每個承租人所容許的子網路數目。負數值表示無限制。"
-
-msgid "Only admin can view or configure quota"
-msgstr "只有管理者才能檢視或配置配額"
-
-msgid "Only admin is authorized to access quotas for another tenant"
-msgstr "只有管理者才獲授權來存取另一個承租人的配額"
-
-msgid "Only allowed to update rules for one security profile at a time"
-msgstr "一次只容許更新一個安全設定檔的規則"
-
-msgid "Only remote_ip_prefix or remote_group_id may be provided."
-msgstr "只能提供 remote_ip_prefix 或 remote_group_id。"
-
-#, python-format
-msgid ""
-"Operation %(op)s is not supported for device_owner %(device_owner)s on port "
-"%(port_id)s."
-msgstr "埠 %(port_id)s 上的裝置擁有者 %(device_owner)s 不支援作業 %(op)s。"
-
-msgid "Override the default dnsmasq settings with this file"
-msgstr "使用此檔案來置換預設 dnsmasq 設定"
-
-msgid "Owner type of the device: network/compute"
-msgstr "裝置的擁有者類型:網路/計算"
-
-msgid "POST requests are not supported on this resource."
-msgstr "此資源上不支援 POST 要求。"
-
-#, python-format
-msgid "Parsing bridge_mappings failed: %s."
-msgstr "剖析 bridge_mappings 時失敗:%s。"
-
-msgid "Parsing supported pci_vendor_devs failed"
-msgstr "剖析受支援的 pci_vendor_devs 失敗"
-
-msgid "Path to PID file for this process"
-msgstr "用於此程序的 PID 檔案路徑"
-
-msgid "Path to the router directory"
-msgstr "路由器目錄的路徑"
-
-msgid "Peer patch port in integration bridge for tunnel bridge."
-msgstr "整合橋接器中用於通道橋接器的同層級修補程式埠。"
-
-msgid "Peer patch port in tunnel bridge for integration bridge."
-msgstr "通道橋接器中用於整合橋接器的同層級修補程式埠。"
-
-msgid "Ping timeout"
-msgstr "連通測試逾時值"
-
-msgid "Plugin does not support updating provider attributes"
-msgstr "外掛程式不支援更新提供者屬性"
-
-#, python-format
-msgid "Port %(id)s does not have fixed ip %(address)s"
-msgstr "埠 %(id)s 沒有固定 IP %(address)s"
-
-#, python-format
-msgid ""
-"Port %(port_id)s is associated with a different tenant than Floating IP "
-"%(floatingip_id)s and therefore cannot be bound."
-msgstr ""
-"埠 %(port_id)s 已與浮動 IP %(floatingip_id)s 之外的 Tenant 產生關聯,因此無法"
-"連結。"
-
-msgid ""
-"Port Security must be enabled in order to have allowed address pairs on a "
-"port."
-msgstr "必須啟用埠安全,才能在埠上使用位址配對。"
-
-msgid "Port does not have port security binding."
-msgstr "埠沒有埠安全連結。"
-
-msgid ""
-"Port has security group associated. Cannot disable port security or ip "
-"address until security group is removed"
-msgstr "埠已與安全群組產生關聯。無法停用埠安全或 IP 位址,除非將安全群組移除"
-
-msgid ""
-"Port security must be enabled and port must have an IP address in order to "
-"use security groups."
-msgstr "埠安全必須加以啟用,而且埠必須具有 IP 位址,才能使用安全群組。"
-
-msgid "Private key of client certificate."
-msgstr "用戶端憑證的私密金鑰。"
-
-#, python-format
-msgid "Probe %s deleted"
-msgstr "已刪除探針 %s"
-
-#, python-format
-msgid "Probe created : %s "
-msgstr "已建立探針:%s "
-
-msgid "Process is already started"
-msgstr "程序已啟動"
-
-msgid "Process is not running."
-msgstr "程序不在執行中。"
-
-msgid "Protocol to access nova metadata, http or https"
-msgstr "用於存取 Nova meta 資料的通訊協定:HTTP 或 HTTPS"
-
-msgid ""
-"Range of seconds to randomly delay when starting the periodic task scheduler "
-"to reduce stampeding. (Disable by setting to 0)"
-msgstr ""
-"啟動定期作業排定器以減少大混亂的隨機延遲秒數範圍。(如果要停用,則設為 0)"
-
-msgid "Remote metadata server experienced an internal server error."
-msgstr "遠端 meta 資料伺服器發生內部伺服器錯誤。"
-
-msgid ""
-"Representing the resource type whose load is being reported by the agent. "
-"This can be \"networks\", \"subnets\" or \"ports\". When specified (Default "
-"is networks), the server will extract particular load sent as part of its "
-"agent configuration object from the agent report state, which is the number "
-"of resources being consumed, at every report_interval.dhcp_load_type can be "
-"used in combination with network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is "
-"WeightScheduler, dhcp_load_type can be configured to represent the choice "
-"for the resource being balanced. Example: dhcp_load_type=networks"
-msgstr ""
-"代表將由代理程式報告其負載的資源類型。它可以為「網路」、「子網路」或「埠」。"
-"指定時(預設值為網路),伺服器將從代理程式報告狀態(為所耗用的資源數目)擷取"
-"作為其代理程式配置物件一部分傳送的特定負載,擷取間隔為 report_interval."
-"dhcp_load_type 可以與network_scheduler_driver = neutron.scheduler."
-"dhcp_agent_scheduler.WeightScheduler 組合使用。當 network_scheduler_driver "
-"為 WeightScheduler 時,可以將 dhcp_load_type配置為代表您選擇要進行平衡的資"
-"源。範例:dhcp_load_type=網路"
-
-msgid "Request Failed: internal server error while processing your request."
-msgstr "要求失敗:處理要求時發生內部伺服器錯誤。"
-
-#, python-format
-msgid ""
-"Request contains duplicate address pair: mac_address %(mac_address)s "
-"ip_address %(ip_address)s."
-msgstr ""
-"要求包含重複的位址配對:mac_address %(mac_address)sip_address "
-"%(ip_address)s。"
-
-#, python-format
-msgid ""
-"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps "
-"with another subnet"
-msgstr "所要求的網路 %(network_id)s 子網路 (CIDR %(cidr)s) 與另一個子網路重疊"
-
-#, python-format
-msgid ""
-"Resource '%(resource_id)s' is already associated with provider "
-"'%(provider)s' for service type '%(service_type)s'"
-msgstr ""
-"資源 '%(resource_id)s' 已與服務類型 '%(service_type)s' 的提供者 "
-"'%(provider)s' 產生關聯"
-
-msgid "Resource body required"
-msgstr "需要資源主體"
-
-msgid "Resource not found."
-msgstr "找不到資源。"
-
-msgid "Resources required"
-msgstr "需要資源"
-
-msgid "Root helper daemon application to use when possible."
-msgstr "可能時要使用的根說明程式常駐程式應用程式。"
-
-msgid "Root permissions are required to drop privileges."
-msgstr "需要 root 權限才能捨棄專用權。"
-
-#, python-format
-msgid "Router %(router_id)s %(reason)s"
-msgstr "路由器 %(router_id)s %(reason)s"
-
-#, python-format
-msgid "Router %(router_id)s could not be found"
-msgstr "找不到路由器 %(router_id)s"
-
-#, python-format
-msgid "Router %(router_id)s does not have an interface with id %(port_id)s"
-msgstr "路由器 %(router_id)s 沒有 ID 為 %(port_id)s 的介面"
-
-#, python-format
-msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s"
-msgstr "路由器 %(router_id)s 在子網路 %(subnet_id)s 上沒有介面"
-
-#, python-format
-msgid "Router already has a port on subnet %s"
-msgstr "路由器在子網路 %s 上已經有埠"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more floating IPs."
-msgstr ""
-"路由器 %(router_id)s 上子網路 %(subnet_id)s 的路由器介面無法刪除,因為一個以"
-"上的浮動 IP 需要該介面。"
-
-#, python-format
-msgid ""
-"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be "
-"deleted, as it is required by one or more routes."
-msgstr ""
-"路由器 %(router_id)s 上子網路 %(subnet_id)s 的路由器介面無法刪除,因為一個以"
-"上的路徑需要該介面。"
-
-msgid "Router that will have connected instances' metadata proxied."
-msgstr "其已連接的實例 meta 資料將被代理的路由器。"
-
-msgid "Run as daemon."
-msgstr "作為常駐程式來執行。"
-
-msgid ""
-"Seconds between nodes reporting state to server; should be less than "
-"agent_down_time, best if it is half or less than agent_down_time."
-msgstr ""
-"兩個節點將狀態報告給伺服器的間隔秒數;應該小於 agent_down_time;如果是 "
-"agent_down_time 的一半或者小於 agent_down_time,則最佳。"
-
-msgid "Seconds between running periodic tasks"
-msgstr "執行定期作業的間隔秒數"
-
-msgid ""
-"Seconds to regard the agent is down; should be at least twice "
-"report_interval, to be sure the agent is down for good."
-msgstr ""
-"將代理程式視為已關閉的秒數;應該至少是report_interval 的兩倍,以確保代理程式"
-"已永久關閉。"
-
-#, python-format
-msgid "Security group %(id)s does not exist"
-msgstr "安全群組 %(id)s 不存在"
-
-#, python-format
-msgid "Security group rule %(id)s does not exist"
-msgstr "安全群組規則 %(id)s 不存在"
-
-#, python-format
-msgid "Security group rule already exists. Rule id is %(id)s."
-msgstr "安全群組規則已經存在。規則 ID 為 %(id)s。"
-
-msgid "Segments and provider values cannot both be set."
-msgstr "無法同時設定區段及提供者值。"
-
-msgid ""
-"Send notification to nova when port data (fixed_ips/floatingip) changes so "
-"nova can update its cache."
-msgstr ""
-"埠資料 (fixed_ips/floatingip) 變更時,將通知傳送至 Nova,以便 Nova 可以更新其"
-"快取。"
-
-msgid "Send notification to nova when port status changes"
-msgstr "埠狀態變更時,將通知傳送至 Nova"
-
-msgid ""
-"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the "
-"feature is disabled"
-msgstr ""
-"給這個項目傳送了用於高可用性設定的許多無償 ARP,如果小於或等於 0,則會停用該"
-"功能"
-
-#, python-format
-msgid ""
-"Service provider '%(provider)s' could not be found for service type "
-"%(service_type)s"
-msgstr "找不到服務類型 %(service_type)s 的服務提供者 '%(provider)s'"
-
-#, python-format
-msgid "Service type %(service_type)s does not have a default service provider"
-msgstr "服務類型 %(service_type)s 不具有預設服務提供者"
-
-msgid ""
-"Set new timeout in seconds for new rpc calls after agent receives SIGTERM. "
-"If value is set to 0, rpc timeout won't be changed"
-msgstr ""
-"在代理程式接收 SIGTERM 之後為新 RPC 呼叫設定新逾時(以秒為單位)。如果值設定"
-"為 0,RPC 逾時將不會變更"
-
-msgid ""
-"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/"
-"VXLAN tunnel."
-msgstr ""
-"在帶有 GRE/VXLAN 通道的送出 IP 封包上,設定或取消設定「不劃分片段 (DF)」位"
-"元。"
-
-#, python-format
-msgid ""
-"Some tenants have more than one security group named 'default': "
-"%(duplicates)s. All duplicate 'default' security groups must be resolved "
-"before upgrading the database."
-msgstr ""
-"部分承租人具有多個名稱為 'default' 的安全群組:%(duplicates)s。必須先解決所有"
-"重複的 'default' 安全群組,才能升級資料庫。"
-
-msgid ""
-"Specifying 'tenant_id' other than authenticated tenant in request requires "
-"admin privileges"
-msgstr "在要求中指定已鑑別 Tenant 之外的 'tenant_id' 時需要管理者專用權"
-
-msgid "Subnet for router interface must have a gateway IP"
-msgstr "路由器介面的子網路必須具有閘道 IP"
-
-msgid "Subnet pool has existing allocations"
-msgstr "子網路儲存區具有現有的配置"
-
-msgid "Subnet used for the l3 HA admin network."
-msgstr "用於 l3 HA 管理網路的子網路。"
-
-msgid ""
-"System-wide flag to determine the type of router that tenants can create. "
-"Only admin can override."
-msgstr "此系統層面旗標用來決定承租人可以建立的路由器類型。只有管理者才能置換。"
-
-msgid "TCP Port to listen for metadata server requests."
-msgstr "用於接聽 meta 資料伺服器要求的 TCP 埠。"
-
-msgid "TCP Port used by Neutron metadata namespace proxy."
-msgstr "Neutron meta 資料名稱空間 Proxy 所使用的 TCP 埠。"
-
-msgid "TCP Port used by Nova metadata server."
-msgstr "Nova meta 資料伺服器所使用的 TCP 埠。"
-
-#, python-format
-msgid "TLD '%s' must not be all numeric"
-msgstr "TLD '%s' 不得全為數值"
-
-msgid "TOS for vxlan interface protocol packets."
-msgstr "VXLAN 介面通訊協定封包的 TOS。"
-
-msgid "TTL for vxlan interface protocol packets."
-msgstr "VXLAN 介面通訊協定封包的 TTL。"
-
-#, python-format
-msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network"
-msgstr "Tenant %(tenant_id)s 不可在此網路上建立 %(resource)s"
-
-msgid "Tenant network creation is not enabled."
-msgstr "未啟用 Tenant 網路建立作業。"
-
-msgid ""
-"The 'gateway_external_network_id' option must be configured for this agent "
-"as Neutron has more than one external network."
-msgstr ""
-"必須為此代理程式配置 'gateway_external_network_id' 選項,因為 Neutron 具有多"
-"個外部網路。"
-
-#, python-format
-msgid ""
-"The HA Network CIDR specified in the configuration file isn't valid; "
-"%(cidr)s."
-msgstr "配置檔中指定的「HA 網路 CIDR」無效:%(cidr)s。"
-
-msgid "The UDP port to use for VXLAN tunnels."
-msgstr "要用於 VXLAN 通道的 UDP 埠。"
-
-msgid "The advertisement interval in seconds"
-msgstr "廣告間隔(以秒為單位)"
-
-#, python-format
-msgid "The allocation pool %(pool)s is not valid."
-msgstr "配置儲存區 %(pool)s 無效。"
-
-#, python-format
-msgid ""
-"The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s."
-msgstr "配置儲存區 %(pool)s 跨越了子網路 CIDR %(subnet_cidr)s。"
-
-#, python-format
-msgid ""
-"The attribute '%(attr)s' is reference to other resource, can't used by sort "
-"'%(resource)s'"
-msgstr "屬性 '%(attr)s' 是對其他資源的參照,無法由排序 '%(resource)s' 使用"
-
-msgid "The core plugin Neutron will use"
-msgstr "Neutron 將使用的核心外掛程式"
-
-msgid "The driver used to manage the DHCP server."
-msgstr "用於管理 DHCP 伺服器的驅動程式。"
-
-msgid "The driver used to manage the virtual interface."
-msgstr "用於管理虛擬介面的驅動程式。"
-
-#, python-format
-msgid ""
-"The following device_id %(device_id)s is not owned by your tenant or matches "
-"another tenants router."
-msgstr ""
-"下列 device_id %(device_id)s 不是由您的承租人所擁有者,或者與另一個承租人路由"
-"器相符。"
-
-msgid "The host IP to bind to"
-msgstr "要連結至的主機 IP"
-
-msgid "The interface for interacting with the OVSDB"
-msgstr "用於與 OVSDB 互動的介面"
-
-msgid ""
-"The maximum number of items returned in a single response, value was "
-"'infinite' or negative integer means no limit"
-msgstr "在單一回應中傳回的項目數上限,值為 'infinite' 或負整數時表示無限制"
-
-#, python-format
-msgid ""
-"The network %(network_id)s has been already hosted by the DHCP Agent "
-"%(agent_id)s."
-msgstr "網路 %(network_id)s 已經由 DHCP 代理程式 %(agent_id)s 管理。"
-
-#, python-format
-msgid ""
-"The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s."
-msgstr "網路 %(network_id)s 不是由 DHCP 代理程式 %(agent_id)s 管理。"
-
-#, python-format
-msgid "The number of allowed address pair exceeds the maximum %(quota)s."
-msgstr "所容許的位址配對數目超過了上限 %(quota)s。"
-
-msgid ""
-"The number of seconds the agent will wait between polling for local device "
-"changes."
-msgstr "輪詢本端裝置變更之間代理程式將等待的秒數。"
-
-msgid ""
-"The number of seconds to wait before respawning the ovsdb monitor after "
-"losing communication with it."
-msgstr "與 OVSDB 監視器的通訊中斷後重新大量產生OVSDB 監視器之前等待的秒數。"
-
-msgid "The number of sort_keys and sort_dirs must be same"
-msgstr "sort_key 數目及 sort_dir 數目必須相同"
-
-#, python-format
-msgid "The port '%s' was deleted"
-msgstr "已刪除埠 '%s'"
-
-msgid "The port to bind to"
-msgstr "要連結至的埠"
-
-#, python-format
-msgid "The requested content type %s is invalid."
-msgstr "所要求的內容類型 %s 無效。"
-
-msgid "The resource could not be found."
-msgstr "找不到資源。"
-
-#, python-format
-msgid ""
-"The router %(router_id)s has been already hosted by the L3 Agent "
-"%(agent_id)s."
-msgstr "路由器 %(router_id)s 已經由 L3 代理程式 %(agent_id)s 管理。"
-
-msgid ""
-"The server has either erred or is incapable of performing the requested "
-"operation."
-msgstr "伺服器發生錯誤,或者無法執行所要求的作業。"
-
-msgid "The service plugins Neutron will use"
-msgstr "Neutron 將使用的服務外掛程式"
-
-msgid "The type of authentication to use"
-msgstr "要使用的鑑別類型"
-
-#, python-format
-msgid "The value '%(value)s' for %(element)s is not valid."
-msgstr "%(element)s 的值 '%(value)s' 無效。"
-
-msgid ""
-"The working mode for the agent. Allowed modes are: 'legacy' - this preserves "
-"the existing behavior where the L3 agent is deployed on a centralized "
-"networking node to provide L3 services like DNAT, and SNAT. Use this mode if "
-"you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality "
-"and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - "
-"this enables centralized SNAT support in conjunction with DVR.  This mode "
-"must be used for an L3 agent running on a centralized node (or in single-"
-"host deployments, e.g. devstack)"
-msgstr ""
-"代理程式的工作中模式。所容許的模式為:「舊式」- 這種模式會將現有行為保留在集"
-"中式網路節點上用於部署L3 代理程式的位置,以提供 L3 服務(例如 DNAT 和 "
-"SNAT)。如果您不想採用 DVR,請使用這種模式。'dvr' - 這種模式會啟用DVR 功能,"
-"並且必須用於在計算主機上執行的 L3 代理程式。'dvr_snat' - 這種模式會啟用集中"
-"式 SNAT 支援以及 DVR。這種模式必須用於在集中式節點上執行(或者在單一主機部屬"
-"中執行,例如 devstack)的 L3 代理程式"
-
-msgid ""
-"True to delete all ports on all the OpenvSwitch bridges. False to delete "
-"ports created by Neutron on integration and external network bridges."
-msgstr ""
-"如果為 True,則刪除所有 OpenvSwitch 橋接器上的所有埠。如果為 False,則刪除"
-"Neutron 在整合及外部網路橋接器上建立的埠。"
-
-msgid "Tunnel IP value needed by the ML2 plugin"
-msgstr "ML2 外掛程式需要的通道 IP 值"
-
-msgid "Tunnel bridge to use."
-msgstr "要使用的通道橋接器。"
-
-msgid "URL to database"
-msgstr "資料庫 URL"
-
-#, python-format
-msgid "Unable to access %s"
-msgstr "無法存取 %s"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(router_id)s. The number of routes exceeds "
-"the maximum %(quota)s."
-msgstr "無法對 %(router_id)s 完成作業。路徑數目超出上限 %(quota)s。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of DNS "
-"nameservers exceeds the limit %(quota)s."
-msgstr ""
-"無法對 %(subnet_id)s 完成作業。DNS 名稱伺服器的數目超出限制 %(quota)s。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for %(subnet_id)s. The number of host routes "
-"exceeds the limit %(quota)s."
-msgstr "無法對 %(subnet_id)s 完成作業。主機路徑數目超出限制 %(quota)s。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The IP address "
-"%(ip_address)s is in use."
-msgstr "無法對網路 %(net_id)s 完成作業。IP 位址 %(ip_address)s 正在使用中。"
-
-#, python-format
-msgid ""
-"Unable to complete operation for network %(net_id)s. The mac address %(mac)s "
-"is in use."
-msgstr "無法對網路 %(net_id)s 完成作業。MAC 位址 %(mac)s 正在使用中。"
-
-#, python-format
-msgid ""
-"Unable to complete operation on network %(net_id)s. There are one or more "
-"ports still in use on the network."
-msgstr "無法對網路 %(net_id)s 完成作業。網路上有一個以上的埠仍在使用中。"
-
-#, python-format
-msgid ""
-"Unable to complete operation on port %(port_id)s for network %(net_id)s. "
-"Port already has an attached device %(device_id)s."
-msgstr ""
-"無法對網路 %(net_id)s 的埠 %(port_id)s 完成作業。埠已連接裝置 %(device_id)s。"
-
-#, python-format
-msgid "Unable to convert value in %s"
-msgstr "無法轉換 %s 中的值"
-
-msgid "Unable to create the Agent Gateway Port"
-msgstr "無法建立「代理程式閘道埠」"
-
-msgid "Unable to create the SNAT Interface Port"
-msgstr "無法建立「SNAT 介面埠」"
-
-#, python-format
-msgid ""
-"Unable to create the flat network. Physical network %(physical_network)s is "
-"in use."
-msgstr "無法建立平面網路。實體網路 %(physical_network)s 正在使用中。"
-
-msgid ""
-"Unable to create the network. No available network found in maximum allowed "
-"attempts."
-msgstr "無法建立網路。在所容許的嘗試次數上限內,找不到可用的網路。"
-
-msgid ""
-"Unable to create the network. No tenant network is available for allocation."
-msgstr "無法建立網路。沒有可用於配置的 Tenant 網路。"
-
-#, python-format
-msgid ""
-"Unable to create the network. The VLAN %(vlan_id)s on physical network "
-"%(physical_network)s is in use."
-msgstr ""
-"無法建立網路。實體網路 %(physical_network)s 上的 VLAN %(vlan_id)s 正在使用"
-"中。"
-
-#, python-format
-msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use."
-msgstr "無法建立網路。通道 ID %(tunnel_id)s 正在使用中。"
-
-#, python-format
-msgid "Unable to determine mac address for %s"
-msgstr "無法判定 %s 的 MAC 位址"
-
-#, python-format
-msgid "Unable to find '%s' in request body"
-msgstr "在要求內文中找不到 '%s'"
-
-#, python-format
-msgid "Unable to find any IP address on external network %(net_id)s."
-msgstr "在外部網路 %(net_id)s 上找不到任何 IP 位址。"
-
-#, python-format
-msgid "Unable to find resource name in %s"
-msgstr "在 %s 中找不到資源名稱"
-
-msgid "Unable to generate IP address by EUI64 for IPv4 prefix"
-msgstr "無法依 EUI-64 針對 IPv4 字首產生 IP 位址"
-
-#, python-format
-msgid "Unable to generate unique DVR mac for host %(host)s."
-msgstr "無法為主機 %(host)s 產生唯一的 DVR MAC。"
-
-#, python-format
-msgid "Unable to generate unique mac on network %(net_id)s."
-msgstr "無法在網路 %(net_id)s 上產生唯一 MAC 位址。"
-
-#, python-format
-msgid ""
-"Unable to identify a target field from:%s. Match should be in the form "
-"%%(<field_name>)s"
-msgstr "無法識別來自 %s 的目標欄位。相符項的格式應該為%%(<field_name>)s"
-
-#, python-format
-msgid ""
-"Unable to verify match:%(match)s as the parent resource: %(res)s was not "
-"found"
-msgstr "無法驗證相符項 %(match)s,因為找不到母項資源 %(res)s"
-
-#, python-format
-msgid "Unexpected response code: %s"
-msgstr "非預期的回應碼:%s"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "非預期的回應:%s"
-
-msgid "Unimplemented commands"
-msgstr "未實作的指令"
-
-msgid "Unknown API version specified"
-msgstr "指定的 API 版本不明"
-
-#, python-format
-msgid "Unknown attribute '%s'."
-msgstr "不明屬性 '%s'。"
-
-#, python-format
-msgid "Unknown chain: %r"
-msgstr "不明鏈:%r"
-
-#, python-format
-msgid "Unknown quota resources %(unknown)s."
-msgstr "不明的配額資源 %(unknown)s。"
-
-msgid "Unmapped error"
-msgstr "「未對映」錯誤"
-
-msgid "Unrecognized action"
-msgstr "無法辨識的動作"
-
-#, python-format
-msgid "Unrecognized attribute(s) '%s'"
-msgstr "無法辨識屬性 '%s'"
-
-msgid "Unsupported Content-Type"
-msgstr "不支援的內容類型"
-
-#, python-format
-msgid "Unsupported network type %(net_type)s."
-msgstr "不支援網路類型 %(net_type)s。"
-
-msgid "Unsupported request type"
-msgstr "不受支援的要求類型"
-
-msgid "Updating default security group not allowed."
-msgstr "不容許更新預設安全群組。"
-
-msgid ""
-"Use ML2 l2population mechanism driver to learn remote MAC and IPs and "
-"improve tunnel scalability."
-msgstr ""
-"使用 ML2 l2population 機制驅動程式,來瞭解遠端 MAC 及 IP 位址,並提升通道可調"
-"整性。"
-
-msgid "Use broadcast in DHCP replies"
-msgstr "在 DHCP 回覆中使用廣播"
-
-msgid "Use either --delta or relative revision, not both"
-msgstr "使用 --delta 或相對修訂,但不要同時使用兩者"
-
-msgid "User (uid or name) running metadata proxy after its initialization"
-msgstr "在 meta 資料 Proxy 起始設定之後執行該 Proxy 的使用者(UID 或名稱)"
-
-msgid ""
-"User (uid or name) running metadata proxy after its initialization (if "
-"empty: agent effective user)."
-msgstr ""
-"在 meta 資料 Proxy 起始設定之後執行該 Proxy 的使用者(UID 或名稱)(如果為"
-"空:則為代理程式有效使用者)。"
-
-msgid "User (uid or name) running this process after its initialization"
-msgstr "在此程序起始設定之後執行此程序的使用者(UID 或名稱)"
-
-msgid "VRRP authentication password"
-msgstr "VRRP 鑑別密碼"
-
-msgid "VRRP authentication type"
-msgstr "VRRP 鑑別類型"
-
-#, python-format
-msgid ""
-"Validation of dictionary's keys failed. Expected keys: %(expected_keys)s "
-"Provided keys: %(provided_keys)s"
-msgstr ""
-"驗證字典索引鍵失敗。預期索引鍵:%(expected_keys)s提供的索引鍵:"
-"%(provided_keys)s"
-
-#, python-format
-msgid "Validator '%s' does not exist."
-msgstr "驗證器 '%s' 不存在。"
-
-#, python-format
-msgid "Value %(value)s in mapping: '%(mapping)s' not unique"
-msgstr "對映 '%(mapping)s' 中的值 %(value)s 不是唯一的"
-
-msgid ""
-"Watch file log. Log watch should be disabled when metadata_proxy_user/group "
-"has no read/write permissions on metadata proxy log file."
-msgstr ""
-"監看日誌檔。當 metadata_proxy_user/group 沒有對meta 資料 Proxy 日誌檔的讀寫許"
-"可權時,應該停用日誌監看。"
-
-msgid ""
-"Where to store Neutron state files. This directory must be writable by the "
-"agent."
-msgstr "Neutron 狀態檔的儲存位置。此目錄必須可以由代理程式寫入。"
-
-msgid ""
-"With IPv6, the network used for the external gateway does not need to have "
-"an associated subnet, since the automatically assigned link-local address "
-"(LLA) can be used. However, an IPv6 gateway address is needed for use as the "
-"next-hop for the default route. If no IPv6 gateway address is configured "
-"here, (and only then) the neutron router will be configured to get its "
-"default route from router advertisements (RAs) from the upstream router; in "
-"which case the upstream router must also be configured to send these RAs. "
-"The ipv6_gateway, when configured, should be the LLA of the interface on the "
-"upstream router. If a next-hop using a global unique address (GUA) is "
-"desired, it needs to be done via a subnet allocated to the network and not "
-"through this parameter. "
-msgstr ""
-"如果使用 IPv6,則用於外部閘道的網路不需要具有相關聯的子網路,因為可以使用自動"
-"指派的鏈結本端位址 (LLA)。但是,IPv6 閘道位址需要用作預設路由的下一個中繼站。"
-"如果未在這裡配置 IPv6 閘道位址,(且僅當那時)則將會配置 Neutron 路由器以從上"
-"游路由器的路由器通告 (RA) 中取得其預設路由;在該情況下,也必須配置上游路由器"
-"以傳送這些 RA。ipv6_gateway(如果已配置)應為上游路由器介面的 LLA。如果需要使"
-"用廣域唯一位址 (GUA) 的下一個中繼站,則需要透過配置給網路的子網路來執行此配"
-"置,而不是透過此參數。"
-
-msgid "You must implement __call__"
-msgstr "必須實作 __call__"
-
-msgid ""
-"You must provide a config file for bridge - either --config-file or "
-"env[NEUTRON_TEST_CONFIG_FILE]"
-msgstr "必須為橋接器提供配置檔:--config-file,或env[NEUTRON_TEST_CONFIG_FILE]"
-
-msgid "You must provide a revision or relative delta"
-msgstr "必須提供修訂或相對差異"
-
-msgid "allocation_pools allowed only for specific subnet requests."
-msgstr "僅容許用於特定子網路要求的 allocation_pools。"
-
-msgid "binding:profile value too large"
-msgstr "binding:profile 值太大"
-
-msgid "cidr and prefixlen must not be supplied together"
-msgstr "不得同時提供 cidr 和 prefixlen"
-
-#, python-format
-msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid."
-msgstr "dhcp_agents_per_network 必須 >= 1。'%s' 無效。"
-
-msgid "fixed_ip_address cannot be specified without a port_id"
-msgstr "如果未指定 port_id,則無法指定 fixed_ip_address"
-
-#, python-format
-msgid "has device owner %s"
-msgstr "具有裝置擁有者 %s"
-
-#, python-format
-msgid "ip command failed on device %(dev_name)s: %(reason)s"
-msgstr "對裝置 %(dev_name)s 執行的 IP 指令失敗:%(reason)s"
-
-#, python-format
-msgid "ip link capability %(capability)s is not supported"
-msgstr "不支援 ip link 功能 %(capability)s"
-
-#, python-format
-msgid "ip link command is not supported: %(reason)s"
-msgstr "不支援 ip link 指令:%(reason)s"
-
-msgid "ip_version must be specified in the absence of cidr and subnetpool_id"
-msgstr "如果未指定 cidr 和 subnetpool_id,則必須指定 ip_version"
-
-msgid "ipv6_address_mode is not valid when ip_version is 4"
-msgstr "當 ip_version 是 4 時,ipv6_address_mode 無效"
-
-msgid "ipv6_ra_mode is not valid when ip_version is 4"
-msgstr "當 ip_version 是 4 時,ipv6_ra_mode 無效"
-
-msgid ""
-"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to "
-"False."
-msgstr ""
-"如果 enable_dhcp 設為 False,則 ipv6_ra_mode 和 ipv6_address_mode都無法進行設"
-"定。"
-
-#, python-format
-msgid ""
-"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to "
-"'%(addr_mode)s' is not valid. If both attributes are set, they must be the "
-"same value"
-msgstr ""
-"如果在 ipv6_address_mode 設為 '%(addr_mode)s' 時將 ipv6_ra_mode 設"
-"為'%(ra_mode)s',則無效。如果兩個屬性同時設定,則它們的值必須相同"
-
-msgid "mac address update"
-msgstr "MAC 位址更新"
-
-#, python-format
-msgid ""
-"max_l3_agents_per_router %(max_agents)s config parameter is not valid. It "
-"has to be greater than or equal to min_l3_agents_per_router %(min_agents)s."
-msgstr ""
-"max_l3_agents_per_router %(max_agents)s 配置參數無效。它必須大於或等於 "
-"min_l3_agents_per_router %(min_agents)s。"
-
-#, python-format
-msgid ""
-"min_l3_agents_per_router config parameter is not valid. It has to be equal "
-"to or more than %s for HA."
-msgstr ""
-"min_l3_agents_per_router 配置參數無效。該配置參數必須等於或大於 HA 的 %s。"
-
-msgid "network_type required"
-msgstr "需要 network_type"
-
-#, python-format
-msgid "network_type value '%s' not supported"
-msgstr "不支援 network_type 值 '%s'"
-
-msgid "new subnet"
-msgstr "新子網路"
-
-#, python-format
-msgid "physical_network '%s' unknown  for VLAN provider network"
-msgstr "VLAN 提供者網路的 physical_network '%s' 不明"
-
-#, python-format
-msgid "physical_network '%s' unknown for flat provider network"
-msgstr "平面提供者網路的 physical_network '%s' 不明"
-
-msgid "physical_network required for flat provider network"
-msgstr "平面提供者網路所需的 physical_network"
-
-#, python-format
-msgid "provider:physical_network specified for %s network"
-msgstr "為 %s 網路指定了 provider:physical_network"
-
-msgid "respawn_interval must be >= 0 if provided."
-msgstr "如果提供的話,則 respawn_interval 必須 >= 0。"
-
-#, python-format
-msgid "segmentation_id out of range (%(min)s through %(max)s)"
-msgstr "segmentation_id 超出範圍(%(min)s 到 %(max)s)"
-
-msgid "segmentation_id requires physical_network for VLAN provider network"
-msgstr "segmentation_id 需要 VLAN 提供者網路的 physical_network"
-
-msgid "the nexthop is not connected with router"
-msgstr "下一個中繼站未與路由器連接"
-
-msgid "the nexthop is used by router"
-msgstr "路由器已使用下一個中繼站"
-
-msgid ""
-"uuid provided from the command line so external_process can track us via /"
-"proc/cmdline interface."
-msgstr ""
-"已從指令行提供了 UUID,因此, external_process 可以透過/proc/cmdline 介面對我"
-"們進行追蹤。"
diff --git a/neutron/manager.py b/neutron/manager.py
deleted file mode 100644 (file)
index fcbf78a..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-# Copyright 2011 VMware, Inc
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import weakref
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_service import periodic_task
-import six
-
-from neutron._i18n import _, _LI
-from neutron.common import utils
-from neutron.plugins.common import constants
-
-
-LOG = logging.getLogger(__name__)
-
-CORE_PLUGINS_NAMESPACE = 'neutron.core_plugins'
-
-
-class Manager(periodic_task.PeriodicTasks):
-
-    # Set RPC API version to 1.0 by default.
-    target = oslo_messaging.Target(version='1.0')
-
-    def __init__(self, host=None):
-        if not host:
-            host = cfg.CONF.host
-        self.host = host
-        conf = getattr(self, "conf", cfg.CONF)
-        super(Manager, self).__init__(conf)
-
-    def periodic_tasks(self, context, raise_on_error=False):
-        self.run_periodic_tasks(context, raise_on_error=raise_on_error)
-
-    def init_host(self):
-        """Handle initialization if this is a standalone service.
-
-        Child classes should override this method.
-
-        """
-        pass
-
-    def after_start(self):
-        """Handler post initialization stuff.
-
-        Child classes can override this method.
-        """
-        pass
-
-
-def validate_post_plugin_load():
-    """Checks if the configuration variables are valid.
-
-    If the configuration is invalid then the method will return an error
-    message. If all is OK then it will return None.
-    """
-    if ('dhcp_agents_per_network' in cfg.CONF and
-        cfg.CONF.dhcp_agents_per_network <= 0):
-        msg = _("dhcp_agents_per_network must be >= 1. '%s' "
-                "is invalid.") % cfg.CONF.dhcp_agents_per_network
-        return msg
-
-
-def validate_pre_plugin_load():
-    """Checks if the configuration variables are valid.
-
-    If the configuration is invalid then the method will return an error
-    message. If all is OK then it will return None.
-    """
-    if cfg.CONF.core_plugin is None:
-        msg = _('Neutron core_plugin not configured!')
-        return msg
-
-
-class NeutronManager(object):
-    """Neutron's Manager class.
-
-    Neutron's Manager class is responsible for parsing a config file and
-    instantiating the correct plugin that concretely implements
-    neutron_plugin_base class.
-    The caller should make sure that NeutronManager is a singleton.
-    """
-    _instance = None
-
-    def __init__(self, options=None, config_file=None):
-        # If no options have been provided, create an empty dict
-        if not options:
-            options = {}
-
-        msg = validate_pre_plugin_load()
-        if msg:
-            LOG.critical(msg)
-            raise Exception(msg)
-
-        # NOTE(jkoelker) Testing for the subclass with the __subclasshook__
-        #                breaks tach monitoring. It has been removed
-        #                intentionally to allow v2 plugins to be monitored
-        #                for performance metrics.
-        plugin_provider = cfg.CONF.core_plugin
-        LOG.info(_LI("Loading core plugin: %s"), plugin_provider)
-        self.plugin = self._get_plugin_instance(CORE_PLUGINS_NAMESPACE,
-                                                plugin_provider)
-        msg = validate_post_plugin_load()
-        if msg:
-            LOG.critical(msg)
-            raise Exception(msg)
-
-        # core plugin as a part of plugin collection simplifies
-        # checking extensions
-        # TODO(enikanorov): make core plugin the same as
-        # the rest of service plugins
-        self.service_plugins = {constants.CORE: self.plugin}
-        self._load_service_plugins()
-        # Used by pecan WSGI
-        self.resource_plugin_mappings = {}
-        self.resource_controller_mappings = {}
-
-    @staticmethod
-    def load_class_for_provider(namespace, plugin_provider):
-        """Loads plugin using alias or class name
-        :param namespace: namespace where alias is defined
-        :param plugin_provider: plugin alias or class name
-        :returns plugin that is loaded
-        :raises ImportError if fails to load plugin
-        """
-
-        try:
-            return utils.load_class_by_alias_or_classname(
-                namespace, plugin_provider)
-        except ImportError:
-            raise ImportError(_("Plugin '%s' not found.") % plugin_provider)
-
-    def _get_plugin_instance(self, namespace, plugin_provider):
-        plugin_class = self.load_class_for_provider(namespace, plugin_provider)
-        return plugin_class()
-
-    def _load_services_from_core_plugin(self):
-        """Puts core plugin in service_plugins for supported services."""
-        LOG.debug("Loading services supported by the core plugin")
-
-        # supported service types are derived from supported extensions
-        for ext_alias in getattr(self.plugin,
-                                 "supported_extension_aliases", []):
-            if ext_alias in constants.EXT_TO_SERVICE_MAPPING:
-                service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias]
-                self.service_plugins[service_type] = self.plugin
-                LOG.info(_LI("Service %s is supported by the core plugin"),
-                         service_type)
-
-    def _load_service_plugins(self):
-        """Loads service plugins.
-
-        Starts from the core plugin and checks if it supports
-        advanced services then loads classes provided in configuration.
-        """
-        # load services from the core plugin first
-        self._load_services_from_core_plugin()
-
-        plugin_providers = cfg.CONF.service_plugins
-        LOG.debug("Loading service plugins: %s", plugin_providers)
-        for provider in plugin_providers:
-            if provider == '':
-                continue
-
-            LOG.info(_LI("Loading Plugin: %s"), provider)
-            plugin_inst = self._get_plugin_instance('neutron.service_plugins',
-                                                    provider)
-
-            # only one implementation of svc_type allowed
-            # specifying more than one plugin
-            # for the same type is a fatal exception
-            if plugin_inst.get_plugin_type() in self.service_plugins:
-                raise ValueError(_("Multiple plugins for service "
-                                   "%s were configured") %
-                                 plugin_inst.get_plugin_type())
-
-            self.service_plugins[plugin_inst.get_plugin_type()] = plugin_inst
-
-            # search for possible agent notifiers declared in service plugin
-            # (needed by agent management extension)
-            if (hasattr(self.plugin, 'agent_notifiers') and
-                    hasattr(plugin_inst, 'agent_notifiers')):
-                self.plugin.agent_notifiers.update(plugin_inst.agent_notifiers)
-
-            LOG.debug("Successfully loaded %(type)s plugin. "
-                      "Description: %(desc)s",
-                      {"type": plugin_inst.get_plugin_type(),
-                       "desc": plugin_inst.get_plugin_description()})
-
-    @classmethod
-    @utils.synchronized("manager")
-    def _create_instance(cls):
-        if not cls.has_instance():
-            cls._instance = cls()
-
-    @classmethod
-    def has_instance(cls):
-        return cls._instance is not None
-
-    @classmethod
-    def clear_instance(cls):
-        cls._instance = None
-
-    @classmethod
-    def get_instance(cls):
-        # double-checked locking
-        if not cls.has_instance():
-            cls._create_instance()
-        return cls._instance
-
-    @classmethod
-    def get_plugin(cls):
-        # Return a weakref to minimize gc-preventing references.
-        return weakref.proxy(cls.get_instance().plugin)
-
-    @classmethod
-    def get_service_plugins(cls):
-        # Return weakrefs to minimize gc-preventing references.
-        service_plugins = cls.get_instance().service_plugins
-        return dict((x, weakref.proxy(y))
-                    for x, y in six.iteritems(service_plugins))
-
-    @classmethod
-    def get_unique_service_plugins(cls):
-        service_plugins = cls.get_instance().service_plugins
-        return tuple(weakref.proxy(x) for x in set(service_plugins.values()))
-
-    @classmethod
-    def set_plugin_for_resource(cls, resource, plugin):
-        cls.get_instance().resource_plugin_mappings[resource] = plugin
-
-    @classmethod
-    def get_plugin_for_resource(cls, resource):
-        return cls.get_instance().resource_plugin_mappings.get(resource)
-
-    @classmethod
-    def set_controller_for_resource(cls, resource, controller):
-        cls.get_instance().resource_controller_mappings[resource] = controller
-
-    @classmethod
-    def get_controller_for_resource(cls, resource):
-        return cls.get_instance().resource_controller_mappings.get(resource)
-
-    @classmethod
-    def get_service_plugin_by_path_prefix(cls, path_prefix):
-        service_plugins = cls.get_unique_service_plugins()
-        for service_plugin in service_plugins:
-            plugin_path_prefix = getattr(service_plugin, 'path_prefix', None)
-            if plugin_path_prefix and plugin_path_prefix == path_prefix:
-                return service_plugin
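As an aside on how the manager above is consumed: callers are expected to go
through the classmethods rather than instantiate NeutronManager directly. A
minimal usage sketch (assuming core_plugin, and optionally service_plugins,
are already set in the configuration, since get_instance() lazily builds the
singleton on first access; L3_ROUTER_NAT is the service-type constant from
neutron.plugins.common.constants):

    # Hedged sketch of the typical access pattern for NeutronManager.
    from neutron import manager
    from neutron.plugins.common import constants

    core = manager.NeutronManager.get_plugin()  # weakref proxy to core plugin
    service_plugins = manager.NeutronManager.get_service_plugins()
    # The core plugin is also registered under the CORE service type.
    l3_plugin = service_plugins.get(constants.L3_ROUTER_NAT)  # may be None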
diff --git a/neutron/neutron_plugin_base_v2.py b/neutron/neutron_plugin_base_v2.py
deleted file mode 100644 (file)
index 18d1503..0000000
+++ /dev/null
@@ -1,419 +0,0 @@
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-v2 Neutron Plug-in API specification.
-
-:class:`NeutronPluginBaseV2` provides the definition of the minimum set of
-methods that need to be implemented by a v2 Neutron Plug-in.
-"""
-
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class NeutronPluginBaseV2(object):
-
-    @abc.abstractmethod
-    def create_subnet(self, context, subnet):
-        """Create a subnet.
-
-        Create a subnet, which represents a range of IP addresses
-        that can be allocated to devices
-
-        :param context: neutron api request context
-        :param subnet: dictionary describing the subnet, with keys
-                       as listed in the  :obj:`RESOURCE_ATTRIBUTE_MAP` object
-                       in :file:`neutron/api/v2/attributes.py`.  All keys will
-                       be populated.
-        """
-        pass
-
-    @abc.abstractmethod
-    def update_subnet(self, context, id, subnet):
-        """Update values of a subnet.
-
-        :param context: neutron api request context
-        :param id: UUID representing the subnet to update.
-        :param subnet: dictionary with keys indicating fields to update.
-                       valid keys are those that have a value of True for
-                       'allow_put' as listed in the
-                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                       :file:`neutron/api/v2/attributes.py`.
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_subnet(self, context, id, fields=None):
-        """Retrieve a subnet.
-
-        :param context: neutron api request context
-        :param id: UUID representing the subnet to fetch.
-        :param fields: a list of strings that are valid keys in a
-                       subnet dictionary as listed in the
-                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                       :file:`neutron/api/v2/attributes.py`. Only these fields
-                       will be returned.
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_subnets(self, context, filters=None, fields=None,
-                    sorts=None, limit=None, marker=None, page_reverse=False):
-        """Retrieve a list of subnets.
-
-        The contents of the list depend on
-        the identity of the user making the request (as indicated by the
-        context) as well as any filters.
-
-        :param context: neutron api request context
-        :param filters: a dictionary with keys that are valid keys for
-                        a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
-                        object in :file:`neutron/api/v2/attributes.py`.
-                        Values in this dictionary are an iterable containing
-                        values that will be used for an exact match comparison
-                        for that value.  Each result returned by this
-                        function will have matched one of the values for each
-                        key in filters.
-        :param fields: a list of strings that are valid keys in a
-                       subnet dictionary as listed in the
-                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                       :file:`neutron/api/v2/attributes.py`. Only these fields
-                       will be returned.
-        """
-        pass
-
-    def get_subnets_count(self, context, filters=None):
-        """Return the number of subnets.
-
-        The result depends on the identity of
-        the user making the request (as indicated by the context) as well as
-        any filters.
-
-        :param context: neutron api request context
-        :param filters: a dictionary with keys that are valid keys for
-                        a network as listed in the
-                        :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                        :file:`neutron/api/v2/attributes.py`.  Values in this
-                        dictionary are an iterable containing values that
-                        will be used for an exact match comparison for that
-                        value.  Each result returned by this function will
-                        have matched one of the values for each key in filters.
-
-        .. note:: this method is optional, as it was not part of the originally
-                  defined plugin API.
-        """
-        raise NotImplementedError()
-
-    @abc.abstractmethod
-    def delete_subnet(self, context, id):
-        """Delete a subnet.
-
-        :param context: neutron api request context
-        :param id: UUID representing the subnet to delete.
-        """
-        pass
-
-    def create_subnetpool(self, context, subnetpool):
-        """Create a subnet pool.
-
-        :param context: neutron api request context
-        :param subnetpool: Dictionary representing the subnetpool to create.
-        """
-        raise NotImplementedError()
-
-    def update_subnetpool(self, context, id, subnetpool):
-        """Update a subnet pool.
-
-        :param context: neutron api request context
-        :param id: The UUID of the subnetpool to update.
-        :param subnetpool: Dictionary representing the subnetpool attributes
-                           to update.
-        """
-        raise NotImplementedError()
-
-    def get_subnetpool(self, context, id, fields=None):
-        """Show a subnet pool.
-
-        :param context: neutron api request context
-        :param id: The UUID of the subnetpool to show.
-        """
-        raise NotImplementedError()
-
-    def get_subnetpools(self, context, filters=None, fields=None,
-                        sorts=None, limit=None, marker=None,
-                        page_reverse=False):
-        """Retrieve list of subnet pools."""
-        raise NotImplementedError()
-
-    def delete_subnetpool(self, context, id):
-        """Delete a subnet pool.
-
-        :param context: neutron api request context
-        :param id: The UUID of the subnet pool to delete.
-        """
-        raise NotImplementedError()
-
-    @abc.abstractmethod
-    def create_network(self, context, network):
-        """Create a network.
-
-        Create a network, which represents an L2 network segment which
-        can have a set of subnets and ports associated with it.
-
-        :param context: neutron api request context
-        :param network: dictionary describing the network, with keys
-                        as listed in the  :obj:`RESOURCE_ATTRIBUTE_MAP` object
-                        in :file:`neutron/api/v2/attributes.py`.  All keys will
-                        be populated.
-
-        """
-        pass
-
-    @abc.abstractmethod
-    def update_network(self, context, id, network):
-        """Update values of a network.
-
-        :param context: neutron api request context
-        :param id: UUID representing the network to update.
-        :param network: dictionary with keys indicating fields to update.
-                        valid keys are those that have a value of True for
-                        'allow_put' as listed in the
-                        :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                        :file:`neutron/api/v2/attributes.py`.
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_network(self, context, id, fields=None):
-        """Retrieve a network.
-
-        :param context: neutron api request context
-        :param id: UUID representing the network to fetch.
-        :param fields: a list of strings that are valid keys in a
-                       network dictionary as listed in the
-                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                       :file:`neutron/api/v2/attributes.py`. Only these fields
-                       will be returned.
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_networks(self, context, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None, page_reverse=False):
-        """Retrieve a list of networks.
-
-        The contents of the list depend on
-        the identity of the user making the request (as indicated by the
-        context) as well as any filters.
-
-        :param context: neutron api request context
-        :param filters: a dictionary with keys that are valid keys for
-                        a network as listed in the
-                        :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                        :file:`neutron/api/v2/attributes.py`.  Values in this
-                        dictionary are an iterable containing values that will
-                        be used for an exact match comparison for that value.
-                        Each result returned by this function will have matched
-                        one of the values for each key in filters.
-        :param fields: a list of strings that are valid keys in a
-                       network dictionary as listed in the
-                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                       :file:`neutron/api/v2/attributes.py`. Only these fields
-                       will be returned.
-        """
-        pass
-
-    def get_networks_count(self, context, filters=None):
-        """Return the number of networks.
-
-        The result depends on the identity
-        of the user making the request (as indicated by the context) as well
-        as any filters.
-
-        :param context: neutron api request context
-        :param filters: a dictionary with keys that are valid keys for
-                        a network as listed in the
-                        :obj:`RESOURCE_ATTRIBUTE_MAP` object
-                        in :file:`neutron/api/v2/attributes.py`. Values in
-                        this dictionary are an iterable containing values that
-                        will be used for an exact match comparison for that
-                        value.  Each result returned by this function will have
-                        matched one of the values for each key in filters.
-
-        .. note:: this method is optional, as it was not part of the originally
-                  defined plugin API.
-        """
-        raise NotImplementedError()
-
-    @abc.abstractmethod
-    def delete_network(self, context, id):
-        """Delete a network.
-
-        :param context: neutron api request context
-        :param id: UUID representing the network to delete.
-        """
-        pass
-
-    @abc.abstractmethod
-    def create_port(self, context, port):
-        """Create a port.
-
-        Create a port, which is a connection point for a device (e.g., a VM
-        NIC) to attach to an L2 neutron network.
-
-        :param context: neutron api request context
-        :param port: dictionary describing the port, with keys as listed in the
-                     :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                     :file:`neutron/api/v2/attributes.py`.  All keys will be
-                     populated.
-        """
-        pass
-
-    @abc.abstractmethod
-    def update_port(self, context, id, port):
-        """Update values of a port.
-
-        :param context: neutron api request context
-        :param id: UUID representing the port to update.
-        :param port: dictionary with keys indicating fields to update.
-                     valid keys are those that have a value of True for
-                     'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
-                     object in :file:`neutron/api/v2/attributes.py`.
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_port(self, context, id, fields=None):
-        """Retrieve a port.
-
-        :param context: neutron api request context
-        :param id: UUID representing the port to fetch.
-        :param fields: a list of strings that are valid keys in a port
-                       dictionary as listed in the
-                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                       :file:`neutron/api/v2/attributes.py`. Only these fields
-                       will be returned.
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_ports(self, context, filters=None, fields=None,
-                  sorts=None, limit=None, marker=None, page_reverse=False):
-        """Retrieve a list of ports.
-
-        The contents of the list depend on the identity of the user making
-        the request (as indicated by the context) as well as any filters.
-
-        :param context: neutron api request context
-        :param filters: a dictionary with keys that are valid keys for
-                        a port as listed in the  :obj:`RESOURCE_ATTRIBUTE_MAP`
-                        object in :file:`neutron/api/v2/attributes.py`. Values
-                        in this dictionary are an iterable containing values
-                        that will be used for an exact match comparison for
-                        that value.  Each result returned by this function will
-                        have matched one of the values for each key in filters.
-        :param fields: a list of strings that are valid keys in a
-                       port dictionary as listed in the
-                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                       :file:`neutron/api/v2/attributes.py`. Only these fields
-                       will be returned.
-        """
-        pass
-
-    def get_ports_count(self, context, filters=None):
-        """Return the number of ports.
-
-        The result depends on the identity of the user making the request
-        (as indicated by the context) as well as any filters.
-
-        :param context: neutron api request context
-        :param filters: a dictionary with keys that are valid keys for
-                        a network as listed in the
-                        :obj:`RESOURCE_ATTRIBUTE_MAP` object in
-                        :file:`neutron/api/v2/attributes.py`.  Values in this
-                        dictionary are an iterable containing values that will
-                        be used for an exact match comparison for that value.
-                        Each result returned by this function will have matched
-                        one of the values for each key in filters.
-
-        .. note:: this method is optional, as it was not part of the originally
-                  defined plugin API.
-        """
-        raise NotImplementedError()
-
-    @abc.abstractmethod
-    def delete_port(self, context, id):
-        """Delete a port.
-
-        :param context: neutron api request context
-        :param id: UUID representing the port to delete.
-        """
-        pass
-
-    def start_rpc_listeners(self):
-        """Start the RPC listeners.
-
-        Most plugins start RPC listeners implicitly on initialization.  In
-        order to support multiple process RPC, the plugin needs to expose
-        control over when this is started.
-
-        .. note:: this method is optional, as it was not part of the originally
-                  defined plugin API.
-        """
-        raise NotImplementedError()
-
-    def start_rpc_state_reports_listener(self):
-        """Start the RPC listeners consuming state reports queue.
-
-        This optional method creates rpc consumer for REPORTS queue only.
-
-        .. note:: this method is optional, as it was not part of the originally
-                  defined plugin API.
-        """
-        raise NotImplementedError()
-
-    def rpc_workers_supported(self):
-        """Return whether the plugin supports multiple RPC workers.
-
-        A plugin that supports multiple RPC workers should override the
-        start_rpc_listeners method to ensure that this method returns True and
-        that start_rpc_listeners is called at the appropriate time.
-        Alternatively, a plugin can override this method to customize
-        detection of support for multiple RPC workers.
-
-        .. note:: this method is optional, as it was not part of the originally
-                  defined plugin API.
-        """
-        return (self.__class__.start_rpc_listeners !=
-                NeutronPluginBaseV2.start_rpc_listeners)
-
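    # Editor's note (illustration, not part of the original file): the
    # comparison above is a conventional override check -- if a subclass
    # redefines start_rpc_listeners, the class attribute differs from the
    # base class's, and this method reports True without the subclass having
    # to advertise support explicitly, e.g.:
    #
    #     class MyPlugin(NeutronPluginBaseV2):
    #         def start_rpc_listeners(self):
    #             return []
    #
    #     MyPlugin.start_rpc_listeners != NeutronPluginBaseV2.start_rpc_listeners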
-    def rpc_state_report_workers_supported(self):
-        """Return whether the plugin supports state report RPC workers.
-
-        .. note:: this method is optional, as it was not part of the originally
-                  defined plugin API.
-        """
-        return (self.__class__.start_rpc_state_reports_listener !=
-                NeutronPluginBaseV2.start_rpc_state_reports_listener)
-
-    def get_workers(self):
-        """Returns a collection NeutronWorker instances
-
-        If a plugin needs to define worker processes outside of API/RPC workers
-        then it will override this and return a collection of NeutronWorker
-        instances
-        """
-        return ()
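To make the contract above concrete, here is an illustrative skeleton of a
plugin subclass (hedged: MinimalPlugin and its in-memory dict are invented
for this example and are not a real neutron plugin; a production plugin such
as ML2 backs every method with the database, and the subnet and port method
families must be implemented as well, since every @abc.abstractmethod member
has to be overridden before the class can be instantiated):

    # Sketch only: the smallest shape a v2 core plugin can take.
    import uuid

    class MinimalPlugin(NeutronPluginBaseV2):
        def __init__(self):
            self._nets = {}

        def create_network(self, context, network):
            net = dict(network['network'], id=str(uuid.uuid4()))
            self._nets[net['id']] = net
            return net

        def get_network(self, context, id, fields=None):
            return self._nets[id]

        def get_networks(self, context, filters=None, fields=None, sorts=None,
                         limit=None, marker=None, page_reverse=False):
            return list(self._nets.values())

        def update_network(self, context, id, network):
            self._nets[id].update(network['network'])
            return self._nets[id]

        def delete_network(self, context, id):
            del self._nets[id]

        # ...the create/update/get/get_*s/delete methods for subnet and port
        # are required too and follow the same pattern; omitted for brevity.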
diff --git a/neutron/notifiers/__init__.py b/neutron/notifiers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/notifiers/batch_notifier.py b/neutron/notifiers/batch_notifier.py
deleted file mode 100644 (file)
index 0396042..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import eventlet
-
-
-class BatchNotifier(object):
-    def __init__(self, batch_interval, callback):
-        self.pending_events = []
-        self._waiting_to_send = False
-        self.callback = callback
-        self.batch_interval = batch_interval
-
-    def queue_event(self, event):
-        """Called to queue sending an event with the next batch of events.
-
-        Sending events individually, as they occur, has been problematic as it
-        can result in a flood of sends.  Previously, there was a loopingcall
-        thread that would send batched events on a periodic interval.  However,
-        maintaining a persistent thread in the loopingcall was also
-        problematic.
-
-        This replaces the loopingcall with a mechanism that creates a
-        short-lived thread on demand when the first event is queued.  That
-        thread will sleep once for the configured batch_interval to allow other
-        events to queue up in pending_events and then will send them when it
-        wakes.
-
-        If a thread is already alive and waiting, this call will simply queue
-        the event and return, leaving it up to the thread to send it.
-
-        :param event: the event that occurred.
-        """
-        if not event:
-            return
-
-        self.pending_events.append(event)
-
-        if self._waiting_to_send:
-            return
-
-        self._waiting_to_send = True
-
-        def last_out_sends():
-            eventlet.sleep(self.batch_interval)
-            self._waiting_to_send = False
-            self._notify()
-
-        eventlet.spawn_n(last_out_sends)
-
-    def _notify(self):
-        if not self.pending_events:
-            return
-
-        batched_events = self.pending_events
-        self.pending_events = []
-        self.callback(batched_events)
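A short usage sketch of the batching behavior described in queue_event above
(hedged: the string payloads are placeholders; in neutron the events are the
nova server_external_events dicts built by the notifier in the next file, and
the surrounding service provides the eventlet-friendly environment):

    # Queue two events inside one batch_interval; both arrive in one callback.
    import eventlet

    received = []
    notifier = BatchNotifier(batch_interval=2, callback=received.extend)

    notifier.queue_event('event-1')  # spawns the short-lived sender thread
    notifier.queue_event('event-2')  # joins the pending batch
    eventlet.sleep(3)                # let batch_interval elapse

    assert received == ['event-1', 'event-2']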
diff --git a/neutron/notifiers/nova.py b/neutron/notifiers/nova.py
deleted file mode 100644 (file)
index 9abe6bc..0000000
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from keystoneauth1 import loading as ks_loading
-from novaclient import client as nova_client
-from novaclient import exceptions as nova_exceptions
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-from sqlalchemy.orm import attributes as sql_attr
-
-from neutron._i18n import _LE, _LI, _LW
-from neutron.common import constants
-from neutron import context
-from neutron import manager
-from neutron.notifiers import batch_notifier
-
-
-LOG = logging.getLogger(__name__)
-
-VIF_UNPLUGGED = 'network-vif-unplugged'
-VIF_PLUGGED = 'network-vif-plugged'
-VIF_DELETED = 'network-vif-deleted'
-NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed',
-                                 constants.PORT_STATUS_ERROR: 'failed',
-                                 constants.PORT_STATUS_DOWN: 'completed'}
-NOVA_API_VERSION = "2"
-
-
-class Notifier(object):
-
-    def __init__(self):
-        # FIXME(jamielennox): A notifier is being created for each Controller
-        # and each Notifier is handling its own auth. That means that we are
-        # authenticating the exact same thing len(controllers) times. This
-        # should be an easy thing to optimize.
-        auth = ks_loading.load_auth_from_conf_options(cfg.CONF, 'nova')
-
-        session = ks_loading.load_session_from_conf_options(
-            cfg.CONF,
-            'nova',
-            auth=auth)
-
-        extensions = [
-            ext for ext in nova_client.discover_extensions(NOVA_API_VERSION)
-            if ext.name == "server_external_events"]
-        self.nclient = nova_client.Client(
-            NOVA_API_VERSION,
-            session=session,
-            region_name=cfg.CONF.nova.region_name,
-            endpoint_type=cfg.CONF.nova.endpoint_type,
-            extensions=extensions)
-        self.batch_notifier = batch_notifier.BatchNotifier(
-            cfg.CONF.send_events_interval, self.send_events)
-
-    def _is_compute_port(self, port):
-        try:
-            if (port['device_id'] and uuidutils.is_uuid_like(port['device_id'])
-                    and port['device_owner'].startswith(
-                        constants.DEVICE_OWNER_COMPUTE_PREFIX)):
-                return True
-        except (KeyError, AttributeError):
-            pass
-        return False
-
-    def _get_network_changed_event(self, device_id):
-        return {'name': 'network-changed',
-                'server_uuid': device_id}
-
-    def _get_port_delete_event(self, port):
-        return {'server_uuid': port['device_id'],
-                'name': VIF_DELETED,
-                'tag': port['id']}
-
-    @property
-    def _plugin(self):
-        # NOTE(arosen): this cannot be set in __init__ currently since
-        # this class is initialized at the same time as NeutronManager()
-        # which is decorated with synchronized()
-        if not hasattr(self, '_plugin_ref'):
-            self._plugin_ref = manager.NeutronManager.get_plugin()
-        return self._plugin_ref
-
-    def send_network_change(self, action, original_obj,
-                            returned_obj):
-        """Called when a network change is made that nova cares about.
-
-        :param action: the event that occurred.
-        :param original_obj: the previous value of resource before action.
-        :param returned_obj: the body returned to client as result of action.
-        """
-
-        if not cfg.CONF.notify_nova_on_port_data_changes:
-            return
-
-        # When neutron re-assigns a floating ip from an original instance
-        # port to a new instance port without disassociating it first, an
-        # event should be sent for the original instance, so that nova can
-        # refresh its info on the original instance and update its database.
-        if (action == 'update_floatingip'
-                and returned_obj['floatingip'].get('port_id')
-                and original_obj.get('port_id')):
-            disassociate_returned_obj = {'floatingip': {'port_id': None}}
-            event = self.create_port_changed_event(action, original_obj,
-                                                   disassociate_returned_obj)
-            self.batch_notifier.queue_event(event)
-
-        event = self.create_port_changed_event(action, original_obj,
-                                               returned_obj)
-        self.batch_notifier.queue_event(event)
-
-    def create_port_changed_event(self, action, original_obj, returned_obj):
-        port = None
-        if action in ['update_port', 'delete_port']:
-            port = returned_obj['port']
-
-        elif action in ['update_floatingip', 'create_floatingip',
-                        'delete_floatingip']:
-            # NOTE(arosen) if we are associating a floatingip the
-            # port_id is in the returned_obj. Otherwise on disassociate
-            # it's in the original_object
-            port_id = (returned_obj['floatingip'].get('port_id') or
-                       original_obj.get('port_id'))
-
-            if port_id is None:
-                return
-
-            ctx = context.get_admin_context()
-            port = self._plugin.get_port(ctx, port_id)
-
-        if port and self._is_compute_port(port):
-            if action == 'delete_port':
-                return self._get_port_delete_event(port)
-            else:
-                return self._get_network_changed_event(port['device_id'])
-
-    def record_port_status_changed(self, port, current_port_status,
-                                   previous_port_status, initiator):
-        """Determine if nova needs to be notified due to port status change.
-        """
-        # clear out previous _notify_event
-        port._notify_event = None
-        # If there is no device_id set there is nothing we can do here.
-        if not port.device_id:
-            LOG.debug("device_id is not set on port yet.")
-            return
-
-        if not port.id:
-            LOG.warning(_LW("Port ID not set! Nova will not be notified of "
-                            "port status change."))
-            return
-
-        # We only want to notify about nova ports.
-        if not self._is_compute_port(port):
-            return
-
-        # We notify nova when a vif is unplugged, which only occurs when
-        # the status goes from ACTIVE to DOWN.
-        if (previous_port_status == constants.PORT_STATUS_ACTIVE and
-                current_port_status == constants.PORT_STATUS_DOWN):
-            event_name = VIF_UNPLUGGED
-
-        # We only notify nova when a vif is plugged, which only occurs
-        # when the status goes from:
-        # NO_VALUE/DOWN/BUILD -> ACTIVE/ERROR.
-        elif (previous_port_status in [sql_attr.NO_VALUE,
-                                       constants.PORT_STATUS_DOWN,
-                                       constants.PORT_STATUS_BUILD]
-              and current_port_status in [constants.PORT_STATUS_ACTIVE,
-                                          constants.PORT_STATUS_ERROR]):
-            event_name = VIF_PLUGGED
-        # All the remaining state transitions are of no interest to nova
-        else:
-            LOG.debug("Ignoring state change previous_port_status: "
-                      "%(pre_status)s current_port_status: %(cur_status)s"
-                      " port_id %(id)s",
-                      {'pre_status': previous_port_status,
-                       'cur_status': current_port_status,
-                       'id': port.id})
-            return
-
-        port._notify_event = (
-            {'server_uuid': port.device_id,
-             'name': event_name,
-             'status': NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status),
-             'tag': port.id})
-
-    def send_port_status(self, mapper, connection, port):
-        event = getattr(port, "_notify_event", None)
-        self.batch_notifier.queue_event(event)
-        port._notify_event = None
-
-    def send_events(self, batched_events):
-        LOG.debug("Sending events: %s", batched_events)
-        try:
-            response = self.nclient.server_external_events.create(
-                batched_events)
-        except nova_exceptions.NotFound:
-            LOG.warning(_LW("Nova returned NotFound for event: %s"),
-                        batched_events)
-        except Exception:
-            LOG.exception(_LE("Failed to notify nova on events: %s"),
-                          batched_events)
-        else:
-            if not isinstance(response, list):
-                LOG.error(_LE("Error response returned from nova: %s"),
-                          response)
-                return
-            response_error = False
-            for event in response:
-                try:
-                    code = event['code']
-                except KeyError:
-                    response_error = True
-                    continue
-                if code != 200:
-                    LOG.warning(_LW("Nova event: %s returned with failed "
-                                    "status"), event)
-                else:
-                    LOG.info(_LI("Nova event response: %s"), event)
-            if response_error:
-                LOG.error(_LE("Error response returned from nova: %s"),
-                          response)
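For quick reference, the branches of record_port_status_changed above reduce
to the following transition table (a paraphrase of the code, not additional
behavior; NO_VALUE is the sqlalchemy "never set" sentinel):

    # (previous_status, current_status) -> nova event name
    NOVA_EVENT_FOR_TRANSITION = {
        ('ACTIVE', 'DOWN'):     'network-vif-unplugged',
        ('NO_VALUE', 'ACTIVE'): 'network-vif-plugged',
        ('NO_VALUE', 'ERROR'):  'network-vif-plugged',
        ('DOWN', 'ACTIVE'):     'network-vif-plugged',
        ('DOWN', 'ERROR'):      'network-vif-plugged',
        ('BUILD', 'ACTIVE'):    'network-vif-plugged',
        ('BUILD', 'ERROR'):     'network-vif-plugged',
    }
    # Any other transition is logged at debug level and produces no event.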
diff --git a/neutron/objects/__init__.py b/neutron/objects/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/objects/base.py b/neutron/objects/base.py
deleted file mode 100644 (file)
index ac16d41..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from oslo_db import exception as obj_exc
-from oslo_utils import reflection
-from oslo_versionedobjects import base as obj_base
-import six
-
-from neutron._i18n import _
-from neutron.common import exceptions
-from neutron.db import api as db_api
-
-
-class NeutronObjectUpdateForbidden(exceptions.NeutronException):
-    message = _("Unable to update the following object fields: %(fields)s")
-
-
-class NeutronDbObjectDuplicateEntry(exceptions.Conflict):
-    message = _("Failed to create a duplicate %(object_type)s: "
-                "for attribute(s) %(attributes)s with value(s) %(values)s")
-
-    def __init__(self, object_class, db_exception):
-        super(NeutronDbObjectDuplicateEntry, self).__init__(
-            object_type=reflection.get_class_name(object_class,
-                                                  fully_qualified=False),
-            attributes=db_exception.columns,
-            values=db_exception.value)
-
-
-def get_updatable_fields(cls, fields):
-    fields = fields.copy()
-    for field in cls.fields_no_update:
-        if field in fields:
-            del fields[field]
-    return fields
-
-
-@six.add_metaclass(abc.ABCMeta)
-class NeutronObject(obj_base.VersionedObject,
-                    obj_base.VersionedObjectDictCompat,
-                    obj_base.ComparableVersionedObject):
-
-    synthetic_fields = []
-
-    def __init__(self, context=None, **kwargs):
-        super(NeutronObject, self).__init__(context, **kwargs)
-        self.obj_set_defaults()
-
-    def to_dict(self):
-        return dict(self.items())
-
-    @classmethod
-    def clean_obj_from_primitive(cls, primitive, context=None):
-        obj = cls.obj_from_primitive(primitive, context)
-        obj.obj_reset_changes()
-        return obj
-
-    @classmethod
-    def get_by_id(cls, context, id):
-        raise NotImplementedError()
-
-    @classmethod
-    def validate_filters(cls, **kwargs):
-        bad_filters = [key for key in kwargs
-                       if key not in cls.fields or key in cls.synthetic_fields]
-        if bad_filters:
-            bad_filters = ', '.join(bad_filters)
-            msg = _("'%s' is not supported for filtering") % bad_filters
-            raise exceptions.InvalidInput(error_message=msg)
-
-    @classmethod
-    @abc.abstractmethod
-    def get_objects(cls, context, **kwargs):
-        raise NotImplementedError()
-
-    def create(self):
-        raise NotImplementedError()
-
-    def update(self):
-        raise NotImplementedError()
-
-    def delete(self):
-        raise NotImplementedError()
-
-
-class NeutronDbObject(NeutronObject):
-
-    # should be overridden for all persistent objects
-    db_model = None
-
-    fields_no_update = []
-
-    def from_db_object(self, *objs):
-        for field in self.fields:
-            for db_obj in objs:
-                if field in db_obj:
-                    setattr(self, field, db_obj[field])
-                    break
-        self.obj_reset_changes()
-
-    @classmethod
-    def get_by_id(cls, context, id):
-        db_obj = db_api.get_object(context, cls.db_model, id=id)
-        if db_obj:
-            obj = cls(context, **db_obj)
-            obj.obj_reset_changes()
-            return obj
-
-    @classmethod
-    def get_objects(cls, context, **kwargs):
-        cls.validate_filters(**kwargs)
-        db_objs = db_api.get_objects(context, cls.db_model, **kwargs)
-        objs = [cls(context, **db_obj) for db_obj in db_objs]
-        for obj in objs:
-            obj.obj_reset_changes()
-        return objs
-
-    def _get_changed_persistent_fields(self):
-        fields = self.obj_get_changes()
-        for field in self.synthetic_fields:
-            if field in fields:
-                del fields[field]
-        return fields
-
-    def _validate_changed_fields(self, fields):
-        fields = fields.copy()
-        # We won't allow id updates anyway, so pop it out so that an id
-        # field touched by the consumer doesn't trigger an update
-        fields.pop('id', None)
-
-        forbidden_updates = set(self.fields_no_update) & set(fields.keys())
-        if forbidden_updates:
-            raise NeutronObjectUpdateForbidden(fields=forbidden_updates)
-
-        return fields
-
-    def create(self):
-        fields = self._get_changed_persistent_fields()
-        try:
-            db_obj = db_api.create_object(self._context, self.db_model, fields)
-        except obj_exc.DBDuplicateEntry as db_exc:
-            raise NeutronDbObjectDuplicateEntry(object_class=self.__class__,
-                                                db_exception=db_exc)
-
-        self.from_db_object(db_obj)
-
-    def update(self):
-        updates = self._get_changed_persistent_fields()
-        updates = self._validate_changed_fields(updates)
-
-        if updates:
-            db_obj = db_api.update_object(self._context, self.db_model,
-                                          self.id, updates)
-            self.from_db_object(db_obj)
-
-    def delete(self):
-        db_api.delete_object(self._context, self.db_model, self.id)
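
# A minimal sketch of how NeutronDbObject above is meant to be subclassed.
# The model is hypothetical (left as None here so the snippet imports); a
# real object points db_model at a sqlalchemy declarative model.
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields

from neutron.objects import base


@obj_base.VersionedObjectRegistry.register
class ExampleObject(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = None  # stand-in for a hypothetical declarative model

    fields = {
        'id': obj_fields.UUIDField(),
        'name': obj_fields.StringField(),
    }

    fields_no_update = ['id']

# Lifecycle sketch, assuming `context` is a neutron request context:
#     obj = ExampleObject(context, name='example')
#     obj.create()      # may raise NeutronDbObjectDuplicateEntry
#     obj.name = 'renamed'
#     obj.update()      # persists only changed, updatable fields
#     obj.delete()
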
diff --git a/neutron/objects/qos/__init__.py b/neutron/objects/qos/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/objects/qos/policy.py b/neutron/objects/qos/policy.py
deleted file mode 100644 (file)
index 110a921..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_versionedobjects import base as obj_base
-from oslo_versionedobjects import fields as obj_fields
-
-from neutron._i18n import _
-from neutron.common import exceptions
-from neutron.db import api as db_api
-from neutron.db.qos import api as qos_db_api
-from neutron.db.qos import models as qos_db_model
-from neutron.objects import base
-from neutron.objects.qos import rule as rule_obj_impl
-
-
-@obj_base.VersionedObjectRegistry.register
-class QosPolicy(base.NeutronDbObject):
-    # Version 1.0: Initial version
-    VERSION = '1.0'
-
-    db_model = qos_db_model.QosPolicy
-
-    port_binding_model = qos_db_model.QosPortPolicyBinding
-    network_binding_model = qos_db_model.QosNetworkPolicyBinding
-
-    fields = {
-        'id': obj_fields.UUIDField(),
-        'tenant_id': obj_fields.UUIDField(),
-        'name': obj_fields.StringField(),
-        'description': obj_fields.StringField(),
-        'shared': obj_fields.BooleanField(default=False),
-        'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True),
-    }
-
-    fields_no_update = ['id', 'tenant_id']
-
-    synthetic_fields = ['rules']
-
-    def to_dict(self):
-        dict_ = super(QosPolicy, self).to_dict()
-        if 'rules' in dict_:
-            dict_['rules'] = [rule.to_dict() for rule in dict_['rules']]
-        return dict_
-
-    def obj_load_attr(self, attrname):
-        if attrname != 'rules':
-            raise exceptions.ObjectActionError(
-                action='obj_load_attr',
-                reason=_('unable to load %s') % attrname)
-
-        if not hasattr(self, attrname):
-            self.reload_rules()
-
-    def reload_rules(self):
-        rules = rule_obj_impl.get_rules(self._context, self.id)
-        setattr(self, 'rules', rules)
-        self.obj_reset_changes(['rules'])
-
-    def get_rule_by_id(self, rule_id):
-        """Return rule specified by rule_id.
-
-        @raise QosRuleNotFound: if there is no such rule in the policy.
-        """
-
-        for rule in self.rules:
-            if rule_id == rule.id:
-                return rule
-        raise exceptions.QosRuleNotFound(policy_id=self.id,
-                                         rule_id=rule_id)
-
-    @staticmethod
-    def _is_policy_accessible(context, db_obj):
-        # TODO(QoS): Look at I3426b13eede8bfa29729cf3efea3419fb91175c4 for
-        #            other possible solutions to this.
-        return (context.is_admin or
-                db_obj.shared or
-                db_obj.tenant_id == context.tenant_id)
-
-    @classmethod
-    def get_by_id(cls, context, id):
-        # We want to get the policy regardless of its tenant id. We'll make
-        # sure the tenant has permission to access the policy later on.
-        admin_context = context.elevated()
-        with db_api.autonested_transaction(admin_context.session):
-            policy_obj = super(QosPolicy, cls).get_by_id(admin_context, id)
-            if (not policy_obj or
-                not cls._is_policy_accessible(context, policy_obj)):
-                return
-
-            policy_obj.reload_rules()
-            return policy_obj
-
-    @classmethod
-    def get_objects(cls, context, **kwargs):
-        # We want to get the policy regardless of its tenant id. We'll make
-        # sure the tenant has permission to access the policy later on.
-        admin_context = context.elevated()
-        with db_api.autonested_transaction(admin_context.session):
-            objs = super(QosPolicy, cls).get_objects(admin_context,
-                                                     **kwargs)
-            result = []
-            for obj in objs:
-                if not cls._is_policy_accessible(context, obj):
-                    continue
-                obj.reload_rules()
-                result.append(obj)
-            return result
-
-    @classmethod
-    def _get_object_policy(cls, context, model, **kwargs):
-        with db_api.autonested_transaction(context.session):
-            binding_db_obj = db_api.get_object(context, model, **kwargs)
-            if binding_db_obj:
-                return cls.get_by_id(context, binding_db_obj['policy_id'])
-
-    @classmethod
-    def get_network_policy(cls, context, network_id):
-        return cls._get_object_policy(context, cls.network_binding_model,
-                                      network_id=network_id)
-
-    @classmethod
-    def get_port_policy(cls, context, port_id):
-        return cls._get_object_policy(context, cls.port_binding_model,
-                                      port_id=port_id)
-
-    # TODO(QoS): Consider extending base to trigger registered methods for us
-    def create(self):
-        with db_api.autonested_transaction(self._context.session):
-            super(QosPolicy, self).create()
-            self.reload_rules()
-
-    def delete(self):
-        models = (
-            ('network', self.network_binding_model),
-            ('port', self.port_binding_model)
-        )
-        with db_api.autonested_transaction(self._context.session):
-            for object_type, model in models:
-                binding_db_obj = db_api.get_object(self._context, model,
-                                                   policy_id=self.id)
-                if binding_db_obj:
-                    raise exceptions.QosPolicyInUse(
-                        policy_id=self.id,
-                        object_type=object_type,
-                        object_id=binding_db_obj['%s_id' % object_type])
-
-            super(QosPolicy, self).delete()
-
-    def attach_network(self, network_id):
-        qos_db_api.create_policy_network_binding(self._context,
-                                                 policy_id=self.id,
-                                                 network_id=network_id)
-
-    def attach_port(self, port_id):
-        qos_db_api.create_policy_port_binding(self._context,
-                                              policy_id=self.id,
-                                              port_id=port_id)
-
-    def detach_network(self, network_id):
-        qos_db_api.delete_policy_network_binding(self._context,
-                                                 policy_id=self.id,
-                                                 network_id=network_id)
-
-    def detach_port(self, port_id):
-        qos_db_api.delete_policy_port_binding(self._context,
-                                              policy_id=self.id,
-                                              port_id=port_id)
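
# A usage sketch of QosPolicy above; context and the UUID arguments are
# assumed to come from the caller (e.g. the QoS service plugin).
from neutron.objects.qos import policy as policy_object


def bind_policy_to_network(context, policy_id, network_id):
    # get_by_id() returns None both for missing and inaccessible policies
    policy = policy_object.QosPolicy.get_by_id(context, policy_id)
    if policy is None:
        return None
    policy.attach_network(network_id)  # creates the network binding row
    return policy

# Deleting a bound policy raises QosPolicyInUse until detach_network() or
# detach_port() has removed every binding.
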
diff --git a/neutron/objects/qos/rule.py b/neutron/objects/qos/rule.py
deleted file mode 100644 (file)
index 1cf90eb..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import sys
-
-from oslo_versionedobjects import base as obj_base
-from oslo_versionedobjects import fields as obj_fields
-import six
-
-from neutron.common import constants
-from neutron.common import utils
-from neutron.db import api as db_api
-from neutron.db.qos import models as qos_db_model
-from neutron.objects import base
-from neutron.services.qos import qos_consts
-
-
-def get_rules(context, qos_policy_id):
-    all_rules = []
-    with db_api.autonested_transaction(context.session):
-        for rule_type in qos_consts.VALID_RULE_TYPES:
-            rule_cls_name = 'Qos%sRule' % utils.camelize(rule_type)
-            rule_cls = getattr(sys.modules[__name__], rule_cls_name)
-
-            rules = rule_cls.get_objects(context, qos_policy_id=qos_policy_id)
-            all_rules.extend(rules)
-    return all_rules
-
-
-@six.add_metaclass(abc.ABCMeta)
-class QosRule(base.NeutronDbObject):
-
-    fields = {
-        'id': obj_fields.UUIDField(),
-        'qos_policy_id': obj_fields.UUIDField()
-    }
-
-    fields_no_update = ['id', 'qos_policy_id']
-
-    # should be redefined in subclasses
-    rule_type = None
-
-    def to_dict(self):
-        dict_ = super(QosRule, self).to_dict()
-        dict_['type'] = self.rule_type
-        return dict_
-
-    def should_apply_to_port(self, port):
-        """Check whether a rule can be applied to a specific port.
-
-        This function has the logic to decide whether a rule should
-        be applied to a port or not, depending on the source of the
-        policy (network, or port). Eventually rules could override
-        this method, or we could make it abstract to allow different
-        rule behaviour.
-        """
-        is_network_rule = self.qos_policy_id != port[qos_consts.QOS_POLICY_ID]
-        is_network_device_port = any(port['device_owner'].startswith(prefix)
-                                     for prefix
-                                     in constants.DEVICE_OWNER_PREFIXES)
-
-        return not (is_network_rule and is_network_device_port)
-
-
-@obj_base.VersionedObjectRegistry.register
-class QosBandwidthLimitRule(QosRule):
-    # Version 1.0: Initial version
-    VERSION = '1.0'
-
-    db_model = qos_db_model.QosBandwidthLimitRule
-
-    fields = {
-        'max_kbps': obj_fields.IntegerField(nullable=True),
-        'max_burst_kbps': obj_fields.IntegerField(nullable=True)
-    }
-
-    rule_type = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
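
# A sketch of the dynamic lookup performed by get_rules() above: each entry
# of VALID_RULE_TYPES is camelized into a 'Qos<Type>Rule' name resolved from
# this module, so 'bandwidth_limit' maps to QosBandwidthLimitRule.
from neutron.common import utils

rule_cls_name = 'Qos%sRule' % utils.camelize('bandwidth_limit')
assert rule_cls_name == 'QosBandwidthLimitRule'
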
diff --git a/neutron/objects/qos/rule_type.py b/neutron/objects/qos/rule_type.py
deleted file mode 100644 (file)
index bb5d9bd..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_versionedobjects import base as obj_base
-from oslo_versionedobjects import fields as obj_fields
-
-from neutron import manager
-from neutron.objects import base
-from neutron.services.qos import qos_consts
-
-
-class RuleTypeField(obj_fields.BaseEnumField):
-
-    def __init__(self, **kwargs):
-        self.AUTO_TYPE = obj_fields.Enum(
-            valid_values=qos_consts.VALID_RULE_TYPES)
-        super(RuleTypeField, self).__init__(**kwargs)
-
-
-@obj_base.VersionedObjectRegistry.register
-class QosRuleType(base.NeutronObject):
-    # Version 1.0: Initial version
-    VERSION = '1.0'
-
-    fields = {
-        'type': RuleTypeField(),
-    }
-
-    # We don't receive a context because we don't need db access at all
-    @classmethod
-    def get_objects(cls, **kwargs):
-        cls.validate_filters(**kwargs)
-        core_plugin = manager.NeutronManager.get_plugin()
-        return [cls(type=type_)
-                for type_ in core_plugin.supported_qos_rule_types]
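
# QosRuleType carries no database state: get_objects() above just wraps the
# core plugin's supported_qos_rule_types. A sketch, assuming an initialized
# core plugin:
from neutron.objects.qos import rule_type


def supported_rule_types():
    return [obj.type for obj in rule_type.QosRuleType.get_objects()]
# With the reference ML2 setup this might yield ['bandwidth_limit'].
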
diff --git a/neutron/openstack/__init__.py b/neutron/openstack/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/openstack/common/__init__.py b/neutron/openstack/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/openstack/common/cache/__init__.py b/neutron/openstack/common/cache/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/openstack/common/cache/_backends/__init__.py b/neutron/openstack/common/cache/_backends/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/openstack/common/cache/_backends/memory.py b/neutron/openstack/common/cache/_backends/memory.py
deleted file mode 100644 (file)
index 0dbb3f6..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-
-from oslo_concurrency import lockutils
-from oslo_utils import timeutils
-
-from neutron.openstack.common.cache import backends
-
-
-class MemoryBackend(backends.BaseCache):
-
-    def __init__(self, parsed_url, options=None):
-        super(MemoryBackend, self).__init__(parsed_url, options)
-        self._clear()
-
-    def _set_unlocked(self, key, value, ttl=0):
-        expires_at = 0
-        if ttl != 0:
-            expires_at = timeutils.utcnow_ts() + ttl
-
-        self._cache[key] = (expires_at, value)
-
-        if expires_at:
-            self._keys_expires[expires_at].add(key)
-
-    def _set(self, key, value, ttl=0, not_exists=False):
-        with lockutils.lock(key):
-
-            # NOTE(flaper87): This is needed just in `set`
-            # calls, hence it's not in `_set_unlocked`
-            if not_exists and self._exists_unlocked(key):
-                return False
-
-            self._set_unlocked(key, value, ttl)
-            return True
-
-    def _get_unlocked(self, key, default=None):
-        now = timeutils.utcnow_ts()
-
-        try:
-            timeout, value = self._cache[key]
-        except KeyError:
-            return (0, default)
-
-        if timeout and now >= timeout:
-
-            # NOTE(flaper87): Record expired,
-            # remove it from the cache but catch
-            # KeyError and ValueError in case
-            # _purge_expired removed this key already.
-            try:
-                del self._cache[key]
-            except KeyError:
-                pass
-
-            try:
-                # NOTE(flaper87): Keys with ttl == 0
-                # don't exist in the _keys_expires dict
-                self._keys_expires[timeout].remove(key)
-            except (KeyError, ValueError):
-                pass
-
-            return (0, default)
-
-        return (timeout, value)
-
-    def _get(self, key, default=None):
-        with lockutils.lock(key):
-            return self._get_unlocked(key, default)[1]
-
-    def _exists_unlocked(self, key):
-        now = timeutils.utcnow_ts()
-        try:
-            timeout = self._cache[key][0]
-            return not timeout or now <= timeout
-        except KeyError:
-            return False
-
-    def __contains__(self, key):
-        with lockutils.lock(key):
-            return self._exists_unlocked(key)
-
-    def _incr_append(self, key, other):
-        with lockutils.lock(key):
-            timeout, value = self._get_unlocked(key)
-
-            if value is None:
-                return None
-
-            ttl = timeout - timeutils.utcnow_ts() if timeout else 0
-            new_value = value + other
-            self._set_unlocked(key, new_value, ttl)
-            return new_value
-
-    def _incr(self, key, delta):
-        if not isinstance(delta, int):
-            raise TypeError('delta must be an int instance')
-
-        return self._incr_append(key, delta)
-
-    def _append_tail(self, key, tail):
-        return self._incr_append(key, tail)
-
-    def _purge_expired(self):
-        """Removes expired keys from the cache."""
-
-        now = timeutils.utcnow_ts()
-        for timeout in sorted(self._keys_expires.keys()):
-
-            # NOTE(flaper87): If timeout is greater
-            # than `now`, stop the iteration, remaining
-            # keys have not expired.
-            if now < timeout:
-                break
-
-            # NOTE(flaper87): Unset every key in
-            # this set from the cache if its timeout
-            # is equal to `timeout`. (The key might
-            # have been updated)
-            for subkey in self._keys_expires.pop(timeout):
-                try:
-                    if self._cache[subkey][0] == timeout:
-                        del self._cache[subkey]
-                except KeyError:
-                    continue
-
-    def __delitem__(self, key):
-        self._purge_expired()
-
-        # NOTE(flaper87): Delete the key. Using pop
-        # since it could have been deleted already
-        value = self._cache.pop(key, None)
-
-        if value:
-            try:
-                # NOTE(flaper87): Keys with ttl == 0
-                # don't exist in the _keys_expires dict
-                self._keys_expires[value[0]].remove(key)
-            except (KeyError, ValueError):
-                pass
-
-    def _clear(self):
-        self._cache = {}
-        self._keys_expires = collections.defaultdict(set)
-
-    def _get_many(self, keys, default):
-        return super(MemoryBackend, self)._get_many(keys, default)
-
-    def _set_many(self, data, ttl=0):
-        return super(MemoryBackend, self)._set_many(data, ttl)
-
-    def _unset_many(self, keys):
-        return super(MemoryBackend, self)._unset_many(keys)
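
# A usage sketch of MemoryBackend above, constructing it directly from a
# parsed url instead of going through cache.get_cache():
from six.moves.urllib import parse

from neutron.openstack.common.cache._backends import memory

backend = memory.MemoryBackend(parse.urlparse('memory://'))
backend.set('greeting', 'hello', ttl=30)  # expires ~30 seconds from now
backend.set('counter', 1, ttl=0)          # a ttl of 0 means never expire
backend.incr('counter', delta=2)          # returns 3
assert 'greeting' in backend              # True until the ttl elapses
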
diff --git a/neutron/openstack/common/cache/backends.py b/neutron/openstack/common/cache/backends.py
deleted file mode 100644 (file)
index 1bea891..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import six
-
-
-NOTSET = object()
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseCache(object):
-    """Base Cache Abstraction
-
-    :params parsed_url: Parsed url object.
-    :params options: A dictionary with configuration parameters
-      for the cache. For example:
-
-        - default_ttl: An integer defining the default ttl for keys.
-    """
-
-    def __init__(self, parsed_url, options=None):
-        self._parsed_url = parsed_url
-        self._options = options or {}
-        self._default_ttl = int(self._options.get('default_ttl', 0))
-
-    @abc.abstractmethod
-    def _set(self, key, value, ttl, not_exists=False):
-        """Implementations of this class have to override this method."""
-
-    def set(self, key, value, ttl, not_exists=False):
-        """Sets or updates a cache entry
-
-        .. note:: Thread-safety is required and has to be guaranteed by the
-           backend implementation.
-
-        :params key: Item key as string.
-        :type key: `unicode string`
-        :params value: Value to assign to the key. This can be anything that
-          is handled by the current backend.
-        :params ttl: Key's timeout in seconds. 0 means no timeout.
-        :type ttl: int
-        :params not_exists: If True, the key will be set if it doesn't exist.
-          Otherwise, it'll always be set.
-        :type not_exists: bool
-
-        :returns: True if the operation succeeds, False otherwise.
-        """
-        if ttl is None:
-            ttl = self._default_ttl
-
-        return self._set(key, value, ttl, not_exists)
-
-    def __setitem__(self, key, value):
-        self.set(key, value, self._default_ttl)
-
-    def setdefault(self, key, value):
-        """Sets the key value to `value` if it doesn't exist
-
-        :params key: Item key as string.
-        :type key: `unicode string`
-        :params value: Value to assign to the key. This can be anything that
-          is handled by the current backend.
-        """
-        try:
-            return self[key]
-        except KeyError:
-            self[key] = value
-            return value
-
-    @abc.abstractmethod
-    def _get(self, key, default):
-        """Implementations of this class have to override this method."""
-
-    def get(self, key, default=None):
-        """Gets one item from the cache
-
-        .. note:: Thread-safety is required and it has to be guaranteed
-           by the backend implementation.
-
-        :params key: Key for the item to retrieve from the cache.
-        :params default: The default value to return.
-
-        :returns: `key`'s value in the cache if it exists, otherwise
-          `default` should be returned.
-        """
-        return self._get(key, default)
-
-    def __getitem__(self, key):
-        value = self.get(key, NOTSET)
-
-        if value is NOTSET:
-            raise KeyError
-
-        return value
-
-    @abc.abstractmethod
-    def __delitem__(self, key):
-        """Removes an item from cache.
-
-        .. note:: Thread-safety is required and it has to be guaranteed by
-           the backend implementation.
-
-        :params key: The key to remove.
-
-        :returns: The key value if there's one
-        """
-
-    @abc.abstractmethod
-    def _clear(self):
-        """Implementations of this class have to override this method."""
-
-    def clear(self):
-        """Removes all items from the cache.
-
-        .. note:: Thread-safety is required and it has to be guaranteed by
-           the backend implementation.
-        """
-        return self._clear()
-
-    @abc.abstractmethod
-    def _incr(self, key, delta):
-        """Implementations of this class have to override this method."""
-
-    def incr(self, key, delta=1):
-        """Increments the value for a key
-
-        :params key: The key for the value to be incremented
-        :params delta: Number of units by which to increment the value.
-          Pass a negative number to decrement the value.
-
-        :returns: The new value
-        """
-        return self._incr(key, delta)
-
-    @abc.abstractmethod
-    def _append_tail(self, key, tail):
-        """Implementations of this class have to override this method."""
-
-    def append_tail(self, key, tail):
-        """Appends `tail` to `key`'s value.
-
-        :params key: The key of the value to which `tail` should be appended.
-        :params tail: The list of values to append to the original.
-
-        :returns: The new value
-        """
-
-        if not hasattr(tail, "__iter__"):
-            raise TypeError('Tail must be an iterable')
-
-        if not isinstance(tail, list):
-            # NOTE(flaper87): Make sure we pass a list
-            # down to the implementation. Not all drivers
-            # have support for generators, sets or other
-            # iterables.
-            tail = list(tail)
-
-        return self._append_tail(key, tail)
-
-    def append(self, key, value):
-        """Appends `value` to `key`'s value.
-
-        :params key: The key of the value to which `tail` should be appended.
-        :params value: The value to append to the original.
-
-        :returns: The new value
-        """
-        return self.append_tail(key, [value])
-
-    @abc.abstractmethod
-    def __contains__(self, key):
-        """Verifies that a key exists.
-
-        :params key: The key to verify.
-
-        :returns: True if the key exists, otherwise False.
-        """
-
-    @abc.abstractmethod
-    def _get_many(self, keys, default):
-        """Implementations of this class have to override this method."""
-        return ((k, self.get(k, default=default)) for k in keys)
-
-    def get_many(self, keys, default=NOTSET):
-        """Gets keys' value from cache
-
-        :params keys: List of keys to retrieve.
-        :params default: The default value to return for each key that is not
-          in the cache.
-
-        :returns: A generator of (key, value) pairs.
-        """
-        return self._get_many(keys, default)
-
-    @abc.abstractmethod
-    def _set_many(self, data, ttl):
-        """Implementations of this class have to override this method."""
-
-        for key, value in data.items():
-            self.set(key, value, ttl=ttl)
-
-    def set_many(self, data, ttl=None):
-        """Puts several items into the cache at once
-
-        Depending on the backend, this operation may or may not be efficient.
-        The default implementation calls set for each (key, value) pair
-        passed; other backends support set_many operations natively as part
-        of their protocols.
-
-        :params data: A dictionary like {key: val} to store in the cache.
-        :params ttl: Key's timeout in seconds.
-        """
-
-        if ttl is None:
-            ttl = self._default_ttl
-
-        self._set_many(data, ttl)
-
-    def update(self, **kwargs):
-        """Sets several (key, value) paris.
-
-        Refer to the `set_many` docstring.
-        """
-        self.set_many(kwargs, ttl=self._default_ttl)
-
-    @abc.abstractmethod
-    def _unset_many(self, keys):
-        """Implementations of this class have to override this method."""
-        for key in keys:
-            del self[key]
-
-    def unset_many(self, keys):
-        """Removes several keys from the cache at once
-
-        :params keys: List of keys to unset.
-        """
-        self._unset_many(keys)
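
# A minimal sketch of what a concrete backend must provide: every
# abstractmethod above needs an override (the *_many methods may simply
# defer to the naive defaults). This dict-backed class is illustrative
# only; it ignores ttl and makes no thread-safety guarantees.
class DictBackend(BaseCache):

    def __init__(self, parsed_url, options=None):
        super(DictBackend, self).__init__(parsed_url, options)
        self._data = {}

    def _set(self, key, value, ttl, not_exists=False):
        if not_exists and key in self._data:
            return False
        self._data[key] = value
        return True

    def _get(self, key, default=None):
        return self._data.get(key, default)

    def __delitem__(self, key):
        del self._data[key]

    def _clear(self):
        self._data = {}

    def _incr(self, key, delta):
        self._data[key] = self._data.get(key, 0) + delta
        return self._data[key]

    def _append_tail(self, key, tail):
        self._data[key] = self._data.get(key, []) + tail
        return self._data[key]

    def __contains__(self, key):
        return key in self._data

    def _get_many(self, keys, default):
        return super(DictBackend, self)._get_many(keys, default)

    def _set_many(self, data, ttl=0):
        return super(DictBackend, self)._set_many(data, ttl)

    def _unset_many(self, keys):
        return super(DictBackend, self)._unset_many(keys)
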
diff --git a/neutron/openstack/common/cache/cache.py b/neutron/openstack/common/cache/cache.py
deleted file mode 100644 (file)
index 5b77b6b..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Cache library.
-
-Supported configuration options:
-
-`default_backend`: Name of the cache backend to use.
-`key_namespace`: Namespace under which keys will be created.
-"""
-
-########################################################################
-#
-# THIS MODULE IS DEPRECATED
-#
-# Please refer to
-# https://etherpad.openstack.org/p/kilo-neutron-library-proposals for
-# the discussion leading to this deprecation.
-#
-# We recommend helping with the new oslo.cache library being created
-# as a wrapper for dogpile.
-#
-########################################################################
-
-
-from six.moves.urllib import parse
-from stevedore import driver
-
-
-def _get_oslo_configs():
-    """Returns the oslo config options to register."""
-    # NOTE(flaper87): Oslo config should be
-    # optional. Instead of doing try / except
-    # at the top of this file, lets import cfg
-    # here and assume that the caller of this
-    # function already took care of this dependency.
-    from oslo_config import cfg
-
-    return [
-        cfg.StrOpt('cache_url', default='memory://',
-                   help='URL to connect to the cache back end.')
-    ]
-
-
-def register_oslo_configs(conf):
-    """Registers a cache configuration options
-
-    :params conf: Config object.
-    :type conf: `cfg.ConfigOptions`
-    """
-    conf.register_opts(_get_oslo_configs())
-
-
-def get_cache(url='memory://'):
-    """Loads the cache backend
-
-    This function loads the cache backend
-    specified in the given configuration.
-
-    :param conf: Configuration instance to use
-    """
-
-    parsed = parse.urlparse(url)
-    backend = parsed.scheme
-
-    query = parsed.query
-    # NOTE(flaper87): We need the following hack
-    # for python versions < 2.7.5. Previous versions
-    # of python parsed query params just for 'known'
-    # schemes. This was changed in this patch:
-    # http://hg.python.org/cpython/rev/79e6ff3d9afd
-    if not query and '?' in parsed.path:
-        query = parsed.path.split('?', 1)[-1]
-    parameters = parse.parse_qsl(query)
-    kwargs = {'options': dict(parameters)}
-
-    mgr = driver.DriverManager('neutron.openstack.common.cache.backends',
-                               backend, invoke_on_load=True,
-                               invoke_args=[parsed],
-                               invoke_kwds=kwargs)
-    return mgr.driver
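
# A sketch of loading a backend through the stevedore plumbing above; the
# query string is parsed into the backend's options dict, so default_ttl
# here ends up as BaseCache._default_ttl.
from neutron.openstack.common.cache import cache

store = cache.get_cache('memory://?default_ttl=60')
store.set('key', 'value', ttl=None)  # None falls back to default_ttl (60s)
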
diff --git a/neutron/opts.py b/neutron/opts.py
deleted file mode 100644 (file)
index 4e08604..0000000
+++ /dev/null
@@ -1,291 +0,0 @@
-#  Licensed under the Apache License, Version 2.0 (the "License"); you may
-#  not use this file except in compliance with the License. You may obtain
-#  a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#  License for the specific language governing permissions and limitations
-#  under the License.
-
-import copy
-import itertools
-import operator
-
-from keystoneauth1 import loading as ks_loading
-from oslo_config import cfg
-
-import neutron.agent.common.config
-import neutron.agent.common.ovs_lib
-import neutron.agent.dhcp.config
-import neutron.agent.l2.extensions.manager
-import neutron.agent.l3.config
-import neutron.agent.l3.ha
-import neutron.agent.linux.interface
-import neutron.agent.linux.pd
-import neutron.agent.linux.ra
-import neutron.agent.metadata.config
-import neutron.agent.ovsdb.api
-import neutron.agent.securitygroups_rpc
-import neutron.common.config
-import neutron.db.agents_db
-import neutron.db.agentschedulers_db
-import neutron.db.dvr_mac_db
-import neutron.db.extraroute_db
-import neutron.db.l3_agentschedulers_db
-import neutron.db.l3_dvr_db
-import neutron.db.l3_gwmode_db
-import neutron.db.l3_hamode_db
-import neutron.db.migration.cli
-import neutron.extensions.allowedaddresspairs
-import neutron.extensions.l3
-import neutron.extensions.securitygroup
-import neutron.openstack.common.cache.cache
-import neutron.plugins.ml2.config
-import neutron.plugins.ml2.drivers.linuxbridge.agent.common.config
-import neutron.plugins.ml2.drivers.mech_sriov.agent.common.config
-import neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver
-import neutron.plugins.ml2.drivers.openvswitch.agent.common.config
-import neutron.plugins.ml2.drivers.type_flat
-import neutron.plugins.ml2.drivers.type_geneve
-import neutron.plugins.ml2.drivers.type_gre
-import neutron.plugins.ml2.drivers.type_vlan
-import neutron.plugins.ml2.drivers.type_vxlan
-import neutron.quota
-import neutron.service
-import neutron.services.metering.agents.metering_agent
-import neutron.services.qos.notification_drivers.manager
-import neutron.wsgi
-
-
-NOVA_GROUP = 'nova'
-
-CONF = cfg.CONF
-
-deprecations = {'nova.cafile': [cfg.DeprecatedOpt('ca_certificates_file',
-                                                  group=NOVA_GROUP)],
-                'nova.insecure': [cfg.DeprecatedOpt('api_insecure',
-                                                    group=NOVA_GROUP)],
-                'nova.timeout': [cfg.DeprecatedOpt('url_timeout',
-                                                   group=NOVA_GROUP)]}
-
-_nova_options = ks_loading.register_session_conf_options(
-            CONF, NOVA_GROUP, deprecated_opts=deprecations)
-
-
-def list_agent_opts():
-    return [
-        ('agent',
-         itertools.chain(
-             neutron.agent.common.config.ROOT_HELPER_OPTS,
-             neutron.agent.common.config.AGENT_STATE_OPTS,
-             neutron.agent.common.config.IPTABLES_OPTS,
-             neutron.agent.common.config.PROCESS_MONITOR_OPTS,
-             neutron.agent.common.config.AVAILABILITY_ZONE_OPTS)
-         ),
-        ('DEFAULT',
-         itertools.chain(
-             neutron.agent.common.config.INTERFACE_DRIVER_OPTS,
-             neutron.agent.metadata.config.SHARED_OPTS,
-             neutron.agent.metadata.config.DRIVER_OPTS)
-         )
-    ]
-
-
-def list_extension_opts():
-    return [
-        ('DEFAULT',
-         neutron.extensions.allowedaddresspairs.allowed_address_pair_opts),
-        ('quotas',
-         itertools.chain(
-             neutron.extensions.l3.l3_quota_opts,
-             neutron.extensions.securitygroup.security_group_quota_opts)
-         )
-    ]
-
-
-def list_db_opts():
-    return [
-        ('DEFAULT',
-         itertools.chain(
-             neutron.db.agents_db.AGENT_OPTS,
-             neutron.db.extraroute_db.extra_route_opts,
-             neutron.db.l3_gwmode_db.OPTS,
-             neutron.db.agentschedulers_db.AGENTS_SCHEDULER_OPTS,
-             neutron.db.dvr_mac_db.dvr_mac_address_opts,
-             neutron.db.l3_dvr_db.router_distributed_opts,
-             neutron.db.l3_agentschedulers_db.L3_AGENTS_SCHEDULER_OPTS,
-             neutron.db.l3_hamode_db.L3_HA_OPTS)
-         ),
-        ('database',
-         neutron.db.migration.cli.get_engine_config())
-    ]
-
-
-def list_opts():
-    return [
-        ('DEFAULT',
-         itertools.chain(
-             neutron.common.config.core_cli_opts,
-             neutron.common.config.core_opts,
-             neutron.wsgi.socket_opts,
-             neutron.service.service_opts)
-         ),
-        (neutron.common.config.NOVA_CONF_SECTION,
-         itertools.chain(
-              neutron.common.config.nova_opts)
-         ),
-        ('quotas', neutron.quota.quota_opts)
-    ]
-
-
-def list_qos_opts():
-    return [
-        ('DEFAULT',
-         neutron.services.qos.notification_drivers.manager.QOS_PLUGIN_OPTS)
-    ]
-
-
-def list_base_agent_opts():
-    return [
-        ('DEFAULT',
-         itertools.chain(
-             neutron.agent.linux.interface.OPTS,
-             neutron.agent.common.config.INTERFACE_DRIVER_OPTS,
-             neutron.agent.common.ovs_lib.OPTS)
-         ),
-        ('AGENT', neutron.agent.common.config.AGENT_STATE_OPTS)
-    ]
-
-
-def list_dhcp_agent_opts():
-    return [
-        ('DEFAULT',
-         itertools.chain(
-             neutron.agent.dhcp.config.DHCP_AGENT_OPTS,
-             neutron.agent.dhcp.config.DHCP_OPTS,
-             neutron.agent.dhcp.config.DNSMASQ_OPTS)
-         )
-    ]
-
-
-def list_linux_bridge_opts():
-    return [
-        ('linux_bridge',
-         neutron.plugins.ml2.drivers.linuxbridge.agent.common.config.
-         bridge_opts),
-        ('vxlan',
-         neutron.plugins.ml2.drivers.linuxbridge.agent.common.config.
-         vxlan_opts),
-        ('agent',
-         neutron.plugins.ml2.drivers.linuxbridge.agent.common.config.
-         agent_opts),
-        ('securitygroup',
-         neutron.agent.securitygroups_rpc.security_group_opts)
-    ]
-
-
-def list_l3_agent_opts():
-    return [
-        ('DEFAULT',
-         itertools.chain(
-             neutron.agent.l3.config.OPTS,
-             neutron.service.service_opts,
-             neutron.agent.l3.ha.OPTS,
-             neutron.agent.linux.pd.OPTS,
-             neutron.agent.linux.ra.OPTS)
-         )
-    ]
-
-
-def list_metadata_agent_opts():
-    return [
-        ('DEFAULT',
-         itertools.chain(
-             neutron.agent.metadata.config.SHARED_OPTS,
-             neutron.agent.metadata.config.METADATA_PROXY_HANDLER_OPTS,
-             neutron.agent.metadata.config.UNIX_DOMAIN_METADATA_PROXY_OPTS,
-             neutron.openstack.common.cache.cache._get_oslo_configs())
-         ),
-        ('AGENT', neutron.agent.common.config.AGENT_STATE_OPTS)
-    ]
-
-
-def list_metering_agent_opts():
-    return [
-        ('DEFAULT',
-         itertools.chain(
-             neutron.services.metering.agents.metering_agent.MeteringAgent.
-             Opts,
-             neutron.agent.common.config.INTERFACE_DRIVER_OPTS)
-         )
-    ]
-
-
-def list_ml2_conf_opts():
-    return [
-        ('ml2',
-         neutron.plugins.ml2.config.ml2_opts),
-        ('ml2_type_flat',
-         neutron.plugins.ml2.drivers.type_flat.flat_opts),
-        ('ml2_type_vlan',
-         neutron.plugins.ml2.drivers.type_vlan.vlan_opts),
-        ('ml2_type_gre',
-         neutron.plugins.ml2.drivers.type_gre.gre_opts),
-        ('ml2_type_vxlan',
-         neutron.plugins.ml2.drivers.type_vxlan.vxlan_opts),
-        ('ml2_type_geneve',
-         neutron.plugins.ml2.drivers.type_geneve.geneve_opts),
-        ('securitygroup',
-         neutron.agent.securitygroups_rpc.security_group_opts)
-    ]
-
-
-def list_ml2_conf_sriov_opts():
-    return [
-        ('ml2_sriov',
-         neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver.
-         sriov_opts)
-    ]
-
-
-def list_ovs_opts():
-    return [
-        ('ovs',
-         itertools.chain(
-             neutron.plugins.ml2.drivers.openvswitch.agent.common.config.
-             ovs_opts,
-             neutron.agent.ovsdb.api.OPTS)
-         ),
-        ('agent',
-         neutron.plugins.ml2.drivers.openvswitch.agent.common.config.
-         agent_opts),
-        ('securitygroup',
-         neutron.agent.securitygroups_rpc.security_group_opts)
-    ]
-
-
-def list_sriov_agent_opts():
-    return [
-        ('ml2_sriov',
-         neutron.plugins.ml2.drivers.mech_sriov.agent.common.config.
-         sriov_nic_opts),
-        ('agent',
-         neutron.agent.l2.extensions.manager.L2_AGENT_EXT_MANAGER_OPTS)
-    ]
-
-
-def list_auth_opts():
-    opt_list = copy.deepcopy(_nova_options)
-    opt_list.insert(0, ks_loading.get_auth_common_conf_options()[0])
-    # NOTE(mhickey): There are a lot of auth plugins, we just generate
-    # the config options for a few common ones
-    plugins = ['password', 'v2password', 'v3password']
-    for name in plugins:
-        for plugin_option in ks_loading.get_plugin_loader(name).get_options():
-            if all(option.name != plugin_option.name for option in opt_list):
-                opt_list.append(plugin_option)
-    opt_list.sort(key=operator.attrgetter('name'))
-    return [(NOVA_GROUP, opt_list)]
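
# The list_*_opts() helpers above exist so oslo-config-generator can walk
# neutron's options via setuptools entry points. A sketch of consuming one
# of them directly, printing each option grouped by config section:
from neutron import opts

for section, options in opts.list_agent_opts():
    print('[%s]' % section)
    for opt in options:
        print('    %s' % opt.name)
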
diff --git a/neutron/pecan_wsgi/__init__.py b/neutron/pecan_wsgi/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/pecan_wsgi/app.py b/neutron/pecan_wsgi/app.py
deleted file mode 100644 (file)
index 4dcc5bb..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from keystonemiddleware import auth_token
-from oslo_config import cfg
-from oslo_middleware import cors
-from oslo_middleware import request_id
-import pecan
-
-from neutron.common import exceptions as n_exc
-from neutron.pecan_wsgi import hooks
-from neutron.pecan_wsgi import startup
-
-CONF = cfg.CONF
-CONF.import_opt('bind_host', 'neutron.common.config')
-CONF.import_opt('bind_port', 'neutron.common.config')
-
-
-def setup_app(*args, **kwargs):
-    config = {
-        'server': {
-            'port': CONF.bind_port,
-            'host': CONF.bind_host
-        },
-        'app': {
-            'root': 'neutron.pecan_wsgi.controllers.root.RootController',
-            'modules': ['neutron.pecan_wsgi'],
-        }
-        # TODO(kevinbenton): error templates
-    }
-    pecan_config = pecan.configuration.conf_from_dict(config)
-
-    app_hooks = [
-        hooks.ExceptionTranslationHook(),  # priority 100
-        hooks.ContextHook(),  # priority 95
-        hooks.MemberActionHook(),  # priority 95
-        hooks.BodyValidationHook(),  # priority 120
-        hooks.OwnershipValidationHook(),  # priority 125
-        hooks.QuotaEnforcementHook(),  # priority 130
-        hooks.PolicyHook(),  # priority 135
-        hooks.NotifierHook(),  # priority 140
-    ]
-
-    app = pecan.make_app(
-        pecan_config.app.root,
-        debug=False,
-        wrap_app=_wrap_app,
-        force_canonical=False,
-        hooks=app_hooks,
-        guess_content_type_from_ext=True
-    )
-    startup.initialize_all()
-
-    return app
-
-
-def _wrap_app(app):
-    app = request_id.RequestId(app)
-    if cfg.CONF.auth_strategy == 'noauth':
-        pass
-    elif cfg.CONF.auth_strategy == 'keystone':
-        app = auth_token.AuthProtocol(app, {})
-    else:
-        raise n_exc.InvalidConfigurationOption(
-            opt_name='auth_strategy', opt_value=cfg.CONF.auth_strategy)
-
-    # This should be the last middleware in the list (which results in
-    # it being the first in the middleware chain). This is to ensure
-    # that any errors thrown by other middleware, such as an auth
-    # middleware, are annotated with CORS headers, and thus accessible
-    # by the browser.
-    app = cors.CORS(app, cfg.CONF)
-    app.set_latent(
-        allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles',
-                       'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id',
-                       'X-OpenStack-Request-ID'],
-        allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
-        expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token',
-                        'X-OpenStack-Request-ID']
-    )
-
-    return app
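
# A sketch of how the pieces above compose per request, assuming neutron's
# configuration is already initialized and auth_strategy is 'keystone'.
# _wrap_app() nests the WSGI middleware so the outermost layer runs first:
#
#     cors.CORS
#       -> auth_token.AuthProtocol
#         -> request_id.RequestId
#           -> pecan app (hooks fire in priority order, then the controller)
from neutron.pecan_wsgi import app as pecan_app

application = pecan_app.setup_app()
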
diff --git a/neutron/pecan_wsgi/controllers/__init__.py b/neutron/pecan_wsgi/controllers/__init__.py
deleted file mode 100644 (file)
index a25c1ad..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.pecan_wsgi.controllers import quota
-
-
-QuotasController = quota.QuotasController
diff --git a/neutron/pecan_wsgi/controllers/quota.py b/neutron/pecan_wsgi/controllers/quota.py
deleted file mode 100644 (file)
index d773102..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright (c) 2015 Taturiello Consulting, Meh.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-from oslo_utils import importutils
-from pecan import request
-from pecan import response
-
-from neutron._i18n import _
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.pecan_wsgi.controllers import utils
-from neutron.quota import resource_registry
-
-LOG = log.getLogger(__name__)
-RESOURCE_NAME = "quota"
-
-
-class QuotasController(utils.NeutronPecanController):
-
-    def __init__(self):
-        self._driver = importutils.import_class(
-            cfg.CONF.QUOTAS.quota_driver
-        )
-        super(QuotasController, self).__init__(
-            "%ss" % RESOURCE_NAME, RESOURCE_NAME)
-
-    def _check_admin(self, context,
-                     reason=_("Only admin can view or configure quota")):
-        if not context.is_admin:
-            raise n_exc.AdminRequired(reason=reason)
-
-    @utils.expose()
-    def _lookup(self, tenant_id, *remainder):
-        return QuotaController(self._driver, tenant_id), remainder
-
-    @utils.expose()
-    def index(self):
-        neutron_context = request.context.get('neutron_context')
-        # FIXME(salv-orlando): There shouldn't be any need to do this
-        # explicit check. However, some behaviours from the "old" extension
-        # have been temporarily carried over here.
-        self._check_admin(neutron_context)
-        # TODO(salv-orlando): proper plurals management
-        return {self.collection:
-                self._driver.get_all_quotas(
-                    neutron_context,
-                    resource_registry.get_all_resources())}
-
-
-class QuotaController(utils.NeutronPecanController):
-
-    def __init__(self, _driver, tenant_id):
-        self._driver = _driver
-        self._tenant_id = tenant_id
-
-        super(QuotaController, self).__init__(
-            "%ss" % RESOURCE_NAME, RESOURCE_NAME)
-
-        # Ensure limits for all registered resources are returned
-        attr_dict = attributes.RESOURCE_ATTRIBUTE_MAP[self.collection]
-        for quota_resource in resource_registry.get_all_resources().keys():
-            attr_dict[quota_resource] = {
-                'allow_post': False,
-                'allow_put': True,
-                'convert_to': attributes.convert_to_int,
-                'validate': {
-                    'type:range': [-1, constants.DB_INTEGER_MAX_VALUE]},
-                'is_visible': True}
-
-    @utils.expose(generic=True)
-    def index(self):
-        return get_tenant_quotas(self._tenant_id, self._driver)
-
-    @utils.when(index, method='PUT')
-    def put(self, *args, **kwargs):
-        neutron_context = request.context.get('neutron_context')
-        # For put requests there's always going to be a single element
-        quota_data = request.context['resources'][0]
-        for key, value in quota_data.items():
-            self._driver.update_quota_limit(
-                neutron_context, self._tenant_id, key, value)
-        return get_tenant_quotas(self._tenant_id, self._driver)
-
-    @utils.when(index, method='DELETE')
-    def delete(self):
-        neutron_context = request.context.get('neutron_context')
-        self._driver.delete_tenant_quota(neutron_context,
-                                         self._tenant_id)
-        response.status = 204
-
-
-def get_tenant_quotas(tenant_id, driver=None):
-    if not driver:
-        driver = importutils.import_class(cfg.CONF.QUOTAS.quota_driver)
-
-    neutron_context = request.context.get('neutron_context')
-    if tenant_id == 'tenant':
-        # NOTE(salv-orlando): Read the following before the code in order
-        # to avoid puking.
-        # There is a weird undocumented behaviour of the Neutron quota API
-        # as 'tenant' is used as an API action to return the identifier
-        # of the tenant in the request context. This is used exclusively
-        # for interaction with python-neutronclient and is a possibly
-        # unnecessary 'whoami' API endpoint. Pending resolution of this
-        # API issue, this controller will just treat the magic string
-        # 'tenant' (and only that string) and return the response expected
-        # by python-neutronclient
-        return {'tenant': {'tenant_id': neutron_context.tenant_id}}
-    tenant_quotas = driver.get_tenant_quotas(
-        neutron_context,
-        resource_registry.get_all_resources(),
-        tenant_id)
-    tenant_quotas['tenant_id'] = tenant_id
-    return {RESOURCE_NAME: tenant_quotas}
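
# A sketch of the two response shapes get_tenant_quotas() above produces;
# it must run inside a pecan request, since it reads request.context, and
# the resource names below are illustrative.
#
#     get_tenant_quotas('tenant')
#         -> {'tenant': {'tenant_id': <caller's tenant id>}}
#     get_tenant_quotas(some_tenant_id, driver)
#         -> {'quota': {'network': 10, 'port': 50,
#                       'tenant_id': some_tenant_id}}
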
diff --git a/neutron/pecan_wsgi/controllers/root.py b/neutron/pecan_wsgi/controllers/root.py
deleted file mode 100644 (file)
index 416d606..0000000
+++ /dev/null
@@ -1,237 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# Copyright (c) 2015 Rackspace, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log
-import pecan
-from pecan import request
-
-from neutron._i18n import _, _LW
-from neutron.api import extensions
-from neutron.api.views import versions as versions_view
-from neutron import manager
-from neutron.pecan_wsgi.controllers import utils
-
-LOG = log.getLogger(__name__)
-_VERSION_INFO = {}
-
-
-def _load_version_info(version_info):
-    assert version_info['id'] not in _VERSION_INFO
-    _VERSION_INFO[version_info['id']] = version_info
-
-
-def _get_version_info():
-    return _VERSION_INFO.values()
-
-
-class RootController(object):
-
-    @utils.expose(generic=True)
-    def index(self):
-        builder = versions_view.get_view_builder(pecan.request)
-        versions = [builder.build(version) for version in _get_version_info()]
-        return dict(versions=versions)
-
-    @utils.when(index, method='HEAD')
-    @utils.when(index, method='POST')
-    @utils.when(index, method='PATCH')
-    @utils.when(index, method='PUT')
-    @utils.when(index, method='DELETE')
-    def not_supported(self):
-        pecan.abort(405)
-
-
-class ExtensionsController(object):
-
-    @utils.expose()
-    def _lookup(self, alias, *remainder):
-        return ExtensionController(alias), remainder
-
-    @utils.expose()
-    def index(self):
-        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-        exts = [extensions.ExtensionController._translate(ext)
-                for ext in ext_mgr.extensions.values()]
-        return {'extensions': exts}
-
-
-class V2Controller(object):
-
-    # Same data structure as neutron.api.versions.Versions for API backward
-    # compatibility
-    version_info = {
-        'id': 'v2.0',
-        'status': 'CURRENT'
-    }
-    _load_version_info(version_info)
-
-    extensions = ExtensionsController()
-
-    @utils.expose(generic=True)
-    def index(self):
-        builder = versions_view.get_view_builder(pecan.request)
-        return dict(version=builder.build(self.version_info))
-
-    @utils.when(index, method='HEAD')
-    @utils.when(index, method='POST')
-    @utils.when(index, method='PATCH')
-    @utils.when(index, method='PUT')
-    @utils.when(index, method='DELETE')
-    def not_supported(self):
-        pecan.abort(405)
-
-    @utils.expose()
-    def _lookup(self, collection, *remainder):
-        # If the first path segment matches a service plugin's path prefix,
-        # the prefix is consumed and the following segment is treated as
-        # the actual collection.
-        # Example: https://neutron.endpoint/v2.0/lbaas/loadbalancers
-        if (remainder and
-                manager.NeutronManager.get_service_plugin_by_path_prefix(
-                    collection)):
-            collection = remainder[0]
-            remainder = remainder[1:]
-        controller = manager.NeutronManager.get_controller_for_resource(
-            collection)
-        if not controller:
-            LOG.warn(_LW("No controller found for: %s - returning response "
-                         "code 404"), collection)
-            pecan.abort(404)
-        # Store resource and collection names in pecan request context so that
-        # hooks can leverage them if necessary
-        request.context['resource'] = controller.resource
-        request.context['collection'] = collection
-        return controller, remainder
-
-
-# This controller cannot be specified directly as a member of RootController
-# as its path is not a valid python identifier
-pecan.route(RootController, 'v2.0', V2Controller())
-
-
-class ExtensionController(object):
-
-    def __init__(self, alias):
-        self.alias = alias
-
-    @utils.expose()
-    def index(self):
-        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-        ext = ext_mgr.extensions.get(self.alias, None)
-        if not ext:
-            pecan.abort(
-                404, detail=_("Extension with alias %s "
-                              "does not exist") % self.alias)
-        return {'extension': extensions.ExtensionController._translate(ext)}
-
-
-class CollectionsController(utils.NeutronPecanController):
-
-    @utils.expose()
-    def _lookup(self, item, *remainder):
-        # Store resource identifier in request context
-        request.context['resource_id'] = item
-        return ItemController(self.resource, item), remainder
-
-    @utils.expose(generic=True)
-    def index(self, *args, **kwargs):
-        return self.get(*args, **kwargs)
-
-    def get(self, *args, **kwargs):
-        # list request
-        # TODO(kevinbenton): use user-provided fields in call to plugin
-        # after making sure policy enforced fields remain
-        kwargs.pop('fields', None)
-        _listify = lambda x: x if isinstance(x, list) else [x]
-        filters = {k: _listify(v) for k, v in kwargs.items()}
-        # TODO(kevinbenton): convert these using api_common.get_filters
-        lister = getattr(self.plugin, 'get_%s' % self.collection)
-        neutron_context = request.context['neutron_context']
-        return {self.collection: lister(neutron_context, filters=filters)}
-
-    @utils.when(index, method='HEAD')
-    @utils.when(index, method='PATCH')
-    @utils.when(index, method='PUT')
-    @utils.when(index, method='DELETE')
-    def not_supported(self):
-        pecan.abort(405)
-
-    @utils.when(index, method='POST')
-    def post(self, *args, **kwargs):
-        # TODO(kevinbenton): emulated bulk!
-        resources = request.context['resources']
-        pecan.response.status = 201
-        return self.create(resources)
-
-    def create(self, resources):
-        if len(resources) > 1:
-            # Bulk!
-            method = 'create_%s_bulk' % self.resource
-            key = self.collection
-            data = {key: [{self.resource: res} for res in resources]}
-        else:
-            method = 'create_%s' % self.resource
-            key = self.resource
-            data = {key: resources[0]}
-        creator = getattr(self.plugin, method)
-        neutron_context = request.context['neutron_context']
-        return {key: creator(neutron_context, data)}
-
-
-class ItemController(utils.NeutronPecanController):
-
-    def __init__(self, resource, item):
-        super(ItemController, self).__init__(None, resource)
-        self.item = item
-
-    @utils.expose(generic=True)
-    def index(self, *args, **kwargs):
-        return self.get()
-
-    def get(self, *args, **kwargs):
-        getter = getattr(self.plugin, 'get_%s' % self.resource)
-        neutron_context = request.context['neutron_context']
-        return {self.resource: getter(neutron_context, self.item)}
-
-    @utils.when(index, method='HEAD')
-    @utils.when(index, method='POST')
-    @utils.when(index, method='PATCH')
-    def not_supported(self):
-        pecan.abort(405)
-
-    @utils.when(index, method='PUT')
-    def put(self, *args, **kwargs):
-        neutron_context = request.context['neutron_context']
-        resources = request.context['resources']
-        if request.member_action:
-            member_action_method = getattr(self.plugin,
-                                           request.member_action)
-            return member_action_method(neutron_context, self.item,
-                                        resources[0])
-        # TODO(kevinbenton): bulk?
-        updater = getattr(self.plugin, 'update_%s' % self.resource)
-        # Bulk update is not supported; 'resources' always contains a single
-        # element
-        data = {self.resource: resources[0]}
-        return updater(neutron_context, self.item, data)
-
-    @utils.when(index, method='DELETE')
-    def delete(self):
-        # TODO(kevinbenton): setting code could be in a decorator
-        pecan.response.status = 204
-        neutron_context = request.context['neutron_context']
-        deleter = getattr(self.plugin, 'delete_%s' % self.resource)
-        return deleter(neutron_context, self.item)
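
The controllers above never hard-code plugin calls: they build method names
from the HTTP verb and the resource name. A minimal sketch of that convention
(FakePlugin and dispatch are hypothetical stand-ins):

    class FakePlugin(object):
        def get_port(self, context, item):
            return {'id': item, 'status': 'ACTIVE'}

    def dispatch(plugin, verb, resource, context, *args):
        # Resolve the plugin method by convention: <verb>_<resource>
        handler = getattr(plugin, '%s_%s' % (verb, resource))
        return handler(context, *args)

    print(dispatch(FakePlugin(), 'get', 'port', None, 'port-1'))
    # -> {'id': 'port-1', 'status': 'ACTIVE'}
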
diff --git a/neutron/pecan_wsgi/controllers/utils.py b/neutron/pecan_wsgi/controllers/utils.py
deleted file mode 100644 (file)
index 49bd97e..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2015 Taturiello Consulting, Meh.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import pecan
-
-from neutron import manager
-
-# Utility functions for Pecan controllers.
-
-
-def expose(*args, **kwargs):
-    """Helper function so we don't have to specify json for everything."""
-    kwargs.setdefault('content_type', 'application/json')
-    kwargs.setdefault('template', 'json')
-    return pecan.expose(*args, **kwargs)
-
-
-def when(index, *args, **kwargs):
-    """Helper function so we don't have to specify json for everything."""
-    kwargs.setdefault('content_type', 'application/json')
-    kwargs.setdefault('template', 'json')
-    return index.when(*args, **kwargs)
-
-
-class NeutronPecanController(object):
-
-    def __init__(self, collection, resource):
-        self.collection = collection
-        self.resource = resource
-        self.plugin = manager.NeutronManager.get_plugin_for_resource(
-            self.resource)
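
The two helpers only pre-fill keyword defaults before delegating to pecan.
The same pattern in isolation (decorator machinery omitted):

    def json_defaults(kwargs):
        # Mirror expose()/when(): render as JSON unless the caller overrides
        kwargs.setdefault('content_type', 'application/json')
        kwargs.setdefault('template', 'json')
        return kwargs

    print(json_defaults({}))                   # JSON defaults applied
    print(json_defaults({'template': 'xml'}))  # caller's choice wins
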
diff --git a/neutron/pecan_wsgi/hooks/__init__.py b/neutron/pecan_wsgi/hooks/__init__.py
deleted file mode 100644 (file)
index a14a810..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.pecan_wsgi.hooks import body_validation
-from neutron.pecan_wsgi.hooks import context
-from neutron.pecan_wsgi.hooks import member_action
-from neutron.pecan_wsgi.hooks import notifier
-from neutron.pecan_wsgi.hooks import ownership_validation
-from neutron.pecan_wsgi.hooks import policy_enforcement
-from neutron.pecan_wsgi.hooks import quota_enforcement
-from neutron.pecan_wsgi.hooks import translation
-
-
-ExceptionTranslationHook = translation.ExceptionTranslationHook
-ContextHook = context.ContextHook
-MemberActionHook = member_action.MemberActionHook
-BodyValidationHook = body_validation.BodyValidationHook
-OwnershipValidationHook = ownership_validation.OwnershipValidationHook
-PolicyHook = policy_enforcement.PolicyHook
-QuotaEnforcementHook = quota_enforcement.QuotaEnforcementHook
-NotifierHook = notifier.NotifierHook
diff --git a/neutron/pecan_wsgi/hooks/body_validation.py b/neutron/pecan_wsgi/hooks/body_validation.py
deleted file mode 100644 (file)
index 6e95730..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from pecan import hooks
-
-from neutron.api.v2 import attributes as v2_attributes
-from neutron.api.v2 import base as v2_base
-
-
-class BodyValidationHook(hooks.PecanHook):
-
-    priority = 120
-
-    def before(self, state):
-        if state.request.method not in ('POST', 'PUT'):
-            return
-        resource = state.request.context.get('resource')
-        collection = state.request.context.get('collection')
-        neutron_context = state.request.context['neutron_context']
-        is_create = state.request.method == 'POST'
-        if not resource:
-            return
-        # Prepare data to be passed to the plugin from request body
-        data = v2_base.Controller.prepare_request_body(
-            neutron_context,
-            state.request.json,
-            is_create,
-            resource,
-            v2_attributes.get_collection_info(collection),
-            allow_bulk=is_create)
-        if collection in data:
-            state.request.context['resources'] = [item[resource] for item in
-                                                  data[collection]]
-        else:
-            state.request.context['resources'] = [data[resource]]
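
The final branch above normalizes single and bulk bodies into one shape. A
standalone sketch of just that step (the body layouts match what the hook
receives from prepare_request_body):

    def extract_resources(data, resource, collection):
        # Bulk bodies arrive as {collection: [{resource: ...}, ...]},
        # single bodies as {resource: ...}; return a flat list either way
        if collection in data:
            return [item[resource] for item in data[collection]]
        return [data[resource]]

    print(extract_resources({'ports': [{'port': {'name': 'a'}}]},
                            'port', 'ports'))   # -> [{'name': 'a'}]
    print(extract_resources({'port': {'name': 'b'}},
                            'port', 'ports'))   # -> [{'name': 'b'}]
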
diff --git a/neutron/pecan_wsgi/hooks/context.py b/neutron/pecan_wsgi/hooks/context.py
deleted file mode 100644 (file)
index 7c3f9a3..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_middleware import request_id
-from pecan import hooks
-
-from neutron import context
-
-
-class ContextHook(hooks.PecanHook):
-    """Configures a request context and attaches it to the request.
-    The following HTTP request headers are used:
-    X-User-Id or X-User:
-        Used for context.user_id.
-    X-Project-Id:
-        Used for context.tenant_id.
-    X-Project-Name:
-        Used for context.tenant_name.
-    X-Auth-Token:
-        Used for context.auth_token.
-    X-Roles:
-        Used for setting the context.is_admin flag to either True or False.
-        The flag is set to True if X-Roles contains either the
-        'administrator' or 'admin' role. Otherwise it is set to False.
-    """
-
-    priority = 95
-
-    def before(self, state):
-        user_id = state.request.headers.get('X-User-Id')
-        user_id = state.request.headers.get('X-User', user_id)
-        user_name = state.request.headers.get('X-User-Name', '')
-        tenant_id = state.request.headers.get('X-Project-Id')
-        tenant_name = state.request.headers.get('X-Project-Name')
-        auth_token = state.request.headers.get('X-Auth-Token')
-        roles = state.request.headers.get('X-Roles', '').split(',')
-        roles = [r.strip() for r in roles]
-        creds = {'roles': roles}
-        req_id = state.request.headers.get(request_id.ENV_REQUEST_ID)
-        # TODO(kevinbenton): is_admin logic
-        # Create a context with the authentication data
-        ctx = context.Context(user_id, tenant_id=tenant_id,
-                              roles=creds['roles'],
-                              user_name=user_name, tenant_name=tenant_name,
-                              request_id=req_id, auth_token=auth_token)
-
-        # Inject the context...
-        state.request.context['neutron_context'] = ctx
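
A self-contained sketch of the header-to-context mapping (plain dicts stand
in for the Neutron context object; the is_admin derivation is an assumption,
since the hook above still carries a TODO for it):

    def build_context(headers):
        user_id = headers.get('X-User-Id') or headers.get('X-User')
        roles = [r.strip() for r in headers.get('X-Roles', '').split(',')
                 if r.strip()]
        return {
            'user_id': user_id,
            'tenant_id': headers.get('X-Project-Id'),
            'tenant_name': headers.get('X-Project-Name'),
            'roles': roles,
            'is_admin': 'admin' in roles,  # assumed; not implemented above
        }

    print(build_context({'X-User-Id': 'u1', 'X-Project-Id': 'p1',
                         'X-Roles': 'member, admin'}))
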
diff --git a/neutron/pecan_wsgi/hooks/member_action.py b/neutron/pecan_wsgi/hooks/member_action.py
deleted file mode 100644 (file)
index 86cfd3f..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from pecan import abort
-from pecan import hooks
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-
-
-class MemberActionHook(hooks.PecanHook):
-
-    priority = 95
-
-    def before(self, state):
-        # TODO(salv-orlando): This hook must go. Handling actions like this is
-        # shameful
-        resource = state.request.context.get('resource')
-        if not resource:
-            return
-        try:
-            # Remove the format suffix if any
-            uri = state.request.path.rsplit('.', 1)[0].split('/')[2:]
-            if not uri:
-                # there's nothing to process in the URI
-                return
-        except IndexError:
-            return
-        collection = None
-        for (collection, res) in attributes.PLURALS.items():
-            if res == resource:
-                break
-        else:
-            return
-        state.request.member_action = self._parse_action(
-            resource, collection, uri[1:])
-
-    def _parse_action(self, resource, collection, remainder):
-        # NOTE(salv-orlando): This check is revolting and makes me
-        # puke, but avoids silly failures when dealing with API actions
-        # such as "add_router_interface".
-        if len(remainder) > 1:
-            action = remainder[1]
-        else:
-            return
-        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-        resource_exts = ext_mgr.get_resources()
-        for ext in resource_exts:
-            if (ext.collection == collection and action in ext.member_actions):
-                return action
-        # Action or resource extension not found
-        if action:
-            abort(404, detail="Action %(action)s for resource "
-                              "%(resource)s undefined" %
-                              {'action': action,
-                               'resource': resource})
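
A minimal sketch of _parse_action's happy path (known_actions stands in for
the extension manager's member_actions lookup; the 404 on unknown actions is
omitted):

    def parse_action(remainder, known_actions):
        # remainder is the URI after the collection, e.g.
        # ['r1', 'add_router_interface'] for
        # PUT /v2.0/routers/r1/add_router_interface
        if len(remainder) > 1 and remainder[1] in known_actions:
            return remainder[1]

    print(parse_action(['r1', 'add_router_interface'],
                       {'add_router_interface'}))
    # -> 'add_router_interface'
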
diff --git a/neutron/pecan_wsgi/hooks/notifier.py b/neutron/pecan_wsgi/hooks/notifier.py
deleted file mode 100644 (file)
index f63c953..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from pecan import hooks
-
-
-class NotifierHook(hooks.PecanHook):
-    priority = 140
-
-    # TODO(kevinbenton): implement
-    # dhcp agent notifier
-    # ceilo notifier
-    # nova notifier
-    def before(self, state):
-        pass
-
-    def after(self, state):
-        pass
diff --git a/neutron/pecan_wsgi/hooks/ownership_validation.py b/neutron/pecan_wsgi/hooks/ownership_validation.py
deleted file mode 100644 (file)
index e3c4af3..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from pecan import hooks
-import webob
-
-from neutron._i18n import _
-from neutron import manager
-
-
-class OwnershipValidationHook(hooks.PecanHook):
-
-    priority = 125
-
-    def before(self, state):
-        if state.request.method != 'POST':
-            return
-        for item in state.request.context.get('resources', []):
-            self._validate_network_tenant_ownership(state, item)
-
-    def _validate_network_tenant_ownership(self, state, resource_item):
-        # TODO(salvatore-orlando): consider whether this check can be folded
-        # in the policy engine
-        neutron_context = state.request.context.get('neutron_context')
-        resource = state.request.context.get('resource')
-        if (neutron_context.is_admin or neutron_context.is_advsvc or
-                resource not in ('port', 'subnet')):
-            return
-        plugin = manager.NeutronManager.get_plugin()
-        network = plugin.get_network(neutron_context,
-                                     resource_item['network_id'])
-        # do not perform the check on shared networks
-        if network.get('shared'):
-            return
-
-        network_owner = network['tenant_id']
-
-        if network_owner != resource_item['tenant_id']:
-            msg = _("Tenant %(tenant_id)s not allowed to "
-                    "create %(resource)s on this network")
-            raise webob.exc.HTTPForbidden(msg % {
-                "tenant_id": resource_item['tenant_id'],
-                "resource": resource,
-            })
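
The check reduces to three short-circuits. A standalone sketch (booleans and
dicts stand in for the context and plugin lookups; the hook raises
HTTPForbidden where this returns False):

    def network_ownership_ok(is_privileged, network, item):
        # Admin/advsvc requests and shared networks skip the check;
        # otherwise the port/subnet must belong to the network owner
        if is_privileged or network.get('shared'):
            return True
        return network['tenant_id'] == item['tenant_id']

    print(network_ownership_ok(False, {'tenant_id': 'a'}, {'tenant_id': 'b'}))
    # -> False
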
diff --git a/neutron/pecan_wsgi/hooks/policy_enforcement.py b/neutron/pecan_wsgi/hooks/policy_enforcement.py
deleted file mode 100644 (file)
index dcd1800..0000000
+++ /dev/null
@@ -1,201 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-import simplejson
-
-from oslo_policy import policy as oslo_policy
-from oslo_utils import excutils
-from pecan import hooks
-import webob
-
-from neutron._i18n import _
-from neutron.api.v2 import attributes as v2_attributes
-from neutron.common import constants as const
-from neutron.extensions import quotasv2
-from neutron import manager
-from neutron.pecan_wsgi.controllers import quota
-from neutron import policy
-
-
-def _custom_getter(resource, resource_id):
-    """Helper function to retrieve resources not served by any plugin."""
-    if resource == quotasv2.RESOURCE_NAME:
-        return quota.get_tenant_quotas(resource_id)[quotasv2.RESOURCE_NAME]
-
-
-class PolicyHook(hooks.PecanHook):
-    priority = 135
-    ACTION_MAP = {'POST': 'create', 'PUT': 'update', 'GET': 'get',
-                  'DELETE': 'delete'}
-
-    def _fetch_resource(self, neutron_context, resource, resource_id):
-        attrs = v2_attributes.get_resource_info(resource)
-        field_list = [name for (name, value) in attrs.items()
-                      if (value.get('required_by_policy') or
-                          value.get('primary_key') or 'default' not in value)]
-        plugin = manager.NeutronManager.get_plugin_for_resource(resource)
-        if plugin:
-            getter = getattr(plugin, 'get_%s' % resource)
-            # TODO(kevinbenton): the parent_id logic currently in base.py
-            return getter(neutron_context, resource_id, fields=field_list)
-        else:
-            # Some legit resources, like quota, do not have a plugin yet.
-            # Retrieving the original object is nevertheless important
-            # for policy checks.
-            return _custom_getter(resource, resource_id)
-
-    def before(self, state):
-        # This hook should be run only for PUT, POST and DELETE methods,
-        # and only for requests targeting a neutron resource
-        resources = state.request.context.get('resources', [])
-        if state.request.method not in ('POST', 'PUT', 'DELETE'):
-            return
-        # As this routine will likely alter the resources, do a shallow copy
-        resources_copy = resources[:]
-        neutron_context = state.request.context.get('neutron_context')
-        resource = state.request.context.get('resource')
-        # If there is no resource for this request, don't bother running authZ
-        # policies
-        if not resource:
-            return
-        collection = state.request.context.get('collection')
-        needs_prefetch = (state.request.method == 'PUT' or
-                          state.request.method == 'DELETE')
-        policy.init()
-        action = '%s_%s' % (self.ACTION_MAP[state.request.method], resource)
-
-        # NOTE(salv-orlando): As bulk updates are not supported, a PUT
-        # request will have only a single item to process, and its
-        # identifier will already have been retrieved by the lookup
-        # process; a DELETE request carries no item at all in the
-        # request body
-        if needs_prefetch:
-            try:
-                item = resources_copy.pop()
-            except IndexError:
-                # Oops... this was a DELETE after all!
-                item = {}
-            resource_id = state.request.context.get('resource_id')
-            obj = copy.copy(self._fetch_resource(neutron_context,
-                                                 resource,
-                                                 resource_id))
-            obj.update(item)
-            obj[const.ATTRIBUTES_TO_UPDATE] = item.keys()
-            # Put back the item in the list so that policies could be enforced
-            resources_copy.append(obj)
-
-        for item in resources_copy:
-            try:
-                policy.enforce(
-                    neutron_context, action, item,
-                    pluralized=collection)
-            except oslo_policy.PolicyNotAuthorized:
-                with excutils.save_and_reraise_exception() as ctxt:
-                    # If a tenant is modifying its own object, it's safe to
-                    # return a 403. Otherwise, pretend that it doesn't exist
-                    # to avoid giving away information.
-                    if (needs_prefetch and
-                        neutron_context.tenant_id != item['tenant_id']):
-                        ctxt.reraise = False
-                msg = _('The resource could not be found.')
-                raise webob.exc.HTTPNotFound(msg)
-
-    def after(self, state):
-        neutron_context = state.request.context.get('neutron_context')
-        resource = state.request.context.get('resource')
-        collection = state.request.context.get('collection')
-        if not resource:
-            # can't filter a resource we don't recognize
-            return
-        # NOTE(kevinbenton): extension listing isn't controlled by policy
-        if resource == 'extension':
-            return
-        try:
-            data = state.response.json
-        except simplejson.JSONDecodeError:
-            return
-        action = '%s_%s' % (self.ACTION_MAP[state.request.method],
-                            resource)
-        if not data or (resource not in data and collection not in data):
-            return
-        is_single = resource in data
-        key = resource if is_single else collection
-        to_process = [data[resource]] if is_single else data[collection]
-        # in the single case, we enforce which raises on violation
-        # in the plural case, we just check so violating items are hidden
-        policy_method = policy.enforce if is_single else policy.check
-        plugin = manager.NeutronManager.get_plugin_for_resource(resource)
-        try:
-            resp = [self._get_filtered_item(state.request, resource,
-                                            collection, item)
-                    for item in to_process
-                    if (state.request.method != 'GET' or
-                        policy_method(neutron_context, action, item,
-                                      plugin=plugin,
-                                      pluralized=collection))]
-        except oslo_policy.PolicyNotAuthorized as e:
-            # This exception must be explicitly caught as the exception
-            # translation hook won't be called if an error occurs in the
-            # 'after' handler.
-            raise webob.exc.HTTPForbidden(e.message)
-
-        if is_single:
-            resp = resp[0]
-        data[key] = resp
-        state.response.json = data
-
-    def _get_filtered_item(self, request, resource, collection, data):
-        neutron_context = request.context.get('neutron_context')
-        to_exclude = self._exclude_attributes_by_policy(
-            neutron_context, resource, collection, data)
-        return self._filter_attributes(request, data, to_exclude)
-
-    def _filter_attributes(self, request, data, fields_to_strip):
-        # TODO(kevinbenton): this works but we didn't allow the plugin to
-        # only fetch the fields we are interested in. consider moving this
-        # to the call
-        user_fields = request.params.getall('fields')
-        return dict(item for item in data.items()
-                    if (item[0] not in fields_to_strip and
-                        (not user_fields or item[0] in user_fields)))
-
-    def _exclude_attributes_by_policy(self, context, resource,
-                                      collection, data):
-        """Identifies attributes to exclude according to authZ policies.
-
-        Return a list of attribute names which should be stripped from the
-        response returned to the user because the user is not authorized
-        to see them.
-        """
-        attributes_to_exclude = []
-        for attr_name in data.keys():
-            attr_data = v2_attributes.get_resource_info(
-                resource).get(attr_name)
-            if attr_data and attr_data['is_visible']:
-                if policy.check(
-                    context,
-                    # NOTE(kevinbenton): this used to reference a
-                    # _plugin_handlers dict, why?
-                    'get_%s:%s' % (resource, attr_name),
-                    data,
-                    might_not_exist=True,
-                    pluralized=collection):
-                    # this attribute is visible, check next one
-                    continue
-            # if the code reaches this point then either the policy check
-            # failed or the attribute was not visible in the first place
-            attributes_to_exclude.append(attr_name)
-        return attributes_to_exclude
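
_filter_attributes in isolation: a small sketch showing how policy-stripped
fields and user-requested fields combine (the sample port data is made up):

    def filter_attributes(data, fields_to_strip, user_fields=()):
        # Drop policy-hidden fields; if the caller asked for specific
        # fields, additionally keep only those
        return {k: v for k, v in data.items()
                if k not in fields_to_strip
                and (not user_fields or k in user_fields)}

    port = {'id': 'p1', 'mac_address': 'aa:bb', 'binding:host_id': 'node1'}
    print(filter_attributes(port, {'binding:host_id'}))
    # -> {'id': 'p1', 'mac_address': 'aa:bb'}
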
diff --git a/neutron/pecan_wsgi/hooks/quota_enforcement.py b/neutron/pecan_wsgi/hooks/quota_enforcement.py
deleted file mode 100644 (file)
index 6cede44..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-from pecan import hooks
-
-from neutron.common import exceptions
-from neutron import manager
-from neutron import quota
-
-
-LOG = logging.getLogger(__name__)
-
-
-class QuotaEnforcementHook(hooks.PecanHook):
-
-    priority = 130
-
-    def before(self, state):
-        # TODO(salv-orlando): This hook must go when adapting the pecan code to
-        # use reservations.
-        resource = state.request.context.get('resource')
-        if state.request.method != 'POST' or not resource:
-            return
-        plugin = manager.NeutronManager.get_plugin_for_resource(resource)
-        items = state.request.context.get('resources')
-        deltas = {}
-        for item in items:
-            tenant_id = item['tenant_id']
-            try:
-                neutron_context = state.request.context.get('neutron_context')
-                count = quota.QUOTAS.count(neutron_context,
-                                           resource,
-                                           plugin,
-                                           tenant_id)
-                delta = deltas.get(tenant_id, 0) + 1
-                # Store the delta back so bulk requests for the same tenant
-                # accumulate rather than always checking a delta of 1
-                deltas[tenant_id] = delta
-                kwargs = {resource: count + delta}
-            except exceptions.QuotaResourceUnknown as e:
-                # We don't want to quota this resource
-                LOG.debug(e)
-            else:
-                quota.QUOTAS.limit_check(neutron_context, tenant_id,
-                                         **kwargs)
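
The per-tenant delta arithmetic above, in isolation: each requested item adds
one unit against its tenant's quota. A sketch of the accumulation using
collections.Counter:

    from collections import Counter

    def bulk_deltas(items):
        # One unit per requested item, grouped by tenant
        return Counter(item['tenant_id'] for item in items)

    print(bulk_deltas([{'tenant_id': 'a'}, {'tenant_id': 'a'},
                       {'tenant_id': 'b'}]))
    # -> Counter({'a': 2, 'b': 1})
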
diff --git a/neutron/pecan_wsgi/hooks/translation.py b/neutron/pecan_wsgi/hooks/translation.py
deleted file mode 100644 (file)
index f20105a..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from pecan import hooks
-import webob.exc
-
-from neutron._i18n import _
-from neutron.api.v2 import base as v2base
-from neutron.i18n import _LE
-
-
-LOG = logging.getLogger(__name__)
-
-
-class ExceptionTranslationHook(hooks.PecanHook):
-    def on_error(self, state, e):
-        # if it's already an http error, just return to let it go through
-        if isinstance(e, webob.exc.WSGIHTTPException):
-            return
-        for exc_class, to_class in v2base.FAULT_MAP.items():
-            if isinstance(e, exc_class):
-                raise to_class(getattr(e, 'msg', e.message))
-        # leaked unexpected exception, convert to boring old 500 error and
-        # hide message from user in case it contained sensitive details
-        LOG.exception(_LE("An unexpected exception was caught: %s"), e)
-        raise webob.exc.HTTPInternalServerError(
-            _("An unexpected internal error occurred."))
diff --git a/neutron/pecan_wsgi/startup.py b/neutron/pecan_wsgi/startup.py
deleted file mode 100644 (file)
index e4ea30e..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log
-
-from neutron._i18n import _LI, _LW
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.api.v2 import router
-from neutron import manager
-from neutron.pecan_wsgi.controllers import root
-from neutron import policy
-from neutron.quota import resource_registry
-
-LOG = log.getLogger(__name__)
-
-
-def _plugin_for_resource(collection):
-    if collection in router.RESOURCES.values():
-        # this is a core resource, return the core plugin
-        return manager.NeutronManager.get_plugin()
-    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-    # Multiple extensions can map to the same resource. This happens
-    # because of 'attribute' extensions. Due to the way in which neutron
-    # plugins and request dispatching are constructed, it is impossible for
-    # the same resource to be handled by more than one plugin. Therefore
-    # all the extensions mapped to a given resource will necessarily be
-    # implemented by the same plugin.
-    ext_res_mappings = dict((ext.get_alias(), collection) for
-                            ext in ext_mgr.extensions.values() if
-                            collection in ext.get_extended_resources('2.0'))
-    LOG.debug("Extension mappings for: %(collection)s: %(aliases)s",
-              {'collection': collection, 'aliases': ext_res_mappings.keys()})
-    # find the plugin that supports this extension
-    for plugin in ext_mgr.plugins.values():
-        ext_aliases = getattr(plugin, 'supported_extension_aliases', [])
-        for alias in ext_aliases:
-            if alias in ext_res_mappings:
-                # This plugin implements this resource
-                return plugin
-    LOG.warn(_LW("No plugin found for:%s"), collection)
-
-
-def _handle_plurals(collection):
-    resource = attributes.PLURALS.get(collection)
-    if not resource:
-        if collection.endswith('ies'):
-            resource = "%sy" % collection[:-3]
-        else:
-            resource = collection[:-1]
-    attributes.PLURALS[collection] = resource
-    return resource
-
-
-def initialize_all():
-    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-    ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP)
-    # At this stage we have a fully populated resource attribute map;
-    # build Pecan controllers and routes for every resource (both core
-    # and extensions)
-    pecanized_exts = [ext for ext in ext_mgr.extensions.values() if
-                      hasattr(ext, 'get_pecan_controllers')]
-    pecan_controllers = {}
-    for ext in pecanized_exts:
-        LOG.info(_LI("Extension %s is pecan-aware. Fetching resources "
-                     "and controllers"), ext.get_name())
-        controllers = ext.get_pecan_controllers()
-        # controllers is actually a list of pairs where the first element is
-        # the collection name and the second the actual controller
-        for (collection, coll_controller) in controllers:
-            pecan_controllers[collection] = coll_controller
-
-    for collection in attributes.RESOURCE_ATTRIBUTE_MAP:
-        resource = _handle_plurals(collection)
-        controller = pecan_controllers.get(collection)
-        if not controller:
-            LOG.debug("Building controller for resource:%s", resource)
-            plugin = _plugin_for_resource(collection)
-            if plugin:
-                manager.NeutronManager.set_plugin_for_resource(
-                    resource, plugin)
-            else:
-                LOG.warn(_LW("No plugin found for resource:%s. API calls "
-                             "may not be correctly dispatched"), resource)
-            controller = root.CollectionsController(collection, resource)
-        else:
-            LOG.debug("There are already controllers for resource:%s",
-                      resource)
-
-        manager.NeutronManager.set_controller_for_resource(
-            collection, controller)
-        LOG.info(_LI("Added controller for resource %(resource)s "
-                     "via URI path segment:%(collection)s"),
-                 {'resource': resource,
-                  'collection': collection})
-    # NOTE(salv-orlando): If you care about code quality, please read below.
-    # Hackiness is strong with the piece of code below. It is used for
-    # populating resource plurals and registering resources with the quota
-    # engine, but the methods it calls were not conceived with this aim.
-    # Therefore it only leverages side effects of those methods. Moreover,
-    # as it is really not advisable to load an instance of
-    # neutron.api.v2.router.APIRouter just to register resources with the
-    # quota engine, core resources are explicitly registered here.
-    # TODO(salv-orlando): The Pecan WSGI support should provide its own
-    # solution to manage resource plurals and registration of resources with
-    # the quota engine
-    for resource in router.RESOURCES.keys():
-        resource_registry.register_resource_by_name(resource)
-    for ext in ext_mgr.extensions.values():
-        # make each extension populate its plurals
-        if hasattr(ext, 'get_resources'):
-            ext.get_resources()
-        if hasattr(ext, 'get_extended_resources'):
-            ext.get_extended_resources('v2.0')
-    # Certain policy checks require that the extensions are loaded
-    # and the RESOURCE_ATTRIBUTE_MAP populated before they can be
-    # properly initialized. This can only be claimed with certainty
-    # once this point in the code has been reached. In the event
-    # that the policies have been initialized before this point,
-    # calling reset will cause the next policy check to
-    # re-initialize with all of the required data in place.
-    policy.reset()
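
_handle_plurals encodes the naming rule used throughout startup. Standalone:

    def singularize(collection, known=None):
        # Mirrors _handle_plurals: honor known mappings first, then apply
        # the 'ies' -> 'y' and trailing-'s' heuristics
        resource = (known or {}).get(collection)
        if not resource:
            if collection.endswith('ies'):
                resource = collection[:-3] + 'y'
            else:
                resource = collection[:-1]
        return resource

    assert singularize('policies') == 'policy'
    assert singularize('networks') == 'network'
    assert singularize('qos', known={'qos': 'qos'}) == 'qos'
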
diff --git a/neutron/plugins/__init__.py b/neutron/plugins/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/common/__init__.py b/neutron/plugins/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/common/constants.py b/neutron/plugins/common/constants.py
deleted file mode 100644 (file)
index 65a0fb3..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# Neutron well-known service type constants:
-CORE = "CORE"
-DUMMY = "DUMMY"
-LOADBALANCER = "LOADBALANCER"
-LOADBALANCERV2 = "LOADBALANCERV2"
-FIREWALL = "FIREWALL"
-VPN = "VPN"
-METERING = "METERING"
-L3_ROUTER_NAT = "L3_ROUTER_NAT"
-FLAVORS = "FLAVORS"
-QOS = "QOS"
-
-# Maps extension alias to service type
-EXT_TO_SERVICE_MAPPING = {
-    'dummy': DUMMY,
-    'lbaas': LOADBALANCER,
-    'lbaasv2': LOADBALANCERV2,
-    'fwaas': FIREWALL,
-    'vpnaas': VPN,
-    'metering': METERING,
-    'router': L3_ROUTER_NAT,
-    'flavors': FLAVORS,
-    'qos': QOS,
-}
-
-# Service operation status constants
-ACTIVE = "ACTIVE"
-DOWN = "DOWN"
-CREATED = "CREATED"
-PENDING_CREATE = "PENDING_CREATE"
-PENDING_UPDATE = "PENDING_UPDATE"
-PENDING_DELETE = "PENDING_DELETE"
-INACTIVE = "INACTIVE"
-ERROR = "ERROR"
-
-ACTIVE_PENDING_STATUSES = (
-    ACTIVE,
-    PENDING_CREATE,
-    PENDING_UPDATE
-)
-
-# Network Type constants
-TYPE_FLAT = 'flat'
-TYPE_GENEVE = 'geneve'
-TYPE_GRE = 'gre'
-TYPE_LOCAL = 'local'
-TYPE_VXLAN = 'vxlan'
-TYPE_VLAN = 'vlan'
-TYPE_NONE = 'none'
-
-# Values for network_type
-
-# For VLAN Network
-MIN_VLAN_TAG = 1
-MAX_VLAN_TAG = 4094
-
-# For Geneve Tunnel
-MIN_GENEVE_VNI = 1
-MAX_GENEVE_VNI = 2 ** 24 - 1
-
-# For GRE Tunnel
-MIN_GRE_ID = 1
-MAX_GRE_ID = 2 ** 32 - 1
-
-# For VXLAN Tunnel
-MIN_VXLAN_VNI = 1
-MAX_VXLAN_VNI = 2 ** 24 - 1
-VXLAN_UDP_PORT = 4789
-
-# Network Type MTU overhead
-GENEVE_ENCAP_MIN_OVERHEAD = 50
-GRE_ENCAP_OVERHEAD = 42
-VXLAN_ENCAP_OVERHEAD = 50
diff --git a/neutron/plugins/common/utils.py b/neutron/plugins/common/utils.py
deleted file mode 100644 (file)
index 7ec7ba5..0000000
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright 2013 Cisco Systems, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Common utilities and helper functions for OpenStack Networking Plugins.
-"""
-
-import hashlib
-
-from oslo_log import log as logging
-import six
-import webob.exc
-
-from neutron._i18n import _, _LI
-from neutron.api.v2 import attributes
-from neutron.common import constants as n_const
-from neutron.common import exceptions as n_exc
-from neutron.plugins.common import constants as p_const
-
-INTERFACE_HASH_LEN = 6
-LOG = logging.getLogger(__name__)
-
-
-def is_valid_vlan_tag(vlan):
-    return p_const.MIN_VLAN_TAG <= vlan <= p_const.MAX_VLAN_TAG
-
-
-def is_valid_gre_id(gre_id):
-    return p_const.MIN_GRE_ID <= gre_id <= p_const.MAX_GRE_ID
-
-
-def is_valid_vxlan_vni(vni):
-    return p_const.MIN_VXLAN_VNI <= vni <= p_const.MAX_VXLAN_VNI
-
-
-def is_valid_geneve_vni(vni):
-    return p_const.MIN_GENEVE_VNI <= vni <= p_const.MAX_GENEVE_VNI
-
-
-def verify_tunnel_range(tunnel_range, tunnel_type):
-    """Raise an exception for invalid tunnel range or malformed range."""
-    mappings = {p_const.TYPE_GRE: is_valid_gre_id,
-                p_const.TYPE_VXLAN: is_valid_vxlan_vni,
-                p_const.TYPE_GENEVE: is_valid_geneve_vni}
-    if tunnel_type in mappings:
-        for ident in tunnel_range:
-            if not mappings[tunnel_type](ident):
-                raise n_exc.NetworkTunnelRangeError(
-                    tunnel_range=tunnel_range,
-                    error=_("%(id)s is not a valid %(type)s identifier") %
-                    {'id': ident, 'type': tunnel_type})
-    if tunnel_range[1] < tunnel_range[0]:
-        raise n_exc.NetworkTunnelRangeError(
-            tunnel_range=tunnel_range,
-            error=_("End of tunnel range is less "
-                    "than start of tunnel range"))
-
-
-def verify_vlan_range(vlan_range):
-    """Raise an exception for invalid tags or malformed range."""
-    for vlan_tag in vlan_range:
-        if not is_valid_vlan_tag(vlan_tag):
-            raise n_exc.NetworkVlanRangeError(
-                vlan_range=vlan_range,
-                error=_("%s is not a valid VLAN tag") % vlan_tag)
-    if vlan_range[1] < vlan_range[0]:
-        raise n_exc.NetworkVlanRangeError(
-            vlan_range=vlan_range,
-            error=_("End of VLAN range is less than start of VLAN range"))
-
-
-def parse_network_vlan_range(network_vlan_range):
-    """Interpret a string as network[:vlan_begin:vlan_end]."""
-    entry = network_vlan_range.strip()
-    if ':' in entry:
-        try:
-            network, vlan_min, vlan_max = entry.split(':')
-            vlan_range = (int(vlan_min), int(vlan_max))
-        except ValueError as ex:
-            raise n_exc.NetworkVlanRangeError(vlan_range=entry, error=ex)
-        if not network:
-            raise n_exc.PhysicalNetworkNameError()
-        verify_vlan_range(vlan_range)
-        return network, vlan_range
-    else:
-        return entry, None
-
-
-def parse_network_vlan_ranges(network_vlan_ranges_cfg_entries):
-    """Interpret a list of strings as network[:vlan_begin:vlan_end] entries."""
-    networks = {}
-    for entry in network_vlan_ranges_cfg_entries:
-        network, vlan_range = parse_network_vlan_range(entry)
-        if vlan_range:
-            networks.setdefault(network, []).append(vlan_range)
-        else:
-            networks.setdefault(network, [])
-    return networks
-
-
-def in_pending_status(status):
-    return status in (p_const.PENDING_CREATE,
-                      p_const.PENDING_UPDATE,
-                      p_const.PENDING_DELETE)
-
-
-def _fixup_res_dict(context, attr_name, res_dict, check_allow_post=True):
-    attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[attr_name]
-    try:
-        attributes.populate_tenant_id(context, res_dict, attr_info, True)
-        attributes.verify_attributes(res_dict, attr_info)
-    except webob.exc.HTTPBadRequest as e:
-        # convert webob exception into ValueError as these functions are
-        # for internal use. webob exception doesn't make sense.
-        raise ValueError(e.detail)
-    attributes.fill_default_value(attr_info, res_dict,
-                                  check_allow_post=check_allow_post)
-    attributes.convert_value(attr_info, res_dict)
-    return res_dict
-
-
-def create_network(core_plugin, context, net):
-    net_data = _fixup_res_dict(context, attributes.NETWORKS,
-                               net.get('network', {}))
-    return core_plugin.create_network(context, {'network': net_data})
-
-
-def create_subnet(core_plugin, context, subnet):
-    subnet_data = _fixup_res_dict(context, attributes.SUBNETS,
-                                  subnet.get('subnet', {}))
-    return core_plugin.create_subnet(context, {'subnet': subnet_data})
-
-
-def create_port(core_plugin, context, port, check_allow_post=True):
-    port_data = _fixup_res_dict(context, attributes.PORTS,
-                                port.get('port', {}),
-                                check_allow_post=check_allow_post)
-    return core_plugin.create_port(context, {'port': port_data})
-
-
-def get_interface_name(name, prefix='', max_len=n_const.DEVICE_NAME_MAX_LEN):
-    """Construct an interface name based on the prefix and name.
-
-    The interface name cannot exceed the maximum length passed in. Longer
-    names are hashed to help ensure uniqueness.
-    """
-    requested_name = prefix + name
-
-    if len(requested_name) <= max_len:
-        return requested_name
-
-    # We can't just truncate because interfaces may be distinguished
-    # by an ident at the end. A hash over the name should be unique.
-    # Leave part of the interface name on for easier identification
-    if (len(prefix) + INTERFACE_HASH_LEN) > max_len:
-        raise ValueError(_("Too long prefix provided. New name would exceed "
-                           "given length for an interface name."))
-
-    namelen = max_len - len(prefix) - INTERFACE_HASH_LEN
-    if isinstance(name, six.text_type):
-        hashed_name = hashlib.sha1(name.encode('utf-8'))
-    else:
-        hashed_name = hashlib.sha1(name)
-    new_name = ('%(prefix)s%(truncated)s%(hash)s' %
-                {'prefix': prefix, 'truncated': name[0:namelen],
-                 'hash': hashed_name.hexdigest()[0:INTERFACE_HASH_LEN]})
-    LOG.info(_LI("The requested interface name %(requested_name)s exceeds the "
-                 "%(limit)d character limitation. It was shortened to "
-                 "%(new_name)s to fit."),
-             {'requested_name': requested_name,
-              'limit': max_len, 'new_name': new_name})
-    return new_name
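
get_interface_name in a runnable nutshell (max_len of 15 matches the usual
Linux IFNAMSIZ limit and hash_len of 6 mirrors INTERFACE_HASH_LEN; the 'tap'
prefix is just an example):

    import hashlib

    def interface_name(name, prefix='tap', max_len=15, hash_len=6):
        full = prefix + name
        if len(full) <= max_len:
            return full
        # Hash-truncate: keep a readable stem, disambiguate with a digest
        digest = hashlib.sha1(name.encode('utf-8')).hexdigest()[:hash_len]
        stem = name[:max_len - len(prefix) - hash_len]
        return prefix + stem + digest

    print(interface_name('short'))                  # 'tapshort'
    print(len(interface_name('0123456789abcdef')))  # 15
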
diff --git a/neutron/plugins/hyperv/__init__.py b/neutron/plugins/hyperv/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/hyperv/agent/__init__.py b/neutron/plugins/hyperv/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/hyperv/agent/security_groups_driver.py b/neutron/plugins/hyperv/agent/security_groups_driver.py
deleted file mode 100644 (file)
index 17b606a..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2014 Cloudbase Solutions SRL
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from debtcollector import moves
-from hyperv.neutron import security_groups_driver as sg_driver
-from oslo_log import log as logging
-
-from neutron.i18n import _LW
-
-LOG = logging.getLogger(__name__)
-
-# TODO(claudiub): Remove this module at the beginning of the O cycle.
-
-new_driver = 'hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver'
-LOG.warn(_LW("You are using the deprecated firewall driver: %(deprecated)s. "
-             "Use the recommended driver %(new)s instead."),
-         {'deprecated': '%s.HyperVSecurityGroupsDriver' % __name__,
-          'new': new_driver})
-
-HyperVSecurityGroupsDriver = moves.moved_class(
-    sg_driver.HyperVSecurityGroupsDriver,
-    'HyperVSecurityGroupsDriver', __name__)
diff --git a/neutron/plugins/ml2/README b/neutron/plugins/ml2/README
deleted file mode 100644 (file)
index 0c1fe45..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-The Modular Layer 2 (ML2) plugin is a framework allowing OpenStack
-Networking to simultaneously utilize the variety of layer 2 networking
-technologies found in complex real-world data centers. It supports the
-Open vSwitch, Linux bridge, and Hyper-V L2 agents, replacing and
-deprecating the monolithic plugins previously associated with those
-agents, and can also support hardware devices and SDN controllers. The
-ML2 framework is intended to greatly simplify adding support for new
-L2 networking technologies, requiring much less initial and ongoing
-effort than would be required for an additional monolithic core
-plugin. It is also intended to foster innovation through its
-organization as optional driver modules.
-
-The ML2 plugin supports all the non-vendor-specific neutron API
-extensions, and works with the standard neutron DHCP agent. It
-utilizes the service plugin interface to implement the L3 router
-abstraction, allowing use of either the standard neutron L3 agent or
-alternative L3 solutions. Additional service plugins can also be used
-with the ML2 core plugin.
-
-Drivers within ML2 implement separately extensible sets of network
-types and of mechanisms for accessing networks of those
-types. Multiple mechanisms can be used simultaneously to access
-different ports of the same virtual network. Mechanisms can utilize L2
-agents via RPC and/or interact with external devices or
-controllers. By utilizing the multiprovidernet extension, virtual
-networks can be composed of multiple segments of the same or different
-types. Type and mechanism drivers are loaded as python entrypoints
-using the stevedore library.
-
-Each available network type is managed by an ML2 type driver.  Type
-drivers maintain any needed type-specific network state, and perform
-provider network validation and tenant network allocation. As of the
-Havana release, drivers for the local, flat, vlan, gre, and vxlan
-network types are included.
-
-Each available networking mechanism is managed by an ML2 mechanism
-driver. All registered mechanism drivers are called twice when
-networks, subnets, and ports are created, updated, or deleted. They
-are first called as part of the DB transaction, where they can
-maintain any needed driver-specific state. Once the transaction has
-been committed, they are called again, at which point they can
-interact with external devices and controllers. Mechanism drivers are
-also called as part of the port binding process, to determine whether
-the associated mechanism can provide connectivity for the network, and
-if so, the network segment and VIF driver to be used. The Havana
-release includes mechanism drivers for the Open vSwitch, Linux bridge,
-and Hyper-V L2 agents, and for vendor switches/controllers/etc.
-It also includes an L2 Population mechanism driver that
-can help optimize tunneled virtual network traffic.
-
-For additional information regarding the ML2 plugin and its collection
-of type and mechanism drivers, see the OpenStack manuals and
-http://wiki.openstack.org/wiki/Neutron/ML2.
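
To make the two-phase contract concrete, a minimal illustrative mechanism
driver (hypothetical; real drivers subclass the ML2 driver API and are
registered as neutron.ml2.mechanism_drivers entrypoints):

    class LoggingMechanismDriver(object):
        """Logs the two phases the text above describes."""

        def initialize(self):
            pass

        def create_network_precommit(self, context):
            # Inside the DB transaction: validate and record driver state;
            # never talk to external systems here
            print('precommit for network %s' % context.current['id'])

        def create_network_postcommit(self, context):
            # After commit: safe to program external devices/controllers
            print('postcommit: pushing %s out' % context.current['id'])

    class FakeContext(object):
        current = {'id': 'net-1'}

    driver = LoggingMechanismDriver()
    driver.create_network_precommit(FakeContext())
    driver.create_network_postcommit(FakeContext())
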
diff --git a/neutron/plugins/ml2/__init__.py b/neutron/plugins/ml2/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/common/__init__.py b/neutron/plugins/ml2/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/common/exceptions.py b/neutron/plugins/ml2/common/exceptions.py
deleted file mode 100644 (file)
index 349a80e..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Exceptions used by ML2."""
-
-from neutron._i18n import _
-from neutron.common import exceptions
-
-
-class MechanismDriverError(exceptions.NeutronException):
-    """Mechanism driver call failed."""
-    message = _("%(method)s failed.")
-
-
-class ExtensionDriverError(exceptions.InvalidInput):
-    """Extension driver call failed."""
-    message = _("Extension %(driver)s failed.")
-
-
-class ExtensionDriverNotFound(exceptions.InvalidConfigurationOption):
-    """Required extension driver not found in ML2 config."""
-    message = _("Extension driver %(driver)s required for "
-                "service plugin %(service_plugin)s not found.")
diff --git a/neutron/plugins/ml2/config.py b/neutron/plugins/ml2/config.py
deleted file mode 100644 (file)
index de89606..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-
-ml2_opts = [
-    cfg.ListOpt('type_drivers',
-                default=['local', 'flat', 'vlan', 'gre', 'vxlan', 'geneve'],
-                help=_("List of network type driver entrypoints to be loaded "
-                       "from the neutron.ml2.type_drivers namespace.")),
-    cfg.ListOpt('tenant_network_types',
-                default=['local'],
-                help=_("Ordered list of network_types to allocate as tenant "
-                       "networks. The default value 'local' is useful for "
-                       "single-box testing but provides no connectivity "
-                       "between hosts.")),
-    cfg.ListOpt('mechanism_drivers',
-                default=[],
-                help=_("An ordered list of networking mechanism driver "
-                       "entrypoints to be loaded from the "
-                       "neutron.ml2.mechanism_drivers namespace.")),
-    cfg.ListOpt('extension_drivers',
-                default=[],
-                help=_("An ordered list of extension driver "
-                       "entrypoints to be loaded from the "
-                       "neutron.ml2.extension_drivers namespace. "
-                       "For example: extension_drivers = port_security,qos")),
-    cfg.IntOpt('path_mtu', default=0,
-               help=_('The maximum permissible size of an unfragmented '
-                      'packet travelling to and from addresses where '
-                      'encapsulated Neutron traffic is sent. '
-                      'Drivers calculate maximum viable MTU for validating '
-                      'tenant requests based on this value (typically, '
-                      'path_mtu - maximum encapsulation header size). If <= 0, '
-                      'the path MTU is indeterminate and no calculation '
-                      'takes place.')),
-    cfg.IntOpt('segment_mtu', default=0,
-               help=_('The maximum permissible size of an unfragmented '
-                      'packet travelling across an L2 network segment. If <= 0, the '
-                      'segment MTU is indeterminate and no calculation takes '
-                      'place.')),
-    cfg.ListOpt('physical_network_mtus',
-                default=[],
-                help=_("A list of mappings of physical networks to MTU "
-                       "values. The format of the mapping is "
-                       "<physnet>:<mtu val>. This mapping allows "
-                       "specifying a physical network MTU value that "
-                       "differs from the default segment_mtu value.")),
-    cfg.StrOpt('external_network_type',
-               help=_("Default network type for external networks when no "
-                      "provider attributes are specified. By default it is "
-                      "None, which means that if provider attributes are not "
-                      "specified while creating external networks then they "
-                      "will have the same type as tenant networks. Allowed "
-                      "values for external_network_type config option depend "
-                      "on the network type values configured in type_drivers "
-                      "config option."))
-]
-
-
-cfg.CONF.register_opts(ml2_opts, "ml2")
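
For reference, a plausible ml2 configuration fragment exercising the
options above; the values are illustrative, not recommendations:

    [ml2]
    type_drivers = flat,vlan,vxlan
    tenant_network_types = vxlan
    mechanism_drivers = openvswitch,l2population
    extension_drivers = port_security
    path_mtu = 1500
    physical_network_mtus = physnet1:9000,physnet2:1500
    external_network_type = flat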
diff --git a/neutron/plugins/ml2/db.py b/neutron/plugins/ml2/db.py
deleted file mode 100644 (file)
index 6edaa74..0000000
+++ /dev/null
@@ -1,370 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_db import exception as db_exc
-from oslo_log import log
-from oslo_utils import uuidutils
-import six
-from sqlalchemy import or_
-from sqlalchemy.orm import exc
-
-from neutron._i18n import _LE, _LI
-from neutron.common import constants as n_const
-from neutron.db import models_v2
-from neutron.db import securitygroups_db as sg_db
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2 import models
-
-LOG = log.getLogger(__name__)
-
-# Limit the number of OR'ed port-id LIKE statements in a single query.
-MAX_PORTS_PER_QUERY = 500
-
-
-def _make_segment_dict(record):
-    """Make a segment dictionary out of a DB record."""
-    return {api.ID: record.id,
-            api.NETWORK_TYPE: record.network_type,
-            api.PHYSICAL_NETWORK: record.physical_network,
-            api.SEGMENTATION_ID: record.segmentation_id}
-
-
-def add_network_segment(session, network_id, segment, segment_index=0,
-                        is_dynamic=False):
-    with session.begin(subtransactions=True):
-        record = models.NetworkSegment(
-            id=uuidutils.generate_uuid(),
-            network_id=network_id,
-            network_type=segment.get(api.NETWORK_TYPE),
-            physical_network=segment.get(api.PHYSICAL_NETWORK),
-            segmentation_id=segment.get(api.SEGMENTATION_ID),
-            segment_index=segment_index,
-            is_dynamic=is_dynamic
-        )
-        session.add(record)
-        segment[api.ID] = record.id
-    LOG.info(_LI("Added segment %(id)s of type %(network_type)s for network"
-                 " %(network_id)s"),
-             {'id': record.id,
-              'network_type': record.network_type,
-              'network_id': record.network_id})
-
-
-def get_network_segments(session, network_id, filter_dynamic=False):
-    return get_networks_segments(
-        session, [network_id], filter_dynamic)[network_id]
-
-
-def get_networks_segments(session, network_ids, filter_dynamic=False):
-    with session.begin(subtransactions=True):
-        query = (session.query(models.NetworkSegment).
-                 filter(models.NetworkSegment.network_id.in_(network_ids)).
-                 order_by(models.NetworkSegment.segment_index))
-        if filter_dynamic is not None:
-            query = query.filter_by(is_dynamic=filter_dynamic)
-        records = query.all()
-        result = {net_id: [] for net_id in network_ids}
-        for record in records:
-            result[record.network_id].append(_make_segment_dict(record))
-        return result
-
-
-def get_segment_by_id(session, segment_id):
-    with session.begin(subtransactions=True):
-        try:
-            record = (session.query(models.NetworkSegment).
-                      filter_by(id=segment_id).
-                      one())
-            return _make_segment_dict(record)
-        except exc.NoResultFound:
-            return
-
-
-def get_dynamic_segment(session, network_id, physical_network=None,
-                        segmentation_id=None):
-        """Return a dynamic segment for the filters provided if one exists."""
-        with session.begin(subtransactions=True):
-            query = (session.query(models.NetworkSegment).
-                     filter_by(network_id=network_id, is_dynamic=True))
-            if physical_network:
-                query = query.filter_by(physical_network=physical_network)
-            if segmentation_id:
-                query = query.filter_by(segmentation_id=segmentation_id)
-            record = query.first()
-
-        if record:
-            return _make_segment_dict(record)
-        else:
-            LOG.debug("No dynamic segment found for "
-                      "Network:%(network_id)s, "
-                      "Physical network:%(physnet)s, "
-                      "segmentation_id:%(segmentation_id)s",
-                      {'network_id': network_id,
-                       'physnet': physical_network,
-                       'segmentation_id': segmentation_id})
-            return None
-
-
-def delete_network_segment(session, segment_id):
-    """Release a dynamic segment for the params provided if one exists."""
-    with session.begin(subtransactions=True):
-        (session.query(models.NetworkSegment).
-         filter_by(id=segment_id).delete())
-
-
-def add_port_binding(session, port_id):
-    with session.begin(subtransactions=True):
-        record = models.PortBinding(
-            port_id=port_id,
-            vif_type=portbindings.VIF_TYPE_UNBOUND)
-        session.add(record)
-        return record
-
-
-def get_locked_port_and_binding(session, port_id):
-    """Get port and port binding records for update within transaction."""
-
-    try:
-        # REVISIT(rkukura): We need the Port and PortBinding records
-        # to both be added to the session and locked for update. A
-        # single joined query should work, but the combination of left
-        # outer joins and postgresql doesn't seem to work.
-        port = (session.query(models_v2.Port).
-                enable_eagerloads(False).
-                filter_by(id=port_id).
-                with_lockmode('update').
-                one())
-        binding = (session.query(models.PortBinding).
-                   enable_eagerloads(False).
-                   filter_by(port_id=port_id).
-                   with_lockmode('update').
-                   one())
-        return port, binding
-    except exc.NoResultFound:
-        return None, None
-
-
-def set_binding_levels(session, levels):
-    if levels:
-        for level in levels:
-            session.add(level)
-        LOG.debug("For port %(port_id)s, host %(host)s, "
-                  "set binding levels %(levels)s",
-                  {'port_id': levels[0].port_id,
-                   'host': levels[0].host,
-                   'levels': levels})
-    else:
-        LOG.debug("Attempted to set empty binding levels")
-
-
-def get_binding_levels(session, port_id, host):
-    if host:
-        result = (session.query(models.PortBindingLevel).
-                  filter_by(port_id=port_id, host=host).
-                  order_by(models.PortBindingLevel.level).
-                  all())
-        LOG.debug("For port %(port_id)s, host %(host)s, "
-                  "got binding levels %(levels)s",
-                  {'port_id': port_id,
-                   'host': host,
-                   'levels': result})
-        return result
-
-
-def clear_binding_levels(session, port_id, host):
-    if host:
-        (session.query(models.PortBindingLevel).
-         filter_by(port_id=port_id, host=host).
-         delete())
-        LOG.debug("For port %(port_id)s, host %(host)s, "
-                  "cleared binding levels",
-                  {'port_id': port_id,
-                   'host': host})
-
-
-def ensure_dvr_port_binding(session, port_id, host, router_id=None):
-    record = (session.query(models.DVRPortBinding).
-              filter_by(port_id=port_id, host=host).first())
-    if record:
-        return record
-
-    try:
-        with session.begin(subtransactions=True):
-            record = models.DVRPortBinding(
-                port_id=port_id,
-                host=host,
-                router_id=router_id,
-                vif_type=portbindings.VIF_TYPE_UNBOUND,
-                vnic_type=portbindings.VNIC_NORMAL,
-                status=n_const.PORT_STATUS_DOWN)
-            session.add(record)
-            return record
-    except db_exc.DBDuplicateEntry:
-        LOG.debug("DVR Port %s already bound", port_id)
-        return (session.query(models.DVRPortBinding).
-                filter_by(port_id=port_id, host=host).one())
-
-
-def delete_dvr_port_binding(session, port_id, host):
-    with session.begin(subtransactions=True):
-        (session.query(models.DVRPortBinding).
-         filter_by(port_id=port_id, host=host).
-         delete(synchronize_session=False))
-
-
-def delete_dvr_port_binding_if_stale(session, binding):
-    if not binding.router_id and binding.status == n_const.PORT_STATUS_DOWN:
-        with session.begin(subtransactions=True):
-            LOG.debug("DVR: Deleting binding %s", binding)
-            session.delete(binding)
-
-
-def get_port(session, port_id):
-    """Get port record for update within transaction."""
-
-    with session.begin(subtransactions=True):
-        try:
-            record = (session.query(models_v2.Port).
-                      enable_eagerloads(False).
-                      filter(models_v2.Port.id.startswith(port_id)).
-                      one())
-            return record
-        except exc.NoResultFound:
-            return
-        except exc.MultipleResultsFound:
-            LOG.error(_LE("Multiple ports have port_id starting with %s"),
-                      port_id)
-            return
-
-
-def get_port_from_device_mac(context, device_mac):
-    LOG.debug("get_port_from_device_mac() called for mac %s", device_mac)
-    qry = context.session.query(models_v2.Port).filter_by(
-        mac_address=device_mac)
-    return qry.first()
-
-
-def get_ports_and_sgs(context, port_ids):
-    """Get ports from database with security group info."""
-
-    # break large queries into smaller parts
-    if len(port_ids) > MAX_PORTS_PER_QUERY:
-        LOG.debug("Number of ports %(pcount)s exceeds the maximum per "
-                  "query %(maxp)s. Partitioning queries.",
-                  {'pcount': len(port_ids), 'maxp': MAX_PORTS_PER_QUERY})
-        return (get_ports_and_sgs(context, port_ids[:MAX_PORTS_PER_QUERY]) +
-                get_ports_and_sgs(context, port_ids[MAX_PORTS_PER_QUERY:]))
-
-    LOG.debug("get_ports_and_sgs() called for port_ids %s", port_ids)
-
-    if not port_ids:
-        # If port_ids is empty, skip the DB query entirely.
-        return []
-    ports_to_sg_ids = get_sg_ids_grouped_by_port(context, port_ids)
-    return [make_port_dict_with_security_groups(port, sec_groups)
-            for port, sec_groups in six.iteritems(ports_to_sg_ids)]
-
-
-def get_sg_ids_grouped_by_port(context, port_ids):
-    sg_ids_grouped_by_port = {}
-    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
-
-    with context.session.begin(subtransactions=True):
-        # Partial UUIDs must be individually matched with startswith;
-        # full UUIDs may be matched directly in an IN statement.
-        partial_uuids = set(port_id for port_id in port_ids
-                            if not uuidutils.is_uuid_like(port_id))
-        full_uuids = set(port_ids) - partial_uuids
-        or_criteria = [models_v2.Port.id.startswith(port_id)
-                       for port_id in partial_uuids]
-        if full_uuids:
-            or_criteria.append(models_v2.Port.id.in_(full_uuids))
-
-        query = context.session.query(
-            models_v2.Port, sg_db.SecurityGroupPortBinding.security_group_id)
-        query = query.outerjoin(sg_db.SecurityGroupPortBinding,
-                                models_v2.Port.id == sg_binding_port)
-        query = query.filter(or_(*or_criteria))
-
-        for port, sg_id in query:
-            if port not in sg_ids_grouped_by_port:
-                sg_ids_grouped_by_port[port] = []
-            if sg_id:
-                sg_ids_grouped_by_port[port].append(sg_id)
-    return sg_ids_grouped_by_port
-
-
-def make_port_dict_with_security_groups(port, sec_groups):
-    plugin = manager.NeutronManager.get_plugin()
-    port_dict = plugin._make_port_dict(port)
-    port_dict['security_groups'] = sec_groups
-    port_dict['security_group_rules'] = []
-    port_dict['security_group_source_groups'] = []
-    port_dict['fixed_ips'] = [ip['ip_address']
-                              for ip in port['fixed_ips']]
-    return port_dict
-
-
-def get_port_binding_host(session, port_id):
-    try:
-        with session.begin(subtransactions=True):
-            query = (session.query(models.PortBinding).
-                     filter(models.PortBinding.port_id.startswith(port_id)).
-                     one())
-    except exc.NoResultFound:
-        LOG.debug("No binding found for port %(port_id)s",
-                  {'port_id': port_id})
-        return
-    except exc.MultipleResultsFound:
-        LOG.error(_LE("Multiple ports have port_id starting with %s"),
-                  port_id)
-        return
-    return query.host
-
-
-def generate_dvr_port_status(session, port_id):
-    # Derive the parent port's status from its DVR port bindings:
-    # ACTIVE if any binding is active, otherwise DOWN if any binding
-    # is down, otherwise BUILD.
-    query = session.query(models.DVRPortBinding)
-    final_status = n_const.PORT_STATUS_BUILD
-    for bind in query.filter(models.DVRPortBinding.port_id == port_id):
-        if bind.status == n_const.PORT_STATUS_ACTIVE:
-            return bind.status
-        elif bind.status == n_const.PORT_STATUS_DOWN:
-            final_status = bind.status
-    return final_status
-
-
-def get_dvr_port_binding_by_host(session, port_id, host):
-    with session.begin(subtransactions=True):
-        binding = (session.query(models.DVRPortBinding).
-                   filter(models.DVRPortBinding.port_id.startswith(port_id),
-                          models.DVRPortBinding.host == host).first())
-    if not binding:
-        LOG.debug("No binding for DVR port %(port_id)s with host "
-                  "%(host)s", {'port_id': port_id, 'host': host})
-    return binding
-
-
-def get_dvr_port_bindings(session, port_id):
-    with session.begin(subtransactions=True):
-        bindings = (session.query(models.DVRPortBinding).
-                    filter(models.DVRPortBinding.port_id.startswith(port_id)).
-                    all())
-    if not bindings:
-        LOG.debug("No bindings for DVR port %s", port_id)
-    return bindings
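
The recursion at the top of get_ports_and_sgs above is a generic
chunking pattern; a self-contained sketch of it, independent of any
database:

    MAX_ITEMS_PER_QUERY = 500  # mirrors MAX_PORTS_PER_QUERY above


    def fetch_in_chunks(fetch, ids, limit=MAX_ITEMS_PER_QUERY):
        """Split one large lookup into several bounded ones.

        fetch is any callable taking a list of ids and returning a
        list; partial results are concatenated in order.
        """
        if len(ids) > limit:
            return (fetch_in_chunks(fetch, ids[:limit], limit) +
                    fetch_in_chunks(fetch, ids[limit:], limit))
        if not ids:
            return []
        return fetch(ids)

    # e.g. fetch_in_chunks(lambda batch: [i * 2 for i in batch],
    #                      list(range(1200)))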
diff --git a/neutron/plugins/ml2/driver_api.py b/neutron/plugins/ml2/driver_api.py
deleted file mode 100644 (file)
index 39bc612..0000000
+++ /dev/null
@@ -1,1066 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import six
-
-# The following keys are used in the segment dictionaries passed via
-# the driver API. These are defined separately from similar keys in
-# neutron.extensions.providernet so that drivers don't need to change
-# if/when providernet moves to the core API.
-#
-ID = 'id'
-NETWORK_TYPE = 'network_type'
-PHYSICAL_NETWORK = 'physical_network'
-SEGMENTATION_ID = 'segmentation_id'
-MTU = 'mtu'
-
-# The following keys are used in the binding level dictionaries
-# available via the binding_levels and original_binding_levels
-# PortContext properties.
-BOUND_DRIVER = 'bound_driver'
-BOUND_SEGMENT = 'bound_segment'
-
-
-@six.add_metaclass(abc.ABCMeta)
-class TypeDriver(object):
-    """Define stable abstract interface for ML2 type drivers.
-
-    ML2 type drivers each support a specific network_type for provider
-    and/or tenant network segments. Type drivers must implement this
-    abstract interface, which defines the API by which the plugin uses
-    the driver to manage the persistent type-specific resource
-    allocation state associated with network segments of that type.
-
-    Network segments are represented by segment dictionaries using the
-    NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined
-    above, corresponding to the provider attributes.  Future revisions
-    of the TypeDriver API may add additional segment dictionary
-    keys. Attributes not applicable for a particular network_type may
-    either be excluded or stored as None.
-    """
-
-    @abc.abstractmethod
-    def get_type(self):
-        """Get driver's network type.
-
-        :returns: network_type value handled by this driver
-        """
-        pass
-
-    @abc.abstractmethod
-    def initialize(self):
-        """Perform driver initialization.
-
-        Called after all drivers have been loaded and the database has
-        been initialized. No abstract methods defined below will be
-        called prior to this method being called.
-        """
-        pass
-
-    @abc.abstractmethod
-    def is_partial_segment(self, segment):
-        """Return True if segment is a partially specified segment.
-
-        :param segment: segment dictionary
-        :returns: boolean
-        """
-
-    @abc.abstractmethod
-    def validate_provider_segment(self, segment):
-        """Validate attributes of a provider network segment.
-
-        :param segment: segment dictionary using keys defined above
-        :raises: neutron.common.exceptions.InvalidInput if invalid
-
-        Called outside transaction context to validate the provider
-        attributes for a provider network segment. Raise InvalidInput
-        if:
-
-         - any required attribute is missing
-         - any prohibited or unrecognized attribute is present
-         - any attribute value is not valid
-
-        The network_type attribute is present in segment, but
-        need not be validated.
-        """
-        pass
-
-    @abc.abstractmethod
-    def reserve_provider_segment(self, session, segment):
-        """Reserve resource associated with a provider network segment.
-
-        :param session: database session
-        :param segment: segment dictionary
-        :returns: segment dictionary
-
-        Called inside transaction context on session to reserve the
-        type-specific resource for a provider network segment. The
-        segment dictionary passed in was returned by a previous
-        validate_provider_segment() call.
-        """
-        pass
-
-    @abc.abstractmethod
-    def allocate_tenant_segment(self, session):
-        """Allocate resource for a new tenant network segment.
-
-        :param session: database session
-        :returns: segment dictionary using keys defined above
-
-        Called inside transaction context on session to allocate a new
-        tenant network, typically from a type-specific resource
-        pool. If successful, return a segment dictionary describing
-        the segment. If a tenant network segment cannot be allocated
-        (i.e. tenant networks not supported or resource pool is
-        exhausted), return None.
-        """
-        pass
-
-    @abc.abstractmethod
-    def release_segment(self, session, segment):
-        """Release network segment.
-
-        :param session: database session
-        :param segment: segment dictionary using keys defined above
-
-        Called inside transaction context on session to release a
-        tenant or provider network's type-specific resource. Runtime
-        errors are not expected, but raising an exception will result
-        in rollback of the transaction.
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_mtu(self, physical):
-        """Get driver's network MTU.
-
-        :returns: mtu, the maximum transmission unit
-
-        Returns the mtu for the network based on the config values and
-        the network type.
-        """
-        pass
-
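# A hedged sketch of the contract above, modeled on the trivial 'local'
# network type, which keeps no per-segment state; the exceptions import
# is an assumption about the surrounding tree.
#
#     from neutron.common import exceptions as exc
#
#     class LocalTypeDriver(TypeDriver):
#
#         def get_type(self):
#             return 'local'
#
#         def initialize(self):
#             pass
#
#         def is_partial_segment(self, segment):
#             return False
#
#         def validate_provider_segment(self, segment):
#             # 'local' segments carry nothing beyond the network_type.
#             for key, value in segment.items():
#                 if key != NETWORK_TYPE and value is not None:
#                     raise exc.InvalidInput(
#                         error_message="%s prohibited for local "
#                                       "provider network" % key)
#
#         def reserve_provider_segment(self, session, segment):
#             return segment
#
#         def allocate_tenant_segment(self, session):
#             return {NETWORK_TYPE: 'local'}
#
#         def release_segment(self, session, segment):
#             pass
#
#         def get_mtu(self, physical):
#             return 0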
-
-@six.add_metaclass(abc.ABCMeta)
-class NetworkContext(object):
-    """Context passed to MechanismDrivers for changes to network resources.
-
-    A NetworkContext instance wraps a network resource. It provides
-    helper methods for accessing other relevant information. Results
-    from expensive operations are cached so that other
-    MechanismDrivers can freely access the same information.
-    """
-
-    @abc.abstractproperty
-    def current(self):
-        """Return the network in its current configuration.
-
-        Return the network, as defined by
-        NeutronPluginBaseV2.create_network and all extensions in the
-        ml2 plugin, with all its properties 'current' at the time the
-        context was established.
-        """
-        pass
-
-    @abc.abstractproperty
-    def original(self):
-        """Return the network in its original configuration.
-
-        Return the network, with all its properties set to their
-        original values prior to a call to update_network. Method is
-        only valid within calls to update_network_precommit and
-        update_network_postcommit.
-        """
-        pass
-
-    @abc.abstractproperty
-    def network_segments(self):
-        """Return the segments associated with this network resource."""
-        pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class SubnetContext(object):
-    """Context passed to MechanismDrivers for changes to subnet resources.
-
-    A SubnetContext instance wraps a subnet resource. It provides
-    helper methods for accessing other relevant information. Results
-    from expensive operations are cached so that other
-    MechanismDrivers can freely access the same information.
-    """
-
-    @abc.abstractproperty
-    def current(self):
-        """Return the subnet in its current configuration.
-
-        Return the subnet, as defined by
-        NeutronPluginBaseV2.create_subnet and all extensions in the
-        ml2 plugin, with all its properties 'current' at the time the
-        context was established.
-        """
-        pass
-
-    @abc.abstractproperty
-    def original(self):
-        """Return the subnet in its original configuration.
-
-        Return the subnet, with all its properties set to their
-        original values prior to a call to update_subnet. Method is
-        only valid within calls to update_subnet_precommit and
-        update_subnet_postcommit.
-        """
-        pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class PortContext(object):
-    """Context passed to MechanismDrivers for changes to port resources.
-
-    A PortContext instance wraps a port resource. It provides helper
-    methods for accessing other relevant information. Results from
-    expensive operations are cached so that other MechanismDrivers can
-    freely access the same information.
-    """
-
-    @abc.abstractproperty
-    def current(self):
-        """Return the port in its current configuration.
-
-        Return the port, as defined by
-        NeutronPluginBaseV2.create_port and all extensions in the ml2
-        plugin, with all its properties 'current' at the time the
-        context was established.
-        """
-        pass
-
-    @abc.abstractproperty
-    def original(self):
-        """Return the port in its original configuration.
-
-        Return the port, with all its properties set to their
-        original values prior to a call to update_port. Method is
-        only valid within calls to update_port_precommit and
-        update_port_postcommit.
-        """
-        pass
-
-    @abc.abstractproperty
-    def status(self):
-        """Return the status of the current port."""
-        pass
-
-    @abc.abstractproperty
-    def original_status(self):
-        """Return the status of the original port.
-
-        The method is only valid within calls to update_port_precommit and
-        update_port_postcommit.
-        """
-        pass
-
-    @abc.abstractproperty
-    def network(self):
-        """Return the NetworkContext associated with this port."""
-        pass
-
-    @abc.abstractproperty
-    def binding_levels(self):
-        """Return dictionaries describing the current binding levels.
-
-        This property returns a list of dictionaries describing each
-        binding level if the port is bound or partially bound, or None
-        if the port is unbound. Each returned dictionary contains the
-        name of the bound driver under the BOUND_DRIVER key, and the
-        bound segment dictionary under the BOUND_SEGMENT key.
-
-        The first entry (index 0) describes the top-level binding,
-        which always involves one of the port's network's static
-        segments. In the case of a hierarchical binding, subsequent
-        entries describe the lower-level bindings in descending order,
-        which may involve dynamic segments. Adjacent levels where
-        different drivers bind the same static or dynamic segment are
-        possible. The last entry (index -1) describes the bottom-level
-        binding that supplied the port's binding:vif_type and
-        binding:vif_details attribute values.
-
-        Within calls to MechanismDriver.bind_port, descriptions of the
-        levels above the level currently being bound are returned.
-        """
-        pass
-
-    @abc.abstractproperty
-    def original_binding_levels(self):
-        """Return dictionaries describing the original binding levels.
-
-        This property returns a list of dictionaries describing each
-        original binding level if the port was previously bound, or
-        None if the port was unbound. The content is as described for
-        the binding_levels property.
-
-        This property is only valid within calls to
-        update_port_precommit and update_port_postcommit. It returns
-        None otherwise.
-        """
-        pass
-
-    @abc.abstractproperty
-    def top_bound_segment(self):
-        """Return the current top-level bound segment dictionary.
-
-        This property returns the current top-level bound segment
-        dictionary, or None if the port is unbound. For a bound port,
-        top_bound_segment is equivalent to
-        binding_levels[0][BOUND_SEGMENT], and returns one of the
-        port's network's static segments.
-        """
-        pass
-
-    @abc.abstractproperty
-    def original_top_bound_segment(self):
-        """Return the original top-level bound segment dictionary.
-
-        This property returns the original top-level bound segment
-        dictionary, or None if the port was previously unbound. For a
-        previously bound port, original_top_bound_segment is
-        equivalent to original_binding_levels[0][BOUND_SEGMENT], and
-        returns one of the port's network's static segments.
-
-        This property is only valid within calls to
-        update_port_precommit and update_port_postcommit. It returns
-        None otherwise.
-        """
-        pass
-
-    @abc.abstractproperty
-    def bottom_bound_segment(self):
-        """Return the current bottom-level bound segment dictionary.
-
-        This property returns the current bottom-level bound segment
-        dictionary, or None if the port is unbound. For a bound port,
-        bottom_bound_segment is equivalent to
-        binding_levels[-1][BOUND_SEGMENT], and returns the segment
-        whose binding supplied the port's binding:vif_type and
-        binding:vif_details attribute values.
-        """
-        pass
-
-    @abc.abstractproperty
-    def original_bottom_bound_segment(self):
-        """Return the original bottom-level bound segment dictionary.
-
-        This property returns the original bottom-level bound segment
-        dictionary, or None if the port was previously unbound. For a
-        previously bound port, original_bottom_bound_segment is
-        equivalent to original_binding_levels[-1][BOUND_SEGMENT], and
-        returns the segment whose binding supplied the port's previous
-        binding:vif_type and binding:vif_details attribute values.
-
-        This property is only valid within calls to
-        update_port_precommit and update_port_postcommit. It returns
-        None otherwise.
-        """
-        pass
-
-    @abc.abstractproperty
-    def host(self):
-        """Return the host with which the port is associated.
-
-        In the context of a host-specific operation on a distributed
-        port, the host property indicates the host for which the port
-        operation is being performed. Otherwise, it is the same value
-        as current['binding:host_id'].
-        """
-        pass
-
-    @abc.abstractproperty
-    def original_host(self):
-        """Return the original host with which the port was associated.
-
-        In the context of a host-specific operation on a distributed
-        port, the original_host property indicates the host for which
-        the port operation is being performed. Otherwise, it is the
-        same value as original['binding:host_id'].
-
-        This property is only valid within calls to
-        update_port_precommit and update_port_postcommit. It returns
-        None otherwise.
-        """
-        pass
-
-    @abc.abstractproperty
-    def vif_type(self):
-        """Return the vif_type indicating the binding state of the port.
-
-        In the context of a host-specific operation on a distributed
-        port, the vif_type property indicates the binding state for
-        the host for which the port operation is being
-        performed. Otherwise, it is the same value as
-        current['binding:vif_type'].
-        """
-        pass
-
-    @abc.abstractproperty
-    def original_vif_type(self):
-        """Return the original vif_type of the port.
-
-        In the context of a host-specific operation on a distributed
-        port, the original_vif_type property indicates original
-        binding state for the host for which the port operation is
-        being performed. Otherwise, it is the same value as
-        original['binding:vif_type'].
-
-        This property is only valid within calls to
-        update_port_precommit and update_port_postcommit. It returns
-        None otherwise.
-        """
-        pass
-
-    @abc.abstractproperty
-    def vif_details(self):
-        """Return the vif_details describing the binding of the port.
-
-        In the context of a host-specific operation on a distributed
-        port, the vif_details property describes the binding for the
-        host for which the port operation is being
-        performed. Otherwise, it is the same value as
-        current['binding:vif_details'].
-        """
-        pass
-
-    @abc.abstractproperty
-    def original_vif_details(self):
-        """Return the original vif_details of the port.
-
-        In the context of a host-specific operation on a distributed
-        port, the original_vif_details property describes the original
-        binding for the host for which the port operation is being
-        performed. Otherwise, it is the same value as
-        original['binding:vif_details'].
-
-        This property is only valid within calls to
-        update_port_precommit and update_port_postcommit. It returns
-        None otherwise.
-        """
-        pass
-
-    @abc.abstractproperty
-    def segments_to_bind(self):
-        """Return the list of segments with which to bind the port.
-
-        This property returns the list of segment dictionaries with
-        which the mechanism driver may bind the port. When
-        establishing a top-level binding, these will be the port's
-        network's static segments. For each subsequent level, these
-        will be the segments passed to continue_binding by the
-        mechanism driver that bound the level above.
-
-        This property is only valid within calls to
-        MechanismDriver.bind_port. It returns None otherwise.
-        """
-        pass
-
-    @abc.abstractmethod
-    def host_agents(self, agent_type):
-        """Get agents of the specified type on port's host.
-
-        :param agent_type: Agent type identifier
-        :returns: List of agents_db.Agent records
-        """
-        pass
-
-    @abc.abstractmethod
-    def set_binding(self, segment_id, vif_type, vif_details,
-                    status=None):
-        """Set the bottom-level binding for the port.
-
-        :param segment_id: Network segment bound for the port.
-        :param vif_type: The VIF type for the bound port.
-        :param vif_details: Dictionary with details for VIF driver.
-        :param status: Port status to set if not None.
-
-        This method is called by MechanismDriver.bind_port to indicate
-        success and specify binding details to use for port. The
-        segment_id must identify an item in the current value of the
-        segments_to_bind property.
-        """
-        pass
-
-    @abc.abstractmethod
-    def continue_binding(self, segment_id, next_segments_to_bind):
-        """Continue binding the port with different segments.
-
-        :param segment_id: Network segment partially bound for the port.
-        :param next_segments_to_bind: Segments to continue binding with.
-
-        This method is called by MechanismDriver.bind_port to indicate
-        it was able to partially bind the port, but that one or more
-        additional mechanism drivers are required to complete the
-        binding. The segment_id must identify an item in the current
-        value of the segments_to_bind property. The list of segment
-        IDs passed as next_segments_to_bind identifies dynamic (or
-        static) segments of the port's network that will be used to
-        populate segments_to_bind for the next lower level of a
-        hierarchical binding.
-        """
-        pass
-
-    @abc.abstractmethod
-    def allocate_dynamic_segment(self, segment):
-        """Allocate a dynamic segment.
-
-        :param segment: A partially or fully specified segment dictionary
-
-        Called by the MechanismDriver.bind_port, create_port or update_port
-        to dynamically allocate a segment for the port using the partial
-        segment specified. The segment dictionary can be a fully or partially
-        specified segment. At a minimum it needs the network_type populated to
-        call on the appropriate type driver.
-        """
-        pass
-
-    @abc.abstractmethod
-    def release_dynamic_segment(self, segment_id):
-        """Release an allocated dynamic segment.
-
-        :param segment_id: UUID of the dynamic network segment.
-
-        Called by the MechanismDriver.delete_port or update_port to release
-        the dynamic segment allocated for this port.
-        """
-        pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class MechanismDriver(object):
-    """Define stable abstract interface for ML2 mechanism drivers.
-
-    A mechanism driver is called on the creation, update, and deletion
-    of networks and ports. For every event, there are two methods that
-    get called - one within the database transaction (method suffix of
-    _precommit), one right afterwards (method suffix of _postcommit).
-
-    Methods called inside the transaction may raise an exception to
-    roll it back, but should not make any blocking calls (for
-    example, REST requests to an outside controller). Methods called
-    after the transaction commits can make blocking external calls,
-    though these will block the entire process. Exceptions raised in
-    calls after the transaction commits may cause the associated
-    resource to be deleted.
-
-    Because rollback outside of the transaction is not done in the
-    update network/port case, all data validation must be done within
-    methods that are part of the database transaction.
-    """
-
-    @abc.abstractmethod
-    def initialize(self):
-        """Perform driver initialization.
-
-        Called after all drivers have been loaded and the database has
-        been initialized. No abstract methods defined below will be
-        called prior to this method being called.
-        """
-        pass
-
-    def create_network_precommit(self, context):
-        """Allocate resources for a new network.
-
-        :param context: NetworkContext instance describing the new
-        network.
-
-        Create a new network, allocating resources as necessary in the
-        database. Called inside transaction context on session. Call
-        cannot block.  Raising an exception will result in a rollback
-        of the current transaction.
-        """
-        pass
-
-    def create_network_postcommit(self, context):
-        """Create a network.
-
-        :param context: NetworkContext instance describing the new
-        network.
-
-        Called after the transaction commits. Call can block, though
-        will block the entire process so care should be taken to not
-        drastically affect performance. Raising an exception will
-        cause the deletion of the resource.
-        """
-        pass
-
-    def update_network_precommit(self, context):
-        """Update resources of a network.
-
-        :param context: NetworkContext instance describing the new
-        state of the network, as well as the original state prior
-        to the update_network call.
-
-        Update values of a network, updating the associated resources
-        in the database. Called inside transaction context on session.
-        Raising an exception will result in rollback of the
-        transaction.
-
-        update_network_precommit is called for all changes to the
-        network state. It is up to the mechanism driver to ignore
-        state or state changes that it does not know or care about.
-        """
-        pass
-
-    def update_network_postcommit(self, context):
-        """Update a network.
-
-        :param context: NetworkContext instance describing the new
-        state of the network, as well as the original state prior
-        to the update_network call.
-
-        Called after the transaction commits. Call can block, though
-        will block the entire process so care should be taken to not
-        drastically affect performance. Raising an exception will
-        cause the deletion of the resource.
-
-        update_network_postcommit is called for all changes to the
-        network state.  It is up to the mechanism driver to ignore
-        state or state changes that it does not know or care about.
-        """
-        pass
-
-    def delete_network_precommit(self, context):
-        """Delete resources for a network.
-
-        :param context: NetworkContext instance describing the current
-        state of the network, prior to the call to delete it.
-
-        Delete network resources previously allocated by this
-        mechanism driver for a network. Called inside transaction
-        context on session. Runtime errors are not expected, but
-        raising an exception will result in rollback of the
-        transaction.
-        """
-        pass
-
-    def delete_network_postcommit(self, context):
-        """Delete a network.
-
-        :param context: NetworkContext instance describing the current
-        state of the network, prior to the call to delete it.
-
-        Called after the transaction commits. Call can block, though
-        will block the entire process so care should be taken to not
-        drastically affect performance. Runtime errors are not
-        expected, and will not prevent the resource from being
-        deleted.
-        """
-        pass
-
-    def create_subnet_precommit(self, context):
-        """Allocate resources for a new subnet.
-
-        :param context: SubnetContext instance describing the new
-        subnet.
-
-        Create a new subnet, allocating resources as necessary in the
-        database. Called inside transaction context on session. Call
-        cannot block.  Raising an exception will result in a rollback
-        of the current transaction.
-        """
-        pass
-
-    def create_subnet_postcommit(self, context):
-        """Create a subnet.
-
-        :param context: SubnetContext instance describing the new
-        subnet.
-
-        Called after the transaction commits. Call can block, though
-        will block the entire process so care should be taken to not
-        drastically affect performance. Raising an exception will
-        cause the deletion of the resource.
-        """
-        pass
-
-    def update_subnet_precommit(self, context):
-        """Update resources of a subnet.
-
-        :param context: SubnetContext instance describing the new
-        state of the subnet, as well as the original state prior
-        to the update_subnet call.
-
-        Update values of a subnet, updating the associated resources
-        in the database. Called inside transaction context on session.
-        Raising an exception will result in rollback of the
-        transaction.
-
-        update_subnet_precommit is called for all changes to the
-        subnet state. It is up to the mechanism driver to ignore
-        state or state changes that it does not know or care about.
-        """
-        pass
-
-    def update_subnet_postcommit(self, context):
-        """Update a subnet.
-
-        :param context: SubnetContext instance describing the new
-        state of the subnet, as well as the original state prior
-        to the update_subnet call.
-
-        Called after the transaction commits. Call can block, though
-        will block the entire process so care should be taken to not
-        drastically affect performance. Raising an exception will
-        cause the deletion of the resource.
-
-        update_subnet_postcommit is called for all changes to the
-        subnet state.  It is up to the mechanism driver to ignore
-        state or state changes that it does not know or care about.
-        """
-        pass
-
-    def delete_subnet_precommit(self, context):
-        """Delete resources for a subnet.
-
-        :param context: SubnetContext instance describing the current
-        state of the subnet, prior to the call to delete it.
-
-        Delete subnet resources previously allocated by this
-        mechanism driver for a subnet. Called inside transaction
-        context on session. Runtime errors are not expected, but
-        raising an exception will result in rollback of the
-        transaction.
-        """
-        pass
-
-    def delete_subnet_postcommit(self, context):
-        """Delete a subnet.
-
-        :param context: SubnetContext instance describing the current
-        state of the subnet, prior to the call to delete it.
-
-        Called after the transaction commits. Call can block, though
-        will block the entire process so care should be taken to not
-        drastically affect performance. Runtime errors are not
-        expected, and will not prevent the resource from being
-        deleted.
-        """
-        pass
-
-    def create_port_precommit(self, context):
-        """Allocate resources for a new port.
-
-        :param context: PortContext instance describing the port.
-
-        Create a new port, allocating resources as necessary in the
-        database. Called inside transaction context on session. Call
-        cannot block.  Raising an exception will result in a rollback
-        of the current transaction.
-        """
-        pass
-
-    def create_port_postcommit(self, context):
-        """Create a port.
-
-        :param context: PortContext instance describing the port.
-
-        Called after the transaction completes. Call can block, though
-        will block the entire process so care should be taken to not
-        drastically affect performance.  Raising an exception will
-        result in the deletion of the resource.
-        """
-        pass
-
-    def update_port_precommit(self, context):
-        """Update resources of a port.
-
-        :param context: PortContext instance describing the new
-        state of the port, as well as the original state prior
-        to the update_port call.
-
-        Called inside transaction context on session to complete a
-        port update as defined by this mechanism driver. Raising an
-        exception will result in rollback of the transaction.
-
-        update_port_precommit is called for all changes to the port
-        state. It is up to the mechanism driver to ignore state or
-        state changes that it does not know or care about.
-        """
-        pass
-
-    def update_port_postcommit(self, context):
-        """Update a port.
-
-        :param context: PortContext instance describing the new
-        state of the port, as well as the original state prior
-        to the update_port call.
-
-        Called after the transaction completes. Call can block, though
-        will block the entire process so care should be taken to not
-        drastically affect performance.  Raising an exception will
-        result in the deletion of the resource.
-
-        update_port_postcommit is called for all changes to the port
-        state. It is up to the mechanism driver to ignore state or
-        state changes that it does not know or care about.
-        """
-        pass
-
-    def delete_port_precommit(self, context):
-        """Delete resources of a port.
-
-        :param context: PortContext instance describing the current
-        state of the port, prior to the call to delete it.
-
-        Called inside transaction context on session. Runtime errors
-        are not expected, but raising an exception will result in
-        rollback of the transaction.
-        """
-        pass
-
-    def delete_port_postcommit(self, context):
-        """Delete a port.
-
-        :param context: PortContext instance describing the current
-        state of the port, prior to the call to delete it.
-
-        Called after the transaction completes. Call can block, though
-        will block the entire process so care should be taken to not
-        drastically affect performance.  Runtime errors are not
-        expected, and will not prevent the resource from being
-        deleted.
-        """
-        pass
-
-    def bind_port(self, context):
-        """Attempt to bind a port.
-
-        :param context: PortContext instance describing the port
-
-        This method is called outside any transaction to attempt to
-        establish a port binding using this mechanism driver. Bindings
-        may be created at each of multiple levels of a hierarchical
-        network, and are established from the top level downward. At
-        each level, the mechanism driver determines whether it can
-        bind to any of the network segments in the
-        context.segments_to_bind property, based on the value of the
-        context.host property, any relevant port or network
-        attributes, and its own knowledge of the network topology. At
-        the top level, context.segments_to_bind contains the static
-        segments of the port's network. At each lower level of
-        binding, it contains static or dynamic segments supplied by
-        the driver that bound at the level above. If the driver is
-        able to complete the binding of the port to any segment in
-        context.segments_to_bind, it must call context.set_binding
-        with the binding details. If it can partially bind the port,
-        it must call context.continue_binding with the network
-        segments to be used to bind at the next lower level.
-
-        If the binding results are committed after bind_port returns,
-        they will be seen by all mechanism drivers as
-        update_port_precommit and update_port_postcommit calls. But if
-        some other thread or process concurrently binds or updates the
-        port, these binding results will not be committed, and
-        update_port_precommit and update_port_postcommit will not be
-        called on the mechanism drivers with these results. Because
-        binding results can be discarded rather than committed,
-        drivers should avoid making persistent state changes in
-        bind_port, or else must ensure that such state changes are
-        eventually cleaned up.
-
-        Implementing this method explicitly declares the mechanism
-        driver as having the intention to bind ports. This is inspected
-        by the QoS service to identify the available QoS rules you
-        can use with ports.
-        """
-        pass
-
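    # A hedged sketch of how agent-style drivers in the tree typically
    # implement this contract; check_segment(), self.vif_type and
    # self.vif_details stand in for driver-specific pieces:
    #
    #     def bind_port(self, context):
    #         for segment in context.segments_to_bind:
    #             if self.check_segment(segment):
    #                 context.set_binding(segment[ID],
    #                                     self.vif_type,
    #                                     self.vif_details)
    #                 return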
-    @property
-    def _supports_port_binding(self):
-        return self.__class__.bind_port != MechanismDriver.bind_port
-
-    def check_vlan_transparency(self, context):
-        """Check if the network supports vlan transparency.
-
-        :param context: NetworkContext instance describing the network.
-
-        Return True if the network can support vlan transparency.
-        """
-        pass
-
-    def get_workers(self):
-        """Get any NeutronWorker instances that should have their own process
-
-        Any driver that needs to run processes separate from the API or RPC
-        workers, can return a sequence of NeutronWorker instances.
-        """
-        return ()
-
-
-@six.add_metaclass(abc.ABCMeta)
-class ExtensionDriver(object):
-    """Define stable abstract interface for ML2 extension drivers.
-
-    An extension driver extends the core resources implemented by the
-    ML2 plugin with additional attributes. Methods that process create
-    and update operations for these resources validate and persist
-    values for extended attributes supplied through the API. Other
-    methods extend the resource dictionaries returned from the API
-    operations with the values of the extended attributes.
-    """
-
-    @abc.abstractmethod
-    def initialize(self):
-        """Perform driver initialization.
-
-        Called after all drivers have been loaded and the database has
-        been initialized. No abstract methods defined below will be
-        called prior to this method being called.
-        """
-        pass
-
-    @property
-    def extension_alias(self):
-        """Supported extension alias.
-
-        Return the alias identifying the core API extension supported
-        by this driver. Do not declare this property if API extension
-        handling is left to a service plugin and the driver only needs
-        to provide core resource extension and updates.
-        """
-        pass
-
-    def process_create_network(self, plugin_context, data, result):
-        """Process extended attributes for create network.
-
-        :param plugin_context: plugin request context
-        :param data: dictionary of incoming network data
-        :param result: network dictionary to extend
-
-        Called inside transaction context on plugin_context.session to
-        validate and persist any extended network attributes defined by this
-        driver. Extended attribute values must also be added to
-        result.
-        """
-        pass
-
-    def process_create_subnet(self, plugin_context, data, result):
-        """Process extended attributes for create subnet.
-
-        :param plugin_context: plugin request context
-        :param data: dictionary of incoming subnet data
-        :param result: subnet dictionary to extend
-
-        Called inside transaction context on plugin_context.session to
-        validate and persist any extended subnet attributes defined by this
-        driver. Extended attribute values must also be added to
-        result.
-        """
-        pass
-
-    def process_create_port(self, plugin_context, data, result):
-        """Process extended attributes for create port.
-
-        :param plugin_context: plugin request context
-        :param data: dictionary of incoming port data
-        :param result: port dictionary to extend
-
-        Called inside transaction context on plugin_context.session to
-        validate and persist any extended port attributes defined by this
-        driver. Extended attribute values must also be added to
-        result.
-        """
-        pass
-
-    def process_update_network(self, plugin_context, data, result):
-        """Process extended attributes for update network.
-
-        :param plugin_context: plugin request context
-        :param data: dictionary of incoming network data
-        :param result: network dictionary to extend
-
-        Called inside transaction context on plugin_context.session to
-        validate and update any extended network attributes defined by this
-        driver. Extended attribute values, whether updated or not,
-        must also be added to result.
-        """
-        pass
-
-    def process_update_subnet(self, plugin_context, data, result):
-        """Process extended attributes for update subnet.
-
-        :param plugin_context: plugin request context
-        :param data: dictionary of incoming subnet data
-        :param result: subnet dictionary to extend
-
-        Called inside transaction context on plugin_context.session to
-        validate and update any extended subnet attributes defined by this
-        driver. Extended attribute values, whether updated or not,
-        must also be added to result.
-        """
-        pass
-
-    def process_update_port(self, plugin_context, data, result):
-        """Process extended attributes for update port.
-
-        :param plugin_context: plugin request context
-        :param data: dictionary of incoming port data
-        :param result: port dictionary to extend
-
-        Called inside transaction context on plugin_context.session to
-        validate and update any extended port attributes defined by this
-        driver. Extended attribute values, whether updated or not,
-        must also be added to result.
-        """
-        pass
-
-    def extend_network_dict(self, session, base_model, result):
-        """Add extended attributes to network dictionary.
-
-        :param session: database session
-        :param base_model: network model data
-        :param result: network dictionary to extend
-
-        Called inside transaction context on session to add any
-        extended attributes defined by this driver to a network
-        dictionary to be used for mechanism driver calls and/or
-        returned as the result of a network operation.
-        """
-        pass
-
-    def extend_subnet_dict(self, session, base_model, result):
-        """Add extended attributes to subnet dictionary.
-
-        :param session: database session
-        :param base_model: subnet model data
-        :param result: subnet dictionary to extend
-
-        Called inside transaction context on session to add any
-        extended attributes defined by this driver to a subnet
-        dictionary to be used for mechanism driver calls and/or
-        returned as the result of a subnet operation.
-        """
-        pass
-
-    def extend_port_dict(self, session, base_model, result):
-        """Add extended attributes to port dictionary.
-
-        :param session: database session
-        :param base_model: port model data
-        :param result: port dictionary to extend
-
-        Called inside transaction context on session to add any
-        extended attributes defined by this driver to a port
-        dictionary to be used for mechanism driver calls
-        and/or returned as the result of a port operation.
-        """
-        pass
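-
-    # A minimal sketch of a concrete extension driver (the 'zone' attribute
-    # and all names below are hypothetical, for illustration only):
-    #
-    #     class ZoneExtensionDriver(ExtensionDriver):
-    #
-    #         def initialize(self):
-    #             pass
-    #
-    #         @property
-    #         def extension_alias(self):
-    #             return 'zone'
-    #
-    #         def process_create_port(self, plugin_context, data, result):
-    #             result['zone'] = data.get('zone')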
diff --git a/neutron/plugins/ml2/driver_context.py b/neutron/plugins/ml2/driver_context.py
deleted file mode 100644 (file)
index f6e71a6..0000000
+++ /dev/null
@@ -1,266 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log
-from oslo_serialization import jsonutils
-
-from neutron._i18n import _LW
-from neutron.common import constants
-from neutron.extensions import portbindings
-from neutron.plugins.ml2 import db
-from neutron.plugins.ml2 import driver_api as api
-
-LOG = log.getLogger(__name__)
-
-
-class MechanismDriverContext(object):
-    """MechanismDriver context base class."""
-    def __init__(self, plugin, plugin_context):
-        self._plugin = plugin
-        # This temporarily creates a reference loop, but the
-        # lifetime of PortContext is limited to a single
-        # method call of the plugin.
-        self._plugin_context = plugin_context
-
-
-class NetworkContext(MechanismDriverContext, api.NetworkContext):
-
-    def __init__(self, plugin, plugin_context, network,
-                 original_network=None):
-        super(NetworkContext, self).__init__(plugin, plugin_context)
-        self._network = network
-        self._original_network = original_network
-        self._segments = db.get_network_segments(plugin_context.session,
-                                                 network['id'])
-
-    @property
-    def current(self):
-        return self._network
-
-    @property
-    def original(self):
-        return self._original_network
-
-    @property
-    def network_segments(self):
-        return self._segments
-
-
-class SubnetContext(MechanismDriverContext, api.SubnetContext):
-
-    def __init__(self, plugin, plugin_context, subnet, network,
-                 original_subnet=None):
-        super(SubnetContext, self).__init__(plugin, plugin_context)
-        self._subnet = subnet
-        self._original_subnet = original_subnet
-        self._network_context = NetworkContext(plugin, plugin_context,
-                                               network)
-
-    @property
-    def current(self):
-        return self._subnet
-
-    @property
-    def original(self):
-        return self._original_subnet
-
-    @property
-    def network(self):
-        return self._network_context
-
-
-class PortContext(MechanismDriverContext, api.PortContext):
-
-    def __init__(self, plugin, plugin_context, port, network, binding,
-                 binding_levels, original_port=None):
-        super(PortContext, self).__init__(plugin, plugin_context)
-        self._port = port
-        self._original_port = original_port
-        self._network_context = NetworkContext(plugin, plugin_context,
-                                               network)
-        self._binding = binding
-        self._binding_levels = binding_levels
-        self._segments_to_bind = None
-        self._new_bound_segment = None
-        self._next_segments_to_bind = None
-        if original_port:
-            self._original_vif_type = binding.vif_type
-            self._original_vif_details = self._plugin._get_vif_details(binding)
-            self._original_binding_levels = self._binding_levels
-        else:
-            self._original_vif_type = None
-            self._original_vif_details = None
-            self._original_binding_levels = None
-        self._new_port_status = None
-
-    # The following methods are for use by the ML2 plugin and are not
-    # part of the driver API.
-
-    def _prepare_to_bind(self, segments_to_bind):
-        self._segments_to_bind = segments_to_bind
-        self._new_bound_segment = None
-        self._next_segments_to_bind = None
-
-    def _clear_binding_levels(self):
-        self._binding_levels = []
-
-    def _push_binding_level(self, binding_level):
-        self._binding_levels.append(binding_level)
-
-    def _pop_binding_level(self):
-        return self._binding_levels.pop()
-
-    # The following implement the abstract methods and properties of
-    # the driver API.
-
-    @property
-    def current(self):
-        return self._port
-
-    @property
-    def original(self):
-        return self._original_port
-
-    @property
-    def status(self):
-        # REVISIT(rkukura): Eliminate special DVR case as part of
-        # resolving bug 1367391?
-        if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
-            return self._binding.status
-
-        return self._port['status']
-
-    @property
-    def original_status(self):
-        # REVISIT(rkukura): Should return host-specific status for DVR
-        # ports. Fix as part of resolving bug 1367391.
-        if self._original_port:
-            return self._original_port['status']
-
-    @property
-    def network(self):
-        return self._network_context
-
-    @property
-    def binding_levels(self):
-        if self._binding_levels:
-            return [{
-                api.BOUND_DRIVER: level.driver,
-                api.BOUND_SEGMENT: self._expand_segment(level.segment_id)
-            } for level in self._binding_levels]
-
-    @property
-    def original_binding_levels(self):
-        if self._original_binding_levels:
-            return [{
-                api.BOUND_DRIVER: level.driver,
-                api.BOUND_SEGMENT: self._expand_segment(level.segment_id)
-            } for level in self._original_binding_levels]
-
-    @property
-    def top_bound_segment(self):
-        if self._binding_levels:
-            return self._expand_segment(self._binding_levels[0].segment_id)
-
-    @property
-    def original_top_bound_segment(self):
-        if self._original_binding_levels:
-            return self._expand_segment(
-                self._original_binding_levels[0].segment_id)
-
-    @property
-    def bottom_bound_segment(self):
-        if self._binding_levels:
-            return self._expand_segment(self._binding_levels[-1].segment_id)
-
-    @property
-    def original_bottom_bound_segment(self):
-        if self._original_binding_levels:
-            return self._expand_segment(
-                self._original_binding_levels[-1].segment_id)
-
-    def _expand_segment(self, segment_id):
-        segment = db.get_segment_by_id(self._plugin_context.session,
-                                       segment_id)
-        if not segment:
-            LOG.warning(_LW("Could not expand segment %s"), segment_id)
-        return segment
-
-    @property
-    def host(self):
-        # REVISIT(rkukura): Eliminate special DVR case as part of
-        # resolving bug 1367391?
-        if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
-            return self._binding.host
-
-        return self._port.get(portbindings.HOST_ID)
-
-    @property
-    def original_host(self):
-        # REVISIT(rkukura): Eliminate special DVR case as part of
-        # resolving bug 1367391?
-        if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
-            return self._original_port and self._binding.host
-        else:
-            return (self._original_port and
-                    self._original_port.get(portbindings.HOST_ID))
-
-    @property
-    def vif_type(self):
-        return self._binding.vif_type
-
-    @property
-    def original_vif_type(self):
-        return self._original_vif_type
-
-    @property
-    def vif_details(self):
-        return self._plugin._get_vif_details(self._binding)
-
-    @property
-    def original_vif_details(self):
-        return self._original_vif_details
-
-    @property
-    def segments_to_bind(self):
-        return self._segments_to_bind
-
-    def host_agents(self, agent_type):
-        return self._plugin.get_agents(self._plugin_context,
-                                       filters={'agent_type': [agent_type],
-                                                'host': [self._binding.host]})
-
-    def set_binding(self, segment_id, vif_type, vif_details,
-                    status=None):
-        # TODO(rkukura) Verify binding allowed, segment in network
-        self._new_bound_segment = segment_id
-        self._binding.vif_type = vif_type
-        self._binding.vif_details = jsonutils.dumps(vif_details)
-        self._new_port_status = status
-
-    def continue_binding(self, segment_id, next_segments_to_bind):
-        # TODO(rkukura) Verify binding allowed, segment in network
-        self._new_bound_segment = segment_id
-        self._next_segments_to_bind = next_segments_to_bind
-
-    def allocate_dynamic_segment(self, segment):
-        network_id = self._network_context.current['id']
-
-        return self._plugin.type_manager.allocate_dynamic_segment(
-                self._plugin_context.session, network_id, segment)
-
-    def release_dynamic_segment(self, segment_id):
-        return self._plugin.type_manager.release_dynamic_segment(
-                self._plugin_context.session, segment_id)
diff --git a/neutron/plugins/ml2/drivers/__init__.py b/neutron/plugins/ml2/drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/helpers.py b/neutron/plugins/ml2/drivers/helpers.py
deleted file mode 100644 (file)
index 1e8b1fc..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (c) 2014 Thales Services SAS
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import random
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log
-
-from neutron.common import exceptions as exc
-from neutron.common import utils
-from neutron.plugins.ml2 import driver_api as api
-
-
-LOG = log.getLogger(__name__)
-
-IDPOOL_SELECT_SIZE = 100
-
-
-class BaseTypeDriver(api.TypeDriver):
-    """BaseTypeDriver for functions common to Segment and flat."""
-
-    def __init__(self):
-        try:
-            self.physnet_mtus = utils.parse_mappings(
-                cfg.CONF.ml2.physical_network_mtus
-            )
-        except Exception:
-            self.physnet_mtus = []
-
-    def get_mtu(self, physical_network=None):
-        return cfg.CONF.ml2.segment_mtu
-
-
-class SegmentTypeDriver(BaseTypeDriver):
-    """SegmentTypeDriver for segment allocation.
-
-    Provide helper methods to perform fully or partially specified
-    segment allocation.
-    """
-
-    def __init__(self, model):
-        super(SegmentTypeDriver, self).__init__()
-        self.model = model
-        self.primary_keys = set(dict(model.__table__.columns))
-        self.primary_keys.remove("allocated")
-
-    def allocate_fully_specified_segment(self, session, **raw_segment):
-        """Allocate segment fully specified by raw_segment.
-
-        If the segment exists, try to allocate it and return the db object.
-        If the segment does not exist, try to create it and return the db
-        object. If allocation/creation fails, return None.
-        """
-
-        network_type = self.get_type()
-        try:
-            with session.begin(subtransactions=True):
-                alloc = (session.query(self.model).filter_by(**raw_segment).
-                         first())
-                if alloc:
-                    if alloc.allocated:
-                        # Segment already allocated
-                        return
-                    else:
-                        # Segment not allocated
-                        LOG.debug("%(type)s segment %(segment)s allocate "
-                                  "started ",
-                                  {"type": network_type,
-                                   "segment": raw_segment})
-                        count = (session.query(self.model).
-                                 filter_by(allocated=False, **raw_segment).
-                                 update({"allocated": True}))
-                        if count:
-                            LOG.debug("%(type)s segment %(segment)s allocate "
-                                      "done ",
-                                  {"type": network_type,
-                                   "segment": raw_segment})
-                            return alloc
-
-                        # Segment allocated or deleted since select
-                        LOG.debug("%(type)s segment %(segment)s allocate "
-                                  "failed: segment has been allocated or "
-                                  "deleted",
-                                  {"type": network_type,
-                                   "segment": raw_segment})
-
-                # Segment to create or already allocated
-                LOG.debug("%(type)s segment %(segment)s create started",
-                          {"type": network_type, "segment": raw_segment})
-                alloc = self.model(allocated=True, **raw_segment)
-                alloc.save(session)
-                LOG.debug("%(type)s segment %(segment)s create done",
-                          {"type": network_type, "segment": raw_segment})
-
-        except db_exc.DBDuplicateEntry:
-            # Segment already allocated (insert failure)
-            alloc = None
-            LOG.debug("%(type)s segment %(segment)s create failed",
-                      {"type": network_type, "segment": raw_segment})
-
-        return alloc
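-
-    # Illustrative call for a VLAN-style allocation model (the column names
-    # are assumptions about the model, not part of this API):
-    #
-    #     alloc = driver.allocate_fully_specified_segment(
-    #         session, physical_network='physnet1', vlan_id=100)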
-
-    def allocate_partially_specified_segment(self, session, **filters):
-        """Allocate model segment from pool partially specified by filters.
-
-        Return allocated db object or None.
-        """
-
-        network_type = self.get_type()
-        with session.begin(subtransactions=True):
-            select = (session.query(self.model).
-                      filter_by(allocated=False, **filters))
-
-            # A selected segment may be allocated by someone else before the
-            # update runs, so grab a batch of candidates.
-            allocs = select.limit(IDPOOL_SELECT_SIZE).all()
-
-            if not allocs:
-                # No resource available
-                return
-
-            alloc = random.choice(allocs)
-            raw_segment = dict((k, alloc[k]) for k in self.primary_keys)
-            LOG.debug("%(type)s segment allocate from pool "
-                      "started with %(segment)s ",
-                      {"type": network_type,
-                       "segment": raw_segment})
-            count = (session.query(self.model).
-                     filter_by(allocated=False, **raw_segment).
-                     update({"allocated": True}))
-            if count:
-                LOG.debug("%(type)s segment allocate from pool "
-                          "success with %(segment)s ",
-                          {"type": network_type,
-                           "segment": raw_segment})
-                return alloc
-
-            # Segment allocated since select
-            LOG.debug("Allocate %(type)s segment from pool "
-                      "failed with segment %(segment)s",
-                      {"type": network_type,
-                       "segment": raw_segment})
-            # Preserve the real exception in case the maximum number of
-            # attempts is exceeded.
-            raise db_exc.RetryRequest(
-                exc.NoNetworkFoundInMaximumAllowedAttempts())
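-
-    # Design note: allocation here is optimistic. A batch of free candidates
-    # is selected, one is picked at random to reduce contention between
-    # concurrent allocators, and the UPDATE acts as a compare-and-swap on
-    # 'allocated'. Losing the race raises RetryRequest so an enclosing DB
-    # retry decorator can try again.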
diff --git a/neutron/plugins/ml2/drivers/l2pop/README b/neutron/plugins/ml2/drivers/l2pop/README
deleted file mode 100644 (file)
index 46bb27e..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-Neutron ML2 l2 population Mechanism Drivers
-
-The l2 population (l2pop) mechanism driver implements the ML2 driver API to
-improve the overlay implementations of the open source plugins (VXLAN with
-Linux bridge and GRE/VXLAN with OVS). It propagates forwarding information
-among agents using a common RPC API.
-
-More information can be found on the wiki page [1].
-
-VXLAN Linux kernel:
--------------------
-The VXLAN Linux kernel module provides all the functionality needed to
-populate the forwarding table and local ARP responder tables. The module
-first appeared, as experimental, in release 3.7 of the vanilla Linux kernel:
-- 3.8: first stable release, no edge replication (multicast necessary),
-- 3.9: edge replication only for the broadcasted packets,
-- 3.11: edge replication for broadcast, multicast and unknown packets.
-
-Note: Some distributions (like RHEL) have backported this module to earlier
-      kernel versions.
-
-OpenvSwitch:
-------------
-The OVS OpenFlow tables provide all of the necessary functionality to populate
-the forwarding table and local ARP responder tables.
-A wiki page describes how the flow tables evolved on OVS agents:
-- [2] without local ARP responder
-- [3] with local ARP responder. /!\ This functionality is only available since
-                                    OVS development branch 2.1. It can be
-                                    disabled (it is enabled by default)
-                                    through the 'arp_responder' flag. /!\
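-
-For example, a typical configuration enabling l2pop could look like the
-following (section and option names as commonly documented; values are
-illustrative):
-
-    # ml2_conf.ini
-    [ml2]
-    mechanism_drivers = openvswitch,l2population
-
-    # OVS agent configuration
-    [agent]
-    l2_population = True
-    arp_responder = True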
-
-
-Note: A difference persists between the LB and OVS agents when they are used
-      with the l2-pop mechanism driver (with the local ARP responder
-      available). The LB agent will drop unknown unicast (VXLAN bridge mode),
-      whereas the OVS agent will flood it.
-
-[1] https://wiki.openstack.org/wiki/L2population_blueprint
-[2] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic
-[3] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic_with_local_ARP_responder
\ No newline at end of file
diff --git a/neutron/plugins/ml2/drivers/l2pop/__init__.py b/neutron/plugins/ml2/drivers/l2pop/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/l2pop/config.py b/neutron/plugins/ml2/drivers/l2pop/config.py
deleted file mode 100644 (file)
index d8d685d..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-
-
-l2_population_options = [
-    cfg.IntOpt('agent_boot_time', default=180,
-               help=_('Delay within which the agent is expected to update '
-                      'existing ports when it restarts')),
-]
-
-cfg.CONF.register_opts(l2_population_options, "l2pop")
diff --git a/neutron/plugins/ml2/drivers/l2pop/db.py b/neutron/plugins/ml2/drivers/l2pop/db.py
deleted file mode 100644 (file)
index 257b4af..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-
-from neutron.common import constants as const
-from neutron.db import agents_db
-from neutron.db import models_v2
-from neutron.plugins.ml2 import models as ml2_models
-
-
-def get_agent_ip_by_host(session, agent_host):
-    agent = get_agent_by_host(session, agent_host)
-    if agent:
-        return get_agent_ip(agent)
-
-
-def get_agent_ip(agent):
-    configuration = jsonutils.loads(agent.configurations)
-    return configuration.get('tunneling_ip')
-
-
-def get_agent_uptime(agent):
-    return timeutils.delta_seconds(agent.started_at,
-                                   agent.heartbeat_timestamp)
-
-
-def get_agent_tunnel_types(agent):
-    configuration = jsonutils.loads(agent.configurations)
-    return configuration.get('tunnel_types')
-
-
-def get_agent_l2pop_network_types(agent):
-    configuration = jsonutils.loads(agent.configurations)
-    return configuration.get('l2pop_network_types')
-
-
-def get_agent_by_host(session, agent_host):
-    """Return a L2 agent on the host."""
-
-    with session.begin(subtransactions=True):
-        query = session.query(agents_db.Agent)
-        query = query.filter(agents_db.Agent.host == agent_host)
-    for agent in query:
-        if get_agent_ip(agent):
-            return agent
-
-
-def _get_active_network_ports(session, network_id):
-    with session.begin(subtransactions=True):
-        query = session.query(ml2_models.PortBinding, agents_db.Agent)
-        query = query.join(agents_db.Agent,
-                           agents_db.Agent.host == ml2_models.PortBinding.host)
-        query = query.join(models_v2.Port)
-        query = query.filter(models_v2.Port.network_id == network_id,
-                             models_v2.Port.status == const.PORT_STATUS_ACTIVE)
-        return query
-
-
-def get_nondvr_active_network_ports(session, network_id):
-    query = _get_active_network_ports(session, network_id)
-    query = query.filter(models_v2.Port.device_owner !=
-                         const.DEVICE_OWNER_DVR_INTERFACE)
-    return [(bind, agent) for bind, agent in query.all()
-            if get_agent_ip(agent)]
-
-
-def get_dvr_active_network_ports(session, network_id):
-    with session.begin(subtransactions=True):
-        query = session.query(ml2_models.DVRPortBinding, agents_db.Agent)
-        query = query.join(agents_db.Agent,
-                           agents_db.Agent.host ==
-                           ml2_models.DVRPortBinding.host)
-        query = query.join(models_v2.Port)
-        query = query.filter(models_v2.Port.network_id == network_id,
-                             models_v2.Port.status == const.PORT_STATUS_ACTIVE,
-                             models_v2.Port.device_owner ==
-                             const.DEVICE_OWNER_DVR_INTERFACE)
-    return [(bind, agent) for bind, agent in query.all()
-            if get_agent_ip(agent)]
-
-
-def get_agent_network_active_port_count(session, agent_host,
-                                        network_id):
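-    # Count both regular ports bound to this host and DVR interface ports
-    # whose host-specific DVR binding on this host is ACTIVE.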
-    with session.begin(subtransactions=True):
-        query = session.query(models_v2.Port)
-        query1 = query.join(ml2_models.PortBinding)
-        query1 = query1.filter(models_v2.Port.network_id == network_id,
-                               models_v2.Port.status ==
-                               const.PORT_STATUS_ACTIVE,
-                               models_v2.Port.device_owner !=
-                               const.DEVICE_OWNER_DVR_INTERFACE,
-                               ml2_models.PortBinding.host == agent_host)
-        query2 = query.join(ml2_models.DVRPortBinding)
-        query2 = query2.filter(models_v2.Port.network_id == network_id,
-                               ml2_models.DVRPortBinding.status ==
-                               const.PORT_STATUS_ACTIVE,
-                               models_v2.Port.device_owner ==
-                               const.DEVICE_OWNER_DVR_INTERFACE,
-                               ml2_models.DVRPortBinding.host == agent_host)
-        return (query1.count() + query2.count())
diff --git a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py
deleted file mode 100644 (file)
index e4ddfe2..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _LW
-from neutron.common import constants as const
-from neutron import context as n_context
-from neutron.db import api as db_api
-from neutron.plugins.ml2.common import exceptions as ml2_exc
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers.l2pop import config  # noqa
-from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
-from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
-
-LOG = logging.getLogger(__name__)
-
-
-class L2populationMechanismDriver(api.MechanismDriver):
-
-    def __init__(self):
-        super(L2populationMechanismDriver, self).__init__()
-        self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI()
-
-    def initialize(self):
-        LOG.debug("Experimental L2 population driver")
-        self.rpc_ctx = n_context.get_admin_context_without_session()
-        self.migrated_ports = {}
-
-    def _get_port_fdb_entries(self, port):
-        return [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
-                                   ip_address=ip['ip_address'])
-                for ip in port['fixed_ips']]
-
-    def delete_port_postcommit(self, context):
-        port = context.current
-        agent_host = context.host
-
-        fdb_entries = self._get_agent_fdb(context, port, agent_host)
-        self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx,
-            fdb_entries)
-
-    def _get_diff_ips(self, orig, port):
-        orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']])
-        port_ips = set([ip['ip_address'] for ip in port['fixed_ips']])
-
-        # check if an ip has been added or removed
-        orig_chg_ips = orig_ips.difference(port_ips)
-        port_chg_ips = port_ips.difference(orig_ips)
-
-        if orig_chg_ips or port_chg_ips:
-            return orig_chg_ips, port_chg_ips
-
-    def _fixed_ips_changed(self, context, orig, port, diff_ips):
-        orig_ips, port_ips = diff_ips
-
-        if (port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
-            agent_host = context.host
-        else:
-            agent_host = context.original_host
-
-        if not agent_host:
-            return
-
-        agent_ip = l2pop_db.get_agent_ip_by_host(db_api.get_session(),
-                                                 agent_host)
-
-        orig_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
-                                          ip_address=ip)
-                       for ip in orig_ips]
-        port_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
-                                          ip_address=ip)
-                       for ip in port_ips]
-
-        upd_fdb_entries = {port['network_id']: {agent_ip: {}}}
-
-        ports = upd_fdb_entries[port['network_id']][agent_ip]
-        if orig_mac_ip:
-            ports['before'] = orig_mac_ip
-
-        if port_mac_ip:
-            ports['after'] = port_mac_ip
-
-        self.L2populationAgentNotify.update_fdb_entries(
-            self.rpc_ctx, {'chg_ip': upd_fdb_entries})
-
-        return True
-
-    def update_port_precommit(self, context):
-        port = context.current
-        orig = context.original
-
-        if (orig['mac_address'] != port['mac_address'] and
-            context.status == const.PORT_STATUS_ACTIVE):
-            LOG.warning(_LW("unable to modify mac_address of ACTIVE port "
-                            "%s"), port['id'])
-            raise ml2_exc.MechanismDriverError(method='update_port_precommit')
-
-    def update_port_postcommit(self, context):
-        port = context.current
-        orig = context.original
-
-        diff_ips = self._get_diff_ips(orig, port)
-        if diff_ips:
-            self._fixed_ips_changed(context, orig, port, diff_ips)
-        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-            if context.status == const.PORT_STATUS_ACTIVE:
-                self._update_port_up(context)
-            if context.status == const.PORT_STATUS_DOWN:
-                agent_host = context.host
-                fdb_entries = self._get_agent_fdb(
-                        context, port, agent_host)
-                self.L2populationAgentNotify.remove_fdb_entries(
-                    self.rpc_ctx, fdb_entries)
-        elif (context.host != context.original_host
-            and context.status == const.PORT_STATUS_ACTIVE
-            and not self.migrated_ports.get(orig['id'])):
-            # The port has been migrated. Store the original binding so the
-            # appropriate fdb entries can be sent once the port is set up
-            # on the destination host.
-            self.migrated_ports[orig['id']] = (
-                (orig, context.original_host))
-        elif context.status != context.original_status:
-            if context.status == const.PORT_STATUS_ACTIVE:
-                self._update_port_up(context)
-            elif context.status == const.PORT_STATUS_DOWN:
-                fdb_entries = self._get_agent_fdb(
-                    context, port, context.host)
-                self.L2populationAgentNotify.remove_fdb_entries(
-                    self.rpc_ctx, fdb_entries)
-            elif context.status == const.PORT_STATUS_BUILD:
-                orig = self.migrated_ports.pop(port['id'], None)
-                if orig:
-                    original_port = orig[0]
-                    original_host = orig[1]
-                    # this port has been migrated: remove its entries from fdb
-                    fdb_entries = self._get_agent_fdb(
-                        context, original_port, original_host)
-                    self.L2populationAgentNotify.remove_fdb_entries(
-                        self.rpc_ctx, fdb_entries)
-
-    def _get_and_validate_segment(self, context, port_id, agent):
-        segment = context.bottom_bound_segment
-        if not segment:
-            LOG.debug("Port %(port)s updated by agent %(agent)s isn't bound "
-                      "to any segment", {'port': port_id, 'agent': agent})
-            return
-
-        network_types = l2pop_db.get_agent_l2pop_network_types(agent)
-        if network_types is None:
-            network_types = l2pop_db.get_agent_tunnel_types(agent)
-        if segment['network_type'] not in network_types:
-            return
-
-        return segment
-
-    def _create_agent_fdb(self, session, agent, segment, network_id):
-        agent_fdb_entries = {network_id:
-                             {'segment_id': segment['segmentation_id'],
-                              'network_type': segment['network_type'],
-                              'ports': {}}}
-        tunnel_network_ports = (
-            l2pop_db.get_dvr_active_network_ports(session, network_id))
-        fdb_network_ports = (
-            l2pop_db.get_nondvr_active_network_ports(session, network_id))
-        ports = agent_fdb_entries[network_id]['ports']
-        ports.update(self._get_tunnels(
-            fdb_network_ports + tunnel_network_ports,
-            agent.host))
-        for agent_ip, fdbs in ports.items():
-            for binding, agent in fdb_network_ports:
-                if l2pop_db.get_agent_ip(agent) == agent_ip:
-                    fdbs.extend(self._get_port_fdb_entries(binding.port))
-
-        return agent_fdb_entries
-
-    def _get_tunnels(self, tunnel_network_ports, exclude_host):
-        agents = {}
-        for _, agent in tunnel_network_ports:
-            if agent.host == exclude_host:
-                continue
-
-            ip = l2pop_db.get_agent_ip(agent)
-            if not ip:
-                LOG.debug("Unable to retrieve the agent ip, check "
-                          "the agent %s configuration.", agent.host)
-                continue
-
-            if ip not in agents:
-                agents[ip] = [const.FLOODING_ENTRY]
-
-        return agents
-
-    def _update_port_up(self, context):
-        port = context.current
-        agent_host = context.host
-        session = db_api.get_session()
-        agent = l2pop_db.get_agent_by_host(session, agent_host)
-        if not agent:
-            LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"),
-                        agent_host)
-            return
-
-        network_id = port['network_id']
-
-        agent_active_ports = l2pop_db.get_agent_network_active_port_count(
-            session, agent_host, network_id)
-
-        agent_ip = l2pop_db.get_agent_ip(agent)
-        segment = self._get_and_validate_segment(context, port['id'], agent)
-        if not segment:
-            return
-        other_fdb_entries = self._get_fdb_entries_template(
-            segment, agent_ip, network_id)
-        other_fdb_ports = other_fdb_entries[network_id]['ports']
-
-        if agent_active_ports == 1 or (l2pop_db.get_agent_uptime(agent) <
-                                       cfg.CONF.l2pop.agent_boot_time):
-            # This is the first port activated on the current agent in this
-            # network, or the agent has just restarted; provide it with the
-            # whole list of fdb entries.
-            agent_fdb_entries = self._create_agent_fdb(session,
-                                                       agent,
-                                                       segment,
-                                                       network_id)
-
-            # And notify other agents to add flooding entry
-            other_fdb_ports[agent_ip].append(const.FLOODING_ENTRY)
-
-            if agent_fdb_entries[network_id]['ports'].keys():
-                self.L2populationAgentNotify.add_fdb_entries(
-                    self.rpc_ctx, agent_fdb_entries, agent_host)
-
-        # Notify other agents to add fdb rule for current port
-        if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
-            other_fdb_ports[agent_ip] += self._get_port_fdb_entries(port)
-
-        self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
-                                                     other_fdb_entries)
-
-    def _get_agent_fdb(self, context, port, agent_host):
-        if not agent_host:
-            return
-
-        network_id = port['network_id']
-
-        session = db_api.get_session()
-        agent_active_ports = l2pop_db.get_agent_network_active_port_count(
-            session, agent_host, network_id)
-
-        agent = l2pop_db.get_agent_by_host(db_api.get_session(), agent_host)
-        segment = self._get_and_validate_segment(context, port['id'], agent)
-        if not segment:
-            return
-
-        agent_ip = l2pop_db.get_agent_ip(agent)
-        other_fdb_entries = self._get_fdb_entries_template(
-            segment, agent_ip, port['network_id'])
-        if agent_active_ports == 0:
-            # The agent is removing its last activated port in this network;
-            # other agents need to be notified to delete their flooding
-            # entry.
-            other_fdb_entries[network_id]['ports'][agent_ip].append(
-                const.FLOODING_ENTRY)
-        # Notify other agents to remove fdb rules for current port
-        if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
-            fdb_entries = self._get_port_fdb_entries(port)
-            other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries
-
-        return other_fdb_entries
-
-    @classmethod
-    def _get_fdb_entries_template(cls, segment, agent_ip, network_id):
-        return {
-            network_id:
-                {'segment_id': segment['segmentation_id'],
-                 'network_type': segment['network_type'],
-                 'ports': {agent_ip: []}}}
diff --git a/neutron/plugins/ml2/drivers/l2pop/rpc.py b/neutron/plugins/ml2/drivers/l2pop/rpc.py
deleted file mode 100644 (file)
index afa7e1b..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import copy
-
-from oslo_log import log as logging
-import oslo_messaging
-
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-
-
-LOG = logging.getLogger(__name__)
-
-
-PortInfo = collections.namedtuple("PortInfo", "mac_address ip_address")
-
-
-class L2populationAgentNotifyAPI(object):
-
-    def __init__(self, topic=topics.AGENT):
-        self.topic = topic
-        self.topic_l2pop_update = topics.get_topic_name(topic,
-                                                        topics.L2POPULATION,
-                                                        topics.UPDATE)
-        target = oslo_messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def _notification_fanout(self, context, method, fdb_entries):
-        LOG.debug('Fanout notify l2population agents at %(topic)s '
-                  'the message %(method)s with %(fdb_entries)s',
-                  {'topic': self.topic,
-                   'method': method,
-                   'fdb_entries': fdb_entries})
-
-        marshalled_fdb_entries = self._marshall_fdb_entries(fdb_entries)
-        cctxt = self.client.prepare(topic=self.topic_l2pop_update, fanout=True)
-        cctxt.cast(context, method, fdb_entries=marshalled_fdb_entries)
-
-    def _notification_host(self, context, method, fdb_entries, host):
-        LOG.debug('Notify l2population agent %(host)s at %(topic)s the '
-                  'message %(method)s with %(fdb_entries)s',
-                  {'host': host,
-                   'topic': self.topic,
-                   'method': method,
-                   'fdb_entries': fdb_entries})
-
-        marshalled_fdb_entries = self._marshall_fdb_entries(fdb_entries)
-        cctxt = self.client.prepare(topic=self.topic_l2pop_update, server=host)
-        cctxt.cast(context, method, fdb_entries=marshalled_fdb_entries)
-
-    def add_fdb_entries(self, context, fdb_entries, host=None):
-        if fdb_entries:
-            if host:
-                self._notification_host(context, 'add_fdb_entries',
-                                        fdb_entries, host)
-            else:
-                self._notification_fanout(context, 'add_fdb_entries',
-                                          fdb_entries)
-
-    def remove_fdb_entries(self, context, fdb_entries, host=None):
-        if fdb_entries:
-            if host:
-                self._notification_host(context, 'remove_fdb_entries',
-                                        fdb_entries, host)
-            else:
-                self._notification_fanout(context, 'remove_fdb_entries',
-                                          fdb_entries)
-
-    def update_fdb_entries(self, context, fdb_entries, host=None):
-        if fdb_entries:
-            if host:
-                self._notification_host(context, 'update_fdb_entries',
-                                        fdb_entries, host)
-            else:
-                self._notification_fanout(context, 'update_fdb_entries',
-                                          fdb_entries)
-
-    @staticmethod
-    def _marshall_fdb_entries(fdb_entries):
-        """Prepares fdb_entries for serialization to JSON for RPC.
-
-        All methods in this class that send messages should call this to
-        marshall fdb_entries for the wire.
-
-        :param fdb_entries: Original fdb_entries data-structure.  Looks like:
-            {
-                <uuid>: {
-                    ...,
-                    'ports': {
-                        <ip address>: [ PortInfo, ...  ],
-                        ...
-
-        :returns: Deep copy with PortInfo converted to [mac, ip]
-        """
-        marshalled = copy.deepcopy(fdb_entries)
-        for value in marshalled.values():
-            if 'ports' in value:
-                for address, port_infos in value['ports'].items():
-                    value['ports'][address] = [[mac, ip]
-                                               for mac, ip in port_infos]
-        return marshalled
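-
-    # Illustrative example (values made up): a PortInfo such as
-    # PortInfo('fa:16:3e:aa:bb:cc', '10.0.0.3') is sent on the wire as
-    # ['fa:16:3e:aa:bb:cc', '10.0.0.3'].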
diff --git a/neutron/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py b/neutron/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py b/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py
deleted file mode 100644 (file)
index 908842b..0000000
+++ /dev/null
@@ -1,313 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from oslo_config import cfg
-from oslo_log import helpers as log_helpers
-import six
-
-from neutron.common import constants as n_const
-from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
-
-
-@six.add_metaclass(abc.ABCMeta)
-class L2populationRpcCallBackMixin(object):
-    '''General mixin class for L2 population RPC callbacks.
-
-    The following methods are called through RPC.
-        add_fdb_entries(), remove_fdb_entries(), update_fdb_entries()
-    The following methods are used in an agent as internal methods.
-        fdb_add(), fdb_remove(), fdb_update()
-    '''
-
-    @log_helpers.log_method_call
-    def add_fdb_entries(self, context, fdb_entries, host=None):
-        if not host or host == cfg.CONF.host:
-            self.fdb_add(context, self._unmarshall_fdb_entries(fdb_entries))
-
-    @log_helpers.log_method_call
-    def remove_fdb_entries(self, context, fdb_entries, host=None):
-        if not host or host == cfg.CONF.host:
-            self.fdb_remove(context, self._unmarshall_fdb_entries(fdb_entries))
-
-    @log_helpers.log_method_call
-    def update_fdb_entries(self, context, fdb_entries, host=None):
-        if not host or host == cfg.CONF.host:
-            self.fdb_update(context, self._unmarshall_fdb_entries(fdb_entries))
-
-    @staticmethod
-    def _unmarshall_fdb_entries(fdb_entries):
-        """Prepares fdb_entries from JSON.
-
-        All methods in this class that receive messages should call this to
-        unmarshall fdb_entries from the wire.
-
-        :param fdb_entries: Original fdb_entries data-structure.  Looks like:
-            {
-                <uuid>: {
-                    ...,
-                    'ports': {
-                        <ip address>: [ [<mac>, <ip>], ...  ],
-                        ...
-
-        :returns: Deep copy with [<mac>, <ip>] converted to PortInfo
-        """
-        unmarshalled = dict(fdb_entries)
-        for value in unmarshalled.values():
-            if 'ports' in value:
-                value['ports'] = dict(
-                    (address, [l2pop_rpc.PortInfo(*pi) for pi in port_infos])
-                    for address, port_infos in value['ports'].items()
-                )
-        return unmarshalled
-
-    @abc.abstractmethod
-    def fdb_add(self, context, fdb_entries):
-        pass
-
-    @abc.abstractmethod
-    def fdb_remove(self, context, fdb_entries):
-        pass
-
-    @abc.abstractmethod
-    def fdb_update(self, context, fdb_entries):
-        pass
-
-
-class L2populationRpcCallBackTunnelMixin(L2populationRpcCallBackMixin):
-    '''Mixin class of L2 population callbacks for tunnels.
-
-    The following methods are all used in agents as internal methods.
-
-    Some of the methods in this class use Local VLAN Mapping, aka lvm.
-    It's a Python object with at least the following attributes:
-
-    ============ =========================================================
-    Attribute    Description
-    ============ =========================================================
-    vlan         An identifier used by the agent to identify a neutron
-                 network.
-    network_type A network type found in neutron.plugins.common.constants.
-    ============ =========================================================
-
-    NOTE(yamamoto): "Local VLAN" is an OVS-agent term.  OVS-agent internally
-    uses 802.1q VLAN tagging to isolate networks.  While this class inherited
-    the terms from OVS-agent, it does not assume the specific underlying
-    technologies.  E.g. this class is also used by ofagent, where a different
-    mechanism is used.
-    '''
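-
-    # For illustration, a minimal lvm object satisfying the interface above
-    # could be built as follows (hypothetical, not the agents' actual class):
-    #
-    #     LocalVLANMapping = collections.namedtuple(
-    #         'LocalVLANMapping', 'vlan network_type')
-    #     lvm = LocalVLANMapping(vlan=42, network_type='vxlan')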
-
-    @abc.abstractmethod
-    def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
-        '''Add flow for fdb
-
-        This method is assumed to be used by method fdb_add_tun.
-        We expect to add a flow entry to send a packet to a specified port
-        on the bridge. Implementations may also update information for the
-        local ARP responder.
-
-        :param br: the bridge on which add_fdb_flow should be applied.
-        :param port_info: PortInfo instance to include mac and ip.
-            .mac_address
-            .ip_address
-
-        :param remote_ip: remote IP address.
-        :param lvm: the local VLAN map of the network.
-        :param ofport: the port to add.
-        '''
-        pass
-
-    @abc.abstractmethod
-    def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
-        '''Delete flow for fdb
-
-        This method is assumed to be used by method fdb_remove_tun.
-        We expect to delete the flow entry that sends a packet to a
-        specified port from the bridge. Implementations may also delete
-        information for the local ARP responder.
-
-        :param br: the bridge on which del_fdb_flow should be applied.
-        :param port_info: PortInfo instance to include mac and ip.
-            .mac_address
-            .ip_address
-
-        :param remote_ip: remote IP address.
-        :param lvm: local VLAN map of a network. See add_fdb_flow for
-            more explanation.
-        :param ofport: a port to delete.
-        '''
-        pass
-
-    @abc.abstractmethod
-    def setup_tunnel_port(self, br, remote_ip, network_type):
-        '''Setup an added tunnel port.
-
-        This method is assumed to be used by method fdb_add_tun.
-        We expect it to prepare for calling add_fdb_flow, mainly by adding
-        a port to the bridge. Implementations may perform additional bridge
-        preparation if needed.
-
-        :param br: the bridge on which setup_tunnel_port should be applied.
-        :param remote_ip: the remote IP of the port to set up.
-        :param network_type: the type of the network.
-        :returns: an ofport value. Value 0 means the port is unavailable.
-        '''
-        pass
-
-    @abc.abstractmethod
-    def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
-        '''Clean up a deleted tunnel port.
-
-        This method is assumed to be used by method fdb_remove_tun.
-        We expect it to clean up after del_fdb_flow has been called, mainly
-        by deleting a port from the bridge. Implementations may perform
-        additional bridge cleanup if needed.
-
-        :param br: the bridge on which cleanup_tunnel_port should be applied.
-        :param tun_ofport: the ofport value of the port to clean up.
-        :param tunnel_type: the type of the tunnel.
-        '''
-        pass
-
-    @abc.abstractmethod
-    def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
-                                  ip_address):
-        '''Maintain the ARP responder information.
-
-        Update MAC/IPv4 associations, which are typically used by
-        the local ARP responder.  For example, OVS-agent sets up
-        flow entries to perform ARP responses.
-
-        :param br: the bridge on which setup_entry_for_arp_reply should be
-        applied.
-        :param action: 'add' or 'remove' the flow for ARP response
-        information.
-        :param local_vid: id in local VLAN map of network's ARP entry.
-        :param mac_address: MAC string value.
-        :param ip_address: IP string value.
-        '''
-        pass
-
-    def get_agent_ports(self, fdb_entries, local_vlan_map):
-        """Generator to yield port info.
-
-        For each known (i.e. found in local_vlan_map) network in
-        fdb_entries, yield (lvm, fdb_entries[network_id]['ports']) pair.
-
-        :param fdb_entries: l2pop fdb entries
-        :param local_vlan_map: A dict to map network_id to
-            the corresponding lvm entry.
-        """
-        for network_id, values in fdb_entries.items():
-            lvm = local_vlan_map.get(network_id)
-            if lvm is None:
-                continue
-            agent_ports = values.get('ports')
-            yield (lvm, agent_ports)
-
-    @log_helpers.log_method_call
-    def fdb_add_tun(self, context, br, lvm, agent_ports, lookup_port):
-        for remote_ip, ports in agent_ports.items():
-            # Ensure we have a tunnel port with this remote agent
-            ofport = lookup_port(lvm.network_type, remote_ip)
-            if not ofport:
-                ofport = self.setup_tunnel_port(br, remote_ip,
-                                                lvm.network_type)
-                if ofport == 0:
-                    continue
-            for port in ports:
-                self.add_fdb_flow(br, port, remote_ip, lvm, ofport)
-
-    @log_helpers.log_method_call
-    def fdb_remove_tun(self, context, br, lvm, agent_ports, lookup_port):
-        for remote_ip, ports in agent_ports.items():
-            ofport = lookup_port(lvm.network_type, remote_ip)
-            if not ofport:
-                continue
-            for port in ports:
-                self.del_fdb_flow(br, port, remote_ip, lvm, ofport)
-                if port == n_const.FLOODING_ENTRY:
-                    # Check if this tunnel port is still used
-                    self.cleanup_tunnel_port(br, ofport, lvm.network_type)
-
-    @log_helpers.log_method_call
-    def fdb_update(self, context, fdb_entries):
-        '''Call methods named '_fdb_<action>'.
-
-        This method assumes that methods named '_fdb_<action>' are defined
-        in the class. Currently the following action is available:
-            chg_ip
-        '''
-        for action, values in fdb_entries.items():
-            method = '_fdb_' + action
-            if not hasattr(self, method):
-                raise NotImplementedError()
-
-            getattr(self, method)(context, values)
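-
-    # A sketch of the dispatch above (values hypothetical): the payload
-    # below would be routed to self._fdb_chg_ip(context, values), while an
-    # unknown action raises NotImplementedError:
-    #
-    #     fdb_entries = {'chg_ip':
-    #                    {'net-uuid':
-    #                     {'192.0.2.20': {'before': [port_info_old],
-    #                                     'after': [port_info_new]}}}}
-    #     self.fdb_update(context, fdb_entries)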
-
-    @log_helpers.log_method_call
-    def fdb_chg_ip_tun(self, context, br, fdb_entries, local_ip,
-                       local_vlan_map):
-        '''Update the fdb when the IP of a port is updated.
-
-        The ML2 l2-pop mechanism driver sends an fdb update RPC message when
-        the IP of a port is updated.
-
-        :param context: RPC context.
-        :param br: the bridge on which fdb_chg_ip_tun should be applied.
-        :param fdb_entries: fdb dicts that contain all mac/IP information per
-                            agent and network.
-                               {'net1':
-                                {'agent_ip':
-                                 {'before': PortInfo,
-                                  'after': PortInfo
-                                 }
-                                }
-                                'net2':
-                                ...
-                               }
-
-                             PortInfo has .mac_address and .ip_address attrs.
-
-        :param local_ip: local IP address of this agent.
-        :param local_vlan_map: A dict to map network_id to
-            the corresponding lvm entry.
-        '''
-
-        for network_id, agent_ports in fdb_entries.items():
-            lvm = local_vlan_map.get(network_id)
-            if not lvm:
-                continue
-
-            for agent_ip, state in agent_ports.items():
-                if agent_ip == local_ip:
-                    continue
-
-                after = state.get('after', [])
-                for mac_ip in after:
-                    self.setup_entry_for_arp_reply(br, 'add', lvm.vlan,
-                                                   mac_ip.mac_address,
-                                                   mac_ip.ip_address)
-
-                before = state.get('before', [])
-                for mac_ip in before:
-                    self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
-                                                   mac_ip.mac_address,
-                                                   mac_ip.ip_address)
diff --git a/neutron/plugins/ml2/drivers/linuxbridge/__init__.py b/neutron/plugins/ml2/drivers/linuxbridge/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/__init__.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py
deleted file mode 100644 (file)
index 54daff1..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-
-from neutron._i18n import _LI
-from neutron.agent.linux import ip_lib
-from neutron.common import utils
-
-LOG = logging.getLogger(__name__)
-SPOOF_CHAIN_PREFIX = 'neutronARP-'
-
-
-def setup_arp_spoofing_protection(vif, port_details):
-    current_rules = ebtables(['-L']).splitlines()
-    if not port_details.get('port_security_enabled', True):
-        # clear any previous entries related to this port
-        delete_arp_spoofing_protection([vif], current_rules)
-        LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because "
-                     "it has port security disabled"), vif)
-        return
-    if utils.is_port_trusted(port_details):
-        # clear any previous entries related to this port
-        delete_arp_spoofing_protection([vif], current_rules)
-        LOG.debug("Skipping ARP spoofing rules for network owned port "
-                  "'%s'.", vif)
-        return
-    # collect all of the addresses and cidrs that belong to the port
-    addresses = {f['ip_address'] for f in port_details['fixed_ips']}
-    if port_details.get('allowed_address_pairs'):
-        addresses |= {p['ip_address']
-                      for p in port_details['allowed_address_pairs']}
-
-    addresses = {ip for ip in addresses
-                 if netaddr.IPNetwork(ip).version == 4}
-    if any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in addresses):
-        # don't try to install protection because a /0 prefix allows any
-        # address anyway and the ARP_SPA can only match on /1 or more.
-        return
-
-    install_arp_spoofing_protection(vif, addresses, current_rules)
-
-
-def chain_name(vif):
-    # start each chain with a common identifier for cleanup to find
-    return '%s%s' % (SPOOF_CHAIN_PREFIX, vif)
-
-
-@lockutils.synchronized('ebtables')
-def delete_arp_spoofing_protection(vifs, current_rules=None):
-    if not current_rules:
-        current_rules = ebtables(['-L']).splitlines()
-    # delete the jump rule and then delete the whole chain
-    jumps = [vif for vif in vifs if vif_jump_present(vif, current_rules)]
-    for vif in jumps:
-        ebtables(['-D', 'FORWARD', '-i', vif, '-j',
-                  chain_name(vif), '-p', 'ARP'])
-    for vif in vifs:
-        if chain_exists(chain_name(vif), current_rules):
-            ebtables(['-X', chain_name(vif)])
-
-
-def delete_unreferenced_arp_protection(current_vifs):
-    # deletes all jump rules and chains that aren't in current_vifs but match
-    # the spoof prefix
-    output = ebtables(['-L']).splitlines()
-    to_delete = []
-    for line in output:
-        # we're looking for chain headers like the following, turning e.g.:
-        # Bridge chain: SPOOF_CHAIN_PREFIXtap199, entries: 0, policy: DROP
-        # into 'tap199'
-        if line.startswith('Bridge chain: %s' % SPOOF_CHAIN_PREFIX):
-            devname = line.split(SPOOF_CHAIN_PREFIX, 1)[1].split(',')[0]
-            if devname not in current_vifs:
-                to_delete.append(devname)
-    LOG.info(_LI("Clearing orphaned ARP spoofing entries for devices %s"),
-             to_delete)
-    delete_arp_spoofing_protection(to_delete, output)
-
-
-@lockutils.synchronized('ebtables')
-def install_arp_spoofing_protection(vif, addresses, current_rules):
-    # make a VIF-specific ARP chain so we don't conflict with other rules
-    vif_chain = chain_name(vif)
-    if not chain_exists(vif_chain, current_rules):
-        ebtables(['-N', vif_chain, '-P', 'DROP'])
-    # flush the chain to clear previous accepts. This will cause ARP packets
-    # to be dropped until the allow rules are installed, but that's better
-    # than leaking spoofed packets, and ARP can handle the losses.
-    ebtables(['-F', vif_chain])
-    for addr in addresses:
-        ebtables(['-A', vif_chain, '-p', 'ARP', '--arp-ip-src', addr,
-                  '-j', 'ACCEPT'])
-    # check if jump rule already exists, if not, install it
-    if not vif_jump_present(vif, current_rules):
-        ebtables(['-A', 'FORWARD', '-i', vif, '-j',
-                  vif_chain, '-p', 'ARP'])
-
-
-def chain_exists(chain, current_rules):
-    for rule in current_rules:
-        if rule.startswith('Bridge chain: %s' % chain):
-            return True
-    return False
-
-
-def vif_jump_present(vif, current_rules):
-    searches = (('-i %s' % vif), ('-j %s' % chain_name(vif)), ('-p ARP'))
-    for line in current_rules:
-        if all(s in line for s in searches):
-            return True
-    return False
-
-
-# Used to scope ebtables commands in testing
-NAMESPACE = None
-
-
-def ebtables(comm):
-    execute = ip_lib.IPWrapper(NAMESPACE).netns.execute
-    return execute(['ebtables'] + comm, run_as_root=True)
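-
-# A minimal usage sketch (hypothetical values): port_details mirrors the
-# keys consumed by setup_arp_spoofing_protection above.
-#
-#     port_details = {'port_security_enabled': True,
-#                     'device_owner': 'compute:nova',
-#                     'fixed_ips': [{'ip_address': '10.0.0.5'}],
-#                     'allowed_address_pairs': []}
-#     setup_arp_spoofing_protection('tapa1b2c3d4-e5', port_details)
-#     # ... later, when the VIF goes away:
-#     delete_arp_spoofing_protection(['tapa1b2c3d4-e5'])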
diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/__init__.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py
deleted file mode 100644 (file)
index b2d192e..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2012 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.agent.common import config
-
-DEFAULT_BRIDGE_MAPPINGS = []
-DEFAULT_INTERFACE_MAPPINGS = []
-DEFAULT_VXLAN_GROUP = '224.0.0.1'
-
-
-vxlan_opts = [
-    cfg.BoolOpt('enable_vxlan', default=True,
-                help=_("Enable VXLAN on the agent. Can be enabled when "
-                       "agent is managed by ml2 plugin using linuxbridge "
-                       "mechanism driver")),
-    cfg.IntOpt('ttl',
-               help=_("TTL for vxlan interface protocol packets.")),
-    cfg.IntOpt('tos',
-               help=_("TOS for vxlan interface protocol packets.")),
-    cfg.StrOpt('vxlan_group', default=DEFAULT_VXLAN_GROUP,
-               help=_("Multicast group(s) for vxlan interface. A range of "
-                      "group addresses may be specified by using CIDR "
-                      "notation. Specifying a range allows different VNIs to "
-                      "use different group addresses, reducing or eliminating "
-                      "spurious broadcast traffic to the tunnel endpoints. "
-                      "To reserve a unique group for each possible "
-                      "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This "
-                      "setting must be the same on all the agents.")),
-    cfg.IPOpt('local_ip', help=_("Local IP address of the VXLAN endpoints.")),
-    cfg.BoolOpt('l2_population', default=False,
-                help=_("Extension to use alongside ml2 plugin's l2population "
-                       "mechanism driver. It enables the plugin to populate "
-                       "VXLAN forwarding table.")),
-]
-
-bridge_opts = [
-    cfg.ListOpt('physical_interface_mappings',
-                default=DEFAULT_INTERFACE_MAPPINGS,
-                help=_("Comma-separated list of "
-                       "<physical_network>:<physical_interface> tuples "
-                       "mapping physical network names to the agent's "
-                       "node-specific physical network interfaces to be used "
-                       "for flat and VLAN networks. All physical networks "
-                       "listed in network_vlan_ranges on the server should "
-                       "have mappings to appropriate interfaces on each "
-                       "agent.")),
-    cfg.ListOpt('bridge_mappings',
-                default=DEFAULT_BRIDGE_MAPPINGS,
-                help=_("List of <physical_network>:<physical_bridge>")),
-]
-
-agent_opts = [
-    cfg.IntOpt('polling_interval', default=2,
-               help=_("The number of seconds the agent will wait between "
-                      "polling for local device changes.")),
-    cfg.IntOpt('quitting_rpc_timeout', default=10,
-               help=_("Set new timeout in seconds for new rpc calls after "
-                      "agent receives SIGTERM. If value is set to 0, rpc "
-                      "timeout won't be changed")),
-    # TODO(kevinbenton): The following opt is duplicated between the OVS agent
-    # and the Linuxbridge agent to make it easy to back-port. These shared opts
-    # should be moved into a common agent config options location as part of
-    # the deduplication work.
-    cfg.BoolOpt('prevent_arp_spoofing', default=True,
-                help=_("Enable suppression of ARP responses that don't match "
-                       "an IP address that belongs to the port from which "
-                       "they originate. Note: This prevents the VMs attached "
-                       "to this agent from spoofing, it doesn't protect them "
-                       "from other devices which have the capability to spoof "
-                       "(e.g. bare metal or VMs attached to agents without "
-                       "this flag set to True). Spoofing rules will not be "
-                       "added to any ports that have port security disabled. "
-                       "For LinuxBridge, this requires ebtables. For OVS, it "
-                       "requires a version that supports matching ARP "
-                       "headers."))
-]
-
-
-cfg.CONF.register_opts(vxlan_opts, "VXLAN")
-cfg.CONF.register_opts(bridge_opts, "LINUX_BRIDGE")
-cfg.CONF.register_opts(agent_opts, "AGENT")
-config.register_agent_state_opts_helper(cfg.CONF)
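-
-# For illustration, the options registered above correspond to an agent
-# ini file along these lines (section names are the registration groups;
-# values hypothetical):
-#
-#     [LINUX_BRIDGE]
-#     physical_interface_mappings = physnet1:eth1
-#
-#     [VXLAN]
-#     enable_vxlan = True
-#     local_ip = 192.0.2.10
-#     l2_population = False
-#
-#     [AGENT]
-#     polling_interval = 2
-#     prevent_arp_spoofing = True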
diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py
deleted file mode 100644 (file)
index 45c7918..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2012 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-FLAT_VLAN_ID = -1
-LOCAL_VLAN_ID = -2
-
-# Supported VXLAN features
-VXLAN_NONE = 'not_supported'
-VXLAN_MCAST = 'multicast_flooding'
-VXLAN_UCAST = 'unicast_flooding'
-
-EXTENSION_DRIVER_TYPE = 'linuxbridge'
diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
deleted file mode 100644 (file)
index ee95c2e..0000000
+++ /dev/null
@@ -1,1213 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2012 Cisco Systems, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-#
-# Performs per-host Linux Bridge configuration for Neutron.
-# Based on the structure of the OpenVSwitch agent in the
-# Neutron OpenVSwitch Plugin.
-
-import collections
-import sys
-import time
-
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_service import loopingcall
-from oslo_service import service
-from oslo_utils import excutils
-from six import moves
-
-from neutron._i18n import _LE, _LI, _LW
-from neutron.agent.l2.extensions import manager as ext_manager
-from neutron.agent.linux import bridge_lib
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.agent import rpc as agent_rpc
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.common import config as common_config
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import topics
-from neutron.common import utils as n_utils
-from neutron import context
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.l2pop.rpc_manager \
-    import l2population_rpc as l2pop_rpc
-from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect
-from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config  # noqa
-from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
-    import constants as lconst
-
-
-LOG = logging.getLogger(__name__)
-
-BRIDGE_NAME_PREFIX = "brq"
-VXLAN_INTERFACE_PREFIX = "vxlan-"
-
-
-class NetworkSegment(object):
-    def __init__(self, network_type, physical_network, segmentation_id):
-        self.network_type = network_type
-        self.physical_network = physical_network
-        self.segmentation_id = segmentation_id
-
-
-class LinuxBridgeManager(object):
-    def __init__(self, bridge_mappings, interface_mappings):
-        self.bridge_mappings = bridge_mappings
-        self.interface_mappings = interface_mappings
-        self.validate_interface_mappings()
-        self.validate_bridge_mappings()
-        self.ip = ip_lib.IPWrapper()
-        # VXLAN related parameters:
-        self.local_ip = cfg.CONF.VXLAN.local_ip
-        self.vxlan_mode = lconst.VXLAN_NONE
-        if cfg.CONF.VXLAN.enable_vxlan:
-            device = self.get_local_ip_device(self.local_ip)
-            self.validate_vxlan_group_with_local_ip()
-            self.local_int = device.name
-            self.check_vxlan_support()
-        # Store network mapping to segments
-        self.network_map = {}
-
-    def validate_interface_mappings(self):
-        for physnet, interface in self.interface_mappings.items():
-            if not ip_lib.device_exists(interface):
-                LOG.error(_LE("Interface %(intf)s for physical network %(net)s"
-                              " does not exist. Agent terminated!"),
-                          {'intf': interface, 'net': physnet})
-                sys.exit(1)
-
-    def validate_bridge_mappings(self):
-        for physnet, bridge in self.bridge_mappings.items():
-            if not ip_lib.device_exists(bridge):
-                LOG.error(_LE("Bridge %(brq)s for physical network %(net)s"
-                              " does not exist. Agent terminated!"),
-                          {'brq': bridge, 'net': physnet})
-                sys.exit(1)
-
-    def validate_vxlan_group_with_local_ip(self):
-        if not cfg.CONF.VXLAN.vxlan_group:
-            return
-        try:
-            ip_addr = netaddr.IPAddress(self.local_ip)
-            # Ensure the configured group address/range is valid and multicast
-            group_net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group)
-            if not group_net.is_multicast():
-                raise ValueError()
-            if not ip_addr.version == group_net.version:
-                raise ValueError()
-        except (netaddr.core.AddrFormatError, ValueError):
-            LOG.error(_LE("Invalid VXLAN Group: %(group)s, must be an address "
-                          "or network (in CIDR notation) in a multicast "
-                          "range of the same address family as local_ip: "
-                          "%(ip)s"),
-                      {'group': cfg.CONF.VXLAN.vxlan_group,
-                       'ip': self.local_ip})
-            sys.exit(1)
-
-    def get_local_ip_device(self, local_ip):
-        """Return the device with local_ip on the host."""
-        device = self.ip.get_device_by_ip(local_ip)
-        if not device:
-            LOG.error(_LE("Tunneling cannot be enabled without the local_ip "
-                          "bound to an interface on the host. Please "
-                          "configure local_ip %s on the host interface to "
-                          "be used for tunneling and restart the agent."),
-                      local_ip)
-            sys.exit(1)
-        return device
-
-    def get_existing_bridge_name(self, physical_network):
-        if not physical_network:
-            return None
-        return self.bridge_mappings.get(physical_network)
-
-    def get_bridge_name(self, network_id):
-        if not network_id:
-            LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
-                            "bridge name"))
-        bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]
-        return bridge_name
-
-    def get_subinterface_name(self, physical_interface, vlan_id):
-        if not vlan_id:
-            LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect "
-                            "subinterface name"))
-        subinterface_name = '%s.%s' % (physical_interface, vlan_id)
-        return subinterface_name
-
-    def get_tap_device_name(self, interface_id):
-        if not interface_id:
-            LOG.warning(_LW("Invalid Interface ID, will lead to incorrect "
-                            "tap device name"))
-        tap_device_name = constants.TAP_DEVICE_PREFIX + interface_id[0:11]
-        return tap_device_name
-
-    def get_vxlan_device_name(self, segmentation_id):
-        if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI:
-            return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
-        else:
-            LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to "
-                            "incorrect vxlan device name"), segmentation_id)
-
-    def get_vxlan_group(self, segmentation_id):
-        net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group)
-        # Map the segmentation ID to (one of) the group address(es)
-        return str(net.network +
-                   (int(segmentation_id) & int(net.hostmask)))
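-
-    # Worked example (values assumed): with vxlan_group = 239.0.0.0/8,
-    # net.network is 239.0.0.0 and net.hostmask is 0.255.255.255, so
-    # segmentation_id 658188 (0x0A0B0C) maps to group 239.10.11.12;
-    # VNIs are folded into the host bits of the configured range.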
-
-    def get_deletable_bridges(self):
-        bridge_list = bridge_lib.get_bridge_names()
-        bridges = {b for b in bridge_list if b.startswith(BRIDGE_NAME_PREFIX)}
-        bridges.difference_update(self.bridge_mappings.values())
-        return bridges
-
-    def get_tap_devices_count(self, bridge_name):
-        if_list = bridge_lib.BridgeDevice(bridge_name).get_interfaces()
-        return len([interface for interface in if_list if
-                    interface.startswith(constants.TAP_DEVICE_PREFIX)])
-
-    def ensure_vlan_bridge(self, network_id, phy_bridge_name,
-                           physical_interface, vlan_id):
-        """Create a vlan and bridge unless they already exist."""
-        interface = self.ensure_vlan(physical_interface, vlan_id)
-        if phy_bridge_name:
-            return self.ensure_bridge(phy_bridge_name)
-        else:
-            bridge_name = self.get_bridge_name(network_id)
-            ips, gateway = self.get_interface_details(interface)
-            if self.ensure_bridge(bridge_name, interface, ips, gateway):
-                return interface
-
-    def ensure_vxlan_bridge(self, network_id, segmentation_id):
-        """Create a vxlan and bridge unless they already exist."""
-        interface = self.ensure_vxlan(segmentation_id)
-        if not interface:
-            LOG.error(_LE("Failed creating vxlan interface for "
-                          "%(segmentation_id)s"),
-                      {segmentation_id: segmentation_id})
-            return
-        bridge_name = self.get_bridge_name(network_id)
-        self.ensure_bridge(bridge_name, interface)
-        return interface
-
-    def get_interface_details(self, interface):
-        device = self.ip.device(interface)
-        ips = device.addr.list(scope='global')
-
-        # Update default gateway if necessary
-        gateway = device.route.get_gateway(scope='global')
-        return ips, gateway
-
-    def ensure_flat_bridge(self, network_id, phy_bridge_name,
-                           physical_interface):
-        """Create a non-vlan bridge unless it already exists."""
-        if phy_bridge_name:
-            return self.ensure_bridge(phy_bridge_name)
-        else:
-            bridge_name = self.get_bridge_name(network_id)
-            ips, gateway = self.get_interface_details(physical_interface)
-            if self.ensure_bridge(bridge_name, physical_interface, ips,
-                                  gateway):
-                return physical_interface
-
-    def ensure_local_bridge(self, network_id, phy_bridge_name):
-        """Create a local bridge unless it already exists."""
-        if phy_bridge_name:
-            bridge_name = phy_bridge_name
-        else:
-            bridge_name = self.get_bridge_name(network_id)
-        return self.ensure_bridge(bridge_name)
-
-    def ensure_vlan(self, physical_interface, vlan_id):
-        """Create a vlan unless it already exists."""
-        interface = self.get_subinterface_name(physical_interface, vlan_id)
-        if not ip_lib.device_exists(interface):
-            LOG.debug("Creating subinterface %(interface)s for "
-                      "VLAN %(vlan_id)s on interface "
-                      "%(physical_interface)s",
-                      {'interface': interface, 'vlan_id': vlan_id,
-                       'physical_interface': physical_interface})
-            if utils.execute(['ip', 'link', 'add', 'link',
-                              physical_interface,
-                              'name', interface, 'type', 'vlan', 'id',
-                              vlan_id], run_as_root=True):
-                return
-            if utils.execute(['ip', 'link', 'set',
-                              interface, 'up'], run_as_root=True):
-                return
-            LOG.debug("Done creating subinterface %s", interface)
-        return interface
-
-    def ensure_vxlan(self, segmentation_id):
-        """Create a vxlan unless it already exists."""
-        interface = self.get_vxlan_device_name(segmentation_id)
-        if not ip_lib.device_exists(interface):
-            LOG.debug("Creating vxlan interface %(interface)s for "
-                      "VNI %(segmentation_id)s",
-                      {'interface': interface,
-                       'segmentation_id': segmentation_id})
-            args = {'dev': self.local_int}
-            if self.vxlan_mode == lconst.VXLAN_MCAST:
-                args['group'] = self.get_vxlan_group(segmentation_id)
-            if cfg.CONF.VXLAN.ttl:
-                args['ttl'] = cfg.CONF.VXLAN.ttl
-            if cfg.CONF.VXLAN.tos:
-                args['tos'] = cfg.CONF.VXLAN.tos
-            if cfg.CONF.VXLAN.l2_population:
-                args['proxy'] = True
-            try:
-                int_vxlan = self.ip.add_vxlan(interface, segmentation_id,
-                                              **args)
-            except RuntimeError:
-                with excutils.save_and_reraise_exception() as ctxt:
-                    # perform this check after an attempt rather than before
-                    # to avoid excessive lookups and a possible race condition.
-                    if ip_lib.vxlan_in_use(segmentation_id):
-                        ctxt.reraise = False
-                        LOG.error(_LE("Unable to create VXLAN interface for "
-                                      "VNI %s because it is in use by another "
-                                      "interface."), segmentation_id)
-                        return None
-            int_vxlan.link.set_up()
-            LOG.debug("Done creating vxlan interface %s", interface)
-        return interface
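-
-    # For reference, the add_vxlan call above corresponds roughly to the
-    # iproute2 commands below (values hypothetical):
-    #
-    #     ip link add vxlan-1001 type vxlan id 1001 group 239.0.3.233 \
-    #         dev eth1 ttl 32 proxy
-    #     ip link set vxlan-1001 up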
-
-    def update_interface_ip_details(self, destination, source, ips,
-                                    gateway):
-        if ips or gateway:
-            dst_device = self.ip.device(destination)
-            src_device = self.ip.device(source)
-
-        # Append IPs to the bridge if necessary
-        if ips:
-            for ip in ips:
-                dst_device.addr.add(cidr=ip['cidr'])
-
-        if gateway:
-            # Ensure that the gateway can be updated by changing the metric
-            metric = 100
-            if 'metric' in gateway:
-                metric = gateway['metric'] - 1
-            dst_device.route.add_gateway(gateway=gateway['gateway'],
-                                         metric=metric)
-            src_device.route.delete_gateway(gateway=gateway['gateway'])
-
-        # Remove IPs from the interface
-        if ips:
-            for ip in ips:
-                src_device.addr.delete(cidr=ip['cidr'])
-
-    def _bridge_exists_and_ensure_up(self, bridge_name):
-        """Check if the bridge exists and make sure it is up."""
-        br = ip_lib.IPDevice(bridge_name)
-        br.set_log_fail_as_error(False)
-        try:
-            # If the device doesn't exist this will throw a RuntimeError
-            br.link.set_up()
-        except RuntimeError:
-            return False
-        return True
-
-    def ensure_bridge(self, bridge_name, interface=None, ips=None,
-                      gateway=None):
-        """Create a bridge unless it already exists."""
-        # _bridge_exists_and_ensure_up instead of device_exists is used here
-        # because there are cases where the bridge exists but it's not UP,
-        # for example:
-        # 1) A greenthread was executing this function and had not yet
-        #    executed "ip link set bridge_name up" before eventlet switched
-        #    to this thread running the same function
-        # 2) The Nova VIF driver was running concurrently and had just created
-        #    the bridge, but had not yet put it UP
-        if not self._bridge_exists_and_ensure_up(bridge_name):
-            LOG.debug("Starting bridge %(bridge_name)s for subinterface "
-                      "%(interface)s",
-                      {'bridge_name': bridge_name, 'interface': interface})
-            bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name)
-            if bridge_device.setfd(0):
-                return
-            if bridge_device.disable_stp():
-                return
-            if bridge_device.disable_ipv6():
-                return
-            if bridge_device.link.set_up():
-                return
-            LOG.debug("Done starting bridge %(bridge_name)s for "
-                      "subinterface %(interface)s",
-                      {'bridge_name': bridge_name, 'interface': interface})
-        else:
-            bridge_device = bridge_lib.BridgeDevice(bridge_name)
-
-        if not interface:
-            return bridge_name
-
-        # Update IP info if necessary
-        self.update_interface_ip_details(bridge_name, interface, ips, gateway)
-
-        # Check if the interface is part of the bridge
-        if not bridge_device.owns_interface(interface):
-            try:
-                # Check if the interface is not enslaved in another bridge
-                bridge = bridge_lib.BridgeDevice.get_interface_bridge(
-                    interface)
-                if bridge:
-                    bridge.delif(interface)
-
-                bridge_device.addif(interface)
-            except Exception as e:
-                LOG.error(_LE("Unable to add %(interface)s to %(bridge_name)s"
-                              "! Exception: %(e)s"),
-                          {'interface': interface, 'bridge_name': bridge_name,
-                           'e': e})
-                return
-        return bridge_name
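-
-    # For reference, the bridge creation above is roughly equivalent to
-    # (bridge name hypothetical; IPv6 is additionally disabled through the
-    # bridge_lib wrapper):
-    #
-    #     brctl addbr brq12345678-90
-    #     brctl setfd brq12345678-90 0
-    #     brctl stp brq12345678-90 off
-    #     ip link set brq12345678-90 up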
-
-    def ensure_physical_in_bridge(self, network_id,
-                                  network_type,
-                                  physical_network,
-                                  segmentation_id):
-        if network_type == p_const.TYPE_VXLAN:
-            if self.vxlan_mode == lconst.VXLAN_NONE:
-                LOG.error(_LE("Unable to add vxlan interface for network %s"),
-                          network_id)
-                return
-            return self.ensure_vxlan_bridge(network_id, segmentation_id)
-
-        # NOTE(nick-ma-z): Obtain mappings of physical bridge and interfaces
-        physical_bridge = self.get_existing_bridge_name(physical_network)
-        physical_interface = self.interface_mappings.get(physical_network)
-        if not physical_bridge and not physical_interface:
-            LOG.error(_LE("No bridge or interface mappings"
-                          " for physical network %s"),
-                      physical_network)
-            return
-        if network_type == p_const.TYPE_FLAT:
-            return self.ensure_flat_bridge(network_id, physical_bridge,
-                                           physical_interface)
-        elif network_type == p_const.TYPE_VLAN:
-            return self.ensure_vlan_bridge(network_id, physical_bridge,
-                                           physical_interface,
-                                           segmentation_id)
-        else:
-            LOG.error(_LE("Unknown network_type %(network_type)s for network "
-                          "%(network_id)s."), {network_type: network_type,
-                                             network_id: network_id})
-
-    def add_tap_interface(self, network_id, network_type, physical_network,
-                          segmentation_id, tap_device_name, device_owner):
-        """Add tap interface.
-
-        If a VIF has been plugged into a network, this function will
-        add the corresponding tap device to the relevant bridge.
-        """
-        if not ip_lib.device_exists(tap_device_name):
-            LOG.debug("Tap device: %s does not exist on "
-                      "this host, skipped", tap_device_name)
-            return False
-
-        bridge_name = self.get_existing_bridge_name(physical_network)
-        if not bridge_name:
-            bridge_name = self.get_bridge_name(network_id)
-
-        if network_type == p_const.TYPE_LOCAL:
-            self.ensure_local_bridge(network_id, bridge_name)
-        else:
-            phy_dev_name = self.ensure_physical_in_bridge(network_id,
-                                                          network_type,
-                                                          physical_network,
-                                                          segmentation_id)
-            if not phy_dev_name:
-                return False
-            self.ensure_tap_mtu(tap_device_name, phy_dev_name)
-        # Avoid messing with plugging devices into a bridge that the agent
-        # does not own
-        if device_owner.startswith(constants.DEVICE_OWNER_PREFIXES):
-            # Check if device needs to be added to bridge
-            if not bridge_lib.BridgeDevice.get_interface_bridge(
-                tap_device_name):
-                data = {'tap_device_name': tap_device_name,
-                        'bridge_name': bridge_name}
-                LOG.debug("Adding device %(tap_device_name)s to bridge "
-                          "%(bridge_name)s", data)
-                if bridge_lib.BridgeDevice(bridge_name).addif(tap_device_name):
-                    return False
-        else:
-            data = {'tap_device_name': tap_device_name,
-                    'device_owner': device_owner,
-                    'bridge_name': bridge_name}
-            LOG.debug("Skip adding device %(tap_device_name)s to "
-                      "%(bridge_name)s. It is owned by %(device_owner)s and "
-                      "thus added elsewhere.", data)
-        return True
-
-    def ensure_tap_mtu(self, tap_dev_name, phy_dev_name):
-        """Ensure the MTU on the tap is the same as the physical device."""
-        phy_dev_mtu = ip_lib.IPDevice(phy_dev_name).link.mtu
-        ip_lib.IPDevice(tap_dev_name).link.set_mtu(phy_dev_mtu)
-
-    def add_interface(self, network_id, network_type, physical_network,
-                      segmentation_id, port_id, device_owner):
-        self.network_map[network_id] = NetworkSegment(network_type,
-                                                      physical_network,
-                                                      segmentation_id)
-        tap_device_name = self.get_tap_device_name(port_id)
-        return self.add_tap_interface(network_id, network_type,
-                                      physical_network, segmentation_id,
-                                      tap_device_name, device_owner)
-
-    def delete_bridge(self, bridge_name):
-        bridge_device = bridge_lib.BridgeDevice(bridge_name)
-        if bridge_device.exists():
-            physical_interfaces = set(self.interface_mappings.values())
-            interfaces_on_bridge = bridge_device.get_interfaces()
-            for interface in interfaces_on_bridge:
-                self.remove_interface(bridge_name, interface)
-
-                if interface.startswith(VXLAN_INTERFACE_PREFIX):
-                    self.delete_interface(interface)
-                else:
-                    # Match the vlan/flat interface in the bridge.
-                    # If the bridge has an IP, it means that this IP was
-                    # moved from the current interface, which also means that
-                    # this interface was not created by the agent.
-                    ips, gateway = self.get_interface_details(bridge_name)
-                    if ips:
-                        self.update_interface_ip_details(interface,
-                                                         bridge_name,
-                                                         ips, gateway)
-                    elif interface not in physical_interfaces:
-                        self.delete_interface(interface)
-
-            LOG.debug("Deleting bridge %s", bridge_name)
-            if bridge_device.link.set_down():
-                return
-            if bridge_device.delbr():
-                return
-            LOG.debug("Done deleting bridge %s", bridge_name)
-
-        else:
-            LOG.debug("Cannot delete bridge %s; it does not exist",
-                      bridge_name)
-
-    def remove_interface(self, bridge_name, interface_name):
-        bridge_device = bridge_lib.BridgeDevice(bridge_name)
-        if bridge_device.exists():
-            if not bridge_lib.is_bridged_interface(interface_name):
-                return True
-            LOG.debug("Removing device %(interface_name)s from bridge "
-                      "%(bridge_name)s",
-                      {'interface_name': interface_name,
-                       'bridge_name': bridge_name})
-            if bridge_device.delif(interface_name):
-                return False
-            LOG.debug("Done removing device %(interface_name)s from bridge "
-                      "%(bridge_name)s",
-                      {'interface_name': interface_name,
-                       'bridge_name': bridge_name})
-            return True
-        else:
-            LOG.debug("Cannot remove device %(interface_name)s bridge "
-                      "%(bridge_name)s does not exist",
-                      {'interface_name': interface_name,
-                       'bridge_name': bridge_name})
-            return False
-
-    def delete_interface(self, interface):
-        device = self.ip.device(interface)
-        if device.exists():
-            LOG.debug("Deleting interface %s",
-                      interface)
-            device.link.set_down()
-            device.link.delete()
-            LOG.debug("Done deleting interface %s", interface)
-
-    def get_tap_devices(self):
-        devices = set()
-        for device in bridge_lib.get_bridge_names():
-            if device.startswith(constants.TAP_DEVICE_PREFIX):
-                devices.add(device)
-        return devices
-
-    def vxlan_ucast_supported(self):
-        if not cfg.CONF.VXLAN.l2_population:
-            return False
-        if not ip_lib.iproute_arg_supported(
-                ['bridge', 'fdb'], 'append'):
-            LOG.warning(_LW('Option "%(option)s" must be supported by command '
-                            '"%(command)s" to enable %(mode)s mode'),
-                        {'option': 'append',
-                         'command': 'bridge fdb',
-                         'mode': 'VXLAN UCAST'})
-            return False
-
-        test_iface = None
-        for seg_id in moves.range(1, p_const.MAX_VXLAN_VNI + 1):
-            if (ip_lib.device_exists(self.get_vxlan_device_name(seg_id))
-                    or ip_lib.vxlan_in_use(seg_id)):
-                continue
-            test_iface = self.ensure_vxlan(seg_id)
-            break
-        else:
-            LOG.error(_LE('No valid Segmentation ID to perform UCAST test.'))
-            return False
-
-        try:
-            utils.execute(
-                cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0],
-                     'dev', test_iface, 'dst', '1.1.1.1'],
-                run_as_root=True, log_fail_as_error=False)
-            return True
-        except RuntimeError:
-            return False
-        finally:
-            self.delete_interface(test_iface)
-
-    def vxlan_mcast_supported(self):
-        if not cfg.CONF.VXLAN.vxlan_group:
-            LOG.warning(_LW('VXLAN multicast group(s) must be provided in '
-                            'vxlan_group option to enable VXLAN MCAST mode'))
-            return False
-        if not ip_lib.iproute_arg_supported(
-                ['ip', 'link', 'add', 'type', 'vxlan'],
-                'proxy'):
-            LOG.warning(_LW('Option "%(option)s" must be supported by command '
-                            '"%(command)s" to enable %(mode)s mode'),
-                        {'option': 'proxy',
-                         'command': 'ip link add type vxlan',
-                         'mode': 'VXLAN MCAST'})
-
-            return False
-        return True
-
-    def check_vxlan_support(self):
-        self.vxlan_mode = lconst.VXLAN_NONE
-
-        if self.vxlan_ucast_supported():
-            self.vxlan_mode = lconst.VXLAN_UCAST
-        elif self.vxlan_mcast_supported():
-            self.vxlan_mode = lconst.VXLAN_MCAST
-        else:
-            raise exceptions.VxlanNetworkUnsupported()
-        LOG.debug('Using %s VXLAN mode', self.vxlan_mode)
-
-    def fdb_ip_entry_exists(self, mac, ip, interface):
-        entries = utils.execute(['ip', 'neigh', 'show', 'to', ip,
-                                 'dev', interface],
-                                run_as_root=True)
-        return mac in entries
-
-    def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None):
-        entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface],
-                                run_as_root=True)
-        if not agent_ip:
-            return mac in entries
-
-        return (agent_ip in entries and mac in entries)
-
-    def add_fdb_ip_entry(self, mac, ip, interface):
-        ip_lib.IPDevice(interface).neigh.add(ip, mac)
-
-    def remove_fdb_ip_entry(self, mac, ip, interface):
-        ip_lib.IPDevice(interface).neigh.delete(ip, mac)
-
-    def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"):
-        utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface,
-                       'dst', agent_ip],
-                      run_as_root=True,
-                      check_exit_code=False)
-
-    def remove_fdb_bridge_entry(self, mac, agent_ip, interface):
-        utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface,
-                       'dst', agent_ip],
-                      run_as_root=True,
-                      check_exit_code=False)
-
-    def add_fdb_entries(self, agent_ip, ports, interface):
-        for mac, ip in ports:
-            if mac != constants.FLOODING_ENTRY[0]:
-                self.add_fdb_ip_entry(mac, ip, interface)
-                self.add_fdb_bridge_entry(mac, agent_ip, interface,
-                                          operation="replace")
-            elif self.vxlan_mode == lconst.VXLAN_UCAST:
-                if self.fdb_bridge_entry_exists(mac, interface):
-                    self.add_fdb_bridge_entry(mac, agent_ip, interface,
-                                              "append")
-                else:
-                    self.add_fdb_bridge_entry(mac, agent_ip, interface)
-
-    def remove_fdb_entries(self, agent_ip, ports, interface):
-        for mac, ip in ports:
-            if mac != constants.FLOODING_ENTRY[0]:
-                self.remove_fdb_ip_entry(mac, ip, interface)
-                self.remove_fdb_bridge_entry(mac, agent_ip, interface)
-            elif self.vxlan_mode == lconst.VXLAN_UCAST:
-                self.remove_fdb_bridge_entry(mac, agent_ip, interface)
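-
-    # For reference, the helpers above shell out to iproute2, e.g.
-    # (values hypothetical):
-    #
-    #     bridge fdb replace fa:16:3e:aa:bb:cc dev vxlan-1001 dst 192.0.2.20
-    #     bridge fdb del fa:16:3e:aa:bb:cc dev vxlan-1001 dst 192.0.2.20
-    #     ip neigh show to 10.0.0.5 dev vxlan-1001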
-
-
-class LinuxBridgeRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
-                              l2pop_rpc.L2populationRpcCallBackMixin):
-
-    # Set RPC API version to 1.0 by default.
-    # history
-    #   1.1 Support Security Group RPC
-    #   1.3 Added param devices_to_update to security_groups_provider_updated
-    #   1.4 Added support for network_update
-    target = oslo_messaging.Target(version='1.4')
-
-    def __init__(self, context, agent, sg_agent):
-        super(LinuxBridgeRpcCallbacks, self).__init__()
-        self.context = context
-        self.agent = agent
-        self.sg_agent = sg_agent
-
-    def network_delete(self, context, **kwargs):
-        LOG.debug("network_delete received")
-        network_id = kwargs.get('network_id')
-
-        # NOTE(nick-ma-z): Don't remove pre-existing user-defined bridges
-        if network_id in self.agent.br_mgr.network_map:
-            phynet = self.agent.br_mgr.network_map[network_id].physical_network
-            if phynet and phynet in self.agent.br_mgr.bridge_mappings:
-                LOG.info(_LI("Physical network %s is defined in "
-                             "bridge_mappings and cannot be deleted."),
-                         network_id)
-                return
-        else:
-            LOG.error(_LE("Network %s is not available."), network_id)
-            return
-
-        bridge_name = self.agent.br_mgr.get_bridge_name(network_id)
-        LOG.debug("Delete %s", bridge_name)
-        self.agent.br_mgr.delete_bridge(bridge_name)
-
-    def port_update(self, context, **kwargs):
-        port_id = kwargs['port']['id']
-        tap_name = self.agent.br_mgr.get_tap_device_name(port_id)
-        # Put the tap name in the updated_devices set.
-        # Do not store port details, as if they're used for processing
-        # notifications there is no guarantee the notifications are
-        # processed in the same order as the relevant API requests.
-        self.agent.updated_devices.add(tap_name)
-        LOG.debug("port_update RPC received for port: %s", port_id)
-
-    def network_update(self, context, **kwargs):
-        network_id = kwargs['network']['id']
-        LOG.debug("network_update message processed for network "
-                  "%(network_id)s, with ports: %(ports)s",
-                  {'network_id': network_id,
-                   'ports': self.agent.network_ports[network_id]})
-        for port_data in self.agent.network_ports[network_id]:
-            self.agent.updated_devices.add(port_data['device'])
-
-    def fdb_add(self, context, fdb_entries):
-        LOG.debug("fdb_add received")
-        for network_id, values in fdb_entries.items():
-            segment = self.agent.br_mgr.network_map.get(network_id)
-            if not segment:
-                return
-
-            if segment.network_type != p_const.TYPE_VXLAN:
-                return
-
-            interface = self.agent.br_mgr.get_vxlan_device_name(
-                segment.segmentation_id)
-
-            agent_ports = values.get('ports')
-            for agent_ip, ports in agent_ports.items():
-                if agent_ip == self.agent.br_mgr.local_ip:
-                    continue
-
-                self.agent.br_mgr.add_fdb_entries(agent_ip,
-                                                  ports,
-                                                  interface)
-
-    def fdb_remove(self, context, fdb_entries):
-        LOG.debug("fdb_remove received")
-        for network_id, values in fdb_entries.items():
-            segment = self.agent.br_mgr.network_map.get(network_id)
-            if not segment:
-                return
-
-            if segment.network_type != p_const.TYPE_VXLAN:
-                return
-
-            interface = self.agent.br_mgr.get_vxlan_device_name(
-                segment.segmentation_id)
-
-            agent_ports = values.get('ports')
-            for agent_ip, ports in agent_ports.items():
-                if agent_ip == self.agent.br_mgr.local_ip:
-                    continue
-
-                self.agent.br_mgr.remove_fdb_entries(agent_ip,
-                                                     ports,
-                                                     interface)
-
-    def _fdb_chg_ip(self, context, fdb_entries):
-        LOG.debug("update chg_ip received")
-        for network_id, agent_ports in fdb_entries.items():
-            segment = self.agent.br_mgr.network_map.get(network_id)
-            if not segment:
-                return
-
-            if segment.network_type != p_const.TYPE_VXLAN:
-                return
-
-            interface = self.agent.br_mgr.get_vxlan_device_name(
-                segment.segmentation_id)
-
-            for agent_ip, state in agent_ports.items():
-                if agent_ip == self.agent.br_mgr.local_ip:
-                    continue
-
-                after = state.get('after', [])
-                for mac, ip in after:
-                    self.agent.br_mgr.add_fdb_ip_entry(mac, ip, interface)
-
-                before = state.get('before', [])
-                for mac, ip in before:
-                    self.agent.br_mgr.remove_fdb_ip_entry(mac, ip, interface)
-
-    def fdb_update(self, context, fdb_entries):
-        LOG.debug("fdb_update received")
-        for action, values in fdb_entries.items():
-            method = '_fdb_' + action
-            if not hasattr(self, method):
-                raise NotImplementedError()
-
-            getattr(self, method)(context, values)
-
-
-class LinuxBridgeNeutronAgentRPC(service.Service):
-
-    def __init__(self, bridge_mappings, interface_mappings, polling_interval,
-                 quitting_rpc_timeout):
-        """Constructor.
-
-        :param bridge_mappings: dict mapping physical_networks to
-               physical_bridges.
-        :param interface_mappings: dict mapping physical_networks to
-               physical_interfaces.
-        :param polling_interval: interval (secs) to poll DB.
-        :param quitting_rpc_timeout: timeout in seconds for rpc calls after
-               stop is called.
-        """
-        super(LinuxBridgeNeutronAgentRPC, self).__init__()
-        self.interface_mappings = interface_mappings
-        self.bridge_mappings = bridge_mappings
-        self.polling_interval = polling_interval
-        self.quitting_rpc_timeout = quitting_rpc_timeout
-
-    def start(self):
-        self.prevent_arp_spoofing = cfg.CONF.AGENT.prevent_arp_spoofing
-        self.setup_linux_bridge(self.bridge_mappings, self.interface_mappings)
-
-        # stores received port_updates and port_deletes for
-        # processing by the main loop
-        self.updated_devices = set()
-
-        # stores all configured ports on agent
-        self.network_ports = collections.defaultdict(list)
-        # flag to do a sync after revival
-        self.fullsync = False
-        self.context = context.get_admin_context_without_session()
-        self.setup_rpc(self.interface_mappings.values())
-        self.init_extension_manager(self.connection)
-
-        configurations = {
-            'bridge_mappings': self.bridge_mappings,
-            'interface_mappings': self.interface_mappings,
-            'extensions': self.ext_manager.names()
-        }
-        if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE:
-            configurations['tunneling_ip'] = self.br_mgr.local_ip
-            configurations['tunnel_types'] = [p_const.TYPE_VXLAN]
-            configurations['l2_population'] = cfg.CONF.VXLAN.l2_population
-        self.agent_state = {
-            'binary': 'neutron-linuxbridge-agent',
-            'host': cfg.CONF.host,
-            'topic': constants.L2_AGENT_TOPIC,
-            'configurations': configurations,
-            'agent_type': constants.AGENT_TYPE_LINUXBRIDGE,
-            'start_flag': True}
-
-        report_interval = cfg.CONF.AGENT.report_interval
-        if report_interval:
-            heartbeat = loopingcall.FixedIntervalLoopingCall(
-                self._report_state)
-            heartbeat.start(interval=report_interval)
-        self.daemon_loop()
-
-    def stop(self, graceful=True):
-        LOG.info(_LI("Stopping linuxbridge agent."))
-        if graceful and self.quitting_rpc_timeout:
-            self.set_rpc_timeout(self.quitting_rpc_timeout)
-        super(LinuxBridgeNeutronAgentRPC, self).stop(graceful)
-
-    def reset(self):
-        common_config.setup_logging()
-
-    def _report_state(self):
-        try:
-            devices = len(self.br_mgr.get_tap_devices())
-            self.agent_state.get('configurations')['devices'] = devices
-            agent_status = self.state_rpc.report_state(self.context,
-                                                       self.agent_state,
-                                                       True)
-            if agent_status == constants.AGENT_REVIVED:
-                LOG.info(_LI('Agent has just been revived. '
-                             'Doing a full sync.'))
-                self.fullsync = True
-            self.agent_state.pop('start_flag', None)
-        except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
-
-    def setup_rpc(self, physical_interfaces):
-        if physical_interfaces:
-            mac = utils.get_interface_mac(physical_interfaces[0])
-        else:
-            devices = ip_lib.IPWrapper().get_devices(True)
-            if devices:
-                mac = utils.get_interface_mac(devices[0].name)
-            else:
-                LOG.error(_LE("Unable to obtain MAC address for unique ID. "
-                              "Agent terminated!"))
-                exit(1)
-
-        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
-        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
-        self.sg_agent = sg_rpc.SecurityGroupAgentRpc(
-            self.context, self.sg_plugin_rpc, defer_refresh_firewall=True)
-
-        self.agent_id = '%s%s' % ('lb', (mac.replace(":", "")))
-        LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
-
-        self.topic = topics.AGENT
-        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
-        # RPC network init
-        # Handle updates from service
-        self.endpoints = [LinuxBridgeRpcCallbacks(self.context, self,
-                                                  self.sg_agent)]
-        # Define the listening consumers for the agent
-        consumers = [[topics.PORT, topics.UPDATE],
-                     [topics.NETWORK, topics.DELETE],
-                     [topics.NETWORK, topics.UPDATE],
-                     [topics.SECURITY_GROUP, topics.UPDATE]]
-
-        if cfg.CONF.VXLAN.l2_population:
-            consumers.append([topics.L2POPULATION, topics.UPDATE])
-        self.connection = agent_rpc.create_consumers(self.endpoints,
-                                                     self.topic,
-                                                     consumers)
-
-    def init_extension_manager(self, connection):
-        ext_manager.register_opts(cfg.CONF)
-        self.ext_manager = (
-            ext_manager.AgentExtensionsManager(cfg.CONF))
-        self.ext_manager.initialize(
-            connection, lconst.EXTENSION_DRIVER_TYPE)
-
-    def setup_linux_bridge(self, bridge_mappings, interface_mappings):
-        self.br_mgr = LinuxBridgeManager(bridge_mappings, interface_mappings)
-
-    def _ensure_port_admin_state(self, port_id, admin_state_up):
-        LOG.debug("Setting admin_state_up to %s for port %s",
-                  admin_state_up, port_id)
-        tap_name = self.br_mgr.get_tap_device_name(port_id)
-        if admin_state_up:
-            ip_lib.IPDevice(tap_name).link.set_up()
-        else:
-            ip_lib.IPDevice(tap_name).link.set_down()
-
-    def _clean_network_ports(self, device):
-        for netid, ports_list in self.network_ports.items():
-            for port_data in ports_list:
-                if device == port_data['device']:
-                    ports_list.remove(port_data)
-                    if not ports_list:
-                        self.network_ports.pop(netid)
-                    return port_data['port_id']
-
-    def _update_network_ports(self, network_id, port_id, device):
-        self._clean_network_ports(device)
-        self.network_ports[network_id].append({
-            "port_id": port_id,
-            "device": device
-        })
-
-    def process_network_devices(self, device_info):
-        resync_a = False
-        resync_b = False
-
-        self.sg_agent.setup_port_filters(device_info.get('added'),
-                                         device_info.get('updated'))
-        # Updated devices are processed the same as new ones, as their
-        # admin_state_up may have changed. The set union prevents duplicating
-        # work when a device is new and updated in the same polling iteration.
-        devices_added_updated = (set(device_info.get('added'))
-                                 | set(device_info.get('updated')))
-        if devices_added_updated:
-            resync_a = self.treat_devices_added_updated(devices_added_updated)
-
-        if device_info.get('removed'):
-            resync_b = self.treat_devices_removed(device_info['removed'])
-        # If one of the above operations fails => resync with plugin
-        return (resync_a | resync_b)
-
-    def treat_devices_added_updated(self, devices):
-        try:
-            devices_details_list = self.plugin_rpc.get_devices_details_list(
-                self.context, devices, self.agent_id)
-        except Exception:
-            LOG.exception(_LE("Unable to get port details for %s"), devices)
-            # resync is needed
-            return True
-
-        for device_details in devices_details_list:
-            device = device_details['device']
-            LOG.debug("Port %s added", device)
-
-            if 'port_id' in device_details:
-                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
-                         {'device': device, 'details': device_details})
-                if self.prevent_arp_spoofing:
-                    port = self.br_mgr.get_tap_device_name(
-                        device_details['port_id'])
-                    arp_protect.setup_arp_spoofing_protection(port,
-                                                              device_details)
-                # create the networking for the port
-                network_type = device_details.get('network_type')
-                segmentation_id = device_details.get('segmentation_id')
-                tap_in_bridge = self.br_mgr.add_interface(
-                    device_details['network_id'], network_type,
-                    device_details['physical_network'], segmentation_id,
-                    device_details['port_id'], device_details['device_owner'])
-                # REVISIT(scheuran): Changed the way port admin_state_up
-                # is implemented.
-                #
-                # Old lb implementation:
-                # - admin_state_up: ensure that tap is plugged into bridge
-                # - admin_state_down: remove tap from bridge
-                # New lb implementation:
-                # - admin_state_up: set tap device state to up
-                # - admin_state_down: set tap device state to down
-                #
-                # However, both approaches could result in races with
-                # nova/libvirt and therefore in an invalid system state in
-                # the scenario where an instance is booted with a port
-                # configured with admin_state_up = False:
-                #
-                # Libvirt does the following actions in exactly
-                # this order (see libvirt virnetdevtap.c)
-                #     1) Create the tap device, set its MAC and MTU
-                #     2) Plug the tap into the bridge
-                #     3) Set the tap online
-                #
-                # Old lb implementation:
-                #   A race could occur if the lb agent removes the tap device
-                #   right after step 1). Then libvirt will add it to the bridge
-                #   again in step 2).
-                # New lb implementation:
-                #   The race could occur if the lb agent sets the tap device
-                #   state to down right after step 2). In step 3) libvirt
-                #   might set it to up again.
-                #
-                # This is not an issue if an instance is booted with a port
-                # configured with admin_state_up = True. Libvirt would just
-                # set the tap device up again.
-                #
-                # This refactoring is recommended for the following reasons:
-                # 1) An existing race with libvirt caused by the behavior of
-                #    the old implementation. See Bug #1312016
-                # 2) The new code is much more readable
-                self._ensure_port_admin_state(device_details['port_id'],
-                                              device_details['admin_state_up'])
-                # update plugin about port status if admin_state is up
-                if device_details['admin_state_up']:
-                    if tap_in_bridge:
-                        self.plugin_rpc.update_device_up(self.context,
-                                                         device,
-                                                         self.agent_id,
-                                                         cfg.CONF.host)
-                    else:
-                        self.plugin_rpc.update_device_down(self.context,
-                                                           device,
-                                                           self.agent_id,
-                                                           cfg.CONF.host)
-                self._update_network_ports(device_details['network_id'],
-                                           device_details['port_id'],
-                                           device_details['device'])
-                self.ext_manager.handle_port(self.context, device_details)
-            else:
-                LOG.info(_LI("Device %s not defined on plugin"), device)
-        return False
-
-    def treat_devices_removed(self, devices):
-        resync = False
-        self.sg_agent.remove_devices_filter(devices)
-        for device in devices:
-            LOG.info(_LI("Attachment %s removed"), device)
-            details = None
-            try:
-                details = self.plugin_rpc.update_device_down(self.context,
-                                                             device,
-                                                             self.agent_id,
-                                                             cfg.CONF.host)
-            except Exception:
-                LOG.exception(_LE("Error occurred while removing port %s"),
-                              device)
-                resync = True
-            if details and details['exists']:
-                LOG.info(_LI("Port %s updated."), device)
-            else:
-                LOG.debug("Device %s not defined on plugin", device)
-            port_id = self._clean_network_ports(device)
-            self.ext_manager.delete_port(self.context,
-                                         {'device': device,
-                                          'port_id': port_id})
-        if self.prevent_arp_spoofing:
-            arp_protect.delete_arp_spoofing_protection(devices)
-        return resync
-
-    def scan_devices(self, previous, sync):
-        device_info = {}
-
-        # Save and reinitialize the set variable that the port_update RPC uses.
-        # This should be thread-safe as the greenthread should not yield
-        # between these two statements.
-        updated_devices = self.updated_devices
-        self.updated_devices = set()
-
-        current_devices = self.br_mgr.get_tap_devices()
-        device_info['current'] = current_devices
-
-        if previous is None:
-            # This is the first iteration of daemon_loop().
-            previous = {'added': set(),
-                        'current': set(),
-                        'updated': set(),
-                        'removed': set()}
-            # clear any orphaned ARP spoofing rules (e.g. interface was
-            # manually deleted)
-            if self.prevent_arp_spoofing:
-                arp_protect.delete_unreferenced_arp_protection(current_devices)
-
-        if sync:
-            # This is the first iteration, or the previous one had a problem.
-            # Re-add all existing devices.
-            device_info['added'] = current_devices
-
-            # Retry cleaning devices that may not have been cleaned properly.
-            # And clean any that disappeared since the previous iteration.
-            device_info['removed'] = (previous['removed'] | previous['current']
-                                      - current_devices)
-
-            # Retry updating devices that may not have been updated properly.
-            # And any that were updated since the previous iteration.
-            # Only update devices that currently exist.
-            device_info['updated'] = ((previous['updated'] | updated_devices)
-                                      & current_devices)
-        else:
-            device_info['added'] = current_devices - previous['current']
-            device_info['removed'] = previous['current'] - current_devices
-            device_info['updated'] = updated_devices & current_devices
-
-        return device_info
-
-    def _device_info_has_changes(self, device_info):
-        return (device_info.get('added')
-                or device_info.get('updated')
-                or device_info.get('removed'))
-
-    def daemon_loop(self):
-        LOG.info(_LI("LinuxBridge Agent RPC Daemon Started!"))
-        device_info = None
-        sync = True
-
-        while True:
-            start = time.time()
-
-            if self.fullsync:
-                sync = True
-                self.fullsync = False
-
-            if sync:
-                LOG.info(_LI("Agent out of sync with plugin!"))
-
-            device_info = self.scan_devices(previous=device_info, sync=sync)
-            sync = False
-
-            if (self._device_info_has_changes(device_info)
-                or self.sg_agent.firewall_refresh_needed()):
-                LOG.debug("Agent loop found changes! %s", device_info)
-                try:
-                    sync = self.process_network_devices(device_info)
-                except Exception:
-                    LOG.exception(_LE("Error in agent loop. Devices info: %s"),
-                                  device_info)
-                    sync = True
-
-            # sleep till end of polling interval
-            elapsed = (time.time() - start)
-            if (elapsed < self.polling_interval):
-                time.sleep(self.polling_interval - elapsed)
-            else:
-                LOG.debug("Loop iteration exceeded interval "
-                          "(%(polling_interval)s vs. %(elapsed)s)!",
-                          {'polling_interval': self.polling_interval,
-                           'elapsed': elapsed})
-
-    def set_rpc_timeout(self, timeout):
-        for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
-                        self.state_rpc):
-            rpc_api.client.timeout = timeout
-
-
-def main():
-    common_config.init(sys.argv[1:])
-
-    common_config.setup_logging()
-    try:
-        interface_mappings = n_utils.parse_mappings(
-            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
-    except ValueError as e:
-        LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
-                      "Agent terminated!"), e)
-        sys.exit(1)
-    LOG.info(_LI("Interface mappings: %s"), interface_mappings)
-
-    try:
-        bridge_mappings = n_utils.parse_mappings(
-            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
-    except ValueError as e:
-        LOG.error(_LE("Parsing bridge_mappings failed: %s. "
-                      "Agent terminated!"), e)
-        sys.exit(1)
-    LOG.info(_LI("Bridge mappings: %s"), bridge_mappings)
-
-    polling_interval = cfg.CONF.AGENT.polling_interval
-    quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
-    agent = LinuxBridgeNeutronAgentRPC(bridge_mappings,
-                                       interface_mappings,
-                                       polling_interval,
-                                       quitting_rpc_timeout)
-    LOG.info(_LI("Agent initialized successfully, now running... "))
-    launcher = service.launch(cfg.CONF, agent)
-    launcher.wait()
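
For reference, the scan_devices() logic deleted above reduces to plain set
algebra over tap device names. A minimal sketch of the non-sync branch,
using hypothetical device names rather than the agent's real state:

    previous = {'current': {'tapA', 'tapB'},
                'updated': set(), 'removed': set()}
    current = {'tapB', 'tapC'}         # tapA vanished, tapC appeared
    updated = {'tapB', 'tapX'}         # port_update RPCs since last loop

    added = current - previous['current']      # {'tapC'}
    removed = previous['current'] - current    # {'tapA'}
    updated_now = updated & current            # {'tapB'}
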
diff --git a/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py b/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py b/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py
deleted file mode 100644 (file)
index 44c842c..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent import securitygroups_rpc
-from neutron.common import constants
-from neutron.extensions import portbindings
-from neutron.plugins.common import constants as p_constants
-from neutron.plugins.ml2.drivers import mech_agent
-
-
-class LinuxbridgeMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
-    """Attach to networks using linuxbridge L2 agent.
-
-    The LinuxbridgeMechanismDriver integrates the ml2 plugin with the
-    linuxbridge L2 agent. Port binding with this driver requires the
-    linuxbridge agent to be running on the port's host, and that agent
-    to have connectivity to at least one segment of the port's
-    network.
-    """
-
-    def __init__(self):
-        sg_enabled = securitygroups_rpc.is_firewall_enabled()
-        super(LinuxbridgeMechanismDriver, self).__init__(
-            constants.AGENT_TYPE_LINUXBRIDGE,
-            portbindings.VIF_TYPE_BRIDGE,
-            {portbindings.CAP_PORT_FILTER: sg_enabled})
-
-    def get_allowed_network_types(self, agent):
-        return (agent['configurations'].get('tunnel_types', []) +
-                [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT,
-                 p_constants.TYPE_VLAN])
-
-    def get_mappings(self, agent):
-        mappings = dict(agent['configurations'].get('interface_mappings', {}),
-                        **agent['configurations'].get('bridge_mappings', {}))
-        return mappings
-
-    def check_vlan_transparency(self, context):
-        """Linuxbridge driver vlan transparency support."""
-        return True
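
The deleted get_mappings() above simply merges the two configuration dicts
reported by the agent; a small illustration with made-up mapping values:

    agent = {'configurations': {
        'interface_mappings': {'physnet1': 'eth1'},
        'bridge_mappings': {'physnet2': 'br-ex'},
    }}
    mappings = dict(agent['configurations'].get('interface_mappings', {}),
                    **agent['configurations'].get('bridge_mappings', {}))
    assert mappings == {'physnet1': 'eth1', 'physnet2': 'br-ex'}
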
diff --git a/neutron/plugins/ml2/drivers/mech_agent.py b/neutron/plugins/ml2/drivers/mech_agent.py
deleted file mode 100644 (file)
index 4d3fb2e..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import six
-
-from oslo_log import log
-
-from neutron._i18n import _LW
-from neutron.extensions import portbindings
-from neutron.plugins.common import constants as p_constants
-from neutron.plugins.ml2 import driver_api as api
-
-LOG = log.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AgentMechanismDriverBase(api.MechanismDriver):
-    """Base class for drivers that attach to networks using an L2 agent.
-
-    The AgentMechanismDriverBase provides common code for mechanism
-    drivers that integrate the ml2 plugin with L2 agents. Port binding
-    with this driver requires the driver's associated agent to be
-    running on the port's host, and that agent to have connectivity to
-    at least one segment of the port's network.
-
-    MechanismDrivers using this base class must pass the agent type to
-    __init__(), and must implement try_to_bind_segment_for_agent().
-    """
-
-    def __init__(self, agent_type,
-                 supported_vnic_types=[portbindings.VNIC_NORMAL]):
-        """Initialize base class for specific L2 agent type.
-
-        :param agent_type: Constant identifying agent type in agents_db
-        :param supported_vnic_types: The binding:vnic_type values we can bind
-        """
-        self.agent_type = agent_type
-        self.supported_vnic_types = supported_vnic_types
-
-    def initialize(self):
-        pass
-
-    def bind_port(self, context):
-        LOG.debug("Attempting to bind port %(port)s on "
-                  "network %(network)s",
-                  {'port': context.current['id'],
-                   'network': context.network.current['id']})
-        vnic_type = context.current.get(portbindings.VNIC_TYPE,
-                                        portbindings.VNIC_NORMAL)
-        if vnic_type not in self.supported_vnic_types:
-            LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
-                      vnic_type)
-            return
-        for agent in context.host_agents(self.agent_type):
-            LOG.debug("Checking agent: %s", agent)
-            if agent['alive']:
-                for segment in context.segments_to_bind:
-                    if self.try_to_bind_segment_for_agent(context, segment,
-                                                          agent):
-                        LOG.debug("Bound using segment: %s", segment)
-                        return
-            else:
-                LOG.warning(_LW("Refusing to bind port %(pid)s to dead agent: "
-                                "%(agent)s"),
-                            {'pid': context.current['id'], 'agent': agent})
-
-    @abc.abstractmethod
-    def try_to_bind_segment_for_agent(self, context, segment, agent):
-        """Try to bind with segment for agent.
-
-        :param context: PortContext instance describing the port
-        :param segment: segment dictionary describing segment to bind
-        :param agent: agents_db entry describing agent to bind
-        :returns: True iff segment has been bound for agent
-
-        Called outside any transaction during bind_port() so that
-        derived MechanismDrivers can use agent_db data along with
-        built-in knowledge of the corresponding agent's capabilities
-        to attempt to bind to the specified network segment for the
-        agent.
-
-        If the segment can be bound for the agent, this function must
-        call context.set_binding() with appropriate values and then
-        return True. Otherwise, it must return False.
-        """
-
-
-@six.add_metaclass(abc.ABCMeta)
-class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase):
-    """Base class for simple drivers using an L2 agent.
-
-    The SimpleAgentMechanismDriverBase provides common code for
-    mechanism drivers that integrate the ml2 plugin with L2 agents,
-    where the binding:vif_type and binding:vif_details values are the
-    same for all bindings. Port binding with this driver requires the
-    driver's associated agent to be running on the port's host, and
-    that agent to have connectivity to at least one segment of the
-    port's network.
-
-    MechanismDrivers using this base class must pass the agent type
-    and the values for binding:vif_type and binding:vif_details to
-    __init__(), and must implement check_segment_for_agent().
-    """
-
-    def __init__(self, agent_type, vif_type, vif_details,
-                 supported_vnic_types=[portbindings.VNIC_NORMAL]):
-        """Initialize base class for specific L2 agent type.
-
-        :param agent_type: Constant identifying agent type in agents_db
-        :param vif_type: Value for binding:vif_type when bound
-        :param vif_details: Dictionary with details for VIF driver when bound
-        :param supported_vnic_types: The binding:vnic_type values we can bind
-        """
-        super(SimpleAgentMechanismDriverBase, self).__init__(
-            agent_type, supported_vnic_types)
-        self.vif_type = vif_type
-        self.vif_details = vif_details
-
-    def try_to_bind_segment_for_agent(self, context, segment, agent):
-        if self.check_segment_for_agent(segment, agent):
-            context.set_binding(segment[api.ID],
-                                self.vif_type,
-                                self.vif_details)
-            return True
-        else:
-            return False
-
-    @abc.abstractmethod
-    def get_allowed_network_types(self, agent=None):
-        """Return the agent's or driver's allowed network types.
-
-        For example: return ('flat', ...). You can also refer to the
-        configuration the given agent exposes.
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_mappings(self, agent):
-        """Return the agent's bridge or interface mappings.
-
-        For example: agent['configurations'].get('bridge_mappings', {}).
-        """
-        pass
-
-    def physnet_in_mappings(self, physnet, mappings):
-        """Is the physical network part of the given mappings?"""
-        return physnet in mappings
-
-    def check_segment_for_agent(self, segment, agent):
-        """Check if segment can be bound for agent.
-
-        :param segment: segment dictionary describing segment to bind
-        :param agent: agents_db entry describing agent to bind
-        :returns: True iff segment can be bound for agent
-
-        Called outside any transaction during bind_port so that derived
-        MechanismDrivers can use agent_db data along with built-in
-        knowledge of the corresponding agent's capabilities to
-        determine whether or not the specified network segment can be
-        bound for the agent.
-        """
-
-        mappings = self.get_mappings(agent)
-        allowed_network_types = self.get_allowed_network_types(agent)
-
-        LOG.debug("Checking segment: %(segment)s "
-                  "for mappings: %(mappings)s "
-                  "with network types: %(network_types)s",
-                  {'segment': segment, 'mappings': mappings,
-                   'network_types': allowed_network_types})
-
-        network_type = segment[api.NETWORK_TYPE]
-        if network_type not in allowed_network_types:
-            LOG.debug(
-                'Network %(network_id)s is of type %(network_type)s '
-                'but agent %(agent)s or mechanism driver only '
-                'supports %(allowed_network_types)s.',
-                {'network_id': segment['id'],
-                 'network_type': network_type,
-                 'agent': agent['host'],
-                 'allowed_network_types': allowed_network_types})
-            return False
-
-        if network_type in [p_constants.TYPE_FLAT, p_constants.TYPE_VLAN]:
-            physnet = segment[api.PHYSICAL_NETWORK]
-            if not self.physnet_in_mappings(physnet, mappings):
-                LOG.debug(
-                    'Network %(network_id)s is connected to physical '
-                    'network %(physnet)s, but agent %(agent)s reported '
-                    'physical networks %(mappings)s. '
-                    'The physical network must be configured on the '
-                    'agent if binding is to succeed.',
-                    {'network_id': segment['id'],
-                     'physnet': physnet,
-                     'agent': agent['host'],
-                     'mappings': mappings})
-                return False
-
-        return True
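
The binding decision in the deleted check_segment_for_agent() above boils
down to two checks; a rough standalone sketch (segment values are
illustrative, and the real code reads them via the driver_api constants):

    segment = {'id': 'seg-1', 'network_type': 'vlan',
               'physical_network': 'physnet1'}
    mappings = {'physnet1': 'eth1'}
    allowed_network_types = ['local', 'flat', 'vlan']

    bindable = (segment['network_type'] in allowed_network_types and
                (segment['network_type'] not in ('flat', 'vlan') or
                 segment['physical_network'] in mappings))
    assert bindable
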
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/__init__.py b/neutron/plugins/ml2/drivers/mech_sriov/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/__init__.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py
deleted file mode 100644 (file)
index 1173d8a..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.agent.common import config
-
-
-def parse_exclude_devices(exclude_list):
-    """Parse Exclude devices list
-
-    parses excluded device list in the form:
-    dev_name:pci_dev_1;pci_dev_2
-    @param exclude list: list of string pairs in "key:value" format
-                        the key part represents the network device name
-                        the value part is a list of PCI slots separated by ";"
-    """
-    exclude_mapping = {}
-    for dev_mapping in exclude_list:
-        try:
-            dev_name, exclude_devices = dev_mapping.split(":", 1)
-        except ValueError:
-            raise ValueError(_("Invalid mapping: '%s'") % dev_mapping)
-        dev_name = dev_name.strip()
-        if not dev_name:
-            raise ValueError(_("Missing key in mapping: '%s'") % dev_mapping)
-        if dev_name in exclude_mapping:
-            raise ValueError(_("Device %(dev_name)s in mapping: %(mapping)s "
-                               "not unique") % {'dev_name': dev_name,
-                                                'mapping': dev_mapping})
-        exclude_devices_list = exclude_devices.split(";")
-        exclude_devices_set = set()
-        for dev in exclude_devices_list:
-            dev = dev.strip()
-            if dev:
-                exclude_devices_set.add(dev)
-        exclude_mapping[dev_name] = exclude_devices_set
-    return exclude_mapping
-
-DEFAULT_DEVICE_MAPPINGS = []
-DEFAULT_EXCLUDE_DEVICES = []
-
-agent_opts = [
-    cfg.IntOpt('polling_interval', default=2,
-               help=_("The number of seconds the agent will wait between "
-                      "polling for local device changes.")),
-]
-
-sriov_nic_opts = [
-    cfg.ListOpt('physical_device_mappings',
-                default=DEFAULT_DEVICE_MAPPINGS,
-                help=_("Comma-separated list of "
-                       "<physical_network>:<network_device> tuples mapping "
-                       "physical network names to the agent's node-specific "
-                       "physical network device interfaces of SR-IOV physical "
-                       "function to be used for VLAN networks. All physical "
-                       "networks listed in network_vlan_ranges on the server "
-                       "should have mappings to appropriate interfaces on "
-                       "each agent.")),
-    cfg.ListOpt('exclude_devices',
-                default=DEFAULT_EXCLUDE_DEVICES,
-                help=_("Comma-separated list of "
-                       "<network_device>:<vfs_to_exclude> tuples, mapping "
-                       "network_device to the agent's node-specific list of "
-                       "virtual functions that should not be used for virtual "
-                       "networking. vfs_to_exclude is a semicolon-separated "
-                       "list of virtual functions to exclude from "
-                       "network_device. The network_device in the mapping "
-                       "should appear in the physical_device_mappings "
-                       "list.")),
-]
-
-
-cfg.CONF.register_opts(agent_opts, 'AGENT')
-cfg.CONF.register_opts(sriov_nic_opts, 'SRIOV_NIC')
-config.register_agent_state_opts_helper(cfg.CONF)
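
For one sample entry, the deleted parse_exclude_devices() above behaves
roughly as follows (the PCI addresses are illustrative); note the
maxsplit=1, which keeps the colons inside the PCI addresses intact:

    entry = 'eth3:0000:07:00.2;0000:07:00.3'
    dev_name, devs = entry.split(':', 1)   # 'eth3', '0000:07:00.2;...'
    excluded = {d.strip() for d in devs.split(';') if d.strip()}
    assert dev_name == 'eth3'
    assert excluded == {'0000:07:00.2', '0000:07:00.3'}
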
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py
deleted file mode 100644 (file)
index 2c96074..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from neutron._i18n import _
-from neutron.common import exceptions as n_exc
-
-
-class SriovNicError(n_exc.NeutronException):
-    pass
-
-
-class InvalidDeviceError(SriovNicError):
-    message = _("Invalid Device %(dev_name)s: %(reason)s")
-
-
-class IpCommandError(SriovNicError):
-    message = _("ip command failed: %(reason)s")
-
-
-class IpCommandOperationNotSupportedError(SriovNicError):
-    message = _("Operation not supported on device %(dev_name)s")
-
-
-class InvalidPciSlotError(SriovNicError):
-    message = _("Invalid pci slot %(pci_slot)s")
-
-
-class IpCommandDeviceError(SriovNicError):
-    message = _("ip command failed on device %(dev_name)s: %(reason)s")
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py
deleted file mode 100644 (file)
index ea0fac3..0000000
+++ /dev/null
@@ -1,393 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-
-from oslo_log import log as logging
-import six
-
-from neutron._i18n import _, _LE, _LW
-from neutron.common import utils
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
-    import exceptions as exc
-from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib
-
-LOG = logging.getLogger(__name__)
-
-
-class PciOsWrapper(object):
-    """OS wrapper for checking virtual functions"""
-
-    DEVICE_PATH = "/sys/class/net/%s/device"
-    PCI_PATH = "/sys/class/net/%s/device/virtfn%s/net"
-    VIRTFN_FORMAT = r"^virtfn(?P<vf_index>\d+)"
-    VIRTFN_REG_EX = re.compile(VIRTFN_FORMAT)
-
-    @classmethod
-    def scan_vf_devices(cls, dev_name):
-        """Scan os directories to get VF devices
-
-        @param dev_name: pf network device name
-        @return: list of virtual functions
-        """
-        vf_list = []
-        dev_path = cls.DEVICE_PATH % dev_name
-        if not os.path.isdir(dev_path):
-            LOG.error(_LE("Failed to get devices for %s"), dev_name)
-            raise exc.InvalidDeviceError(dev_name=dev_name,
-                                         reason=_("Device not found"))
-        file_list = os.listdir(dev_path)
-        for file_name in file_list:
-            pattern_match = cls.VIRTFN_REG_EX.match(file_name)
-            if pattern_match:
-                vf_index = int(pattern_match.group("vf_index"))
-                file_path = os.path.join(dev_path, file_name)
-                if os.path.islink(file_path):
-                    file_link = os.readlink(file_path)
-                    pci_slot = os.path.basename(file_link)
-                    vf_list.append((pci_slot, vf_index))
-        if not vf_list:
-            raise exc.InvalidDeviceError(
-                dev_name=dev_name,
-                reason=_("Device has no virtual functions"))
-        return vf_list
-
-    @classmethod
-    def is_assigned_vf(cls, dev_name, vf_index):
-        """Check if VF is assigned.
-
-        Checks if a given vf index of a given device name is assigned
-        by checking the relevant path in the system:
-        VF is assigned if:
-            Direct VF: PCI_PATH does not exist.
-            Macvtap VF: macvtap@<vf interface> interface exists in ip link show
-        @param dev_name: pf network device name
-        @param vf_index: vf index
-        """
-        path = cls.PCI_PATH % (dev_name, vf_index)
-
-        try:
-            ifname_list = os.listdir(path)
-        except OSError:
-            # PCI_PATH not existing means that the direct VF is assigned
-            return True
-
-        # Note(moshele): kernels < 3.13 don't create a symbolic link
-        # for the macvtap interface. Therefore we work around it by
-        # parsing ip link show and checking if a macvtap interface exists
-        for ifname in ifname_list:
-            if pci_lib.PciDeviceIPWrapper.is_macvtap_assigned(ifname):
-                return True
-        return False
-
-
-class EmbSwitch(object):
-    """Class to manage logical embedded switch entity.
-
-    An Embedded Switch object is a logical entity representing all VFs
-    connected to the same physical network.
-    Each physical network is mapped to a PF network device interface,
-    meaning all of its VFs, excluding the devices in the exclude_device list.
-    @ivar pci_slot_map: dictionary for mapping each pci slot to vf index
-    @ivar pci_dev_wrapper: pci device wrapper
-    """
-
-    def __init__(self, phys_net, dev_name, exclude_devices):
-        """Constructor
-
-        @param phys_net: physical network
-        @param dev_name: network device name
-        @param exclude_devices: list of pci slots to exclude
-        """
-        self.phys_net = phys_net
-        self.dev_name = dev_name
-        self.pci_slot_map = {}
-        self.pci_dev_wrapper = pci_lib.PciDeviceIPWrapper(dev_name)
-
-        self._load_devices(exclude_devices)
-
-    def _load_devices(self, exclude_devices):
-        """Load devices from driver and filter if needed.
-
-        @param exclude_devices: excluded devices mapping device_name: pci slots
-        """
-        scanned_pci_list = PciOsWrapper.scan_vf_devices(self.dev_name)
-        for pci_slot, vf_index in scanned_pci_list:
-            if pci_slot not in exclude_devices:
-                self.pci_slot_map[pci_slot] = vf_index
-
-    def get_pci_slot_list(self):
-        """Get list of VF addresses."""
-        return self.pci_slot_map.keys()
-
-    def get_assigned_devices_info(self):
-        """Get assigned Virtual Functions mac and pci slot
-        information and populates vf_to_pci_slot mappings
-
-        @return: list of VF pair (mac address, pci slot)
-        """
-        vf_to_pci_slot_mapping = {}
-        assigned_devices_info = []
-        for pci_slot, vf_index in self.pci_slot_map.items():
-            if not PciOsWrapper.is_assigned_vf(self.dev_name, vf_index):
-                continue
-            vf_to_pci_slot_mapping[vf_index] = pci_slot
-        if vf_to_pci_slot_mapping:
-            vf_to_mac_mapping = self.pci_dev_wrapper.get_assigned_macs(
-                list(vf_to_pci_slot_mapping.keys()))
-            for vf_index, mac in vf_to_mac_mapping.items():
-                pci_slot = vf_to_pci_slot_mapping[vf_index]
-                assigned_devices_info.append((mac, pci_slot))
-        return assigned_devices_info
-
-    def get_device_state(self, pci_slot):
-        """Get device state.
-
-        @param pci_slot: Virtual Function address
-        """
-        vf_index = self._get_vf_index(pci_slot)
-        return self.pci_dev_wrapper.get_vf_state(vf_index)
-
-    def set_device_state(self, pci_slot, state):
-        """Set device state.
-
-        @param pci_slot: Virtual Function address
-        @param state: link state
-        """
-        vf_index = self._get_vf_index(pci_slot)
-        return self.pci_dev_wrapper.set_vf_state(vf_index, state)
-
-    def set_device_max_rate(self, pci_slot, max_kbps):
-        """Set device max rate.
-
-        @param pci_slot: Virtual Function address
-        @param max_kbps: device max rate in kbps
-        """
-        vf_index = self._get_vf_index(pci_slot)
-        #(Note): ip link sets the max rate in Mbps, therefore
-        #we need to convert max_kbps to Mbps.
-        #Zero means disabling the rate, so the lowest available
-        #rate is 1 Mbps. Floating point numbers are not allowed.
-        if max_kbps > 0 and max_kbps < 1000:
-            max_mbps = 1
-        else:
-            max_mbps = utils.round_val(max_kbps / 1000.0)
-
-        log_dict = {
-            'max_rate': max_mbps,
-            'max_kbps': max_kbps,
-            'vf_index': vf_index
-        }
-        if max_kbps % 1000 != 0:
-            LOG.debug("Maximum rate for SR-IOV ports is counted in Mbps; "
-                      "setting %(max_rate)s Mbps limit for port %(vf_index)s "
-                      "instead of %(max_kbps)s kbps",
-                      log_dict)
-        else:
-            LOG.debug("Setting %(max_rate)s Mbps limit for port %(vf_index)s",
-                      log_dict)
-
-        return self.pci_dev_wrapper.set_vf_max_rate(vf_index, max_mbps)
-
-    def _get_vf_index(self, pci_slot):
-        vf_index = self.pci_slot_map.get(pci_slot)
-        if vf_index is None:
-            LOG.warning(_LW("Cannot find vf index for pci slot %s"),
-                        pci_slot)
-            raise exc.InvalidPciSlotError(pci_slot=pci_slot)
-        return vf_index
-
-    def set_device_spoofcheck(self, pci_slot, enabled):
-        """Set device spoofchecking
-
-        @param pci_slot: Virtual Function address
-        @param enabled: True to enable spoofcheck, False to disable
-        """
-        vf_index = self.pci_slot_map.get(pci_slot)
-        if vf_index is None:
-            raise exc.InvalidPciSlotError(pci_slot=pci_slot)
-        return self.pci_dev_wrapper.set_vf_spoofcheck(vf_index, enabled)
-
-    def get_pci_device(self, pci_slot):
-        """Get mac address for given Virtual Function address
-
-        @param pci_slot: pci slot
-        @return: MAC address of virtual function
-        """
-        vf_index = self.pci_slot_map.get(pci_slot)
-        mac = None
-        if vf_index is not None:
-            if PciOsWrapper.is_assigned_vf(self.dev_name, vf_index):
-                macs = self.pci_dev_wrapper.get_assigned_macs([vf_index])
-                mac = macs.get(vf_index)
-        return mac
-
-
-class ESwitchManager(object):
-    """Manages logical Embedded Switch entities for physical network."""
-
-    def __new__(cls):
-        # make it a singleton
-        if not hasattr(cls, '_instance'):
-            cls._instance = super(ESwitchManager, cls).__new__(cls)
-            cls.emb_switches_map = {}
-            cls.pci_slot_map = {}
-        return cls._instance
-
-    def device_exists(self, device_mac, pci_slot):
-        """Verify if device exists.
-
-        Check if a device mac exists and matches the given VF pci slot
-        @param device_mac: device mac
-        @param pci_slot: VF address
-        """
-        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
-        if embedded_switch:
-            return True
-        return False
-
-    def get_assigned_devices_info(self, phys_net=None):
-        """Get all assigned devices.
-
-        Get all assigned devices belonging to the given embedded switch
-        @param phys_net: physical network; if None, get all assigned devices
-        @return: set of assigned VF (mac address, pci slot) pairs
-        """
-        if phys_net:
-            embedded_switch = self.emb_switches_map.get(phys_net, None)
-            if not embedded_switch:
-                return set()
-            eswitch_objects = [embedded_switch]
-        else:
-            eswitch_objects = self.emb_switches_map.values()
-        assigned_devices = set()
-        for embedded_switch in eswitch_objects:
-            for device in embedded_switch.get_assigned_devices_info():
-                assigned_devices.add(device)
-        return assigned_devices
-
-    def get_device_state(self, device_mac, pci_slot):
-        """Get device state.
-
-        Get the device state (up/True or down/False)
-        @param device_mac: device mac
-        @param pci_slot: VF PCI slot
-        @return: device state (True/False), None if failed
-        """
-        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
-        if embedded_switch:
-            return embedded_switch.get_device_state(pci_slot)
-        return False
-
-    def set_device_max_rate(self, device_mac, pci_slot, max_kbps):
-        """Set device max rate
-
-        Sets the device max rate in kbps
-        @param device_mac: device mac
-        @param pci_slot: pci slot
-        @param max_kbps: device max rate in kbps
-        """
-        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
-        if embedded_switch:
-            embedded_switch.set_device_max_rate(pci_slot,
-                                                max_kbps)
-
-    def set_device_state(self, device_mac, pci_slot, admin_state_up):
-        """Set device state
-
-        Sets the device state (up or down)
-        @param device_mac: device mac
-        @param pci_slot: pci slot
-        @param admin_state_up: device admin state True/False
-        """
-        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
-        if embedded_switch:
-            embedded_switch.set_device_state(pci_slot,
-                                             admin_state_up)
-
-    def set_device_spoofcheck(self, device_mac, pci_slot, enabled):
-        """Set device spoofcheck
-
-        Sets device spoofchecking (enabled or disabled)
-        @param device_mac: device mac
-        @param pci_slot: pci slot
-        @param enabled: device spoofchecking
-        """
-        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
-        if embedded_switch:
-            embedded_switch.set_device_spoofcheck(pci_slot,
-                                                  enabled)
-
-    def discover_devices(self, device_mappings, exclude_devices):
-        """Discover which Virtual functions to manage.
-
-        Discover devices, and create embedded switch object for network device
-        @param device_mappings: device mapping physical_network:device_name
-        @param exclude_devices: excluded devices mapping device_name: pci slots
-        """
-        if exclude_devices is None:
-            exclude_devices = {}
-        for phys_net, dev_name in six.iteritems(device_mappings):
-            self._create_emb_switch(phys_net, dev_name,
-                                    exclude_devices.get(dev_name, set()))
-
-    def _create_emb_switch(self, phys_net, dev_name, exclude_devices):
-        embedded_switch = EmbSwitch(phys_net, dev_name, exclude_devices)
-        self.emb_switches_map[phys_net] = embedded_switch
-        for pci_slot in embedded_switch.get_pci_slot_list():
-            self.pci_slot_map[pci_slot] = embedded_switch
-
-    def _get_emb_eswitch(self, device_mac, pci_slot):
-        """Get embedded switch.
-
-        Get the embedded switch by pci slot and validate that it carries
-        the given device mac
-        @param device_mac: device mac
-        @param pci_slot: pci slot
-        """
-        embedded_switch = self.pci_slot_map.get(pci_slot)
-        if embedded_switch:
-            used_device_mac = embedded_switch.get_pci_device(pci_slot)
-            if used_device_mac != device_mac:
-                LOG.warning(_LW("device pci mismatch: %(device_mac)s "
-                                "- %(pci_slot)s"),
-                            {"device_mac": device_mac, "pci_slot": pci_slot})
-                embedded_switch = None
-        return embedded_switch
-
-    def clear_max_rate(self, pci_slot):
-        """Clear the max rate
-
-        Clear the max rate configuration from VF by setting it to 0
-        @param pci_slot: VF PCI slot
-        """
-        #(Note): we don't use self._get_emb_eswitch here, because the VF
-        #may not be assigned when it is being cleared. This happens when
-        #libvirt releases the VF back to the hypervisor on VM deletion.
-        #Therefore we just clear the VF max rate according to pci_slot,
-        #whether or not the VF is assigned.
-        embedded_switch = self.pci_slot_map.get(pci_slot)
-        if embedded_switch:
-            #(Note): check the pci_slot is not assigned to some
-            # other port before resetting the max rate.
-            if embedded_switch.get_pci_device(pci_slot) is None:
-                embedded_switch.set_device_max_rate(pci_slot, 0)
-            else:
-                LOG.warning(_LW("VF with PCI slot %(pci_slot)s is already "
-                                "assigned; skipping reset maximum rate"),
-                            {'pci_slot': pci_slot})
-        else:
-            LOG.error(_LE("PCI slot %(pci_slot)s has no mapping to Embedded "
-                          "Switch; skipping"), {'pci_slot': pci_slot})
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py
deleted file mode 100644 (file)
index a404e18..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2015 Mellanox Technologies, Ltd
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LE, _LI
-from neutron.agent.l2.extensions import qos
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common import (
-    exceptions as exc)
-from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
-from neutron.plugins.ml2.drivers.mech_sriov.mech_driver import (
-    mech_driver)
-
-LOG = logging.getLogger(__name__)
-
-
-class QosSRIOVAgentDriver(qos.QosAgentDriver):
-
-    SUPPORTED_RULES = (
-        mech_driver.SriovNicSwitchMechanismDriver.supported_qos_rule_types)
-
-    def __init__(self):
-        super(QosSRIOVAgentDriver, self).__init__()
-        self.eswitch_mgr = None
-
-    def initialize(self):
-        self.eswitch_mgr = esm.ESwitchManager()
-
-    def create_bandwidth_limit(self, port, rule):
-        self.update_bandwidth_limit(port, rule)
-
-    def update_bandwidth_limit(self, port, rule):
-        pci_slot = port['profile'].get('pci_slot')
-        device = port['device']
-        self._set_vf_max_rate(device, pci_slot, rule.max_kbps)
-
-    def delete_bandwidth_limit(self, port):
-        pci_slot = port['profile'].get('pci_slot')
-        if port.get('device_owner') is None:
-            self.eswitch_mgr.clear_max_rate(pci_slot)
-        else:
-            device = port['device']
-            self._set_vf_max_rate(device, pci_slot)
-
-    def _set_vf_max_rate(self, device, pci_slot, max_kbps=0):
-        if self.eswitch_mgr.device_exists(device, pci_slot):
-            try:
-                self.eswitch_mgr.set_device_max_rate(
-                    device, pci_slot, max_kbps)
-            except exc.SriovNicError:
-                LOG.exception(
-                    _LE("Failed to set device %s max rate"), device)
-        else:
-            LOG.info(_LI("No device with MAC %s defined on agent."), device)
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py
deleted file mode 100644 (file)
index 3074182..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LE, _LW
-from neutron.agent.linux import ip_lib
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
-    import exceptions as exc
-
-LOG = logging.getLogger(__name__)
-
-
-class PciDeviceIPWrapper(ip_lib.IPWrapper):
-    """Wrapper class for ip link commands.
-
-    Wrapper for getting/setting PCI device details using ip link.
-    """
-    VF_PATTERN = r"^vf\s+(?P<vf_index>\d+)\s+"
-    MAC_PATTERN = r"MAC\s+(?P<mac>[a-fA-F0-9:]+),"
-    STATE_PATTERN = r"\s+link-state\s+(?P<state>\w+)"
-    ANY_PATTERN = ".*,"
-    MACVTAP_PATTERN = r".*macvtap[0-9]+@(?P<vf_interface>[a-zA-Z0-9]+):"
-
-    VF_LINE_FORMAT = VF_PATTERN + MAC_PATTERN + ANY_PATTERN + STATE_PATTERN
-    VF_DETAILS_REG_EX = re.compile(VF_LINE_FORMAT)
-    MACVTAP_REG_EX = re.compile(MACVTAP_PATTERN)
-
-    IP_LINK_OP_NOT_SUPPORTED = 'RTNETLINK answers: Operation not supported'
-
-    class LinkState(object):
-        ENABLE = "enable"
-        DISABLE = "disable"
-
-    def __init__(self, dev_name):
-        super(PciDeviceIPWrapper, self).__init__()
-        self.dev_name = dev_name
-
-    def _set_feature(self, vf_index, feature, value):
-        """Sets vf feature
-
-        Checks if the feature is not supported or there's some
-        general error during ip link invocation and raises
-        exception accordingly.
-
-        :param vf_index: vf index
-        :param feature: name of a feature to be passed to ip link,
-                        such as 'state' or 'spoofchk'
-        :param value: value of the feature setting
-        """
-        try:
-            self._as_root([], "link", ("set", self.dev_name, "vf",
-                                       str(vf_index), feature, value))
-        except Exception as e:
-            if self.IP_LINK_OP_NOT_SUPPORTED in str(e):
-                raise exc.IpCommandOperationNotSupportedError(
-                    dev_name=self.dev_name)
-            else:
-                raise exc.IpCommandDeviceError(dev_name=self.dev_name,
-                                               reason=str(e))
-
-    def get_assigned_macs(self, vf_list):
-        """Get assigned mac addresses for vf list.
-
-        @param vf_list: list of vf indexes
-        @return: dict mapping of vf to mac
-        """
-        try:
-            out = self._as_root([], "link", ("show", self.dev_name))
-        except Exception as e:
-            LOG.exception(_LE("Failed executing ip command"))
-            raise exc.IpCommandDeviceError(dev_name=self.dev_name,
-                                           reason=e)
-        vf_to_mac_mapping = {}
-        vf_lines = self._get_vf_link_show(vf_list, out)
-        if vf_lines:
-            for vf_line in vf_lines:
-                vf_details = self._parse_vf_link_show(vf_line)
-                if vf_details:
-                    vf_num = vf_details.get('vf')
-                    vf_mac = vf_details.get("MAC")
-                    vf_to_mac_mapping[vf_num] = vf_mac
-        return vf_to_mac_mapping
-
-    def get_vf_state(self, vf_index):
-        """Get vf state {True/False}
-
-        @param vf_index: vf index
-        @todo: Handle "auto" state
-        """
-        try:
-            out = self._as_root([], "link", ("show", self.dev_name))
-        except Exception as e:
-            LOG.exception(_LE("Failed executing ip command"))
-            raise exc.IpCommandDeviceError(dev_name=self.dev_name,
-                                           reason=e)
-        vf_lines = self._get_vf_link_show([vf_index], out)
-        if vf_lines:
-            vf_details = self._parse_vf_link_show(vf_lines[0])
-            if vf_details:
-                state = vf_details.get("link-state",
-                                       self.LinkState.DISABLE)
-                # Nested so that 'state' is never read unbound when the
-                # vf line fails to parse.
-                if state != self.LinkState.DISABLE:
-                    return True
-        return False
-
-    def set_vf_state(self, vf_index, state):
-        """sets vf state.
-
-        @param vf_index: vf index
-        @param state: required state {True/False}
-        """
-        status_str = (self.LinkState.ENABLE if state
-                      else self.LinkState.DISABLE)
-        self._set_feature(vf_index, "state", status_str)
-
-    def set_vf_spoofcheck(self, vf_index, enabled):
-        """sets vf spoofcheck
-
-        @param vf_index: vf index
-        @param enabled: True to enable spoof checking,
-                        False to disable
-        """
-        setting = "on" if enabled else "off"
-        self._set_feature(vf_index, "spoofchk", setting)
-
-    def set_vf_max_rate(self, vf_index, max_tx_rate):
-        """sets vf max rate.
-
-        @param vf_index: vf index
-        @param max_tx_rate: vf max tx rate in Mbps
-        """
-        self._set_feature(vf_index, "rate", str(max_tx_rate))
-
-    def _get_vf_link_show(self, vf_list, link_show_out):
-        """Get link show output for VFs
-
-        get vf link show command output filtered by given vf list
-        @param vf_list: list of vf indexes
-        @param link_show_out: link show command output
-        @return: list of output rows regarding given vf_list
-        """
-        vf_lines = []
-        for line in link_show_out.split("\n"):
-            line = line.strip()
-            if line.startswith("vf"):
-                details = line.split()
-                index = int(details[1])
-                if index in vf_list:
-                    vf_lines.append(line)
-        if not vf_lines:
-            LOG.warning(_LW("Cannot find vfs %(vfs)s in device %(dev_name)s"),
-                        {'vfs': vf_list, 'dev_name': self.dev_name})
-        return vf_lines
-
-    def _parse_vf_link_show(self, vf_line):
-        """Parses vf link show command output line.
-
-        @param vf_line: link show vf line
-        """
-        vf_details = {}
-        pattern_match = self.VF_DETAILS_REG_EX.match(vf_line)
-        if pattern_match:
-            vf_details["vf"] = int(pattern_match.group("vf_index"))
-            vf_details["MAC"] = pattern_match.group("mac")
-            vf_details["link-state"] = pattern_match.group("state")
-        else:
-            LOG.warning(_LW("failed to parse vf link show line %(line)s: "
-                            "for %(device)s"),
-                        {'line': vf_line, 'device': self.dev_name})
-        return vf_details
-
-    @classmethod
-    def is_macvtap_assigned(cls, ifname):
-        """Check if vf has macvtap interface assigned
-
-        Parses the output of the "ip link show" command and checks
-        whether the macvtap[0-9]+@<vf interface> regex matches any
-        line of it.
-        @param ifname: vf interface name
-        @return: True on match otherwise False
-        """
-        try:
-            out = cls._execute([], "link", ("show", ), run_as_root=True)
-        except Exception as e:
-            LOG.error(_LE("Failed executing ip command: %s"), e)
-            raise exc.IpCommandError(reason=e)
-
-        for line in out.splitlines():
-            pattern_match = cls.MACVTAP_REG_EX.match(line)
-            if pattern_match:
-                if ifname == pattern_match.group('vf_interface'):
-                    return True
-        return False
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py
deleted file mode 100644 (file)
index 3f2edef..0000000
+++ /dev/null
@@ -1,421 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import socket
-import sys
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_service import loopingcall
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.agent.l2.extensions import manager as ext_manager
-from neutron.agent import rpc as agent_rpc
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.common import config as common_config
-from neutron.common import constants as n_constants
-from neutron.common import topics
-from neutron.common import utils as n_utils
-from neutron import context
-from neutron.extensions import portbindings
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
-    import exceptions as exc
-from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
-
-
-LOG = logging.getLogger(__name__)
-
-
-class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
-
-    # Set RPC API version to 1.0 by default.
-    # History:
-    #   1.1 - Support Security Group RPC
-    target = oslo_messaging.Target(version='1.1')
-
-    def __init__(self, context, agent, sg_agent):
-        super(SriovNicSwitchRpcCallbacks, self).__init__()
-        self.context = context
-        self.agent = agent
-        self.sg_agent = sg_agent
-
-    def port_update(self, context, **kwargs):
-        LOG.debug("port_update received")
-        port = kwargs.get('port')
-
-        vnic_type = port.get(portbindings.VNIC_TYPE)
-        if vnic_type == portbindings.VNIC_DIRECT_PHYSICAL:
-            LOG.debug("The SR-IOV agent doesn't handle %s ports.",
-                      portbindings.VNIC_DIRECT_PHYSICAL)
-            return
-
-        # Put the port mac address in the updated_devices set.
-        # Do not store port details: if they were used when processing
-        # notifications, there would be no guarantee that notifications
-        # are processed in the same order as the relevant API requests.
-        mac = port['mac_address']
-        pci_slot = None
-        if port.get(portbindings.PROFILE):
-            pci_slot = port[portbindings.PROFILE].get('pci_slot')
-
-        if pci_slot:
-            self.agent.updated_devices.add((mac, pci_slot))
-            LOG.debug("port_update RPC received for port: %(id)s with MAC "
-                      "%(mac)s and PCI slot %(pci_slot)s slot",
-                      {'id': port['id'], 'mac': mac, 'pci_slot': pci_slot})
-        else:
-            LOG.debug("No PCI Slot for port %(id)s with MAC %(mac)s; "
-                      "skipping", {'id': port['id'], 'mac': mac,
-                                   'pci_slot': pci_slot})
-
-
-class SriovNicSwitchAgent(object):
-    def __init__(self, physical_devices_mappings, exclude_devices,
-                 polling_interval):
-
-        self.polling_interval = polling_interval
-        self.conf = cfg.CONF
-        self.setup_eswitch_mgr(physical_devices_mappings,
-                               exclude_devices)
-
-        # Stores port update notifications for processing in the main loop
-        self.updated_devices = set()
-        self.mac_to_port_id_mapping = {}
-
-        self.context = context.get_admin_context_without_session()
-        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
-        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
-        self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
-                self.sg_plugin_rpc)
-        self._setup_rpc()
-        self.ext_manager = self._create_agent_extension_manager(
-            self.connection)
-
-        configurations = {'device_mappings': physical_devices_mappings,
-                          'extensions': self.ext_manager.names()}
-        self.agent_state = {
-            'binary': 'neutron-sriov-nic-agent',
-            'host': self.conf.host,
-            'topic': n_constants.L2_AGENT_TOPIC,
-            'configurations': configurations,
-            'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
-            'start_flag': True}
-
-        # The initialization is complete; we can start receiving messages
-        self.connection.consume_in_threads()
-        # Initialize iteration counter
-        self.iter_num = 0
-
-    def _setup_rpc(self):
-        self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
-        LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
-
-        self.topic = topics.AGENT
-        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
-        # RPC network init
-        # Handle updates from service
-        self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
-                                                     self.sg_agent)]
-        # Define the listening consumers for the agent
-        consumers = [[topics.PORT, topics.UPDATE],
-                     [topics.NETWORK, topics.DELETE],
-                     [topics.SECURITY_GROUP, topics.UPDATE]]
-        self.connection = agent_rpc.create_consumers(self.endpoints,
-                                                     self.topic,
-                                                     consumers,
-                                                     start_listening=False)
-
-        report_interval = cfg.CONF.AGENT.report_interval
-        if report_interval:
-            heartbeat = loopingcall.FixedIntervalLoopingCall(
-                self._report_state)
-            heartbeat.start(interval=report_interval)
-
-    def _report_state(self):
-        try:
-            devices = len(self.eswitch_mgr.get_assigned_devices_info())
-            self.agent_state.get('configurations')['devices'] = devices
-            self.state_rpc.report_state(self.context,
-                                        self.agent_state)
-            self.agent_state.pop('start_flag', None)
-        except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
-
-    def _create_agent_extension_manager(self, connection):
-        ext_manager.register_opts(self.conf)
-        mgr = ext_manager.AgentExtensionsManager(self.conf)
-        mgr.initialize(connection, 'sriov')
-        return mgr
-
-    def setup_eswitch_mgr(self, device_mappings, exclude_devices=None):
-        exclude_devices = exclude_devices or {}
-        self.eswitch_mgr = esm.ESwitchManager()
-        self.eswitch_mgr.discover_devices(device_mappings, exclude_devices)
-
-    def scan_devices(self, registered_devices, updated_devices):
-        curr_devices = self.eswitch_mgr.get_assigned_devices_info()
-        device_info = {}
-        device_info['current'] = curr_devices
-        device_info['added'] = curr_devices - registered_devices
-        # we don't want to process updates for devices that don't exist
-        device_info['updated'] = updated_devices & curr_devices
-        # we need to clean up after devices are removed
-        device_info['removed'] = registered_devices - curr_devices
-        return device_info
-
-    def _device_info_has_changes(self, device_info):
-        return (device_info.get('added')
-                or device_info.get('updated')
-                or device_info.get('removed'))
-
-    def process_network_devices(self, device_info):
-        resync_a = False
-        resync_b = False
-
-        self.sg_agent.prepare_devices_filter(device_info.get('added'))
-
-        if device_info.get('updated'):
-            self.sg_agent.refresh_firewall()
-        # Updated devices are processed the same as new ones, as their
-        # admin_state_up may have changed. The set union prevents duplicating
-        # work when a device is new and updated in the same polling iteration.
-        devices_added_updated = (set(device_info.get('added'))
-                                 | set(device_info.get('updated')))
-        if devices_added_updated:
-            resync_a = self.treat_devices_added_updated(devices_added_updated)
-
-        if device_info.get('removed'):
-            resync_b = self.treat_devices_removed(device_info['removed'])
-        # If one of the above operations fails => resync with plugin
-        return (resync_a | resync_b)
-
-    def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
-        if self.eswitch_mgr.device_exists(device, pci_slot):
-            try:
-                self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
-                                                       spoofcheck)
-            except Exception:
-                LOG.warning(_LW("Failed to set spoofcheck for device %s"),
-                            device)
-            LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
-                     {"device": device, "spoofcheck": spoofcheck})
-
-            try:
-                self.eswitch_mgr.set_device_state(device, pci_slot,
-                                                  admin_state_up)
-            except exc.IpCommandOperationNotSupportedError:
-                LOG.warning(_LW("Device %s does not support state change"),
-                            device)
-            except exc.SriovNicError:
-                LOG.warning(_LW("Failed to set device %s state"), device)
-                return
-            if admin_state_up:
-                # update plugin about port status
-                self.plugin_rpc.update_device_up(self.context,
-                                                 device,
-                                                 self.agent_id,
-                                                 cfg.CONF.host)
-            else:
-                self.plugin_rpc.update_device_down(self.context,
-                                                   device,
-                                                   self.agent_id,
-                                                   cfg.CONF.host)
-        else:
-            LOG.info(_LI("No device with MAC %s defined on agent."), device)
-
-    def treat_devices_added_updated(self, devices_info):
-        try:
-            macs_list = set([device_info[0] for device_info in devices_info])
-            devices_details_list = self.plugin_rpc.get_devices_details_list(
-                self.context, macs_list, self.agent_id)
-        except Exception as e:
-            LOG.debug("Unable to get port details for devices "
-                      "with MAC addresses %(devices)s: %(e)s",
-                      {'devices': macs_list, 'e': e})
-            # resync is needed
-            return True
-
-        for device_details in devices_details_list:
-            device = device_details['device']
-            LOG.debug("Port with MAC address %s is added", device)
-
-            if 'port_id' in device_details:
-                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
-                         {'device': device, 'details': device_details})
-                port_id = device_details['port_id']
-                self.mac_to_port_id_mapping[device] = port_id
-                profile = device_details['profile']
-                spoofcheck = device_details.get('port_security_enabled', True)
-                self.treat_device(device,
-                                  profile.get('pci_slot'),
-                                  device_details['admin_state_up'],
-                                  spoofcheck)
-                self.ext_manager.handle_port(self.context, device_details)
-            else:
-                LOG.info(_LI("Device with MAC %s not defined on plugin"),
-                         device)
-        return False
-
-    def treat_devices_removed(self, devices):
-        resync = False
-        for device in devices:
-            mac, pci_slot = device
-            LOG.info(_LI("Removing device with MAC address %(mac)s and "
-                         "PCI slot %(pci_slot)s"),
-                     {'mac': mac, 'pci_slot': pci_slot})
-            try:
-                port_id = self.mac_to_port_id_mapping.get(mac)
-                if port_id:
-                    profile = {'pci_slot': pci_slot}
-                    port = {'port_id': port_id,
-                            'device': mac,
-                            'profile': profile}
-                    self.ext_manager.delete_port(self.context, port)
-                    del self.mac_to_port_id_mapping[mac]
-                else:
-                    LOG.warning(_LW("port_id to device with MAC "
-                                 "%s not found"), mac)
-                dev_details = self.plugin_rpc.update_device_down(self.context,
-                                                                 mac,
-                                                                 self.agent_id,
-                                                                 cfg.CONF.host)
-
-            except Exception as e:
-                LOG.debug("Removing port failed for device with MAC address "
-                          "%(mac)s and PCI slot %(pci_slot)s due to %(exc)s",
-                          {'mac': mac, 'pci_slot': pci_slot, 'exc': e})
-                resync = True
-                continue
-            if dev_details['exists']:
-                LOG.info(_LI("Port with MAC %(mac)s and PCI slot "
-                             "%(pci_slot)s updated."),
-                         {'mac': mac, 'pci_slot': pci_slot})
-            else:
-                LOG.debug("Device with MAC %(mac)s and PCI slot "
-                          "%(pci_slot)s not defined on plugin",
-                          {'mac': mac, 'pci_slot': pci_slot})
-        return resync
-
-    def daemon_loop(self):
-        sync = True
-        devices = set()
-
-        LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!"))
-
-        while True:
-            start = time.time()
-            LOG.debug("Agent rpc_loop - iteration:%d started",
-                      self.iter_num)
-            if sync:
-                LOG.info(_LI("Agent out of sync with plugin!"))
-                devices.clear()
-                sync = False
-            device_info = {}
-            # Save the updated devices set so it can be restored if a
-            # resync turns out to be needed, then clear
-            # self.updated_devices. As the greenthread should not yield
-            # between these two statements, this should be thread-safe.
-            updated_devices_copy = self.updated_devices
-            self.updated_devices = set()
-            try:
-                device_info = self.scan_devices(devices, updated_devices_copy)
-                if self._device_info_has_changes(device_info):
-                    LOG.debug("Agent loop found changes! %s", device_info)
-                    # If treat devices fails - indicates must resync with
-                    # plugin
-                    sync = self.process_network_devices(device_info)
-                    devices = device_info['current']
-            except Exception:
-                LOG.exception(_LE("Error in agent loop. Devices info: %s"),
-                              device_info)
-                sync = True
-                # Restore devices that were removed from this set earlier
-                # without overwriting ones that may have arrived since.
-                self.updated_devices |= updated_devices_copy
-
-            # sleep till end of polling interval
-            elapsed = (time.time() - start)
-            if (elapsed < self.polling_interval):
-                time.sleep(self.polling_interval - elapsed)
-            else:
-                LOG.debug("Loop iteration exceeded interval "
-                          "(%(polling_interval)s vs. %(elapsed)s)!",
-                          {'polling_interval': self.polling_interval,
-                           'elapsed': elapsed})
-            self.iter_num = self.iter_num + 1
-
-
-class SriovNicAgentConfigParser(object):
-    def __init__(self):
-        self.device_mappings = {}
-        self.exclude_devices = {}
-
-    def parse(self):
-        """Parses device_mappings and exclude_devices.
-
-        Parse and validate the consistency in both mappings
-        """
-        self.device_mappings = n_utils.parse_mappings(
-            cfg.CONF.SRIOV_NIC.physical_device_mappings)
-        self.exclude_devices = config.parse_exclude_devices(
-            cfg.CONF.SRIOV_NIC.exclude_devices)
-        self._validate()
-
-    def _validate(self):
-        """Validate configuration.
-
-        Validate that network_device in excluded_device
-        exists in device mappings
-        """
-        dev_net_set = set(self.device_mappings.values())
-        for dev_name in self.exclude_devices.keys():
-            if dev_name not in dev_net_set:
-                raise ValueError(_("Device name %(dev_name)s is missing from "
-                                   "physical_device_mappings") % {'dev_name':
-                                                                  dev_name})
-
-
-def main():
-    common_config.init(sys.argv[1:])
-
-    common_config.setup_logging()
-    try:
-        config_parser = SriovNicAgentConfigParser()
-        config_parser.parse()
-        device_mappings = config_parser.device_mappings
-        exclude_devices = config_parser.exclude_devices
-
-    except ValueError:
-        LOG.exception(_LE("Failed on Agent configuration parse. "
-                          "Agent terminated!"))
-        raise SystemExit(1)
-    LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
-    LOG.info(_LI("Exclude Devices: %s"), exclude_devices)
-
-    polling_interval = cfg.CONF.AGENT.polling_interval
-    try:
-        agent = SriovNicSwitchAgent(device_mappings,
-                                    exclude_devices,
-                                    polling_interval)
-    except exc.SriovNicError:
-        LOG.exception(_LE("Agent Initialization Failed"))
-        raise SystemExit(1)
-    # Start everything.
-    LOG.info(_LI("Agent initialized successfully, now running... "))
-    agent.daemon_loop()
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py b/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py b/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py
deleted file mode 100644 (file)
index fd6ec90..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Exceptions used by SRIOV Mechanism Driver."""
-
-from neutron._i18n import _
-from neutron.common import exceptions
-
-
-class SriovUnsupportedNetworkType(exceptions.NeutronException):
-    """Method was invoked for unsupported network type."""
-    message = _("Unsupported network type %(net_type)s.")
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py
deleted file mode 100644 (file)
index 9204cc9..0000000
+++ /dev/null
@@ -1,196 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from oslo_config import cfg
-from oslo_log import log
-
-from neutron._i18n import _, _LE, _LW
-from neutron.common import constants
-from neutron.extensions import portbindings
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \
-    import exceptions as exc
-from neutron.services.qos import qos_consts
-
-
-LOG = log.getLogger(__name__)
-VIF_TYPE_HW_VEB = 'hw_veb'
-FLAT_VLAN = 0
-
-sriov_opts = [
-    cfg.ListOpt('supported_pci_vendor_devs',
-               default=['15b3:1004', '8086:10ca'],
-               help=_("Comma-separated list of supported PCI vendor devices, "
-                      "as defined by vendor_id:product_id according to the "
-                      "PCI ID Repository. Default enables support for Intel "
-                      "and Mellanox SR-IOV capable NICs.")),
-]
-
-cfg.CONF.register_opts(sriov_opts, "ml2_sriov")
-
-
-class SriovNicSwitchMechanismDriver(api.MechanismDriver):
-    """Mechanism Driver for SR-IOV capable NIC based switching.
-
-    The SriovNicSwitchMechanismDriver integrates the ml2 plugin with the
-    sriovNicSwitch L2 agent depending on configuration option.
-    Port binding with this driver may require the sriovNicSwitch agent
-    to be running on the port's host, and that agent to have connectivity
-    to at least one segment of the port's network.
-    L2 agent is not essential for port binding; port binding is handled by
-    VIF Driver via libvirt domain XML.
-    L2 Agent presents in  order to manage port update events.
-    """
-
-    supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT]
-
-    def __init__(self,
-                 agent_type=constants.AGENT_TYPE_NIC_SWITCH,
-                 vif_type=VIF_TYPE_HW_VEB,
-                 vif_details={portbindings.CAP_PORT_FILTER: False},
-                 supported_vnic_types=[portbindings.VNIC_DIRECT,
-                                       portbindings.VNIC_MACVTAP,
-                                       portbindings.VNIC_DIRECT_PHYSICAL],
-                 supported_pci_vendor_info=None):
-        """Initialize base class for SriovNicSwitch L2 agent type.
-
-        :param agent_type: Constant identifying agent type in agents_db
-        :param vif_type: Value for binding:vif_type when bound
-        :param vif_details: Dictionary with details for VIF driver when bound
-        :param supported_vnic_types: The binding:vnic_type values we can bind
-        :param supported_pci_vendor_info: The pci_vendor_info values to bind
-        """
-        self.agent_type = agent_type
-        self.supported_vnic_types = supported_vnic_types
-        self.vif_type = vif_type
-        self.vif_details = vif_details
-        self.supported_network_types = (p_const.TYPE_VLAN, p_const.TYPE_FLAT)
-
-    def initialize(self):
-        try:
-            self.pci_vendor_info = cfg.CONF.ml2_sriov.supported_pci_vendor_devs
-            self._check_pci_vendor_config(self.pci_vendor_info)
-        except ValueError:
-            LOG.exception(_LE("Failed to parse supported PCI vendor devices"))
-            raise cfg.Error(_("Parsing supported pci_vendor_devs failed"))
-
-    def bind_port(self, context):
-        LOG.debug("Attempting to bind port %(port)s on "
-                  "network %(network)s",
-                  {'port': context.current['id'],
-                   'network': context.network.current['id']})
-        vnic_type = context.current.get(portbindings.VNIC_TYPE,
-                                        portbindings.VNIC_NORMAL)
-        if vnic_type not in self.supported_vnic_types:
-            LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
-                      vnic_type)
-            return
-
-        if not self._check_supported_pci_vendor_device(context):
-            LOG.debug("Refusing to bind due to unsupported pci_vendor device")
-            return
-
-        if vnic_type == portbindings.VNIC_DIRECT_PHYSICAL:
-            # Physical functions don't support things like QoS properties,
-            # spoof checking, etc. so we might as well side-step the agent
-            # for now. The agent also doesn't currently recognize non-VF
-            # PCI devices so we won't get port status change updates
-            # either. This should be changed in the future so physical
-            # functions can use device mapping checks and the plugin can
-            # get port status updates.
-            self.try_to_bind(context, None)
-            return
-
-        for agent in context.host_agents(self.agent_type):
-            LOG.debug("Checking agent: %s", agent)
-            if agent['alive']:
-                if self.try_to_bind(context, agent):
-                    return
-            else:
-                LOG.warning(_LW("Attempting to bind with dead agent: %s"),
-                            agent)
-
-    def try_to_bind(self, context, agent):
-        for segment in context.segments_to_bind:
-            if self.check_segment(segment, agent):
-                port_status = (constants.PORT_STATUS_ACTIVE if agent is None
-                               else constants.PORT_STATUS_DOWN)
-                context.set_binding(segment[api.ID],
-                                    self.vif_type,
-                                    self._get_vif_details(segment),
-                                    port_status)
-                LOG.debug("Bound using segment: %s", segment)
-                return True
-        return False
-
-    def check_segment(self, segment, agent=None):
-        """Check if segment can be bound.
-
-        :param segment: segment dictionary describing segment to bind
-        :param agent: agents_db entry describing agent to bind or None
-        :returns: True if segment can be bound for agent
-        """
-        network_type = segment[api.NETWORK_TYPE]
-        if network_type in self.supported_network_types:
-            if agent:
-                mappings = agent['configurations'].get('device_mappings', {})
-                LOG.debug("Checking segment: %(segment)s "
-                          "for mappings: %(mappings)s ",
-                          {'segment': segment, 'mappings': mappings})
-                return segment[api.PHYSICAL_NETWORK] in mappings
-            return True
-        return False
-
-    def _check_supported_pci_vendor_device(self, context):
-        if self.pci_vendor_info:
-            profile = context.current.get(portbindings.PROFILE, {})
-            if not profile:
-                LOG.debug("Missing profile in port binding")
-                return False
-            pci_vendor_info = profile.get('pci_vendor_info')
-            if not pci_vendor_info:
-                LOG.debug("Missing pci vendor info in profile")
-                return False
-            if pci_vendor_info not in self.pci_vendor_info:
-                LOG.debug("Unsupported pci_vendor %s", pci_vendor_info)
-                return False
-            return True
-        return False
-
-    def _get_vif_details(self, segment):
-        network_type = segment[api.NETWORK_TYPE]
-        if network_type == p_const.TYPE_FLAT:
-            vlan_id = FLAT_VLAN
-        elif network_type == p_const.TYPE_VLAN:
-            vlan_id = segment[api.SEGMENTATION_ID]
-        else:
-            raise exc.SriovUnsupportedNetworkType(net_type=network_type)
-        vif_details = self.vif_details.copy()
-        vif_details[portbindings.VIF_DETAILS_VLAN] = str(vlan_id)
-        return vif_details
-
-    @staticmethod
-    def _check_pci_vendor_config(pci_vendor_list):
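-        # Each entry must be a vendor_id:product_id pair with both parts
-        # non-empty, e.g. "8086:10ca"; anything else fails the unpacking
-        # below and is reported as a ValueError.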
-        for pci_vendor_info in pci_vendor_list:
-            try:
-                vendor_id, product_id = [
-                    item.strip() for item in pci_vendor_info.split(':')
-                    if item.strip()]
-            except ValueError:
-                raise ValueError(_('Incorrect pci_vendor_info: "%s"; should '
-                                   'be a vendor_id:product_id pair') %
-                                 pci_vendor_info)
diff --git a/neutron/plugins/ml2/drivers/openvswitch/__init__.py b/neutron/plugins/ml2/drivers/openvswitch/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py b/neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py b/neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py b/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py
deleted file mode 100644 (file)
index 059510b..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright 2012 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.agent.common import config
-from neutron.common import constants as n_const
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.openvswitch.agent.common \
-    import constants
-
-
-DEFAULT_BRIDGE_MAPPINGS = []
-DEFAULT_VLAN_RANGES = []
-DEFAULT_TUNNEL_RANGES = []
-DEFAULT_TUNNEL_TYPES = []
-
-ovs_opts = [
-    cfg.StrOpt('integration_bridge', default='br-int',
-               help=_("Integration bridge to use. "
-                      "Do not change this parameter unless you have a good "
-                      "reason to. This is the name of the OVS integration "
-                      "bridge. There is one per hypervisor. The integration "
-                      "bridge acts as a virtual 'patch bay'. All VM VIFs are "
-                      "attached to this bridge and then 'patched' according "
-                      "to their network connectivity.")),
-    cfg.StrOpt('tunnel_bridge', default='br-tun',
-               help=_("Tunnel bridge to use.")),
-    cfg.StrOpt('int_peer_patch_port', default='patch-tun',
-               help=_("Peer patch port in integration bridge for tunnel "
-                      "bridge.")),
-    cfg.StrOpt('tun_peer_patch_port', default='patch-int',
-               help=_("Peer patch port in tunnel bridge for integration "
-                      "bridge.")),
-    cfg.IPOpt('local_ip', version=4,
-              help=_("Local IP address of tunnel endpoint.")),
-    cfg.ListOpt('bridge_mappings',
-                default=DEFAULT_BRIDGE_MAPPINGS,
-                help=_("Comma-separated list of <physical_network>:<bridge> "
-                       "tuples mapping physical network names to the agent's "
-                       "node-specific Open vSwitch bridge names to be used "
-                       "for flat and VLAN networks. The length of bridge "
-                       "names should be no more than 11. Each bridge must "
-                       "exist, and should have a physical network interface "
-                       "configured as a port. All physical networks "
-                       "configured on the server should have mappings to "
-                       "appropriate bridges on each agent. "
-                       "Note: If you remove a bridge from this "
-                       "mapping, make sure to disconnect it from the "
-                       "integration bridge as it won't be managed by the "
-                       "agent anymore. Deprecated for ofagent.")),
-    cfg.BoolOpt('use_veth_interconnection', default=False,
-                help=_("Use veths instead of patch ports to interconnect the "
-                       "integration bridge to physical networks. "
-                       "Support kernel without Open vSwitch patch port "
-                       "support so long as it is set to True.")),
-    cfg.StrOpt('of_interface', default='ovs-ofctl',
-               choices=['ovs-ofctl', 'native'],
-               help=_("OpenFlow interface to use.")),
-    cfg.StrOpt('datapath_type', default=constants.OVS_DATAPATH_SYSTEM,
-               choices=[constants.OVS_DATAPATH_SYSTEM,
-                        constants.OVS_DATAPATH_NETDEV],
-               help=_("OVS datapath to use. 'system' is the default value and "
-                      "corresponds to the kernel datapath. To enable the "
-                      "userspace datapath set this value to 'netdev'.")),
-    cfg.StrOpt('vhostuser_socket_dir', default=constants.VHOST_USER_SOCKET_DIR,
-               help=_("OVS vhost-user socket directory.")),
-    cfg.IPOpt('of_listen_address', default='127.0.0.1',
-              help=_("Address to listen on for OpenFlow connections. "
-                     "Used only for 'native' driver.")),
-    cfg.PortOpt('of_listen_port', default=6633,
-                help=_("Port to listen on for OpenFlow connections. "
-                       "Used only for 'native' driver.")),
-    cfg.IntOpt('of_connect_timeout', default=30,
-               help=_("Timeout in seconds to wait for "
-                      "the local switch connecting the controller. "
-                      "Used only for 'native' driver.")),
-    cfg.IntOpt('of_request_timeout', default=10,
-               help=_("Timeout in seconds to wait for a single "
-                      "OpenFlow request. "
-                      "Used only for 'native' driver.")),
-]
-
-agent_opts = [
-    cfg.IntOpt('polling_interval', default=2,
-               help=_("The number of seconds the agent will wait between "
-                      "polling for local device changes.")),
-    cfg.BoolOpt('minimize_polling',
-                default=True,
-                help=_("Minimize polling by monitoring ovsdb for interface "
-                       "changes.")),
-    cfg.IntOpt('ovsdb_monitor_respawn_interval',
-               default=constants.DEFAULT_OVSDBMON_RESPAWN,
-               help=_("The number of seconds to wait before respawning the "
-                      "ovsdb monitor after losing communication with it.")),
-    cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
-                help=_("Network types supported by the agent "
-                       "(gre and/or vxlan).")),
-    cfg.PortOpt('vxlan_udp_port', default=p_const.VXLAN_UDP_PORT,
-                help=_("The UDP port to use for VXLAN tunnels.")),
-    cfg.IntOpt('veth_mtu',
-               help=_("MTU size of veth interfaces")),
-    cfg.BoolOpt('l2_population', default=False,
-                help=_("Use ML2 l2population mechanism driver to learn "
-                       "remote MAC and IPs and improve tunnel scalability.")),
-    cfg.BoolOpt('arp_responder', default=False,
-                help=_("Enable local ARP responder if it is supported. "
-                       "Requires OVS 2.1 and ML2 l2population driver. "
-                       "Allows the switch (when supporting an overlay) "
-                       "to respond to an ARP request locally without "
-                       "performing a costly ARP broadcast into the overlay.")),
-    cfg.BoolOpt('prevent_arp_spoofing', default=True,
-                help=_("Enable suppression of ARP responses that don't match "
-                       "an IP address that belongs to the port from which "
-                       "they originate. Note: This prevents the VMs attached "
-                       "to this agent from spoofing, it doesn't protect them "
-                       "from other devices which have the capability to spoof "
-                       "(e.g. bare metal or VMs attached to agents without "
-                       "this flag set to True). Spoofing rules will not be "
-                       "added to any ports that have port security disabled. "
-                       "For LinuxBridge, this requires ebtables. For OVS, it "
-                       "requires a version that supports matching ARP "
-                       "headers.")),
-    cfg.BoolOpt('dont_fragment', default=True,
-                help=_("Set or un-set the don't fragment (DF) bit on "
-                       "outgoing IP packet carrying GRE/VXLAN tunnel.")),
-    cfg.BoolOpt('enable_distributed_routing', default=False,
-                help=_("Make the l2 agent run in DVR mode.")),
-    cfg.IntOpt('quitting_rpc_timeout', default=10,
-               help=_("Set new timeout in seconds for new rpc calls after "
-                      "agent receives SIGTERM. If value is set to 0, rpc "
-                      "timeout won't be changed")),
-    cfg.BoolOpt('drop_flows_on_start', default=False,
-                help=_("Reset flow table on start. Setting this to True will "
-                       "cause brief traffic interruption.")),
-    cfg.BoolOpt('tunnel_csum', default=False,
-                help=_("Set or un-set the tunnel header checksum  on "
-                       "outgoing IP packet carrying GRE/VXLAN tunnel.")),
-    cfg.StrOpt('agent_type', default=n_const.AGENT_TYPE_OVS,
-               deprecated_for_removal=True,
-               help=_("Selects the Agent Type reported"))
-]
-
-
-cfg.CONF.register_opts(ovs_opts, "OVS")
-cfg.CONF.register_opts(agent_opts, "AGENT")
-config.register_agent_state_opts_helper(cfg.CONF)
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py b/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py
deleted file mode 100644 (file)
index bc6dafb..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from neutron.plugins.common import constants as p_const
-
-
-# Special vlan_id value in ovs_vlan_allocations table indicating flat network
-FLAT_VLAN_ID = -1
-
-# Topic for tunnel notifications between the plugin and agent
-TUNNEL = 'tunnel'
-
-# Name prefixes for veth device or patch port pair linking the integration
-# bridge with the physical bridge for a physical network
-PEER_INTEGRATION_PREFIX = 'int-'
-PEER_PHYSICAL_PREFIX = 'phy-'
-
-# Nonexistent peer used to create patch ports without associating them;
-# this allows flows to be defined before association.
-NONEXISTENT_PEER = 'nonexistent-peer'
-
-# The different types of tunnels
-TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN,
-                        p_const.TYPE_GENEVE]
-
-### OpenFlow table IDs
-
-## Integration bridge (int_br)
-
-LOCAL_SWITCHING = 0
-
-# Various tables for DVR use of integration bridge flows
-DVR_TO_SRC_MAC = 1
-DVR_TO_SRC_MAC_VLAN = 2
-
-CANARY_TABLE = 23
-
-# Table for ARP poison/spoofing prevention rules
-ARP_SPOOF_TABLE = 24
-
-## Tunnel bridge (tun_br)
-
-# Various tables for tunneling flows
-DVR_PROCESS = 1
-PATCH_LV_TO_TUN = 2
-GRE_TUN_TO_LV = 3
-VXLAN_TUN_TO_LV = 4
-GENEVE_TUN_TO_LV = 6
-
-DVR_NOT_LEARN = 9
-LEARN_FROM_TUN = 10
-UCAST_TO_TUN = 20
-ARP_RESPONDER = 21
-FLOOD_TO_TUN = 22
-
-## Physical Bridges (phys_brs)
-
-# Various tables for DVR use of physical bridge flows
-DVR_PROCESS_VLAN = 1
-LOCAL_VLAN_TRANSLATION = 2
-DVR_NOT_LEARN_VLAN = 3
-
-### end of OpenFlow table IDs
-
-# type for ARP reply in ARP header
-ARP_REPLY = '0x2'
-
-# Map tunnel types to table numbers
-TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV,
-             p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV,
-             p_const.TYPE_GENEVE: GENEVE_TUN_TO_LV}
-
-
-# The default respawn interval for the ovsdb monitor
-DEFAULT_OVSDBMON_RESPAWN = 30
-
-# Represents an invalid OF port
-OFPORT_INVALID = -1
-
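-# Flow action template for answering an ARP request in place: reflect the
-# frame back out its ingress port with the ethernet source copied to the
-# destination, the ARP opcode set to reply (0x2), the original sender
-# hw/ip copied into the target fields and the answered MAC/IP loaded into
-# the sender fields.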
-ARP_RESPONDER_ACTIONS = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],'
-                         'mod_dl_src:%(mac)s,'
-                         'load:0x2->NXM_OF_ARP_OP[],'
-                         'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],'
-                         'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],'
-                         'load:%(mac)#x->NXM_NX_ARP_SHA[],'
-                         'load:%(ip)#x->NXM_OF_ARP_SPA[],'
-                         'in_port')
-
-# Represent ovs status
-OVS_RESTARTED = 0
-OVS_NORMAL = 1
-OVS_DEAD = 2
-
-EXTENSION_DRIVER_TYPE = 'ovs'
-
-# ovs datapath types
-OVS_DATAPATH_SYSTEM = 'system'
-OVS_DATAPATH_NETDEV = 'netdev'
-OVS_DPDK_VHOST_USER = 'dpdkvhostuser'
-
-# default ovs vhost-user socket location
-VHOST_USER_SOCKET_DIR = '/var/run/openvswitch'
-
-MAX_DEVICE_RETRIES = 5
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py
deleted file mode 100644 (file)
index 5977083..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (c) 2015 Openstack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron.agent.common import ovs_lib
-from neutron.agent.l2.extensions import qos
-from neutron.plugins.ml2.drivers.openvswitch.mech_driver import (
-    mech_openvswitch)
-
-
-class QosOVSAgentDriver(qos.QosAgentDriver):
-
-    SUPPORTED_RULES = (
-        mech_openvswitch.OpenvswitchMechanismDriver.supported_qos_rule_types)
-
-    def __init__(self):
-        super(QosOVSAgentDriver, self).__init__()
-        self.br_int_name = cfg.CONF.OVS.integration_bridge
-        self.br_int = None
-
-    def initialize(self):
-        self.br_int = ovs_lib.OVSBridge(self.br_int_name)
-
-    def create_bandwidth_limit(self, port, rule):
-        self.update_bandwidth_limit(port, rule)
-
-    def update_bandwidth_limit(self, port, rule):
-        port_name = port['vif_port'].port_name
-        max_kbps = rule.max_kbps
-        max_burst_kbps = rule.max_burst_kbps
-
-        self.br_int.create_egress_bw_limit_for_port(port_name,
-                                                    max_kbps,
-                                                    max_burst_kbps)
-
-    def delete_bandwidth_limit(self, port):
-        port_name = port['vif_port'].port_name
-        self.br_int.delete_egress_bw_limit_for_port(port_name)
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/main.py b/neutron/plugins/ml2/drivers/openvswitch/agent/main.py
deleted file mode 100644 (file)
index 2fd9652..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import importutils
-
-from neutron.common import config as common_config
-from neutron.common import utils as n_utils
-
-
-LOG = logging.getLogger(__name__)
-cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
-                      'common.config')
-
-
-_main_modules = {
-    'ovs-ofctl': 'neutron.plugins.ml2.drivers.openvswitch.agent.openflow.'
-                 'ovs_ofctl.main',
-    'native': 'neutron.plugins.ml2.drivers.openvswitch.agent.openflow.'
-                 'native.main',
-}
-
-
-def main():
-    common_config.init(sys.argv[1:])
-    driver_name = cfg.CONF.OVS.of_interface
-    mod_name = _main_modules[driver_name]
-    mod = importutils.import_module(mod_name)
-    mod.init_config()
-    common_config.setup_logging()
-    n_utils.log_opt_values(LOG)
-    mod.main()
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py
deleted file mode 100644 (file)
index a5551c1..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from ryu.lib.packet import ether_types
-from ryu.lib.packet import icmpv6
-from ryu.lib.packet import in_proto
-
-
-class OVSDVRProcessMixin(object):
-    """Common logic for br-tun and br-phys' DVR_PROCESS tables.
-
-    Inheriters should provide self.dvr_process_table_id and
-    self.dvr_process_next_table_id.
-    """
-
-    @staticmethod
-    def _dvr_process_ipv4_match(ofp, ofpp, vlan_tag, gateway_ip):
-        return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT,
-                             eth_type=ether_types.ETH_TYPE_ARP,
-                             arp_tpa=gateway_ip)
-
-    def install_dvr_process_ipv4(self, vlan_tag, gateway_ip):
-        # block ARP
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_process_ipv4_match(ofp, ofpp,
-            vlan_tag=vlan_tag, gateway_ip=gateway_ip)
-        self.install_drop(table_id=self.dvr_process_table_id,
-                          priority=3,
-                          match=match)
-
-    def delete_dvr_process_ipv4(self, vlan_tag, gateway_ip):
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_process_ipv4_match(ofp, ofpp,
-            vlan_tag=vlan_tag, gateway_ip=gateway_ip)
-        self.delete_flows(table_id=self.dvr_process_table_id, match=match)
-
-    @staticmethod
-    def _dvr_process_ipv6_match(ofp, ofpp, vlan_tag, gateway_mac):
-        return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT,
-                             eth_type=ether_types.ETH_TYPE_IPV6,
-                             ip_proto=in_proto.IPPROTO_ICMPV6,
-                             icmpv6_type=icmpv6.ND_ROUTER_ADVERT,
-                             eth_src=gateway_mac)
-
-    def install_dvr_process_ipv6(self, vlan_tag, gateway_mac):
-        # block RA
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_process_ipv6_match(ofp, ofpp,
-            vlan_tag=vlan_tag, gateway_mac=gateway_mac)
-        self.install_drop(table_id=self.dvr_process_table_id, priority=3,
-                          match=match)
-
-    def delete_dvr_process_ipv6(self, vlan_tag, gateway_mac):
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_process_ipv6_match(ofp, ofpp,
-            vlan_tag=vlan_tag, gateway_mac=gateway_mac)
-        self.delete_flows(table_id=self.dvr_process_table_id, match=match)
-
-    @staticmethod
-    def _dvr_process_in_match(ofp, ofpp, vlan_tag, vif_mac):
-        return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT,
-                             eth_dst=vif_mac)
-
-    @staticmethod
-    def _dvr_process_out_match(ofp, ofpp, vlan_tag, vif_mac):
-        return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT,
-                             eth_src=vif_mac)
-
-    def install_dvr_process(self, vlan_tag, vif_mac, dvr_mac_address):
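-        # Frames addressed *to* the gateway port are dropped (they are
-        # handled locally); frames *from* it get their source MAC
-        # rewritten to the per-host DVR MAC before the next table.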
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_process_in_match(ofp, ofpp,
-                                           vlan_tag=vlan_tag, vif_mac=vif_mac)
-        table_id = self.dvr_process_table_id
-        self.install_drop(table_id=table_id,
-                          priority=2,
-                          match=match)
-        match = self._dvr_process_out_match(ofp, ofpp,
-                                            vlan_tag=vlan_tag, vif_mac=vif_mac)
-        actions = [
-            ofpp.OFPActionSetField(eth_src=dvr_mac_address),
-        ]
-        instructions = [
-            ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
-            ofpp.OFPInstructionGotoTable(
-                table_id=self.dvr_process_next_table_id),
-        ]
-        self.install_instructions(table_id=table_id,
-                                  priority=1,
-                                  match=match,
-                                  instructions=instructions)
-
-    def delete_dvr_process(self, vlan_tag, vif_mac):
-        (_dp, ofp, ofpp) = self._get_dp()
-        table_id = self.dvr_process_table_id
-        match = self._dvr_process_in_match(ofp, ofpp,
-                                           vlan_tag=vlan_tag, vif_mac=vif_mac)
-        self.delete_flows(table_id=table_id, match=match)
-        match = self._dvr_process_out_match(ofp, ofpp,
-                                            vlan_tag=vlan_tag, vif_mac=vif_mac)
-        self.delete_flows(table_id=table_id, match=match)
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py
deleted file mode 100644 (file)
index e7bfb0d..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-* references
-** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
-"""
-
-from oslo_log import log as logging
-from ryu.lib.packet import ether_types
-from ryu.lib.packet import icmpv6
-from ryu.lib.packet import in_proto
-
-from neutron._i18n import _LE
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import ovs_bridge
-
-
-LOG = logging.getLogger(__name__)
-
-
-class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge):
-    """openvswitch agent br-int specific logic."""
-
-    def setup_default_table(self):
-        self.install_normal()
-        self.setup_canary_table()
-        self.install_drop(table_id=constants.ARP_SPOOF_TABLE)
-
-    def setup_canary_table(self):
-        self.install_drop(constants.CANARY_TABLE)
-
-    def check_canary_table(self):
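-        # The canary flow is installed once at setup time; if the switch
-        # answers but the flow is gone, ovs-vswitchd was restarted and
-        # all flows were lost, so they must be reprovisioned.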
-        try:
-            flows = self.dump_flows(constants.CANARY_TABLE)
-        except RuntimeError:
-            LOG.exception(_LE("Failed to communicate with the switch"))
-            return constants.OVS_DEAD
-        return constants.OVS_NORMAL if flows else constants.OVS_RESTARTED
-
-    @staticmethod
-    def _local_vlan_match(_ofp, ofpp, port, vlan_vid):
-        return ofpp.OFPMatch(in_port=port, vlan_vid=vlan_vid)
-
-    def provision_local_vlan(self, port, lvid, segmentation_id):
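-        # For flat networks (segmentation_id is None) match untagged
-        # traffic and push a VLAN header first; either way the VID is then
-        # rewritten to the local VLAN id before NORMAL forwarding.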
-        (_dp, ofp, ofpp) = self._get_dp()
-        if segmentation_id is None:
-            vlan_vid = ofp.OFPVID_NONE
-            actions = [ofpp.OFPActionPushVlan()]
-        else:
-            vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
-            actions = []
-        match = self._local_vlan_match(ofp, ofpp, port, vlan_vid)
-        actions += [
-            ofpp.OFPActionSetField(vlan_vid=lvid | ofp.OFPVID_PRESENT),
-            ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
-        ]
-        self.install_apply_actions(priority=3,
-                                   match=match,
-                                   actions=actions)
-
-    def reclaim_local_vlan(self, port, segmentation_id):
-        (_dp, ofp, ofpp) = self._get_dp()
-        if segmentation_id is None:
-            vlan_vid = ofp.OFPVID_NONE
-        else:
-            vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
-        match = self._local_vlan_match(ofp, ofpp, port, vlan_vid)
-        self.delete_flows(match=match)
-
-    @staticmethod
-    def _dvr_to_src_mac_match(ofp, ofpp, vlan_tag, dst_mac):
-        return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT,
-                             eth_dst=dst_mac)
-
-    @staticmethod
-    def _dvr_to_src_mac_table_id(network_type):
-        if network_type == p_const.TYPE_VLAN:
-            return constants.DVR_TO_SRC_MAC_VLAN
-        else:
-            return constants.DVR_TO_SRC_MAC
-
-    def install_dvr_to_src_mac(self, network_type,
-                               vlan_tag, gateway_mac, dst_mac, dst_port):
-        table_id = self._dvr_to_src_mac_table_id(network_type)
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_to_src_mac_match(ofp, ofpp,
-                                           vlan_tag=vlan_tag, dst_mac=dst_mac)
-        actions = [
-            ofpp.OFPActionPopVlan(),
-            ofpp.OFPActionSetField(eth_src=gateway_mac),
-            ofpp.OFPActionOutput(dst_port, 0),
-        ]
-        self.install_apply_actions(table_id=table_id,
-                                   priority=4,
-                                   match=match,
-                                   actions=actions)
-
-    def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac):
-        table_id = self._dvr_to_src_mac_table_id(network_type)
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_to_src_mac_match(ofp, ofpp,
-                                           vlan_tag=vlan_tag, dst_mac=dst_mac)
-        self.delete_flows(table_id=table_id, match=match)
-
-    def add_dvr_mac_vlan(self, mac, port):
-        self.install_goto(table_id=constants.LOCAL_SWITCHING,
-                          priority=4,
-                          in_port=port,
-                          eth_src=mac,
-                          dest_table_id=constants.DVR_TO_SRC_MAC_VLAN)
-
-    def remove_dvr_mac_vlan(self, mac):
-        # REVISIT(yamamoto): match in_port as well?
-        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
-                          eth_src=mac)
-
-    def add_dvr_mac_tun(self, mac, port):
-        self.install_goto(table_id=constants.LOCAL_SWITCHING,
-                          priority=2,
-                          in_port=port,
-                          eth_src=mac,
-                          dest_table_id=constants.DVR_TO_SRC_MAC)
-
-    def remove_dvr_mac_tun(self, mac, port):
-        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
-                          in_port=port, eth_src=mac)
-
-    @staticmethod
-    def _arp_reply_match(ofp, ofpp, port):
-        return ofpp.OFPMatch(in_port=port,
-                             eth_type=ether_types.ETH_TYPE_ARP)
-
-    @staticmethod
-    def _icmpv6_reply_match(ofp, ofpp, port):
-        return ofpp.OFPMatch(in_port=port,
-                             eth_type=ether_types.ETH_TYPE_IPV6,
-                             ip_proto=in_proto.IPPROTO_ICMPV6,
-                             icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT)
-
-    def install_icmpv6_na_spoofing_protection(self, port, ip_addresses):
-        # Allow neighbor advertisements as long as they match addresses
-        # that actually belong to the port.
-        for ip in ip_addresses:
-            masked_ip = self._cidr_to_ryu(ip)
-            self.install_normal(
-                table_id=constants.ARP_SPOOF_TABLE, priority=2,
-                eth_type=ether_types.ETH_TYPE_IPV6,
-                ip_proto=in_proto.IPPROTO_ICMPV6,
-                icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT,
-                ipv6_nd_target=masked_ip, in_port=port)
-
-        # Now that the rules are ready, direct icmpv6 neighbor advertisement
-        # traffic from the port into the anti-spoof table.
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._icmpv6_reply_match(ofp, ofpp, port=port)
-        self.install_goto(table_id=constants.LOCAL_SWITCHING,
-                          priority=10,
-                          match=match,
-                          dest_table_id=constants.ARP_SPOOF_TABLE)
-
-    def install_arp_spoofing_protection(self, port, ip_addresses):
-        # allow ARP replies as long as they match addresses that actually
-        # belong to the port.
-        for ip in ip_addresses:
-            masked_ip = self._cidr_to_ryu(ip)
-            self.install_normal(table_id=constants.ARP_SPOOF_TABLE,
-                                priority=2,
-                                eth_type=ether_types.ETH_TYPE_ARP,
-                                arp_spa=masked_ip,
-                                in_port=port)
-
-        # Now that the rules are ready, direct ARP traffic from the port into
-        # the anti-spoof table.
-        # This strategy fails gracefully because OVS versions that can't match
-        # on ARP headers will just process traffic normally.
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._arp_reply_match(ofp, ofpp, port=port)
-        self.install_goto(table_id=constants.LOCAL_SWITCHING,
-                          priority=10,
-                          match=match,
-                          dest_table_id=constants.ARP_SPOOF_TABLE)
-
-    def delete_arp_spoofing_protection(self, port):
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._arp_reply_match(ofp, ofpp, port=port)
-        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
-                          match=match)
-        match = self._icmpv6_reply_match(ofp, ofpp, port=port)
-        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
-                          match=match)
-        self.delete_arp_spoofing_allow_rules(port)
-
-    def delete_arp_spoofing_allow_rules(self, port):
-        self.delete_flows(table_id=constants.ARP_SPOOF_TABLE,
-                          in_port=port)
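
A minimal usage sketch of the anti-spoofing pipeline defined above, assuming `br` is an already-initialized OVSIntegrationBridge and that OpenFlow port 5 carries a VIF whose only allowed address is 192.0.2.10 (port number and address are hypothetical):

    # Hypothetical port and address; constants as in the module above.
    br.setup_default_table()        # NORMAL flow, canary table, ARP-table drop
    br.install_arp_spoofing_protection(port=5, ip_addresses=['192.0.2.10/32'])
    # Resulting pipeline:
    #   LOCAL_SWITCHING: priority=10, eth_type=ARP, in_port=5 -> ARP_SPOOF_TABLE
    #   ARP_SPOOF_TABLE: priority=2, arp_spa=192.0.2.10 -> NORMAL
    #   ARP_SPOOF_TABLE: priority=0 (from setup_default_table) -> drop
    br.delete_arp_spoofing_protection(port=5)   # removes the goto and allow rules
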
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_phys.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_phys.py
deleted file mode 100644 (file)
index a3aad0f..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import br_dvr_process
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import ovs_bridge
-
-
-class OVSPhysicalBridge(ovs_bridge.OVSAgentBridge,
-                        br_dvr_process.OVSDVRProcessMixin):
-    """openvswitch agent physical bridge specific logic."""
-
-    # Used by OVSDVRProcessMixin
-    dvr_process_table_id = constants.DVR_PROCESS_VLAN
-    dvr_process_next_table_id = constants.LOCAL_VLAN_TRANSLATION
-
-    def setup_default_table(self):
-        self.delete_flows()
-        self.install_normal()
-
-    @staticmethod
-    def _local_vlan_match(ofp, ofpp, port, lvid):
-        return ofpp.OFPMatch(in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT)
-
-    def provision_local_vlan(self, port, lvid, segmentation_id, distributed):
-        table_id = constants.LOCAL_VLAN_TRANSLATION if distributed else 0
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._local_vlan_match(ofp, ofpp, port, lvid)
-        if segmentation_id is None:
-            actions = [ofpp.OFPActionPopVlan()]
-        else:
-            vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
-            actions = [ofpp.OFPActionSetField(vlan_vid=vlan_vid)]
-        actions += [ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)]
-        self.install_apply_actions(table_id=table_id,
-                                   priority=4,
-                                   match=match,
-                                   actions=actions)
-
-    def reclaim_local_vlan(self, port, lvid):
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._local_vlan_match(ofp, ofpp, port, lvid)
-        self.delete_flows(match=match)
-
-    def add_dvr_mac_vlan(self, mac, port):
-        self.install_output(table_id=constants.DVR_NOT_LEARN_VLAN,
-            priority=2, eth_src=mac, port=port)
-
-    def remove_dvr_mac_vlan(self, mac):
-        # REVISIT(yamamoto): match in_port as well?
-        self.delete_flows(table_id=constants.DVR_NOT_LEARN_VLAN,
-            eth_src=mac)
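
The `| ofp.OFPVID_PRESENT` in the matches and set-field actions above reflects how OpenFlow 1.3 encodes VLAN IDs: bit 12 of the vlan_vid field flags that a tag is present. A standalone sketch (OFPVID_PRESENT is 0x1000 in OpenFlow 1.3):

    OFPVID_PRESENT = 0x1000           # OpenFlow 1.3 "VLAN tag present" bit
    lvid = 5                          # hypothetical local VLAN id
    vlan_vid = lvid | OFPVID_PRESENT  # 0x1005: VLAN 5, tag present
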
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py
deleted file mode 100644 (file)
index 6682eb1..0000000
+++ /dev/null
@@ -1,288 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from ryu.lib.packet import arp
-from ryu.lib.packet import ether_types
-
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import br_dvr_process
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import ovs_bridge
-
-
-class OVSTunnelBridge(ovs_bridge.OVSAgentBridge,
-                      br_dvr_process.OVSDVRProcessMixin):
-    """openvswitch agent tunnel bridge specific logic."""
-
-    # Used by OVSDVRProcessMixin
-    dvr_process_table_id = constants.DVR_PROCESS
-    dvr_process_next_table_id = constants.PATCH_LV_TO_TUN
-
-    def setup_default_table(self, patch_int_ofport, arp_responder_enabled):
-        (dp, ofp, ofpp) = self._get_dp()
-
-        # Table 0 (default) will sort incoming traffic depending on in_port
-        self.install_goto(dest_table_id=constants.PATCH_LV_TO_TUN,
-                          priority=1,
-                          in_port=patch_int_ofport)
-        self.install_drop()  # default drop
-
-        if arp_responder_enabled:
-            # Broadcast ARP requests go to the local ARP_RESPONDER table to
-            # be resolved locally
-            # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher?
-            self.install_goto(dest_table_id=constants.ARP_RESPONDER,
-                              table_id=constants.PATCH_LV_TO_TUN,
-                              priority=1,
-                              eth_dst="ff:ff:ff:ff:ff:ff",
-                              eth_type=ether_types.ETH_TYPE_ARP)
-
-        # The PATCH_LV_TO_TUN table handles packets coming from patch_int:
-        # unicasts go to table UCAST_TO_TUN, where remote addresses are learnt
-        self.install_goto(dest_table_id=constants.UCAST_TO_TUN,
-                          table_id=constants.PATCH_LV_TO_TUN,
-                          eth_dst=('00:00:00:00:00:00',
-                                   '01:00:00:00:00:00'))
-
-        # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding
-        self.install_goto(dest_table_id=constants.FLOOD_TO_TUN,
-                          table_id=constants.PATCH_LV_TO_TUN,
-                          eth_dst=('01:00:00:00:00:00',
-                                   '01:00:00:00:00:00'))
-
-        # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id
-        # for each tunnel type, and resubmit to table LEARN_FROM_TUN where
-        # remote mac addresses will be learnt
-        for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
-            self.install_drop(table_id=constants.TUN_TABLE[tunnel_type])
-
-        # The LEARN_FROM_TUN table has a single flow using a learn action to
-        # dynamically set up flows in UCAST_TO_TUN corresponding to remote mac
-        # addresses (assumes that lvid has already been set by a previous flow).
-        # Once remote mac addresses are learnt, output the packet to patch_int
-        flow_specs = [
-            ofpp.NXFlowSpecMatch(src=('vlan_vid', 0),
-                                 dst=('vlan_vid', 0),
-                                 n_bits=12),
-            ofpp.NXFlowSpecMatch(src=('eth_src', 0),
-                                 dst=('eth_dst', 0),
-                                 n_bits=48),
-            ofpp.NXFlowSpecLoad(src=0,
-                                dst=('vlan_vid', 0),
-                                n_bits=12),
-            ofpp.NXFlowSpecLoad(src=('tunnel_id', 0),
-                                dst=('tunnel_id', 0),
-                                n_bits=64),
-            ofpp.NXFlowSpecOutput(src=('in_port', 0),
-                                  dst='',
-                                  n_bits=32),
-        ]
-        actions = [
-            ofpp.NXActionLearn(table_id=constants.UCAST_TO_TUN,
-                               cookie=self.agent_uuid_stamp,
-                               priority=1,
-                               hard_timeout=300,
-                               specs=flow_specs),
-            ofpp.OFPActionOutput(patch_int_ofport, 0),
-        ]
-        self.install_apply_actions(table_id=constants.LEARN_FROM_TUN,
-                                   priority=1,
-                                   actions=actions)
-
-        # Egress unicast will be handled in table UCAST_TO_TUN, where remote
-        # mac addresses will be learned. For now, just add a default flow that
-        # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them
-        # as broadcasts/multicasts
-        self.install_goto(dest_table_id=constants.FLOOD_TO_TUN,
-                          table_id=constants.UCAST_TO_TUN)
-
-        if arp_responder_enabled:
-            # If none of the ARP entries correspond to the requested IP, the
-            # broadcast packet is resubmitted to the flooding table
-            self.install_goto(dest_table_id=constants.FLOOD_TO_TUN,
-                              table_id=constants.ARP_RESPONDER)
-
-        # FLOOD_TO_TUN will handle flooding in tunnels based on lvid;
-        # for now, add a default drop action
-        self.install_drop(table_id=constants.FLOOD_TO_TUN)
-
-    @staticmethod
-    def _local_vlan_match(_ofp, ofpp, tun_id):
-        return ofpp.OFPMatch(tunnel_id=tun_id)
-
-    def provision_local_vlan(self, network_type, lvid, segmentation_id,
-                             distributed=False):
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._local_vlan_match(ofp, ofpp, segmentation_id)
-        table_id = constants.TUN_TABLE[network_type]
-        if distributed:
-            dest_table_id = constants.DVR_NOT_LEARN
-        else:
-            dest_table_id = constants.LEARN_FROM_TUN
-        actions = [
-            ofpp.OFPActionPushVlan(),
-            ofpp.OFPActionSetField(vlan_vid=lvid | ofp.OFPVID_PRESENT),
-        ]
-        instructions = [
-            ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
-            ofpp.OFPInstructionGotoTable(table_id=dest_table_id)]
-        self.install_instructions(table_id=table_id,
-                                  priority=1,
-                                  match=match,
-                                  instructions=instructions)
-
-    def reclaim_local_vlan(self, network_type, segmentation_id):
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._local_vlan_match(ofp, ofpp, segmentation_id)
-        table_id = constants.TUN_TABLE[network_type]
-        self.delete_flows(table_id=table_id, match=match)
-
-    @staticmethod
-    def _flood_to_tun_match(ofp, ofpp, vlan):
-        return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT)
-
-    def install_flood_to_tun(self, vlan, tun_id, ports):
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._flood_to_tun_match(ofp, ofpp, vlan)
-        actions = [ofpp.OFPActionPopVlan(),
-                   ofpp.OFPActionSetField(tunnel_id=tun_id)]
-        for port in ports:
-            actions.append(ofpp.OFPActionOutput(port, 0))
-        self.install_apply_actions(table_id=constants.FLOOD_TO_TUN,
-                                   priority=1,
-                                   match=match,
-                                   actions=actions)
-
-    def delete_flood_to_tun(self, vlan):
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._flood_to_tun_match(ofp, ofpp, vlan)
-        self.delete_flows(table_id=constants.FLOOD_TO_TUN, match=match)
-
-    @staticmethod
-    def _unicast_to_tun_match(ofp, ofpp, vlan, mac):
-        return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT, eth_dst=mac)
-
-    def install_unicast_to_tun(self, vlan, tun_id, port, mac):
-        (_dp, ofp, ofpp) = self._get_dp()
-        match = self._unicast_to_tun_match(ofp, ofpp, vlan, mac)
-        actions = [ofpp.OFPActionPopVlan(),
-                   ofpp.OFPActionSetField(tunnel_id=tun_id),
-                   ofpp.OFPActionOutput(port, 0)]
-        self.install_apply_actions(table_id=constants.UCAST_TO_TUN,
-                                   priority=2,
-                                   match=match,
-                                   actions=actions)
-
-    def delete_unicast_to_tun(self, vlan, mac):
-        (_dp, ofp, ofpp) = self._get_dp()
-        if mac is None:
-            match = ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT)
-        else:
-            match = self._unicast_to_tun_match(ofp, ofpp, vlan, mac)
-        self.delete_flows(table_id=constants.UCAST_TO_TUN, match=match)
-
-    @staticmethod
-    def _arp_responder_match(ofp, ofpp, vlan, ip):
-        # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher?
-        return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT,
-                             eth_type=ether_types.ETH_TYPE_ARP,
-                             arp_tpa=ip)
-
-    def install_arp_responder(self, vlan, ip, mac):
-        (dp, ofp, ofpp) = self._get_dp()
-        match = self._arp_responder_match(ofp, ofpp, vlan, ip)
-        actions = [ofpp.OFPActionSetField(arp_op=arp.ARP_REPLY),
-                   ofpp.NXActionRegMove(src_field='arp_sha',
-                                        dst_field='arp_tha',
-                                        n_bits=48),
-                   ofpp.NXActionRegMove(src_field='arp_spa',
-                                        dst_field='arp_tpa',
-                                        n_bits=32),
-                   ofpp.OFPActionSetField(arp_sha=mac),
-                   ofpp.OFPActionSetField(arp_spa=ip),
-                   ofpp.OFPActionOutput(ofp.OFPP_IN_PORT, 0)]
-        self.install_apply_actions(table_id=constants.ARP_RESPONDER,
-                                   priority=1,
-                                   match=match,
-                                   actions=actions)
-
-    def delete_arp_responder(self, vlan, ip):
-        (_dp, ofp, ofpp) = self._get_dp()
-        if ip is None:
-            # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher?
-            match = ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT,
-                                  eth_type=ether_types.ETH_TYPE_ARP)
-        else:
-            match = self._arp_responder_match(ofp, ofpp, vlan, ip)
-        self.delete_flows(table_id=constants.ARP_RESPONDER, match=match)
-
-    def setup_tunnel_port(self, network_type, port):
-        self.install_goto(dest_table_id=constants.TUN_TABLE[network_type],
-                          priority=1,
-                          in_port=port)
-
-    def cleanup_tunnel_port(self, port):
-        self.delete_flows(in_port=port)
-
-    def add_dvr_mac_tun(self, mac, port):
-        self.install_output(table_id=constants.DVR_NOT_LEARN,
-                            priority=1,
-                            eth_src=mac,
-                            port=port)
-
-    def remove_dvr_mac_tun(self, mac):
-        # REVISIT(yamamoto): match in_port as well?
-        self.delete_flows(table_id=constants.DVR_NOT_LEARN,
-                          eth_src=mac)
-
-    def deferred(self):
-        # REVISIT(yamamoto): This is for API compatibility with the
-        # "ovs-ofctl" interface.  Consider removing this mechanism when
-        # obsoleting the "ovs-ofctl" interface.
-        # For the "ovs-ofctl" interface, the "deferred" mechanism improves
-        # performance by batching flow-mods into a single ovs-ofctl command
-        # invocation.
-        # On the other hand, for this "native" interface, the overhead of
-        # each flow-mod is already minimal and batching doesn't make much
-        # sense.  Thus this method is left as a no-op.
-        # It might be possible to send multiple flow-mods with a single
-        # barrier, but it's unclear whether that level of performance
-        # optimization is desirable, while it would certainly complicate
-        # error handling.
-        return self
-
-    def __enter__(self):
-        # REVISIT(yamamoto): See the comment on deferred().
-        return self
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        # REVISIT(yamamoto): See the comment on deferred().
-        pass
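
The NXFlowSpec list in setup_default_table() above is the native-interface rendering of an OVS learn() action; the ovs-ofctl driver later in this change expresses the same learned flow as a string. A sketch of the correspondence, using the field widths from the specs above:

    # NXFlowSpecMatch(vlan_vid->vlan_vid, 12)   ~ NXM_OF_VLAN_TCI[0..11]
    # NXFlowSpecMatch(eth_src->eth_dst, 48)     ~ NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]
    # NXFlowSpecLoad(0->vlan_vid, 12)           ~ load:0->NXM_OF_VLAN_TCI[]
    # NXFlowSpecLoad(tunnel_id->tunnel_id, 64)  ~ load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]
    # NXFlowSpecOutput(in_port, 32)             ~ output:NXM_OF_IN_PORT[]
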
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/main.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/main.py
deleted file mode 100644 (file)
index 6f3bd7b..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (C) 2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from ryu.base import app_manager
-from ryu import cfg as ryu_cfg
-
-
-cfg.CONF.import_group(
-    'OVS',
-    'neutron.plugins.ml2.drivers.openvswitch.agent.common.config')
-
-
-def init_config():
-    ryu_cfg.CONF(project='ryu', args=[])
-    ryu_cfg.CONF.ofp_listen_host = cfg.CONF.OVS.of_listen_address
-    ryu_cfg.CONF.ofp_tcp_listen_port = cfg.CONF.OVS.of_listen_port
-
-
-def main():
-    app_manager.AppManager.run_apps([
-        'neutron.plugins.ml2.drivers.openvswitch.agent.'
-        'openflow.native.ovs_ryuapp',
-    ])
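
init_config() simply copies neutron's [OVS] listener options into Ryu's own config namespace before the Ryu apps start. A minimal analogue with hypothetical literals in place of the neutron options:

    from ryu import cfg as ryu_cfg

    ryu_cfg.CONF(project='ryu', args=[])
    ryu_cfg.CONF.ofp_listen_host = '127.0.0.1'   # of_listen_address
    ryu_cfg.CONF.ofp_tcp_listen_port = 6633      # of_listen_port
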
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py
deleted file mode 100644 (file)
index 42572ee..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import eventlet
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import excutils
-from oslo_utils import timeutils
-import ryu.app.ofctl.api as ofctl_api
-import ryu.exception as ryu_exc
-
-from neutron._i18n import _LE, _LW
-
-LOG = logging.getLogger(__name__)
-
-
-class OpenFlowSwitchMixin(object):
-    """Mixin providing common convenience routines for an OpenFlow switch.
-
-    NOTE(yamamoto): super() points to ovs_lib.OVSBridge.
-    See ovs_bridge.py for how this class is actually used.
-    """
-
-    @staticmethod
-    def _cidr_to_ryu(ip):
-        n = netaddr.IPNetwork(ip)
-        if n.hostmask:
-            return (str(n.ip), str(n.netmask))
-        return str(n.ip)
-
-    def __init__(self, *args, **kwargs):
-        self._app = kwargs.pop('ryu_app')
-        super(OpenFlowSwitchMixin, self).__init__(*args, **kwargs)
-
-    def _get_dp_by_dpid(self, dpid_int):
-        """Get Ryu datapath object for the switch."""
-        timeout_sec = cfg.CONF.OVS.of_connect_timeout
-        start_time = timeutils.now()
-        while True:
-            dp = ofctl_api.get_datapath(self._app, dpid_int)
-            if dp is not None:
-                break
-            # The switch has not established a connection to us yet.
-            # Wait a little.
-            if timeutils.now() > start_time + timeout_sec:
-                m = _LE("Switch connection timeout")
-                LOG.error(m)
-                # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
-                raise RuntimeError(m)
-            eventlet.sleep(1)
-        return dp
-
-    def _send_msg(self, msg, reply_cls=None, reply_multi=False):
-        timeout_sec = cfg.CONF.OVS.of_request_timeout
-        timeout = eventlet.timeout.Timeout(seconds=timeout_sec)
-        try:
-            result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi)
-        except ryu_exc.RyuException as e:
-            m = _LE("ofctl request %(request)s error %(error)s") % {
-                "request": msg,
-                "error": e,
-            }
-            LOG.error(m)
-            # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
-            raise RuntimeError(m)
-        except eventlet.timeout.Timeout as e:
-            with excutils.save_and_reraise_exception() as ctx:
-                if e is timeout:
-                    ctx.reraise = False
-                    m = _LE("ofctl request %(request)s timed out") % {
-                        "request": msg,
-                    }
-                    LOG.error(m)
-                    # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
-                    raise RuntimeError(m)
-        finally:
-            timeout.cancel()
-        LOG.debug("ofctl request %(request)s result %(result)s",
-                  {"request": msg, "result": result})
-        return result
-
-    @staticmethod
-    def _match(_ofp, ofpp, match, **match_kwargs):
-        if match is not None:
-            return match
-        return ofpp.OFPMatch(**match_kwargs)
-
-    def delete_flows(self, table_id=None, strict=False, priority=0,
-                     cookie=0, cookie_mask=0,
-                     match=None, **match_kwargs):
-        (dp, ofp, ofpp) = self._get_dp()
-        if table_id is None:
-            table_id = ofp.OFPTT_ALL
-        match = self._match(ofp, ofpp, match, **match_kwargs)
-        if strict:
-            cmd = ofp.OFPFC_DELETE_STRICT
-        else:
-            cmd = ofp.OFPFC_DELETE
-        msg = ofpp.OFPFlowMod(dp,
-                              command=cmd,
-                              cookie=cookie,
-                              cookie_mask=cookie_mask,
-                              table_id=table_id,
-                              match=match,
-                              priority=priority,
-                              out_group=ofp.OFPG_ANY,
-                              out_port=ofp.OFPP_ANY)
-        self._send_msg(msg)
-
-    def dump_flows(self, table_id=None):
-        (dp, ofp, ofpp) = self._get_dp()
-        if table_id is None:
-            table_id = ofp.OFPTT_ALL
-        msg = ofpp.OFPFlowStatsRequest(dp, table_id=table_id)
-        replies = self._send_msg(msg,
-                                 reply_cls=ofpp.OFPFlowStatsReply,
-                                 reply_multi=True)
-        flows = []
-        for rep in replies:
-            flows += rep.body
-        return flows
-
-    def cleanup_flows(self):
-        cookies = set([f.cookie for f in self.dump_flows()])
-        for c in cookies:
-            if c == self.agent_uuid_stamp:
-                continue
-            LOG.warning(_LW("Deleting flow with cookie 0x%(cookie)x"),
-                        {'cookie': c})
-            self.delete_flows(cookie=c, cookie_mask=((1 << 64) - 1))
-
-    def install_goto_next(self, table_id):
-        self.install_goto(table_id=table_id, dest_table_id=table_id + 1)
-
-    def install_output(self, port, table_id=0, priority=0,
-                       match=None, **match_kwargs):
-        (_dp, ofp, ofpp) = self._get_dp()
-        actions = [ofpp.OFPActionOutput(port, 0)]
-        instructions = [ofpp.OFPInstructionActions(
-                        ofp.OFPIT_APPLY_ACTIONS, actions)]
-        self.install_instructions(table_id=table_id, priority=priority,
-                                  instructions=instructions,
-                                  match=match, **match_kwargs)
-
-    def install_normal(self, table_id=0, priority=0,
-                       match=None, **match_kwargs):
-        (_dp, ofp, _ofpp) = self._get_dp()
-        self.install_output(port=ofp.OFPP_NORMAL,
-                            table_id=table_id, priority=priority,
-                            match=match, **match_kwargs)
-
-    def install_goto(self, dest_table_id, table_id=0, priority=0,
-                     match=None, **match_kwargs):
-        (_dp, _ofp, ofpp) = self._get_dp()
-        instructions = [ofpp.OFPInstructionGotoTable(table_id=dest_table_id)]
-        self.install_instructions(table_id=table_id, priority=priority,
-                                  instructions=instructions,
-                                  match=match, **match_kwargs)
-
-    def install_drop(self, table_id=0, priority=0, match=None, **match_kwargs):
-        self.install_instructions(table_id=table_id, priority=priority,
-                                  instructions=[], match=match, **match_kwargs)
-
-    def install_instructions(self, instructions,
-                             table_id=0, priority=0,
-                             match=None, **match_kwargs):
-        (dp, ofp, ofpp) = self._get_dp()
-        match = self._match(ofp, ofpp, match, **match_kwargs)
-        msg = ofpp.OFPFlowMod(dp,
-                              table_id=table_id,
-                              cookie=self.agent_uuid_stamp,
-                              match=match,
-                              priority=priority,
-                              instructions=instructions)
-        self._send_msg(msg)
-
-    def install_apply_actions(self, actions,
-                              table_id=0, priority=0,
-                              match=None, **match_kwargs):
-        (dp, ofp, ofpp) = self._get_dp()
-        instructions = [
-            ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
-        ]
-        self.install_instructions(table_id=table_id,
-                                  priority=priority,
-                                  match=match,
-                                  instructions=instructions,
-                                  **match_kwargs)
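
The `_cidr_to_ryu()` helper above normalizes a CIDR into the form Ryu matches accept: a bare address string when there is no hostmask, or an (address, netmask) pair otherwise. A standalone sketch of the same logic:

    import netaddr

    def cidr_to_ryu(ip):
        n = netaddr.IPNetwork(ip)
        if n.hostmask:
            return (str(n.ip), str(n.netmask))
        return str(n.ip)

    cidr_to_ryu('192.0.2.10/32')   # -> '192.0.2.10'
    cidr_to_ryu('192.0.2.0/24')    # -> ('192.0.2.0', '255.255.255.0')
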
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py
deleted file mode 100644 (file)
index 85173a9..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-from oslo_utils import excutils
-
-from neutron._i18n import _LI
-from neutron.agent.common import ovs_lib
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import ofswitch
-
-
-LOG = logging.getLogger(__name__)
-
-
-class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin, ovs_lib.OVSBridge):
-    """Common code for bridges used by OVS agent"""
-
-    _cached_dpid = None
-
-    def _get_dp(self):
-        """Get (dp, ofp, ofpp) tuple for the switch.
-
-        A convenient method for openflow message composers.
-        """
-        while True:
-            dpid_int = self._cached_dpid
-            if dpid_int is None:
-                dpid_str = self.get_datapath_id()
-                LOG.info(_LI("Bridge %(br_name)s has datapath-ID %(dpid)s"),
-                         {"br_name": self.br_name, "dpid": dpid_str})
-                dpid_int = int(dpid_str, 16)
-            try:
-                dp = self._get_dp_by_dpid(dpid_int)
-            except RuntimeError:
-                with excutils.save_and_reraise_exception() as ctx:
-                    self._cached_dpid = None
-                    # Retry if the dpid has changed.
-                    # NOTE(yamamoto): Open vSwitch changes its dpid on
-                    # some events.
-                    # REVISIT(yamamoto): Consider setting the dpid statically.
-                    new_dpid_str = self.get_datapath_id()
-                    if new_dpid_str != dpid_str:
-                        LOG.info(_LI("Bridge %(br_name)s changed its "
-                                     "datapath-ID from %(old)s to %(new)s"), {
-                            "br_name": self.br_name,
-                            "old": dpid_str,
-                            "new": new_dpid_str,
-                        })
-                        ctx.reraise = False
-            else:
-                self._cached_dpid = dpid_int
-                return dp, dp.ofproto, dp.ofproto_parser
-
-    def setup_controllers(self, conf):
-        controllers = [
-            "tcp:%(address)s:%(port)s" % {
-                "address": conf.OVS.of_listen_address,
-                "port": conf.OVS.of_listen_port,
-            }
-        ]
-        self.set_protocols("OpenFlow13")
-        self.set_controller(controllers)
-
-    def drop_port(self, in_port):
-        self.install_drop(priority=2, in_port=in_port)
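
The dpid handling above relies on OVSDB reporting the datapath-ID as a 16-hex-digit string while Ryu keys datapaths by integer, hence the int(dpid_str, 16) conversion and the cache invalidation when the bridge's dpid changes. A trivial sketch with a hypothetical dpid:

    dpid_str = '0000f2b1bb9a4e41'   # hypothetical get_datapath_id() result
    dpid_int = int(dpid_str, 16)    # integer key that ofctl_api.get_datapath() expects
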
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py
deleted file mode 100644 (file)
index 0409717..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (C) 2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-import ryu.app.ofctl.api  # noqa
-from ryu.base import app_manager
-from ryu.lib import hub
-from ryu.ofproto import ofproto_v1_3
-
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import br_int
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import br_phys
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import br_tun
-from neutron.plugins.ml2.drivers.openvswitch.agent \
-    import ovs_neutron_agent as ovs_agent
-
-
-class OVSNeutronAgentRyuApp(app_manager.RyuApp):
-    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
-
-    def start(self):
-        # Start Ryu event loop thread
-        super(OVSNeutronAgentRyuApp, self).start()
-
-        def _make_br_cls(br_cls):
-            return functools.partial(br_cls, ryu_app=self)
-
-        # Start agent main loop thread
-        bridge_classes = {
-            'br_int': _make_br_cls(br_int.OVSIntegrationBridge),
-            'br_phys': _make_br_cls(br_phys.OVSPhysicalBridge),
-            'br_tun': _make_br_cls(br_tun.OVSTunnelBridge),
-        }
-        return hub.spawn(ovs_agent.main, bridge_classes)
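
The `_make_br_cls` helper pre-binds `ryu_app=self` so the agent main loop can keep constructing bridges through the plain ovs_lib-style call signature. A self-contained analogue of the pattern (class and values are illustrative only):

    import functools

    class Bridge(object):
        def __init__(self, br_name, ryu_app=None):
            self.br_name = br_name
            self.ryu_app = ryu_app

    make_br = functools.partial(Bridge, ryu_app='the-ryu-app')
    br = make_br('br-int')   # ryu_app is already bound
    assert br.ryu_app == 'the-ryu-app'
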
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py
deleted file mode 100644 (file)
index 6fdb064..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import constants
-
-
-class OVSDVRProcessMixin(object):
-    """Common logic for br-tun and br-phys' DVR_PROCESS tables.
-
-    Inheritors should provide self.dvr_process_table_id and
-    self.dvr_process_next_table_id.
-    """
-
-    def install_dvr_process_ipv4(self, vlan_tag, gateway_ip):
-        # block ARP
-        self.add_flow(table=self.dvr_process_table_id,
-                      priority=3,
-                      dl_vlan=vlan_tag,
-                      proto='arp',
-                      nw_dst=gateway_ip,
-                      actions='drop')
-
-    def delete_dvr_process_ipv4(self, vlan_tag, gateway_ip):
-        self.delete_flows(table=self.dvr_process_table_id,
-                          dl_vlan=vlan_tag,
-                          proto='arp',
-                          nw_dst=gateway_ip)
-
-    def install_dvr_process_ipv6(self, vlan_tag, gateway_mac):
-        # block RA
-        self.add_flow(table=self.dvr_process_table_id,
-                      priority=3,
-                      dl_vlan=vlan_tag,
-                      proto='icmp6',
-                      icmp_type=constants.ICMPV6_TYPE_RA,
-                      dl_src=gateway_mac,
-                      actions='drop')
-
-    def delete_dvr_process_ipv6(self, vlan_tag, gateway_mac):
-        self.delete_flows(table=self.dvr_process_table_id,
-                          dl_vlan=vlan_tag,
-                          proto='icmp6',
-                          icmp_type=constants.ICMPV6_TYPE_RA,
-                          dl_src=gateway_mac)
-
-    def install_dvr_process(self, vlan_tag, vif_mac, dvr_mac_address):
-        self.add_flow(table=self.dvr_process_table_id,
-                      priority=2,
-                      dl_vlan=vlan_tag,
-                      dl_dst=vif_mac,
-                      actions="drop")
-        self.add_flow(table=self.dvr_process_table_id,
-                      priority=1,
-                      dl_vlan=vlan_tag,
-                      dl_src=vif_mac,
-                      actions="mod_dl_src:%s,resubmit(,%s)" %
-                      (dvr_mac_address, self.dvr_process_next_table_id))
-
-    def delete_dvr_process(self, vlan_tag, vif_mac):
-        self.delete_flows(table=self.dvr_process_table_id,
-                          dl_vlan=vlan_tag,
-                          dl_dst=vif_mac)
-        self.delete_flows(table=self.dvr_process_table_id,
-                          dl_vlan=vlan_tag,
-                          dl_src=vif_mac)
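
With hypothetical values, the actions string built by install_dvr_process() above renders as follows; the table IDs come from whichever bridge mixes this class in:

    vif_mac = 'fa:16:3e:00:00:01'          # hypothetical
    dvr_mac_address = 'fa:16:3f:00:00:01'  # hypothetical
    next_table = 2                         # hypothetical dvr_process_next_table_id
    actions = "mod_dl_src:%s,resubmit(,%s)" % (dvr_mac_address, next_table)
    # actions == 'mod_dl_src:fa:16:3f:00:00:01,resubmit(,2)'
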
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py
deleted file mode 100644 (file)
index d0aca03..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-* references
-** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
-"""
-from neutron.common import constants as const
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import ovs_bridge
-
-
-class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge):
-    """openvswitch agent br-int specific logic."""
-
-    def setup_default_table(self):
-        self.install_normal()
-        self.setup_canary_table()
-        self.install_drop(table_id=constants.ARP_SPOOF_TABLE)
-
-    def setup_canary_table(self):
-        self.install_drop(constants.CANARY_TABLE)
-
-    def check_canary_table(self):
-        canary_flows = self.dump_flows(constants.CANARY_TABLE)
-        if canary_flows == '':
-            return constants.OVS_RESTARTED
-        elif canary_flows is None:
-            return constants.OVS_DEAD
-        else:
-            return constants.OVS_NORMAL
-
-    def provision_local_vlan(self, port, lvid, segmentation_id):
-        if segmentation_id is None:
-            dl_vlan = 0xffff
-        else:
-            dl_vlan = segmentation_id
-        self.add_flow(priority=3,
-                      in_port=port,
-                      dl_vlan=dl_vlan,
-                      actions="mod_vlan_vid:%s,normal" % lvid)
-
-    def reclaim_local_vlan(self, port, segmentation_id):
-        if segmentation_id is None:
-            dl_vlan = 0xffff
-        else:
-            dl_vlan = segmentation_id
-        self.delete_flows(in_port=port, dl_vlan=dl_vlan)
-
-    @staticmethod
-    def _dvr_to_src_mac_table_id(network_type):
-        if network_type == p_const.TYPE_VLAN:
-            return constants.DVR_TO_SRC_MAC_VLAN
-        else:
-            return constants.DVR_TO_SRC_MAC
-
-    def install_dvr_to_src_mac(self, network_type,
-                               vlan_tag, gateway_mac, dst_mac, dst_port):
-        table_id = self._dvr_to_src_mac_table_id(network_type)
-        self.add_flow(table=table_id,
-                      priority=4,
-                      dl_vlan=vlan_tag,
-                      dl_dst=dst_mac,
-                      actions="strip_vlan,mod_dl_src:%s,"
-                      "output:%s" % (gateway_mac, dst_port))
-
-    def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac):
-        table_id = self._dvr_to_src_mac_table_id(network_type)
-        self.delete_flows(table=table_id,
-                          dl_vlan=vlan_tag,
-                          dl_dst=dst_mac)
-
-    def add_dvr_mac_vlan(self, mac, port):
-        self.install_goto(table_id=constants.LOCAL_SWITCHING,
-                          priority=4,
-                          in_port=port,
-                          eth_src=mac,
-                          dest_table_id=constants.DVR_TO_SRC_MAC_VLAN)
-
-    def remove_dvr_mac_vlan(self, mac):
-        # REVISIT(yamamoto): match in_port as well?
-        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
-                          eth_src=mac)
-
-    def add_dvr_mac_tun(self, mac, port):
-        # Table LOCAL_SWITCHING will now sort DVR traffic from other
-        # traffic depending on in_port
-        self.install_goto(table_id=constants.LOCAL_SWITCHING,
-                          priority=2,
-                          in_port=port,
-                          eth_src=mac,
-                          dest_table_id=constants.DVR_TO_SRC_MAC)
-
-    def remove_dvr_mac_tun(self, mac, port):
-        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
-                          in_port=port, eth_src=mac)
-
-    def install_icmpv6_na_spoofing_protection(self, port, ip_addresses):
-        # Allow neighbor advertisements as long as they match addresses
-        # that actually belong to the port.
-        for ip in ip_addresses:
-            self.install_normal(
-                table_id=constants.ARP_SPOOF_TABLE, priority=2,
-                dl_type=const.ETHERTYPE_IPV6, nw_proto=const.PROTO_NUM_ICMP_V6,
-                icmp_type=const.ICMPV6_TYPE_NA, nd_target=ip, in_port=port)
-
-        # Now that the rules are ready, direct icmpv6 neighbor advertisement
-        # traffic from the port into the anti-spoof table.
-        self.add_flow(table=constants.LOCAL_SWITCHING,
-                      priority=10, dl_type=const.ETHERTYPE_IPV6,
-                      nw_proto=const.PROTO_NUM_ICMP_V6,
-                      icmp_type=const.ICMPV6_TYPE_NA, in_port=port,
-                      actions=("resubmit(,%s)" % constants.ARP_SPOOF_TABLE))
-
-    def install_arp_spoofing_protection(self, port, ip_addresses):
-        # allow ARPs as long as they match addresses that actually
-        # belong to the port.
-        for ip in ip_addresses:
-            self.install_normal(
-                table_id=constants.ARP_SPOOF_TABLE, priority=2,
-                proto='arp', arp_spa=ip, in_port=port)
-
-        # Now that the rules are ready, direct ARP traffic from the port into
-        # the anti-spoof table.
-        # This strategy fails gracefully because OVS versions that can't match
-        # on ARP headers will just process traffic normally.
-        self.add_flow(table=constants.LOCAL_SWITCHING,
-                      priority=10, proto='arp', in_port=port,
-                      actions=("resubmit(,%s)" % constants.ARP_SPOOF_TABLE))
-
-    def delete_arp_spoofing_protection(self, port):
-        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
-                          in_port=port, proto='arp')
-        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
-                          in_port=port, nw_proto=const.PROTO_NUM_ICMP_V6,
-                          icmp_type=const.ICMPV6_TYPE_NA)
-        self.delete_arp_spoofing_allow_rules(port)
-
-    def delete_arp_spoofing_allow_rules(self, port):
-        self.delete_flows(table_id=constants.ARP_SPOOF_TABLE,
-                          in_port=port)
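
check_canary_table() above distinguishes three states by the shape of the ovs-ofctl dump_flows() output: a non-empty dump means the canary drop rule survived (OVS_NORMAL), an empty string means the table is reachable but the rule is gone (OVS_RESTARTED), and None means ovs-ofctl itself failed (OVS_DEAD). A hedged sketch of how a caller might react (the real logic lives in ovs_neutron_agent.py):

    status = br.check_canary_table()
    if status == constants.OVS_RESTARTED:
        br.setup_default_table()   # reinstall NORMAL, canary and drop flows
    elif status == constants.OVS_DEAD:
        pass                       # switch unreachable; retry on the next poll
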
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_phys.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_phys.py
deleted file mode 100644 (file)
index e76b7dd..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import br_dvr_process
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import ovs_bridge
-
-
-class OVSPhysicalBridge(ovs_bridge.OVSAgentBridge,
-                        br_dvr_process.OVSDVRProcessMixin):
-    """openvswitch agent physical bridge specific logic."""
-
-    # Used by OVSDVRProcessMixin
-    dvr_process_table_id = constants.DVR_PROCESS_VLAN
-    dvr_process_next_table_id = constants.LOCAL_VLAN_TRANSLATION
-
-    def setup_default_table(self):
-        self.delete_flows()
-        self.install_normal()
-
-    def provision_local_vlan(self, port, lvid, segmentation_id, distributed):
-        table_id = constants.LOCAL_VLAN_TRANSLATION if distributed else 0
-        if segmentation_id is None:
-            self.add_flow(table=table_id,
-                          priority=4,
-                          in_port=port,
-                          dl_vlan=lvid,
-                          actions="strip_vlan,normal")
-        else:
-            self.add_flow(table=table_id,
-                          priority=4,
-                          in_port=port,
-                          dl_vlan=lvid,
-                          actions="mod_vlan_vid:%s,normal" % segmentation_id)
-
-    def reclaim_local_vlan(self, port, lvid):
-        self.delete_flows(in_port=port, dl_vlan=lvid)
-
-    def add_dvr_mac_vlan(self, mac, port):
-        self.install_output(table_id=constants.DVR_NOT_LEARN_VLAN,
-            priority=2, eth_src=mac, port=port)
-
-    def remove_dvr_mac_vlan(self, mac):
-        # REVISIT(yamamoto): match in_port as well?
-        self.delete_flows(table_id=constants.DVR_NOT_LEARN_VLAN,
-            eth_src=mac)
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py
deleted file mode 100644 (file)
index b762576..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-import netaddr
-
-from neutron.agent.common import ovs_lib
-from neutron.plugins.ml2.drivers.openvswitch.agent.common \
-    import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import br_dvr_process
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import ovs_bridge
-
-
-class OVSTunnelBridge(ovs_bridge.OVSAgentBridge,
-                      br_dvr_process.OVSDVRProcessMixin):
-    """openvswitch agent tunnel bridge specific logic."""
-
-    # Used by OVSDVRProcessMixin
-    dvr_process_table_id = constants.DVR_PROCESS
-    dvr_process_next_table_id = constants.PATCH_LV_TO_TUN
-
-    def setup_default_table(self, patch_int_ofport, arp_responder_enabled):
-        # Table 0 (default) will sort incoming traffic depending on in_port
-        with self.deferred() as deferred_br:
-            deferred_br.add_flow(priority=1,
-                                 in_port=patch_int_ofport,
-                                 actions="resubmit(,%s)" %
-                                 constants.PATCH_LV_TO_TUN)
-            deferred_br.add_flow(priority=0, actions="drop")
-
-            if arp_responder_enabled:
-                # Broadcast ARP requests go to the local ARP_RESPONDER
-                # table to be resolved locally
-                # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher?
-                deferred_br.add_flow(table=constants.PATCH_LV_TO_TUN,
-                                     priority=1,
-                                     proto='arp',
-                                     dl_dst="ff:ff:ff:ff:ff:ff",
-                                     actions=("resubmit(,%s)" %
-                                       constants.ARP_RESPONDER))
-
-            # The PATCH_LV_TO_TUN table handles packets coming from patch_int:
-            # unicasts go to table UCAST_TO_TUN, where remote addresses are
-            # learnt
-            deferred_br.add_flow(table=constants.PATCH_LV_TO_TUN,
-                                 priority=0,
-                                 dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
-                                 actions=("resubmit(,%s)" %
-                                   constants.UCAST_TO_TUN))
-
-            # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles
-            # flooding
-            deferred_br.add_flow(table=constants.PATCH_LV_TO_TUN,
-                                 priority=0,
-                                 dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
-                                 actions=("resubmit(,%s)" %
-                                   constants.FLOOD_TO_TUN))
-
-            # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id
-            # for each tunnel type, and resubmit to table LEARN_FROM_TUN where
-            # remote mac addresses will be learnt
-            for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
-                deferred_br.add_flow(table=constants.TUN_TABLE[tunnel_type],
-                                     priority=0, actions="drop")
-
-            # The LEARN_FROM_TUN table has a single flow using a learn action
-            # to dynamically set up flows in UCAST_TO_TUN corresponding to
-            # remote mac addresses (assumes that lvid has already been set by
-            # a previous flow)
-            learned_flow = ("cookie=%(cookie)s,"
-                            "table=%(table)s,"
-                            "priority=1,"
-                            "hard_timeout=300,"
-                            "NXM_OF_VLAN_TCI[0..11],"
-                            "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
-                            "load:0->NXM_OF_VLAN_TCI[],"
-                            "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
-                            "output:NXM_OF_IN_PORT[]" %
-                            {'cookie': self.agent_uuid_stamp,
-                             'table': constants.UCAST_TO_TUN})
-            # Once remote mac addresses are learnt, output packet to patch_int
-            deferred_br.add_flow(table=constants.LEARN_FROM_TUN,
-                                 priority=1,
-                                 actions="learn(%s),output:%s" %
-                                 (learned_flow, patch_int_ofport))
-
-            # Egress unicasts are handled in table UCAST_TO_TUN, where
-            # remote mac addresses are learned. For now, just add a
-            # default flow that resubmits unknown unicasts to table
-            # FLOOD_TO_TUN to treat them as broadcasts/multicasts
-            deferred_br.add_flow(table=constants.UCAST_TO_TUN,
-                                 priority=0,
-                                 actions="resubmit(,%s)" %
-                                 constants.FLOOD_TO_TUN)
-
-            if arp_responder_enabled:
-                # If none of the ARP entries correspond to the requested IP,
-                # the broadcast packet is resubmitted to the flooding table
-                deferred_br.add_flow(table=constants.ARP_RESPONDER,
-                                     priority=0,
-                                     actions="resubmit(,%s)" %
-                                     constants.FLOOD_TO_TUN)
-
-        # FLOOD_TO_TUN will handle flooding in tunnels based on lvid;
-        # for now, add a default drop action
-        self.install_drop(table_id=constants.FLOOD_TO_TUN)
-
-    def provision_local_vlan(self, network_type, lvid, segmentation_id,
-                             distributed=False):
-        if distributed:
-            table_id = constants.DVR_NOT_LEARN
-        else:
-            table_id = constants.LEARN_FROM_TUN
-        self.add_flow(table=constants.TUN_TABLE[network_type],
-                      priority=1,
-                      tun_id=segmentation_id,
-                      actions="mod_vlan_vid:%s,"
-                      "resubmit(,%s)" %
-                      (lvid, table_id))
-
-    def reclaim_local_vlan(self, network_type, segmentation_id):
-        self.delete_flows(table=constants.TUN_TABLE[network_type],
-                          tun_id=segmentation_id)
-
-    @staticmethod
-    def _ofport_set_to_str(ports_set):
-        return ",".join(map(str, ports_set))
-
-    def install_flood_to_tun(self, vlan, tun_id, ports, deferred_br=None):
-        br = deferred_br if deferred_br else self
-        br.mod_flow(table=constants.FLOOD_TO_TUN,
-                    dl_vlan=vlan,
-                    actions="strip_vlan,set_tunnel:%s,output:%s" %
-                    (tun_id, self._ofport_set_to_str(ports)))
-
-    def delete_flood_to_tun(self, vlan, deferred_br=None):
-        br = deferred_br if deferred_br else self
-        br.delete_flows(table=constants.FLOOD_TO_TUN, dl_vlan=vlan)
-
-    def install_unicast_to_tun(self, vlan, tun_id, port, mac,
-                               deferred_br=None):
-        br = deferred_br if deferred_br else self
-        br.add_flow(table=constants.UCAST_TO_TUN,
-                    priority=2,
-                    dl_vlan=vlan,
-                    dl_dst=mac,
-                    actions="strip_vlan,set_tunnel:%s,output:%s" %
-                    (tun_id, port))
-
-    def delete_unicast_to_tun(self, vlan, mac, deferred_br=None):
-        br = deferred_br if deferred_br else self
-        if mac is None:
-            br.delete_flows(table=constants.UCAST_TO_TUN,
-                            dl_vlan=vlan)
-        else:
-            br.delete_flows(table=constants.UCAST_TO_TUN,
-                            dl_vlan=vlan,
-                            dl_dst=mac)
-
-    def install_arp_responder(self, vlan, ip, mac, deferred_br=None):
-        br = deferred_br if deferred_br else self
-        actions = constants.ARP_RESPONDER_ACTIONS % {
-            'mac': netaddr.EUI(mac, dialect=netaddr.mac_unix),
-            'ip': netaddr.IPAddress(ip),
-        }
-        br.add_flow(table=constants.ARP_RESPONDER,
-                    priority=1,
-                    proto='arp',
-                    dl_vlan=vlan,
-                    nw_dst='%s' % ip,
-                    actions=actions)
-
-    def delete_arp_responder(self, vlan, ip, deferred_br=None):
-        br = deferred_br if deferred_br else self
-        if ip is None:
-            br.delete_flows(table=constants.ARP_RESPONDER,
-                            proto='arp',
-                            dl_vlan=vlan)
-        else:
-            br.delete_flows(table=constants.ARP_RESPONDER,
-                            proto='arp',
-                            dl_vlan=vlan,
-                            nw_dst='%s' % ip)
-
-    def setup_tunnel_port(self, network_type, port, deferred_br=None):
-        br = deferred_br if deferred_br else self
-        br.add_flow(priority=1,
-                    in_port=port,
-                    actions="resubmit(,%s)" %
-                    constants.TUN_TABLE[network_type])
-
-    def cleanup_tunnel_port(self, port, deferred_br=None):
-        br = deferred_br if deferred_br else self
-        br.delete_flows(in_port=port)
-
-    def add_dvr_mac_tun(self, mac, port):
-        # Table DVR_NOT_LEARN ensures unique dvr macs in the cloud
-        # are not learnt, as they may result in flow explosions
-        self.install_output(table_id=constants.DVR_NOT_LEARN,
-                            priority=1,
-                            eth_src=mac,
-                            port=port)
-
-    def remove_dvr_mac_tun(self, mac):
-        # REVISIT(yamamoto): match in_port as well?
-        self.delete_flows(table_id=constants.DVR_NOT_LEARN,
-                          eth_src=mac)
-
-    def deferred(self):
-        return DeferredOVSTunnelBridge(self)
-
-
-class DeferredOVSTunnelBridge(ovs_lib.DeferredOVSBridge):
-    _METHODS = [
-        'install_unicast_to_tun',
-        'delete_unicast_to_tun',
-        'install_flood_to_tun',
-        'delete_flood_to_tun',
-        'install_arp_responder',
-        'delete_arp_responder',
-        'setup_tunnel_port',
-        'cleanup_tunnel_port',
-    ]
-
-    def __getattr__(self, name):
-        if name in self._METHODS:
-            m = getattr(self.br, name)
-            return functools.partial(m, deferred_br=self)
-        return super(DeferredOVSTunnelBridge, self).__getattr__(name)
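
A minimal standalone sketch of how the learn() template assembled in
setup_default_table() above expands; the cookie value, table number 20 for
UCAST_TO_TUN and ofport 1 for patch_int are illustrative assumptions, not
values taken from this tree:

    # Render the learn() action used in the LEARN_FROM_TUN table.
    # cookie/table/ofport values below are illustrative assumptions.
    learned_flow = ("cookie=%(cookie)s,"
                    "table=%(table)s,"
                    "priority=1,"
                    "hard_timeout=300,"
                    "NXM_OF_VLAN_TCI[0..11],"
                    "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
                    "load:0->NXM_OF_VLAN_TCI[],"
                    "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
                    "output:NXM_OF_IN_PORT[]" %
                    {'cookie': '0x1234', 'table': 20})
    # the actions string as it would be passed to add_flow()
    print("learn(%s),output:%s" % (learned_flow, 1))
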
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/main.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/main.py
deleted file mode 100644 (file)
index 51ed82c..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (C) 2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import br_int
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import br_phys
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import br_tun
-from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent
-
-
-def init_config():
-    pass
-
-
-def main():
-    bridge_classes = {
-        'br_int': br_int.OVSIntegrationBridge,
-        'br_phys': br_phys.OVSPhysicalBridge,
-        'br_tun': br_tun.OVSTunnelBridge,
-    }
-    ovs_neutron_agent.main(bridge_classes)
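
For context, a standalone sketch of how ovs_neutron_agent.main() consumes the
bridge_classes mapping handed over above, mirroring the functools.partial
wiring visible in OVSNeutronAgent.__init__ later in this changeset; the
FakeBridge class and the 'system' datapath type are made-up stand-ins:

    import functools

    # Made-up stand-in for the real bridge classes wired up in main().
    class FakeBridge(object):
        def __init__(self, br_name, datapath_type=None):
            self.br_name = br_name
            self.datapath_type = datapath_type

    bridge_classes = {'br_int': FakeBridge,
                      'br_phys': FakeBridge,
                      'br_tun': FakeBridge}

    # Each class is pre-bound to the configured datapath type before any
    # bridge is instantiated.
    br_int_cls, br_phys_cls, br_tun_cls = (
        functools.partial(bridge_classes[b], datapath_type='system')
        for b in ('br_int', 'br_phys', 'br_tun'))

    print(br_int_cls('br-int').datapath_type)  # -> system
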
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py
deleted file mode 100644 (file)
index de52366..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LW
-
-LOG = logging.getLogger(__name__)
-
-# Field name mappings (from Ryu to ovs-ofctl)
-_keywords = {
-    'eth_src': 'dl_src',
-    'eth_dst': 'dl_dst',
-    'ipv4_src': 'nw_src',
-    'ipv4_dst': 'nw_dst',
-    'table_id': 'table',
-}
-
-
-class OpenFlowSwitchMixin(object):
-    """Mixin to provide common convenient routines for an openflow switch."""
-
-    @staticmethod
-    def _conv_args(kwargs):
-        for our_name, ovs_ofctl_name in _keywords.items():
-            if our_name in kwargs:
-                kwargs[ovs_ofctl_name] = kwargs.pop(our_name)
-        return kwargs
-
-    def dump_flows(self, table_id):
-        return self.dump_flows_for_table(table_id)
-
-    def dump_flows_all_tables(self):
-        return self.dump_all_flows()
-
-    def install_goto_next(self, table_id):
-        self.install_goto(table_id=table_id, dest_table_id=table_id + 1)
-
-    def install_output(self, port, table_id=0, priority=0, **kwargs):
-        self.add_flow(table=table_id,
-                      priority=priority,
-                      actions="output:%s" % port,
-                      **self._conv_args(kwargs))
-
-    def install_normal(self, table_id=0, priority=0, **kwargs):
-        self.add_flow(table=table_id,
-                      priority=priority,
-                      actions="normal",
-                      **self._conv_args(kwargs))
-
-    def install_goto(self, dest_table_id, table_id=0, priority=0, **kwargs):
-        self.add_flow(table=table_id,
-                      priority=priority,
-                      actions="resubmit(,%s)" % dest_table_id,
-                      **self._conv_args(kwargs))
-
-    def install_drop(self, table_id=0, priority=0, **kwargs):
-        self.add_flow(table=table_id,
-                      priority=priority,
-                      actions="drop",
-                      **self._conv_args(kwargs))
-
-    def delete_flows(self, **kwargs):
-        # NOTE(yamamoto): super() points to ovs_lib.OVSBridge.
-        # See ovs_bridge.py for how this class is actually used.
-        if kwargs:
-            super(OpenFlowSwitchMixin, self).delete_flows(
-                **self._conv_args(kwargs))
-        else:
-            super(OpenFlowSwitchMixin, self).remove_all_flows()
-
-    def _filter_flows(self, flows):
-        LOG.debug("Agent uuid stamp used to filter flows: %s",
-                  self.agent_uuid_stamp)
-        cookie_re = re.compile('cookie=(0x[A-Fa-f0-9]*)')
-        table_re = re.compile('table=([0-9]*)')
-        for flow in flows:
-            fl_cookie = cookie_re.search(flow)
-            if not fl_cookie:
-                continue
-            fl_cookie = fl_cookie.group(1)
-            if int(fl_cookie, 16) != self.agent_uuid_stamp:
-                fl_table = table_re.search(flow)
-                if not fl_table:
-                    continue
-                fl_table = fl_table.group(1)
-                yield flow, fl_cookie, fl_table
-
-    def cleanup_flows(self):
-        flows = self.dump_flows_all_tables()
-        for flow, cookie, table in self._filter_flows(flows):
-            # Deleting a stale flow should be rare; it might deserve
-            # some attention, hence the warning
-            LOG.warning(_LW("Deleting flow %s"), flow)
-            self.delete_flows(cookie=cookie + '/-1', table=table)
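
A standalone sketch of the keyword translation performed by _conv_args()
above, duplicating the mapping so it runs on its own; the flow arguments are
made-up examples:

    _keywords = {'eth_src': 'dl_src', 'eth_dst': 'dl_dst',
                 'ipv4_src': 'nw_src', 'ipv4_dst': 'nw_dst',
                 'table_id': 'table'}

    def conv_args(kwargs):
        # translate Ryu-style names to their ovs-ofctl equivalents
        for our_name, ovs_ofctl_name in _keywords.items():
            if our_name in kwargs:
                kwargs[ovs_ofctl_name] = kwargs.pop(our_name)
        return kwargs

    # install_drop(table_id=2, eth_src='fa:16:3e:00:00:01') ends up calling
    # add_flow(table=2, dl_src='fa:16:3e:00:00:01', actions='drop')
    print(conv_args({'table_id': 2, 'eth_src': 'fa:16:3e:00:00:01'}))
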
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py
deleted file mode 100644 (file)
index 6e95753..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-from neutron.agent.common import ovs_lib
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import ofswitch
-
-
-class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin, ovs_lib.OVSBridge):
-    """Common code for bridges used by OVS agent"""
-
-    def setup_controllers(self, conf):
-        self.set_protocols("[OpenFlow10]")
-        self.del_controller()
-
-    def drop_port(self, in_port):
-        self.install_drop(priority=2, in_port=in_port)
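
A standalone sketch of the method-resolution order this class relies on (see
the NOTE(yamamoto) in ofswitch.py above), using stand-in classes rather than
the real ovs_lib ones:

    class OVSBridge(object):            # stand-in for ovs_lib.OVSBridge
        def delete_flows(self, **kwargs):
            print("OVSBridge.delete_flows", kwargs)

    class OpenFlowSwitchMixin(object):  # stand-in for ofswitch's mixin
        def delete_flows(self, **kwargs):
            # resolves to OVSBridge.delete_flows in OVSAgentBridge's MRO
            super(OpenFlowSwitchMixin, self).delete_flows(**kwargs)

    class OVSAgentBridge(OpenFlowSwitchMixin, OVSBridge):
        pass

    OVSAgentBridge().delete_flows(table=23)
    print([c.__name__ for c in OVSAgentBridge.__mro__])
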
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py
deleted file mode 100644 (file)
index 2023c36..0000000
+++ /dev/null
@@ -1,708 +0,0 @@
-# Copyright 2014, Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_utils import excutils
-
-from neutron._i18n import _LE, _LI, _LW
-from neutron.common import constants as n_const
-from neutron.common import utils as n_utils
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-
-LOG = logging.getLogger(__name__)
-
-cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
-                      'agent.common.config')
-
-
-# A class to represent a DVR-hosted subnet including vif_ports resident on
-# that subnet
-class LocalDVRSubnetMapping(object):
-    def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
-        # set of compute ports on this dvr subnet
-        self.compute_ports = {}
-        self.subnet = subnet
-        self.csnat_ofport = csnat_ofport
-        self.dvr_owned = False
-
-    def __str__(self):
-        return ("subnet = %s compute_ports = %s csnat_port = %s"
-                " is_dvr_owned = %s" %
-                (self.subnet, self.get_compute_ofports(),
-                 self.get_csnat_ofport(), self.is_dvr_owned()))
-
-    def get_subnet_info(self):
-        return self.subnet
-
-    def set_dvr_owned(self, owned):
-        self.dvr_owned = owned
-
-    def is_dvr_owned(self):
-        return self.dvr_owned
-
-    def add_compute_ofport(self, vif_id, ofport):
-        self.compute_ports[vif_id] = ofport
-
-    def remove_compute_ofport(self, vif_id):
-        self.compute_ports.pop(vif_id, 0)
-
-    def remove_all_compute_ofports(self):
-        self.compute_ports.clear()
-
-    def get_compute_ofports(self):
-        return self.compute_ports
-
-    def set_csnat_ofport(self, ofport):
-        self.csnat_ofport = ofport
-
-    def get_csnat_ofport(self):
-        return self.csnat_ofport
-
-
-class OVSPort(object):
-    def __init__(self, id, ofport, mac, device_owner):
-        self.id = id
-        self.mac = mac
-        self.ofport = ofport
-        self.subnets = set()
-        self.device_owner = device_owner
-
-    def __str__(self):
-        return ("OVSPort: id = %s, ofport = %s, mac = %s, "
-                "device_owner = %s, subnets = %s" %
-                (self.id, self.ofport, self.mac,
-                 self.device_owner, self.subnets))
-
-    def add_subnet(self, subnet_id):
-        self.subnets.add(subnet_id)
-
-    def remove_subnet(self, subnet_id):
-        self.subnets.remove(subnet_id)
-
-    def remove_all_subnets(self):
-        self.subnets.clear()
-
-    def get_subnets(self):
-        return self.subnets
-
-    def get_device_owner(self):
-        return self.device_owner
-
-    def get_mac(self):
-        return self.mac
-
-    def get_ofport(self):
-        return self.ofport
-
-
-class OVSDVRNeutronAgent(object):
-    '''
-    Implements OVS-based DVR (Distributed Virtual Router) for overlay
-    networks.
-    '''
-    # history
-    #   1.0 Initial version
-
-    def __init__(self, context, plugin_rpc, integ_br, tun_br,
-                 bridge_mappings, phys_brs, int_ofports, phys_ofports,
-                 patch_int_ofport=constants.OFPORT_INVALID,
-                 patch_tun_ofport=constants.OFPORT_INVALID,
-                 host=None, enable_tunneling=False,
-                 enable_distributed_routing=False):
-        self.context = context
-        self.plugin_rpc = plugin_rpc
-        self.host = host
-        self.enable_tunneling = enable_tunneling
-        self.enable_distributed_routing = enable_distributed_routing
-        self.bridge_mappings = bridge_mappings
-        self.phys_brs = phys_brs
-        self.int_ofports = int_ofports
-        self.phys_ofports = phys_ofports
-        self.reset_ovs_parameters(integ_br, tun_br,
-                                  patch_int_ofport, patch_tun_ofport)
-        self.reset_dvr_parameters()
-        self.dvr_mac_address = None
-        if self.enable_distributed_routing:
-            self.get_dvr_mac_address()
-        self.conf = cfg.CONF
-
-    def setup_dvr_flows(self):
-        self.setup_dvr_flows_on_integ_br()
-        self.setup_dvr_flows_on_tun_br()
-        self.setup_dvr_flows_on_phys_br()
-        self.setup_dvr_mac_flows_on_all_brs()
-
-    def reset_ovs_parameters(self, integ_br, tun_br,
-                             patch_int_ofport, patch_tun_ofport):
-        '''Reset the openvswitch parameters'''
-        self.int_br = integ_br
-        self.tun_br = tun_br
-        self.patch_int_ofport = patch_int_ofport
-        self.patch_tun_ofport = patch_tun_ofport
-
-    def reset_dvr_parameters(self):
-        '''Reset the DVR parameters'''
-        self.local_dvr_map = {}
-        self.local_csnat_map = {}
-        self.local_ports = {}
-        self.registered_dvr_macs = set()
-
-    def get_dvr_mac_address(self):
-        try:
-            self.get_dvr_mac_address_with_retry()
-        except oslo_messaging.RemoteError as e:
-            LOG.warning(_LW('L2 agent could not get DVR MAC address at '
-                            'startup due to RPC error. This happens when '
-                            'the server does not support this RPC API. '
-                            'Detailed message: %s'), e)
-        except oslo_messaging.MessagingTimeout:
-            LOG.error(_LE('DVR: Failed to obtain a valid local '
-                          'DVR MAC address - L2 Agent operating '
-                          'in Non-DVR Mode'))
-
-        if not self.in_distributed_mode():
-            # switch all traffic using L2 learning
-            # REVISIT(yamamoto): why to install the same flow as
-            # setup_integration_br?
-            self.int_br.install_normal()
-
-    def get_dvr_mac_address_with_retry(self):
-        # Get the local DVR MAC Address from the Neutron Server.
-        # This is the first place where we contact the server on startup,
-        # so retry in case it's not ready to respond
-        for retry_count in reversed(range(5)):
-            try:
-                details = self.plugin_rpc.get_dvr_mac_address_by_host(
-                    self.context, self.host)
-            except oslo_messaging.MessagingTimeout as e:
-                with excutils.save_and_reraise_exception() as ctx:
-                    if retry_count > 0:
-                        ctx.reraise = False
-                        LOG.warning(_LW('L2 agent could not get DVR MAC '
-                                        'address from server. Retrying. '
-                                        'Detailed message: %s'), e)
-            else:
-                LOG.debug("L2 Agent DVR: Received response for "
-                          "get_dvr_mac_address_by_host() from "
-                          "plugin: %r", details)
-                self.dvr_mac_address = details['mac_address']
-                return
-
-    def setup_dvr_flows_on_integ_br(self):
-        '''Set up initial dvr flows into br-int'''
-        if not self.in_distributed_mode():
-            return
-
-        LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"),
-                 self.dvr_mac_address)
-        # Remove existing flows in integration bridge
-        if self.conf.AGENT.drop_flows_on_start:
-            self.int_br.delete_flows()
-
-        # Add a canary flow to int_br to track OVS restarts
-        self.int_br.setup_canary_table()
-
-        # Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
-        self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1)
-
-        self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN,
-                                 priority=1)
-
-        # Insert 'normal' action as the default for Table LOCAL_SWITCHING
-        self.int_br.install_normal(table_id=constants.LOCAL_SWITCHING,
-                                   priority=1)
-
-        for physical_network in self.bridge_mappings:
-            self.int_br.install_drop(table_id=constants.LOCAL_SWITCHING,
-                                     priority=2,
-                                     in_port=self.int_ofports[
-                                         physical_network])
-
-    def setup_dvr_flows_on_tun_br(self):
-        '''Set up initial dvr flows into br-tun'''
-        if not self.enable_tunneling or not self.in_distributed_mode():
-            return
-
-        self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS,
-                                 priority=1,
-                                 in_port=self.patch_int_ofport)
-
-        # table-miss packets should be sent to the learning table
-        self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN,
-                                 dest_table_id=constants.LEARN_FROM_TUN)
-
-        self.tun_br.install_goto(table_id=constants.DVR_PROCESS,
-                                 dest_table_id=constants.PATCH_LV_TO_TUN)
-
-    def setup_dvr_flows_on_phys_br(self):
-        '''Set up initial dvr flows into br-phys'''
-        if not self.in_distributed_mode():
-            return
-
-        for physical_network in self.bridge_mappings:
-            self.phys_brs[physical_network].install_goto(
-                in_port=self.phys_ofports[physical_network],
-                priority=2,
-                dest_table_id=constants.DVR_PROCESS_VLAN)
-            self.phys_brs[physical_network].install_goto(
-                priority=1,
-                dest_table_id=constants.DVR_NOT_LEARN_VLAN)
-            self.phys_brs[physical_network].install_goto(
-                table_id=constants.DVR_PROCESS_VLAN,
-                priority=0,
-                dest_table_id=constants.LOCAL_VLAN_TRANSLATION)
-            self.phys_brs[physical_network].install_drop(
-                table_id=constants.LOCAL_VLAN_TRANSLATION,
-                in_port=self.phys_ofports[physical_network],
-                priority=2)
-            self.phys_brs[physical_network].install_normal(
-                table_id=constants.DVR_NOT_LEARN_VLAN,
-                priority=1)
-
-    def _add_dvr_mac_for_phys_br(self, physical_network, mac):
-        self.int_br.add_dvr_mac_vlan(mac=mac,
-                                     port=self.int_ofports[physical_network])
-        phys_br = self.phys_brs[physical_network]
-        phys_br.add_dvr_mac_vlan(mac=mac,
-                                 port=self.phys_ofports[physical_network])
-
-    def _remove_dvr_mac_for_phys_br(self, physical_network, mac):
-        # REVISIT(yamamoto): match in_port as well?
-        self.int_br.remove_dvr_mac_vlan(mac=mac)
-        phys_br = self.phys_brs[physical_network]
-        # REVISIT(yamamoto): match in_port as well?
-        phys_br.remove_dvr_mac_vlan(mac=mac)
-
-    def _add_dvr_mac_for_tun_br(self, mac):
-        self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
-        self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport)
-
-    def _remove_dvr_mac_for_tun_br(self, mac):
-        self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
-        # REVISIT(yamamoto): match in_port as well?
-        self.tun_br.remove_dvr_mac_tun(mac=mac)
-
-    def _add_dvr_mac(self, mac):
-        for physical_network in self.bridge_mappings:
-            self._add_dvr_mac_for_phys_br(physical_network, mac)
-        if self.enable_tunneling:
-            self._add_dvr_mac_for_tun_br(mac)
-        LOG.debug("Added DVR MAC flow for %s", mac)
-        self.registered_dvr_macs.add(mac)
-
-    def _remove_dvr_mac(self, mac):
-        for physical_network in self.bridge_mappings:
-            self._remove_dvr_mac_for_phys_br(physical_network, mac)
-        if self.enable_tunneling:
-            self._remove_dvr_mac_for_tun_br(mac)
-        LOG.debug("Removed DVR MAC flow for %s", mac)
-        self.registered_dvr_macs.remove(mac)
-
-    def setup_dvr_mac_flows_on_all_brs(self):
-        if not self.in_distributed_mode():
-            LOG.debug("Not in distributed mode, ignoring invocation "
-                      "of get_dvr_mac_address_list() ")
-            return
-        dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
-        LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
-        for mac in dvr_macs:
-            if mac['mac_address'] == self.dvr_mac_address:
-                continue
-            self._add_dvr_mac(mac['mac_address'])
-
-    def dvr_mac_address_update(self, dvr_macs):
-        if not self.dvr_mac_address:
-            LOG.debug("Self mac unknown, ignoring this "
-                      "dvr_mac_address_update() ")
-            return
-
-        dvr_host_macs = set()
-        for entry in dvr_macs:
-            if entry['mac_address'] == self.dvr_mac_address:
-                continue
-            dvr_host_macs.add(entry['mac_address'])
-
-        if dvr_host_macs == self.registered_dvr_macs:
-            LOG.debug("DVR Mac address already up to date")
-            return
-
-        dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
-        dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
-
-        for oldmac in dvr_macs_removed:
-            self._remove_dvr_mac(oldmac)
-
-        for newmac in dvr_macs_added:
-            self._add_dvr_mac(newmac)
-
-    def in_distributed_mode(self):
-        return self.dvr_mac_address is not None
-
-    def process_tunneled_network(self, network_type, lvid, segmentation_id):
-        self.tun_br.provision_local_vlan(
-            network_type=network_type,
-            lvid=lvid,
-            segmentation_id=segmentation_id,
-            distributed=self.in_distributed_mode())
-
-    def _bind_distributed_router_interface_port(self, port, lvm,
-                                                fixed_ips, device_owner):
-        # since a distributed router port must have only one fixed
-        # IP, use fixed_ips[0] directly
-        fixed_ip = fixed_ips[0]
-        subnet_uuid = fixed_ip['subnet_id']
-        csnat_ofport = constants.OFPORT_INVALID
-        ldm = None
-        if subnet_uuid in self.local_dvr_map:
-            ldm = self.local_dvr_map[subnet_uuid]
-            csnat_ofport = ldm.get_csnat_ofport()
-            if csnat_ofport == constants.OFPORT_INVALID:
-                LOG.error(_LE("DVR: Duplicate DVR router interface detected "
-                              "for subnet %s"), subnet_uuid)
-                return
-        else:
-            # set up a LocalDVRSubnetMapping for this subnet
-            subnet_info = self.plugin_rpc.get_subnet_for_dvr(
-                self.context, subnet_uuid, fixed_ips=fixed_ips)
-            if not subnet_info:
-                LOG.error(_LE("DVR: Unable to retrieve subnet information "
-                              "for subnet_id %s"), subnet_uuid)
-                return
-            LOG.debug("get_subnet_for_dvr for subnet %(uuid)s "
-                      "returned with %(info)s",
-                      {"uuid": subnet_uuid, "info": subnet_info})
-            ldm = LocalDVRSubnetMapping(subnet_info)
-            self.local_dvr_map[subnet_uuid] = ldm
-
-        # DVR takes over
-        ldm.set_dvr_owned(True)
-
-        vlan_to_use = lvm.vlan
-        if lvm.network_type == p_const.TYPE_VLAN:
-            vlan_to_use = lvm.segmentation_id
-
-        subnet_info = ldm.get_subnet_info()
-        ip_version = subnet_info['ip_version']
-        local_compute_ports = (
-            self.plugin_rpc.get_ports_on_host_by_subnet(
-                self.context, self.host, subnet_uuid))
-        LOG.debug("DVR: List of ports received from "
-                  "get_ports_on_host_by_subnet %s",
-                  local_compute_ports)
-        vif_by_id = self.int_br.get_vifs_by_ids(
-            [local_port['id'] for local_port in local_compute_ports])
-        for local_port in local_compute_ports:
-            vif = vif_by_id.get(local_port['id'])
-            if not vif:
-                continue
-            ldm.add_compute_ofport(vif.vif_id, vif.ofport)
-            if vif.vif_id in self.local_ports:
-                # the compute port is already known on another dvr-routed
-                # subnet; record this subnet against that port as well
-                comp_ovsport = self.local_ports[vif.vif_id]
-                comp_ovsport.add_subnet(subnet_uuid)
-            else:
-                # this is the first time this compute port is seen on a
-                # dvr-routed subnet; record this subnet against the port
-                comp_ovsport = OVSPort(vif.vif_id, vif.ofport,
-                                  vif.vif_mac, local_port['device_owner'])
-                comp_ovsport.add_subnet(subnet_uuid)
-                self.local_ports[vif.vif_id] = comp_ovsport
-            # create rule for just this vm port
-            self.int_br.install_dvr_to_src_mac(
-                network_type=lvm.network_type,
-                vlan_tag=vlan_to_use,
-                gateway_mac=subnet_info['gateway_mac'],
-                dst_mac=comp_ovsport.get_mac(),
-                dst_port=comp_ovsport.get_ofport())
-
-        if lvm.network_type == p_const.TYPE_VLAN:
-            br = self.phys_brs[lvm.physical_network]
-        if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
-            br = self.tun_br
-        # TODO(vivek) remove the IPv6 related flows once SNAT is not
-        # used for IPv6 DVR.
-        if ip_version == 4:
-            br.install_dvr_process_ipv4(
-                vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
-        else:
-            br.install_dvr_process_ipv6(
-                vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
-        br.install_dvr_process(
-            vlan_tag=lvm.vlan, vif_mac=port.vif_mac,
-            dvr_mac_address=self.dvr_mac_address)
-
-        # the dvr router interface is itself a port, so capture it and
-        # record this subnet against that port. A subnet appears only
-        # once as a router interface on any given router
-        ovsport = OVSPort(port.vif_id, port.ofport,
-                          port.vif_mac, device_owner)
-        ovsport.add_subnet(subnet_uuid)
-        self.local_ports[port.vif_id] = ovsport
-
-    def _bind_port_on_dvr_subnet(self, port, lvm, fixed_ips,
-                                 device_owner):
-        # Handle new compute port added use-case
-        subnet_uuid = None
-        for ips in fixed_ips:
-            if ips['subnet_id'] not in self.local_dvr_map:
-                continue
-            subnet_uuid = ips['subnet_id']
-            ldm = self.local_dvr_map[subnet_uuid]
-            if not ldm.is_dvr_owned():
-                # this subnet is only known through a CSNAT port so far;
-                # dvr will do the plumbing for this vm once it takes
-                # ownership of the subnet
-                continue
-
-            # This confirms that this compute port belongs
-            # to a dvr hosted subnet.
-            # Accommodate this VM Port into the existing rule in
-            # the integration bridge
-            LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
-            subnet_info = ldm.get_subnet_info()
-            ldm.add_compute_ofport(port.vif_id, port.ofport)
-            if port.vif_id in self.local_ports:
-                # the compute port is already known on another dvr-routed
-                # subnet; record this subnet against that port as well
-                ovsport = self.local_ports[port.vif_id]
-                ovsport.add_subnet(subnet_uuid)
-            else:
-                # this is the first time this compute port is seen on a
-                # dvr-routed subnet; record this subnet against the port
-                ovsport = OVSPort(port.vif_id, port.ofport,
-                                  port.vif_mac, device_owner)
-                ovsport.add_subnet(subnet_uuid)
-                self.local_ports[port.vif_id] = ovsport
-            vlan_to_use = lvm.vlan
-            if lvm.network_type == p_const.TYPE_VLAN:
-                vlan_to_use = lvm.segmentation_id
-            # create a rule for this vm port
-            self.int_br.install_dvr_to_src_mac(
-                network_type=lvm.network_type,
-                vlan_tag=vlan_to_use,
-                gateway_mac=subnet_info['gateway_mac'],
-                dst_mac=ovsport.get_mac(),
-                dst_port=ovsport.get_ofport())
-
-    def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm,
-                                                  fixed_ips, device_owner):
-        # since a centralized-SNAT (CSNAT) port must have only one fixed
-        # IP, use fixed_ips[0] directly
-        fixed_ip = fixed_ips[0]
-        if port.vif_id in self.local_ports:
-            # log an error if the CSNAT port is already bound to a
-            # different dvr-routed subnet
-            ovsport = self.local_ports[port.vif_id]
-            subs = list(ovsport.get_subnets())
-            if subs[0] == fixed_ip['subnet_id']:
-                return
-            LOG.error(_LE("Centralized-SNAT port %(port)s on subnet "
-                          "%(port_subnet)s already seen on a different "
-                          "subnet %(orig_subnet)s"), {
-                "port": port.vif_id,
-                "port_subnet": fixed_ip['subnet_id'],
-                "orig_subnet": subs[0],
-            })
-            return
-        subnet_uuid = fixed_ip['subnet_id']
-        ldm = None
-        subnet_info = None
-        if subnet_uuid not in self.local_dvr_map:
-            # no csnat ports seen on this subnet - create csnat state
-            # for this subnet
-            subnet_info = self.plugin_rpc.get_subnet_for_dvr(
-                self.context, subnet_uuid, fixed_ips=fixed_ips)
-            ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
-            self.local_dvr_map[subnet_uuid] = ldm
-        else:
-            ldm = self.local_dvr_map[subnet_uuid]
-            subnet_info = ldm.get_subnet_info()
-            # Store csnat OF Port in the existing DVRSubnetMap
-            ldm.set_csnat_ofport(port.ofport)
-
-        # create ovsPort footprint for csnat port
-        ovsport = OVSPort(port.vif_id, port.ofport,
-                          port.vif_mac, device_owner)
-        ovsport.add_subnet(subnet_uuid)
-        self.local_ports[port.vif_id] = ovsport
-        vlan_to_use = lvm.vlan
-        if lvm.network_type == p_const.TYPE_VLAN:
-            vlan_to_use = lvm.segmentation_id
-        self.int_br.install_dvr_to_src_mac(
-            network_type=lvm.network_type,
-            vlan_tag=vlan_to_use,
-            gateway_mac=subnet_info['gateway_mac'],
-            dst_mac=ovsport.get_mac(),
-            dst_port=ovsport.get_ofport())
-
-    def bind_port_to_dvr(self, port, local_vlan_map,
-                         fixed_ips, device_owner):
-        if not self.in_distributed_mode():
-            return
-
-        if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES
-                                               + [p_const.TYPE_VLAN]):
-            LOG.debug("DVR: Port %s is with network_type %s not supported"
-                      " for dvr plumbing" % (port.vif_id,
-                                             local_vlan_map.network_type))
-            return
-
-        if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
-            self._bind_distributed_router_interface_port(port,
-                                                         local_vlan_map,
-                                                         fixed_ips,
-                                                         device_owner)
-
-        if device_owner and n_utils.is_dvr_serviced(device_owner):
-            self._bind_port_on_dvr_subnet(port, local_vlan_map,
-                                          fixed_ips,
-                                          device_owner)
-
-        if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
-            self._bind_centralized_snat_port_on_dvr_subnet(port,
-                                                           local_vlan_map,
-                                                           fixed_ips,
-                                                           device_owner)
-
-    def _unbind_distributed_router_interface_port(self, port, lvm):
-        ovsport = self.local_ports[port.vif_id]
-        # removal of distributed router interface
-        subnet_ids = ovsport.get_subnets()
-        subnet_set = set(subnet_ids)
-        network_type = lvm.network_type
-        physical_network = lvm.physical_network
-        vlan_to_use = lvm.vlan
-        if network_type == p_const.TYPE_VLAN:
-            vlan_to_use = lvm.segmentation_id
-        # process all the subnets associated with this removed port
-        for sub_uuid in subnet_set:
-            if sub_uuid not in self.local_dvr_map:
-                continue
-            ldm = self.local_dvr_map[sub_uuid]
-            subnet_info = ldm.get_subnet_info()
-            ip_version = subnet_info['ip_version']
-            # DVR is no longer the owner
-            ldm.set_dvr_owned(False)
-            # remove all vm rules for this dvr subnet and clear the
-            # compute_ports map altogether
-            compute_ports = ldm.get_compute_ofports()
-            for vif_id in compute_ports:
-                comp_port = self.local_ports[vif_id]
-                self.int_br.delete_dvr_to_src_mac(
-                    network_type=network_type,
-                    vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac())
-            ldm.remove_all_compute_ofports()
-
-            if ldm.get_csnat_ofport() == constants.OFPORT_INVALID:
-                # if there is no csnat port for this subnet, remove
-                # this subnet from local_dvr_map, as no dvr or csnat
-                # ports are available on this agent anymore
-                self.local_dvr_map.pop(sub_uuid, None)
-            if network_type == p_const.TYPE_VLAN:
-                br = self.phys_brs[physical_network]
-            if network_type in constants.TUNNEL_NETWORK_TYPES:
-                br = self.tun_br
-            if ip_version == 4:
-                br.delete_dvr_process_ipv4(
-                    vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
-            else:
-                br.delete_dvr_process_ipv6(
-                    vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
-            ovsport.remove_subnet(sub_uuid)
-
-        if lvm.network_type == p_const.TYPE_VLAN:
-            br = self.phys_brs[physical_network]
-        if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
-            br = self.tun_br
-        br.delete_dvr_process(vlan_tag=lvm.vlan, vif_mac=port.vif_mac)
-
-        # release port state
-        self.local_ports.pop(port.vif_id, None)
-
-    def _unbind_port_on_dvr_subnet(self, port, lvm):
-        ovsport = self.local_ports[port.vif_id]
-        # This confirms that this compute port being removed belonged
-        # to a dvr hosted subnet.
-        LOG.debug("DVR: Removing plumbing for compute port %s", port)
-        subnet_ids = ovsport.get_subnets()
-        # process all the subnets associated with this port
-        for sub_uuid in subnet_ids:
-            if sub_uuid not in self.local_dvr_map:
-                continue
-            ldm = self.local_dvr_map[sub_uuid]
-            ldm.remove_compute_ofport(port.vif_id)
-            vlan_to_use = lvm.vlan
-            if lvm.network_type == p_const.TYPE_VLAN:
-                vlan_to_use = lvm.segmentation_id
-            # first remove this vm port rule
-            self.int_br.delete_dvr_to_src_mac(
-                network_type=lvm.network_type,
-                vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
-        # release port state
-        self.local_ports.pop(port.vif_id, None)
-
-    def _unbind_centralized_snat_port_on_dvr_subnet(self, port, lvm):
-        ovsport = self.local_ports[port.vif_id]
-        # This confirms that this csnat port being removed belonged
-        # to a dvr hosted subnet.
-        LOG.debug("DVR: Removing plumbing for csnat port %s", port)
-        sub_uuid = list(ovsport.get_subnets())[0]
-        # a csnat port has a single subnet; process it if known
-        if sub_uuid not in self.local_dvr_map:
-            return
-        ldm = self.local_dvr_map[sub_uuid]
-        ldm.set_csnat_ofport(constants.OFPORT_INVALID)
-        vlan_to_use = lvm.vlan
-        if lvm.network_type == p_const.TYPE_VLAN:
-            vlan_to_use = lvm.segmentation_id
-        # then remove csnat port rule
-        self.int_br.delete_dvr_to_src_mac(
-            network_type=lvm.network_type,
-            vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
-        if not ldm.is_dvr_owned():
-            # if not owned by DVR (only used for csnat), remove this
-            # subnet state altogether
-            self.local_dvr_map.pop(sub_uuid, None)
-        # release port state
-        self.local_ports.pop(port.vif_id, None)
-
-    def unbind_port_from_dvr(self, vif_port, local_vlan_map):
-        if not self.in_distributed_mode():
-            return
-        # Handle port removed use-case
-        if vif_port and vif_port.vif_id not in self.local_ports:
-            LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
-            return
-
-        ovsport = self.local_ports[vif_port.vif_id]
-        device_owner = ovsport.get_device_owner()
-
-        if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
-            self._unbind_distributed_router_interface_port(vif_port,
-                                                           local_vlan_map)
-
-        if device_owner and n_utils.is_dvr_serviced(device_owner):
-            self._unbind_port_on_dvr_subnet(vif_port,
-                                            local_vlan_map)
-
-        if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
-            self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
-                                                             local_vlan_map)
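
A minimal standalone sketch of the set arithmetic dvr_mac_address_update()
above uses to reconcile this agent's registered MACs with the server's list;
the MAC addresses are made-up examples:

    registered = {'fa:16:3f:00:00:01', 'fa:16:3f:00:00:02'}  # on this agent
    received = {'fa:16:3f:00:00:02', 'fa:16:3f:00:00:03'}    # from server

    dvr_macs_added = received - registered     # -> {'fa:16:3f:00:00:03'}
    dvr_macs_removed = registered - received   # -> {'fa:16:3f:00:00:01'}

    for oldmac in dvr_macs_removed:
        print("remove DVR MAC flows for", oldmac)
    for newmac in dvr_macs_added:
        print("add DVR MAC flows for", newmac)
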
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
deleted file mode 100644 (file)
index 6231559..0000000
+++ /dev/null
@@ -1,2002 +0,0 @@
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import signal
-import sys
-import time
-import uuid
-
-import functools
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_service import loopingcall
-from oslo_service import systemd
-import six
-from six import moves
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.agent.common import ovs_lib
-from neutron.agent.common import polling
-from neutron.agent.common import utils
-from neutron.agent.l2.extensions import manager as ext_manager
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import polling as linux_polling
-from neutron.agent import rpc as agent_rpc
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.api.rpc.handlers import dvr_rpc
-from neutron.common import config
-from neutron.common import constants as n_const
-from neutron.common import exceptions
-from neutron.common import ipv6_utils as ipv6
-from neutron.common import topics
-from neutron.common import utils as n_utils
-from neutron import context
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.common import utils as p_utils
-from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
-from neutron.plugins.ml2.drivers.openvswitch.agent.common \
-    import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent \
-    import ovs_dvr_neutron_agent
-
-
-LOG = logging.getLogger(__name__)
-cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
-                      'agent.common.config')
-cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
-                      'common.config')
-
-# A placeholder for dead vlans.
-DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
-UINT64_BITMASK = (1 << 64) - 1
-
-
-class _mac_mydialect(netaddr.mac_unix):
-    word_fmt = '%.2x'
-
-
-class DeviceListRetrievalError(exceptions.NeutronException):
-    message = _("Unable to retrieve port details for devices: %(devices)s ")
-
-
-class LocalVLANMapping(object):
-
-    def __init__(self, vlan, network_type, physical_network, segmentation_id,
-                 vif_ports=None):
-        if vif_ports is None:
-            vif_ports = {}
-        self.vlan = vlan
-        self.network_type = network_type
-        self.physical_network = physical_network
-        self.segmentation_id = segmentation_id
-        self.vif_ports = vif_ports
-        # set of tunnel ports on which packets should be flooded
-        self.tun_ofports = set()
-
-    def __str__(self):
-        return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
-                (self.vlan, self.network_type, self.physical_network,
-                 self.segmentation_id))
-
-
-class OVSPluginApi(agent_rpc.PluginApi):
-    pass
-
-
-def has_zero_prefixlen_address(ip_addresses):
-    return any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in ip_addresses)
-
-
-class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
-                      l2population_rpc.L2populationRpcCallBackTunnelMixin,
-                      dvr_rpc.DVRAgentRpcCallbackMixin):
-    '''Implements OVS-based tunneling, VLANs and flat networks.
-
-    Two local bridges are created: an integration bridge (defaults to
-    'br-int') and a tunneling bridge (defaults to 'br-tun'). An
-    additional bridge is created for each physical network interface
-    used for VLANs and/or flat networks.
-
-    All VM VIFs are plugged into the integration bridge. VM VIFs on a
-    given virtual network share a common "local" VLAN (i.e. not
-    propagated externally). The VLAN id of this local VLAN is mapped
-    to the physical networking details realizing that virtual network.
-
-    For virtual networks realized as GRE tunnels, a Logical Switch
-    (LS) identifier is used to differentiate tenant traffic on
-    inter-HV tunnels. A mesh of tunnels is created to other
-    Hypervisors in the cloud. These tunnels originate and terminate on
-    the tunneling bridge of each hypervisor. Port patching is done to
-    connect local VLANs on the integration bridge to inter-hypervisor
-    tunnels on the tunnel bridge.
-
-    For each virtual network realized as a VLAN or flat network, a
-    veth or a pair of patch ports is used to connect the local VLAN on
-    the integration bridge with the physical network bridge, with flow
-    rules adding, modifying, or stripping VLAN tags as necessary.
-    '''
-
-    # history
-    #   1.0 Initial version
-    #   1.1 Support Security Group RPC
-    #   1.2 Support DVR (Distributed Virtual Router) RPC
-    #   1.3 Added param devices_to_update to security_groups_provider_updated
-    #   1.4 Added support for network_update
-    target = oslo_messaging.Target(version='1.4')
-
-    def __init__(self, bridge_classes, conf=None):
-        '''Constructor.
-
-        :param bridge_classes: a dict for bridge classes.
-        :param conf: an instance of ConfigOpts
-        '''
-        super(OVSNeutronAgent, self).__init__()
-        self.conf = conf or cfg.CONF
-        self.ovs = ovs_lib.BaseOVS()
-        agent_conf = self.conf.AGENT
-        ovs_conf = self.conf.OVS
-
-        self.fullsync = False
-        # init bridge classes with configured datapath type.
-        self.br_int_cls, self.br_phys_cls, self.br_tun_cls = (
-            functools.partial(bridge_classes[b],
-                              datapath_type=ovs_conf.datapath_type)
-            for b in ('br_int', 'br_phys', 'br_tun'))
-
-        self.use_veth_interconnection = ovs_conf.use_veth_interconnection
-        self.veth_mtu = agent_conf.veth_mtu
-        self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG,
-                                                     p_const.MAX_VLAN_TAG))
-        self.tunnel_types = agent_conf.tunnel_types or []
-        self.l2_pop = agent_conf.l2_population
-        # TODO(ethuleau): Change ARP responder so it's not dependent on the
-        #                 ML2 l2 population mechanism driver.
-        self.enable_distributed_routing = agent_conf.enable_distributed_routing
-        self.arp_responder_enabled = agent_conf.arp_responder and self.l2_pop
-        self.prevent_arp_spoofing = agent_conf.prevent_arp_spoofing
-
-        host = self.conf.host
-        self.agent_id = 'ovs-agent-%s' % host
-
-        if self.tunnel_types:
-            self.enable_tunneling = True
-        else:
-            self.enable_tunneling = False
-
-        # Validate agent configurations
-        self._check_agent_configurations()
-
-        # Keep track of int_br's device count for use by _report_state()
-        self.int_br_device_count = 0
-
-        self.agent_uuid_stamp = uuid.uuid4().int & UINT64_BITMASK
-
-        self.int_br = self.br_int_cls(ovs_conf.integration_bridge)
-        self.setup_integration_br()
-        # Stores port update notifications for processing in main rpc loop
-        self.updated_ports = set()
-        # Stores port delete notifications
-        self.deleted_ports = set()
-
-        self.network_ports = collections.defaultdict(set)
-        # keeps association between ports and ofports to detect ofport change
-        self.vifname_to_ofport_map = {}
-        self.setup_rpc()
-        self.init_extension_manager(self.connection)
-        self.bridge_mappings = self._parse_bridge_mappings(
-            ovs_conf.bridge_mappings)
-        self.setup_physical_bridges(self.bridge_mappings)
-        self.local_vlan_map = {}
-
-        self.tun_br_ofports = {p_const.TYPE_GENEVE: {},
-                               p_const.TYPE_GRE: {},
-                               p_const.TYPE_VXLAN: {}}
-
-        self.polling_interval = agent_conf.polling_interval
-        self.minimize_polling = agent_conf.minimize_polling
-        self.ovsdb_monitor_respawn_interval = (
-            agent_conf.ovsdb_monitor_respawn_interval or
-            constants.DEFAULT_OVSDBMON_RESPAWN)
-        self.local_ip = ovs_conf.local_ip
-        self.tunnel_count = 0
-        self.vxlan_udp_port = agent_conf.vxlan_udp_port
-        self.dont_fragment = agent_conf.dont_fragment
-        self.tunnel_csum = agent_conf.tunnel_csum
-        self.tun_br = None
-        self.patch_int_ofport = constants.OFPORT_INVALID
-        self.patch_tun_ofport = constants.OFPORT_INVALID
-        if self.enable_tunneling:
-            # The patch_int_ofport and patch_tun_ofport are updated
-            # here inside the call to setup_tunnel_br()
-            self.setup_tunnel_br(ovs_conf.tunnel_bridge)
-
-        self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent(
-            self.context,
-            self.dvr_plugin_rpc,
-            self.int_br,
-            self.tun_br,
-            self.bridge_mappings,
-            self.phys_brs,
-            self.int_ofports,
-            self.phys_ofports,
-            self.patch_int_ofport,
-            self.patch_tun_ofport,
-            host,
-            self.enable_tunneling,
-            self.enable_distributed_routing)
-
-        self.agent_state = {
-            'binary': 'neutron-openvswitch-agent',
-            'host': host,
-            'topic': n_const.L2_AGENT_TOPIC,
-            'configurations': {'bridge_mappings': self.bridge_mappings,
-                               'tunnel_types': self.tunnel_types,
-                               'tunneling_ip': self.local_ip,
-                               'l2_population': self.l2_pop,
-                               'arp_responder_enabled':
-                               self.arp_responder_enabled,
-                               'enable_distributed_routing':
-                               self.enable_distributed_routing,
-                               'log_agent_heartbeats':
-                               agent_conf.log_agent_heartbeats,
-                               'extensions': self.ext_manager.names(),
-                               'datapath_type': ovs_conf.datapath_type,
-                               'ovs_capabilities': self.ovs.capabilities,
-                               'vhostuser_socket_dir':
-                               ovs_conf.vhostuser_socket_dir},
-            'agent_type': agent_conf.agent_type,
-            'start_flag': True}
-
-        report_interval = agent_conf.report_interval
-        if report_interval:
-            heartbeat = loopingcall.FixedIntervalLoopingCall(
-                self._report_state)
-            heartbeat.start(interval=report_interval)
-
-        if self.enable_tunneling:
-            self.setup_tunnel_br_flows()
-
-        self.dvr_agent.setup_dvr_flows()
-
-        # Collect additional bridges to monitor
-        self.ancillary_brs = self.setup_ancillary_bridges(
-            ovs_conf.integration_bridge, ovs_conf.tunnel_bridge)
-
-        # In order to keep the existing devices' local VLANs unchanged,
-        # restore the local VLAN mapping at start
-        self._restore_local_vlan_map()
-
-        # Security group agent support
-        self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
-                self.sg_plugin_rpc, self.local_vlan_map,
-                defer_refresh_firewall=True)
-
-        # Initialize iteration counter
-        self.iter_num = 0
-        self.run_daemon_loop = True
-
-        self.catch_sigterm = False
-        self.catch_sighup = False
-
-        # The initialization is complete; we can start receiving messages
-        self.connection.consume_in_threads()
-
-        self.quitting_rpc_timeout = agent_conf.quitting_rpc_timeout
-
-    def _parse_bridge_mappings(self, bridge_mappings):
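-        # Illustrative mapping (hypothetical values):
-        #   "physnet1:br-eth1,physnet2:br-eth2"
-        #   -> {'physnet1': 'br-eth1', 'physnet2': 'br-eth2'}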
-        try:
-            return n_utils.parse_mappings(bridge_mappings)
-        except ValueError as e:
-            raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
-
-    def _report_state(self):
-        # The number of devices on the integration bridge, i.e. ports
-        # likely in use by VMs
-        self.agent_state.get('configurations')['devices'] = (
-            self.int_br_device_count)
-        self.agent_state.get('configurations')['in_distributed_mode'] = (
-            self.dvr_agent.in_distributed_mode())
-
-        try:
-            agent_status = self.state_rpc.report_state(self.context,
-                                                       self.agent_state,
-                                                       True)
-            if agent_status == n_const.AGENT_REVIVED:
-                LOG.info(_LI('Agent has just been revived. '
-                             'Doing a full sync.'))
-                self.fullsync = True
-
-            if self.agent_state.pop('start_flag', None):
-                # On initial start, we notify systemd after initialization
-                # is complete.
-                systemd.notify_once()
-        except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
-
-    def _restore_local_vlan_map(self):
-        self._local_vlan_hints = {}
-        cur_ports = self.int_br.get_vif_ports()
-        port_names = [p.port_name for p in cur_ports]
-        port_info = self.int_br.get_ports_attributes(
-            "Port", columns=["name", "other_config", "tag"], ports=port_names)
-        by_name = {x['name']: x for x in port_info}
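-        # other_config holds the mapping written by port_bound(), roughly
-        # (hypothetical values):
-        #   {'net_uuid': NET_UUID, 'network_type': 'vlan',
-        #    'physical_network': 'physnet1', 'segmentation_id': '100'}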
-        for port in cur_ports:
-            # If a port was deleted between get_vif_ports and
-            # get_ports_attributes, we will get a KeyError
-            try:
-                local_vlan_map = by_name[port.port_name]['other_config']
-                local_vlan = by_name[port.port_name]['tag']
-            except KeyError:
-                continue
-            if not local_vlan:
-                continue
-            net_uuid = local_vlan_map.get('net_uuid')
-            if (net_uuid and net_uuid not in self._local_vlan_hints
-                and local_vlan != DEAD_VLAN_TAG):
-                self.available_local_vlans.remove(local_vlan)
-                self._local_vlan_hints[local_vlan_map['net_uuid']] = \
-                    local_vlan
-
-    def _dispose_local_vlan_hints(self):
-        self.available_local_vlans.update(self._local_vlan_hints.values())
-        self._local_vlan_hints = {}
-
-    def setup_rpc(self):
-        self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
-        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
-        self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN)
-        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
-
-        # RPC network init
-        self.context = context.get_admin_context_without_session()
-        # Define the listening consumers for the agent
-        consumers = [[topics.PORT, topics.UPDATE],
-                     [topics.PORT, topics.DELETE],
-                     [constants.TUNNEL, topics.UPDATE],
-                     [constants.TUNNEL, topics.DELETE],
-                     [topics.SECURITY_GROUP, topics.UPDATE],
-                     [topics.DVR, topics.UPDATE],
-                     [topics.NETWORK, topics.UPDATE]]
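-        # Each [resource, action] pair becomes a topic such as (assumed
-        # naming) 'q-agent-notifier-port-update'.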
-        if self.l2_pop:
-            consumers.append([topics.L2POPULATION, topics.UPDATE])
-        self.connection = agent_rpc.create_consumers([self],
-                                                     topics.AGENT,
-                                                     consumers,
-                                                     start_listening=False)
-
-    def init_extension_manager(self, connection):
-        ext_manager.register_opts(self.conf)
-        self.ext_manager = (
-            ext_manager.AgentExtensionsManager(self.conf))
-        self.ext_manager.initialize(
-            connection, constants.EXTENSION_DRIVER_TYPE)
-
-    def get_net_uuid(self, vif_id):
-        for network_id, vlan_mapping in six.iteritems(self.local_vlan_map):
-            if vif_id in vlan_mapping.vif_ports:
-                return network_id
-
-    def port_update(self, context, **kwargs):
-        port = kwargs.get('port')
-        # Put the port identifier in the updated_ports set.
-        # Even though full port details may be provided with this call,
-        # they are not used, since there is no guarantee the notifications
-        # are processed in the same order as the relevant API requests
-        self.updated_ports.add(port['id'])
-        LOG.debug("port_update message processed for port %s", port['id'])
-
-    def port_delete(self, context, **kwargs):
-        port_id = kwargs.get('port_id')
-        self.deleted_ports.add(port_id)
-        self.updated_ports.discard(port_id)
-        LOG.debug("port_delete message processed for port %s", port_id)
-
-    def network_update(self, context, **kwargs):
-        network_id = kwargs['network']['id']
-        for port_id in self.network_ports[network_id]:
-            # Notifications could arrive out of order; if the port was
-            # deleted, we don't want to update it anymore
-            if port_id not in self.deleted_ports:
-                self.updated_ports.add(port_id)
-        LOG.debug("network_update message processed for network "
-                  "%(network_id)s, with ports: %(ports)s",
-                  {'network_id': network_id,
-                   'ports': self.network_ports[network_id]})
-
-    def _clean_network_ports(self, port_id):
-        for port_set in self.network_ports.values():
-            if port_id in port_set:
-                port_set.remove(port_id)
-                break
-
-    def process_deleted_ports(self, port_info):
-        # don't try to process removed ports as deleted ports since
-        # they are already gone
-        if 'removed' in port_info:
-            self.deleted_ports -= port_info['removed']
-        deleted_ports = list(self.deleted_ports)
-        while self.deleted_ports:
-            port_id = self.deleted_ports.pop()
-            port = self.int_br.get_vif_port_by_id(port_id)
-            self._clean_network_ports(port_id)
-            self.ext_manager.delete_port(self.context,
-                                         {"vif_port": port,
-                                          "port_id": port_id})
-            # Move the port to the dead VLAN so deleted ports no longer
-            # have access to the network
-            if port:
-                # don't log errors since there is a chance someone will be
-                # removing the port from the bridge at the same time
-                self.port_dead(port, log_errors=False)
-            self.port_unbound(port_id)
-        # Flush firewall rules after ports are put on dead VLAN to be
-        # more secure
-        self.sg_agent.remove_devices_filter(deleted_ports)
-
-    def tunnel_update(self, context, **kwargs):
-        LOG.debug("tunnel_update received")
-        if not self.enable_tunneling:
-            return
-        tunnel_ip = kwargs.get('tunnel_ip')
-        tunnel_ip_hex = self.get_ip_in_hex(tunnel_ip)
-        if not tunnel_ip_hex:
-            return
-        tunnel_type = kwargs.get('tunnel_type')
-        if not tunnel_type:
-            LOG.error(_LE("No tunnel_type specified, cannot create tunnels"))
-            return
-        if tunnel_type not in self.tunnel_types:
-            LOG.error(_LE("tunnel_type %s not supported by agent"),
-                      tunnel_type)
-            return
-        if tunnel_ip == self.local_ip:
-            return
-        tun_name = '%s-%s' % (tunnel_type, tunnel_ip_hex)
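-        # e.g. a vxlan tunnel to 192.168.0.1 would (assuming get_ip_in_hex
-        # zero-pads to eight hex digits) be named 'vxlan-c0a80001'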
-        if not self.l2_pop:
-            self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip,
-                                    tunnel_type)
-
-    def tunnel_delete(self, context, **kwargs):
-        LOG.debug("tunnel_delete received")
-        if not self.enable_tunneling:
-            return
-        tunnel_ip = kwargs.get('tunnel_ip')
-        if not tunnel_ip:
-            LOG.error(_LE("No tunnel_ip specified, cannot delete tunnels"))
-            return
-        tunnel_type = kwargs.get('tunnel_type')
-        if not tunnel_type:
-            LOG.error(_LE("No tunnel_type specified, cannot delete tunnels"))
-            return
-        if tunnel_type not in self.tunnel_types:
-            LOG.error(_LE("tunnel_type %s not supported by agent"),
-                      tunnel_type)
-            return
-        ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip)
-        self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type)
-
-    def _tunnel_port_lookup(self, network_type, remote_ip):
-        return self.tun_br_ofports[network_type].get(remote_ip)
-
-    def fdb_add(self, context, fdb_entries):
-        LOG.debug("fdb_add received")
-        for lvm, agent_ports in self.get_agent_ports(fdb_entries,
-                                                     self.local_vlan_map):
-            agent_ports.pop(self.local_ip, None)
-            if agent_ports:
-                if not self.enable_distributed_routing:
-                    with self.tun_br.deferred() as deferred_br:
-                        self.fdb_add_tun(context, deferred_br, lvm,
-                                         agent_ports, self._tunnel_port_lookup)
-                else:
-                    self.fdb_add_tun(context, self.tun_br, lvm,
-                                     agent_ports, self._tunnel_port_lookup)
-
-    def fdb_remove(self, context, fdb_entries):
-        LOG.debug("fdb_remove received")
-        for lvm, agent_ports in self.get_agent_ports(fdb_entries,
-                                                     self.local_vlan_map):
-            agent_ports.pop(self.local_ip, None)
-            if agent_ports:
-                if not self.enable_distributed_routing:
-                    with self.tun_br.deferred() as deferred_br:
-                        self.fdb_remove_tun(context, deferred_br, lvm,
-                                            agent_ports,
-                                            self._tunnel_port_lookup)
-                else:
-                    self.fdb_remove_tun(context, self.tun_br, lvm,
-                                        agent_ports, self._tunnel_port_lookup)
-
-    def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
-        if port_info == n_const.FLOODING_ENTRY:
-            lvm.tun_ofports.add(ofport)
-            br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
-                                    lvm.tun_ofports)
-        else:
-            self.setup_entry_for_arp_reply(br, 'add', lvm.vlan,
-                                           port_info.mac_address,
-                                           port_info.ip_address)
-            br.install_unicast_to_tun(lvm.vlan,
-                                      lvm.segmentation_id,
-                                      ofport,
-                                      port_info.mac_address)
-
-    def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
-        if port_info == n_const.FLOODING_ENTRY:
-            if ofport not in lvm.tun_ofports:
-                LOG.debug("attempt to remove a non-existent port %s", ofport)
-                return
-            lvm.tun_ofports.remove(ofport)
-            if lvm.tun_ofports:
-                br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
-                                        lvm.tun_ofports)
-            else:
-                # This local vlan doesn't require any more tunneling
-                br.delete_flood_to_tun(lvm.vlan)
-        else:
-            self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
-                                           port_info.mac_address,
-                                           port_info.ip_address)
-            br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address)
-
-    def _fdb_chg_ip(self, context, fdb_entries):
-        LOG.debug("update chg_ip received")
-        with self.tun_br.deferred() as deferred_br:
-            self.fdb_chg_ip_tun(context, deferred_br, fdb_entries,
-                                self.local_ip, self.local_vlan_map)
-
-    def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
-                                  ip_address):
-        '''Set the ARP responder entry.
-
-        When the l2 population mechanism driver is used and OVS supports
-        editing ARP fields, a table (ARP_RESPONDER) that resolves ARP
-        locally is added to the tunnel bridge.
-        '''
-        if not self.arp_responder_enabled:
-            return
-
-        ip = netaddr.IPAddress(ip_address)
-        if ip.version == 6:
-            return
-
-        ip = str(ip)
-        mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect))
-
-        if action == 'add':
-            br.install_arp_responder(local_vid, ip, mac)
-        elif action == 'remove':
-            br.delete_arp_responder(local_vid, ip)
-        else:
-            LOG.warning(_LW('Action %s not supported'), action)
-
-    def _local_vlan_for_flat(self, lvid, physical_network):
-        phys_br = self.phys_brs[physical_network]
-        phys_port = self.phys_ofports[physical_network]
-        int_br = self.int_br
-        int_port = self.int_ofports[physical_network]
-        phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
-                                     segmentation_id=None,
-                                     distributed=False)
-        int_br.provision_local_vlan(port=int_port, lvid=lvid,
-                                    segmentation_id=None)
-
-    def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id):
-        distributed = self.enable_distributed_routing
-        phys_br = self.phys_brs[physical_network]
-        phys_port = self.phys_ofports[physical_network]
-        int_br = self.int_br
-        int_port = self.int_ofports[physical_network]
-        phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
-                                     segmentation_id=segmentation_id,
-                                     distributed=distributed)
-        int_br.provision_local_vlan(port=int_port, lvid=lvid,
-                                    segmentation_id=segmentation_id)
-
-    def provision_local_vlan(self, net_uuid, network_type, physical_network,
-                             segmentation_id):
-        '''Provisions a local VLAN.
-
-        :param net_uuid: the uuid of the network associated with this vlan.
-        :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
-                                               'local', 'geneve')
-        :param physical_network: the physical network for 'vlan' or 'flat'
-        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
-        '''
-
-        # On a restart or crash of OVS, the network associated with this VLAN
-        # will already be assigned, so check for that here before assigning a
-        # new one.
-        lvm = self.local_vlan_map.get(net_uuid)
-        if lvm:
-            lvid = lvm.vlan
-        else:
-            lvid = self._local_vlan_hints.pop(net_uuid, None)
-            if lvid is None:
-                if not self.available_local_vlans:
-                    LOG.error(_LE("No local VLAN available for net-id=%s"),
-                              net_uuid)
-                    return
-                lvid = self.available_local_vlans.pop()
-            self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid,
-                                                             network_type,
-                                                             physical_network,
-                                                             segmentation_id)
-
-        LOG.info(_LI("Assigning %(vlan_id)s as local vlan for "
-                     "net-id=%(net_uuid)s"),
-                 {'vlan_id': lvid, 'net_uuid': net_uuid})
-
-        if network_type in constants.TUNNEL_NETWORK_TYPES:
-            if self.enable_tunneling:
-                # outbound broadcast/multicast
-                ofports = list(self.tun_br_ofports[network_type].values())
-                if ofports:
-                    self.tun_br.install_flood_to_tun(lvid,
-                                                     segmentation_id,
-                                                     ofports)
-                # inbound from tunnels: set lvid in the right table
-                # and resubmit to Table LEARN_FROM_TUN for mac learning
-                if self.enable_distributed_routing:
-                    self.dvr_agent.process_tunneled_network(
-                        network_type, lvid, segmentation_id)
-                else:
-                    self.tun_br.provision_local_vlan(
-                        network_type=network_type, lvid=lvid,
-                        segmentation_id=segmentation_id)
-            else:
-                LOG.error(_LE("Cannot provision %(network_type)s network for "
-                              "net-id=%(net_uuid)s - tunneling disabled"),
-                          {'network_type': network_type,
-                           'net_uuid': net_uuid})
-        elif network_type == p_const.TYPE_FLAT:
-            if physical_network in self.phys_brs:
-                self._local_vlan_for_flat(lvid, physical_network)
-            else:
-                LOG.error(_LE("Cannot provision flat network for "
-                              "net-id=%(net_uuid)s - no bridge for "
-                              "physical_network %(physical_network)s"),
-                          {'net_uuid': net_uuid,
-                           'physical_network': physical_network})
-        elif network_type == p_const.TYPE_VLAN:
-            if physical_network in self.phys_brs:
-                self._local_vlan_for_vlan(lvid, physical_network,
-                                          segmentation_id)
-            else:
-                LOG.error(_LE("Cannot provision VLAN network for "
-                              "net-id=%(net_uuid)s - no bridge for "
-                              "physical_network %(physical_network)s"),
-                          {'net_uuid': net_uuid,
-                           'physical_network': physical_network})
-        elif network_type == p_const.TYPE_LOCAL:
-            # no flows needed for local networks
-            pass
-        else:
-            LOG.error(_LE("Cannot provision unknown network type "
-                          "%(network_type)s for net-id=%(net_uuid)s"),
-                      {'network_type': network_type,
-                       'net_uuid': net_uuid})
-
-    def reclaim_local_vlan(self, net_uuid):
-        '''Reclaim a local VLAN.
-
-        :param net_uuid: the network uuid associated with this vlan.
-        '''
-        lvm = self.local_vlan_map.pop(net_uuid, None)
-        if lvm is None:
-            LOG.debug("Network %s not used on agent.", net_uuid)
-            return
-
-        LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from "
-                     "net-id = %(net_uuid)s"),
-                 {'vlan_id': lvm.vlan, 'net_uuid': net_uuid})
-
-        if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
-            if self.enable_tunneling:
-                self.tun_br.reclaim_local_vlan(
-                    network_type=lvm.network_type,
-                    segmentation_id=lvm.segmentation_id)
-                self.tun_br.delete_flood_to_tun(lvm.vlan)
-                self.tun_br.delete_unicast_to_tun(lvm.vlan, None)
-                self.tun_br.delete_arp_responder(lvm.vlan, None)
-                if self.l2_pop:
-                    # Try to remove tunnel ports if not used by other networks
-                    for ofport in lvm.tun_ofports:
-                        self.cleanup_tunnel_port(self.tun_br, ofport,
-                                                 lvm.network_type)
-        elif lvm.network_type == p_const.TYPE_FLAT:
-            if lvm.physical_network in self.phys_brs:
-                # outbound
-                br = self.phys_brs[lvm.physical_network]
-                br.reclaim_local_vlan(
-                    port=self.phys_ofports[lvm.physical_network],
-                    lvid=lvm.vlan)
-                # inbound
-                br = self.int_br
-                br.reclaim_local_vlan(
-                    port=self.int_ofports[lvm.physical_network],
-                    segmentation_id=None)
-        elif lvm.network_type == p_const.TYPE_VLAN:
-            if lvm.physical_network in self.phys_brs:
-                # outbound
-                br = self.phys_brs[lvm.physical_network]
-                br.reclaim_local_vlan(
-                    port=self.phys_ofports[lvm.physical_network],
-                    lvid=lvm.vlan)
-                # inbound
-                br = self.int_br
-                br.reclaim_local_vlan(
-                    port=self.int_ofports[lvm.physical_network],
-                    segmentation_id=lvm.segmentation_id)
-        elif lvm.network_type == p_const.TYPE_LOCAL:
-            # no flows needed for local networks
-            pass
-        else:
-            LOG.error(_LE("Cannot reclaim unknown network type "
-                          "%(network_type)s for net-id=%(net_uuid)s"),
-                      {'network_type': lvm.network_type,
-                       'net_uuid': net_uuid})
-
-        self.available_local_vlans.add(lvm.vlan)
-
-    def port_bound(self, port, net_uuid,
-                   network_type, physical_network,
-                   segmentation_id, fixed_ips, device_owner,
-                   ovs_restarted):
-        '''Bind port to net_uuid/lsw_id and install flows for inbound
-        traffic to the VM.
-
-        :param port: an ovs_lib.VifPort object.
-        :param net_uuid: the net_uuid this port is to be associated with.
-        :param network_type: the network type ('gre', 'vlan', 'flat', 'local')
-        :param physical_network: the physical network for 'vlan' or 'flat'
-        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
-        :param fixed_ips: the ip addresses assigned to this port
-        :param device_owner: the string indicative of owner of this port
-        :param ovs_restarted: indicates if this is called for an OVS restart.
-        '''
-        if net_uuid not in self.local_vlan_map or ovs_restarted:
-            self.provision_local_vlan(net_uuid, network_type,
-                                      physical_network, segmentation_id)
-        lvm = self.local_vlan_map[net_uuid]
-        lvm.vif_ports[port.vif_id] = port
-
-        self.dvr_agent.bind_port_to_dvr(port, lvm,
-                                        fixed_ips,
-                                        device_owner)
-        port_other_config = self.int_br.db_get_val("Port", port.port_name,
-                                                   "other_config")
-        if port_other_config is None:
-            if port.vif_id in self.deleted_ports:
-                LOG.debug("Port %s deleted concurrently", port.vif_id)
-            elif port.vif_id in self.updated_ports:
-                LOG.error(_LE("Expected port %s not found"), port.vif_id)
-            else:
-                LOG.debug("Unable to get config for port %s", port.vif_id)
-            return False
-
-        vlan_mapping = {'net_uuid': net_uuid,
-                        'network_type': network_type,
-                        'physical_network': physical_network}
-        if segmentation_id is not None:
-            vlan_mapping['segmentation_id'] = segmentation_id
-        port_other_config.update(vlan_mapping)
-        self.int_br.set_db_attribute("Port", port.port_name, "other_config",
-                                     port_other_config)
-        return True
-
-    def _bind_devices(self, need_binding_ports):
-        devices_up = []
-        devices_down = []
-        port_names = [p['vif_port'].port_name for p in need_binding_ports]
-        port_info = self.int_br.get_ports_attributes(
-            "Port", columns=["name", "tag"], ports=port_names, if_exists=True)
-        tags_by_name = {x['name']: x['tag'] for x in port_info}
-        for port_detail in need_binding_ports:
-            lvm = self.local_vlan_map.get(port_detail['network_id'])
-            if not lvm:
-                # The network for this port was deleted. Skip the port; it
-                # will be handled as a DEAD port in the next scan
-                continue
-            port = port_detail['vif_port']
-            device = port_detail['device']
-            # Do not bind a port if it's already bound
-            cur_tag = tags_by_name.get(port.port_name)
-            if cur_tag is None:
-                LOG.debug("Port %s was deleted concurrently, skipping it",
-                          port.port_name)
-                continue
-            if cur_tag != lvm.vlan:
-                self.int_br.delete_flows(in_port=port.ofport)
-            if self.prevent_arp_spoofing:
-                self.setup_arp_spoofing_protection(self.int_br,
-                                                   port, port_detail)
-            if cur_tag != lvm.vlan:
-                self.int_br.set_db_attribute(
-                    "Port", port.port_name, "tag", lvm.vlan)
-
-            # update plugin about port status
-            # FIXME(salv-orlando): Failures while updating device status
-            # must be handled appropriately. Otherwise this might prevent
-            # neutron server from sending network-vif-* events to the nova
-            # API server, thus possibly preventing instance spawn.
-            if port_detail.get('admin_state_up'):
-                LOG.debug("Setting status for %s to UP", device)
-                devices_up.append(device)
-            else:
-                LOG.debug("Setting status for %s to DOWN", device)
-                devices_down.append(device)
-        failed_devices = []
-        if devices_up or devices_down:
-            devices_set = self.plugin_rpc.update_device_list(
-                self.context, devices_up, devices_down, self.agent_id,
-                self.conf.host)
-            failed_devices = (devices_set.get('failed_devices_up') +
-                devices_set.get('failed_devices_down'))
-        if failed_devices:
-            LOG.error(_LE("Configuration for devices %s failed!"),
-                      failed_devices)
-            #TODO(rossella_s) handle the resync better in later patches;
-            # this just preserves the current behavior
-            raise DeviceListRetrievalError(devices=failed_devices)
-        LOG.info(_LI("Configuration for devices up %(up)s and devices "
-                     "down %(down)s completed."),
-                 {'up': devices_up, 'down': devices_down})
-
-    @staticmethod
-    def setup_arp_spoofing_protection(bridge, vif, port_details):
-        if not port_details.get('port_security_enabled', True):
-            LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because "
-                         "it has port security disabled"), vif.port_name)
-            bridge.delete_arp_spoofing_protection(port=vif.ofport)
-            return
-        if port_details['device_owner'].startswith(
-            n_const.DEVICE_OWNER_NETWORK_PREFIX):
-            LOG.debug("Skipping ARP spoofing rules for network owned port "
-                      "'%s'.", vif.port_name)
-            bridge.delete_arp_spoofing_protection(port=vif.ofport)
-            return
-        # clear any previous flows related to this port in our ARP table
-        bridge.delete_arp_spoofing_allow_rules(port=vif.ofport)
-        # collect all of the addresses and cidrs that belong to the port
-        addresses = {f['ip_address'] for f in port_details['fixed_ips']}
-        mac_addresses = {vif.vif_mac}
-        if port_details.get('allowed_address_pairs'):
-            addresses |= {p['ip_address']
-                          for p in port_details['allowed_address_pairs']}
-            mac_addresses |= {p['mac_address']
-                              for p in port_details['allowed_address_pairs']
-                              if p.get('mac_address')}
-
-        ipv6_addresses = {ip for ip in addresses
-                          if netaddr.IPNetwork(ip).version == 6}
-        # Allow neighbor advertisements for the LLA address.
-        ipv6_addresses |= {str(ipv6.get_ipv6_addr_by_EUI64(
-                               n_const.IPV6_LLA_PREFIX, mac))
-                           for mac in mac_addresses}
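-        # e.g. (illustrative) MAC fa:16:3e:00:00:01 yields the LLA
-        # fe80::f816:3eff:fe00:1 via EUI-64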
-        if not has_zero_prefixlen_address(ipv6_addresses):
-            # Install protection only when prefix is not zero because a /0
-            # prefix allows any address anyway and the nd_target can only
-            # match on /1 or more.
-            bridge.install_icmpv6_na_spoofing_protection(port=vif.ofport,
-                ip_addresses=ipv6_addresses)
-
-        ipv4_addresses = {ip for ip in addresses
-                          if netaddr.IPNetwork(ip).version == 4}
-        if not has_zero_prefixlen_address(ipv4_addresses):
-            # Install protection only when prefix is not zero because a /0
-            # prefix allows any address anyway and the ARP_SPA can only
-            # match on /1 or more.
-            bridge.install_arp_spoofing_protection(port=vif.ofport,
-                                                   ip_addresses=ipv4_addresses)
-        else:
-            bridge.delete_arp_spoofing_protection(port=vif.ofport)
-
-    def port_unbound(self, vif_id, net_uuid=None):
-        '''Unbind port.
-
-        Removes corresponding local vlan mapping object if this is its last
-        VIF.
-
-        :param vif_id: the id of the vif
-        :param net_uuid: the net_uuid this port is associated with.
-        '''
-        if net_uuid is None:
-            net_uuid = self.get_net_uuid(vif_id)
-
-        if not self.local_vlan_map.get(net_uuid):
-            LOG.info(_LI('port_unbound(): net_uuid %s not in local_vlan_map'),
-                     net_uuid)
-            return
-
-        lvm = self.local_vlan_map[net_uuid]
-
-        if vif_id in lvm.vif_ports:
-            vif_port = lvm.vif_ports[vif_id]
-            self.dvr_agent.unbind_port_from_dvr(vif_port, lvm)
-        lvm.vif_ports.pop(vif_id, None)
-
-        if not lvm.vif_ports:
-            self.reclaim_local_vlan(net_uuid)
-
-    def port_dead(self, port, log_errors=True):
-        '''Once a port has no binding, put it on the "dead vlan".
-
-        :param port: an ovs_lib.VifPort object.
-        '''
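-        # DEAD_VLAN_TAG is assumed to sit outside the usable VLAN range
-        # (p_const.MAX_VLAN_TAG + 1, i.e. 4095), so a dead port can never
-        # share a tag with a live local VLAN.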
-        # Don't kill a port if it's already dead
-        cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag",
-                                         log_errors=log_errors)
-        if cur_tag and cur_tag != DEAD_VLAN_TAG:
-            self.int_br.set_db_attribute("Port", port.port_name, "tag",
-                                         DEAD_VLAN_TAG, log_errors=log_errors)
-            self.int_br.drop_port(in_port=port.ofport)
-
-    def setup_integration_br(self):
-        '''Set up the integration bridge.'''
-        self.int_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
-        # Ensure the integration bridge is created.
-        # ovs_lib.OVSBridge.create() will run
-        #   ovs-vsctl -- --may-exist add-br BRIDGE_NAME
-        # which does nothing if bridge already exists.
-        self.int_br.create()
-        self.int_br.set_secure_mode()
-        self.int_br.setup_controllers(self.conf)
-
-        self.int_br.delete_port(self.conf.OVS.int_peer_patch_port)
-        if self.conf.AGENT.drop_flows_on_start:
-            self.int_br.delete_flows()
-        self.int_br.setup_default_table()
-
-    def setup_ancillary_bridges(self, integ_br, tun_br):
-        '''Set up ancillary bridges - for example br-ex.'''
-        ovs = ovs_lib.BaseOVS()
-        ovs_bridges = set(ovs.get_bridges())
-        # Remove all known bridges
-        ovs_bridges.remove(integ_br)
-        if self.enable_tunneling:
-            ovs_bridges.remove(tun_br)
-        br_names = [self.phys_brs[physical_network].br_name for
-                    physical_network in self.phys_brs]
-        ovs_bridges.difference_update(br_names)
-        # Filter list of bridges to those that have external
-        # bridge-id's configured
-        br_names = []
-        for bridge in ovs_bridges:
-            bridge_id = ovs.get_bridge_external_bridge_id(bridge)
-            if bridge_id != bridge:
-                br_names.append(bridge)
-        ovs_bridges.difference_update(br_names)
-        ancillary_bridges = []
-        for bridge in ovs_bridges:
-            br = ovs_lib.OVSBridge(bridge)
-            LOG.info(_LI('Adding %s to list of bridges.'), bridge)
-            ancillary_bridges.append(br)
-        return ancillary_bridges
-
-    def setup_tunnel_br(self, tun_br_name=None):
-        '''(re)initialize the tunnel bridge.
-
-        Creates tunnel bridge, and links it to the integration bridge
-        using a patch port.
-
-        :param tun_br_name: the name of the tunnel bridge.
-        '''
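-        # The resulting patch wiring, with the (typical) default option
-        # values 'patch-tun' and 'patch-int':
-        #
-        #   br-int [patch-tun] <-----> [patch-int] br-tun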
-        if not self.tun_br:
-            self.tun_br = self.br_tun_cls(tun_br_name)
-        self.tun_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
-
-        # tun_br.create() won't recreate the bridge if it exists, but will
-        # handle cases where something like the datapath_type has changed
-        self.tun_br.create(secure_mode=True)
-        self.tun_br.setup_controllers(self.conf)
-        if (not self.int_br.port_exists(self.conf.OVS.int_peer_patch_port) or
-                self.patch_tun_ofport == ovs_lib.INVALID_OFPORT):
-            self.patch_tun_ofport = self.int_br.add_patch_port(
-                self.conf.OVS.int_peer_patch_port,
-                self.conf.OVS.tun_peer_patch_port)
-        if (not self.tun_br.port_exists(self.conf.OVS.tun_peer_patch_port) or
-                self.patch_int_ofport == ovs_lib.INVALID_OFPORT):
-            self.patch_int_ofport = self.tun_br.add_patch_port(
-                self.conf.OVS.tun_peer_patch_port,
-                self.conf.OVS.int_peer_patch_port)
-        if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport,
-                                      self.patch_int_ofport):
-            LOG.error(_LE("Failed to create OVS patch port. Cannot have "
-                          "tunneling enabled on this agent, since this "
-                          "version of OVS does not support tunnels or patch "
-                          "ports. Agent terminated!"))
-            sys.exit(1)
-        if self.conf.AGENT.drop_flows_on_start:
-            self.tun_br.delete_flows()
-
-    def setup_tunnel_br_flows(self):
-        '''Set up the tunnel bridge.
-
-        Add all flows to the tunnel bridge.
-        '''
-        self.tun_br.setup_default_table(self.patch_int_ofport,
-                                        self.arp_responder_enabled)
-
-    def setup_physical_bridges(self, bridge_mappings):
-        '''Set up the physical network bridges.
-
-        Creates physical network bridges and links them to the
-        integration bridge using veths or patch ports.
-
-        :param bridge_mappings: map physical network names to bridge names.
-        '''
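-        # Assumed resulting wiring per mapping, e.g. physnet1:br-eth1 with
-        # the usual 'int-'/'phy-' interface-name prefixes:
-        #
-        #   br-int [int-br-eth1] <--veth/patch--> [phy-br-eth1] br-eth1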
-        self.phys_brs = {}
-        self.int_ofports = {}
-        self.phys_ofports = {}
-        ip_wrapper = ip_lib.IPWrapper()
-        ovs = ovs_lib.BaseOVS()
-        ovs_bridges = ovs.get_bridges()
-        for physical_network, bridge in six.iteritems(bridge_mappings):
-            LOG.info(_LI("Mapping physical network %(physical_network)s to "
-                         "bridge %(bridge)s"),
-                     {'physical_network': physical_network,
-                      'bridge': bridge})
-            # setup physical bridge
-            if bridge not in ovs_bridges:
-                LOG.error(_LE("Bridge %(bridge)s for physical network "
-                              "%(physical_network)s does not exist. Agent "
-                              "terminated!"),
-                          {'physical_network': physical_network,
-                           'bridge': bridge})
-                sys.exit(1)
-            br = self.br_phys_cls(bridge)
-            # The bridge already exists, so create won't recreate it, but will
-            # handle things like changing the datapath_type
-            br.create()
-            br.setup_controllers(self.conf)
-            br.setup_default_table()
-            self.phys_brs[physical_network] = br
-
-            # interconnect physical and integration bridges using veth/patches
-            int_if_name = p_utils.get_interface_name(
-                bridge, prefix=constants.PEER_INTEGRATION_PREFIX)
-            phys_if_name = p_utils.get_interface_name(
-                bridge, prefix=constants.PEER_PHYSICAL_PREFIX)
-            # The interface type of the port must be the same on the
-            # physical and integration bridges, so check only one of them.
-            int_type = self.int_br.db_get_val("Interface", int_if_name, "type")
-            if self.use_veth_interconnection:
-                # Drop ports if the interface type doesn't match the
-                # configuration value.
-                if int_type == 'patch':
-                    self.int_br.delete_port(int_if_name)
-                    br.delete_port(phys_if_name)
-                device = ip_lib.IPDevice(int_if_name)
-                if device.exists():
-                    device.link.delete()
-                    # Give udev a chance to process its rules here, to avoid
-                    # race conditions between commands launched by udev rules
-                    # and the subsequent call to ip_wrapper.add_veth
-                    utils.execute(['udevadm', 'settle', '--timeout=10'])
-                int_veth, phys_veth = ip_wrapper.add_veth(int_if_name,
-                                                          phys_if_name)
-                int_ofport = self.int_br.add_port(int_veth)
-                phys_ofport = br.add_port(phys_veth)
-            else:
-                # Drop ports if the interface type doesn't match the
-                # configuration value
-                if int_type == 'veth':
-                    self.int_br.delete_port(int_if_name)
-                    br.delete_port(phys_if_name)
-                # Create patch ports without associating them in order to block
-                # untranslated traffic before association
-                int_ofport = self.int_br.add_patch_port(
-                    int_if_name, constants.NONEXISTENT_PEER)
-                phys_ofport = br.add_patch_port(
-                    phys_if_name, constants.NONEXISTENT_PEER)
-
-            self.int_ofports[physical_network] = int_ofport
-            self.phys_ofports[physical_network] = phys_ofport
-
-            # block all untranslated traffic between bridges
-            self.int_br.drop_port(in_port=int_ofport)
-            br.drop_port(in_port=phys_ofport)
-
-            if self.use_veth_interconnection:
-                # enable veth to pass traffic
-                int_veth.link.set_up()
-                phys_veth.link.set_up()
-                if self.veth_mtu:
-                    # set up mtu size for veth interfaces
-                    int_veth.link.set_mtu(self.veth_mtu)
-                    phys_veth.link.set_mtu(self.veth_mtu)
-            else:
-                # associate patch ports to pass traffic
-                self.int_br.set_db_attribute('Interface', int_if_name,
-                                             'options:peer', phys_if_name)
-                br.set_db_attribute('Interface', phys_if_name,
-                                    'options:peer', int_if_name)
-
-    def update_stale_ofport_rules(self):
-        # Right now the ARP spoofing rules are the only thing that uses
-        # ofport-based rules, so make ARP spoofing protection conditional
-        # until something else uses ofport
-        if not self.prevent_arp_spoofing:
-            return []
-        previous = self.vifname_to_ofport_map
-        current = self.int_br.get_vif_port_to_ofport_map()
-
-        # if any ofport numbers have changed, re-process the devices as
-        # added ports so any rules based on ofport numbers are updated.
-        moved_ports = self._get_ofport_moves(current, previous)
-
-        # delete any stale rules based on removed ofports
-        ofports_deleted = set(previous.values()) - set(current.values())
-        for ofport in ofports_deleted:
-            self.int_br.delete_arp_spoofing_protection(port=ofport)
-
-        # store map for next iteration
-        self.vifname_to_ofport_map = current
-        return moved_ports
-
-    @staticmethod
-    def _get_ofport_moves(current, previous):
-        """Returns a list of moved ports.
-
-        Takes two port->ofport maps and returns a list of ports that moved
-        to a different ofport. Deleted ports are not included.
-        """
-        port_moves = []
-        for name, ofport in previous.items():
-            if name not in current:
-                continue
-            current_ofport = current[name]
-            if ofport != current_ofport:
-                port_moves.append(name)
-        return port_moves
-
-    def _get_port_info(self, registered_ports, cur_ports,
-                       readd_registered_ports):
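-        # Returns, e.g. (hypothetical sets of port ids):
-        #   {'current': {a, b, c}, 'added': {c}, 'removed': {d}}
-        # 'added'/'removed' are omitted when nothing changed and
-        # readd_registered_ports is False.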
-        port_info = {'current': cur_ports}
-        # FIXME(salv-orlando): It's not really necessary to return early
-        # if nothing has changed.
-        if not readd_registered_ports and cur_ports == registered_ports:
-            return port_info
-
-        if readd_registered_ports:
-            port_info['added'] = cur_ports
-        else:
-            port_info['added'] = cur_ports - registered_ports
-        # Update port_info with ports not found on the integration bridge
-        port_info['removed'] = registered_ports - cur_ports
-        return port_info
-
-    def process_ports_events(self, events, registered_ports, ancillary_ports,
-                             old_ports_not_ready, updated_ports=None):
-        port_info = {}
-        port_info['added'] = set()
-        port_info['removed'] = set()
-        port_info['current'] = registered_ports
-
-        ancillary_port_info = {}
-        ancillary_port_info['added'] = set()
-        ancillary_port_info['removed'] = set()
-        ancillary_port_info['current'] = ancillary_ports
-        ports_not_ready_yet = set()
-
-        # If a port was added and then removed, or vice versa, the agent
-        # can't know the order of the operations, so check whether the
-        # port exists to determine if it was added or deleted
-        ports_removed_and_added = [
-            p for p in events['added'] if p in events['removed']]
-        for p in ports_removed_and_added:
-            if ovs_lib.BaseOVS().port_exists(p['name']):
-                events['removed'].remove(p)
-            else:
-                events['added'].remove(p)
-
-        #TODO(rossella_s): scanning the ancillary bridge won't be needed
-        # anymore once https://review.openstack.org/#/c/203381 merges, since
-        # the bridge id stored in external_ids will be used to identify the
-        # bridge the port belongs to
-        cur_ancillary_ports = set()
-        for bridge in self.ancillary_brs:
-            cur_ancillary_ports |= bridge.get_vif_port_set()
-        cur_ancillary_ports |= ancillary_port_info['current']
-
-        def _process_port(port, ports, ancillary_ports):
-            # Check that 'iface-id' is set; otherwise this is not a port
-            # the agent should care about
-            if 'attached-mac' in port.get('external_ids', []):
-                iface_id = self.int_br.portid_from_external_ids(
-                    port['external_ids'])
-                if iface_id:
-                    if port['ofport'] == ovs_lib.UNASSIGNED_OFPORT:
-                        LOG.debug("Port %s not ready yet on the bridge",
-                                  iface_id)
-                        ports_not_ready_yet.add(port['name'])
-                        return
-                    # check if port belongs to ancillary bridge
-                    if iface_id in cur_ancillary_ports:
-                        ancillary_ports.add(iface_id)
-                    else:
-                        ports.add(iface_id)
-        if old_ports_not_ready:
-            old_ports_not_ready_attrs = self.int_br.get_ports_attributes(
-                'Interface', columns=['name', 'external_ids', 'ofport'],
-                ports=old_ports_not_ready, if_exists=True)
-            now_ready_ports = set(
-                [p['name'] for p in old_ports_not_ready_attrs])
-            LOG.debug("Ports %s are now ready", now_ready_ports)
-            old_ports_not_ready_yet = old_ports_not_ready - now_ready_ports
-            removed_ports = set([p['name'] for p in events['removed']])
-            old_ports_not_ready_yet -= removed_ports
-            LOG.debug("Ports %s were not ready at last iteration and are not "
-                      "ready yet", old_ports_not_ready_yet)
-            ports_not_ready_yet |= old_ports_not_ready_yet
-            events['added'].extend(old_ports_not_ready_attrs)
-
-        for port in events['added']:
-            _process_port(port, port_info['added'],
-                          ancillary_port_info['added'])
-        for port in events['removed']:
-            _process_port(port, port_info['removed'],
-                          ancillary_port_info['removed'])
-
-        if updated_ports is None:
-            updated_ports = set()
-        updated_ports.update(self.check_changed_vlans())
-
-        # Disregard devices that were never noticed by the agent
-        port_info['removed'] &= port_info['current']
-        port_info['current'] |= port_info['added']
-        port_info['current'] -= port_info['removed']
-
-        ancillary_port_info['removed'] &= ancillary_port_info['current']
-        ancillary_port_info['current'] |= ancillary_port_info['added']
-        ancillary_port_info['current'] -= ancillary_port_info['removed']
-
-        if updated_ports:
-            # Some updated ports might have been removed in the
-            # meanwhile, and therefore should not be processed.
-            # In this case the updated port won't be found among
-            # current ports.
-            updated_ports &= port_info['current']
-            port_info['updated'] = updated_ports
-        return port_info, ancillary_port_info, ports_not_ready_yet
-
-    def scan_ports(self, registered_ports, sync, updated_ports=None):
-        cur_ports = self.int_br.get_vif_port_set()
-        self.int_br_device_count = len(cur_ports)
-        port_info = self._get_port_info(registered_ports, cur_ports, sync)
-        if updated_ports is None:
-            updated_ports = set()
-        updated_ports.update(self.check_changed_vlans())
-        if updated_ports:
-            # Some updated ports might have been removed in the
-            # meanwhile, and therefore should not be processed.
-            # In this case the updated port won't be found among
-            # current ports.
-            updated_ports &= cur_ports
-            if updated_ports:
-                port_info['updated'] = updated_ports
-        return port_info
-
-    def scan_ancillary_ports(self, registered_ports, sync):
-        cur_ports = set()
-        for bridge in self.ancillary_brs:
-            cur_ports |= bridge.get_vif_port_set()
-        return self._get_port_info(registered_ports, cur_ports, sync)
-
-    def check_changed_vlans(self):
-        """Return ports which have lost their vlan tag.
-
-        The returned value is a set of port ids of the ports concerned by a
-        vlan tag loss.
-        """
-        port_tags = self.int_br.get_port_tag_dict()
-        changed_ports = set()
-        for lvm in self.local_vlan_map.values():
-            for port in lvm.vif_ports.values():
-                if (
-                    port.port_name in port_tags
-                    and port_tags[port.port_name] != lvm.vlan
-                ):
-                    LOG.info(
-                        _LI("Port '%(port_name)s' has lost "
-                            "its vlan tag '%(vlan_tag)d'!"),
-                        {'port_name': port.port_name,
-                         'vlan_tag': lvm.vlan}
-                    )
-                    changed_ports.add(port.vif_id)
-        return changed_ports
-
-    def treat_vif_port(self, vif_port, port_id, network_id, network_type,
-                       physical_network, segmentation_id, admin_state_up,
-                       fixed_ips, device_owner, ovs_restarted):
-        # When this function is called for a port, the port should have an
-        # OVS ofport configured, as only such ports are considered for
-        # treatment. If that is not the case, it is a potential error
-        # condition of which operators should be aware
-        port_needs_binding = True
-        if not vif_port.ofport:
-            LOG.warn(_LW("VIF port: %s has no ofport configured, "
-                         "and might not be able to transmit"), vif_port.vif_id)
-        if vif_port:
-            if admin_state_up:
-                port_needs_binding = self.port_bound(
-                    vif_port, network_id, network_type,
-                    physical_network, segmentation_id,
-                    fixed_ips, device_owner, ovs_restarted)
-            else:
-                LOG.info(_LI("VIF port: %s admin state up disabled, "
-                             "putting on the dead VLAN"), vif_port.vif_id)
-
-                self.port_dead(vif_port)
-                port_needs_binding = False
-        else:
-            LOG.debug("No VIF port for port %s defined on agent.", port_id)
-        return port_needs_binding
-
-    def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
-        ofport = br.add_tunnel_port(port_name,
-                                    remote_ip,
-                                    self.local_ip,
-                                    tunnel_type,
-                                    self.vxlan_udp_port,
-                                    self.dont_fragment,
-                                    self.tunnel_csum)
-        if ofport == ovs_lib.INVALID_OFPORT:
-            LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"),
-                      {'type': tunnel_type, 'ip': remote_ip})
-            return 0
-
-        self.tun_br_ofports[tunnel_type][remote_ip] = ofport
-        # Add a flow in the default table to resubmit to the right
-        # tunneling table (the lvid will be set in the latter)
-        br.setup_tunnel_port(tunnel_type, ofport)
-
-        ofports = self.tun_br_ofports[tunnel_type].values()
-        if ofports and not self.l2_pop:
-            # Update flooding flows to include the new tunnel
-            for vlan_mapping in list(self.local_vlan_map.values()):
-                if vlan_mapping.network_type == tunnel_type:
-                    br.install_flood_to_tun(vlan_mapping.vlan,
-                                            vlan_mapping.segmentation_id,
-                                            ofports)
-        return ofport
-
-    def setup_tunnel_port(self, br, remote_ip, network_type):
-        remote_ip_hex = self.get_ip_in_hex(remote_ip)
-        if not remote_ip_hex:
-            return 0
-        port_name = '%s-%s' % (network_type, remote_ip_hex)
-        ofport = self._setup_tunnel_port(br,
-                                         port_name,
-                                         remote_ip,
-                                         network_type)
-        return ofport
-
-    def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
-        # Check if this tunnel port is still used
-        for lvm in self.local_vlan_map.values():
-            if tun_ofport in lvm.tun_ofports:
-                break
-        # If not, remove it
-        else:
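-            # This for/else branch runs only when the loop above finished
-            # without 'break', i.e. no local VLAN mapping still references
-            # the tunnel ofport.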
-            items = list(self.tun_br_ofports[tunnel_type].items())
-            for remote_ip, ofport in items:
-                if ofport == tun_ofport:
-                    port_name = '%s-%s' % (tunnel_type,
-                                           self.get_ip_in_hex(remote_ip))
-                    br.delete_port(port_name)
-                    br.cleanup_tunnel_port(ofport)
-                    self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
-
-    def treat_devices_added_or_updated(self, devices, ovs_restarted):
-        skipped_devices = []
-        need_binding_devices = []
-        security_disabled_devices = []
-        devices_details_list = (
-            self.plugin_rpc.get_devices_details_list_and_failed_devices(
-                self.context,
-                devices,
-                self.agent_id,
-                self.conf.host))
-        if devices_details_list.get('failed_devices'):
-            #TODO(rossella_s) handle the resync better in later patches;
-            # this just preserves the current behavior
-            raise DeviceListRetrievalError(devices=devices)
-
-        devices = devices_details_list.get('devices')
-        vif_by_id = self.int_br.get_vifs_by_ids(
-            [vif['device'] for vif in devices])
-        for details in devices:
-            device = details['device']
-            LOG.debug("Processing port: %s", device)
-            port = vif_by_id.get(device)
-            if not port:
-                # The port disappeared and cannot be processed
-                LOG.info(_LI("Port %s was not found on the integration bridge "
-                             "and will therefore not be processed"), device)
-                skipped_devices.append(device)
-                continue
-
-            if 'port_id' in details:
-                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
-                         {'device': device, 'details': details})
-                details['vif_port'] = port
-                need_binding = self.treat_vif_port(port, details['port_id'],
-                                                   details['network_id'],
-                                                   details['network_type'],
-                                                   details['physical_network'],
-                                                   details['segmentation_id'],
-                                                   details['admin_state_up'],
-                                                   details['fixed_ips'],
-                                                   details['device_owner'],
-                                                   ovs_restarted)
-                if need_binding:
-                    need_binding_devices.append(details)
-
-                port_security = details['port_security_enabled']
-                has_sgs = 'security_groups' in details
-                if not port_security or not has_sgs:
-                    security_disabled_devices.append(device)
-                self._update_port_network(details['port_id'],
-                                          details['network_id'])
-                self.ext_manager.handle_port(self.context, details)
-            else:
-                LOG.warn(_LW("Device %s not defined on plugin"), device)
-                if (port and port.ofport != -1):
-                    self.port_dead(port)
-        return skipped_devices, need_binding_devices, security_disabled_devices
-
-    def _update_port_network(self, port_id, network_id):
-        self._clean_network_ports(port_id)
-        self.network_ports[network_id].add(port_id)
-
-    def treat_ancillary_devices_added(self, devices):
-        devices_details_list = (
-            self.plugin_rpc.get_devices_details_list_and_failed_devices(
-                self.context,
-                devices,
-                self.agent_id,
-                self.conf.host))
-        if devices_details_list.get('failed_devices'):
-            # TODO(rossella_s): handle the resync better in the next
-            # patches; this is just to preserve the current behavior
-            raise DeviceListRetrievalError(devices=devices)
-        devices_added = [
-            d['device'] for d in devices_details_list.get('devices')]
-        LOG.info(_LI("Ancillary Ports %s added"), devices_added)
-
-        # update plugin about port status
-        devices_set_up = (
-            self.plugin_rpc.update_device_list(self.context,
-                                               devices_added,
-                                               [],
-                                               self.agent_id,
-                                               self.conf.host))
-        if devices_set_up.get('failed_devices_up'):
-            # TODO(rossella_s): handle the resync better in the next
-            # patches; this is just to preserve the current behavior
-            raise DeviceListRetrievalError()
-
-    def treat_devices_removed(self, devices):
-        resync = False
-        self.sg_agent.remove_devices_filter(devices)
-        LOG.info(_LI("Ports %s removed"), devices)
-        devices_down = self.plugin_rpc.update_device_list(self.context,
-                                                          [],
-                                                          devices,
-                                                          self.agent_id,
-                                                          self.conf.host)
-        failed_devices = devices_down.get('failed_devices_down')
-        if failed_devices:
-            LOG.debug("Port removal failed for %(devices)s ", failed_devices)
-            resync = True
-        for device in devices:
-            self.port_unbound(device)
-        return resync
-
-    def treat_ancillary_devices_removed(self, devices):
-        resync = False
-        LOG.info(_LI("Ancillary ports %s removed"), devices)
-        devices_down = self.plugin_rpc.update_device_list(self.context,
-                                                          [],
-                                                          devices,
-                                                          self.agent_id,
-                                                          self.conf.host)
-        failed_devices = devices_down.get('failed_devices_down')
-        if failed_devices:
-            LOG.debug("Port removal failed for %(devices)s ", failed_devices)
-            resync = True
-        for detail in devices_down.get('devices_down'):
-            if detail['exists']:
-                LOG.info(_LI("Port %s updated."), detail['device'])
-                # Nothing to do regarding local networking
-            else:
-                LOG.debug("Device %s not defined on plugin", detail['device'])
-        return resync
-
-    def process_network_ports(self, port_info, ovs_restarted):
-        resync_a = False
-        resync_b = False
-        # TODO(salv-orlando): consider a solution for ensuring notifications
-        # are processed exactly in the same order in which they were
-        # received. This is tricky because there are two notification
-        # sources: the neutron server and the ovs db monitor process.
-        # If there is an exception while processing security groups, ports
-        # will not be wired anyway, and a resync will be triggered.
-        # VIF wiring always needs to be performed for 'new' devices.
-        # For updated ports, re-wiring is not needed in most cases, but needs
-        # to be performed anyway when the admin state of a device is changed.
-        # A device might be in both the 'added' and 'updated'
-        # lists at the same time; avoid processing it twice.
-        devices_added_updated = (port_info.get('added', set()) |
-                                 port_info.get('updated', set()))
-        need_binding_devices = []
-        security_disabled_ports = []
-        if devices_added_updated:
-            start = time.time()
-            try:
-                (skipped_devices, need_binding_devices,
-                    security_disabled_ports) = (
-                    self.treat_devices_added_or_updated(
-                        devices_added_updated, ovs_restarted))
-                LOG.debug("process_network_ports - iteration:%(iter_num)d - "
-                          "treat_devices_added_or_updated completed. "
-                          "Skipped %(num_skipped)d devices of "
-                          "%(num_current)d devices currently available. "
-                          "Time elapsed: %(elapsed).3f",
-                          {'iter_num': self.iter_num,
-                           'num_skipped': len(skipped_devices),
-                           'num_current': len(port_info['current']),
-                           'elapsed': time.time() - start})
-                # Update the list of current ports storing only those which
-                # have been actually processed.
-                port_info['current'] = (port_info['current'] -
-                                        set(skipped_devices))
-            except DeviceListRetrievalError:
-                # Need to resync as there was an error with server
-                # communication.
-                LOG.exception(_LE("process_network_ports - iteration:%d - "
-                                  "failure while retrieving port details "
-                                  "from server"), self.iter_num)
-                resync_a = True
-
-        # TODO(salv-orlando): Optimize avoiding applying filters
-        # unnecessarily, (eg: when there are no IP address changes)
-        added_ports = port_info.get('added', set())
-        if security_disabled_ports:
-            added_ports -= set(security_disabled_ports)
-        self.sg_agent.setup_port_filters(added_ports,
-                                         port_info.get('updated', set()))
-        self._bind_devices(need_binding_devices)
-
-        if 'removed' in port_info and port_info['removed']:
-            start = time.time()
-            resync_b = self.treat_devices_removed(port_info['removed'])
-            LOG.debug("process_network_ports - iteration:%(iter_num)d - "
-                      "treat_devices_removed completed in %(elapsed).3f",
-                      {'iter_num': self.iter_num,
-                       'elapsed': time.time() - start})
-        # If one of the above operations fails => resync with plugin
-        return (resync_a | resync_b)
-
-    def process_ancillary_network_ports(self, port_info):
-        resync_a = False
-        resync_b = False
-        if 'added' in port_info and port_info['added']:
-            start = time.time()
-            try:
-                self.treat_ancillary_devices_added(port_info['added'])
-                LOG.debug("process_ancillary_network_ports - iteration: "
-                          "%(iter_num)d - treat_ancillary_devices_added "
-                          "completed in %(elapsed).3f",
-                          {'iter_num': self.iter_num,
-                           'elapsed': time.time() - start})
-            except DeviceListRetrievalError:
-                # Need to resync as there was an error with server
-                # communication.
-                LOG.exception(_LE("process_ancillary_network_ports - "
-                                  "iteration:%d - failure while retrieving "
-                                  "port details from server"), self.iter_num)
-                resync_a = True
-        if 'removed' in port_info and port_info['removed']:
-            start = time.time()
-            resync_b = self.treat_ancillary_devices_removed(
-                port_info['removed'])
-            LOG.debug("process_ancillary_network_ports - iteration: "
-                      "%(iter_num)d - treat_ancillary_devices_removed "
-                      "completed in %(elapsed).3f",
-                      {'iter_num': self.iter_num,
-                       'elapsed': time.time() - start})
-
-        # If one of the above operations fails => resync with plugin
-        return (resync_a | resync_b)
-
-    def get_ip_in_hex(self, ip_address):
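-        # Render an IPv4 address as a zero-padded 8-digit hex string
-        # (e.g. 192.168.0.1 -> 'c0a80001'), used to build tunnel port names.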
-        try:
-            return '%08x' % netaddr.IPAddress(ip_address, version=4)
-        except Exception:
-            LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
-            return
-
-    def tunnel_sync(self):
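-        # Returns True if the sync failed and should be retried on the next
-        # rpc_loop iteration, False on success.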
-        try:
-            for tunnel_type in self.tunnel_types:
-                details = self.plugin_rpc.tunnel_sync(self.context,
-                                                      self.local_ip,
-                                                      tunnel_type,
-                                                      self.conf.host)
-                if not self.l2_pop:
-                    tunnels = details['tunnels']
-                    for tunnel in tunnels:
-                        if self.local_ip != tunnel['ip_address']:
-                            remote_ip = tunnel['ip_address']
-                            remote_ip_hex = self.get_ip_in_hex(remote_ip)
-                            if not remote_ip_hex:
-                                continue
-                            tun_name = '%s-%s' % (tunnel_type, remote_ip_hex)
-                            self._setup_tunnel_port(self.tun_br,
-                                                    tun_name,
-                                                    tunnel['ip_address'],
-                                                    tunnel_type)
-        except Exception as e:
-            LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s",
-                      {'local_ip': self.local_ip, 'e': e})
-            return True
-        return False
-
-    def _agent_has_updates(self, polling_manager):
-        return (polling_manager.is_polling_required or
-                self.updated_ports or
-                self.deleted_ports or
-                self.sg_agent.firewall_refresh_needed())
-
-    def _port_info_has_changes(self, port_info):
-        return (port_info.get('added') or
-                port_info.get('removed') or
-                port_info.get('updated'))
-
-    def check_ovs_status(self):
-        # Check for the canary flow
-        status = self.int_br.check_canary_table()
-        if status == constants.OVS_RESTARTED:
-            LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset "
-                         "bridges and recover ports."))
-        elif status == constants.OVS_DEAD:
-            LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running "
-                         "and checking OVS status periodically."))
-        return status
-
-    def loop_count_and_wait(self, start_time, port_stats):
-        # sleep till end of polling interval
-        elapsed = time.time() - start_time
-        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d "
-                  "completed. Processed ports statistics: "
-                  "%(port_stats)s. Elapsed:%(elapsed).3f",
-                  {'iter_num': self.iter_num,
-                   'port_stats': port_stats,
-                   'elapsed': elapsed})
-        if elapsed < self.polling_interval:
-            time.sleep(self.polling_interval - elapsed)
-        else:
-            LOG.debug("Loop iteration exceeded interval "
-                      "(%(polling_interval)s vs. %(elapsed)s)!",
-                      {'polling_interval': self.polling_interval,
-                       'elapsed': elapsed})
-        self.iter_num = self.iter_num + 1
-
-    def get_port_stats(self, port_info, ancillary_port_info):
-        port_stats = {
-            'regular': {
-                'added': len(port_info.get('added', [])),
-                'updated': len(port_info.get('updated', [])),
-                'removed': len(port_info.get('removed', []))}}
-        if self.ancillary_brs:
-            port_stats['ancillary'] = {
-                'added': len(ancillary_port_info.get('added', [])),
-                'removed': len(ancillary_port_info.get('removed', []))}
-        return port_stats
-
-    def cleanup_stale_flows(self):
-        bridges = [self.int_br]
-        if self.enable_tunneling:
-            bridges.append(self.tun_br)
-        for bridge in bridges:
-            LOG.info(_LI("Cleaning stale %s flows"), bridge.br_name)
-            bridge.cleanup_flows()
-
-    def process_port_info(self, start, polling_manager, sync, ovs_restarted,
-                          ports, ancillary_ports, updated_ports_copy,
-                          consecutive_resyncs, ports_not_ready_yet):
-        # There are polling managers that don't have get_events, e.g. the
-        # AlwaysPoll manager used by Windows implementations.
-        # REVISIT (rossella_s) This needs to be reworked to hide implementation
-        # details regarding polling in BasePollingManager subclasses
-        if sync or not hasattr(polling_manager, 'get_events'):
-            if sync:
-                LOG.info(_LI("Agent out of sync with plugin!"))
-                consecutive_resyncs = consecutive_resyncs + 1
-                if (consecutive_resyncs >=
-                        constants.MAX_DEVICE_RETRIES):
-                    LOG.warn(_LW(
-                        "Clearing cache of registered ports; number of "
-                        "resync retries exceeded %s"),
-                        constants.MAX_DEVICE_RETRIES)
-                    ports.clear()
-                    ancillary_ports.clear()
-                    consecutive_resyncs = 0
-            else:
-                consecutive_resyncs = 0
-
-            # NOTE(rossella_s): don't empty the queue of events by calling
-            # polling_manager.get_events(), since the agent might miss some
-            # events (for example a port deletion)
-            reg_ports = (set() if ovs_restarted else ports)
-            port_info = self.scan_ports(reg_ports, sync,
-                                        updated_ports_copy)
-            # Treat ancillary devices if they exist
-            if self.ancillary_brs:
-                ancillary_port_info = self.scan_ancillary_ports(
-                    ancillary_ports, sync)
-                LOG.debug("Agent rpc_loop - iteration:%(iter_num)d"
-                          " - ancillary port info retrieved. "
-                          "Elapsed:%(elapsed).3f",
-                          {'iter_num': self.iter_num,
-                           'elapsed': time.time() - start})
-            else:
-                ancillary_port_info = {}
-
-        else:
-            consecutive_resyncs = 0
-            events = polling_manager.get_events()
-            port_info, ancillary_port_info, ports_not_ready_yet = (
-                self.process_ports_events(events, ports, ancillary_ports,
-                                          ports_not_ready_yet,
-                                          updated_ports_copy))
-        return (port_info, ancillary_port_info, consecutive_resyncs,
-                ports_not_ready_yet)
-
-    def rpc_loop(self, polling_manager=None):
-        if not polling_manager:
-            polling_manager = polling.get_polling_manager(
-                minimize_polling=False)
-
-        sync = False
-        ports = set()
-        updated_ports_copy = set()
-        ancillary_ports = set()
-        tunnel_sync = True
-        ovs_restarted = False
-        consecutive_resyncs = 0
-        need_clean_stale_flow = True
-        ports_not_ready_yet = set()
-        while self._check_and_handle_signal():
-            if self.fullsync:
-                LOG.info(_LI("rpc_loop doing a full sync."))
-                sync = True
-                self.fullsync = False
-            port_info = {}
-            ancillary_port_info = {}
-            start = time.time()
-            LOG.debug("Agent rpc_loop - iteration:%d started",
-                      self.iter_num)
-            ovs_status = self.check_ovs_status()
-            if ovs_status == constants.OVS_RESTARTED:
-                self.setup_integration_br()
-                self.setup_physical_bridges(self.bridge_mappings)
-                if self.enable_tunneling:
-                    self.setup_tunnel_br()
-                    self.setup_tunnel_br_flows()
-                    tunnel_sync = True
-                if self.enable_distributed_routing:
-                    self.dvr_agent.reset_ovs_parameters(self.int_br,
-                                                        self.tun_br,
-                                                        self.patch_int_ofport,
-                                                        self.patch_tun_ofport)
-                    self.dvr_agent.reset_dvr_parameters()
-                    self.dvr_agent.setup_dvr_flows()
-                # restart the polling manager so that it signals all the
-                # current ports as added
-                # REVISIT (rossella_s) Define a method "reset" in
-                # BasePollingManager that will be implemented by AlwaysPoll as
-                # no action and by InterfacePollingMinimizer as start/stop
-                if isinstance(
-                    polling_manager, linux_polling.InterfacePollingMinimizer):
-                    polling_manager.stop()
-                    polling_manager.start()
-            elif ovs_status == constants.OVS_DEAD:
-                # The agent doesn't apply any operations when ovs is dead,
-                # to prevent unexpected failures or crashes. Sleep and
-                # continue the loop, in which the ovs status will be checked
-                # periodically.
-                port_stats = self.get_port_stats({}, {})
-                self.loop_count_and_wait(start, port_stats)
-                continue
-            # Notify the plugin of tunnel IP
-            if self.enable_tunneling and tunnel_sync:
-                LOG.info(_LI("Agent tunnel out of sync with plugin!"))
-                try:
-                    tunnel_sync = self.tunnel_sync()
-                except Exception:
-                    LOG.exception(_LE("Error while synchronizing tunnels"))
-                    tunnel_sync = True
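-            # Keep the restart flag sticky across iterations; it is only
-            # cleared below once an iteration completes without a pending
-            # resync.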
-            ovs_restarted |= (ovs_status == constants.OVS_RESTARTED)
-            if (self._agent_has_updates(polling_manager) or sync
-                    or ports_not_ready_yet):
-                try:
-                    LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
-                              "starting polling. Elapsed:%(elapsed).3f",
-                              {'iter_num': self.iter_num,
-                               'elapsed': time.time() - start})
-                    # Save the updated ports dict to perform a rollback in
-                    # case a resync is needed, and then clear
-                    # self.updated_ports. As the greenthread should not yield
-                    # between these two statements, this is thread-safe.
-                    updated_ports_copy = self.updated_ports
-                    self.updated_ports = set()
-                    (port_info, ancillary_port_info, consecutive_resyncs,
-                     ports_not_ready_yet) = (self.process_port_info(
-                            start, polling_manager, sync, ovs_restarted,
-                            ports, ancillary_ports, updated_ports_copy,
-                            consecutive_resyncs, ports_not_ready_yet)
-                    )
-                    sync = False
-                    self.process_deleted_ports(port_info)
-                    ofport_changed_ports = self.update_stale_ofport_rules()
-                    if ofport_changed_ports:
-                        port_info.setdefault('updated', set()).update(
-                            ofport_changed_ports)
-                    LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
-                              "port information retrieved. "
-                              "Elapsed:%(elapsed).3f",
-                              {'iter_num': self.iter_num,
-                               'elapsed': time.time() - start})
-                    # Secure and wire/unwire VIFs and update their status
-                    # on Neutron server
-                    if (self._port_info_has_changes(port_info) or
-                        self.sg_agent.firewall_refresh_needed() or
-                        ovs_restarted):
-                        LOG.debug("Starting to process devices in:%s",
-                                  port_info)
-                        # If treat devices fails - must resync with plugin
-                        sync = self.process_network_ports(port_info,
-                                                          ovs_restarted)
-                        if not sync and need_clean_stale_flow:
-                            self.cleanup_stale_flows()
-                            need_clean_stale_flow = False
-                        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
-                                  "ports processed. Elapsed:%(elapsed).3f",
-                                  {'iter_num': self.iter_num,
-                                   'elapsed': time.time() - start})
-
-                    ports = port_info['current']
-
-                    if self.ancillary_brs:
-                        sync |= self.process_ancillary_network_ports(
-                            ancillary_port_info)
-                        LOG.debug("Agent rpc_loop - iteration: "
-                                  "%(iter_num)d - ancillary ports "
-                                  "processed. Elapsed:%(elapsed).3f",
-                                  {'iter_num': self.iter_num,
-                                   'elapsed': time.time() - start})
-                        ancillary_ports = ancillary_port_info['current']
-
-                    polling_manager.polling_completed()
-                    # Keep this flag in the last line of the "try" block,
-                    # so we can be sure that no other exception occurred.
-                    if not sync:
-                        ovs_restarted = False
-                        self._dispose_local_vlan_hints()
-                except Exception:
-                    LOG.exception(_LE("Error while processing VIF ports"))
-                    # Put the ports back in self.updated_ports
-                    self.updated_ports |= updated_ports_copy
-                    sync = True
-            port_stats = self.get_port_stats(port_info, ancillary_port_info)
-            self.loop_count_and_wait(start, port_stats)
-
-    def daemon_loop(self):
-        # Start everything.
-        LOG.info(_LI("Agent initialized successfully, now running... "))
-        signal.signal(signal.SIGTERM, self._handle_sigterm)
-        if hasattr(signal, 'SIGHUP'):
-            signal.signal(signal.SIGHUP, self._handle_sighup)
-        with polling.get_polling_manager(
-            self.minimize_polling,
-            self.ovsdb_monitor_respawn_interval) as pm:
-
-            self.rpc_loop(polling_manager=pm)
-
-    def _handle_sigterm(self, signum, frame):
-        self.catch_sigterm = True
-        if self.quitting_rpc_timeout:
-            self.set_rpc_timeout(self.quitting_rpc_timeout)
-
-    def _handle_sighup(self, signum, frame):
-        self.catch_sighup = True
-
-    def _check_and_handle_signal(self):
-        if self.catch_sigterm:
-            LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop."))
-            self.run_daemon_loop = False
-            self.catch_sigterm = False
-        if self.catch_sighup:
-            LOG.info(_LI("Agent caught SIGHUP, resetting."))
-            self.conf.reload_config_files()
-            config.setup_logging()
-            LOG.debug('Full set of CONF:')
-            self.conf.log_opt_values(LOG, logging.DEBUG)
-            self.catch_sighup = False
-        return self.run_daemon_loop
-
-    def set_rpc_timeout(self, timeout):
-        for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
-                        self.dvr_plugin_rpc, self.state_rpc):
-            rpc_api.client.timeout = timeout
-
-    def _check_agent_configurations(self):
-        if (self.enable_distributed_routing and self.enable_tunneling
-            and not self.l2_pop):
-
-            raise ValueError(_("DVR deployments for VXLAN/GRE/Geneve "
-                               "underlays require L2-pop to be enabled, "
-                               "in both the Agent and Server side."))
-
-
-def validate_local_ip(local_ip):
-    """Verify if the ip exists on the agent's host."""
-    if not ip_lib.IPWrapper().get_device_by_ip(local_ip):
-        LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'."
-                      " IP couldn't be found on this host's interfaces."),
-                  local_ip)
-        raise SystemExit(1)
-
-
-def validate_tunnel_config(tunnel_types, local_ip):
-    """Verify local ip and tunnel config if tunneling is enabled."""
-    if not tunnel_types:
-        return
-
-    validate_local_ip(local_ip)
-    for tun in tunnel_types:
-        if tun not in constants.TUNNEL_NETWORK_TYPES:
-            LOG.error(_LE('Invalid tunnel type specified: %s'), tun)
-            raise SystemExit(1)
-
-
-def prepare_xen_compute():
-    is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper
-    if is_xen_compute_host:
-        # Force ip_lib to always use the root helper to ensure that ip
-        # commands target xen dom0 rather than domU.
-        cfg.CONF.register_opts(ip_lib.OPTS)
-        cfg.CONF.set_default('ip_lib_force_root', True)
-
-
-def main(bridge_classes):
-    prepare_xen_compute()
-    validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip)
-
-    try:
-        agent = OVSNeutronAgent(bridge_classes, cfg.CONF)
-    except (RuntimeError, ValueError) as e:
-        LOG.error(_LE("%s Agent terminated!"), e)
-        sys.exit(1)
-    agent.daemon_loop()
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/README b/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/README
deleted file mode 100644 (file)
index 0a02c99..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-This directory contains files that are required for XenAPI support.
-They should be installed in the XenServer / Xen Cloud Platform dom0.
-
-If you install them manually, you will need to ensure that the newly
-added files are executable. You can do this by running the following
-command (from dom0):
-
-    chmod a+x /etc/xapi.d/plugins/*
-
-Alternatively, you can build an RPM by running the following command:
-
-    ./contrib/build-rpm.sh
-
-and install the rpm by running the following command (from dom0):
-
-    rpm -i openstack-neutron-xen-plugins.rpm
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/build-rpm.sh b/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/build-rpm.sh
deleted file mode 100755 (executable)
index 750bdde..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-thisdir=$(dirname $(readlink -f "$0"))
-export NEUTRON_ROOT="$thisdir/../../../../../../"
-export PYTHONPATH=$NEUTRON_ROOT
-
-cd $NEUTRON_ROOT
-VERSION=$(sh -c "(cat $NEUTRON_ROOT/neutron/version.py; \
-                  echo 'print version_info.release_string()') | \
-                  python")
-cd -
-
-PACKAGE=openstack-neutron-xen-plugins
-RPMBUILD_DIR=$PWD/rpmbuild
-if [ ! -d "$RPMBUILD_DIR" ]; then
-    echo $RPMBUILD_DIR is missing
-    exit 1
-fi
-
-for dir in BUILD BUILDROOT SRPMS RPMS SOURCES; do
-    rm -rf $RPMBUILD_DIR/$dir
-    mkdir -p $RPMBUILD_DIR/$dir
-done
-
-rm -rf /tmp/$PACKAGE
-mkdir /tmp/$PACKAGE
-cp -r ../etc/xapi.d /tmp/$PACKAGE
-tar czf $RPMBUILD_DIR/SOURCES/$PACKAGE.tar.gz -C /tmp $PACKAGE
-
-rpmbuild -ba --nodeps --define "_topdir $RPMBUILD_DIR"  \
-    --define "version $VERSION" \
-    $RPMBUILD_DIR/SPECS/$PACKAGE.spec
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec b/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec
deleted file mode 100644 (file)
index 8ba03ea..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-Name:           openstack-neutron-xen-plugins
-Version:        %{version}
-Release:        1
-Summary:        Files for XenAPI support.
-License:        ASL 2.0
-Group:          Applications/Utilities
-Source0:        openstack-neutron-xen-plugins.tar.gz
-BuildArch:      noarch
-BuildRoot:      %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-
-%define debug_package %{nil}
-
-%description
-This package contains files that are required for XenAPI support for Neutron.
-
-%prep
-%setup -q -n openstack-neutron-xen-plugins
-
-%install
-rm -rf $RPM_BUILD_ROOT
-mkdir -p $RPM_BUILD_ROOT/etc
-cp -r xapi.d $RPM_BUILD_ROOT/etc
-chmod a+x $RPM_BUILD_ROOT/etc/xapi.d/plugins/*
-
-%clean
-rm -rf $RPM_BUILD_ROOT
-
-%files
-%defattr(-,root,root,-)
-/etc/xapi.d/plugins/*
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap b/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap
deleted file mode 100644 (file)
index ca5d1c2..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 OpenStack Foundation
-# Copyright 2012 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-#
-# XenAPI plugin for executing network commands (ovs, iptables, etc) on dom0
-#
-
-import gettext
-gettext.install('neutron', unicode=1)
-try:
-    import json
-except ImportError:
-    import simplejson as json
-import subprocess
-
-import XenAPIPlugin
-
-
-ALLOWED_CMDS = [
-    'ip',
-    'ipset',
-    'iptables-save',
-    'iptables-restore',
-    'ip6tables-save',
-    'ip6tables-restore',
-    'sysctl',
-    # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
-    'ovs-ofctl',
-    'ovs-vsctl',
-    'ovsdb-client',
-    ]
-
-
-class PluginError(Exception):
-    """Base Exception class for all plugin errors."""
-    def __init__(self, *args):
-        Exception.__init__(self, *args)
-
-def _run_command(cmd, cmd_input):
-    """Abstracts out the basics of issuing system commands. If the command
-    returns anything in stderr, a PluginError is raised with that information.
-    Otherwise, the output from stdout is returned.
-    """
-    pipe = subprocess.PIPE
-    proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
-                            stderr=pipe, close_fds=True)
-    (out, err) = proc.communicate(cmd_input)
-
-    if proc.returncode != 0:
-        raise PluginError(err)
-    return out
-
-
-def run_command(session, args):
-    cmd = json.loads(args.get('cmd'))
-    if cmd and cmd[0] not in ALLOWED_CMDS:
-        msg = _("Dom0 execution of '%s' is not permitted") % cmd[0]
-        raise PluginError(msg)
-    result = _run_command(cmd, json.loads(args.get('cmd_input', 'null')))
-    return json.dumps(result)
-
-
-if __name__ == "__main__":
-    XenAPIPlugin.dispatch({"run_command": run_command})
diff --git a/neutron/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py b/neutron/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py b/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py
deleted file mode 100644 (file)
index 0a83524..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from neutron.agent import securitygroups_rpc
-from neutron.common import constants
-from neutron.extensions import portbindings
-from neutron.plugins.common import constants as p_constants
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers import mech_agent
-from neutron.plugins.ml2.drivers.openvswitch.agent.common \
-    import constants as a_const
-from neutron.services.qos import qos_consts
-
-
-class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
-    """Attach to networks using openvswitch L2 agent.
-
-    The OpenvswitchMechanismDriver integrates the ml2 plugin with the
-    openvswitch L2 agent. Port binding with this driver requires the
-    openvswitch agent to be running on the port's host, and that agent
-    to have connectivity to at least one segment of the port's
-    network.
-    """
-
-    supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT]
-
-    def __init__(self):
-        sg_enabled = securitygroups_rpc.is_firewall_enabled()
-        vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled,
-                       portbindings.OVS_HYBRID_PLUG: sg_enabled}
-        super(OpenvswitchMechanismDriver, self).__init__(
-            constants.AGENT_TYPE_OVS,
-            portbindings.VIF_TYPE_OVS,
-            vif_details)
-
-    def get_allowed_network_types(self, agent):
-        return (agent['configurations'].get('tunnel_types', []) +
-                [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT,
-                 p_constants.TYPE_VLAN])
-
-    def get_mappings(self, agent):
-        return agent['configurations'].get('bridge_mappings', {})
-
-    def check_vlan_transparency(self, context):
-        """Currently Openvswitch driver doesn't support vlan transparency."""
-        return False
-
-    def try_to_bind_segment_for_agent(self, context, segment, agent):
-        if self.check_segment_for_agent(segment, agent):
-            context.set_binding(segment[api.ID],
-                                self.get_vif_type(agent, context),
-                                self.get_vif_details(agent, context))
-            return True
-        else:
-            return False
-
-    def get_vif_type(self, agent, context):
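-        # Bind as vhost-user when the agent reports a DPDK (netdev)
-        # datapath that supports vhost-user interfaces; otherwise fall
-        # back to the default OVS VIF type.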
-        caps = agent['configurations'].get('ovs_capabilities', {})
-        if (a_const.OVS_DPDK_VHOST_USER in caps.get('iface_types', []) and
-                agent['configurations'].get('datapath_type') ==
-                a_const.OVS_DATAPATH_NETDEV):
-            return portbindings.VIF_TYPE_VHOST_USER
-        return self.vif_type
-
-    def get_vif_details(self, agent, context):
-        if (agent['configurations'].get('datapath_type') !=
-                a_const.OVS_DATAPATH_NETDEV):
-            return self.vif_details
-        caps = agent['configurations'].get('ovs_capabilities', {})
-        if a_const.OVS_DPDK_VHOST_USER in caps.get('iface_types', []):
-            sock_path = self.agent_vhu_sockpath(agent, context.current['id'])
-            return {
-                portbindings.CAP_PORT_FILTER: False,
-                portbindings.VHOST_USER_MODE:
-                    portbindings.VHOST_USER_MODE_CLIENT,
-                portbindings.VHOST_USER_OVS_PLUG: True,
-                portbindings.VHOST_USER_SOCKET: sock_path
-            }
-        return self.vif_details
-
-    @staticmethod
-    def agent_vhu_sockpath(agent, port_id):
-        """Return the agent's vhost-user socket path for a given port"""
-        sockdir = agent['configurations'].get('vhostuser_socket_dir',
-                                              a_const.VHOST_USER_SOCKET_DIR)
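-        # The name is truncated to 14 characters, presumably to fit the
-        # length limit on the corresponding interface name.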
-        sock_name = (constants.VHOST_USER_DEVICE_PREFIX + port_id)[:14]
-        return os.path.join(sockdir, sock_name)
diff --git a/neutron/plugins/ml2/drivers/type_flat.py b/neutron/plugins/ml2/drivers/type_flat.py
deleted file mode 100644 (file)
index c47d9c7..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log
-import six
-import sqlalchemy as sa
-
-from neutron._i18n import _, _LI, _LW
-from neutron.common import exceptions as exc
-from neutron.db import model_base
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers import helpers
-
-LOG = log.getLogger(__name__)
-
-flat_opts = [
-    cfg.ListOpt('flat_networks',
-                default='*',
-                help=_("List of physical_network names with which flat "
-                       "networks can be created. Use default '*' to allow "
-                       "flat networks with arbitrary physical_network names. "
-                       "Use an empty list to disable flat networks."))
-]
-
-cfg.CONF.register_opts(flat_opts, "ml2_type_flat")
-
-
-class FlatAllocation(model_base.BASEV2):
-    """Represent persistent allocation state of a physical network.
-
-    If a record exists for a physical network, then that physical
-    network has been allocated as a flat network.
-    """
-
-    __tablename__ = 'ml2_flat_allocations'
-
-    physical_network = sa.Column(sa.String(64), nullable=False,
-                                 primary_key=True)
-
-
-class FlatTypeDriver(helpers.BaseTypeDriver):
-    """Manage state for flat networks with ML2.
-
-    The FlatTypeDriver implements the 'flat' network_type. Flat
-    network segments provide connectivity between VMs and other
-    devices using any connected IEEE 802.1D conformant
-    physical_network, without the use of VLAN tags, tunneling, or
-    other segmentation mechanisms. Therefore at most one flat network
-    segment can exist on each available physical_network.
-    """
-
-    def __init__(self):
-        super(FlatTypeDriver, self).__init__()
-        self._parse_networks(cfg.CONF.ml2_type_flat.flat_networks)
-
-    def _parse_networks(self, entries):
-        self.flat_networks = entries
-        if '*' in self.flat_networks:
-            LOG.info(_LI("Arbitrary flat physical_network names allowed"))
-            self.flat_networks = None
-        elif not self.flat_networks:
-            LOG.info(_LI("Flat networks are disabled"))
-        else:
-            LOG.info(_LI("Allowable flat physical_network names: %s"),
-                     self.flat_networks)
-
-    def get_type(self):
-        return p_const.TYPE_FLAT
-
-    def initialize(self):
-        LOG.info(_LI("ML2 FlatTypeDriver initialization complete"))
-
-    def is_partial_segment(self, segment):
-        return False
-
-    def validate_provider_segment(self, segment):
-        physical_network = segment.get(api.PHYSICAL_NETWORK)
-        if not physical_network:
-            msg = _("physical_network required for flat provider network")
-            raise exc.InvalidInput(error_message=msg)
-        if self.flat_networks is not None and not self.flat_networks:
-            msg = _("Flat provider networks are disabled")
-            raise exc.InvalidInput(error_message=msg)
-        if self.flat_networks and physical_network not in self.flat_networks:
-            msg = (_("physical_network '%s' unknown for flat provider network")
-                   % physical_network)
-            raise exc.InvalidInput(error_message=msg)
-
-        for key, value in six.iteritems(segment):
-            if value and key not in [api.NETWORK_TYPE,
-                                     api.PHYSICAL_NETWORK]:
-                msg = _("%s prohibited for flat provider network") % key
-                raise exc.InvalidInput(error_message=msg)
-
-    def reserve_provider_segment(self, session, segment):
-        physical_network = segment[api.PHYSICAL_NETWORK]
-        with session.begin(subtransactions=True):
-            try:
-                LOG.debug("Reserving flat network on physical "
-                          "network %s", physical_network)
-                alloc = FlatAllocation(physical_network=physical_network)
-                alloc.save(session)
-            except db_exc.DBDuplicateEntry:
-                raise exc.FlatNetworkInUse(
-                    physical_network=physical_network)
-            segment[api.MTU] = self.get_mtu(alloc.physical_network)
-        return segment
-
-    def allocate_tenant_segment(self, session):
-        # Tenant flat networks are not supported.
-        return
-
-    def release_segment(self, session, segment):
-        physical_network = segment[api.PHYSICAL_NETWORK]
-        with session.begin(subtransactions=True):
-            count = (session.query(FlatAllocation).
-                     filter_by(physical_network=physical_network).
-                     delete())
-        if count:
-            LOG.debug("Releasing flat network on physical network %s",
-                      physical_network)
-        else:
-            LOG.warning(_LW("No flat network found on physical network %s"),
-                        physical_network)
-
-    def get_mtu(self, physical_network):
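-        # Return the smallest of the global segment MTU and the
-        # per-physical-network MTU; 0 means no MTU is known.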
-        seg_mtu = super(FlatTypeDriver, self).get_mtu()
-        mtu = []
-        if seg_mtu > 0:
-            mtu.append(seg_mtu)
-        if physical_network in self.physnet_mtus:
-            mtu.append(int(self.physnet_mtus[physical_network]))
-        return min(mtu) if mtu else 0
diff --git a/neutron/plugins/ml2/drivers/type_geneve.py b/neutron/plugins/ml2/drivers/type_geneve.py
deleted file mode 100644 (file)
index 17f44b4..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-import sqlalchemy as sa
-from sqlalchemy import sql
-
-from neutron._i18n import _, _LE
-from neutron.common import exceptions as n_exc
-from neutron.db import model_base
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers import type_tunnel
-
-LOG = log.getLogger(__name__)
-
-geneve_opts = [
-    cfg.ListOpt('vni_ranges',
-                default=[],
-                help=_("Comma-separated list of <vni_min>:<vni_max> tuples "
-                       "enumerating ranges of Geneve VNI IDs that are "
-                       "available for tenant network allocation")),
-    cfg.IntOpt('max_header_size',
-               default=p_const.GENEVE_ENCAP_MIN_OVERHEAD,
-               help=_("Geneve encapsulation header size is dynamic, this "
-                      "value is used to calculate the maximum MTU "
-                      "for the driver. "
-                      "This is the sum of the sizes of the outer "
-                      "ETH + IP + UDP + GENEVE header sizes. "
-                      "The default size for this field is 50, which is the "
-                      "size of the Geneve header without any additional "
-                      "option headers.")),
-]
-
-cfg.CONF.register_opts(geneve_opts, "ml2_type_geneve")
-
-
-class GeneveAllocation(model_base.BASEV2):
-
-    __tablename__ = 'ml2_geneve_allocations'
-
-    geneve_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
-                           autoincrement=False)
-    allocated = sa.Column(sa.Boolean, nullable=False, default=False,
-                          server_default=sql.false(), index=True)
-
-
-class GeneveEndpoints(model_base.BASEV2):
-    """Represents tunnel endpoint in RPC mode."""
-
-    __tablename__ = 'ml2_geneve_endpoints'
-    __table_args__ = (
-        sa.UniqueConstraint('host',
-                            name='unique_ml2_geneve_endpoints0host'),
-        model_base.BASEV2.__table_args__
-    )
-    ip_address = sa.Column(sa.String(64), primary_key=True)
-    host = sa.Column(sa.String(255), nullable=True)
-
-    def __repr__(self):
-        return "<GeneveTunnelEndpoint(%s)>" % self.ip_address
-
-
-class GeneveTypeDriver(type_tunnel.EndpointTunnelTypeDriver):
-
-    def __init__(self):
-        super(GeneveTypeDriver, self).__init__(GeneveAllocation,
-                                               GeneveEndpoints)
-        self.max_encap_size = cfg.CONF.ml2_type_geneve.max_header_size
-
-    def get_type(self):
-        return p_const.TYPE_GENEVE
-
-    def initialize(self):
-        try:
-            self._initialize(cfg.CONF.ml2_type_geneve.vni_ranges)
-        except n_exc.NetworkTunnelRangeError:
-            LOG.error(_LE("Failed to parse vni_ranges. "
-                          "Service terminated!"))
-            raise SystemExit()
-
-    def get_endpoints(self):
-        """Get every geneve endpoints from database."""
-        geneve_endpoints = self._get_endpoints()
-        return [{'ip_address': geneve_endpoint.ip_address,
-                 'host': geneve_endpoint.host}
-                for geneve_endpoint in geneve_endpoints]
-
-    def add_endpoint(self, ip, host):
-        return self._add_endpoint(ip, host)
-
-    def get_mtu(self, physical_network=None):
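-        # The usable MTU is the base MTU minus the configured Geneve
-        # encapsulation overhead; 0 means no MTU is known.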
-        mtu = super(GeneveTypeDriver, self).get_mtu()
-        return mtu - self.max_encap_size if mtu else 0
diff --git a/neutron/plugins/ml2/drivers/type_gre.py b/neutron/plugins/ml2/drivers/type_gre.py
deleted file mode 100644 (file)
index bec904b..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-import sqlalchemy as sa
-from sqlalchemy import sql
-
-from neutron._i18n import _, _LE
-from neutron.common import exceptions as n_exc
-from neutron.db import model_base
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers import type_tunnel
-
-LOG = log.getLogger(__name__)
-
-gre_opts = [
-    cfg.ListOpt('tunnel_id_ranges',
-                default=[],
-                help=_("Comma-separated list of <tun_min>:<tun_max> tuples "
-                       "enumerating ranges of GRE tunnel IDs that are "
-                       "available for tenant network allocation"))
-]
-
-cfg.CONF.register_opts(gre_opts, "ml2_type_gre")
-
-
-class GreAllocation(model_base.BASEV2):
-
-    __tablename__ = 'ml2_gre_allocations'
-
-    gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
-                       autoincrement=False)
-    allocated = sa.Column(sa.Boolean, nullable=False, default=False,
-                          server_default=sql.false(), index=True)
-
-
-class GreEndpoints(model_base.BASEV2):
-    """Represents tunnel endpoint in RPC mode."""
-
-    __tablename__ = 'ml2_gre_endpoints'
-    __table_args__ = (
-        sa.UniqueConstraint('host',
-                            name='unique_ml2_gre_endpoints0host'),
-        model_base.BASEV2.__table_args__
-    )
-    ip_address = sa.Column(sa.String(64), primary_key=True)
-    host = sa.Column(sa.String(255), nullable=True)
-
-    def __repr__(self):
-        return "<GreTunnelEndpoint(%s)>" % self.ip_address
-
-
-class GreTypeDriver(type_tunnel.EndpointTunnelTypeDriver):
-
-    def __init__(self):
-        super(GreTypeDriver, self).__init__(
-            GreAllocation, GreEndpoints)
-
-    def get_type(self):
-        return p_const.TYPE_GRE
-
-    def initialize(self):
-        try:
-            self._initialize(cfg.CONF.ml2_type_gre.tunnel_id_ranges)
-        except n_exc.NetworkTunnelRangeError:
-            LOG.exception(_LE("Failed to parse tunnel_id_ranges. "
-                              "Service terminated!"))
-            raise SystemExit()
-
-    def get_endpoints(self):
-        """Get every gre endpoints from database."""
-        gre_endpoints = self._get_endpoints()
-        return [{'ip_address': gre_endpoint.ip_address,
-                 'host': gre_endpoint.host}
-                for gre_endpoint in gre_endpoints]
-
-    def add_endpoint(self, ip, host):
-        return self._add_endpoint(ip, host)
-
-    def get_mtu(self, physical_network=None):
-        mtu = super(GreTypeDriver, self).get_mtu(physical_network)
-        return mtu - p_const.GRE_ENCAP_OVERHEAD if mtu else 0
diff --git a/neutron/plugins/ml2/drivers/type_local.py b/neutron/plugins/ml2/drivers/type_local.py
deleted file mode 100644 (file)
index 610878e..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log
-import six
-
-from neutron._i18n import _, _LI
-from neutron.common import exceptions as exc
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2 import driver_api as api
-
-LOG = log.getLogger(__name__)
-
-
-class LocalTypeDriver(api.TypeDriver):
-    """Manage state for local networks with ML2.
-
-    The LocalTypeDriver implements the 'local' network_type. Local
-    network segments provide connectivity between VMs and other
-    devices running on the same node, provided that a common local
-    network bridging technology is available to those devices. Local
-    network segments do not provide any connectivity between nodes.
-    """
-
-    def __init__(self):
-        LOG.info(_LI("ML2 LocalTypeDriver initialization complete"))
-
-    def get_type(self):
-        return p_const.TYPE_LOCAL
-
-    def initialize(self):
-        pass
-
-    def is_partial_segment(self, segment):
-        return False
-
-    def validate_provider_segment(self, segment):
-        for key, value in six.iteritems(segment):
-            if value and key != api.NETWORK_TYPE:
-                msg = _("%s prohibited for local provider network") % key
-                raise exc.InvalidInput(error_message=msg)
-
-    def reserve_provider_segment(self, session, segment):
-        # No resources to reserve
-        return segment
-
-    def allocate_tenant_segment(self, session):
-        # No resources to allocate
-        return {api.NETWORK_TYPE: p_const.TYPE_LOCAL}
-
-    def release_segment(self, session, segment):
-        # No resources to release
-        pass
-
-    def get_mtu(self, physical_network=None):
-        pass
diff --git a/neutron/plugins/ml2/drivers/type_tunnel.py b/neutron/plugins/ml2/drivers/type_tunnel.py
deleted file mode 100644 (file)
index 5e9e2df..0000000
+++ /dev/null
@@ -1,410 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import abc
-import itertools
-import operator
-
-from oslo_config import cfg
-from oslo_db import api as oslo_db_api
-from oslo_db import exception as db_exc
-from oslo_log import log
-from six import moves
-from sqlalchemy import or_
-
-from neutron._i18n import _, _LI, _LW
-from neutron.common import exceptions as exc
-from neutron.common import topics
-from neutron.db import api as db_api
-from neutron.plugins.common import utils as plugin_utils
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers import helpers
-
-LOG = log.getLogger(__name__)
-
-TUNNEL = 'tunnel'
-
-
-def chunks(iterable, chunk_size):
-    """Chunks data into chunk with size<=chunk_size."""
-    iterator = iter(iterable)
-    chunk = list(itertools.islice(iterator, 0, chunk_size))
-    while chunk:
-        yield chunk
-        chunk = list(itertools.islice(iterator, 0, chunk_size))
-
-
-class TunnelTypeDriver(helpers.SegmentTypeDriver):
-    """Define stable abstract interface for ML2 type drivers.
-
-    tunnel type networks rely on tunnel endpoints. This class defines abstract
-    methods to manage these endpoints.
-    """
-    BULK_SIZE = 100
-
-    def __init__(self, model):
-        super(TunnelTypeDriver, self).__init__(model)
-        self.segmentation_key = next(iter(self.primary_keys))
-
-    @abc.abstractmethod
-    def add_endpoint(self, ip, host):
-        """Register the endpoint in the type_driver database.
-
-        :param ip: the IP address of the endpoint
-        :param host: the host name of the endpoint
-        """
-
-    @abc.abstractmethod
-    def get_endpoints(self):
-        """Get every endpoint managed by the type_driver
-
-        :returns a list of dict [{ip_address:endpoint_ip, host:endpoint_host},
-        ..]
-        """
-
-    @abc.abstractmethod
-    def get_endpoint_by_host(self, host):
-        """Get endpoint for a given host managed by the type_driver
-
-        param host: the Host name of the endpoint
-
-        if host found in type_driver database
-           :returns db object for that particular host
-        else
-           :returns None
-        """
-
-    @abc.abstractmethod
-    def get_endpoint_by_ip(self, ip):
-        """Get endpoint for a given tunnel ip managed by the type_driver
-
-        param ip: the IP address of the endpoint
-
-        if ip found in type_driver database
-           :returns db object for that particular ip
-        else
-           :returns None
-        """
-
-    @abc.abstractmethod
-    def delete_endpoint(self, ip):
-        """Delete the endpoint in the type_driver database.
-
-        :param ip: the IP address of the endpoint
-        """
-
-    @abc.abstractmethod
-    def delete_endpoint_by_host_or_ip(self, host, ip):
-        """Delete the endpoint in the type_driver database.
-
-        This function will delete any endpoint matching the specified
-        IP address or host name.
-
-        :param host: the host name of the endpoint
-        :param ip: the IP address of the endpoint
-        """
-
-    def _initialize(self, raw_tunnel_ranges):
-        self.tunnel_ranges = []
-        self._parse_tunnel_ranges(raw_tunnel_ranges, self.tunnel_ranges)
-        self.sync_allocations()
-
-    def _parse_tunnel_ranges(self, tunnel_ranges, current_range):
-        for entry in tunnel_ranges:
-            entry = entry.strip()
-            try:
-                tun_min, tun_max = entry.split(':')
-                tun_min = tun_min.strip()
-                tun_max = tun_max.strip()
-                tunnel_range = int(tun_min), int(tun_max)
-            except ValueError as ex:
-                raise exc.NetworkTunnelRangeError(tunnel_range=entry, error=ex)
-            plugin_utils.verify_tunnel_range(tunnel_range, self.get_type())
-            current_range.append(tunnel_range)
-        LOG.info(_LI("%(type)s ID ranges: %(range)s"),
-                 {'type': self.get_type(), 'range': current_range})
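
Each configured range entry is expected to have the form "<min>:<max>";
surrounding whitespace is tolerated, and anything else raises
NetworkTunnelRangeError. A hypothetical illustration of what the parser
appends to current_range, assuming driver is a concrete TunnelTypeDriver:

    ranges = []
    # e.g. from [ml2_type_vxlan] vni_ranges = 100:200,300:400
    driver._parse_tunnel_ranges(['100:200', ' 300:400 '], ranges)
    # ranges == [(100, 200), (300, 400)]; an entry like '100-200'
    # would raise NetworkTunnelRangeError instead
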
-
-    @oslo_db_api.wrap_db_retry(
-        max_retries=db_api.MAX_RETRIES,
-        exception_checker=db_api.is_deadlock)
-    def sync_allocations(self):
-        # determine current configured allocatable tunnel ids
-        tunnel_ids = set()
-        for tun_min, tun_max in self.tunnel_ranges:
-            tunnel_ids |= set(moves.range(tun_min, tun_max + 1))
-
-        tunnel_id_getter = operator.attrgetter(self.segmentation_key)
-        tunnel_col = getattr(self.model, self.segmentation_key)
-        session = db_api.get_session()
-        with session.begin(subtransactions=True):
-            # remove from table unallocated tunnels not currently allocatable
-            # fetch results as list via all() because we'll be iterating
-            # through them twice
-            allocs = (session.query(self.model).
-                      with_lockmode("update").all())
-
-            # collect the tunnel IDs that need to be deleted from the DB
-            unallocateds = (
-                tunnel_id_getter(a) for a in allocs if not a.allocated)
-            to_remove = (x for x in unallocateds if x not in tunnel_ids)
-            # Immediately delete tunnels in chunks. This leaves no work for
-            # the flush at the end of the transaction.
-            for chunk in chunks(to_remove, self.BULK_SIZE):
-                session.query(self.model).filter(
-                    tunnel_col.in_(chunk)).delete(synchronize_session=False)
-
-            # collect the tunnel IDs that need to be added
-            existings = {tunnel_id_getter(a) for a in allocs}
-            missings = list(tunnel_ids - existings)
-            for chunk in chunks(missings, self.BULK_SIZE):
-                bulk = [{self.segmentation_key: x, 'allocated': False}
-                        for x in chunk]
-                session.execute(self.model.__table__.insert(), bulk)
-
-    def is_partial_segment(self, segment):
-        return segment.get(api.SEGMENTATION_ID) is None
-
-    def validate_provider_segment(self, segment):
-        physical_network = segment.get(api.PHYSICAL_NETWORK)
-        if physical_network:
-            msg = _("provider:physical_network specified for %s "
-                    "network") % segment.get(api.NETWORK_TYPE)
-            raise exc.InvalidInput(error_message=msg)
-
-        for key, value in segment.items():
-            if value and key not in [api.NETWORK_TYPE,
-                                     api.SEGMENTATION_ID]:
-                msg = (_("%(key)s prohibited for %(tunnel)s provider network"),
-                       {'key': key, 'tunnel': segment.get(api.NETWORK_TYPE)})
-                raise exc.InvalidInput(error_message=msg)
-
-    def reserve_provider_segment(self, session, segment):
-        if self.is_partial_segment(segment):
-            alloc = self.allocate_partially_specified_segment(session)
-            if not alloc:
-                raise exc.NoNetworkAvailable()
-        else:
-            segmentation_id = segment.get(api.SEGMENTATION_ID)
-            alloc = self.allocate_fully_specified_segment(
-                session, **{self.segmentation_key: segmentation_id})
-            if not alloc:
-                raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
-        return {api.NETWORK_TYPE: self.get_type(),
-                api.PHYSICAL_NETWORK: None,
-                api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key),
-                api.MTU: self.get_mtu()}
-
-    def allocate_tenant_segment(self, session):
-        alloc = self.allocate_partially_specified_segment(session)
-        if not alloc:
-            return
-        return {api.NETWORK_TYPE: self.get_type(),
-                api.PHYSICAL_NETWORK: None,
-                api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key),
-                api.MTU: self.get_mtu()}
-
-    def release_segment(self, session, segment):
-        tunnel_id = segment[api.SEGMENTATION_ID]
-
-        inside = any(lo <= tunnel_id <= hi for lo, hi in self.tunnel_ranges)
-
-        info = {'type': self.get_type(), 'id': tunnel_id}
-        with session.begin(subtransactions=True):
-            query = (session.query(self.model).
-                     filter_by(**{self.segmentation_key: tunnel_id}))
-            if inside:
-                count = query.update({"allocated": False})
-                if count:
-                    LOG.debug("Releasing %(type)s tunnel %(id)s to pool",
-                              info)
-            else:
-                count = query.delete()
-                if count:
-                    LOG.debug("Releasing %(type)s tunnel %(id)s outside pool",
-                              info)
-
-        if not count:
-            LOG.warning(_LW("%(type)s tunnel %(id)s not found"), info)
-
-    def get_allocation(self, session, tunnel_id):
-        return (session.query(self.model).
-                filter_by(**{self.segmentation_key: tunnel_id}).
-                first())
-
-    def get_mtu(self, physical_network=None):
-        seg_mtu = super(TunnelTypeDriver, self).get_mtu()
-        mtu = []
-        if seg_mtu > 0:
-            mtu.append(seg_mtu)
-        if cfg.CONF.ml2.path_mtu > 0:
-            mtu.append(cfg.CONF.ml2.path_mtu)
-        return min(mtu) if mtu else 0
-
-
-class EndpointTunnelTypeDriver(TunnelTypeDriver):
-
-    def __init__(self, segment_model, endpoint_model):
-        super(EndpointTunnelTypeDriver, self).__init__(segment_model)
-        self.endpoint_model = endpoint_model
-        self.segmentation_key = next(iter(self.primary_keys))
-
-    def get_endpoint_by_host(self, host):
-        LOG.debug("get_endpoint_by_host() called for host %s", host)
-        session = db_api.get_session()
-        return (session.query(self.endpoint_model).
-                filter_by(host=host).first())
-
-    def get_endpoint_by_ip(self, ip):
-        LOG.debug("get_endpoint_by_ip() called for ip %s", ip)
-        session = db_api.get_session()
-        return (session.query(self.endpoint_model).
-                filter_by(ip_address=ip).first())
-
-    def delete_endpoint(self, ip):
-        LOG.debug("delete_endpoint() called for ip %s", ip)
-        session = db_api.get_session()
-        with session.begin(subtransactions=True):
-            (session.query(self.endpoint_model).
-             filter_by(ip_address=ip).delete())
-
-    def delete_endpoint_by_host_or_ip(self, host, ip):
-        LOG.debug("delete_endpoint_by_host_or_ip() called for "
-                  "host %(host)s or %(ip)s", {'host': host, 'ip': ip})
-        session = db_api.get_session()
-        with session.begin(subtransactions=True):
-            session.query(self.endpoint_model).filter(
-                    or_(self.endpoint_model.host == host,
-                        self.endpoint_model.ip_address == ip)).delete()
-
-    def _get_endpoints(self):
-        LOG.debug("_get_endpoints() called")
-        session = db_api.get_session()
-        return session.query(self.endpoint_model)
-
-    def _add_endpoint(self, ip, host, **kwargs):
-        LOG.debug("_add_endpoint() called for ip %s", ip)
-        session = db_api.get_session()
-        try:
-            endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs)
-            endpoint.save(session)
-        except db_exc.DBDuplicateEntry:
-            endpoint = (session.query(self.endpoint_model).
-                        filter_by(ip_address=ip).one())
-            LOG.warning(_LW("Endpoint with ip %s already exists"), ip)
-        return endpoint
-
-
-class TunnelRpcCallbackMixin(object):
-
-    def setup_tunnel_callback_mixin(self, notifier, type_manager):
-        self._notifier = notifier
-        self._type_manager = type_manager
-
-    def tunnel_sync(self, rpc_context, **kwargs):
-        """Update new tunnel.
-
-        Updates the database with the tunnel IP. All listening agents will also
-        be notified about the new tunnel IP.
-        """
-        tunnel_ip = kwargs.get('tunnel_ip')
-        if not tunnel_ip:
-            msg = _("Tunnel IP value needed by the ML2 plugin")
-            raise exc.InvalidInput(error_message=msg)
-
-        tunnel_type = kwargs.get('tunnel_type')
-        if not tunnel_type:
-            msg = _("Network type value needed by the ML2 plugin")
-            raise exc.InvalidInput(error_message=msg)
-
-        host = kwargs.get('host')
-        driver = self._type_manager.drivers.get(tunnel_type)
-        if driver:
-            # The conditional statements below handle the following
-            # cases:
-            # 1. If host is not passed from an agent, it is a legacy mode.
-            # 2. If passed host and tunnel_ip are not found in the DB,
-            #    it is a new endpoint.
-            # 3. If host is passed from an agent and it is not found in DB
-            #    but the passed tunnel_ip is found, delete the endpoint
-            #    from DB and add the endpoint with (tunnel_ip, host),
-            #    it is an upgrade case.
-            # 4. If passed host is found in DB and passed tunnel ip is not
-            #    found, delete the endpoint belonging to that host and
-            #    add endpoint with latest (tunnel_ip, host), it is a case
-            #    where local_ip of an agent got changed.
-            # 5. If the passed host had another IP in the DB, the host has
-            #    roamed to a different IP: delete any reference to the new
-            #    local_ip or the host ID. Don't notify tunnel_delete for the
-            #    old IP, since that one could have been taken by a different
-            #    agent host (neutron-ovs-cleanup should be used to clean up
-            #    the stale endpoints).
-            #    Finally, create a new endpoint for the (tunnel_ip, host).
-            if host:
-                host_endpoint = driver.obj.get_endpoint_by_host(host)
-                ip_endpoint = driver.obj.get_endpoint_by_ip(tunnel_ip)
-
-                if (ip_endpoint and ip_endpoint.host is None
-                    and host_endpoint is None):
-                    driver.obj.delete_endpoint(ip_endpoint.ip_address)
-                elif (ip_endpoint and ip_endpoint.host != host):
-                    LOG.info(
-                        _LI("Tunnel IP %(ip)s was used by host %(host)s and "
-                            "will be assigned to %(new_host)s"),
-                        {'ip': ip_endpoint.ip_address,
-                         'host': ip_endpoint.host,
-                         'new_host': host})
-                    driver.obj.delete_endpoint_by_host_or_ip(
-                        host, ip_endpoint.ip_address)
-                elif (host_endpoint and host_endpoint.ip_address != tunnel_ip):
-                    # Notify all other listening agents to delete stale tunnels
-                    self._notifier.tunnel_delete(rpc_context,
-                        host_endpoint.ip_address, tunnel_type)
-                    driver.obj.delete_endpoint(host_endpoint.ip_address)
-
-            tunnel = driver.obj.add_endpoint(tunnel_ip, host)
-            tunnels = driver.obj.get_endpoints()
-            entry = {'tunnels': tunnels}
-            # Notify all other listening agents
-            self._notifier.tunnel_update(rpc_context, tunnel.ip_address,
-                                         tunnel_type)
-            # Return the list of tunnel IPs to the agent
-            return entry
-        else:
-            msg = _("Network type value '%s' not supported") % tunnel_type
-            raise exc.InvalidInput(error_message=msg)
-
-
-class TunnelAgentRpcApiMixin(object):
-
-    def _get_tunnel_update_topic(self):
-        return topics.get_topic_name(self.topic,
-                                     TUNNEL,
-                                     topics.UPDATE)
-
-    def tunnel_update(self, context, tunnel_ip, tunnel_type):
-        cctxt = self.client.prepare(topic=self._get_tunnel_update_topic(),
-                                    fanout=True)
-        cctxt.cast(context, 'tunnel_update', tunnel_ip=tunnel_ip,
-                   tunnel_type=tunnel_type)
-
-    def _get_tunnel_delete_topic(self):
-        return topics.get_topic_name(self.topic,
-                                     TUNNEL,
-                                     topics.DELETE)
-
-    def tunnel_delete(self, context, tunnel_ip, tunnel_type):
-        cctxt = self.client.prepare(topic=self._get_tunnel_delete_topic(),
-                                    fanout=True)
-        cctxt.cast(context, 'tunnel_delete', tunnel_ip=tunnel_ip,
-                   tunnel_type=tunnel_type)
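
Both RPC notifications are fanout casts, so every listening agent receives
them. The topic names are built from the mixin's base topic plus the TUNNEL
constant and the operation; assuming the conventional agent-notifier base
topic, the composed names would look like:

    # topics.get_topic_name('q-agent-notifier', 'tunnel', 'update')
    #   -> 'q-agent-notifier-tunnel-update'
    # topics.get_topic_name('q-agent-notifier', 'tunnel', 'delete')
    #   -> 'q-agent-notifier-tunnel-delete'
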
diff --git a/neutron/plugins/ml2/drivers/type_vlan.py b/neutron/plugins/ml2/drivers/type_vlan.py
deleted file mode 100644 (file)
index 6d22670..0000000
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log
-from six import moves
-import sqlalchemy as sa
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.common import exceptions as exc
-from neutron.db import api as db_api
-from neutron.db import model_base
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.common import utils as plugin_utils
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers import helpers
-
-LOG = log.getLogger(__name__)
-
-vlan_opts = [
-    cfg.ListOpt('network_vlan_ranges',
-                default=[],
-                help=_("List of <physical_network>:<vlan_min>:<vlan_max> or "
-                       "<physical_network> specifying physical_network names "
-                       "usable for VLAN provider and tenant networks, as "
-                       "well as ranges of VLAN tags on each available for "
-                       "allocation to tenant networks."))
-]
-
-cfg.CONF.register_opts(vlan_opts, "ml2_type_vlan")
-
-
-class VlanAllocation(model_base.BASEV2):
-    """Represent allocation state of a vlan_id on a physical network.
-
-    If allocated is False, the vlan_id on the physical_network is
-    available for allocation to a tenant network. If allocated is
-    True, the vlan_id on the physical_network is in use, either as a
-    tenant or provider network.
-
-    When an allocation is released, if the vlan_id for the
-    physical_network is inside the pool described by
-    VlanTypeDriver.network_vlan_ranges, then allocated is set to
-    False. If it is outside the pool, the record is deleted.
-    """
-
-    __tablename__ = 'ml2_vlan_allocations'
-    __table_args__ = (
-        sa.Index('ix_ml2_vlan_allocations_physical_network_allocated',
-                 'physical_network', 'allocated'),
-        model_base.BASEV2.__table_args__,)
-
-    physical_network = sa.Column(sa.String(64), nullable=False,
-                                 primary_key=True)
-    vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
-                        autoincrement=False)
-    allocated = sa.Column(sa.Boolean, nullable=False)
-
-
-class VlanTypeDriver(helpers.SegmentTypeDriver):
-    """Manage state for VLAN networks with ML2.
-
-    The VlanTypeDriver implements the 'vlan' network_type. VLAN
-    network segments provide connectivity between VMs and other
-    devices on any connected IEEE 802.1Q conformant physical_network,
-    segmented into virtual networks via IEEE 802.1Q headers. Up to
-    4094 VLAN network segments can exist on each available
-    physical_network.
-    """
-
-    def __init__(self):
-        super(VlanTypeDriver, self).__init__(VlanAllocation)
-        self._parse_network_vlan_ranges()
-
-    def _parse_network_vlan_ranges(self):
-        try:
-            self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
-                cfg.CONF.ml2_type_vlan.network_vlan_ranges)
-        except Exception:
-            LOG.exception(_LE("Failed to parse network_vlan_ranges. "
-                              "Service terminated!"))
-            sys.exit(1)
-        LOG.info(_LI("Network VLAN ranges: %s"), self.network_vlan_ranges)
-
-    def _sync_vlan_allocations(self):
-        session = db_api.get_session()
-        with session.begin(subtransactions=True):
-            # get existing allocations for all physical networks
-            allocations = dict()
-            allocs = (session.query(VlanAllocation).
-                      with_lockmode('update'))
-            for alloc in allocs:
-                if alloc.physical_network not in allocations:
-                    allocations[alloc.physical_network] = set()
-                allocations[alloc.physical_network].add(alloc)
-
-            # process vlan ranges for each configured physical network
-            for (physical_network,
-                 vlan_ranges) in self.network_vlan_ranges.items():
-                # determine current configured allocatable vlans for
-                # this physical network
-                vlan_ids = set()
-                for vlan_min, vlan_max in vlan_ranges:
-                    vlan_ids |= set(moves.range(vlan_min, vlan_max + 1))
-
-                # remove from table unallocated vlans not currently
-                # allocatable
-                if physical_network in allocations:
-                    for alloc in allocations[physical_network]:
-                        try:
-                            # see if vlan is allocatable
-                            vlan_ids.remove(alloc.vlan_id)
-                        except KeyError:
-                            # it's not allocatable, so check if it's allocated
-                            if not alloc.allocated:
-                                # it's not, so remove it from table
-                                LOG.debug("Removing vlan %(vlan_id)s on "
-                                          "physical network "
-                                          "%(physical_network)s from pool",
-                                          {'vlan_id': alloc.vlan_id,
-                                           'physical_network':
-                                           physical_network})
-                                session.delete(alloc)
-                    del allocations[physical_network]
-
-                # add missing allocatable vlans to table
-                for vlan_id in sorted(vlan_ids):
-                    alloc = VlanAllocation(physical_network=physical_network,
-                                           vlan_id=vlan_id,
-                                           allocated=False)
-                    session.add(alloc)
-
-            # remove from table unallocated vlans for any unconfigured
-            # physical networks
-            for allocs in allocations.values():
-                for alloc in allocs:
-                    if not alloc.allocated:
-                        LOG.debug("Removing vlan %(vlan_id)s on physical "
-                                  "network %(physical_network)s from pool",
-                                  {'vlan_id': alloc.vlan_id,
-                                   'physical_network':
-                                   alloc.physical_network})
-                        session.delete(alloc)
-
-    def get_type(self):
-        return p_const.TYPE_VLAN
-
-    def initialize(self):
-        self._sync_vlan_allocations()
-        LOG.info(_LI("VlanTypeDriver initialization complete"))
-
-    def is_partial_segment(self, segment):
-        return segment.get(api.SEGMENTATION_ID) is None
-
-    def validate_provider_segment(self, segment):
-        physical_network = segment.get(api.PHYSICAL_NETWORK)
-        segmentation_id = segment.get(api.SEGMENTATION_ID)
-        if physical_network:
-            if physical_network not in self.network_vlan_ranges:
-                msg = (_("physical_network '%s' unknown "
-                         " for VLAN provider network") % physical_network)
-                raise exc.InvalidInput(error_message=msg)
-            if segmentation_id:
-                if not plugin_utils.is_valid_vlan_tag(segmentation_id):
-                    msg = (_("segmentation_id out of range (%(min)s through "
-                             "%(max)s)") %
-                           {'min': p_const.MIN_VLAN_TAG,
-                            'max': p_const.MAX_VLAN_TAG})
-                    raise exc.InvalidInput(error_message=msg)
-        elif segmentation_id:
-            msg = _("segmentation_id requires physical_network for VLAN "
-                    "provider network")
-            raise exc.InvalidInput(error_message=msg)
-
-        for key, value in segment.items():
-            if value and key not in [api.NETWORK_TYPE,
-                                     api.PHYSICAL_NETWORK,
-                                     api.SEGMENTATION_ID]:
-                msg = _("%s prohibited for VLAN provider network") % key
-                raise exc.InvalidInput(error_message=msg)
-
-    def reserve_provider_segment(self, session, segment):
-        filters = {}
-        physical_network = segment.get(api.PHYSICAL_NETWORK)
-        if physical_network is not None:
-            filters['physical_network'] = physical_network
-            vlan_id = segment.get(api.SEGMENTATION_ID)
-            if vlan_id is not None:
-                filters['vlan_id'] = vlan_id
-
-        if self.is_partial_segment(segment):
-            alloc = self.allocate_partially_specified_segment(
-                session, **filters)
-            if not alloc:
-                raise exc.NoNetworkAvailable()
-        else:
-            alloc = self.allocate_fully_specified_segment(
-                session, **filters)
-            if not alloc:
-                raise exc.VlanIdInUse(**filters)
-
-        return {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                api.PHYSICAL_NETWORK: alloc.physical_network,
-                api.SEGMENTATION_ID: alloc.vlan_id,
-                api.MTU: self.get_mtu(alloc.physical_network)}
-
-    def allocate_tenant_segment(self, session):
-        alloc = self.allocate_partially_specified_segment(session)
-        if not alloc:
-            return
-        return {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                api.PHYSICAL_NETWORK: alloc.physical_network,
-                api.SEGMENTATION_ID: alloc.vlan_id,
-                api.MTU: self.get_mtu(alloc.physical_network)}
-
-    def release_segment(self, session, segment):
-        physical_network = segment[api.PHYSICAL_NETWORK]
-        vlan_id = segment[api.SEGMENTATION_ID]
-
-        ranges = self.network_vlan_ranges.get(physical_network, [])
-        inside = any(lo <= vlan_id <= hi for lo, hi in ranges)
-
-        with session.begin(subtransactions=True):
-            query = (session.query(VlanAllocation).
-                     filter_by(physical_network=physical_network,
-                               vlan_id=vlan_id))
-            if inside:
-                count = query.update({"allocated": False})
-                if count:
-                    LOG.debug("Releasing vlan %(vlan_id)s on physical "
-                              "network %(physical_network)s to pool",
-                              {'vlan_id': vlan_id,
-                               'physical_network': physical_network})
-            else:
-                count = query.delete()
-                if count:
-                    LOG.debug("Releasing vlan %(vlan_id)s on physical "
-                              "network %(physical_network)s outside pool",
-                              {'vlan_id': vlan_id,
-                               'physical_network': physical_network})
-
-        if not count:
-            LOG.warning(_LW("No vlan_id %(vlan_id)s found on physical "
-                            "network %(physical_network)s"),
-                        {'vlan_id': vlan_id,
-                         'physical_network': physical_network})
-
-    def get_mtu(self, physical_network):
-        seg_mtu = super(VlanTypeDriver, self).get_mtu()
-        mtu = []
-        if seg_mtu > 0:
-            mtu.append(seg_mtu)
-        if physical_network in self.physnet_mtus:
-            mtu.append(int(self.physnet_mtus[physical_network]))
-        return min(mtu) if mtu else 0
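
The VLAN pools managed above are driven entirely by the network_vlan_ranges
option registered at the top of this file. A hypothetical configuration
showing both accepted entry forms and their effect on allocation and release:

    # [ml2_type_vlan]
    # network_vlan_ranges = physnet1:1000:1099,physnet2
    #
    # physnet1 -> VLANs 1000-1099 are allocatable to tenant networks,
    #             and IDs inside that pool are recycled on release;
    # physnet2 -> usable for VLAN provider networks only, so provider
    #             allocations outside any pool are deleted on release.
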
diff --git a/neutron/plugins/ml2/drivers/type_vxlan.py b/neutron/plugins/ml2/drivers/type_vxlan.py
deleted file mode 100644 (file)
index d258d9d..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-import sqlalchemy as sa
-from sqlalchemy import sql
-
-from neutron._i18n import _, _LE
-from neutron.common import exceptions as n_exc
-from neutron.db import model_base
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers import type_tunnel
-
-LOG = log.getLogger(__name__)
-
-vxlan_opts = [
-    cfg.ListOpt('vni_ranges',
-                default=[],
-                help=_("Comma-separated list of <vni_min>:<vni_max> tuples "
-                       "enumerating ranges of VXLAN VNI IDs that are "
-                       "available for tenant network allocation")),
-    cfg.StrOpt('vxlan_group',
-               help=_("Multicast group for VXLAN. When configured, will "
-                      "enable sending all broadcast traffic to this multicast "
-                      "group. When left unconfigured, will disable multicast "
-                      "VXLAN mode.")),
-]
-
-cfg.CONF.register_opts(vxlan_opts, "ml2_type_vxlan")
-
-
-class VxlanAllocation(model_base.BASEV2):
-
-    __tablename__ = 'ml2_vxlan_allocations'
-
-    vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
-                          autoincrement=False)
-    allocated = sa.Column(sa.Boolean, nullable=False, default=False,
-                          server_default=sql.false(), index=True)
-
-
-class VxlanEndpoints(model_base.BASEV2):
-    """Represents tunnel endpoint in RPC mode."""
-
-    __tablename__ = 'ml2_vxlan_endpoints'
-    __table_args__ = (
-        sa.UniqueConstraint('host',
-                            name='unique_ml2_vxlan_endpoints0host'),
-        model_base.BASEV2.__table_args__
-    )
-    ip_address = sa.Column(sa.String(64), primary_key=True)
-    udp_port = sa.Column(sa.Integer, nullable=False)
-    host = sa.Column(sa.String(255), nullable=True)
-
-    def __repr__(self):
-        return "<VxlanTunnelEndpoint(%s)>" % self.ip_address
-
-
-class VxlanTypeDriver(type_tunnel.EndpointTunnelTypeDriver):
-
-    def __init__(self):
-        super(VxlanTypeDriver, self).__init__(
-            VxlanAllocation, VxlanEndpoints)
-
-    def get_type(self):
-        return p_const.TYPE_VXLAN
-
-    def initialize(self):
-        try:
-            self._initialize(cfg.CONF.ml2_type_vxlan.vni_ranges)
-        except n_exc.NetworkTunnelRangeError:
-            LOG.exception(_LE("Failed to parse vni_ranges. "
-                              "Service terminated!"))
-            raise SystemExit()
-
-    def get_endpoints(self):
-        """Get every vxlan endpoints from database."""
-        vxlan_endpoints = self._get_endpoints()
-        return [{'ip_address': vxlan_endpoint.ip_address,
-                 'udp_port': vxlan_endpoint.udp_port,
-                 'host': vxlan_endpoint.host}
-                for vxlan_endpoint in vxlan_endpoints]
-
-    def add_endpoint(self, ip, host, udp_port=p_const.VXLAN_UDP_PORT):
-        return self._add_endpoint(ip, host, udp_port=udp_port)
-
-    def get_mtu(self, physical_network=None):
-        mtu = super(VxlanTypeDriver, self).get_mtu()
-        return mtu - p_const.VXLAN_ENCAP_OVERHEAD if mtu else 0
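
get_mtu() subtracts the VXLAN encapsulation overhead from whichever MTU wins
in TunnelTypeDriver.get_mtu(). Assuming the usual 50-byte value for
p_const.VXLAN_ENCAP_OVERHEAD, a quick sketch:

    # with path_mtu = 1550: get_mtu() -> 1550 - 50 == 1500
    # with no MTU configured the parent returns 0, so get_mtu() -> 0
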
diff --git a/neutron/plugins/ml2/extensions/__init__.py b/neutron/plugins/ml2/extensions/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/ml2/extensions/port_security.py b/neutron/plugins/ml2/extensions/port_security.py
deleted file mode 100644 (file)
index a2cb5cd..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2015 Intel Corporation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _LI
-from neutron.api.v2 import attributes as attrs
-from neutron.common import utils
-from neutron.db import common_db_mixin
-from neutron.db import portsecurity_db_common as ps_db_common
-from neutron.extensions import portsecurity as psec
-from neutron.plugins.ml2 import driver_api as api
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-class PortSecurityExtensionDriver(api.ExtensionDriver,
-                                  ps_db_common.PortSecurityDbCommon,
-                                  common_db_mixin.CommonDbMixin):
-    _supported_extension_alias = 'port-security'
-
-    def initialize(self):
-        LOG.info(_LI("PortSecurityExtensionDriver initialization complete"))
-
-    @property
-    def extension_alias(self):
-        return self._supported_extension_alias
-
-    def process_create_network(self, context, data, result):
-        # Create the network extension attributes.
-        if psec.PORTSECURITY not in data:
-            data[psec.PORTSECURITY] = (psec.EXTENDED_ATTRIBUTES_2_0['networks']
-                                       [psec.PORTSECURITY]['default'])
-        self._process_network_port_security_create(context, data, result)
-
-    def process_update_network(self, context, data, result):
-        # Update the network extension attributes.
-        if psec.PORTSECURITY in data:
-            self._process_network_port_security_update(context, data, result)
-
-    def process_create_port(self, context, data, result):
-        # Create the port extension attributes.
-        data[psec.PORTSECURITY] = self._determine_port_security(context, data)
-        self._process_port_port_security_create(context, data, result)
-
-    def process_update_port(self, context, data, result):
-        if psec.PORTSECURITY in data:
-            self._process_port_port_security_update(
-                context, data, result)
-
-    def extend_network_dict(self, session, db_data, result):
-        self._extend_port_security_dict(result, db_data)
-
-    def extend_port_dict(self, session, db_data, result):
-        self._extend_port_security_dict(result, db_data)
-
-    def _extend_port_security_dict(self, response_data, db_data):
-        if db_data.get('port_security') is None:
-            response_data[psec.PORTSECURITY] = (
-                psec.EXTENDED_ATTRIBUTES_2_0['networks']
-                [psec.PORTSECURITY]['default'])
-        else:
-            response_data[psec.PORTSECURITY] = (
-                                db_data['port_security'][psec.PORTSECURITY])
-
-    def _determine_port_security(self, context, port):
-        """Returns a boolean (port_security_enabled).
-
-        Port security is the value associated with the port, if one is
-        present; otherwise, the value associated with the network is
-        returned.
-        """
-        # We don't apply security groups to trusted ports (e.g. DHCP, router).
-        if port.get('device_owner') and utils.is_port_trusted(port):
-            return False
-
-        if attrs.is_attr_set(port.get(psec.PORTSECURITY)):
-            port_security_enabled = port[psec.PORTSECURITY]
-        else:
-            port_security_enabled = self._get_network_security_binding(
-                context, port['network_id'])
-
-        return port_security_enabled
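
The decision order in _determine_port_security() is: trusted ports first,
then the port's own attribute, then the network binding. A condensed,
hypothetical summary:

    # trusted port (DHCP/router device_owner)   -> False
    # port_security_enabled set on the port     -> that value
    # otherwise                                 -> the network's
    #                                              port-security binding
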
diff --git a/neutron/plugins/ml2/extensions/qos.py b/neutron/plugins/ml2/extensions/qos.py
deleted file mode 100644 (file)
index 62c69a0..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from neutron.core_extensions import base as base_core
-from neutron.core_extensions import qos as qos_core
-from neutron.plugins.ml2 import driver_api as api
-
-LOG = logging.getLogger(__name__)
-
-QOS_EXT_DRIVER_ALIAS = 'qos'
-
-
-class QosExtensionDriver(api.ExtensionDriver):
-
-    def initialize(self):
-        self.core_ext_handler = qos_core.QosCoreResourceExtension()
-        LOG.debug("QosExtensionDriver initialization complete")
-
-    def process_create_network(self, context, data, result):
-        self.core_ext_handler.process_fields(
-            context, base_core.NETWORK, data, result)
-
-    process_update_network = process_create_network
-
-    def process_create_port(self, context, data, result):
-        self.core_ext_handler.process_fields(
-            context, base_core.PORT, data, result)
-
-    process_update_port = process_create_port
-
-    def extend_network_dict(self, session, db_data, result):
-        result.update(
-            self.core_ext_handler.extract_fields(
-                base_core.NETWORK, db_data))
-
-    def extend_port_dict(self, session, db_data, result):
-        result.update(
-            self.core_ext_handler.extract_fields(base_core.PORT, db_data))
diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py
deleted file mode 100644 (file)
index 97c6343..0000000
+++ /dev/null
@@ -1,882 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-from oslo_utils import excutils
-import six
-import stevedore
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.api.v2 import attributes
-from neutron.common import exceptions as exc
-from neutron.extensions import external_net
-from neutron.extensions import multiprovidernet as mpnet
-from neutron.extensions import portbindings
-from neutron.extensions import providernet as provider
-from neutron.extensions import vlantransparent
-from neutron.plugins.ml2.common import exceptions as ml2_exc
-from neutron.plugins.ml2 import db
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2 import models
-from neutron.services.qos import qos_consts
-
-LOG = log.getLogger(__name__)
-
-MAX_BINDING_LEVELS = 10
-
-
-class TypeManager(stevedore.named.NamedExtensionManager):
-    """Manage network segment types using drivers."""
-
-    def __init__(self):
-        # Mapping from type name to DriverManager
-        self.drivers = {}
-
-        LOG.info(_LI("Configured type driver names: %s"),
-                 cfg.CONF.ml2.type_drivers)
-        super(TypeManager, self).__init__('neutron.ml2.type_drivers',
-                                          cfg.CONF.ml2.type_drivers,
-                                          invoke_on_load=True)
-        LOG.info(_LI("Loaded type driver names: %s"), self.names())
-        self._register_types()
-        self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types)
-        self._check_external_network_type(cfg.CONF.ml2.external_network_type)
-
-    def _register_types(self):
-        for ext in self:
-            network_type = ext.obj.get_type()
-            if network_type in self.drivers:
-                LOG.error(_LE("Type driver '%(new_driver)s' ignored because"
-                              " type driver '%(old_driver)s' is already"
-                              " registered for type '%(type)s'"),
-                          {'new_driver': ext.name,
-                           'old_driver': self.drivers[network_type].name,
-                           'type': network_type})
-            else:
-                self.drivers[network_type] = ext
-        LOG.info(_LI("Registered types: %s"), self.drivers.keys())
-
-    def _check_tenant_network_types(self, types):
-        self.tenant_network_types = []
-        for network_type in types:
-            if network_type in self.drivers:
-                self.tenant_network_types.append(network_type)
-            else:
-                LOG.error(_LE("No type driver for tenant network_type: %s. "
-                              "Service terminated!"), network_type)
-                raise SystemExit(1)
-        LOG.info(_LI("Tenant network_types: %s"), self.tenant_network_types)
-
-    def _check_external_network_type(self, ext_network_type):
-        if ext_network_type and ext_network_type not in self.drivers:
-            LOG.error(_LE("No type driver for external network_type: %s. "
-                          "Service terminated!"), ext_network_type)
-            raise SystemExit(1)
-
-    def _process_provider_segment(self, segment):
-        (network_type, physical_network,
-         segmentation_id) = (self._get_attribute(segment, attr)
-                             for attr in provider.ATTRIBUTES)
-
-        if attributes.is_attr_set(network_type):
-            segment = {api.NETWORK_TYPE: network_type,
-                       api.PHYSICAL_NETWORK: physical_network,
-                       api.SEGMENTATION_ID: segmentation_id}
-            self.validate_provider_segment(segment)
-            return segment
-
-        msg = _("network_type required")
-        raise exc.InvalidInput(error_message=msg)
-
-    def _process_provider_create(self, network):
-        if any(attributes.is_attr_set(network.get(attr))
-               for attr in provider.ATTRIBUTES):
-            # Verify that multiprovider and provider attributes are not set
-            # at the same time.
-            if attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
-                raise mpnet.SegmentsSetInConjunctionWithProviders()
-            segment = self._get_provider_segment(network)
-            return [self._process_provider_segment(segment)]
-        elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
-            segments = [self._process_provider_segment(s)
-                        for s in network[mpnet.SEGMENTS]]
-            mpnet.check_duplicate_segments(segments, self.is_partial_segment)
-            return segments
-
-    def _match_segment(self, segment, filters):
-        return all(not filters.get(attr) or segment.get(attr) in filters[attr]
-                   for attr in provider.ATTRIBUTES)
-
-    def _get_provider_segment(self, network):
-        # TODO(manishg): Placeholder method
-        # Code intended for operating on a provider segment should use
-        # this method to extract the segment, even though currently the
-        # segment attributes are part of the network dictionary. In the
-        # future, network and segment information will be decoupled and
-        # here we will do the job of extracting the segment information.
-        return network
-
-    def network_matches_filters(self, network, filters):
-        if not filters:
-            return True
-        if any(attributes.is_attr_set(network.get(attr))
-               for attr in provider.ATTRIBUTES):
-            segments = [self._get_provider_segment(network)]
-        elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)):
-            segments = self._get_attribute(network, mpnet.SEGMENTS)
-        else:
-            return True
-        return any(self._match_segment(s, filters) for s in segments)
-
-    def _get_attribute(self, attrs, key):
-        value = attrs.get(key)
-        if value is attributes.ATTR_NOT_SPECIFIED:
-            value = None
-        return value
-
-    def extend_network_dict_provider(self, context, network):
-        # this method is left for backward compat even though it would be
-        # easy to change the callers in tree to use the bulk function
-        return self.extend_networks_dict_provider(context, [network])
-
-    def extend_networks_dict_provider(self, context, networks):
-        ids = [network['id'] for network in networks]
-        net_segments = db.get_networks_segments(context.session, ids)
-        for network in networks:
-            segments = net_segments[network['id']]
-            self._extend_network_dict_provider(network, segments)
-
-    def _extend_network_dict_provider(self, network, segments):
-        if not segments:
-            LOG.error(_LE("Network %s has no segments"), network['id'])
-            for attr in provider.ATTRIBUTES:
-                network[attr] = None
-        elif len(segments) > 1:
-            network[mpnet.SEGMENTS] = [
-                {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE],
-                 provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK],
-                 provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]}
-                for segment in segments]
-        else:
-            segment = segments[0]
-            network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE]
-            network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK]
-            network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID]
-
-    def initialize(self):
-        for network_type, driver in six.iteritems(self.drivers):
-            LOG.info(_LI("Initializing driver for type '%s'"), network_type)
-            driver.obj.initialize()
-
-    def _add_network_segment(self, session, network_id, segment, mtu,
-                             segment_index=0):
-        db.add_network_segment(session, network_id, segment, segment_index)
-        if segment.get(api.MTU, 0) > 0:
-            mtu.append(segment[api.MTU])
-
-    def create_network_segments(self, context, network, tenant_id):
-        """Call type drivers to create network segments."""
-        segments = self._process_provider_create(network)
-        session = context.session
-        mtu = []
-        with session.begin(subtransactions=True):
-            network_id = network['id']
-            if segments:
-                for segment_index, segment in enumerate(segments):
-                    segment = self.reserve_provider_segment(
-                        session, segment)
-                    self._add_network_segment(session, network_id, segment,
-                                              mtu, segment_index)
-            elif (cfg.CONF.ml2.external_network_type and
-                  self._get_attribute(network, external_net.EXTERNAL)):
-                segment = self._allocate_ext_net_segment(session)
-                self._add_network_segment(session, network_id, segment, mtu)
-            else:
-                segment = self._allocate_tenant_net_segment(session)
-                self._add_network_segment(session, network_id, segment, mtu)
-        network[api.MTU] = min(mtu) if mtu else 0
-
-    def is_partial_segment(self, segment):
-        network_type = segment[api.NETWORK_TYPE]
-        driver = self.drivers.get(network_type)
-        if driver:
-            return driver.obj.is_partial_segment(segment)
-        else:
-            msg = _("network_type value '%s' not supported") % network_type
-            raise exc.InvalidInput(error_message=msg)
-
-    def validate_provider_segment(self, segment):
-        network_type = segment[api.NETWORK_TYPE]
-        driver = self.drivers.get(network_type)
-        if driver:
-            driver.obj.validate_provider_segment(segment)
-        else:
-            msg = _("network_type value '%s' not supported") % network_type
-            raise exc.InvalidInput(error_message=msg)
-
-    def reserve_provider_segment(self, session, segment):
-        network_type = segment.get(api.NETWORK_TYPE)
-        driver = self.drivers.get(network_type)
-        return driver.obj.reserve_provider_segment(session, segment)
-
-    def _allocate_segment(self, session, network_type):
-        driver = self.drivers.get(network_type)
-        return driver.obj.allocate_tenant_segment(session)
-
-    def _allocate_tenant_net_segment(self, session):
-        for network_type in self.tenant_network_types:
-            segment = self._allocate_segment(session, network_type)
-            if segment:
-                return segment
-        raise exc.NoNetworkAvailable()
-
-    def _allocate_ext_net_segment(self, session):
-        network_type = cfg.CONF.ml2.external_network_type
-        segment = self._allocate_segment(session, network_type)
-        if segment:
-            return segment
-        raise exc.NoNetworkAvailable()
-
-    def release_network_segments(self, session, network_id):
-        segments = db.get_network_segments(session, network_id,
-                                           filter_dynamic=None)
-
-        for segment in segments:
-            network_type = segment.get(api.NETWORK_TYPE)
-            driver = self.drivers.get(network_type)
-            if driver:
-                driver.obj.release_segment(session, segment)
-            else:
-                LOG.error(_LE("Failed to release segment '%s' because "
-                              "network type is not supported."), segment)
-
-    def allocate_dynamic_segment(self, session, network_id, segment):
-        """Allocate a dynamic segment using a partial or full segment dict."""
-        dynamic_segment = db.get_dynamic_segment(
-            session, network_id, segment.get(api.PHYSICAL_NETWORK),
-            segment.get(api.SEGMENTATION_ID))
-
-        if dynamic_segment:
-            return dynamic_segment
-
-        driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
-        dynamic_segment = driver.obj.reserve_provider_segment(session, segment)
-        db.add_network_segment(session, network_id, dynamic_segment,
-                               is_dynamic=True)
-        return dynamic_segment
-
-    def release_dynamic_segment(self, session, segment_id):
-        """Delete a dynamic segment."""
-        segment = db.get_segment_by_id(session, segment_id)
-        if segment:
-            driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
-            if driver:
-                driver.obj.release_segment(session, segment)
-                db.delete_network_segment(session, segment_id)
-            else:
-                LOG.error(_LE("Failed to release segment '%s' because "
-                              "network type is not supported."), segment)
-        else:
-            LOG.debug("No segment found with id %(segment_id)s", segment_id)
-
-
-class MechanismManager(stevedore.named.NamedExtensionManager):
-    """Manage networking mechanisms using drivers."""
-
-    def __init__(self):
-        # Registered mechanism drivers, keyed by name.
-        self.mech_drivers = {}
-        # Ordered list of mechanism drivers, defining
-        # the order in which the drivers are called.
-        self.ordered_mech_drivers = []
-
-        LOG.info(_LI("Configured mechanism driver names: %s"),
-                 cfg.CONF.ml2.mechanism_drivers)
-        super(MechanismManager, self).__init__('neutron.ml2.mechanism_drivers',
-                                               cfg.CONF.ml2.mechanism_drivers,
-                                               invoke_on_load=True,
-                                               name_order=True)
-        LOG.info(_LI("Loaded mechanism driver names: %s"), self.names())
-        self._register_mechanisms()
-
-    def _register_mechanisms(self):
-        """Register all mechanism drivers.
-
-        This method should only be called once in the MechanismManager
-        constructor.
-        """
-        for ext in self:
-            self.mech_drivers[ext.name] = ext
-            self.ordered_mech_drivers.append(ext)
-        LOG.info(_LI("Registered mechanism drivers: %s"),
-                 [driver.name for driver in self.ordered_mech_drivers])
-
-    @property
-    def supported_qos_rule_types(self):
-        if not self.ordered_mech_drivers:
-            return []
-
-        rule_types = set(qos_consts.VALID_RULE_TYPES)
-        binding_driver_found = False
-
-        # Recalculate on every call to allow drivers to determine supported
-        # rule types dynamically.
-        for driver in self.ordered_mech_drivers:
-            driver_obj = driver.obj
-            if driver_obj._supports_port_binding:
-                binding_driver_found = True
-                if hasattr(driver_obj, 'supported_qos_rule_types'):
-                    new_rule_types = \
-                        rule_types & set(driver_obj.supported_qos_rule_types)
-                    dropped_rule_types = rule_types - new_rule_types
-                    if dropped_rule_types:
-                        LOG.info(
-                            _LI("%(rule_types)s rule types disabled for ml2 "
-                                "because %(driver)s does not support them"),
-                            {'rule_types': ', '.join(dropped_rule_types),
-                             'driver': driver.name})
-                    rule_types = new_rule_types
-                else:
-                    # at least one of the drivers does not support QoS,
-                    # meaning there are no rule types supported by all of them
-                    LOG.warning(
-                        _LW("%s does not support QoS; "
-                            "no rule types available"),
-                        driver.name)
-                    return []
-
-        if binding_driver_found:
-            rule_types = list(rule_types)
-        else:
-            rule_types = []
-        LOG.debug("Supported QoS rule types "
-                  "(common subset for all mech drivers): %s", rule_types)
-        return rule_types
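
Because the property intersects rule types across every binding driver, the
result is the subset supported by all of them. An illustrative (hypothetical)
case:

    # driver A supports {'bandwidth_limit', 'dscp_marking'}
    # driver B supports {'bandwidth_limit'}
    #   -> supported_qos_rule_types == ['bandwidth_limit']
    # any binding driver without QoS support at all -> []
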
-
-    def initialize(self):
-        for driver in self.ordered_mech_drivers:
-            LOG.info(_LI("Initializing mechanism driver '%s'"), driver.name)
-            driver.obj.initialize()
-
-    def _check_vlan_transparency(self, context):
-        """Helper method for checking vlan transparecncy support.
-
-        :param context: context parameter to pass to each method call
-        :raises: neutron.extensions.vlantransparent.
-        VlanTransparencyDriverError if any mechanism driver doesn't
-        support vlan transparency.
-        """
-        if context.current.get('vlan_transparent'):
-            for driver in self.ordered_mech_drivers:
-                if not driver.obj.check_vlan_transparency(context):
-                    raise vlantransparent.VlanTransparencyDriverError()
-
-    def _call_on_drivers(self, method_name, context,
-                         continue_on_failure=False):
-        """Helper method for calling a method across all mechanism drivers.
-
-        :param method_name: name of the method to call
-        :param context: context parameter to pass to each method call
-        :param continue_on_failure: whether or not to continue to call
-        all mechanism drivers once one has raised an exception
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver call fails.
-        """
-        error = False
-        for driver in self.ordered_mech_drivers:
-            try:
-                getattr(driver.obj, method_name)(context)
-            except Exception:
-                LOG.exception(
-                    _LE("Mechanism driver '%(name)s' failed in %(method)s"),
-                    {'name': driver.name, 'method': method_name}
-                )
-                error = True
-                if not continue_on_failure:
-                    break
-        if error:
-            raise ml2_exc.MechanismDriverError(
-                method=method_name
-            )
-
-    def create_network_precommit(self, context):
-        """Notify all mechanism drivers during network creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver create_network_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._check_vlan_transparency(context)
-        self._call_on_drivers("create_network_precommit", context)
-
-    def create_network_postcommit(self, context):
-        """Notify all mechanism drivers after network creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver create_network_postcommit call fails.
-
-        Called after the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, where the network will be deleted, triggering
-        any required cleanup. There is no guarantee that all mechanism
-        drivers are called in this case.
-        """
-        self._call_on_drivers("create_network_postcommit", context)
-
-    def update_network_precommit(self, context):
-        """Notify all mechanism drivers during network update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver update_network_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("update_network_precommit", context)
-
-    def update_network_postcommit(self, context):
-        """Notify all mechanism drivers after network update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver update_network_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure.
-        """
-        self._call_on_drivers("update_network_postcommit", context,
-                              continue_on_failure=True)
-
-    def delete_network_precommit(self, context):
-        """Notify all mechanism drivers during network deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver delete_network_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("delete_network_precommit", context)
-
-    def delete_network_postcommit(self, context):
-        """Notify all mechanism drivers after network deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver delete_network_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure. In
-        general we expect the caller to ignore the error, as the
-        network resource has already been deleted from the database
-        and it doesn't make sense to undo the action by recreating the
-        network.
-        """
-        self._call_on_drivers("delete_network_postcommit", context,
-                              continue_on_failure=True)
-
-    def create_subnet_precommit(self, context):
-        """Notify all mechanism drivers during subnet creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver create_subnet_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("create_subnet_precommit", context)
-
-    def create_subnet_postcommit(self, context):
-        """Notify all mechanism drivers after subnet creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver create_subnet_postcommit call fails.
-
-        Called after the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, where the subnet will be deleted, triggering
-        any required cleanup. There is no guarantee that all mechanism
-        drivers are called in this case.
-        """
-        self._call_on_drivers("create_subnet_postcommit", context)
-
-    def update_subnet_precommit(self, context):
-        """Notify all mechanism drivers during subnet update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver update_subnet_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("update_subnet_precommit", context)
-
-    def update_subnet_postcommit(self, context):
-        """Notify all mechanism drivers after subnet update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver update_subnet_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure.
-        """
-        self._call_on_drivers("update_subnet_postcommit", context,
-                              continue_on_failure=True)
-
-    def delete_subnet_precommit(self, context):
-        """Notify all mechanism drivers during subnet deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver delete_subnet_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("delete_subnet_precommit", context)
-
-    def delete_subnet_postcommit(self, context):
-        """Notify all mechanism drivers after subnet deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver delete_subnet_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure. In
-        general we expect the caller to ignore the error, as the
-        subnet resource has already been deleted from the database
-        and it doesn't make sense to undo the action by recreating the
-        subnet.
-        """
-        self._call_on_drivers("delete_subnet_postcommit", context,
-                              continue_on_failure=True)
-
-    def create_port_precommit(self, context):
-        """Notify all mechanism drivers during port creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver create_port_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("create_port_precommit", context)
-
-    def create_port_postcommit(self, context):
-        """Notify all mechanism drivers of port creation.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver create_port_postcommit call fails.
-
-        Called after the database transaction. Errors raised by
-        mechanism drivers are left to propagate to the caller, where
-        the port will be deleted, triggering any required
-        cleanup. There is no guarantee that all mechanism drivers are
-        called in this case.
-        """
-        self._call_on_drivers("create_port_postcommit", context)
-
-    def update_port_precommit(self, context):
-        """Notify all mechanism drivers during port update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver update_port_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("update_port_precommit", context)
-
-    def update_port_postcommit(self, context):
-        """Notify all mechanism drivers after port update.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver update_port_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure.
-        """
-        self._call_on_drivers("update_port_postcommit", context,
-                              continue_on_failure=True)
-
-    def delete_port_precommit(self, context):
-        """Notify all mechanism drivers during port deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver delete_port_precommit call fails.
-
-        Called within the database transaction. If a mechanism driver
-        raises an exception, then a MechanismDriverError is propagated
-        to the caller, triggering a rollback. There is no guarantee
-        that all mechanism drivers are called in this case.
-        """
-        self._call_on_drivers("delete_port_precommit", context)
-
-    def delete_port_postcommit(self, context):
-        """Notify all mechanism drivers after port deletion.
-
-        :raises: neutron.plugins.ml2.common.MechanismDriverError
-        if any mechanism driver delete_port_postcommit call fails.
-
-        Called after the database transaction. If any mechanism driver
-        raises an error, then the error is logged but we continue to
-        call every other mechanism driver. A MechanismDriverError is
-        then reraised at the end to notify the caller of a failure. In
-        general we expect the caller to ignore the error, as the
-        port resource has already been deleted from the database
-        and it doesn't make sense to undo the action by recreating the
-        port.
-        """
-        self._call_on_drivers("delete_port_postcommit", context,
-                              continue_on_failure=True)
-
-    def bind_port(self, context):
-        """Attempt to bind a port using registered mechanism drivers.
-
-        :param context: PortContext instance describing the port
-
-        Called outside any transaction to attempt to establish a port
-        binding.
-        """
-        binding = context._binding
-        LOG.debug("Attempting to bind port %(port)s on host %(host)s "
-                  "for vnic_type %(vnic_type)s with profile %(profile)s",
-                  {'port': context.current['id'],
-                   'host': context.host,
-                   'vnic_type': binding.vnic_type,
-                   'profile': binding.profile})
-        context._clear_binding_levels()
-        if not self._bind_port_level(context, 0,
-                                     context.network.network_segments):
-            binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
-            LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
-                      {'port': context.current['id'],
-                       'host': context.host})
-
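-    # Sketch of what a driver's bind_port() typically does with the
-    # PortContext it receives here (simplified; ``_supports()`` is a
-    # hypothetical helper):
-    #
-    #     def bind_port(self, context):
-    #         for segment in context.segments_to_bind:
-    #             if self._supports(segment):
-    #                 context.set_binding(
-    #                     segment[api.ID],
-    #                     portbindings.VIF_TYPE_OVS,
-    #                     {portbindings.CAP_PORT_FILTER: True})
-    #                 return
-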
-    def _bind_port_level(self, context, level, segments_to_bind):
-        binding = context._binding
-        port_id = context.current['id']
-        LOG.debug("Attempting to bind port %(port)s on host %(host)s "
-                  "at level %(level)s using segments %(segments)s",
-                  {'port': port_id,
-                   'host': context.host,
-                   'level': level,
-                   'segments': segments_to_bind})
-
-        if level == MAX_BINDING_LEVELS:
-            LOG.error(_LE("Exceeded maximum binding levels attempting to bind "
-                        "port %(port)s on host %(host)s"),
-                      {'port': context.current['id'],
-                       'host': context.host})
-            return False
-
-        for driver in self.ordered_mech_drivers:
-            if not self._check_driver_to_bind(driver, segments_to_bind,
-                                              context._binding_levels):
-                continue
-            try:
-                context._prepare_to_bind(segments_to_bind)
-                driver.obj.bind_port(context)
-                segment = context._new_bound_segment
-                if segment:
-                    context._push_binding_level(
-                        models.PortBindingLevel(port_id=port_id,
-                                                host=context.host,
-                                                level=level,
-                                                driver=driver.name,
-                                                segment_id=segment))
-                    next_segments = context._next_segments_to_bind
-                    if next_segments:
-                        # Continue binding another level.
-                        if self._bind_port_level(context, level + 1,
-                                                 next_segments):
-                            return True
-                        else:
-                            context._pop_binding_level()
-                    else:
-                        # Binding complete.
-                        LOG.debug("Bound port: %(port)s, "
-                                  "host: %(host)s, "
-                                  "vif_type: %(vif_type)s, "
-                                  "vif_details: %(vif_details)s, "
-                                  "binding_levels: %(binding_levels)s",
-                                  {'port': port_id,
-                                   'host': context.host,
-                                   'vif_type': binding.vif_type,
-                                   'vif_details': binding.vif_details,
-                                   'binding_levels': context.binding_levels})
-                        return True
-            except Exception:
-                LOG.exception(_LE("Mechanism driver %s failed in "
-                                  "bind_port"),
-                              driver.name)
-        LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
-                  {'port': context.current['id'],
-                   'host': binding.host})
-        return False
-
-    def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels):
-        # To prevent a possible binding loop, don't try to bind with
-        # this driver if the same driver has already bound at a higher
-        # level to one of the segments we are currently trying to
-        # bind. Note that it is OK for the same driver to bind at
-        # multiple levels using different segments.
-        for level in binding_levels:
-            if (level.driver == driver.name and
-                level.segment_id in segments_to_bind):
-                return False
-        return True
-
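-    # Illustration of the guard above with hypothetical drivers: if the
-    # 'tor' driver already bound segment 'S' at a higher level, it must
-    # not bind 'S' again, while a different driver still may
-    # (``tor_ext``/``ovs_ext`` stand for loaded extensions whose .name
-    # is 'tor'/'ovs'):
-    #
-    #     levels = [models.PortBindingLevel(level=0, driver='tor',
-    #                                       segment_id='S')]
-    #     self._check_driver_to_bind(tor_ext, ['S'], levels)  # -> False
-    #     self._check_driver_to_bind(ovs_ext, ['S'], levels)  # -> True
-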
-    def get_workers(self):
-        workers = []
-        for driver in self.ordered_mech_drivers:
-            workers += driver.obj.get_workers()
-        return workers
-
-
-class ExtensionManager(stevedore.named.NamedExtensionManager):
-    """Manage extension drivers using drivers."""
-
-    def __init__(self):
-        # Ordered list of extension drivers, defining
-        # the order in which the drivers are called.
-        self.ordered_ext_drivers = []
-
-        LOG.info(_LI("Configured extension driver names: %s"),
-                 cfg.CONF.ml2.extension_drivers)
-        super(ExtensionManager, self).__init__('neutron.ml2.extension_drivers',
-                                               cfg.CONF.ml2.extension_drivers,
-                                               invoke_on_load=True,
-                                               name_order=True)
-        LOG.info(_LI("Loaded extension driver names: %s"), self.names())
-        self._register_drivers()
-
-    def _register_drivers(self):
-        """Register all extension drivers.
-
-        This method should only be called once in the ExtensionManager
-        constructor.
-        """
-        for ext in self:
-            self.ordered_ext_drivers.append(ext)
-        LOG.info(_LI("Registered extension drivers: %s"),
-                 [driver.name for driver in self.ordered_ext_drivers])
-
-    def initialize(self):
-        # Initialize each driver in the list.
-        for driver in self.ordered_ext_drivers:
-            LOG.info(_LI("Initializing extension driver '%s'"), driver.name)
-            driver.obj.initialize()
-
-    def extension_aliases(self):
-        exts = []
-        for driver in self.ordered_ext_drivers:
-            alias = driver.obj.extension_alias
-            if alias:
-                exts.append(alias)
-                LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"),
-                         {'alias': alias, 'drv': driver.name})
-        return exts
-
-    def _call_on_ext_drivers(self, method_name, plugin_context, data, result):
-        """Helper method for calling a method across all extension drivers."""
-        for driver in self.ordered_ext_drivers:
-            try:
-                getattr(driver.obj, method_name)(plugin_context, data, result)
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    LOG.info(_LI("Extension driver '%(name)s' failed in "
-                             "%(method)s"),
-                             {'name': driver.name, 'method': method_name})
-
-    def process_create_network(self, plugin_context, data, result):
-        """Notify all extension drivers during network creation."""
-        self._call_on_ext_drivers("process_create_network", plugin_context,
-                                  data, result)
-
-    def process_update_network(self, plugin_context, data, result):
-        """Notify all extension drivers during network update."""
-        self._call_on_ext_drivers("process_update_network", plugin_context,
-                                  data, result)
-
-    def process_create_subnet(self, plugin_context, data, result):
-        """Notify all extension drivers during subnet creation."""
-        self._call_on_ext_drivers("process_create_subnet", plugin_context,
-                                  data, result)
-
-    def process_update_subnet(self, plugin_context, data, result):
-        """Notify all extension drivers during subnet update."""
-        self._call_on_ext_drivers("process_update_subnet", plugin_context,
-                                  data, result)
-
-    def process_create_port(self, plugin_context, data, result):
-        """Notify all extension drivers during port creation."""
-        self._call_on_ext_drivers("process_create_port", plugin_context,
-                                  data, result)
-
-    def process_update_port(self, plugin_context, data, result):
-        """Notify all extension drivers during port update."""
-        self._call_on_ext_drivers("process_update_port", plugin_context,
-                                  data, result)
-
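-    # A sketch of an extension driver reached by the dispatch above;
-    # ``ExampleExtensionDriver`` and the 'example' attribute are
-    # hypothetical:
-    #
-    #     from neutron.plugins.ml2 import driver_api as api
-    #
-    #     class ExampleExtensionDriver(api.ExtensionDriver):
-    #         def initialize(self):
-    #             pass
-    #
-    #         @property
-    #         def extension_alias(self):
-    #             return 'example'
-    #
-    #         def process_create_port(self, plugin_context, data, result):
-    #             result['example'] = data.get('example')
-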
-    def _call_on_dict_driver(self, method_name, session, base_model, result):
-        for driver in self.ordered_ext_drivers:
-            try:
-                getattr(driver.obj, method_name)(session, base_model, result)
-            except Exception:
-                LOG.error(_LE("Extension driver '%(name)s' failed in "
-                          "%(method)s"),
-                          {'name': driver.name, 'method': method_name})
-                raise ml2_exc.ExtensionDriverError(driver=driver.name)
-
-    def extend_network_dict(self, session, base_model, result):
-        """Notify all extension drivers to extend network dictionary."""
-        self._call_on_dict_driver("extend_network_dict", session, base_model,
-                                  result)
-
-    def extend_subnet_dict(self, session, base_model, result):
-        """Notify all extension drivers to extend subnet dictionary."""
-        self._call_on_dict_driver("extend_subnet_dict", session, base_model,
-                                  result)
-
-    def extend_port_dict(self, session, base_model, result):
-        """Notify all extension drivers to extend port dictionary."""
-        self._call_on_dict_driver("extend_port_dict", session, base_model,
-                                  result)
diff --git a/neutron/plugins/ml2/models.py b/neutron/plugins/ml2/models.py
deleted file mode 100644 (file)
index db82853..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.extensions import portbindings
-
-BINDING_PROFILE_LEN = 4095
-
-
-class NetworkSegment(model_base.BASEV2, model_base.HasId):
-    """Represent persistent state of a network segment.
-
-    A network segment is a portion of a neutron network with a
-    specific physical realization. A neutron network can consist of
-    one or more segments.
-    """
-
-    __tablename__ = 'ml2_network_segments'
-
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
-                           nullable=False)
-    network_type = sa.Column(sa.String(32), nullable=False)
-    physical_network = sa.Column(sa.String(64))
-    segmentation_id = sa.Column(sa.Integer)
-    is_dynamic = sa.Column(sa.Boolean, default=False, nullable=False,
-                           server_default=sa.sql.false())
-    segment_index = sa.Column(sa.Integer, nullable=False, server_default='0')
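-
-    # A two-segment network might be persisted as the following rows
-    # (values illustrative):
-    #
-    #     network_id  network_type  physical_network  segmentation_id  index
-    #     net-1       vlan          physnet1          101              0
-    #     net-1       vxlan         NULL              5001             1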
-
-
-class PortBinding(model_base.BASEV2):
-    """Represent binding-related state of a port.
-
-    A port binding stores the port attributes required for the
-    portbindings extension, as well as internal ml2 state such as
-    which MechanismDriver and which segment are used by the port
-    binding.
-    """
-
-    __tablename__ = 'ml2_port_bindings'
-
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                        primary_key=True)
-    host = sa.Column(sa.String(255), nullable=False, default='',
-                     server_default='')
-    vnic_type = sa.Column(sa.String(64), nullable=False,
-                          default=portbindings.VNIC_NORMAL,
-                          server_default=portbindings.VNIC_NORMAL)
-    profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False,
-                        default='', server_default='')
-    vif_type = sa.Column(sa.String(64), nullable=False)
-    vif_details = sa.Column(sa.String(4095), nullable=False, default='',
-                            server_default='')
-
-    # Add a relationship to the Port model in order to instruct SQLAlchemy to
-    # eagerly load port bindings
-    port = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref("port_binding",
-                            lazy='joined', uselist=False,
-                            cascade='delete'))
-
-
-class PortBindingLevel(model_base.BASEV2):
-    """Represent each level of a port binding.
-
-    Stores information associated with each level of an established
-    port binding. Different levels might correspond to the host and
-    ToR switch, for instance.
-    """
-
-    __tablename__ = 'ml2_port_binding_levels'
-
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                        primary_key=True)
-    host = sa.Column(sa.String(255), nullable=False, primary_key=True)
-    level = sa.Column(sa.Integer, primary_key=True, autoincrement=False)
-    driver = sa.Column(sa.String(64))
-    segment_id = sa.Column(sa.String(36),
-                           sa.ForeignKey('ml2_network_segments.id',
-                                         ondelete="SET NULL"))
-
-
-class DVRPortBinding(model_base.BASEV2):
-    """Represent binding-related state of a DVR port.
-
-    Stores binding-related state for all ports associated with a DVR
-    router, identified by router_id.
-    """
-
-    __tablename__ = 'ml2_dvr_port_bindings'
-
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                        primary_key=True)
-    host = sa.Column(sa.String(255), nullable=False, primary_key=True)
-    router_id = sa.Column(sa.String(36), nullable=True)
-    vif_type = sa.Column(sa.String(64), nullable=False)
-    vif_details = sa.Column(sa.String(4095), nullable=False, default='',
-                            server_default='')
-    vnic_type = sa.Column(sa.String(64), nullable=False,
-                          default=portbindings.VNIC_NORMAL,
-                          server_default=portbindings.VNIC_NORMAL)
-    profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False,
-                        default='', server_default='')
-    status = sa.Column(sa.String(16), nullable=False)
-
-    # Add a relationship to the Port model in order to instruct SQLAlchemy to
-    # eagerly load port bindings
-    port = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref("dvr_port_binding",
-                            lazy='joined', uselist=False,
-                            cascade='delete'))
diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py
deleted file mode 100644 (file)
index c80b864..0000000
+++ /dev/null
@@ -1,1597 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from eventlet import greenthread
-from oslo_config import cfg
-from oslo_db import api as oslo_db_api
-from oslo_db import exception as os_db_exception
-from oslo_log import helpers as log_helpers
-from oslo_log import log
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-from oslo_utils import importutils
-from oslo_utils import uuidutils
-from sqlalchemy import exc as sql_exc
-from sqlalchemy.orm import exc as sa_exc
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.rpc.handlers import dhcp_rpc
-from neutron.api.rpc.handlers import dvr_rpc
-from neutron.api.rpc.handlers import metadata_rpc
-from neutron.api.rpc.handlers import resources_rpc
-from neutron.api.rpc.handlers import securitygroups_rpc
-from neutron.api.v2 import attributes
-from neutron.callbacks import events
-from neutron.callbacks import exceptions
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants as const
-from neutron.common import exceptions as exc
-from neutron.common import ipv6_utils
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron.db import address_scope_db
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import allowedaddresspairs_db as addr_pair_db
-from neutron.db import api as db_api
-from neutron.db import db_base_plugin_v2
-from neutron.db import dvr_mac_db
-from neutron.db import external_net_db
-from neutron.db import extradhcpopt_db
-from neutron.db import models_v2
-from neutron.db import netmtu_db
-from neutron.db.quota import driver  # noqa
-from neutron.db import securitygroups_db
-from neutron.db import securitygroups_rpc_base as sg_db_rpc
-from neutron.db import vlantransparent_db
-from neutron.extensions import allowedaddresspairs as addr_pair
-from neutron.extensions import availability_zone as az_ext
-from neutron.extensions import extra_dhcp_opt as edo_ext
-from neutron.extensions import portbindings
-from neutron.extensions import portsecurity as psec
-from neutron.extensions import providernet as provider
-from neutron.extensions import vlantransparent
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-from neutron.plugins.ml2.common import exceptions as ml2_exc
-from neutron.plugins.ml2 import config  # noqa
-from neutron.plugins.ml2 import db
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2 import driver_context
-from neutron.plugins.ml2.extensions import qos as qos_ext
-from neutron.plugins.ml2 import managers
-from neutron.plugins.ml2 import models
-from neutron.plugins.ml2 import rpc
-from neutron.quota import resource_registry
-from neutron.services.qos import qos_consts
-
-LOG = log.getLogger(__name__)
-
-MAX_BIND_TRIES = 10
-
-
-SERVICE_PLUGINS_REQUIRED_DRIVERS = {
-    'qos': [qos_ext.QOS_EXT_DRIVER_ALIAS]
-}
-
-
-class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
-                dvr_mac_db.DVRDbMixin,
-                external_net_db.External_net_db_mixin,
-                sg_db_rpc.SecurityGroupServerRpcMixin,
-                agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
-                addr_pair_db.AllowedAddressPairsMixin,
-                vlantransparent_db.Vlantransparent_db_mixin,
-                extradhcpopt_db.ExtraDhcpOptMixin,
-                netmtu_db.Netmtu_db_mixin,
-                address_scope_db.AddressScopeDbMixin):
-
-    """Implement the Neutron L2 abstractions using modules.
-
-    Ml2Plugin is a Neutron plugin based on separately extensible sets
-    of network types and mechanisms for connecting to networks of
-    those types. The network types and mechanisms are implemented as
-    drivers loaded via Python entry points. Networks can be made up of
-    multiple segments (not yet fully implemented).
-    """
-
-    # These attributes specify whether or not the plugin supports
-    # bulk/pagination/sorting operations. Name mangling is used in
-    # order to ensure the attributes are qualified by class.
-    __native_bulk_support = True
-    __native_pagination_support = True
-    __native_sorting_support = True
-
-    # List of supported extensions
-    _supported_extension_aliases = ["provider", "external-net", "binding",
-                                    "quotas", "security-group", "agent",
-                                    "dhcp_agent_scheduler",
-                                    "multi-provider", "allowed-address-pairs",
-                                    "extra_dhcp_opt", "subnet_allocation",
-                                    "net-mtu", "vlan-transparent",
-                                    "address-scope", "dns-integration",
-                                    "availability_zone",
-                                    "network_availability_zone"]
-
-    @property
-    def supported_extension_aliases(self):
-        if not hasattr(self, '_aliases'):
-            aliases = self._supported_extension_aliases[:]
-            aliases += self.extension_manager.extension_aliases()
-            sg_rpc.disable_security_group_extension_by_config(aliases)
-            vlantransparent.disable_extension_by_config(aliases)
-            self._aliases = aliases
-        return self._aliases
-
-    @resource_registry.tracked_resources(
-        network=models_v2.Network,
-        port=models_v2.Port,
-        subnet=models_v2.Subnet,
-        subnetpool=models_v2.SubnetPool,
-        security_group=securitygroups_db.SecurityGroup,
-        security_group_rule=securitygroups_db.SecurityGroupRule)
-    def __init__(self):
-        # First load drivers, then initialize DB, then initialize drivers
-        self.type_manager = managers.TypeManager()
-        self.extension_manager = managers.ExtensionManager()
-        self.mechanism_manager = managers.MechanismManager()
-        super(Ml2Plugin, self).__init__()
-        self.type_manager.initialize()
-        self.extension_manager.initialize()
-        self.mechanism_manager.initialize()
-        self._setup_dhcp()
-        self._start_rpc_notifiers()
-        self.add_agent_status_check(self.agent_health_check)
-        self._verify_service_plugins_requirements()
-        LOG.info(_LI("Modular L2 Plugin initialization complete"))
-
-    def _setup_rpc(self):
-        """Initialize components to support agent communication."""
-        self.endpoints = [
-            rpc.RpcCallbacks(self.notifier, self.type_manager),
-            securitygroups_rpc.SecurityGroupServerRpcCallback(),
-            dvr_rpc.DVRServerRpcCallback(),
-            dhcp_rpc.DhcpRpcCallback(),
-            agents_db.AgentExtRpcCallback(),
-            metadata_rpc.MetadataRpcCallback(),
-            resources_rpc.ResourcesPullRpcCallback()
-        ]
-
-    def _setup_dhcp(self):
-        """Initialize components to support DHCP."""
-        self.network_scheduler = importutils.import_object(
-            cfg.CONF.network_scheduler_driver
-        )
-        self.start_periodic_dhcp_agent_status_check()
-
-    def _verify_service_plugins_requirements(self):
-        for service_plugin in cfg.CONF.service_plugins:
-            extension_drivers = SERVICE_PLUGINS_REQUIRED_DRIVERS.get(
-                service_plugin, []
-            )
-            for extension_driver in extension_drivers:
-                if extension_driver not in self.extension_manager.names():
-                    raise ml2_exc.ExtensionDriverNotFound(
-                        driver=extension_driver, service_plugin=service_plugin
-                    )
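-
-    # With the SERVICE_PLUGINS_REQUIRED_DRIVERS mapping above, enabling
-    # the QoS service plugin requires the matching ml2 extension driver;
-    # a sketch of a consistent configuration:
-    #
-    #     [DEFAULT]
-    #     service_plugins = qos
-    #
-    #     [ml2]
-    #     extension_drivers = qos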
-
-    @property
-    def supported_qos_rule_types(self):
-        return self.mechanism_manager.supported_qos_rule_types
-
-    @log_helpers.log_method_call
-    def _start_rpc_notifiers(self):
-        """Initialize RPC notifiers for agents."""
-        self.notifier = rpc.AgentNotifierApi(topics.AGENT)
-        self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
-            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
-        )
-
-    @log_helpers.log_method_call
-    def start_rpc_listeners(self):
-        """Start the RPC loop to let the plugin communicate with agents."""
-        self._setup_rpc()
-        self.topic = topics.PLUGIN
-        self.conn = n_rpc.create_connection()
-        self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
-        # process state reports despite dedicated rpc workers
-        self.conn.create_consumer(topics.REPORTS,
-                                  [agents_db.AgentExtRpcCallback()],
-                                  fanout=False)
-        return self.conn.consume_in_threads()
-
-    def start_rpc_state_reports_listener(self):
-        self.conn_reports = n_rpc.create_connection(new=True)
-        self.conn_reports.create_consumer(topics.REPORTS,
-                                          [agents_db.AgentExtRpcCallback()],
-                                          fanout=False)
-        return self.conn_reports.consume_in_threads()
-
-    def _filter_nets_provider(self, context, networks, filters):
-        return [network
-                for network in networks
-                if self.type_manager.network_matches_filters(network, filters)
-                ]
-
-    def _check_mac_update_allowed(self, orig_port, port, binding):
-        unplugged_types = (portbindings.VIF_TYPE_BINDING_FAILED,
-                           portbindings.VIF_TYPE_UNBOUND)
-        new_mac = port.get('mac_address')
-        mac_change = (new_mac is not None and
-                      orig_port['mac_address'] != new_mac)
-        if (mac_change and binding.vif_type not in unplugged_types):
-            raise exc.PortBound(port_id=orig_port['id'],
-                                vif_type=binding.vif_type,
-                                old_mac=orig_port['mac_address'],
-                                new_mac=port['mac_address'])
-        return mac_change
-
-    def _process_port_binding(self, mech_context, attrs):
-        session = mech_context._plugin_context.session
-        binding = mech_context._binding
-        port = mech_context.current
-        port_id = port['id']
-        changes = False
-
-        host = attributes.ATTR_NOT_SPECIFIED
-        if attrs and portbindings.HOST_ID in attrs:
-            host = attrs.get(portbindings.HOST_ID) or ''
-
-        original_host = binding.host
-        if (attributes.is_attr_set(host) and
-            original_host != host):
-            binding.host = host
-            changes = True
-
-        vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE)
-        if (attributes.is_attr_set(vnic_type) and
-            binding.vnic_type != vnic_type):
-            binding.vnic_type = vnic_type
-            changes = True
-
-        # treat None as clear of profile.
-        profile = None
-        if attrs and portbindings.PROFILE in attrs:
-            profile = attrs.get(portbindings.PROFILE) or {}
-
-        if profile not in (None, attributes.ATTR_NOT_SPECIFIED,
-                           self._get_profile(binding)):
-            binding.profile = jsonutils.dumps(profile)
-            if len(binding.profile) > models.BINDING_PROFILE_LEN:
-                msg = _("binding:profile value too large")
-                raise exc.InvalidInput(error_message=msg)
-            changes = True
-
-        # Unbind the port if needed.
-        if changes:
-            binding.vif_type = portbindings.VIF_TYPE_UNBOUND
-            binding.vif_details = ''
-            db.clear_binding_levels(session, port_id, original_host)
-            mech_context._clear_binding_levels()
-
-        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-            binding.vif_type = portbindings.VIF_TYPE_UNBOUND
-            binding.vif_details = ''
-            db.clear_binding_levels(session, port_id, original_host)
-            mech_context._clear_binding_levels()
-            binding.host = ''
-
-        self._update_port_dict_binding(port, binding)
-        return changes
-
-    def _bind_port_if_needed(self, context, allow_notify=False,
-                             need_notify=False):
-        # Binding limit does not need to be tunable because no
-        # more than a couple of attempts should ever be required in
-        # normal operation.
-        for count in range(1, MAX_BIND_TRIES + 1):
-            if count > 1:
-                # Multiple attempts shouldn't happen very often, so log
-                # each attempt after the first.
-                greenthread.sleep(0)  # yield
-                LOG.info(_LI("Attempt %(count)s to bind port %(port)s"),
-                         {'count': count, 'port': context.current['id']})
-            context, need_notify, try_again = self._attempt_binding(
-                context, need_notify)
-            if not try_again:
-                if allow_notify and need_notify:
-                    self._notify_port_updated(context)
-                return context
-
-        LOG.error(_LE("Failed to commit binding results for %(port)s "
-                      "after %(max)s tries"),
-                  {'port': context.current['id'], 'max': MAX_BIND_TRIES})
-        return context
-
-    def _attempt_binding(self, context, need_notify):
-        # Since the mechanism driver bind_port() calls must be made
-        # outside a DB transaction locking the port state, it is
-        # possible (but unlikely) that the port's state could change
-        # concurrently while these calls are being made. If another
-        # thread or process succeeds in binding the port before this
-        # thread commits its results, the already committed results are
-        # used. If attributes such as binding:host_id,
-        # binding:profile, or binding:vnic_type are updated
-        # concurrently, the try_again flag is returned to indicate that
-        # the commit was unsuccessful.
-        plugin_context = context._plugin_context
-        port_id = context.current['id']
-        binding = context._binding
-        try_again = False
-        # First, determine whether it is necessary and possible to
-        # bind the port.
-        if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND
-                or not binding.host):
-            # We either don't need to bind the port or can't
-            return context, need_notify, try_again
-
-        # The port isn't already bound and the necessary
-        # information is available, so attempt to bind the port.
-        bind_context = self._bind_port(context)
-        # Now try to commit result of attempting to bind the port.
-        new_context, did_commit = self._commit_port_binding(
-            plugin_context, port_id, binding, bind_context)
-        if not new_context:
-            # The port has been deleted concurrently, so just
-            # return the unbound result from the initial
-            # transaction that completed before the deletion.
-            LOG.debug("Port %s has been deleted concurrently",
-                      port_id)
-            need_notify = False
-            return context, need_notify, try_again
-        # Need to notify if we succeed and our results were
-        # committed.
-        if did_commit and (new_context._binding.vif_type !=
-                           portbindings.VIF_TYPE_BINDING_FAILED):
-            need_notify = True
-            return new_context, need_notify, try_again
-        try_again = True
-        return new_context, need_notify, try_again
-
-    def _bind_port(self, orig_context):
-        # Construct a new PortContext from the one from the previous
-        # transaction.
-        port = orig_context.current
-        orig_binding = orig_context._binding
-        new_binding = models.PortBinding(
-            host=orig_binding.host,
-            vnic_type=orig_binding.vnic_type,
-            profile=orig_binding.profile,
-            vif_type=portbindings.VIF_TYPE_UNBOUND,
-            vif_details=''
-        )
-        self._update_port_dict_binding(port, new_binding)
-        new_context = driver_context.PortContext(
-            self, orig_context._plugin_context, port,
-            orig_context.network.current, new_binding, None)
-
-        # Attempt to bind the port and return the context with the
-        # result.
-        self.mechanism_manager.bind_port(new_context)
-        return new_context
-
-    def _commit_port_binding(self, plugin_context, port_id, orig_binding,
-                             new_context):
-        session = plugin_context.session
-        new_binding = new_context._binding
-
-        # After we've attempted to bind the port, we begin a
-        # transaction, get the current port state, and decide whether
-        # to commit the binding results.
-        with session.begin(subtransactions=True):
-            # Get the current port state and build a new PortContext
-            # reflecting this state as original state for subsequent
-            # mechanism driver update_port_*commit() calls.
-            port_db, cur_binding = db.get_locked_port_and_binding(session,
-                                                                  port_id)
-            if not port_db:
-                # The port has been deleted concurrently.
-                return (None, False)
-            oport = self._make_port_dict(port_db)
-            port = self._make_port_dict(port_db)
-            network = new_context.network.current
-            if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-                # REVISIT(rkukura): The PortBinding instance from the
-                # ml2_port_bindings table, returned as cur_binding
-                # from db.get_locked_port_and_binding() above, is
-                # currently not used for DVR distributed ports, and is
-                # replaced here with the DVRPortBinding instance from
-                # the ml2_dvr_port_bindings table specific to the host
-                # on which the distributed port is being bound. It
-                # would be possible to optimize this code to avoid
-                # fetching the PortBinding instance in the DVR case,
-                # and even to avoid creating the unused entry in the
-                # ml2_port_bindings table. But the upcoming resolution
-                # for bug 1367391 will eliminate the
-                # ml2_dvr_port_bindings table, use the
-                # ml2_port_bindings table to store non-host-specific
-                # fields for both distributed and non-distributed
-                # ports, and introduce a new ml2_port_binding_hosts
-                # table for the fields that need to be host-specific
-                # in the distributed case. Since the PortBinding
-                # instance will then be needed, it does not make sense
-                # to optimize this code to avoid fetching it.
-                cur_binding = db.get_dvr_port_binding_by_host(
-                    session, port_id, orig_binding.host)
-            cur_context = driver_context.PortContext(
-                self, plugin_context, port, network, cur_binding, None,
-                original_port=oport)
-
-            # Commit our binding results only if port has not been
-            # successfully bound concurrently by another thread or
-            # process and no binding inputs have been changed.
-            commit = ((cur_binding.vif_type in
-                       [portbindings.VIF_TYPE_UNBOUND,
-                        portbindings.VIF_TYPE_BINDING_FAILED]) and
-                      orig_binding.host == cur_binding.host and
-                      orig_binding.vnic_type == cur_binding.vnic_type and
-                      orig_binding.profile == cur_binding.profile)
-
-            if commit:
-                # Update the port's binding state with our binding
-                # results.
-                cur_binding.vif_type = new_binding.vif_type
-                cur_binding.vif_details = new_binding.vif_details
-                db.clear_binding_levels(session, port_id, cur_binding.host)
-                db.set_binding_levels(session, new_context._binding_levels)
-                cur_context._binding_levels = new_context._binding_levels
-
-                # Update PortContext's port dictionary to reflect the
-                # updated binding state.
-                self._update_port_dict_binding(port, cur_binding)
-
-                # Update the port status if requested by the bound driver.
-                if (new_context._binding_levels and
-                    new_context._new_port_status):
-                    port_db.status = new_context._new_port_status
-                    port['status'] = new_context._new_port_status
-
-                # Call the mechanism driver precommit methods, commit
-                # the results, and call the postcommit methods.
-                self.mechanism_manager.update_port_precommit(cur_context)
-        if commit:
-            self.mechanism_manager.update_port_postcommit(cur_context)
-
-        # Continue, using the port state as of the transaction that
-        # just finished, whether that transaction committed new
-        # results or discovered concurrent port state changes.
-        return (cur_context, commit)
-
-    def _update_port_dict_binding(self, port, binding):
-        port[portbindings.VNIC_TYPE] = binding.vnic_type
-        port[portbindings.PROFILE] = self._get_profile(binding)
-        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-            port[portbindings.HOST_ID] = ''
-            port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_DISTRIBUTED
-            port[portbindings.VIF_DETAILS] = {}
-        else:
-            port[portbindings.HOST_ID] = binding.host
-            port[portbindings.VIF_TYPE] = binding.vif_type
-            port[portbindings.VIF_DETAILS] = self._get_vif_details(binding)
-
-    def _get_vif_details(self, binding):
-        if binding.vif_details:
-            try:
-                return jsonutils.loads(binding.vif_details)
-            except Exception:
-                LOG.error(_LE("Serialized vif_details DB value '%(value)s' "
-                              "for port %(port)s is invalid"),
-                          {'value': binding.vif_details,
-                           'port': binding.port_id})
-        return {}
-
-    def _get_profile(self, binding):
-        if binding.profile:
-            try:
-                return jsonutils.loads(binding.profile)
-            except Exception:
-                LOG.error(_LE("Serialized profile DB value '%(value)s' for "
-                              "port %(port)s is invalid"),
-                          {'value': binding.profile,
-                           'port': binding.port_id})
-        return {}
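-
-    # Both the profile and vif_details columns hold JSON-serialized
-    # dicts, round-tripped with oslo.serialization (key and value
-    # illustrative):
-    #
-    #     >>> jsonutils.loads(jsonutils.dumps({'pci_slot': '0000:00:01.0'}))
-    #     {u'pci_slot': u'0000:00:01.0'}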
-
-    def _ml2_extend_port_dict_binding(self, port_res, port_db):
-        # None when called during unit tests for other plugins.
-        if port_db.port_binding:
-            self._update_port_dict_binding(port_res, port_db.port_binding)
-
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attributes.PORTS, ['_ml2_extend_port_dict_binding'])
-
-    # Register extend dict methods for network and port resources.
-    # Each mechanism driver that supports extend attribute for the resources
-    # can add those attribute to the result.
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-               attributes.NETWORKS, ['_ml2_md_extend_network_dict'])
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-               attributes.PORTS, ['_ml2_md_extend_port_dict'])
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-               attributes.SUBNETS, ['_ml2_md_extend_subnet_dict'])
-
-    def _ml2_md_extend_network_dict(self, result, netdb):
-        session = db_api.get_session()
-        with session.begin(subtransactions=True):
-            self.extension_manager.extend_network_dict(session, netdb, result)
-
-    def _ml2_md_extend_port_dict(self, result, portdb):
-        session = db_api.get_session()
-        with session.begin(subtransactions=True):
-            self.extension_manager.extend_port_dict(session, portdb, result)
-
-    def _ml2_md_extend_subnet_dict(self, result, subnetdb):
-        session = db_api.get_session()
-        with session.begin(subtransactions=True):
-            self.extension_manager.extend_subnet_dict(
-                session, subnetdb, result)
-
-    # Note - The following hook methods have "ml2" in their names so
-    # that they are not called twice during unit tests due to global
-    # registration of hooks in portbindings_db.py used by other
-    # plugins.
-
-    def _ml2_port_model_hook(self, context, original_model, query):
-        query = query.outerjoin(models.PortBinding,
-                                (original_model.id ==
-                                 models.PortBinding.port_id))
-        return query
-
-    def _ml2_port_result_filter_hook(self, query, filters):
-        values = filters and filters.get(portbindings.HOST_ID, [])
-        if not values:
-            return query
-        return query.filter(models.PortBinding.host.in_(values))
-
-    db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
-        models_v2.Port,
-        "ml2_port_bindings",
-        '_ml2_port_model_hook',
-        None,
-        '_ml2_port_result_filter_hook')
-
-    def _notify_port_updated(self, mech_context):
-        port = mech_context.current
-        segment = mech_context.bottom_bound_segment
-        if not segment:
-            # REVISIT(rkukura): This should notify agent to unplug port
-            network = mech_context.network.current
-            LOG.warning(_LW("In _notify_port_updated(), no bound segment for "
-                            "port %(port_id)s on network %(network_id)s"),
-                        {'port_id': port['id'],
-                         'network_id': network['id']})
-            return
-        self.notifier.port_update(mech_context._plugin_context, port,
-                                  segment[api.NETWORK_TYPE],
-                                  segment[api.SEGMENTATION_ID],
-                                  segment[api.PHYSICAL_NETWORK])
-
-    def _delete_objects(self, context, resource, objects):
-        delete_op = getattr(self, 'delete_%s' % resource)
-        for obj in objects:
-            try:
-                delete_op(context, obj['result']['id'])
-            except KeyError:
-                LOG.exception(_LE("Could not find %s to delete."),
-                              resource)
-            except Exception:
-                LOG.exception(_LE("Could not delete %(res)s %(id)s."),
-                              {'res': resource,
-                               'id': obj['result']['id']})
-
-    def _create_bulk_ml2(self, resource, context, request_items):
-        objects = []
-        collection = "%ss" % resource
-        items = request_items[collection]
-        try:
-            with context.session.begin(subtransactions=True):
-                obj_creator = getattr(self, '_create_%s_db' % resource)
-                for item in items:
-                    attrs = item[resource]
-                    result, mech_context = obj_creator(context, item)
-                    objects.append({'mech_context': mech_context,
-                                    'result': result,
-                                    'attributes': attrs})
-
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("An exception occurred while creating "
-                                  "the %(resource)s:%(item)s"),
-                              {'resource': resource, 'item': item})
-
-        try:
-            postcommit_op = getattr(self.mechanism_manager,
-                                    'create_%s_postcommit' % resource)
-            for obj in objects:
-                postcommit_op(obj['mech_context'])
-            return objects
-        except ml2_exc.MechanismDriverError:
-            with excutils.save_and_reraise_exception():
-                resource_ids = [res['result']['id'] for res in objects]
-                LOG.exception(_LE("mechanism_manager.create_%(res)s"
-                                  "_postcommit failed for %(res)s: "
-                                  "'%(failed_id)s'. Deleting "
-                                  "%(res)ss %(resource_ids)s"),
-                              {'res': resource,
-                               'failed_id': obj['result']['id'],
-                               'resource_ids': ', '.join(resource_ids)})
-                self._delete_objects(context, resource, objects)
-
-    def _create_network_db(self, context, network):
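-        # Perform the DB part of network creation (base create, extension
-        # processing, L3 and provider handling, segment allocation and
-        # precommit) in one transaction; the returned mech_context is
-        # used by the caller for the postcommit phase.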
-        net_data = network[attributes.NETWORK]
-        tenant_id = net_data['tenant_id']
-        session = context.session
-        with session.begin(subtransactions=True):
-            self._ensure_default_security_group(context, tenant_id)
-            result = super(Ml2Plugin, self).create_network(context, network)
-            self.extension_manager.process_create_network(context, net_data,
-                                                          result)
-            self._process_l3_create(context, result, net_data)
-            net_data['id'] = result['id']
-            self.type_manager.create_network_segments(context, net_data,
-                                                      tenant_id)
-            self.type_manager.extend_network_dict_provider(context, result)
-            mech_context = driver_context.NetworkContext(self, context,
-                                                         result)
-            self.mechanism_manager.create_network_precommit(mech_context)
-
-            if net_data.get(api.MTU, 0) > 0:
-                res = super(Ml2Plugin, self).update_network(context,
-                    result['id'], {'network': {api.MTU: net_data[api.MTU]}})
-                result[api.MTU] = res.get(api.MTU, 0)
-
-            if az_ext.AZ_HINTS in net_data:
-                self.validate_availability_zones(context, 'network',
-                                                 net_data[az_ext.AZ_HINTS])
-                az_hints = az_ext.convert_az_list_to_string(
-                                                net_data[az_ext.AZ_HINTS])
-                res = super(Ml2Plugin, self).update_network(context,
-                    result['id'], {'network': {az_ext.AZ_HINTS: az_hints}})
-                result[az_ext.AZ_HINTS] = res[az_ext.AZ_HINTS]
-
-            # Update the transparent VLAN if configured
-            if utils.is_extension_supported(self, 'vlan-transparent'):
-                vlt = vlantransparent.get_vlan_transparent(net_data)
-                super(Ml2Plugin, self).update_network(context,
-                    result['id'], {'network': {'vlan_transparent': vlt}})
-                result['vlan_transparent'] = vlt
-
-        return result, mech_context
-
-    def create_network(self, context, network):
-        result, mech_context = self._create_network_db(context, network)
-        try:
-            self.mechanism_manager.create_network_postcommit(mech_context)
-        except ml2_exc.MechanismDriverError:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("mechanism_manager.create_network_postcommit "
-                              "failed, deleting network '%s'"), result['id'])
-                self.delete_network(context, result['id'])
-
-        return result
-
-    def create_network_bulk(self, context, networks):
-        objects = self._create_bulk_ml2(attributes.NETWORK, context, networks)
-        return [obj['result'] for obj in objects]
-
-    def update_network(self, context, id, network):
-        net_data = network[attributes.NETWORK]
-        provider._raise_if_updates_provider_attributes(net_data)
-
-        session = context.session
-        with session.begin(subtransactions=True):
-            original_network = super(Ml2Plugin, self).get_network(context, id)
-            updated_network = super(Ml2Plugin, self).update_network(context,
-                                                                    id,
-                                                                    network)
-            self.extension_manager.process_update_network(context, net_data,
-                                                          updated_network)
-            self._process_l3_update(context, updated_network, net_data)
-            self.type_manager.extend_network_dict_provider(context,
-                                                           updated_network)
-
-            # TODO(QoS): Move out to the extension framework somehow.
-            need_network_update_notify = (
-                qos_consts.QOS_POLICY_ID in net_data and
-                original_network[qos_consts.QOS_POLICY_ID] !=
-                updated_network[qos_consts.QOS_POLICY_ID])
-
-            mech_context = driver_context.NetworkContext(
-                self, context, updated_network,
-                original_network=original_network)
-            self.mechanism_manager.update_network_precommit(mech_context)
-
-        # TODO(apech) - handle errors raised by update_network, potentially
-        # by re-calling update_network with the previous attributes. For
-        # now the error is propagated to the caller, which is expected to
-        # either undo/retry the operation or delete the resource.
-        self.mechanism_manager.update_network_postcommit(mech_context)
-        if need_network_update_notify:
-            self.notifier.network_update(context, updated_network)
-        return updated_network
-
-    def get_network(self, context, id, fields=None):
-        session = context.session
-        with session.begin(subtransactions=True):
-            result = super(Ml2Plugin, self).get_network(context, id, None)
-            self.type_manager.extend_network_dict_provider(context, result)
-
-        return self._fields(result, fields)
-
-    def get_networks(self, context, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None, page_reverse=False):
-        session = context.session
-        with session.begin(subtransactions=True):
-            nets = super(Ml2Plugin,
-                         self).get_networks(context, filters, None, sorts,
-                                            limit, marker, page_reverse)
-            self.type_manager.extend_networks_dict_provider(context, nets)
-
-            nets = self._filter_nets_provider(context, nets, filters)
-
-        return [self._fields(net, fields) for net in nets]
-
-    def _delete_ports(self, context, port_ids):
-        for port_id in port_ids:
-            try:
-                self.delete_port(context, port_id)
-            except (exc.PortNotFound, sa_exc.ObjectDeletedError):
-                # Concurrent port deletion can be triggered by
-                # release_dhcp_port during a concurrent subnet_delete.
-                LOG.info(_LI("Port %s was deleted concurrently"), port_id)
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("Exception auto-deleting port %s"),
-                                  port_id)
-
-    def _delete_subnets(self, context, subnet_ids):
-        for subnet_id in subnet_ids:
-            try:
-                self.delete_subnet(context, subnet_id)
-            except (exc.SubnetNotFound, sa_exc.ObjectDeletedError):
-                LOG.info(_LI("Subnet %s was deleted concurrently"),
-                         subnet_id)
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("Exception auto-deleting subnet %s"),
-                                  subnet_id)
-
-    def delete_network(self, context, id):
-        # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
-        # function is not used because it auto-deletes ports and
-        # subnets from the DB without invoking the derived class's
-        # delete_port() or delete_subnet(), preventing mechanism
-        # drivers from being called. This approach should be revisited
-        # when the API layer is reworked during Icehouse.
-
-        LOG.debug("Deleting network %s", id)
-        session = context.session
-        while True:
-            try:
-                # REVISIT: Serialize this operation with a semaphore
-                # to prevent deadlock waiting to acquire a DB lock
-                # held by another thread in the same process, leading
-                # to 'lock wait timeout' errors.
-                #
-                # Process L3 first, since, depending on the L3 plugin, it may
-                # involve sending RPC notifications, and/or calling delete_port
-                # on this plugin.
-                # Additionally, a rollback may not be enough to undo the
-                # deletion of a floating IP with certain L3 backends.
-                self._process_l3_delete(context, id)
-                # Using query().with_lockmode isn't necessary. Foreign-key
-                # constraints prevent deletion if concurrent creation happens.
-                with session.begin(subtransactions=True):
-                    # Get ports to auto-delete.
-                    ports = (session.query(models_v2.Port).
-                             enable_eagerloads(False).
-                             filter_by(network_id=id).all())
-                    LOG.debug("Ports to auto-delete: %s", ports)
-                    auto_del_owners = (
-                        db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS)
-                    only_auto_del = all(p.device_owner in auto_del_owners
-                                        for p in ports)
-                    if not only_auto_del:
-                        LOG.debug("Tenant-owned ports exist")
-                        raise exc.NetworkInUse(net_id=id)
-
-                    # Get subnets to auto-delete.
-                    subnets = (session.query(models_v2.Subnet).
-                               enable_eagerloads(False).
-                               filter_by(network_id=id).all())
-                    LOG.debug("Subnets to auto-delete: %s", subnets)
-
-                    if not (ports or subnets):
-                        network = self.get_network(context, id)
-                        mech_context = driver_context.NetworkContext(self,
-                                                                     context,
-                                                                     network)
-                        self.mechanism_manager.delete_network_precommit(
-                            mech_context)
-
-                        self.type_manager.release_network_segments(session, id)
-                        record = self._get_network(context, id)
-                        LOG.debug("Deleting network record %s", record)
-                        session.delete(record)
-
-                        # The segment records are deleted via cascade from the
-                        # network record, so explicit removal is not necessary.
-                        LOG.debug("Committing transaction")
-                        break
-
-                    port_ids = [port.id for port in ports]
-                    subnet_ids = [subnet.id for subnet in subnets]
-            except os_db_exception.DBError as e:
-                with excutils.save_and_reraise_exception() as ctxt:
-                    if isinstance(e.inner_exception, sql_exc.IntegrityError):
-                        ctxt.reraise = False
-                        LOG.warning(_LW("A concurrent port creation has "
-                                        "occurred"))
-                        continue
-            self._delete_ports(context, port_ids)
-            self._delete_subnets(context, subnet_ids)
-
-        try:
-            self.mechanism_manager.delete_network_postcommit(mech_context)
-        except ml2_exc.MechanismDriverError:
-            # TODO(apech) - One or more mechanism driver failed to
-            # delete the network.  Ideally we'd notify the caller of
-            # the fact that an error occurred.
-            LOG.error(_LE("mechanism_manager.delete_network_postcommit"
-                          " failed"))
-        self.notifier.network_delete(context, id)
-
-    def _create_subnet_db(self, context, subnet):
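-        # Same precommit pattern as for networks: create the subnet in a
-        # transaction and hand the mech_context back for postcommit.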
-        session = context.session
-        with session.begin(subtransactions=True):
-            result = super(Ml2Plugin, self).create_subnet(context, subnet)
-            self.extension_manager.process_create_subnet(
-                context, subnet[attributes.SUBNET], result)
-            network = self.get_network(context, result['network_id'])
-            mech_context = driver_context.SubnetContext(self, context,
-                                                        result, network)
-            self.mechanism_manager.create_subnet_precommit(mech_context)
-
-        return result, mech_context
-
-    def create_subnet(self, context, subnet):
-        result, mech_context = self._create_subnet_db(context, subnet)
-        try:
-            self.mechanism_manager.create_subnet_postcommit(mech_context)
-        except ml2_exc.MechanismDriverError:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("mechanism_manager.create_subnet_postcommit "
-                              "failed, deleting subnet '%s'"), result['id'])
-                self.delete_subnet(context, result['id'])
-        return result
-
-    def create_subnet_bulk(self, context, subnets):
-        objects = self._create_bulk_ml2(attributes.SUBNET, context, subnets)
-        return [obj['result'] for obj in objects]
-
-    def update_subnet(self, context, id, subnet):
-        session = context.session
-        with session.begin(subtransactions=True):
-            original_subnet = super(Ml2Plugin, self).get_subnet(context, id)
-            updated_subnet = super(Ml2Plugin, self).update_subnet(
-                context, id, subnet)
-            self.extension_manager.process_update_subnet(
-                context, subnet[attributes.SUBNET], updated_subnet)
-            network = self.get_network(context, updated_subnet['network_id'])
-            mech_context = driver_context.SubnetContext(
-                self, context, updated_subnet, network,
-                original_subnet=original_subnet)
-            self.mechanism_manager.update_subnet_precommit(mech_context)
-
-        # TODO(apech) - handle errors raised by update_subnet, potentially
-        # by re-calling update_subnet with the previous attributes. For
-        # now the error is propagated to the caller, which is expected to
-        # either undo/retry the operation or delete the resource.
-        self.mechanism_manager.update_subnet_postcommit(mech_context)
-        return updated_subnet
-
-    def delete_subnet(self, context, id):
-        # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet()
-        # function is not used because it deallocates the subnet's addresses
-        # from ports in the DB without invoking the derived class's
-        # update_port(), preventing mechanism drivers from being called.
-        # This approach should be revisited when the API layer is reworked
-        # during Icehouse.
-
-        LOG.debug("Deleting subnet %s", id)
-        session = context.session
-        while True:
-            with session.begin(subtransactions=True):
-                record = self._get_subnet(context, id)
-                subnet = self._make_subnet_dict(record, None, context=context)
-                qry_allocated = (session.query(models_v2.IPAllocation).
-                                 filter_by(subnet_id=id).
-                                 join(models_v2.Port))
-                is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
-                # Remove network-owned ports and delete IP allocations
-                # for IPv6 addresses that were automatically generated
-                # via SLAAC.
-                if is_auto_addr_subnet:
-                    self._subnet_check_ip_allocations_internal_router_ports(
-                            context, id)
-                else:
-                    qry_allocated = qry_allocated.filter(
-                        models_v2.Port.device_owner.in_(
-                            db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS))
-                allocated = qry_allocated.all()
-                # Delete all the IPAllocations that can be auto-deleted
-                if allocated:
-                    for x in allocated:
-                        session.delete(x)
-                LOG.debug("Ports to auto-deallocate: %s", allocated)
-                # Check if there are more IP allocations, unless
-                # is_auto_address_subnet is True, in which case the check is
-                # unnecessary: it would not only be wasteful for this class
-                # of subnet, but also error-prone, since with the isolation
-                # level set to READ COMMITTED, allocations made concurrently
-                # will be returned by this query.
-                if not is_auto_addr_subnet:
-                    alloc = self._subnet_check_ip_allocations(context, id)
-                    if alloc:
-                        user_alloc = self._subnet_get_user_allocation(
-                            context, id)
-                        if user_alloc:
-                            LOG.info(_LI("Found port (%(port_id)s, %(ip)s) "
-                                         "having IP allocation on subnet "
-                                         "%(subnet)s, cannot delete"),
-                                     {'ip': user_alloc.ip_address,
-                                      'port_id': user_alloc.port_id,
-                                      'subnet': id})
-                            raise exc.SubnetInUse(subnet_id=id)
-                        else:
-                            # An allocation was found on a DHCP port that
-                            # appeared after the auto-deleted ports were
-                            # removed; the whole operation must be restarted.
-                            raise os_db_exception.RetryRequest(
-                                exc.SubnetInUse(subnet_id=id))
-
-                db_base_plugin_v2._check_subnet_not_used(context, id)
-
-                # If allocated is empty, all IPAllocations were correctly
-                # deleted during the previous pass.
-                if not allocated:
-                    network = self.get_network(context, subnet['network_id'])
-                    mech_context = driver_context.SubnetContext(self, context,
-                                                                subnet,
-                                                                network)
-                    self.mechanism_manager.delete_subnet_precommit(
-                        mech_context)
-
-                    LOG.debug("Deleting subnet record")
-                    session.delete(record)
-
-                    # The super(Ml2Plugin, self).delete_subnet() is not
-                    # called, so delete_subnet must be invoked manually
-                    # for pluggable IPAM.
-                    self.ipam.delete_subnet(context, id)
-
-                    LOG.debug("Committing transaction")
-                    break
-
-            for a in allocated:
-                if a.port:
-                    # Call update_port() for each allocation to remove the
-                    # IP from the port and invoke the mechanism drivers.
-                    data = {attributes.PORT:
-                            {'fixed_ips': [{'subnet_id': ip.subnet_id,
-                                            'ip_address': ip.ip_address}
-                                           for ip in a.port.fixed_ips
-                                           if ip.subnet_id != id]}}
-                    try:
-                        self.update_port(context, a.port_id, data)
-                    except exc.PortNotFound:
-                        LOG.debug("Port %s deleted concurrently", a.port_id)
-                    except Exception:
-                        with excutils.save_and_reraise_exception():
-                            LOG.exception(_LE("Exception deleting fixed_ip "
-                                              "from port %s"), a.port_id)
-
-        try:
-            self.mechanism_manager.delete_subnet_postcommit(mech_context)
-        except ml2_exc.MechanismDriverError:
-            # TODO(apech) - One or more mechanism driver failed to
-            # delete the subnet.  Ideally we'd notify the caller of
-            # the fact that an error occurred.
-            LOG.error(_LE("mechanism_manager.delete_subnet_postcommit failed"))
-
-    # TODO(yalei) - this will be simplified once security groups and
-    # address pairs are converted to extension drivers too.
-    def _portsec_ext_port_create_processing(self, context, port_data, port):
-        attrs = port[attributes.PORT]
-        port_security = ((port_data.get(psec.PORTSECURITY) is None) or
-                         port_data[psec.PORTSECURITY])
-
-        # allowed address pair checks
-        if self._check_update_has_allowed_address_pairs(port):
-            if not port_security:
-                raise addr_pair.AddressPairAndPortSecurityRequired()
-        else:
-            # remove ATTR_NOT_SPECIFIED
-            attrs[addr_pair.ADDRESS_PAIRS] = []
-
-        if port_security:
-            self._ensure_default_security_group_on_port(context, port)
-        elif self._check_update_has_security_groups(port):
-            raise psec.PortSecurityAndIPRequiredForSecurityGroups()
-
-    def _create_port_db(self, context, port):
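-        # Create the port together with its security groups, address
-        # pairs, DHCP options and binding record, and run
-        # create_port_precommit, all in one transaction; DBDuplicateEntry
-        # is converted by exc_to_retry so the operation can be retried.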
-        attrs = port[attributes.PORT]
-        if not attrs.get('status'):
-            attrs['status'] = const.PORT_STATUS_DOWN
-
-        session = context.session
-        with db_api.exc_to_retry(os_db_exception.DBDuplicateEntry),\
-                session.begin(subtransactions=True):
-            dhcp_opts = attrs.get(edo_ext.EXTRADHCPOPTS, [])
-            result = super(Ml2Plugin, self).create_port(context, port)
-            self.extension_manager.process_create_port(context, attrs, result)
-            self._portsec_ext_port_create_processing(context, result, port)
-
-            # Security group IDs must only be retrieved after the port
-            # security check above has validated the security groups.
-            sgids = self._get_security_groups_on_port(context, port)
-            self._process_port_create_security_group(context, result, sgids)
-            network = self.get_network(context, result['network_id'])
-            binding = db.add_port_binding(session, result['id'])
-            mech_context = driver_context.PortContext(self, context, result,
-                                                      network, binding, None)
-            self._process_port_binding(mech_context, attrs)
-
-            result[addr_pair.ADDRESS_PAIRS] = (
-                self._process_create_allowed_address_pairs(
-                    context, result,
-                    attrs.get(addr_pair.ADDRESS_PAIRS)))
-            self._process_port_create_extra_dhcp_opts(context, result,
-                                                      dhcp_opts)
-            self.mechanism_manager.create_port_precommit(mech_context)
-
-        return result, mech_context
-
-    def create_port(self, context, port):
-        result, mech_context = self._create_port_db(context, port)
-        # notify any plugin that is interested in port create events
-        kwargs = {'context': context, 'port': result}
-        registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs)
-
-        try:
-            self.mechanism_manager.create_port_postcommit(mech_context)
-        except ml2_exc.MechanismDriverError:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("mechanism_manager.create_port_postcommit "
-                              "failed, deleting port '%s'"), result['id'])
-                self.delete_port(context, result['id'])
-
-        # REVISIT(rkukura): Is there any point in calling this before
-        # a binding has been successfully established?
-        self.notify_security_groups_member_updated(context, result)
-
-        try:
-            bound_context = self._bind_port_if_needed(mech_context)
-        except ml2_exc.MechanismDriverError:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("_bind_port_if_needed "
-                              "failed, deleting port '%s'"), result['id'])
-                self.delete_port(context, result['id'])
-
-        return bound_context.current
-
-    def create_port_bulk(self, context, ports):
-        objects = self._create_bulk_ml2(attributes.PORT, context, ports)
-
-        # REVISIT(rkukura): Is there any point in calling this before
-        # a binding has been successfully established?
-        results = [obj['result'] for obj in objects]
-        self.notify_security_groups_member_updated_bulk(context, results)
-
-        for obj in objects:
-            attrs = obj['attributes']
-            if attrs and attrs.get(portbindings.HOST_ID):
-                kwargs = {'context': context, 'port': obj['result']}
-                registry.notify(
-                    resources.PORT, events.AFTER_CREATE, self, **kwargs)
-
-        try:
-            for obj in objects:
-                obj['bound_context'] = self._bind_port_if_needed(
-                    obj['mech_context'])
-            return [obj['bound_context'].current for obj in objects]
-        except ml2_exc.MechanismDriverError:
-            with excutils.save_and_reraise_exception():
-                resource_ids = [res['result']['id'] for res in objects]
-                LOG.error(_LE("_bind_port_if_needed failed. "
-                              "Deleting all ports from create bulk '%s'"),
-                          resource_ids)
-                self._delete_objects(context, attributes.PORT, objects)
-
-    # TODO(yalei) - this will be simplified once security groups and
-    # address pairs are converted to extension drivers too.
-    def _portsec_ext_port_update_processing(self, updated_port, context, port,
-                                            id):
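-        # With port security disabled, reject updates that add address
-        # pairs or security groups, and reject ports that still have
-        # address pairs or security groups associated with them.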
-        port_security = ((updated_port.get(psec.PORTSECURITY) is None) or
-                         updated_port[psec.PORTSECURITY])
-
-        if port_security:
-            return
-
-        # check the address-pairs
-        if self._check_update_has_allowed_address_pairs(port):
-            # the request has address pairs
-            raise addr_pair.AddressPairAndPortSecurityRequired()
-        elif not self._check_update_deletes_allowed_address_pairs(port):
-            # not a request for deleting the address-pairs
-            updated_port[addr_pair.ADDRESS_PAIRS] = (
-                    self.get_allowed_address_pairs(context, id))
-
-            # Check whether address pairs already exist in the DB; once
-            # address pairs are moved into an extension driver, this can
-            # be refined.
-            if updated_port[addr_pair.ADDRESS_PAIRS]:
-                raise addr_pair.AddressPairAndPortSecurityRequired()
-
-        # Check whether the update adds or modifies security groups;
-        # that is not allowed here, since port security is disabled.
-        if self._check_update_has_security_groups(port):
-            raise psec.PortSecurityAndIPRequiredForSecurityGroups()
-        elif not self._check_update_deletes_security_groups(port):
-            # Update did not have security groups passed in. Check
-            # that port does not have any security groups already on it.
-            filters = {'port_id': [id]}
-            security_groups = (
-                super(Ml2Plugin, self)._get_port_security_group_bindings(
-                    context, filters))
-            if security_groups:
-                raise psec.PortSecurityPortHasSecurityGroup()
-
-    def update_port(self, context, id, port):
-        attrs = port[attributes.PORT]
-        need_port_update_notify = False
-        session = context.session
-        bound_mech_contexts = []
-
-        with db_api.exc_to_retry(os_db_exception.DBDuplicateEntry),\
-                session.begin(subtransactions=True):
-            port_db, binding = db.get_locked_port_and_binding(session, id)
-            if not port_db:
-                raise exc.PortNotFound(port_id=id)
-            mac_address_updated = self._check_mac_update_allowed(
-                port_db, attrs, binding)
-            need_port_update_notify |= mac_address_updated
-            original_port = self._make_port_dict(port_db)
-            updated_port = super(Ml2Plugin, self).update_port(context, id,
-                                                              port)
-            self.extension_manager.process_update_port(context, attrs,
-                                                       updated_port)
-            self._portsec_ext_port_update_processing(updated_port, context,
-                                                     port, id)
-
-            if (psec.PORTSECURITY in attrs) and (
-                        original_port[psec.PORTSECURITY] !=
-                        updated_port[psec.PORTSECURITY]):
-                need_port_update_notify = True
-            # TODO(QoS): Move out to the extension framework somehow.
-            # Follow https://review.openstack.org/#/c/169223 for a solution.
-            if (qos_consts.QOS_POLICY_ID in attrs and
-                    original_port[qos_consts.QOS_POLICY_ID] !=
-                    updated_port[qos_consts.QOS_POLICY_ID]):
-                need_port_update_notify = True
-
-            if addr_pair.ADDRESS_PAIRS in attrs:
-                need_port_update_notify |= (
-                    self.update_address_pairs_on_port(context, id, port,
-                                                      original_port,
-                                                      updated_port))
-            need_port_update_notify |= self.update_security_group_on_port(
-                context, id, port, original_port, updated_port)
-            network = self.get_network(context, original_port['network_id'])
-            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
-                context, id, port, updated_port)
-            levels = db.get_binding_levels(session, id, binding.host)
-            mech_context = driver_context.PortContext(
-                self, context, updated_port, network, binding, levels,
-                original_port=original_port)
-            need_port_update_notify |= self._process_port_binding(
-                mech_context, attrs)
-            # For DVR router interface ports we need to retrieve the
-            # DVR port binding context instead of the normal port context:
-            # the normal port binding context does not carry the port
-            # status information that l2pop requires to process the
-            # postcommit events.
-
-            # NOTE: Sometimes during the update_port call, the DVR router
-            # interface port may not have a port binding, so we cannot
-            # build a generic binding list that addresses both the
-            # DVR and non-DVR cases here.
-            # TODO(Swami): This code needs to be revisited.
-            if port_db['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-                dvr_binding_list = db.get_dvr_port_bindings(session, id)
-                for dvr_binding in dvr_binding_list:
-                    levels = db.get_binding_levels(session, id,
-                                                   dvr_binding.host)
-                    dvr_mech_context = driver_context.PortContext(
-                        self, context, updated_port, network,
-                        dvr_binding, levels, original_port=original_port)
-                    self.mechanism_manager.update_port_precommit(
-                        dvr_mech_context)
-                    bound_mech_contexts.append(dvr_mech_context)
-            else:
-                self.mechanism_manager.update_port_precommit(mech_context)
-                bound_mech_contexts.append(mech_context)
-
-        # Notifications must be sent after the above transaction is complete
-        kwargs = {
-            'context': context,
-            'port': updated_port,
-            'mac_address_updated': mac_address_updated,
-            'original_port': original_port,
-        }
-        registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs)
-
-        # Note that DVR Interface ports will have bindings on
-        # multiple hosts, and so will have multiple mech_contexts,
-        # while other ports typically have just one.
-        # Since bound_mech_contexts has both the DVR and non-DVR
-        # contexts we can manage just with a single for loop.
-        try:
-            for mech_context in bound_mech_contexts:
-                self.mechanism_manager.update_port_postcommit(
-                    mech_context)
-        except ml2_exc.MechanismDriverError:
-            LOG.error(_LE("mechanism_manager.update_port_postcommit "
-                          "failed for port %s"), id)
-
-        self.check_and_notify_security_group_member_changed(
-            context, original_port, updated_port)
-        need_port_update_notify |= self.is_security_group_member_updated(
-            context, original_port, updated_port)
-
-        if original_port['admin_state_up'] != updated_port['admin_state_up']:
-            need_port_update_notify = True
-        # NOTE: For DVR ports, the port binding is done after router
-        # scheduling, when sync_routers is called, so the call below may
-        # not be required for DVR routed interfaces. Still, since we do
-        # not always have the mech_context for DVR router interfaces, we
-        # just pass the port context through and return it, so as not to
-        # disturb other methods that expect a return value.
-        bound_context = self._bind_port_if_needed(
-            mech_context,
-            allow_notify=True,
-            need_notify=need_port_update_notify)
-        return bound_context.current
-
-    def _process_dvr_port_binding(self, mech_context, context, attrs):
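-        # Reset an existing binding to the unbound state (clearing any
-        # binding levels) and record the host and router id from the
-        # request so that a fresh binding attempt can take place.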
-        session = mech_context._plugin_context.session
-        binding = mech_context._binding
-        port = mech_context.current
-        port_id = port['id']
-
-        if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
-            binding.vif_details = ''
-            binding.vif_type = portbindings.VIF_TYPE_UNBOUND
-            if binding.host:
-                db.clear_binding_levels(session, port_id, binding.host)
-            binding.host = ''
-
-        self._update_port_dict_binding(port, binding)
-        binding.host = attrs and attrs.get(portbindings.HOST_ID)
-        binding.router_id = attrs and attrs.get('device_id')
-
-    def update_dvr_port_binding(self, context, id, port):
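-        # Ensure a DVR port binding exists for the given host and
-        # (re)trigger binding when there is no binding yet, the previous
-        # attempt failed, or the port moved to a different router.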
-        attrs = port[attributes.PORT]
-
-        host = attrs and attrs.get(portbindings.HOST_ID)
-        host_set = attributes.is_attr_set(host)
-
-        if not host_set:
-            LOG.error(_LE("No Host supplied to bind DVR Port %s"), id)
-            return
-
-        session = context.session
-        binding = db.get_dvr_port_binding_by_host(session, id, host)
-        device_id = attrs and attrs.get('device_id')
-        router_id = binding and binding.get('router_id')
-        update_required = (not binding or
-            binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or
-            router_id != device_id)
-        if update_required:
-            with session.begin(subtransactions=True):
-                try:
-                    orig_port = super(Ml2Plugin, self).get_port(context, id)
-                except exc.PortNotFound:
-                    LOG.debug("DVR Port %s has been deleted concurrently", id)
-                    return
-                if not binding:
-                    binding = db.ensure_dvr_port_binding(
-                        session, id, host, router_id=device_id)
-                network = self.get_network(context, orig_port['network_id'])
-                levels = db.get_binding_levels(session, id, host)
-                mech_context = driver_context.PortContext(self,
-                    context, orig_port, network,
-                    binding, levels, original_port=orig_port)
-                self._process_dvr_port_binding(mech_context, context, attrs)
-            self._bind_port_if_needed(mech_context)
-
-    def _pre_delete_port(self, context, port_id, port_check):
-        """Do some preliminary operations before deleting the port."""
-        LOG.debug("Deleting port %s", port_id)
-        try:
-            # notify interested parties of imminent port deletion;
-            # a failure here prevents the operation from happening
-            kwargs = {
-                'context': context,
-                'port_id': port_id,
-                'port_check': port_check
-            }
-            registry.notify(
-                resources.PORT, events.BEFORE_DELETE, self, **kwargs)
-        except exceptions.CallbackFailure as e:
-            # NOTE(armax): preserve old check's behavior
-            if len(e.errors) == 1:
-                raise e.errors[0].error
-            raise exc.ServicePortInUse(port_id=port_id, reason=e)
-
-    def delete_port(self, context, id, l3_port_check=True):
-        self._pre_delete_port(context, id, l3_port_check)
-        # TODO(armax): get rid of the l3 dependency in the with block
-        removed_routers = []
-        router_ids = []
-        l3plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        is_dvr_enabled = utils.is_extension_supported(
-            l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)
-
-        session = context.session
-        with session.begin(subtransactions=True):
-            port_db, binding = db.get_locked_port_and_binding(session, id)
-            if not port_db:
-                LOG.debug("The port '%s' was deleted", id)
-                return
-            port = self._make_port_dict(port_db)
-
-            network = self.get_network(context, port['network_id'])
-            bound_mech_contexts = []
-            device_owner = port['device_owner']
-            if device_owner == const.DEVICE_OWNER_DVR_INTERFACE:
-                bindings = db.get_dvr_port_bindings(context.session, id)
-                for bind in bindings:
-                    levels = db.get_binding_levels(context.session, id,
-                                                   bind.host)
-                    mech_context = driver_context.PortContext(
-                        self, context, port, network, bind, levels)
-                    self.mechanism_manager.delete_port_precommit(mech_context)
-                    bound_mech_contexts.append(mech_context)
-            else:
-                levels = db.get_binding_levels(context.session, id,
-                                               binding.host)
-                mech_context = driver_context.PortContext(
-                    self, context, port, network, binding, levels)
-                if is_dvr_enabled and utils.is_dvr_serviced(device_owner):
-                    removed_routers = l3plugin.dvr_deletens_if_no_port(
-                        context, id)
-                self.mechanism_manager.delete_port_precommit(mech_context)
-                bound_mech_contexts.append(mech_context)
-            if l3plugin:
-                router_ids = l3plugin.disassociate_floatingips(
-                    context, id, do_notify=False)
-
-            LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s",
-                      {"port_id": id, "owner": device_owner})
-            super(Ml2Plugin, self).delete_port(context, id)
-
-        self._post_delete_port(
-            context, port, router_ids, removed_routers, bound_mech_contexts)
-
-    def _post_delete_port(
-        self, context, port, router_ids, removed_routers, bound_mech_contexts):
-        kwargs = {
-            'context': context,
-            'port': port,
-            'router_ids': router_ids,
-            'removed_routers': removed_routers
-        }
-        registry.notify(resources.PORT, events.AFTER_DELETE, self, **kwargs)
-        try:
-            # Note that DVR Interface ports will have bindings on
-            # multiple hosts, and so will have multiple mech_contexts,
-            # while other ports typically have just one.
-            for mech_context in bound_mech_contexts:
-                self.mechanism_manager.delete_port_postcommit(mech_context)
-        except ml2_exc.MechanismDriverError:
-            # TODO(apech) - One or more mechanism driver failed to
-            # delete the port.  Ideally we'd notify the caller of the
-            # fact that an error occurred.
-            LOG.error(_LE("mechanism_manager.delete_port_postcommit failed for"
-                          " port %s"), port['id'])
-        self.notifier.port_delete(context, port['id'])
-        self.notify_security_groups_member_updated(context, port)
-
-    def get_bound_port_context(self, plugin_context, port_id, host=None,
-                               cached_networks=None):
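-        # Look the port up by a possibly truncated UUID, build a
-        # PortContext from its binding (regular or DVR, depending on
-        # the device owner) and trigger binding if still needed.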
-        session = plugin_context.session
-        with session.begin(subtransactions=True):
-            try:
-                port_db = (session.query(models_v2.Port).
-                           enable_eagerloads(False).
-                           filter(models_v2.Port.id.startswith(port_id)).
-                           one())
-            except sa_exc.NoResultFound:
-                LOG.debug("No ports have port_id starting with %s",
-                          port_id)
-                return
-            except sa_exc.MultipleResultsFound:
-                LOG.error(_LE("Multiple ports have port_id starting with %s"),
-                          port_id)
-                return
-            port = self._make_port_dict(port_db)
-            network = (cached_networks or {}).get(port['network_id'])
-
-            if not network:
-                network = self.get_network(plugin_context, port['network_id'])
-
-            if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-                binding = db.get_dvr_port_binding_by_host(
-                    session, port['id'], host)
-                if not binding:
-                    LOG.error(_LE("Binding info for DVR port %s not found"),
-                              port_id)
-                    return None
-                levels = db.get_binding_levels(session, port_db.id, host)
-                port_context = driver_context.PortContext(
-                    self, plugin_context, port, network, binding, levels)
-            else:
-                # Since eager loads are disabled in the port_db query,
-                # the related port_binding attribute may disappear during
-                # concurrent port deletion.
-                # This is not an error condition.
-                binding = port_db.port_binding
-                if not binding:
-                    LOG.info(_LI("Binding info for port %s was not found, "
-                                 "it might have been deleted already."),
-                             port_id)
-                    return
-                levels = db.get_binding_levels(session, port_db.id,
-                                               port_db.port_binding.host)
-                port_context = driver_context.PortContext(
-                    self, plugin_context, port, network, binding, levels)
-
-        return self._bind_port_if_needed(port_context)
-
-    @oslo_db_api.wrap_db_retry(
-        max_retries=db_api.MAX_RETRIES, retry_on_request=True,
-        exception_checker=lambda e: isinstance(e, (sa_exc.StaleDataError,
-                                                   os_db_exception.DBDeadlock))
-    )
-    def update_port_status(self, context, port_id, status, host=None,
-                           network=None):
-        """
-        Returns port_id (non-truncated uuid) if the port exists.
-        Otherwise returns None.
-        network can be passed in to avoid another get_network call if
-        one was already performed by the caller.
-        """
-        updated = False
-        session = context.session
-        with session.begin(subtransactions=True):
-            port = db.get_port(session, port_id)
-            if not port:
-                LOG.debug("Port %(port)s update to %(val)s by agent not found",
-                          {'port': port_id, 'val': status})
-                return None
-            if (port.status != status and
-                port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE):
-                original_port = self._make_port_dict(port)
-                port.status = status
-                updated_port = self._make_port_dict(port)
-                network = network or self.get_network(
-                    context, original_port['network_id'])
-                levels = db.get_binding_levels(session, port.id,
-                                               port.port_binding.host)
-                mech_context = driver_context.PortContext(
-                    self, context, updated_port, network, port.port_binding,
-                    levels, original_port=original_port)
-                self.mechanism_manager.update_port_precommit(mech_context)
-                updated = True
-            elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-                binding = db.get_dvr_port_binding_by_host(
-                    session, port['id'], host)
-                if not binding:
-                    return
-                binding['status'] = status
-                binding.update(binding)
-                updated = True
-
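-        # For DVR interface ports, recompute the aggregate port status
-        # from all of the port's host bindings in a second transaction.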
-        if (updated and
-            port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
-            with session.begin(subtransactions=True):
-                port = db.get_port(session, port_id)
-                if not port:
-                    LOG.warning(_LW("Port %s not found during update"),
-                                port_id)
-                    return
-                original_port = self._make_port_dict(port)
-                network = network or self.get_network(
-                    context, original_port['network_id'])
-                port.status = db.generate_dvr_port_status(session, port['id'])
-                updated_port = self._make_port_dict(port)
-                levels = db.get_binding_levels(session, port_id, host)
-                mech_context = (driver_context.PortContext(
-                    self, context, updated_port, network,
-                    binding, levels, original_port=original_port))
-                self.mechanism_manager.update_port_precommit(mech_context)
-
-        if updated:
-            self.mechanism_manager.update_port_postcommit(mech_context)
-
-        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-            db.delete_dvr_port_binding_if_stale(session, binding)
-
-        return port['id']
-
-    def port_bound_to_host(self, context, port_id, host):
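-        # Return True if the port is bound to the given host; DVR ports
-        # are checked against each of their per-host bindings.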
-        port = db.get_port(context.session, port_id)
-        if not port:
-            LOG.debug("No Port match for: %s", port_id)
-            return False
-        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-            bindings = db.get_dvr_port_bindings(context.session, port_id)
-            for b in bindings:
-                if b.host == host:
-                    return True
-            LOG.debug("No binding found for DVR port %s", port['id'])
-            return False
-        else:
-            port_host = db.get_port_binding_host(context.session, port_id)
-            return (port_host == host)
-
-    def get_ports_from_devices(self, context, devices):
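-        # Resolve agent-reported device names to ports (including their
-        # security group info) and annotate each port with the device
-        # name it was requested under.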
-        port_ids_to_devices = dict(
-            (self._device_to_port_id(context, device), device)
-            for device in devices)
-        port_ids = list(port_ids_to_devices.keys())
-        ports = db.get_ports_and_sgs(context, port_ids)
-        for port in ports:
-            # map back to original requested id
-            port_id = next((port_id for port_id in port_ids
-                           if port['id'].startswith(port_id)), None)
-            port['device'] = port_ids_to_devices.get(port_id)
-
-        return ports
-
-    @staticmethod
-    def _device_to_port_id(context, device):
-        # REVISIT(rkukura): Consider calling into MechanismDrivers to
-        # process device names, or having MechanismDrivers supply list
-        # of device prefixes to strip.
-        for prefix in const.INTERFACE_PREFIXES:
-            if device.startswith(prefix):
-                return device[len(prefix):]
-        # REVISIT(irenab): Consider calling into bound MD to
-        # handle the get_device_details RPC
-        if not uuidutils.is_uuid_like(device):
-            port = db.get_port_from_device_mac(context, device)
-            if port:
-                return port.id
-        return device
-
-    def get_workers(self):
-        return self.mechanism_manager.get_workers()
diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py
deleted file mode 100644 (file)
index 2217a7f..0000000
+++ /dev/null
@@ -1,324 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log
-import oslo_messaging
-from sqlalchemy.orm import exc
-
-from neutron._i18n import _LE, _LW
-from neutron.api.rpc.handlers import dvr_rpc
-from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
-from neutron.callbacks import events
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants as n_const
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.extensions import portbindings
-from neutron.extensions import portsecurity as psec
-from neutron import manager
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers import type_tunnel
-from neutron.services.qos import qos_consts
-# REVISIT(kmestery): Allow the type and mechanism drivers to supply the
-# mixins and eventually remove the direct dependencies on type_tunnel.
-
-LOG = log.getLogger(__name__)
-
-
-class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
-
-    # history
-    #   1.0 Initial version (from openvswitch/linuxbridge)
-    #   1.1 Support Security Group RPC
-    #   1.2 Support get_devices_details_list
-    #   1.3 get_device_details rpc signature upgrade to obtain 'host' and
-    #       return value to include fixed_ips and device_owner for
-    #       the device port
-    #   1.4 tunnel_sync rpc signature upgrade to obtain 'host'
-    #   1.5 Support update_device_list and
-    #       get_devices_details_list_and_failed_devices
-    target = oslo_messaging.Target(version='1.5')
-
-    def __init__(self, notifier, type_manager):
-        self.setup_tunnel_callback_mixin(notifier, type_manager)
-        super(RpcCallbacks, self).__init__()
-
-    def get_device_details(self, rpc_context, **kwargs):
-        """Agent requests device details."""
-        agent_id = kwargs.get('agent_id')
-        device = kwargs.get('device')
-        host = kwargs.get('host')
-        # cached networks are used to reduce the number of network DB
-        # calls; for server-internal usage only
-        cached_networks = kwargs.get('cached_networks')
-        LOG.debug("Device %(device)s details requested by agent "
-                  "%(agent_id)s with host %(host)s",
-                  {'device': device, 'agent_id': agent_id, 'host': host})
-
-        plugin = manager.NeutronManager.get_plugin()
-        port_id = plugin._device_to_port_id(rpc_context, device)
-        port_context = plugin.get_bound_port_context(rpc_context,
-                                                     port_id,
-                                                     host,
-                                                     cached_networks)
-        if not port_context:
-            LOG.warning(_LW("Device %(device)s requested by agent "
-                            "%(agent_id)s not found in database"),
-                        {'device': device, 'agent_id': agent_id})
-            return {'device': device}
-
-        segment = port_context.bottom_bound_segment
-        port = port_context.current
-        # caching information about networks for future use
-        if cached_networks is not None:
-            if port['network_id'] not in cached_networks:
-                cached_networks[port['network_id']] = (
-                    port_context.network.current)
-
-        if not segment:
-            LOG.warning(_LW("Device %(device)s requested by agent "
-                            "%(agent_id)s on network %(network_id)s not "
-                            "bound, vif_type: %(vif_type)s"),
-                        {'device': device,
-                         'agent_id': agent_id,
-                         'network_id': port['network_id'],
-                         'vif_type': port_context.vif_type})
-            return {'device': device}
-
-        if (not host or host == port_context.host):
-            new_status = (n_const.PORT_STATUS_BUILD if port['admin_state_up']
-                          else n_const.PORT_STATUS_DOWN)
-            if port['status'] != new_status:
-                plugin.update_port_status(rpc_context,
-                                          port_id,
-                                          new_status,
-                                          host,
-                                          port_context.network.current)
-
-        network_qos_policy_id = port_context.network._network.get(
-            qos_consts.QOS_POLICY_ID)
-        entry = {'device': device,
-                 'network_id': port['network_id'],
-                 'port_id': port['id'],
-                 'mac_address': port['mac_address'],
-                 'admin_state_up': port['admin_state_up'],
-                 'network_type': segment[api.NETWORK_TYPE],
-                 'segmentation_id': segment[api.SEGMENTATION_ID],
-                 'physical_network': segment[api.PHYSICAL_NETWORK],
-                 'fixed_ips': port['fixed_ips'],
-                 'device_owner': port['device_owner'],
-                 'allowed_address_pairs': port['allowed_address_pairs'],
-                 'port_security_enabled': port.get(psec.PORTSECURITY, True),
-                 'qos_policy_id': port.get(qos_consts.QOS_POLICY_ID),
-                 'network_qos_policy_id': network_qos_policy_id,
-                 'profile': port[portbindings.PROFILE]}
-        if 'security_groups' in port:
-            entry['security_groups'] = port['security_groups']
-        LOG.debug("Returning: %s", entry)
-        return entry
-
-    def get_devices_details_list(self, rpc_context, **kwargs):
-        # cached networks are used to reduce the number of network DB calls
-        cached_networks = {}
-        return [
-            self.get_device_details(
-                rpc_context,
-                device=device,
-                cached_networks=cached_networks,
-                **kwargs
-            )
-            for device in kwargs.pop('devices', [])
-        ]
-
-    def get_devices_details_list_and_failed_devices(self,
-                                                    rpc_context,
-                                                    **kwargs):
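-        # Like get_devices_details_list, but devices whose lookup raises
-        # an exception are collected into failed_devices instead of
-        # failing the whole call.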
-        devices = []
-        failed_devices = []
-        cached_networks = {}
-        for device in kwargs.pop('devices', []):
-            try:
-                devices.append(self.get_device_details(
-                               rpc_context,
-                               device=device,
-                               cached_networks=cached_networks,
-                               **kwargs))
-            except Exception:
-                LOG.error(_LE("Failed to get details for device %s"),
-                          device)
-                failed_devices.append(device)
-
-        return {'devices': devices,
-                'failed_devices': failed_devices}
-
-    def update_device_down(self, rpc_context, **kwargs):
-        """Device no longer exists on agent."""
-        # TODO(garyk) - live migration and port status
-        agent_id = kwargs.get('agent_id')
-        device = kwargs.get('device')
-        host = kwargs.get('host')
-        LOG.debug("Device %(device)s no longer exists at agent "
-                  "%(agent_id)s",
-                  {'device': device, 'agent_id': agent_id})
-        plugin = manager.NeutronManager.get_plugin()
-        port_id = plugin._device_to_port_id(rpc_context, device)
-        port_exists = True
-        if (host and not plugin.port_bound_to_host(rpc_context,
-                                                   port_id, host)):
-            LOG.debug("Device %(device)s not bound to the"
-                      " agent host %(host)s",
-                      {'device': device, 'host': host})
-            return {'device': device,
-                    'exists': port_exists}
-
-        try:
-            port_exists = bool(plugin.update_port_status(
-                rpc_context, port_id, n_const.PORT_STATUS_DOWN, host))
-        except exc.StaleDataError:
-            port_exists = False
-            LOG.debug("delete_port and update_device_down are being executed "
-                      "concurrently. Ignoring StaleDataError.")
-
-        return {'device': device,
-                'exists': port_exists}
-
-    def update_device_up(self, rpc_context, **kwargs):
-        """Device is up on agent."""
-        agent_id = kwargs.get('agent_id')
-        device = kwargs.get('device')
-        host = kwargs.get('host')
-        LOG.debug("Device %(device)s up at agent %(agent_id)s",
-                  {'device': device, 'agent_id': agent_id})
-        plugin = manager.NeutronManager.get_plugin()
-        port_id = plugin._device_to_port_id(rpc_context, device)
-        if (host and not plugin.port_bound_to_host(rpc_context,
-                                                   port_id, host)):
-            LOG.debug("Device %(device)s not bound to the"
-                      " agent host %(host)s",
-                      {'device': device, 'host': host})
-            return
-
-        port_id = plugin.update_port_status(rpc_context, port_id,
-                                            n_const.PORT_STATUS_ACTIVE,
-                                            host)
-        try:
-            # NOTE(armax): it's best to remove all objects from the
-            # session, before we try to retrieve the new port object
-            rpc_context.session.expunge_all()
-            port = plugin._get_port(rpc_context, port_id)
-        except exceptions.PortNotFound:
-            LOG.debug('Port %s not found during update', port_id)
-        else:
-            kwargs = {
-                'context': rpc_context,
-                'port': port,
-                'update_device_up': True
-            }
-            registry.notify(
-                resources.PORT, events.AFTER_UPDATE, plugin, **kwargs)
-
-    def update_device_list(self, rpc_context, **kwargs):
-        devices_up = []
-        failed_devices_up = []
-        devices_down = []
-        failed_devices_down = []
-        devices = kwargs.get('devices_up')
-        if devices:
-            for device in devices:
-                try:
-                    self.update_device_up(
-                        rpc_context,
-                        device=device,
-                        **kwargs)
-                except Exception:
-                    failed_devices_up.append(device)
-                    LOG.error(_LE("Failed to update device %s up"), device)
-                else:
-                    devices_up.append(device)
-
-        devices = kwargs.get('devices_down')
-        if devices:
-            for device in devices:
-                try:
-                    dev = self.update_device_down(
-                        rpc_context,
-                        device=device,
-                        **kwargs)
-                except Exception:
-                    failed_devices_down.append(device)
-                    LOG.error(_LE("Failed to update device %s down"), device)
-                else:
-                    devices_down.append(dev)
-
-        return {'devices_up': devices_up,
-                'failed_devices_up': failed_devices_up,
-                'devices_down': devices_down,
-                'failed_devices_down': failed_devices_down}
-
-
-class AgentNotifierApi(dvr_rpc.DVRAgentRpcApiMixin,
-                       sg_rpc.SecurityGroupAgentRpcApiMixin,
-                       type_tunnel.TunnelAgentRpcApiMixin):
-    """Agent side of the openvswitch rpc API.
-
-    API version history:
-        1.0 - Initial version.
-        1.1 - Added get_active_networks_info, create_dhcp_port,
-              update_dhcp_port, and removed get_dhcp_port methods.
-        1.4 - Added network_update
-    """
-
-    def __init__(self, topic):
-        self.topic = topic
-        self.topic_network_delete = topics.get_topic_name(topic,
-                                                          topics.NETWORK,
-                                                          topics.DELETE)
-        self.topic_port_update = topics.get_topic_name(topic,
-                                                       topics.PORT,
-                                                       topics.UPDATE)
-        self.topic_port_delete = topics.get_topic_name(topic,
-                                                       topics.PORT,
-                                                       topics.DELETE)
-        self.topic_network_update = topics.get_topic_name(topic,
-                                                          topics.NETWORK,
-                                                          topics.UPDATE)
-
-        target = oslo_messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
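-    # Illustrative sketch (not part of the original code): assuming the
-    # usual constants from neutron.common.topics (AGENT, PORT, UPDATE...),
-    # the composed topic names look like:
-    #
-    #     notifier = AgentNotifierApi(topics.AGENT)
-    #     notifier.topic_port_update     # "q-agent-notifier-port-update"
-    #     notifier.topic_network_delete  # "q-agent-notifier-network-delete"
-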
-    def network_delete(self, context, network_id):
-        cctxt = self.client.prepare(topic=self.topic_network_delete,
-                                    fanout=True)
-        cctxt.cast(context, 'network_delete', network_id=network_id)
-
-    def port_update(self, context, port, network_type, segmentation_id,
-                    physical_network):
-        cctxt = self.client.prepare(topic=self.topic_port_update,
-                                    fanout=True)
-        cctxt.cast(context, 'port_update', port=port,
-                   network_type=network_type, segmentation_id=segmentation_id,
-                   physical_network=physical_network)
-
-    def port_delete(self, context, port_id):
-        cctxt = self.client.prepare(topic=self.topic_port_delete,
-                                    fanout=True)
-        cctxt.cast(context, 'port_delete', port_id=port_id)
-
-    def network_update(self, context, network):
-        cctxt = self.client.prepare(topic=self.topic_network_update,
-                                    fanout=True, version='1.4')
-        cctxt.cast(context, 'network_update', network=network)
diff --git a/neutron/policy.py b/neutron/policy.py
deleted file mode 100644 (file)
index ed68056..0000000
+++ /dev/null
@@ -1,412 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import re
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_policy import policy
-from oslo_utils import excutils
-from oslo_utils import importutils
-import six
-
-from neutron._i18n import _, _LE, _LW
-from neutron.api.v2 import attributes
-from neutron.common import constants as const
-from neutron.common import exceptions
-
-
-LOG = logging.getLogger(__name__)
-
-_ENFORCER = None
-ADMIN_CTX_POLICY = 'context_is_admin'
-ADVSVC_CTX_POLICY = 'context_is_advsvc'
-
-
-def reset():
-    global _ENFORCER
-    if _ENFORCER:
-        _ENFORCER.clear()
-        _ENFORCER = None
-
-
-def init(conf=cfg.CONF, policy_file=None):
-    """Init an instance of the Enforcer class."""
-
-    global _ENFORCER
-    if not _ENFORCER:
-        _ENFORCER = policy.Enforcer(conf, policy_file=policy_file)
-        _ENFORCER.load_rules(True)
-
-
-def refresh(policy_file=None):
-    """Reset policy and init a new instance of Enforcer."""
-    reset()
-    init(policy_file=policy_file)
-
-
-def get_resource_and_action(action, pluralized=None):
-    """Extract resource and action (write, read) from api operation."""
-    data = action.split(':', 1)[0].split('_', 1)
-    resource = pluralized or ("%ss" % data[-1])
-    return (resource, data[0] != 'get')
-
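-# For instance (illustrative only, not part of the original module):
-#
-#     get_resource_and_action('create_network')
-#     # -> ('networks', True), i.e. a write on the networks resource
-#     get_resource_and_action('get_port')
-#     # -> ('ports', False), i.e. a read
-#     get_resource_and_action('update_firewall_policy', 'firewall_policies')
-#     # -> ('firewall_policies', True)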
-
-def set_rules(policies, overwrite=True):
-    """Set rules based on the provided dict of rules.
-
-    :param policies: New policies to use. It should be an instance of dict.
-    :param overwrite: Whether to overwrite current rules or update them
-                          with the new rules.
-    """
-
-    LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
-    init()
-    _ENFORCER.set_rules(policies, overwrite)
-
-
-def _is_attribute_explicitly_set(attribute_name, resource, target, action):
-    """Verify that an attribute is present and is explicitly set."""
-    if 'update' in action:
-        # In the case of update, the function should not pay attention to a
-        # default value of an attribute, but check whether it was explicitly
-        # marked as being updated instead.
-        return (attribute_name in target[const.ATTRIBUTES_TO_UPDATE] and
-                target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED)
-    return ('default' in resource[attribute_name] and
-            attribute_name in target and
-            target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
-            target[attribute_name] != resource[attribute_name]['default'])
-
-
-def _should_validate_sub_attributes(attribute, sub_attr):
-    """Verify that sub-attributes are iterable and should be validated."""
-    validate = attribute.get('validate')
-    return (validate and isinstance(sub_attr, collections.Iterable) and
-            any([k.startswith('type:dict') and
-                 v for (k, v) in six.iteritems(validate)]))
-
-
-def _build_subattr_match_rule(attr_name, attr, action, target):
-    """Create the rule to match for sub-attribute policy checks."""
-    # TODO(salv-orlando): Instead of relying on validator info, introduce
-    # typing for API attributes
-    # Expect a dict as type descriptor
-    validate = attr['validate']
-    key = list(filter(lambda k: k.startswith('type:dict'), validate.keys()))
-    if not key:
-        LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
-                 attr_name)
-        return
-    data = validate[key[0]]
-    if not isinstance(data, dict):
-        LOG.debug("Attribute type descriptor is not a dict. Unable to "
-                  "generate any sub-attr policy rule for %s.",
-                  attr_name)
-        return
-    sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
-                                       (action, attr_name,
-                                        sub_attr_name)) for
-                      sub_attr_name in data if sub_attr_name in
-                      target[attr_name]]
-    return policy.AndCheck(sub_attr_rules)
-
-
-def _process_rules_list(rules, match_rule):
-    """Recursively walk a policy rule to extract a list of match entries."""
-    if isinstance(match_rule, policy.RuleCheck):
-        rules.append(match_rule.match)
-    elif isinstance(match_rule, policy.AndCheck):
-        for rule in match_rule.rules:
-            _process_rules_list(rules, rule)
-    return rules
-
-
-def _build_match_rule(action, target, pluralized):
-    """Create the rule to match for a given action.
-
-    The policy rule to be matched is built in the following way:
-    1) add entries for matching permission on objects
-    2) add an entry for the specific action (e.g.: create_network)
-    3) add an entry for attributes of a resource for which the action
-       is being executed (e.g.: create_network:shared)
-    4) add an entry for sub-attributes of a resource for which the
-       action is being executed
-       (e.g.: create_router:external_gateway_info:network_id)
-    """
-    match_rule = policy.RuleCheck('rule', action)
-    resource, is_write = get_resource_and_action(action, pluralized)
-    # Attribute-based checks shall not be enforced on GETs
-    if is_write:
-        # assigning to variable with short name for improving readability
-        res_map = attributes.RESOURCE_ATTRIBUTE_MAP
-        if resource in res_map:
-            for attribute_name in res_map[resource]:
-                if _is_attribute_explicitly_set(attribute_name,
-                                                res_map[resource],
-                                                target, action):
-                    attribute = res_map[resource][attribute_name]
-                    if 'enforce_policy' in attribute:
-                        attr_rule = policy.RuleCheck('rule', '%s:%s' %
-                                                     (action, attribute_name))
-                        # Build match entries for sub-attributes
-                        if _should_validate_sub_attributes(
-                                attribute, target[attribute_name]):
-                            attr_rule = policy.AndCheck(
-                                [attr_rule, _build_subattr_match_rule(
-                                    attribute_name, attribute,
-                                    action, target)])
-                        match_rule = policy.AndCheck([match_rule, attr_rule])
-    return match_rule
-
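-# As a sketch (not part of the original module): for a request such as
-# POST /networks with {'shared': True}, _build_match_rule produces
-# something equivalent to:
-#
-#     policy.AndCheck([
-#         policy.RuleCheck('rule', 'create_network'),
-#         policy.RuleCheck('rule', 'create_network:shared'),
-#     ])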
-
-# This check is registered as 'tenant_id' so that it can override
-# GenericCheck which was used for validating parent resource ownership.
-# This will prevent us from having to handle backward compatibility
-# for policy.json
-# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
-@policy.register('tenant_id')
-class OwnerCheck(policy.Check):
-    """Resource ownership check.
-
-    This check verifies the owner of the current resource, or of another
-    resource referenced by the one under analysis.
-    In the former case it falls back to a regular GenericCheck, whereas
-    in the latter case it leverages the plugin to load the referenced
-    resource and perform the check.
-    """
-    def __init__(self, kind, match):
-        # Process the match
-        try:
-            self.target_field = re.findall(r'^\%\((.*)\)s$',
-                                           match)[0]
-        except IndexError:
-            err_reason = (_("Unable to identify a target field from:%s. "
-                            "Match should be in the form %%(<field_name>)s") %
-                          match)
-            LOG.exception(err_reason)
-            raise exceptions.PolicyInitError(
-                policy="%s:%s" % (kind, match),
-                reason=err_reason)
-        super(OwnerCheck, self).__init__(kind, match)
-
-    def __call__(self, target, creds, enforcer):
-        if self.target_field not in target:
-            # policy needs a plugin check
-            # target field is in the form resource:field
-            # however if they're not separated by a colon, use an underscore
-            # as a separator for backward compatibility
-
-            def do_split(separator):
-                parent_res, parent_field = self.target_field.split(
-                    separator, 1)
-                return parent_res, parent_field
-
-            for separator in (':', '_'):
-                try:
-                    parent_res, parent_field = do_split(separator)
-                    break
-                except ValueError:
-                    LOG.debug("Unable to find ':' as separator in %s.",
-                              self.target_field)
-            else:
-                # If we are here split failed with both separators
-                err_reason = (_("Unable to find resource name in %s") %
-                              self.target_field)
-                LOG.error(err_reason)
-                raise exceptions.PolicyCheckError(
-                    policy="%s:%s" % (self.kind, self.match),
-                    reason=err_reason)
-            parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
-                "%ss" % parent_res, None)
-            if not parent_foreign_key:
-                err_reason = (_("Unable to verify match:%(match)s as the "
-                                "parent resource: %(res)s was not found") %
-                              {'match': self.match, 'res': parent_res})
-                LOG.error(err_reason)
-                raise exceptions.PolicyCheckError(
-                    policy="%s:%s" % (self.kind, self.match),
-                    reason=err_reason)
-            # NOTE(salv-orlando): This check currently assumes the parent
-            # resource is handled by the core plugin. It might be worth
-            # having a way to map resources to plugins so to make this
-            # check more general
-            # NOTE(ihrachys): if import is put in global, circular
-            # import failure occurs
-            manager = importutils.import_module('neutron.manager')
-            f = getattr(manager.NeutronManager.get_instance().plugin,
-                        'get_%s' % parent_res)
-            # f *must* exist; if not found it is better to let neutron
-            # explode. The check will be performed with an admin context
-            context = importutils.import_module('neutron.context')
-            try:
-                data = f(context.get_admin_context(),
-                         target[parent_foreign_key],
-                         fields=[parent_field])
-                target[self.target_field] = data[parent_field]
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE('Policy check error while calling %s!'),
-                                  f)
-        match = self.match % target
-        if self.kind in creds:
-            return match == six.text_type(creds[self.kind])
-        return False
-
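-# A policy.json fragment exercising OwnerCheck might look like this
-# (illustrative example, not shipped with this module):
-#
-#     "admin_or_network_owner":
-#         "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
-#     "create_port:mac_address": "rule:admin_or_network_owner"
-#
-# Here "network:tenant_id" names a field of a parent resource, so
-# OwnerCheck loads the referenced network through the plugin before
-# comparing its tenant_id with the credentials.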
-
-@policy.register('field')
-class FieldCheck(policy.Check):
-    def __init__(self, kind, match):
-        # Process the match
-        resource, field_value = match.split(':', 1)
-        field, value = field_value.split('=', 1)
-
-        super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
-                                         (resource, field, value))
-
-        # Value might need conversion - we need help from the attribute map
-        try:
-            attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
-            conv_func = attr['convert_to']
-        except KeyError:
-            conv_func = lambda x: x
-
-        self.field = field
-        self.value = conv_func(value)
-        self.regex = re.compile(value[1:]) if value.startswith('~') else None
-
-    def __call__(self, target_dict, cred_dict, enforcer):
-        target_value = target_dict.get(self.field)
-        # target_value might be a boolean, explicitly compare with None
-        if target_value is None:
-            LOG.debug("Unable to find requested field: %(field)s in target: "
-                      "%(target_dict)s",
-                      {'field': self.field, 'target_dict': target_dict})
-            return False
-        if self.regex:
-            return bool(self.regex.match(target_value))
-        return target_value == self.value
-
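-# For example (illustrative only): a policy entry such as
-# "get_network:provider:network_type": "field:networks:shared=True"
-# builds FieldCheck('field', 'networks:shared=True'), which passes only
-# when the target network dict has shared == True; the value is run
-# through the attribute map's convert_to helper where one is defined
-# (presumably convert_to_boolean for the 'shared' attribute).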
-
-def _prepare_check(context, action, target, pluralized):
-    """Prepare rule, target, and credentials for the policy engine."""
-    # Compare with None to distinguish case in which target is {}
-    if target is None:
-        target = {}
-    match_rule = _build_match_rule(action, target, pluralized)
-    credentials = context.to_dict()
-    return match_rule, target, credentials
-
-
-def log_rule_list(match_rule):
-    if LOG.isEnabledFor(logging.DEBUG):
-        rules = _process_rules_list([], match_rule)
-        LOG.debug("Enforcing rules: %s", rules)
-
-
-def check(context, action, target, plugin=None, might_not_exist=False,
-          pluralized=None):
-    """Verifies that the action is valid on the target in this context.
-
-    :param context: neutron context
-    :param action: string representing the action to be checked;
-        this should be colon separated for clarity.
-    :param target: dictionary representing the object of the action;
-        for object creation this should be a dictionary representing the
-        location of the object, e.g. ``{'project_id': context.project_id}``
-    :param plugin: currently unused and deprecated.
-        Kept for backward compatibility.
-    :param might_not_exist: If True the policy check is skipped (and the
-        function returns True) if the specified policy does not exist.
-        Defaults to False.
-    :param pluralized: pluralized case of resource
-        e.g. firewall_policy -> pluralized = "firewall_policies"
-
-    :return: Returns True if access is permitted else False.
-    """
-    # If we already know the context has admin rights do not perform an
-    # additional check and authorize the operation
-    if context.is_admin:
-        return True
-    if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules):
-        return True
-    match_rule, target, credentials = _prepare_check(context,
-                                                     action,
-                                                     target,
-                                                     pluralized)
-    result = _ENFORCER.enforce(match_rule,
-                               target,
-                               credentials,
-                               pluralized=pluralized)
-    # logging applied rules in case of failure
-    if not result:
-        log_rule_list(match_rule)
-    return result
-
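-# Typical usage sketch (not part of the original module):
-#
-#     policy.init()
-#     target = {'project_id': context.project_id, 'shared': True}
-#     allowed = policy.check(context, 'create_network', target)
-#
-# enforce() below behaves the same, but raises
-# oslo_policy.policy.PolicyNotAuthorized instead of returning False.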
-
-def enforce(context, action, target, plugin=None, pluralized=None):
-    """Verifies that the action is valid on the target in this context.
-
-    :param context: neutron context
-    :param action: string representing the action to be checked;
-        this should be colon separated for clarity.
-    :param target: dictionary representing the object of the action;
-        for object creation this should be a dictionary representing the
-        location of the object, e.g. ``{'project_id': context.project_id}``
-    :param plugin: currently unused and deprecated.
-        Kept for backward compatibility.
-    :param pluralized: pluralized case of resource
-        e.g. firewall_policy -> pluralized = "firewall_policies"
-
-    :raises oslo_policy.policy.PolicyNotAuthorized:
-            if verification fails.
-    """
-    # If we already know the context has admin rights do not perform an
-    # additional check and authorize the operation
-    if context.is_admin:
-        return True
-    rule, target, credentials = _prepare_check(context,
-                                               action,
-                                               target,
-                                               pluralized)
-    try:
-        result = _ENFORCER.enforce(rule, target, credentials, action=action,
-                                   do_raise=True)
-    except policy.PolicyNotAuthorized:
-        with excutils.save_and_reraise_exception():
-            log_rule_list(rule)
-            LOG.debug("Failed policy check for '%s'", action)
-    return result
-
-
-def check_is_admin(context):
-    """Verify context has admin rights according to policy settings."""
-    init()
-    # the target is user-self
-    credentials = context.to_dict()
-    if ADMIN_CTX_POLICY not in _ENFORCER.rules:
-        return False
-    return _ENFORCER.enforce(ADMIN_CTX_POLICY, credentials, credentials)
-
-
-def check_is_advsvc(context):
-    """Verify context has advsvc rights according to policy settings."""
-    init()
-    # the target is user-self
-    credentials = context.to_dict()
-    if ADVSVC_CTX_POLICY not in _ENFORCER.rules:
-        return False
-    return _ENFORCER.enforce(ADVSVC_CTX_POLICY, credentials, credentials)
diff --git a/neutron/quota/__init__.py b/neutron/quota/__init__.py
deleted file mode 100644 (file)
index ce61682..0000000
+++ /dev/null
@@ -1,325 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Quotas for instances, volumes, and floating ips."""
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_log import versionutils
-from oslo_utils import importutils
-import six
-import webob
-
-from neutron._i18n import _, _LI, _LW
-from neutron.common import exceptions
-from neutron.db.quota import api as quota_api
-from neutron.quota import resource_registry
-
-
-LOG = logging.getLogger(__name__)
-QUOTA_DB_MODULE = 'neutron.db.quota.driver'
-QUOTA_DB_DRIVER = '%s.DbQuotaDriver' % QUOTA_DB_MODULE
-QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver'
-default_quota_items = ['network', 'subnet', 'port']
-
-
-quota_opts = [
-    cfg.ListOpt('quota_items',
-                default=default_quota_items,
-                deprecated_for_removal=True,
-                help=_('Resource name(s) that are supported in quota '
-                       'features. This option is now deprecated for '
-                       'removal.')),
-    cfg.IntOpt('default_quota',
-               default=-1,
-               help=_('Default number of resources allowed per tenant. '
-                      'A negative value means unlimited.')),
-    cfg.IntOpt('quota_network',
-               default=10,
-               help=_('Number of networks allowed per tenant. '
-                      'A negative value means unlimited.')),
-    cfg.IntOpt('quota_subnet',
-               default=10,
-               help=_('Number of subnets allowed per tenant. '
-                      'A negative value means unlimited.')),
-    cfg.IntOpt('quota_port',
-               default=50,
-               help=_('Number of ports allowed per tenant. '
-                      'A negative value means unlimited.')),
-    cfg.StrOpt('quota_driver',
-               default=QUOTA_DB_DRIVER,
-               help=_('Default driver to use for quota checks')),
-    cfg.BoolOpt('track_quota_usage',
-                default=True,
-                help=_('Keep track of current resource quota usage in the '
-                       'database. Plugins which do not leverage the '
-                       'neutron database should set this flag to False.')),
-]
-# Register the configuration options
-cfg.CONF.register_opts(quota_opts, 'QUOTAS')
-
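-# These limits can also be overridden programmatically through
-# oslo.config, e.g. in a test (illustrative sketch, not part of the
-# original module):
-#
-#     cfg.CONF.set_override('quota_port', 100, group='QUOTAS')
-#     cfg.CONF.set_override('quota_network', -1, group='QUOTAS')  # unlimited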
-
-class ConfDriver(object):
-    """Configuration driver.
-
-    Driver to perform necessary checks to enforce quotas and obtain
-    quota information. The default driver utilizes the default values
-    in neutron.conf.
-    """
-
-    def _get_quotas(self, context, resources):
-        """Get quotas.
-
-        A helper method which retrieves the quotas for the specific
-        resources identified by keys, and which apply to the current
-        context.
-
-        :param context: The request context, for access checks.
-        :param resources: A dictionary of the registered resources.
-        """
-
-        quotas = {}
-        for resource in resources.values():
-            quotas[resource.name] = resource.default
-        return quotas
-
-    def limit_check(self, context, tenant_id,
-                    resources, values):
-        """Check simple quota limits.
-
-        For limits--those quotas for which there is no usage
-        synchronization function--this method checks that a set of
-        proposed values are permitted by the limit restriction.
-
-        If any of the proposed values is over the defined quota, an
-        OverQuota exception will be raised with the sorted list of the
-        resources which are too high.  Otherwise, the method returns
-        nothing.
-
-        :param context: The request context, for access checks.
-        :param tenant_id: The tenant_id to check quota.
-        :param resources: A dictionary of the registered resources.
-        :param values: A dictionary of the values to check against the
-                       quota.
-        """
-        # Ensure no value is less than zero
-        unders = [key for key, val in values.items() if val < 0]
-        if unders:
-            raise exceptions.InvalidQuotaValue(unders=sorted(unders))
-
-        # Get the applicable quotas
-        quotas = self._get_quotas(context, resources)
-
-        # Check the quotas and construct a list of the resources that
-        # would be put over limit by the desired values
-        overs = [key for key, val in values.items()
-                 if quotas[key] >= 0 and quotas[key] < val]
-        if overs:
-            raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas,
-                                       usages={})
-
-    @staticmethod
-    def get_tenant_quotas(context, resources, tenant_id):
-        quotas = {}
-        sub_resources = dict(resources)
-        for resource in sub_resources.values():
-            quotas[resource.name] = resource.default
-        return quotas
-
-    @staticmethod
-    def get_all_quotas(context, resources):
-        return []
-
-    @staticmethod
-    def delete_tenant_quota(context, tenant_id):
-        msg = _('Access to this resource was denied.')
-        raise webob.exc.HTTPForbidden(msg)
-
-    @staticmethod
-    def update_quota_limit(context, tenant_id, resource, limit):
-        msg = _('Access to this resource was denied.')
-        raise webob.exc.HTTPForbidden(msg)
-
-    def make_reservation(self, context, tenant_id, resources, deltas, plugin):
-        """This driver does not support reservations.
-
-        This routine is provided for backward compatibility purposes with
-        the API controllers which have now been adapted to make reservations
-        rather than counting resources and checking limits - as this
-        routine ultimately does.
-        """
-        for resource in deltas.keys():
-            count = QUOTAS.count(context, resource, plugin, tenant_id)
-            total_use = deltas.get(resource, 0) + count
-            deltas[resource] = total_use
-
-        self.limit_check(
-            context,
-            tenant_id,
-            resource_registry.get_all_resources(),
-            deltas)
-        # return a fake reservation - the REST controller expects it
-        return quota_api.ReservationInfo('fake', None, None, None)
-
-    def commit_reservation(self, context, reservation_id):
-        """This is a noop as this driver does not support reservations."""
-
-    def cancel_reservation(self, context, reservation_id):
-        """This is a noop as this driver does not support reservations."""
-
-
-class QuotaEngine(object):
-    """Represent the set of recognized quotas."""
-
-    _instance = None
-
-    @classmethod
-    def get_instance(cls):
-        if not cls._instance:
-            cls._instance = cls()
-        return cls._instance
-
-    def __init__(self, quota_driver_class=None):
-        """Initialize a Quota object."""
-        self._driver = None
-        self._driver_class = quota_driver_class
-
-    def get_driver(self):
-        if self._driver is None:
-            _driver_class = (self._driver_class or
-                             cfg.CONF.QUOTAS.quota_driver)
-            if (_driver_class == QUOTA_DB_DRIVER and
-                    QUOTA_DB_MODULE not in sys.modules):
-                # If quotas table is not loaded, force config quota driver.
-                _driver_class = QUOTA_CONF_DRIVER
-                LOG.info(_LI("ConfDriver is used as quota_driver because the "
-                             "loaded plugin does not support 'quotas' table."))
-            if isinstance(_driver_class, six.string_types):
-                _driver_class = importutils.import_object(_driver_class)
-            if isinstance(_driver_class, ConfDriver):
-                versionutils.report_deprecated_feature(
-                    LOG, _LW("The quota driver neutron.quota.ConfDriver is "
-                             "deprecated as of Liberty. "
-                             "neutron.db.quota.driver.DbQuotaDriver should "
-                             "be used in its place"))
-            self._driver = _driver_class
-            LOG.info(_LI('Loaded quota_driver: %s.'), _driver_class)
-        return self._driver
-
-    def count(self, context, resource_name, *args, **kwargs):
-        """Count a resource.
-
-        For countable resources, invokes the count() function and
-        returns its result.  Arguments following the context and
-        resource are passed directly to the count function declared by
-        the resource.
-
-        :param context: The request context, for access checks.
-        :param resource_name: The name of the resource, as a string.
-        """
-
-        # Get the resource
-        res = resource_registry.get_resource(resource_name)
-        if not res or not hasattr(res, 'count'):
-            raise exceptions.QuotaResourceUnknown(unknown=[resource_name])
-
-        return res.count(context, *args, **kwargs)
-
-    def make_reservation(self, context, tenant_id, deltas, plugin):
-        # Verify that resources are managed by the quota engine
-        # Ensure no value is less than zero
-        unders = [key for key, val in deltas.items() if val < 0]
-        if unders:
-            raise exceptions.InvalidQuotaValue(unders=sorted(unders))
-
-        requested_resources = set(deltas.keys())
-        all_resources = resource_registry.get_all_resources()
-        managed_resources = set([res for res in all_resources.keys()
-                                 if res in requested_resources])
-        # Make sure we accounted for all of them...
-        unknown_resources = requested_resources - managed_resources
-
-        if unknown_resources:
-            raise exceptions.QuotaResourceUnknown(
-                unknown=sorted(unknown_resources))
-        # FIXME(salv-orlando): There should be no reason for sending all the
-        # resources in the registry to the quota driver, but as other driver
-        # APIs request them, this will be sorted out with a different patch.
-        return self.get_driver().make_reservation(
-            context,
-            tenant_id,
-            all_resources,
-            deltas,
-            plugin)
-
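-    # For instance (illustrative): with deltas={'network': 1, 'widget': 2}
-    # and only 'network' registered, requested - managed leaves {'widget'},
-    # so QuotaResourceUnknown(unknown=['widget']) is raised before any
-    # reservation is attempted.
-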
-    def commit_reservation(self, context, reservation_id):
-        self.get_driver().commit_reservation(context, reservation_id)
-
-    def cancel_reservation(self, context, reservation_id):
-        self.get_driver().cancel_reservation(context, reservation_id)
-
-    def limit_check(self, context, tenant_id, **values):
-        """Check simple quota limits.
-
-        For limits--those quotas for which there is no usage
-        synchronization function--this method checks that a set of
-        proposed values are permitted by the limit restriction.  The
-        values to check are given as keyword arguments, where the key
-        identifies the specific quota limit to check, and the value is
-        the proposed value.
-
-        This method will raise a QuotaResourceUnknown exception if a
-        given resource is unknown or if it is not a countable resource.
-
-        If any of the proposed values exceeds the respective quota defined
-        for the tenant, an OverQuota exception will be raised.
-        The exception will include a sorted list with the resources
-        which exceed the quota limit. Otherwise, the method returns nothing.
-
-        :param context: Request context
-        :param tenant_id: Tenant for which the quota limit is being checked
-        :param values: Dict specifying requested deltas for each resource
-        """
-        # TODO(salv-orlando): Deprecate calls to this API
-        # Verify that resources are managed by the quota engine
-        requested_resources = set(values.keys())
-        managed_resources = set([res for res in
-                                 resource_registry.get_all_resources()
-                                 if res in requested_resources])
-
-        # Make sure we accounted for all of them...
-        unknown_resources = requested_resources - managed_resources
-        if unknown_resources:
-            raise exceptions.QuotaResourceUnknown(
-                unknown=sorted(unknown_resources))
-
-        return self.get_driver().limit_check(
-            context, tenant_id, resource_registry.get_all_resources(), values)
-
-
-QUOTAS = QuotaEngine.get_instance()
-
-
-def register_resources_from_config():
-    # This operation is now deprecated. All the neutron core and extended
-    # resources for which quota limits are enforced explicitly register
-    # themselves with the quota engine.
-    for resource_item in (set(cfg.CONF.QUOTAS.quota_items) -
-                          set(default_quota_items)):
-        resource_registry.register_resource_by_name(resource_item)
-
-
-register_resources_from_config()
diff --git a/neutron/quota/resource.py b/neutron/quota/resource.py
deleted file mode 100644 (file)
index f2ec4e1..0000000
+++ /dev/null
@@ -1,311 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_db import api as oslo_db_api
-from oslo_db import exception as oslo_db_exception
-from oslo_log import log
-from oslo_utils import excutils
-from sqlalchemy import event
-from sqlalchemy import exc as sql_exc
-
-from neutron._i18n import _LE, _LW
-from neutron.db import api as db_api
-from neutron.db.quota import api as quota_api
-
-LOG = log.getLogger(__name__)
-
-
-def _count_resource(context, plugin, collection_name, tenant_id):
-    count_getter_name = "get_%s_count" % collection_name
-
-    # Some plugins support a count method for particular resources,
-    # using a DB's optimized counting features. We try to use that one
-    # if present. Otherwise just use regular getter to retrieve all objects
-    # and count in Python, allowing older plugins to still be supported
-    try:
-        obj_count_getter = getattr(plugin, count_getter_name)
-        return obj_count_getter(context, filters={'tenant_id': [tenant_id]})
-    except (NotImplementedError, AttributeError):
-        obj_getter = getattr(plugin, "get_%s" % collection_name)
-        obj_list = obj_getter(context, filters={'tenant_id': [tenant_id]})
-        return len(obj_list) if obj_list else 0
-
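-# Concretely (illustrative): for collection_name='ports' this tries
-# plugin.get_ports_count(context, filters={'tenant_id': [tenant_id]})
-# first and, if the plugin does not implement it, falls back to
-# len(plugin.get_ports(...)).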
-
-class BaseResource(object):
-    """Describe a single resource for quota checking."""
-
-    def __init__(self, name, flag, plural_name=None):
-        """Initializes a resource.
-
-        :param name: The name of the resource, i.e., "instances".
-        :param flag: The name of the flag or configuration option
-        :param plural_name: Plural form of the resource name. If not
-                            specified, it is generated automatically by
-                            appending an 's' to the resource name, unless
-                            it ends with a 'y'. In that case the last
-                            letter is removed, and 'ies' is appended.
-                            Dashes are always converted to underscores.
-        """
-
-        self.name = name
-        # If a plural name is not supplied, default to adding an 's' to
-        # the resource name, unless the resource name ends in 'y', in which
-        # case remove the 'y' and add 'ies'. Even if the code should not fiddle
-        # too much with English grammar, this is a rather common and easy to
-        # implement rule.
-        if plural_name:
-            self.plural_name = plural_name
-        elif self.name[-1] == 'y':
-            self.plural_name = "%sies" % self.name[:-1]
-        else:
-            self.plural_name = "%ss" % self.name
-        # always convert dashes to underscores
-        self.plural_name = self.plural_name.replace('-', '_')
-        self.flag = flag
-
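-    # To illustrate the pluralization rule above (not part of the
-    # original code):
-    #
-    #     BaseResource('network', 'quota_network').plural_name
-    #     # -> 'networks'
-    #     BaseResource('security-group-rule', None).plural_name
-    #     # -> 'security_group_rules'
-    #     BaseResource('firewall_policy', None).plural_name
-    #     # -> 'firewall_policies'
-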
-    @property
-    def default(self):
-        """Return the default value of the quota."""
-        # Any negative value will be interpreted as an infinite quota,
-        # and stored as -1 for compatibility with current behaviour
-        value = getattr(cfg.CONF.QUOTAS,
-                        self.flag,
-                        cfg.CONF.QUOTAS.default_quota)
-        return max(value, -1)
-
-    @property
-    def dirty(self):
-        """Return the current state of the Resource instance.
-
-        :returns: True if the resource count is out of sync with actual data,
-                  False if it is in sync, and None if the resource instance
-                  does not track usage.
-        """
-
-
-class CountableResource(BaseResource):
-    """Describe a resource where the counts are determined by a function."""
-
-    def __init__(self, name, count, flag=None, plural_name=None):
-        """Initializes a CountableResource.
-
-        Countable resources are those resources which directly
-        correspond to objects in the database, i.e., network, subnet,
-        etc.,.  A CountableResource must be constructed with a counting
-        function, which will be called to determine the current counts
-        of the resource.
-
-        The counting function will be passed the context, along with
-        the extra positional and keyword arguments that are passed to
-        Quota.count().  It should return an integer specifying the
-        count.
-
-        :param name: The name of the resource, i.e., "instances".
-        :param count: A callable which returns the count of the
-                      resource. The arguments passed are as described
-                      above.
-        :param flag: The name of the flag or configuration option
-                     which specifies the default value of the quota
-                     for this resource.
-        :param plural_name: Plural form of the resource name. If not
-                            specified, it is generated automatically by
-                            appending an 's' to the resource name, unless
-                            it ends with a 'y'. In that case the last
-                            letter is removed, and 'ies' is appended.
-                            Dashes are always converted to underscores.
-        """
-
-        super(CountableResource, self).__init__(
-            name, flag=flag, plural_name=plural_name)
-        self._count_func = count
-
-    def count(self, context, plugin, tenant_id, **kwargs):
-        return self._count_func(context, plugin, self.plural_name, tenant_id)
-
-
-class TrackedResource(BaseResource):
-    """Resource which keeps track of its usage data."""
-
-    def __init__(self, name, model_class, flag, plural_name=None):
-        """Initializes an instance for a given resource.
-
-        TrackedResource instances are directly mapped to data model classes.
-        Resource usage is tracked in the database, and the model class to
-        which this resource refers is monitored to ensure that "fresh" usage
-        data are always employed when performing quota checks.
-
-        This class operates under the assumption that the model class
-        describing the resource has a tenant identifier attribute.
-
-        :param name: The name of the resource, i.e., "networks".
-        :param model_class: The sqlalchemy model class of the resource for
-                            which this instance is being created
-        :param flag: The name of the flag or configuration option
-                     which specifies the default value of the quota
-                     for this resource.
-        :param plural_name: Plural form of the resource name. If not
-                            specified, it is generated automatically by
-                            appending an 's' to the resource name, unless
-                            it ends with a 'y'. In that case the last
-                            letter is removed, and 'ies' is appended.
-                            Dashes are always converted to underscores.
-
-        """
-        super(TrackedResource, self).__init__(
-            name, flag=flag, plural_name=plural_name)
-        # Register events for addition/removal of records in the model class
-        # As tenant_id is immutable for all Neutron objects there is no need
-        # to register a listener for update events
-        self._model_class = model_class
-        self._dirty_tenants = set()
-        self._out_of_sync_tenants = set()
-
-    @property
-    def dirty(self):
-        return self._dirty_tenants
-
-    def mark_dirty(self, context):
-        if not self._dirty_tenants:
-            return
-        with db_api.autonested_transaction(context.session):
-            # It is not necessary to protect this operation with a lock.
-            # Indeed, when this method is called the request has already been
-            # processed, and therefore all resources have been created or
-            # deleted.
-            # dirty_tenants will contain all the tenants for which the
-            # resource count has changed. The list might also contain tenants
-            # for which the resource count was altered by other requests, but
-            # this won't be harmful.
-            dirty_tenants_snap = self._dirty_tenants.copy()
-            for tenant_id in dirty_tenants_snap:
-                quota_api.set_quota_usage_dirty(context, self.name, tenant_id)
-                LOG.debug(("Persisted dirty status for tenant:%(tenant_id)s "
-                           "on resource:%(resource)s"),
-                          {'tenant_id': tenant_id, 'resource': self.name})
-        self._out_of_sync_tenants |= dirty_tenants_snap
-        self._dirty_tenants -= dirty_tenants_snap
-
-    def _db_event_handler(self, mapper, _conn, target):
-        try:
-            tenant_id = target['tenant_id']
-        except AttributeError:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Model class %s does not have a tenant_id "
-                              "attribute"), target)
-        self._dirty_tenants.add(tenant_id)
-
-    # Retry the operation if a duplicate entry exception is raised. This
-    # can happen if two or more workers are trying to create a resource of a
-    # given kind for the same tenant concurrently. Retrying the operation will
-    # ensure that an UPDATE statement is emitted rather than an INSERT one
-    @oslo_db_api.wrap_db_retry(
-        max_retries=db_api.MAX_RETRIES,
-        exception_checker=lambda exc:
-        isinstance(exc, (oslo_db_exception.DBDuplicateEntry,
-                         oslo_db_exception.DBDeadlock)))
-    def _set_quota_usage(self, context, tenant_id, in_use):
-        return quota_api.set_quota_usage(
-            context, self.name, tenant_id, in_use=in_use)
-
-    def _resync(self, context, tenant_id, in_use):
-        # Update quota usage
-        usage_info = self._set_quota_usage(context, tenant_id, in_use)
-
-        self._dirty_tenants.discard(tenant_id)
-        self._out_of_sync_tenants.discard(tenant_id)
-        LOG.debug(("Unset dirty status for tenant:%(tenant_id)s on "
-                   "resource:%(resource)s"),
-                  {'tenant_id': tenant_id, 'resource': self.name})
-        return usage_info
-
-    def resync(self, context, tenant_id):
-        if tenant_id not in self._out_of_sync_tenants:
-            return
-        LOG.debug(("Synchronizing usage tracker for tenant:%(tenant_id)s on "
-                   "resource:%(resource)s"),
-                  {'tenant_id': tenant_id, 'resource': self.name})
-        in_use = context.session.query(self._model_class).filter_by(
-            tenant_id=tenant_id).count()
-        # Update quota usage
-        return self._resync(context, tenant_id, in_use)
-
-    def count(self, context, _plugin, tenant_id, resync_usage=True):
-        """Return the current usage count for the resource.
-
-        This method will fetch aggregate information for resource usage
-        data, unless usage data are marked as "dirty".
-        In the latter case resource usage will be calculated counting
-        rows for tenant_id in the resource's database model.
-        Active reserved amounts are instead always calculated by summing
-        amounts for matching records in the 'reservations' database model.
-
-        The _plugin parameter is unused, but kept for compatibility with
-        the signature of the count method of CountableResource instances.
-        """
-        # Load current usage data, setting a row-level lock on the DB
-        usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
-            context, self.name, tenant_id, lock_for_update=True)
-        # Always fetch reservations, as they are not tracked by usage counters
-        reservations = quota_api.get_reservations_for_resources(
-            context, tenant_id, [self.name])
-        reserved = reservations.get(self.name, 0)
-
-        # If dirty or missing, calculate actual resource usage querying
-        # the database and set/create usage info data
-        # NOTE: this routine "trusts" usage counters at service startup. This
-        # assumption is generally valid, but if the database is tampered with,
-        # or if data migrations do not take care of usage counters, the
-        # assumption will not hold anymore
-        if (tenant_id in self._dirty_tenants or
-            not usage_info or usage_info.dirty):
-            LOG.debug(("Usage tracker for resource:%(resource)s and tenant:"
-                       "%(tenant_id)s is out of sync, need to count used "
-                       "quota"), {'resource': self.name,
-                                  'tenant_id': tenant_id})
-            in_use = context.session.query(self._model_class).filter_by(
-                tenant_id=tenant_id).count()
-
-            # Update quota usage, if requested (a caller which counts right
-            # before adding a record may want to skip this, as the insert
-            # would mark the usage counter as dirty again)
-            if resync_usage:
-                usage_info = self._resync(context, tenant_id, in_use)
-            else:
-                resource = usage_info.resource if usage_info else self.name
-                tenant_id = usage_info.tenant_id if usage_info else tenant_id
-                dirty = usage_info.dirty if usage_info else True
-                usage_info = quota_api.QuotaUsageInfo(
-                    resource, tenant_id, in_use, dirty)
-
-            LOG.debug(("Quota usage for %(resource)s was recalculated. "
-                       "Used quota:%(used)d."),
-                      {'resource': self.name,
-                       'used': usage_info.used})
-        return usage_info.used + reserved
-
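-    # Sketch of the intended call pattern (hypothetical names, not part
-    # of the original code):
-    #
-    #     used = tracked_res.count(context, None, tenant_id,
-    #                              resync_usage=False)
-    #     if limit >= 0 and used + delta > limit:
-    #         ...  # reject the request as over quota
-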
-    def register_events(self):
-        event.listen(self._model_class, 'after_insert', self._db_event_handler)
-        event.listen(self._model_class, 'after_delete', self._db_event_handler)
-
-    def unregister_events(self):
-        try:
-            event.remove(self._model_class, 'after_insert',
-                         self._db_event_handler)
-            event.remove(self._model_class, 'after_delete',
-                         self._db_event_handler)
-        except sql_exc.InvalidRequestError:
-            LOG.warning(_LW("No sqlalchemy event for resource %s found"),
-                        self.name)
diff --git a/neutron/quota/resource_registry.py b/neutron/quota/resource_registry.py
deleted file mode 100644 (file)
index 26b9fbc..0000000
+++ /dev/null
@@ -1,248 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-import six
-
-from neutron._i18n import _LI, _LW
-from neutron.quota import resource
-
-LOG = log.getLogger(__name__)
-
-
-# Wrappers for easing access to the ResourceRegistry singleton
-
-
-def register_resource(resource):
-    ResourceRegistry.get_instance().register_resource(resource)
-
-
-def register_resource_by_name(resource_name, plural_name=None):
-    ResourceRegistry.get_instance().register_resource_by_name(
-        resource_name, plural_name)
-
-
-def get_all_resources():
-    return ResourceRegistry.get_instance().resources
-
-
-def get_resource(resource_name):
-    return ResourceRegistry.get_instance().get_resource(resource_name)
-
-
-def is_tracked(resource_name):
-    return ResourceRegistry.get_instance().is_tracked(resource_name)
-
-
-# auxiliary functions and decorators
-
-
-def set_resources_dirty(context):
-    """Sets the dirty bit for resources with usage changes.
-
-    This routine scans all registered resources, and, for those whose
-    dirty status is True, sets the dirty bit to True in the database
-    for the appropriate tenants.
-
-    Please note that this routine begins a nested transaction, and it
-    is not recommended that this transaction be started within another
-    transaction. For this reason the function will raise a SQLAlchemy
-    exception if such an attempt is made.
-
-    :param context: a Neutron request context with a DB session
-    """
-    if not cfg.CONF.QUOTAS.track_quota_usage:
-        return
-
-    for res in get_all_resources().values():
-        with context.session.begin(subtransactions=True):
-            if is_tracked(res.name) and res.dirty:
-                res.mark_dirty(context)
-
-
-def resync_resource(context, resource_name, tenant_id):
-    if not cfg.CONF.QUOTAS.track_quota_usage:
-        return
-
-    if is_tracked(resource_name):
-        res = get_resource(resource_name)
-        # If the resource is tracked, its resync method brings the usage
-        # counter back in sync with the actual resource count
-        res.resync(context, tenant_id)
-
-
-def mark_resources_dirty(f):
-    """Decorator for functions which alter resource usage.
-
-    This decorator ensures set_resources_dirty is invoked after completion
-    of the decorated function.
-    """
-
-    @six.wraps(f)
-    def wrapper(_self, context, *args, **kwargs):
-        ret_val = f(_self, context, *args, **kwargs)
-        set_resources_dirty(context)
-        return ret_val
-
-    return wrapper
-
-
-class tracked_resources(object):
-    """Decorator for specifying resources for which usage should be tracked.
-
-    A plugin class can use this decorator to specify for which resources
-    usage info should be tracked into an appropriate table rather than being
-    explicitly counted.
-    """
-
-    def __init__(self, override=False, **kwargs):
-        self._tracked_resources = kwargs
-        self._override = override
-
-    def __call__(self, f):
-
-        @six.wraps(f)
-        def wrapper(*args, **kwargs):
-            registry = ResourceRegistry.get_instance()
-            for resource_name in self._tracked_resources:
-                registry.set_tracked_resource(
-                    resource_name,
-                    self._tracked_resources[resource_name],
-                    self._override)
-            return f(*args, **kwargs)
-
-        return wrapper
-
-
-class ResourceRegistry(object):
-    """Registry for resource subject to quota limits.
-
-    This class keeps track of Neutron resources for which quota limits are
-    enforced, regardless of whether their usage is being tracked or counted.
-
-    For tracked-usage resources, i.e. those whose usage counters are
-    kept in sync with the actual number of rows in the database, this
-    class allows the plugin to register their
-    names either explicitly or through the @tracked_resources decorator,
-    which should preferably be applied to the __init__ method of the class.
-    """
-
-    _instance = None
-
-    @classmethod
-    def get_instance(cls):
-        if cls._instance is None:
-            cls._instance = cls()
-        return cls._instance
-
-    def __init__(self):
-        self._resources = {}
-        # Map usage tracked resources to the corresponding db model class
-        self._tracked_resource_mappings = {}
-
-    def __contains__(self, resource):
-        return resource in self._resources
-
-    def _create_resource_instance(self, resource_name, plural_name):
-        """Factory function for quota Resource.
-
-        This routine returns a resource instance of the appropriate type
-        according to system configuration.
-
-        If QUOTAS.track_quota_usage is True, and there is a model mapping for
-        the current resource, this function will return an instance of
-        TrackedResource; otherwise an instance of CountableResource.
-        """
-
-        if (not cfg.CONF.QUOTAS.track_quota_usage or
-            resource_name not in self._tracked_resource_mappings):
-            LOG.info(_LI("Creating instance of CountableResource for "
-                         "resource:%s"), resource_name)
-            return resource.CountableResource(
-                resource_name, resource._count_resource,
-                'quota_%s' % resource_name)
-        else:
-            LOG.info(_LI("Creating instance of TrackedResource for "
-                         "resource:%s"), resource_name)
-            return resource.TrackedResource(
-                resource_name,
-                self._tracked_resource_mappings[resource_name],
-                'quota_%s' % resource_name)
-
-    def set_tracked_resource(self, resource_name, model_class, override=False):
-        # Do not do anything if tracking is disabled by config
-        if not cfg.CONF.QUOTAS.track_quota_usage:
-            return
-
-        current_model_class = self._tracked_resource_mappings.setdefault(
-            resource_name, model_class)
-
-        # Check whether setdefault also set the entry in the dict
-        if current_model_class != model_class:
-            LOG.debug("A model class is already defined for %(resource)s: "
-                      "%(current_model_class)s. Override:%(override)s",
-                      {'resource': resource_name,
-                       'current_model_class': current_model_class,
-                       'override': override})
-            if override:
-                self._tracked_resource_mappings[resource_name] = model_class
-        LOG.debug("Tracking information for resource: %s configured",
-                  resource_name)
-
-    def is_tracked(self, resource_name):
-        """Find out if a resource if tracked or not.
-
-        :param resource_name: name of the resource.
-        :returns True if resource_name is registered and tracked, otherwise
-                 False. Please note that here when False it returned it
-                 simply means that resource_name is not a TrackedResource
-                 instance, it does not necessarily mean that the resource
-                 is not registered.
-        """
-        return resource_name in self._tracked_resource_mappings
-
-    def register_resource(self, resource):
-        if resource.name in self._resources:
-            LOG.warn(_LW('%s is already registered'), resource.name)
-        if resource.name in self._tracked_resource_mappings:
-            resource.register_events()
-        self._resources[resource.name] = resource
-
-    def register_resources(self, resources):
-        for res in resources:
-            self.register_resource(res)
-
-    def register_resource_by_name(self, resource_name,
-                                  plural_name=None):
-        """Register a resource by name."""
-        resource = self._create_resource_instance(
-            resource_name, plural_name)
-        self.register_resource(resource)
-
-    def unregister_resources(self):
-        """Unregister all resources."""
-        for (res_name, res) in self._resources.items():
-            if res_name in self._tracked_resource_mappings:
-                res.unregister_events()
-        self._resources.clear()
-        self._tracked_resource_mappings.clear()
-
-    def get_resource(self, resource_name):
-        """Return a resource given its name.
-
-        :returns: The resource instance or None if the resource is not found
-        """
-        return self._resources.get(resource_name)
-
-    @property
-    def resources(self):
-        return self._resources
diff --git a/neutron/scheduler/__init__.py b/neutron/scheduler/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/scheduler/base_resource_filter.py b/neutron/scheduler/base_resource_filter.py
deleted file mode 100644 (file)
index a2b1729..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseResourceFilter(object):
-    """Encapsulate logic that is specific to the resource type."""
-    @abc.abstractmethod
-    def filter_agents(self, plugin, context, resource):
-        """Return the agents that can host the resource."""
-
-    def bind(self, context, agents, resource_id):
-        """Bind the resource to the agents."""
-        with context.session.begin(subtransactions=True):
-            res = {}
-            for agent in agents:
-                # Load is being incremented here to reflect the latest agent
-                # load even within the agent report interval. This is
-                # necessary when bulk resource creation happens within an
-                # agent report interval.
-                # NOTE: The resource being bound might or might not be of the
-                # same type which is accounted for in the load. It isn't a
-                # problem because "+ 1" is not meant to predict precisely
-                # what the load of the agent will be. The value will
-                # be corrected by the agent on the next report interval.
-                res['load'] = agent.load + 1
-                agent.update(res)
diff --git a/neutron/scheduler/base_scheduler.py b/neutron/scheduler/base_scheduler.py
deleted file mode 100644 (file)
index 561d12e..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-from operator import attrgetter
-import random
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseScheduler(object):
-    """The base scheduler (agnostic to resource type).
-       Child classes of BaseScheduler must define the
-       self.resource_filter to filter agents of
-       particular type.
-    """
-    resource_filter = None
-
-    @abc.abstractmethod
-    def select(self, plugin, context, resource_hostable_agents,
-               resource_hosted_agents, num_agents_needed):
-        """Return a subset of agents based on the specific scheduling logic."""
-
-    def schedule(self, plugin, context, resource):
-        """Select and bind agents to a given resource."""
-        if not self.resource_filter:
-            return
-        # filter the agents that can host the resource
-        filtered_agents_dict = self.resource_filter.filter_agents(
-            plugin, context, resource)
-        num_agents = filtered_agents_dict['n_agents']
-        hostable_agents = filtered_agents_dict['hostable_agents']
-        hosted_agents = filtered_agents_dict['hosted_agents']
-        chosen_agents = self.select(plugin, context, hostable_agents,
-                                    hosted_agents, num_agents)
-        # bind the resource to the agents
-        self.resource_filter.bind(context, chosen_agents, resource['id'])
-        return chosen_agents
-
-
-class BaseChanceScheduler(BaseScheduler):
-    """Choose agents randomly."""
-
-    def __init__(self, resource_filter):
-        self.resource_filter = resource_filter
-
-    def select(self, plugin, context, resource_hostable_agents,
-               resource_hosted_agents, num_agents_needed):
-        chosen_agents = random.sample(resource_hostable_agents,
-                                      num_agents_needed)
-        return chosen_agents
-
-
-class BaseWeightScheduler(BaseScheduler):
-    """Choose agents based on load."""
-
-    def __init__(self, resource_filter):
-        self.resource_filter = resource_filter
-
-    def select(self, plugin, context, resource_hostable_agents,
-               resource_hosted_agents, num_agents_needed):
-        chosen_agents = sorted(resource_hostable_agents,
-                               key=attrgetter('load'))[0:num_agents_needed]
-        return chosen_agents
diff --git a/neutron/scheduler/dhcp_agent_scheduler.py b/neutron/scheduler/dhcp_agent_scheduler.py
deleted file mode 100644 (file)
index 5d2ce95..0000000
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import collections
-import heapq
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from sqlalchemy import sql
-
-from neutron._i18n import _LI, _LW
-from neutron.common import constants
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.extensions import availability_zone as az_ext
-from neutron.scheduler import base_resource_filter
-from neutron.scheduler import base_scheduler
-
-LOG = logging.getLogger(__name__)
-
-
-class AutoScheduler(object):
-
-    def auto_schedule_networks(self, plugin, context, host):
-        """Schedule non-hosted networks to the DHCP agent on the specified
-           host.
-        """
-        agents_per_network = cfg.CONF.dhcp_agents_per_network
-        # a list of (agent, net_ids) tuples
-        bindings_to_add = []
-        with context.session.begin(subtransactions=True):
-            fields = ['network_id', 'enable_dhcp']
-            subnets = plugin.get_subnets(context, fields=fields)
-            net_ids = set(s['network_id'] for s in subnets
-                          if s['enable_dhcp'])
-            if not net_ids:
-                LOG.debug('No non-hosted networks')
-                return False
-            query = context.session.query(agents_db.Agent)
-            query = query.filter(agents_db.Agent.agent_type ==
-                                 constants.AGENT_TYPE_DHCP,
-                                 agents_db.Agent.host == host,
-                                 agents_db.Agent.admin_state_up == sql.true())
-            dhcp_agents = query.all()
-            for dhcp_agent in dhcp_agents:
-                if agents_db.AgentDbMixin.is_agent_down(
-                    dhcp_agent.heartbeat_timestamp):
-                    LOG.warn(_LW('DHCP agent %s is not active'), dhcp_agent.id)
-                    continue
-                for net_id in net_ids:
-                    agents = plugin.get_dhcp_agents_hosting_networks(
-                        context, [net_id])
-                    if len(agents) >= agents_per_network:
-                        continue
-                    if any(dhcp_agent.id == agent.id for agent in agents):
-                        continue
-                    net = plugin.get_network(context, net_id)
-                    az_hints = (net.get(az_ext.AZ_HINTS) or
-                                cfg.CONF.default_availability_zones)
-                    if (az_hints and
-                        dhcp_agent['availability_zone'] not in az_hints):
-                        continue
-                    bindings_to_add.append((dhcp_agent, net_id))
-        # Perform the bindings outside the transaction so that one failed
-        # scheduling result does not make the others fail.
-        for agent, net_id in bindings_to_add:
-            self.resource_filter.bind(context, [agent], net_id)
-        return True
-
-
-class ChanceScheduler(base_scheduler.BaseChanceScheduler, AutoScheduler):
-
-    def __init__(self):
-        super(ChanceScheduler, self).__init__(DhcpFilter())
-
-
-class WeightScheduler(base_scheduler.BaseWeightScheduler, AutoScheduler):
-
-    def __init__(self):
-        super(WeightScheduler, self).__init__(DhcpFilter())
-
-
-class AZAwareWeightScheduler(WeightScheduler):
-
-    def select(self, plugin, context, resource_hostable_agents,
-               resource_hosted_agents, num_agents_needed):
-        """AZ aware scheduling
-           If the network has multiple AZs, agents are scheduled as
-           follows:
-           - select AZ with least agents scheduled for the network
-             (nondeterministic for AZs with same amount of agents scheduled)
-           - choose agent in the AZ with WeightScheduler
-        """
-        hostable_az_agents = collections.defaultdict(list)
-        num_az_agents = {}
-        for agent in resource_hostable_agents:
-            az_agent = agent['availability_zone']
-            hostable_az_agents[az_agent].append(agent)
-            if az_agent not in num_az_agents:
-                num_az_agents[az_agent] = 0
-        if num_agents_needed <= 0:
-            return []
-        for agent in resource_hosted_agents:
-            az_agent = agent['availability_zone']
-            if az_agent in num_az_agents:
-                num_az_agents[az_agent] += 1
-
-        num_az_q = [(value, key) for key, value in num_az_agents.items()]
-        heapq.heapify(num_az_q)
-        chosen_agents = []
-        while num_agents_needed > 0:
-            num, select_az = heapq.heappop(num_az_q)
-            select_agent = super(AZAwareWeightScheduler, self).select(
-                plugin, context, hostable_az_agents[select_az], [], 1)
-            chosen_agents.append(select_agent[0])
-            hostable_az_agents[select_az].remove(select_agent[0])
-            if hostable_az_agents[select_az]:
-                heapq.heappush(num_az_q, (num + 1, select_az))
-            num_agents_needed -= 1
-        return chosen_agents
-
-
-class DhcpFilter(base_resource_filter.BaseResourceFilter):
-
-    def bind(self, context, agents, network_id):
-        """Bind the network to the agents."""
-        # customize the bind logic
-        bound_agents = agents[:]
-        for agent in agents:
-            context.session.begin(subtransactions=True)
-            # saving agent_id to use it after rollback to avoid
-            # DetachedInstanceError
-            agent_id = agent.id
-            binding = agentschedulers_db.NetworkDhcpAgentBinding()
-            binding.dhcp_agent_id = agent_id
-            binding.network_id = network_id
-            try:
-                context.session.add(binding)
-                # try to actually write the changes and catch integrity
-                # DBDuplicateEntry
-                context.session.commit()
-            except db_exc.DBDuplicateEntry:
-                # it's totally ok, someone just did our job!
-                context.session.rollback()
-                bound_agents.remove(agent)
-                LOG.info(_LI('Agent %s already present'), agent_id)
-            LOG.debug('Network %(network_id)s is scheduled to be '
-                      'hosted by DHCP agent %(agent_id)s',
-                      {'network_id': network_id,
-                       'agent_id': agent_id})
-        super(DhcpFilter, self).bind(context, bound_agents, network_id)
-
-    def filter_agents(self, plugin, context, network):
-        """Return the agents that can host the network.
-
-        This function returns a dictionary with 3 keys:
-        n_agents: The number of agents that should be scheduled. If
-        n_agents=0, the network is already fully scheduled or no more
-        agents can host it.
-        hostable_agents: A list of agents which can host the network.
-        hosted_agents: A list of agents which already host the network.
-        """
-        agents_dict = self._get_network_hostable_dhcp_agents(
-                                    plugin, context, network)
-        if not agents_dict['hostable_agents'] or agents_dict['n_agents'] <= 0:
-            return {'n_agents': 0, 'hostable_agents': [],
-                    'hosted_agents': agents_dict['hosted_agents']}
-        return agents_dict
-
-    def _get_dhcp_agents_hosting_network(self, plugin, context, network):
-        """Return dhcp agents hosting the given network or None if a given
-           network is already hosted by enough number of agents.
-        """
-        agents_per_network = cfg.CONF.dhcp_agents_per_network
-        # TODO(gongysh) don't schedule the networks with only
-        # subnets whose enable_dhcp is false
-        with context.session.begin(subtransactions=True):
-            network_hosted_agents = plugin.get_dhcp_agents_hosting_networks(
-                context, [network['id']])
-            if len(network_hosted_agents) >= agents_per_network:
-                LOG.debug('Network %s is already hosted by enough agents.',
-                          network['id'])
-                return
-        return network_hosted_agents
-
-    def _get_active_agents(self, plugin, context, az_hints):
-        """Return a list of active dhcp agents."""
-        with context.session.begin(subtransactions=True):
-            filters = {'agent_type': [constants.AGENT_TYPE_DHCP],
-                       'admin_state_up': [True]}
-            if az_hints:
-                filters['availability_zone'] = az_hints
-            active_dhcp_agents = plugin.get_agents_db(
-                context, filters=filters)
-            if not active_dhcp_agents:
-                LOG.warn(_LW('No active DHCP agents'))
-                return []
-        return active_dhcp_agents
-
-    def _get_network_hostable_dhcp_agents(self, plugin, context, network):
-        """Provide information on hostable DHCP agents for network.
-
-        The returned value includes the number of agents that will actually
-        host the given network, a list of DHCP agents that can host the given
-        network, and a list of DHCP agents currently hosting the network.
-        """
-        hosted_agents = self._get_dhcp_agents_hosting_network(plugin,
-                                                              context, network)
-        if hosted_agents is None:
-            return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': []}
-        n_agents = cfg.CONF.dhcp_agents_per_network - len(hosted_agents)
-        az_hints = (network.get(az_ext.AZ_HINTS) or
-                    cfg.CONF.default_availability_zones)
-        active_dhcp_agents = self._get_active_agents(plugin, context, az_hints)
-        if not active_dhcp_agents:
-            return {'n_agents': 0, 'hostable_agents': [],
-                    'hosted_agents': hosted_agents}
-        hostable_dhcp_agents = [
-            agent for agent in set(active_dhcp_agents)
-            if agent not in hosted_agents and plugin.is_eligible_agent(
-                context, True, agent)
-        ]
-
-        if not hostable_dhcp_agents:
-            return {'n_agents': 0, 'hostable_agents': [],
-                    'hosted_agents': hosted_agents}
-        n_agents = min(len(hostable_dhcp_agents), n_agents)
-        return {'n_agents': n_agents, 'hostable_agents': hostable_dhcp_agents,
-                'hosted_agents': hosted_agents}
diff --git a/neutron/scheduler/l3_agent_scheduler.py b/neutron/scheduler/l3_agent_scheduler.py
deleted file mode 100644 (file)
index 488d974..0000000
+++ /dev/null
@@ -1,469 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import collections
-import itertools
-import random
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-import six
-from sqlalchemy import sql
-
-from neutron._i18n import _LE, _LW
-from neutron.common import constants
-from neutron.common import utils
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_db
-from neutron.db import l3_hamode_db
-from neutron.extensions import availability_zone as az_ext
-
-
-LOG = logging.getLogger(__name__)
-cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class L3Scheduler(object):
-
-    def __init__(self):
-        self.min_ha_agents = cfg.CONF.min_l3_agents_per_router
-        self.max_ha_agents = cfg.CONF.max_l3_agents_per_router
-
-    @abc.abstractmethod
-    def schedule(self, plugin, context, router_id,
-                 candidates=None, hints=None):
-        """Schedule the router to an active L3 agent.
-
-        Schedule the router only if it is not already scheduled.
-        """
-        pass
-
-    def _router_has_binding(self, context, router_id, l3_agent_id):
-        router_binding_model = l3_agentschedulers_db.RouterL3AgentBinding
-
-        query = context.session.query(router_binding_model)
-        query = query.filter(router_binding_model.router_id == router_id,
-                             router_binding_model.l3_agent_id == l3_agent_id)
-
-        return query.count() > 0
-
-    def _filter_unscheduled_routers(self, context, plugin, routers):
-        """Filter from list of routers the ones that are not scheduled."""
-        unscheduled_routers = []
-        for router in routers:
-            l3_agents = plugin.get_l3_agents_hosting_routers(
-                context, [router['id']])
-            if l3_agents:
-                LOG.debug('Router %(router_id)s has already been '
-                          'hosted by L3 agent %(agent_id)s',
-                          {'router_id': router['id'],
-                           'agent_id': l3_agents[0]['id']})
-            else:
-                unscheduled_routers.append(router)
-        return unscheduled_routers
-
-    def _get_unscheduled_routers(self, context, plugin):
-        """Get routers with no agent binding."""
-        # TODO(gongysh) consider the disabled agent's router
-        no_agent_binding = ~sql.exists().where(
-            l3_db.Router.id ==
-            l3_agentschedulers_db.RouterL3AgentBinding.router_id)
-        query = context.session.query(l3_db.Router.id).filter(no_agent_binding)
-        unscheduled_router_ids = [router_id_[0] for router_id_ in query]
-        if unscheduled_router_ids:
-            return plugin.get_routers(
-                context, filters={'id': unscheduled_router_ids})
-        return []
-
-    def _get_routers_to_schedule(self, context, plugin,
-                                 router_ids=None, exclude_distributed=False):
-        """Verify that the routers specified need to be scheduled.
-
-        :param context: the context
-        :param plugin: the core plugin
-        :param router_ids: the list of routers to be checked for scheduling
-        :param exclude_distributed: whether or not to consider dvr routers
-        :returns: the list of routers to be scheduled
-        """
-        if router_ids is not None:
-            routers = plugin.get_routers(context, filters={'id': router_ids})
-            unscheduled_routers = self._filter_unscheduled_routers(
-                context, plugin, routers)
-        else:
-            unscheduled_routers = self._get_unscheduled_routers(context,
-                                                                plugin)
-
-        if exclude_distributed:
-            unscheduled_routers = [
-                r for r in unscheduled_routers if not r.get('distributed')
-            ]
-        return unscheduled_routers
-
-    def _get_routers_can_schedule(self, context, plugin, routers, l3_agent):
-        """Get the subset of routers that can be scheduled on the L3 agent."""
-        ids_to_discard = set()
-        for router in routers:
-            # check if the l3 agent is compatible with the router
-            candidates = plugin.get_l3_agent_candidates(
-                context, router, [l3_agent])
-            if not candidates:
-                ids_to_discard.add(router['id'])
-
-        return [r for r in routers if r['id'] not in ids_to_discard]
-
-    def auto_schedule_routers(self, plugin, context, host, router_ids):
-        """Schedule non-hosted routers to L3 Agent running on host.
-
-        If router_ids is given, each router in router_ids is scheduled
-        if it is not scheduled yet. Otherwise all unscheduled routers
-        are scheduled.
-        Do not schedule the routers which are hosted already
-        by active l3 agents.
-
-        :returns: True if routers have been successfully assigned to host
-        """
-        l3_agent = plugin.get_enabled_agent_on_host(
-            context, constants.AGENT_TYPE_L3, host)
-        if not l3_agent:
-            return False
-
-        # NOTE(armando-migliaccio): DVR routers should not be auto
-        # scheduled because auto-scheduling may interfere with the
-        # placement rules for IR and SNAT namespaces.
-        unscheduled_routers = self._get_routers_to_schedule(
-            context, plugin, router_ids, exclude_distributed=True)
-        if not unscheduled_routers:
-            if utils.is_extension_supported(
-                    plugin, constants.L3_HA_MODE_EXT_ALIAS):
-                return self._schedule_ha_routers_to_additional_agent(
-                    plugin, context, l3_agent)
-
-        target_routers = self._get_routers_can_schedule(
-            context, plugin, unscheduled_routers, l3_agent)
-        if not target_routers:
-            LOG.warn(_LW('No routers compatible with L3 agent configuration '
-                         'on host %s'), host)
-            return False
-
-        self._bind_routers(context, plugin, target_routers, l3_agent)
-        return True
-
-    def _get_candidates(self, plugin, context, sync_router):
-        """Return L3 agents where a router could be scheduled."""
-        with context.session.begin(subtransactions=True):
-            # It is acceptable for a router to be hosted by just one
-            # enabled l3 agent: "active" is just a timing problem,
-            # since a non-active l3 agent can return to active at
-            # any time.
-            current_l3_agents = plugin.get_l3_agents_hosting_routers(
-                context, [sync_router['id']], admin_state_up=True)
-            is_router_distributed = sync_router.get('distributed', False)
-            if current_l3_agents and not is_router_distributed:
-                LOG.debug('Router %(router_id)s has already been hosted '
-                          'by L3 agent %(agent_id)s',
-                          {'router_id': sync_router['id'],
-                           'agent_id': current_l3_agents[0]['id']})
-                return []
-
-            active_l3_agents = plugin.get_l3_agents(context, active=True)
-            if not active_l3_agents:
-                LOG.warn(_LW('No active L3 agents'))
-                return []
-            potential_candidates = list(
-                set(active_l3_agents) - set(current_l3_agents))
-            new_l3agents = []
-            if potential_candidates:
-                new_l3agents = plugin.get_l3_agent_candidates(
-                    context, sync_router, potential_candidates)
-                if not new_l3agents:
-                    LOG.warn(_LW('No L3 agents can host the router %s'),
-                             sync_router['id'])
-            return new_l3agents
-
-    def _bind_routers(self, context, plugin, routers, l3_agent):
-        for router in routers:
-            if router.get('ha'):
-                if not self._router_has_binding(context, router['id'],
-                                                l3_agent.id):
-                    self.create_ha_port_and_bind(
-                        plugin, context, router['id'],
-                        router['tenant_id'], l3_agent)
-            else:
-                self.bind_router(context, router['id'], l3_agent)
-
-    def bind_router(self, context, router_id, chosen_agent):
-        """Bind the router to the l3 agent which has been chosen."""
-        try:
-            with context.session.begin(subtransactions=True):
-                binding = l3_agentschedulers_db.RouterL3AgentBinding()
-                binding.l3_agent = chosen_agent
-                binding.router_id = router_id
-                context.session.add(binding)
-        except db_exc.DBDuplicateEntry:
-            LOG.debug('Router %(router_id)s has already been scheduled '
-                      'to L3 agent %(agent_id)s.',
-                      {'agent_id': chosen_agent.id,
-                       'router_id': router_id})
-            return
-        except db_exc.DBReferenceError:
-            LOG.debug('Router %s has already been removed '
-                      'by concurrent operation', router_id)
-            return
-
-        LOG.debug('Router %(router_id)s is scheduled to L3 agent '
-                  '%(agent_id)s', {'router_id': router_id,
-                                   'agent_id': chosen_agent.id})
-
-    def _schedule_router(self, plugin, context, router_id,
-                         candidates=None):
-        sync_router = plugin.get_router(context, router_id)
-        candidates = candidates or self._get_candidates(
-            plugin, context, sync_router)
-        chosen_agent = None
-        if sync_router.get('distributed', False):
-            for chosen_agent in candidates:
-                self.bind_router(context, router_id, chosen_agent)
-
-            # For Distributed routers check for SNAT Binding before
-            # calling the schedule_snat_router
-            snat_bindings = plugin.get_snat_bindings(context, [router_id])
-            router_gw_exists = sync_router.get('external_gateway_info', False)
-            if not snat_bindings and router_gw_exists:
-                # If GW exists for DVR routers and no SNAT binding
-                # call the schedule_snat_router
-                chosen_agent = plugin.schedule_snat_router(
-                    context, router_id, sync_router)
-            elif not router_gw_exists and snat_bindings:
-                # If DVR router and no Gateway but SNAT Binding exists then
-                # call the unbind_snat_servicenode to unbind the snat service
-                # from agent
-                plugin.unbind_snat_servicenode(context, router_id)
-        elif not candidates:
-            return
-        elif sync_router.get('ha', False):
-            chosen_agents = self._bind_ha_router(plugin, context,
-                                                 router_id, candidates)
-            if not chosen_agents:
-                return
-            chosen_agent = chosen_agents[-1]
-        else:
-            chosen_agent = self._choose_router_agent(
-                plugin, context, candidates)
-            self.bind_router(context, router_id, chosen_agent)
-        return chosen_agent
-
-    @abc.abstractmethod
-    def _choose_router_agent(self, plugin, context, candidates):
-        """Choose an agent from candidates based on a specific policy."""
-        pass
-
-    @abc.abstractmethod
-    def _choose_router_agents_for_ha(self, plugin, context, candidates):
-        """Choose agents from candidates based on a specific policy."""
-        pass
-
-    def _get_num_of_agents_for_ha(self, candidates_count):
-        return (min(self.max_ha_agents, candidates_count) if self.max_ha_agents
-                else candidates_count)
-
-    def _enough_candidates_for_ha(self, candidates):
-        if not candidates or len(candidates) < self.min_ha_agents:
-            LOG.error(_LE("Not enough candidates, a HA router needs at least "
-                          "%s agents"), self.min_ha_agents)
-            return False
-        return True
-
-    def create_ha_port_and_bind(self, plugin, context, router_id,
-                                tenant_id, agent):
-        """Creates and binds a new HA port for this agent."""
-        ha_network = plugin.get_ha_network(context, tenant_id)
-        port_binding = plugin.add_ha_port(context.elevated(), router_id,
-                                          ha_network.network.id, tenant_id)
-        with context.session.begin(subtransactions=True):
-            port_binding.l3_agent_id = agent['id']
-        self.bind_router(context, router_id, agent)
-
-    def get_ha_routers_l3_agents_counts(self, context, plugin, filters=None):
-        """Return a mapping (router, # agents) matching specified filters."""
-        return plugin.get_ha_routers_l3_agents_count(context)
-
-    def _schedule_ha_routers_to_additional_agent(self, plugin, context, agent):
-        """Bind already scheduled routers to the agent.
-
-        Retrieve the number of agents per router and schedule the router
-        on the given agent if max_l3_agents_per_router has not yet been
-        reached.
-        """
-
-        routers_agents = self.get_ha_routers_l3_agents_counts(context, plugin,
-                                                              agent)
-        scheduled = False
-        admin_ctx = context.elevated()
-        for router, agents in routers_agents:
-            max_agents_not_reached = (
-                not self.max_ha_agents or agents < self.max_ha_agents)
-            if max_agents_not_reached:
-                if not self._router_has_binding(admin_ctx, router['id'],
-                                                agent.id):
-                    self.create_ha_port_and_bind(plugin, admin_ctx,
-                                                 router['id'],
-                                                 router['tenant_id'],
-                                                 agent)
-                    scheduled = True
-
-        return scheduled
-
-    def _bind_ha_router_to_agents(self, plugin, context, router_id,
-                                  chosen_agents):
-        port_bindings = plugin.get_ha_router_port_bindings(context,
-                                                           [router_id])
-        for port_binding, agent in zip(port_bindings, chosen_agents):
-            with context.session.begin(subtransactions=True):
-                port_binding.l3_agent_id = agent.id
-                self.bind_router(context, router_id, agent)
-
-            LOG.debug('HA Router %(router_id)s is scheduled to L3 agent '
-                      '%(agent_id)s',
-                      {'router_id': router_id, 'agent_id': agent.id})
-
-    def _bind_ha_router(self, plugin, context, router_id, candidates):
-        """Bind a HA router to agents based on a specific policy."""
-
-        if not self._enough_candidates_for_ha(candidates):
-            return
-
-        chosen_agents = self._choose_router_agents_for_ha(
-            plugin, context, candidates)
-
-        self._bind_ha_router_to_agents(plugin, context, router_id,
-                                       chosen_agents)
-
-        return chosen_agents
-
-
-class ChanceScheduler(L3Scheduler):
-    """Randomly allocate an L3 agent for a router."""
-
-    def schedule(self, plugin, context, router_id,
-                 candidates=None):
-        return self._schedule_router(
-            plugin, context, router_id, candidates=candidates)
-
-    def _choose_router_agent(self, plugin, context, candidates):
-        return random.choice(candidates)
-
-    def _choose_router_agents_for_ha(self, plugin, context, candidates):
-        num_agents = self._get_num_of_agents_for_ha(len(candidates))
-        return random.sample(candidates, num_agents)
-
-
-class LeastRoutersScheduler(L3Scheduler):
-    """Allocate to an L3 agent with the least number of routers bound."""
-
-    def schedule(self, plugin, context, router_id,
-                 candidates=None):
-        return self._schedule_router(
-            plugin, context, router_id, candidates=candidates)
-
-    def _choose_router_agent(self, plugin, context, candidates):
-        candidate_ids = [candidate['id'] for candidate in candidates]
-        chosen_agent = plugin.get_l3_agent_with_min_routers(
-            context, candidate_ids)
-        return chosen_agent
-
-    def _choose_router_agents_for_ha(self, plugin, context, candidates):
-        num_agents = self._get_num_of_agents_for_ha(len(candidates))
-        ordered_agents = plugin.get_l3_agents_ordered_by_num_routers(
-            context, [candidate['id'] for candidate in candidates])
-        return ordered_agents[:num_agents]
-
-
-class AZLeastRoutersScheduler(LeastRoutersScheduler):
-    """Availability zone aware scheduler.
-
-       If a router is an HA router, allocate its L3 agents across
-       distinct AZs according to the router's az_hints.
-    """
-    def _get_az_hints(self, router):
-        return (router.get(az_ext.AZ_HINTS) or
-                cfg.CONF.default_availability_zones)
-
-    def _get_routers_can_schedule(self, context, plugin, routers, l3_agent):
-        """Overwrite L3Scheduler's method to filter by availability zone."""
-        target_routers = []
-        for r in routers:
-            az_hints = self._get_az_hints(r)
-            if not az_hints or l3_agent['availability_zone'] in az_hints:
-                target_routers.append(r)
-
-        if not target_routers:
-            return
-
-        return super(AZLeastRoutersScheduler, self)._get_routers_can_schedule(
-            context, plugin, target_routers, l3_agent)
-
-    def _get_candidates(self, plugin, context, sync_router):
-        """Overwrite L3Scheduler's method to filter by availability zone."""
-        all_candidates = (
-            super(AZLeastRoutersScheduler, self)._get_candidates(
-                plugin, context, sync_router))
-
-        candidates = []
-        az_hints = self._get_az_hints(sync_router)
-        for agent in all_candidates:
-            if not az_hints or agent['availability_zone'] in az_hints:
-                candidates.append(agent)
-
-        return candidates
-
-    def get_ha_routers_l3_agents_counts(self, context, plugin, filters=None):
-        """Overwrite L3Scheduler's method to filter by availability zone."""
-        all_routers_agents = (
-            super(AZLeastRoutersScheduler, self).
-            get_ha_routers_l3_agents_counts(context, plugin, filters))
-        if filters is None:
-            return all_routers_agents
-
-        routers_agents = []
-        for router, agents in all_routers_agents:
-            az_hints = self._get_az_hints(router)
-            if az_hints and filters['availability_zone'] not in az_hints:
-                continue
-            routers_agents.append((router, agents))
-
-        return routers_agents
-
-    def _choose_router_agents_for_ha(self, plugin, context, candidates):
-        ordered_agents = plugin.get_l3_agents_ordered_by_num_routers(
-            context, [candidate['id'] for candidate in candidates])
-        num_agents = self._get_num_of_agents_for_ha(len(ordered_agents))
-
-        # Order is kept in each az
-        group_by_az = collections.defaultdict(list)
-        for agent in ordered_agents:
-            az = agent['availability_zone']
-            group_by_az[az].append(agent)
-
-        selected_agents = []
-        for az, agents in itertools.cycle(group_by_az.items()):
-            if not agents:
-                continue
-            selected_agents.append(agents.pop(0))
-            if len(selected_agents) >= num_agents:
-                break
-        return selected_agents
diff --git a/neutron/server/__init__.py b/neutron/server/__init__.py
deleted file mode 100644 (file)
index 32ebdf2..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# If ../neutron/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-
-import sys
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.common import config
-
-
-def boot_server(server_func):
-    # the configuration will be read into the cfg.CONF global data structure
-    config.init(sys.argv[1:])
-    config.setup_logging()
-    if not cfg.CONF.config_file:
-        sys.exit(_("ERROR: Unable to find configuration file via the default"
-                   " search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and"
-                   " the '--config-file' option!"))
-    try:
-        server_func()
-    except KeyboardInterrupt:
-        pass
-    except RuntimeError as e:
-        sys.exit(_("ERROR: %s") % e)
diff --git a/neutron/server/rpc_eventlet.py b/neutron/server/rpc_eventlet.py
deleted file mode 100644 (file)
index 8fd22cb..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# If ../neutron/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-
-import eventlet
-from oslo_log import log
-
-from neutron._i18n import _LI
-from neutron import server
-from neutron import service
-
-LOG = log.getLogger(__name__)
-
-
-def _eventlet_rpc_server():
-    pool = eventlet.GreenPool()
-    LOG.info(_LI("Eventlet based AMQP RPC server starting..."))
-    try:
-        neutron_rpc = service.serve_rpc()
-    except NotImplementedError:
-        LOG.info(_LI("RPC was already started in parent process by "
-                     "plugin."))
-    else:
-        pool.spawn(neutron_rpc.wait)
-    pool.waitall()
-
-
-def main():
-    server.boot_server(_eventlet_rpc_server)
diff --git a/neutron/server/wsgi_eventlet.py b/neutron/server/wsgi_eventlet.py
deleted file mode 100644 (file)
index 7251355..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import eventlet
-
-from oslo_log import log
-
-from neutron._i18n import _LI
-from neutron import server
-from neutron import service
-
-LOG = log.getLogger(__name__)
-
-
-def _eventlet_wsgi_server():
-    pool = eventlet.GreenPool()
-
-    neutron_api = service.serve_wsgi(service.NeutronApiService)
-    api_thread = pool.spawn(neutron_api.wait)
-
-    try:
-        neutron_rpc = service.serve_rpc()
-    except NotImplementedError:
-        LOG.info(_LI("RPC was already started in parent process by "
-                     "plugin."))
-    else:
-        rpc_thread = pool.spawn(neutron_rpc.wait)
-
-        plugin_workers = service.start_plugin_workers()
-        for worker in plugin_workers:
-            pool.spawn(worker.wait)
-
-        # api and rpc should die together.  When one dies, kill the other.
-        rpc_thread.link(lambda gt: api_thread.kill())
-        api_thread.link(lambda gt: rpc_thread.kill())
-
-    pool.waitall()
-
-
-def main():
-    server.boot_server(_eventlet_wsgi_server)
diff --git a/neutron/server/wsgi_pecan.py b/neutron/server/wsgi_pecan.py
deleted file mode 100644 (file)
index e719465..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import logging as std_logging
-from wsgiref import simple_server
-
-from oslo_config import cfg
-from oslo_log import log
-from six.moves import socketserver
-
-from neutron._i18n import _LI, _LW
-from neutron.common import rpc as n_rpc
-from neutron.pecan_wsgi import app as pecan_app
-from neutron import server
-
-LOG = log.getLogger(__name__)
-
-
-class ThreadedSimpleServer(socketserver.ThreadingMixIn,
-                           simple_server.WSGIServer):
-    pass
-
-
-def _pecan_wsgi_server():
-    LOG.info(_LI("Pecan WSGI server starting..."))
-    # No AMQP connection should be created within this process
-    n_rpc.RPC_DISABLED = True
-    application = pecan_app.setup_app()
-
-    host = cfg.CONF.bind_host
-    port = cfg.CONF.bind_port
-
-    wsgi = simple_server.make_server(
-        host,
-        port,
-        application,
-        server_class=ThreadedSimpleServer
-    )
-    # Log option values
-    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
-    LOG.warning(
-        _LW("Development Server Serving on http://%(host)s:%(port)s"),
-        {'host': host, 'port': port}
-    )
-
-    wsgi.serve_forever()
-
-
-def main():
-    server.boot_server(_pecan_wsgi_server)
diff --git a/neutron/service.py b/neutron/service.py
deleted file mode 100644 (file)
index 4f0a0fd..0000000
+++ /dev/null
@@ -1,360 +0,0 @@
-# Copyright 2011 VMware, Inc
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import inspect
-import os
-import random
-
-from oslo_concurrency import processutils
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_messaging import server as rpc_server
-from oslo_service import loopingcall
-from oslo_service import service as common_service
-from oslo_utils import excutils
-from oslo_utils import importutils
-
-from neutron._i18n import _, _LE, _LI
-from neutron.common import config
-from neutron.common import rpc as n_rpc
-from neutron import context
-from neutron.db import api as session
-from neutron import manager
-from neutron import worker
-from neutron import wsgi
-
-
-service_opts = [
-    cfg.IntOpt('periodic_interval',
-               default=40,
-               help=_('Seconds between running periodic tasks')),
-    cfg.IntOpt('api_workers',
-               help=_('Number of separate API worker processes for service. '
-                      'If not specified, the default is equal to the number '
-                      'of CPUs available for best performance.')),
-    cfg.IntOpt('rpc_workers',
-               default=1,
-               help=_('Number of RPC worker processes for service')),
-    cfg.IntOpt('rpc_state_report_workers',
-               default=1,
-               help=_('Number of RPC worker processes dedicated to state '
-                      'reports queue')),
-    cfg.IntOpt('periodic_fuzzy_delay',
-               default=5,
-               help=_('Range of seconds to randomly delay when starting the '
-                      'periodic task scheduler to reduce stampeding. '
-                      '(Disable by setting to 0)')),
-]
-CONF = cfg.CONF
-CONF.register_opts(service_opts)
-
-LOG = logging.getLogger(__name__)
-
-
-class WsgiService(object):
-    """Base class for WSGI based services.
-
-    For each api you define, you must also define these flags:
-    :<api>_listen: The address on which to listen
-    :<api>_listen_port: The port on which to listen
-
-    """
-
-    def __init__(self, app_name):
-        self.app_name = app_name
-        self.wsgi_app = None
-
-    def start(self):
-        self.wsgi_app = _run_wsgi(self.app_name)
-
-    def wait(self):
-        self.wsgi_app.wait()
-
-
-class NeutronApiService(WsgiService):
-    """Class for neutron-api service."""
-
-    @classmethod
-    def create(cls, app_name='neutron'):
-
-        # Set up logging early, supplying both the CLI options and the
-        # configuration mapping from the config file. We only update the
-        # conf dict for the verbose and debug flags; everything else must
-        # be set up in the conf file. Log the options used when starting
-        # if we're in debug mode.
-
-        config.setup_logging()
-        service = cls(app_name)
-        return service
-
-
-def serve_wsgi(cls):
-
-    try:
-        service = cls.create()
-        service.start()
-    except Exception:
-        with excutils.save_and_reraise_exception():
-            LOG.exception(_LE('Unrecoverable error: please check log '
-                              'for details.'))
-
-    return service
-
-
-def start_plugin_workers():
-    launchers = []
-    # NOTE(twilson) get_service_plugins also returns the core plugin
-    for plugin in manager.NeutronManager.get_unique_service_plugins():
-        # TODO(twilson) Instead of defaulting here, come up with a good way to
-        # share a common get_workers default between NeutronPluginBaseV2 and
-        # ServicePluginBase
-        for plugin_worker in getattr(plugin, 'get_workers', tuple)():
-            launcher = common_service.ProcessLauncher(cfg.CONF)
-            launcher.launch_service(plugin_worker)
-            launchers.append(launcher)
-    return launchers
-
-
-class RpcWorker(worker.NeutronWorker):
-    """Wraps a worker to be handled by ProcessLauncher"""
-    start_listeners_method = 'start_rpc_listeners'
-
-    def __init__(self, plugins):
-        self._plugins = plugins
-        self._servers = []
-
-    def start(self):
-        super(RpcWorker, self).start()
-        for plugin in self._plugins:
-            if hasattr(plugin, self.start_listeners_method):
-                servers = getattr(plugin, self.start_listeners_method)()
-                self._servers.extend(servers)
-
-    def wait(self):
-        try:
-            self._wait()
-        except Exception:
-            LOG.exception(_LE('done with wait'))
-            raise
-
-    def _wait(self):
-        LOG.debug('calling RpcWorker wait()')
-        for server in self._servers:
-            if isinstance(server, rpc_server.MessageHandlingServer):
-                LOG.debug('calling wait on %s', server)
-                server.wait()
-            else:
-                LOG.debug('NOT calling wait on %s', server)
-        LOG.debug('returning from RpcWorker wait()')
-
-    def stop(self):
-        LOG.debug('calling RpcWorker stop()')
-        for server in self._servers:
-            if isinstance(server, rpc_server.MessageHandlingServer):
-                LOG.debug('calling stop on %s', server)
-                server.stop()
-
-    @staticmethod
-    def reset():
-        config.reset_service()
-
-
-class RpcReportsWorker(RpcWorker):
-    start_listeners_method = 'start_rpc_state_reports_listener'
-
-
-def serve_rpc():
-    plugin = manager.NeutronManager.get_plugin()
-    service_plugins = (
-        manager.NeutronManager.get_service_plugins().values())
-
-    if cfg.CONF.rpc_workers < 1:
-        cfg.CONF.set_override('rpc_workers', 1)
-
-    # If 0 < rpc_workers then start_rpc_listeners would be called in a
-    # subprocess and we cannot simply catch the NotImplementedError.  It is
-    # simpler to check this up front by testing whether the plugin supports
-    # multiple RPC workers.
-    if not plugin.rpc_workers_supported():
-        LOG.debug("Active plugin doesn't implement start_rpc_listeners")
-        if 0 < cfg.CONF.rpc_workers:
-            LOG.error(_LE("'rpc_workers = %d' ignored because "
-                          "start_rpc_listeners is not implemented."),
-                      cfg.CONF.rpc_workers)
-        raise NotImplementedError()
-
-    try:
-        # passing service plugins only, because core plugin is among them
-        rpc = RpcWorker(service_plugins)
-        # Dispose of the whole DB connection pool before os.fork();
-        # otherwise child processes would share DB connections, which
-        # may cause DB errors.
-        LOG.debug('using launcher for rpc, workers=%s', cfg.CONF.rpc_workers)
-        session.dispose()
-        launcher = common_service.ProcessLauncher(cfg.CONF, wait_interval=1.0)
-        launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers)
-        if (cfg.CONF.rpc_state_report_workers > 0 and
-            plugin.rpc_state_report_workers_supported()):
-            rpc_state_rep = RpcReportsWorker([plugin])
-            LOG.debug('using launcher for state reports rpc, workers=%s',
-                      cfg.CONF.rpc_state_report_workers)
-            launcher.launch_service(
-                rpc_state_rep, workers=cfg.CONF.rpc_state_report_workers)
-
-        return launcher
-    except Exception:
-        with excutils.save_and_reraise_exception():
-            LOG.exception(_LE('Unrecoverable error: please check log for '
-                              'details.'))
-
-
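A generic, standalone sketch of the dispose-before-fork pattern that
serve_rpc() relies on (plain SQLAlchemy, not neutron code; assumes an
in-memory SQLite engine):

    import os

    import sqlalchemy

    engine = sqlalchemy.create_engine('sqlite://')

    # Drop pooled connections so children do not inherit live sockets.
    engine.dispose()

    pid = os.fork()
    if pid == 0:
        # Child: its first query opens a fresh connection of its own.
        engine.connect().close()
        os._exit(0)
    os.waitpid(pid, 0)
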
-def _get_api_workers():
-    workers = cfg.CONF.api_workers
-    if not workers:
-        workers = processutils.get_worker_count()
-    return workers
-
-
-def _run_wsgi(app_name):
-    app = config.load_paste_app(app_name)
-    if not app:
-        LOG.error(_LE('No known API applications configured.'))
-        return
-    server = wsgi.Server("Neutron")
-    server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host,
-                 workers=_get_api_workers())
-    LOG.info(_LI("Neutron service started, listening on %(host)s:%(port)s"),
-             {'host': cfg.CONF.bind_host, 'port': cfg.CONF.bind_port})
-    return server
-
-
-class Service(n_rpc.Service):
-    """Service object for binaries running on hosts.
-
-    A service takes a manager and enables rpc by listening to queues based
-    on topic. It also periodically runs tasks on the manager.
-    """
-
-    def __init__(self, host, binary, topic, manager, report_interval=None,
-                 periodic_interval=None, periodic_fuzzy_delay=None,
-                 *args, **kwargs):
-
-        self.binary = binary
-        self.manager_class_name = manager
-        manager_class = importutils.import_class(self.manager_class_name)
-        self.manager = manager_class(host=host, *args, **kwargs)
-        self.report_interval = report_interval
-        self.periodic_interval = periodic_interval
-        self.periodic_fuzzy_delay = periodic_fuzzy_delay
-        self.saved_args, self.saved_kwargs = args, kwargs
-        self.timers = []
-        super(Service, self).__init__(host, topic, manager=self.manager)
-
-    def start(self):
-        self.manager.init_host()
-        super(Service, self).start()
-        if self.report_interval:
-            pulse = loopingcall.FixedIntervalLoopingCall(self.report_state)
-            pulse.start(interval=self.report_interval,
-                        initial_delay=self.report_interval)
-            self.timers.append(pulse)
-
-        if self.periodic_interval:
-            if self.periodic_fuzzy_delay:
-                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
-            else:
-                initial_delay = None
-
-            periodic = loopingcall.FixedIntervalLoopingCall(
-                self.periodic_tasks)
-            periodic.start(interval=self.periodic_interval,
-                           initial_delay=initial_delay)
-            self.timers.append(periodic)
-        self.manager.after_start()
-
-    def __getattr__(self, key):
-        manager = self.__dict__.get('manager', None)
-        return getattr(manager, key)
-
-    @classmethod
-    def create(cls, host=None, binary=None, topic=None, manager=None,
-               report_interval=None, periodic_interval=None,
-               periodic_fuzzy_delay=None):
-        """Instantiates class and passes back application object.
-
-        :param host: defaults to CONF.host
-        :param binary: defaults to basename of executable
-        :param topic: defaults to bin_name - 'neutron-' part
-        :param manager: defaults to CONF.<topic>_manager
-        :param report_interval: defaults to CONF.report_interval
-        :param periodic_interval: defaults to CONF.periodic_interval
-        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
-
-        """
-        if not host:
-            host = CONF.host
-        if not binary:
-            binary = os.path.basename(inspect.stack()[-1][1])
-        if not topic:
-            topic = binary.rpartition('neutron-')[2]
-            topic = topic.replace("-", "_")
-        if not manager:
-            manager = CONF.get('%s_manager' % topic, None)
-        if report_interval is None:
-            report_interval = CONF.report_interval
-        if periodic_interval is None:
-            periodic_interval = CONF.periodic_interval
-        if periodic_fuzzy_delay is None:
-            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
-        service_obj = cls(host, binary, topic, manager,
-                          report_interval=report_interval,
-                          periodic_interval=periodic_interval,
-                          periodic_fuzzy_delay=periodic_fuzzy_delay)
-
-        return service_obj
-
-    def kill(self):
-        """Destroy the service object."""
-        self.stop()
-
-    def stop(self):
-        super(Service, self).stop()
-        for x in self.timers:
-            try:
-                x.stop()
-            except Exception:
-                LOG.exception(_LE("Exception occurred while stopping timer"))
-        self.timers = []
-
-    def wait(self):
-        super(Service, self).wait()
-        for x in self.timers:
-            try:
-                x.wait()
-            except Exception:
-                LOG.exception(_LE("Exception occurred waiting for timer"))
-
-    def reset(self):
-        config.reset_service()
-
-    def periodic_tasks(self, raise_on_error=False):
-        """Tasks to be run at a periodic interval."""
-        ctxt = context.get_admin_context()
-        self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
-
-    def report_state(self):
-        """Update the state of this service."""
-        # TODO(gongysh) report state to neutron server
-        pass
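
A small worked example of how Service.create() derives its defaults
from the binary name, following the docstring above (the binary name
is hypothetical):

    binary = 'neutron-metering-agent'
    topic = binary.rpartition('neutron-')[2].replace('-', '_')
    assert topic == 'metering_agent'
    # 'manager' then defaults to CONF.metering_agent_manager, if set.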
diff --git a/neutron/services/__init__.py b/neutron/services/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/firewall/__init__.py b/neutron/services/firewall/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/firewall/agents/__init__.py b/neutron/services/firewall/agents/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/firewall/agents/firewall_agent_api.py b/neutron/services/firewall/agents/firewall_agent_api.py
deleted file mode 100644 (file)
index ede77c3..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-import oslo_messaging
-
-from neutron._i18n import _
-from neutron.common import rpc as n_rpc
-
-
-FWaaSOpts = [
-    cfg.StrOpt(
-        'driver',
-        default='',
-        help=_("Name of the FWaaS Driver")),
-    cfg.BoolOpt(
-        'enabled',
-        default=False,
-        help=_("Enable FWaaS")),
-]
-cfg.CONF.register_opts(FWaaSOpts, 'fwaas')
-
-
-class FWaaSPluginApiMixin(object):
-    """Agent side of the FWaaS agent to FWaaS Plugin RPC API."""
-
-    def __init__(self, topic, host):
-        self.host = host
-        target = oslo_messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def set_firewall_status(self, context, firewall_id, status):
-        """Make an RPC call to set the status of a firewall."""
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'set_firewall_status', host=self.host,
-                          firewall_id=firewall_id, status=status)
-
-    def firewall_deleted(self, context, firewall_id):
-        """Make an RPC call indicating the firewall resources are deleted."""
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'firewall_deleted', host=self.host,
-                          firewall_id=firewall_id)
-
-
-class FWaaSAgentRpcCallbackMixin(object):
-    """Mixin for FWaaS agent Implementations."""
-
-    def __init__(self, host):
-
-        super(FWaaSAgentRpcCallbackMixin, self).__init__(host)
-
-    def create_firewall(self, context, firewall, host):
-        """Handle RPC cast from plugin to create a firewall."""
-        pass
-
-    def update_firewall(self, context, firewall, host):
-        """Handle RPC cast from plugin to update a firewall."""
-        pass
-
-    def delete_firewall(self, context, firewall, host):
-        """Handle RPC cast from plugin to delete a firewall."""
-        pass
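
A self-contained sketch of how the [fwaas] options registered above
behave under oslo.config (option names mirror FWaaSOpts; the config
values shown are assumptions):

    from oslo_config import cfg

    conf = cfg.ConfigOpts()
    conf.register_opts([
        cfg.StrOpt('driver', default=''),
        cfg.BoolOpt('enabled', default=False),
    ], group='fwaas')
    conf([])  # parse an empty command line; a deployment reads neutron.conf
    assert conf.fwaas.enabled is False
    # With "[fwaas]\nenabled = True" in a parsed file, this becomes True.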
diff --git a/neutron/services/firewall/agents/l3reference/__init__.py b/neutron/services/firewall/agents/l3reference/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py b/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py
deleted file mode 100644 (file)
index 6980364..0000000
+++ /dev/null
@@ -1,299 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import importutils
-
-from neutron._i18n import _, _LE
-from neutron.agent.linux import ip_lib
-from neutron.common import exceptions as nexception
-from neutron import context
-from neutron.plugins.common import constants
-from neutron.services.firewall.agents import firewall_agent_api as api
-from neutron.services import provider_configuration as provconf
-
-FIREWALL_PLUGIN = 'q-firewall-plugin'
-LOG = logging.getLogger(__name__)
-
-
-class FWaaSL3PluginApi(api.FWaaSPluginApiMixin):
-    """Agent side of the FWaaS agent to FWaaS Plugin RPC API."""
-
-    def __init__(self, topic, host):
-        super(FWaaSL3PluginApi, self).__init__(topic, host)
-
-    def get_firewalls_for_tenant(self, context, **kwargs):
-        """Get the Firewalls with rules from the Plugin to send to driver."""
-        LOG.debug("Retrieve Firewall with rules from Plugin")
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'get_firewalls_for_tenant', host=self.host)
-
-    def get_tenants_with_firewalls(self, context, **kwargs):
-        """Get all Tenants that have Firewalls configured from plugin."""
-        LOG.debug("Retrieve Tenants with Firewalls configured from Plugin")
-        cctxt = self.client.prepare()
-        return cctxt.call(context,
-                          'get_tenants_with_firewalls', host=self.host)
-
-
-class FWaaSL3AgentRpcCallback(api.FWaaSAgentRpcCallbackMixin):
-    """FWaaS Agent support to be used by Neutron L3 agent."""
-
-    def __init__(self, conf):
-        LOG.debug("Initializing firewall agent")
-        self.conf = conf
-        fwaas_driver_class_path = provconf.get_provider_driver_class(
-            cfg.CONF.fwaas.driver)
-        self.fwaas_enabled = cfg.CONF.fwaas.enabled
-
-        # None means l3-agent has no information on the server
-        # configuration due to the lack of RPC support.
-        if self.neutron_service_plugins is not None:
-            fwaas_plugin_configured = (constants.FIREWALL
-                                       in self.neutron_service_plugins)
-            if fwaas_plugin_configured and not self.fwaas_enabled:
-                msg = _("FWaaS plugin is configured in the server side, but "
-                        "FWaaS is disabled in L3-agent.")
-                LOG.error(msg)
-                raise SystemExit(1)
-            self.fwaas_enabled = self.fwaas_enabled and fwaas_plugin_configured
-
-        if self.fwaas_enabled:
-            try:
-                self.fwaas_driver = importutils.import_object(
-                    fwaas_driver_class_path)
-                LOG.debug("FWaaS Driver Loaded: '%s'", fwaas_driver_class_path)
-            except ImportError:
-                msg = _('Error importing FWaaS device driver: %s')
-                raise ImportError(msg % fwaas_driver_class_path)
-        self.services_sync = False
-        # setup RPC to msg fwaas plugin
-        self.fwplugin_rpc = FWaaSL3PluginApi(FIREWALL_PLUGIN, conf.host)
-        super(FWaaSL3AgentRpcCallback, self).__init__(host=conf.host)
-
-    def _get_router_info_list_for_tenant(self, routers, tenant_id):
-        """Returns the list of router info objects on which to apply the fw."""
-        root_ip = ip_lib.IPWrapper()
-        # Get the routers for the tenant
-        router_ids = [
-            router['id']
-            for router in routers
-            if router['tenant_id'] == tenant_id]
-        local_ns_list = root_ip.get_namespaces()
-
-        router_info_list = []
-        # Pick up namespaces for Tenant Routers
-        for rid in router_ids:
-            # get_routers also returns routers that have no interface
-            # yet; those are not yet present in router_info, so skip them
-            if rid not in self.router_info:
-                continue
-            router_ns = self.router_info[rid].ns_name
-            if router_ns in local_ns_list:
-                router_info_list.append(self.router_info[rid])
-        return router_info_list
-
-    def _invoke_driver_for_plugin_api(self, context, fw, func_name):
-        """Invoke driver method for plugin API and provide status back."""
-        LOG.debug("%(func_name)s from agent for fw: %(fwid)s",
-                  {'func_name': func_name, 'fwid': fw['id']})
-        try:
-            routers = self.plugin_rpc.get_routers(context)
-            router_info_list = self._get_router_info_list_for_tenant(
-                routers,
-                fw['tenant_id'])
-            if not router_info_list:
-                LOG.debug('No Routers on tenant: %s', fw['tenant_id'])
-                # The fw may have been created before any routers were
-                # added; if a delete is sent we still need to ack so that
-                # the plugin can clean up.
-                if func_name == 'delete_firewall':
-                    self.fwplugin_rpc.firewall_deleted(context, fw['id'])
-                return
-            LOG.debug("Apply fw on Router List: '%s'",
-                      [ri.router['id'] for ri in router_info_list])
-            # call into the driver
-            try:
-                getattr(self.fwaas_driver, func_name)(
-                    self.conf.agent_mode,
-                    router_info_list,
-                    fw)
-                if fw['admin_state_up']:
-                    status = constants.ACTIVE
-                else:
-                    status = constants.DOWN
-            except nexception.FirewallInternalDriverError:
-                LOG.error(_LE("Firewall Driver Error for %(func_name)s "
-                              "for fw: %(fwid)s"),
-                          {'func_name': func_name, 'fwid': fw['id']})
-                status = constants.ERROR
-            # delete needs different handling
-            if func_name == 'delete_firewall':
-                if status in [constants.ACTIVE, constants.DOWN]:
-                    self.fwplugin_rpc.firewall_deleted(context, fw['id'])
-            else:
-                self.fwplugin_rpc.set_firewall_status(
-                    context,
-                    fw['id'],
-                    status)
-        except Exception:
-            LOG.exception(
-                _LE("FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"),
-                {'func_name': func_name, 'fwid': fw['id']})
-            self.services_sync = True
-        return
-
-    def _invoke_driver_for_sync_from_plugin(self, ctx, router_info_list, fw):
-        """Invoke the delete driver method for status of PENDING_DELETE and
-        the update method for all other statuses, to (re)apply state on
-        the driver, which is idempotent.
-        """
-        if fw['status'] == constants.PENDING_DELETE:
-            try:
-                self.fwaas_driver.delete_firewall(
-                    self.conf.agent_mode,
-                    router_info_list,
-                    fw)
-                self.fwplugin_rpc.firewall_deleted(
-                    ctx,
-                    fw['id'])
-            except nexception.FirewallInternalDriverError:
-                LOG.error(_LE("Firewall Driver Error on fw state %(fwmsg)s "
-                              "for fw: %(fwid)s"),
-                          {'fwmsg': fw['status'], 'fwid': fw['id']})
-                self.fwplugin_rpc.set_firewall_status(
-                    ctx,
-                    fw['id'],
-                    constants.ERROR)
-        else:
-            # PENDING_UPDATE, PENDING_CREATE, ...
-            try:
-                self.fwaas_driver.update_firewall(
-                    self.conf.agent_mode,
-                    router_info_list,
-                    fw)
-                if fw['admin_state_up']:
-                    status = constants.ACTIVE
-                else:
-                    status = constants.DOWN
-            except nexception.FirewallInternalDriverError:
-                LOG.error(_LE("Firewall Driver Error on fw state %(fwmsg)s "
-                              "for fw: %(fwid)s"),
-                          {'fwmsg': fw['status'], 'fwid': fw['id']})
-                status = constants.ERROR
-
-            self.fwplugin_rpc.set_firewall_status(
-                ctx,
-                fw['id'],
-                status)
-
-    def _process_router_add(self, ri):
-        """On router add, get fw with rules from plugin and update driver."""
-        LOG.debug("Process router add, router_id: '%s'", ri.router['id'])
-        routers = [ri.router]
-        router_info_list = self._get_router_info_list_for_tenant(
-            routers,
-            ri.router['tenant_id'])
-        if router_info_list:
-            # Get the firewall with rules
-            # for the tenant the router is on.
-            ctx = context.Context('', ri.router['tenant_id'])
-            fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx)
-            LOG.debug("Process router add, fw_list: '%s'",
-                      [fw['id'] for fw in fw_list])
-            for fw in fw_list:
-                self._invoke_driver_for_sync_from_plugin(
-                    ctx,
-                    router_info_list,
-                    fw)
-
-    def process_router_add(self, ri):
-        """On router add, get fw with rules from plugin and update driver."""
-        # avoid msg to plugin when fwaas is not configured
-        if not self.fwaas_enabled:
-            return
-        try:
-            self._process_router_add(ri)
-        except Exception:
-            LOG.exception(
-                _LE("FWaaS RPC info call failed for '%s'."),
-                ri.router['id'])
-            self.services_sync = True
-
-    def process_services_sync(self, ctx):
-        """On RPC issues sync with plugin and apply the sync data."""
-        # avoid msg to plugin when fwaas is not configured
-        if not self.fwaas_enabled:
-            return
-        try:
-            # get all routers
-            routers = self.plugin_rpc.get_routers(ctx)
-            # get the list of tenants with firewalls configured
-            # from the plugin
-            tenant_ids = self.fwplugin_rpc.get_tenants_with_firewalls(ctx)
-            LOG.debug("Tenants with Firewalls: '%s'", tenant_ids)
-            for tenant_id in tenant_ids:
-                ctx = context.Context('', tenant_id)
-                fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx)
-                if fw_list:
-                    # if fw present on tenant
-                    router_info_list = self._get_router_info_list_for_tenant(
-                        routers,
-                        tenant_id)
-                    if router_info_list:
-                        LOG.debug("Router List: '%s'",
-                                  [ri.router['id'] for ri in router_info_list])
-                        LOG.debug("fw_list: '%s'",
-                                  [fw['id'] for fw in fw_list])
-                        # apply sync data on fw for this tenant
-                        for fw in fw_list:
-                            # fw, routers present on this host for tenant
-                            # install
-                            LOG.debug("Apply fw on Router List: '%s'",
-                                      [ri.router['id']
-                                          for ri in router_info_list])
-                            # no need to apply sync data for ACTIVE fw
-                            if fw['status'] != constants.ACTIVE:
-                                self._invoke_driver_for_sync_from_plugin(
-                                    ctx,
-                                    router_info_list,
-                                    fw)
-            self.services_sync = False
-        except Exception:
-            LOG.exception(_LE("FWaaS process_services_sync failed"))
-            self.services_sync = True
-
-    def create_firewall(self, context, firewall, host):
-        """Handle RPC cast from plugin to create a firewall."""
-        return self._invoke_driver_for_plugin_api(
-            context,
-            firewall,
-            'create_firewall')
-
-    def update_firewall(self, context, firewall, host):
-        """Handle RPC cast from plugin to update a firewall."""
-        return self._invoke_driver_for_plugin_api(
-            context,
-            firewall,
-            'update_firewall')
-
-    def delete_firewall(self, context, firewall, host):
-        """Handle RPC cast from plugin to delete a firewall."""
-        return self._invoke_driver_for_plugin_api(
-            context,
-            firewall,
-            'delete_firewall')
diff --git a/neutron/services/flavors/__init__.py b/neutron/services/flavors/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/flavors/flavors_plugin.py b/neutron/services/flavors/flavors_plugin.py
deleted file mode 100644 (file)
index 98bccde..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.db import flavors_db
-from neutron.plugins.common import constants
-from neutron.services import service_base
-
-
-class FlavorsPlugin(service_base.ServicePluginBase,
-                    flavors_db.FlavorsDbMixin):
-    """Implements Neutron Flavors Service plugin."""
-
-    supported_extension_aliases = ['flavors']
-
-    def get_plugin_type(self):
-        return constants.FLAVORS
-
-    def get_plugin_description(self):
-        return "Neutron Flavors and Service Profiles manager plugin"
diff --git a/neutron/services/l3_router/README b/neutron/services/l3_router/README
deleted file mode 100644 (file)
index f6ca35b..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-This service plugin implements the L3 routing functionality (the router
-and floatingip resources) that releases before Havana provided through
-core plugins (openvswitch, linuxbridge, etc.).
-
-Core plugins can now choose not to implement L3 routing functionality and
-instead delegate that to the L3 routing service plugin.
-
-In that case, the required changes to a core plugin are:
-- Do not inherit 'l3_db.L3_NAT_db_mixin' (or its descendants like extraroute)
-  anymore.
-- Remove "router" from 'supported_extension_aliases'.
-- Modify any 'self' references to members in L3_NAT_db_mixin to instead use
-  'manager.NeutronManager.get_service_plugins().get(constants.L3_ROUTER_NAT)'
-  For example,
-     self.prevent_l3_port_deletion(...)
-  becomes something like
-     plugin = manager.NeutronManager.get_service_plugins().get(
-         constants.L3_ROUTER_NAT)
-     if plugin:
-         plugin.prevent_l3_port_deletion(...)
-
-If the core plugin relied on the L3 agent, the following must also be changed:
-- Do not inherit 'l3_rpc_base.L3RpcCallbackMixin' in any '*RpcCallbacks' class.
-- Do not be a consumer of the topics.L3PLUGIN topic for RPC.
-
-To use the L3 routing service plugin, add
-'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin'
-to 'service_plugins' in '/etc/neutron/neutron.conf'.
-That is,
-service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
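
A sketch of the delegation pattern described above, assuming a core
plugin method that previously called self.prevent_l3_port_deletion():

    from neutron import manager
    from neutron.plugins.common import constants

    def delete_port(self, context, port_id):
        plugin = manager.NeutronManager.get_service_plugins().get(
            constants.L3_ROUTER_NAT)
        if plugin:
            plugin.prevent_l3_port_deletion(context, port_id)
        # ... continue with the core plugin's own port deletion ...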
diff --git a/neutron/services/l3_router/__init__.py b/neutron/services/l3_router/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/l3_router/l3_router_plugin.py b/neutron/services/l3_router/l3_router_plugin.py
deleted file mode 100644 (file)
index 558d6da..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_log import helpers as log_helpers
-from oslo_utils import importutils
-
-from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
-from neutron.api.rpc.handlers import l3_rpc
-from neutron.common import constants as n_const
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.db import common_db_mixin
-from neutron.db import extraroute_db
-from neutron.db import l3_db
-from neutron.db import l3_dvrscheduler_db
-from neutron.db import l3_gwmode_db
-from neutron.db import l3_hamode_db
-from neutron.db import l3_hascheduler_db
-from neutron.plugins.common import constants
-from neutron.quota import resource_registry
-from neutron.services import service_base
-
-
-class L3RouterPlugin(service_base.ServicePluginBase,
-                     common_db_mixin.CommonDbMixin,
-                     extraroute_db.ExtraRoute_db_mixin,
-                     l3_hamode_db.L3_HA_NAT_db_mixin,
-                     l3_gwmode_db.L3_NAT_db_mixin,
-                     l3_dvrscheduler_db.L3_DVRsch_db_mixin,
-                     l3_hascheduler_db.L3_HA_scheduler_db_mixin):
-
-    """Implementation of the Neutron L3 Router Service Plugin.
-
-    This class implements a L3 service plugin that provides
-    router and floatingip resources and manages associated
-    request/response.
-    All DB related work is implemented in classes
-    l3_db.L3_NAT_db_mixin, l3_hamode_db.L3_HA_NAT_db_mixin,
-    l3_dvr_db.L3_NAT_with_dvr_db_mixin, and extraroute_db.ExtraRoute_db_mixin.
-    """
-    supported_extension_aliases = ["dvr", "router", "ext-gw-mode",
-                                   "extraroute", "l3_agent_scheduler",
-                                   "l3-ha", "router_availability_zone"]
-
-    @resource_registry.tracked_resources(router=l3_db.Router,
-                                         floatingip=l3_db.FloatingIP)
-    def __init__(self):
-        self.router_scheduler = importutils.import_object(
-            cfg.CONF.router_scheduler_driver)
-        self.start_periodic_l3_agent_status_check()
-        super(L3RouterPlugin, self).__init__()
-        if 'dvr' in self.supported_extension_aliases:
-            l3_dvrscheduler_db.subscribe()
-        l3_db.subscribe()
-        self.start_rpc_listeners()
-
-    @log_helpers.log_method_call
-    def start_rpc_listeners(self):
-        # RPC support
-        self.topic = topics.L3PLUGIN
-        self.conn = n_rpc.create_connection()
-        self.agent_notifiers.update(
-            {n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
-        self.endpoints = [l3_rpc.L3RpcCallback()]
-        self.conn.create_consumer(self.topic, self.endpoints,
-                                  fanout=False)
-        return self.conn.consume_in_threads()
-
-    def get_plugin_type(self):
-        return constants.L3_ROUTER_NAT
-
-    def get_plugin_description(self):
-        """Return a string description of the plugin."""
-        return ("L3 Router Service Plugin for basic L3 forwarding"
-                " between (L2) Neutron networks and access to external"
-                " networks via a NAT gateway.")
-
-    def create_floatingip(self, context, floatingip):
-        """Create floating IP.
-
-        :param context: Neutron request context
-        :param floatingip: data for the floating IP being created
-        :returns: A floating IP object on success
-
-        As the l3 router plugin asynchronously creates floating IPs
-        leveraging the l3 agent, the initial status for the floating
-        IP object will be DOWN.
-        """
-        return super(L3RouterPlugin, self).create_floatingip(
-            context, floatingip,
-            initial_status=n_const.FLOATINGIP_STATUS_DOWN)
diff --git a/neutron/services/loadbalancer/__init__.py b/neutron/services/loadbalancer/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/metering/__init__.py b/neutron/services/metering/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/metering/agents/__init__.py b/neutron/services/metering/agents/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/metering/agents/metering_agent.py b/neutron/services/metering/agents/metering_agent.py
deleted file mode 100644 (file)
index d82afe7..0000000
+++ /dev/null
@@ -1,302 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_service import loopingcall
-from oslo_service import periodic_task
-from oslo_service import service
-from oslo_utils import importutils
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.agent.common import config
-from neutron.agent import rpc as agent_rpc
-from neutron.common import config as common_config
-from neutron.common import constants as constants
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron import context
-from neutron import manager
-from neutron import service as neutron_service
-
-
-LOG = logging.getLogger(__name__)
-
-
-class MeteringPluginRpc(object):
-
-    def __init__(self, host):
-        # NOTE(yamamoto): The super().__init__() call here is not only
-        # for aesthetics.  Because of the multiple inheritance in
-        # MeteringAgent, it is actually necessary to initialize the
-        # parent classes of manager.Manager correctly.
-        super(MeteringPluginRpc, self).__init__()
-        target = oslo_messaging.Target(topic=topics.METERING_PLUGIN,
-                                       version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def _get_sync_data_metering(self, context):
-        try:
-            cctxt = self.client.prepare()
-            return cctxt.call(context, 'get_sync_data_metering',
-                              host=self.host)
-        except Exception:
-            LOG.exception(_LE("Failed synchronizing routers"))
-
-
-class MeteringAgent(MeteringPluginRpc, manager.Manager):
-
-    Opts = [
-        cfg.StrOpt('driver',
-                   default='neutron.services.metering.drivers.noop.'
-                   'noop_driver.NoopMeteringDriver',
-                   help=_("Metering driver")),
-        cfg.IntOpt('measure_interval', default=30,
-                   help=_("Interval between two metering measures")),
-        cfg.IntOpt('report_interval', default=300,
-                   help=_("Interval between two metering reports")),
-    ]
-
-    def __init__(self, host, conf=None):
-        self.conf = conf or cfg.CONF
-        self._load_drivers()
-        self.context = context.get_admin_context_without_session()
-        self.metering_loop = loopingcall.FixedIntervalLoopingCall(
-            self._metering_loop
-        )
-        measure_interval = self.conf.measure_interval
-        self.last_report = 0
-        self.metering_loop.start(interval=measure_interval)
-        self.host = host
-
-        self.label_tenant_id = {}
-        self.routers = {}
-        self.metering_infos = {}
-        super(MeteringAgent, self).__init__(host=host)
-
-    def _load_drivers(self):
-        """Loads plugin-driver from configuration."""
-        LOG.info(_LI("Loading Metering driver %s"), self.conf.driver)
-        if not self.conf.driver:
-            raise SystemExit(_('A metering driver must be specified'))
-        self.metering_driver = importutils.import_object(
-            self.conf.driver, self, self.conf)
-
-    def _metering_notification(self):
-        for label_id, info in self.metering_infos.items():
-            data = {'label_id': label_id,
-                    'tenant_id': self.label_tenant_id.get(label_id),
-                    'pkts': info['pkts'],
-                    'bytes': info['bytes'],
-                    'time': info['time'],
-                    'first_update': info['first_update'],
-                    'last_update': info['last_update'],
-                    'host': self.host}
-
-            LOG.debug("Send metering report: %s", data)
-            notifier = n_rpc.get_notifier('metering')
-            notifier.info(self.context, 'l3.meter', data)
-            info['pkts'] = 0
-            info['bytes'] = 0
-            info['time'] = 0
-
-    def _purge_metering_info(self):
-        deadline_timestamp = int(time.time()) - self.conf.report_interval
-        label_ids = [
-            label_id
-            for label_id, info in self.metering_infos.items()
-            if info['last_update'] < deadline_timestamp]
-        for label_id in label_ids:
-            del self.metering_infos[label_id]
-
-    def _add_metering_info(self, label_id, pkts, num_bytes):
-        ts = int(time.time())
-        info = self.metering_infos.get(label_id, {'bytes': 0,
-                                                  'pkts': 0,
-                                                  'time': 0,
-                                                  'first_update': ts,
-                                                  'last_update': ts})
-        info['bytes'] += num_bytes
-        info['pkts'] += pkts
-        info['time'] += ts - info['last_update']
-        info['last_update'] = ts
-
-        self.metering_infos[label_id] = info
-
-        return info
-
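A standalone sketch of the accumulation performed by
_add_metering_info(), with fixed timestamps in place of time.time()
(all numbers are hypothetical):

    def add(infos, label_id, pkts, nbytes, ts):
        info = infos.setdefault(label_id, {'bytes': 0, 'pkts': 0,
                                           'time': 0, 'first_update': ts,
                                           'last_update': ts})
        info['bytes'] += nbytes
        info['pkts'] += pkts
        info['time'] += ts - info['last_update']
        info['last_update'] = ts
        return info

    infos = {}
    add(infos, 'label-1', 5, 500, ts=100)
    info = add(infos, 'label-1', 7, 700, ts=130)
    # 'time' accumulates the seconds between consecutive samples.
    assert (info['bytes'], info['pkts'], info['time']) == (1200, 12, 30)
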
-    def _add_metering_infos(self):
-        self.label_tenant_id = {}
-        for router in self.routers.values():
-            tenant_id = router['tenant_id']
-            labels = router.get(constants.METERING_LABEL_KEY, [])
-            for label in labels:
-                label_id = label['id']
-                self.label_tenant_id[label_id] = tenant_id
-
-        accs = self._get_traffic_counters(self.context, self.routers.values())
-        if not accs:
-            return
-
-        for label_id, acc in accs.items():
-            self._add_metering_info(label_id, acc['pkts'], acc['bytes'])
-
-    def _metering_loop(self):
-        self._add_metering_infos()
-
-        ts = int(time.time())
-        delta = ts - self.last_report
-
-        report_interval = self.conf.report_interval
-        if delta > report_interval:
-            self._metering_notification()
-            self._purge_metering_info()
-            self.last_report = ts
-
-    @utils.synchronized('metering-agent')
-    def _invoke_driver(self, context, meterings, func_name):
-        try:
-            return getattr(self.metering_driver, func_name)(context, meterings)
-        except AttributeError:
-            LOG.exception(_LE("Driver %(driver)s does not implement %(func)s"),
-                          {'driver': self.conf.driver,
-                           'func': func_name})
-        except RuntimeError:
-            LOG.exception(_LE("Driver %(driver)s:%(func)s runtime error"),
-                          {'driver': self.conf.driver,
-                           'func': func_name})
-
-    @periodic_task.periodic_task(run_immediately=True)
-    def _sync_routers_task(self, context):
-        routers = self._get_sync_data_metering(self.context)
-        if not routers:
-            return
-        self._update_routers(context, routers)
-
-    def router_deleted(self, context, router_id):
-        self._add_metering_infos()
-
-        if router_id in self.routers:
-            del self.routers[router_id]
-
-        return self._invoke_driver(context, router_id,
-                                   'remove_router')
-
-    def routers_updated(self, context, routers=None):
-        if not routers:
-            routers = self._get_sync_data_metering(self.context)
-        if not routers:
-            return
-        self._update_routers(context, routers)
-
-    def _update_routers(self, context, routers):
-        for router in routers:
-            self.routers[router['id']] = router
-
-        return self._invoke_driver(context, routers,
-                                   'update_routers')
-
-    def _get_traffic_counters(self, context, routers):
-        LOG.debug("Get router traffic counters")
-        return self._invoke_driver(context, routers, 'get_traffic_counters')
-
-    def add_metering_label_rule(self, context, routers):
-        return self._invoke_driver(context, routers,
-                                   'add_metering_label_rule')
-
-    def remove_metering_label_rule(self, context, routers):
-        return self._invoke_driver(context, routers,
-                                   'remove_metering_label_rule')
-
-    def update_metering_label_rules(self, context, routers):
-        LOG.debug("Update metering rules from agent")
-        return self._invoke_driver(context, routers,
-                                   'update_metering_label_rules')
-
-    def add_metering_label(self, context, routers):
-        LOG.debug("Creating a metering label from agent")
-        return self._invoke_driver(context, routers,
-                                   'add_metering_label')
-
-    def remove_metering_label(self, context, routers):
-        self._add_metering_infos()
-
-        LOG.debug("Delete a metering label from agent")
-        return self._invoke_driver(context, routers,
-                                   'remove_metering_label')
-
-
-class MeteringAgentWithStateReport(MeteringAgent):
-
-    def __init__(self, host, conf=None):
-        super(MeteringAgentWithStateReport, self).__init__(host=host,
-                                                           conf=conf)
-        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
-        self.agent_state = {
-            'binary': 'neutron-metering-agent',
-            'host': host,
-            'topic': topics.METERING_AGENT,
-            'configurations': {
-                'metering_driver': self.conf.driver,
-                'measure_interval':
-                self.conf.measure_interval,
-                'report_interval': self.conf.report_interval
-            },
-            'start_flag': True,
-            'agent_type': constants.AGENT_TYPE_METERING}
-        report_interval = cfg.CONF.AGENT.report_interval
-        self.use_call = True
-        if report_interval:
-            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
-                self._report_state)
-            self.heartbeat.start(interval=report_interval)
-
-    def _report_state(self):
-        try:
-            self.state_rpc.report_state(self.context, self.agent_state,
-                                        self.use_call)
-            self.agent_state.pop('start_flag', None)
-            self.use_call = False
-        except AttributeError:
-            # This means the server does not support report_state
-            LOG.warning(_LW("Neutron server does not support state report."
-                            " State report for this agent will be disabled."))
-            self.heartbeat.stop()
-            return
-        except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
-
-    def agent_updated(self, context, payload):
-        LOG.info(_LI("agent_updated message received from server: %s"), payload)
-
-
-def main():
-    conf = cfg.CONF
-    conf.register_opts(MeteringAgent.Opts)
-    config.register_agent_state_opts_helper(conf)
-    common_config.init(sys.argv[1:])
-    config.setup_logging()
-    server = neutron_service.Service.create(
-        binary='neutron-metering-agent',
-        topic=topics.METERING_AGENT,
-        report_interval=cfg.CONF.AGENT.report_interval,
-        manager='neutron.services.metering.agents.'
-                'metering_agent.MeteringAgentWithStateReport')
-    service.launch(cfg.CONF, server).wait()
diff --git a/neutron/services/metering/drivers/__init__.py b/neutron/services/metering/drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/metering/drivers/abstract_driver.py b/neutron/services/metering/drivers/abstract_driver.py
deleted file mode 100644 (file)
index 5169bdd..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class MeteringAbstractDriver(object):
-    """Abstract Metering driver."""
-
-    def __init__(self, plugin, conf):
-        pass
-
-    @abc.abstractmethod
-    def update_routers(self, context, routers):
-        pass
-
-    @abc.abstractmethod
-    def remove_router(self, context, router_id):
-        pass
-
-    @abc.abstractmethod
-    def update_metering_label_rules(self, context, routers):
-        pass
-
-    @abc.abstractmethod
-    def add_metering_label(self, context, routers):
-        pass
-
-    @abc.abstractmethod
-    def remove_metering_label(self, context, routers):
-        pass
-
-    @abc.abstractmethod
-    def get_traffic_counters(self, context, routers):
-        pass
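
A minimal concrete implementation satisfying the abstract interface
above; purely illustrative (the in-tree equivalent is the noop driver):

    class NoopLikeMeteringDriver(MeteringAbstractDriver):

        def update_routers(self, context, routers):
            pass

        def remove_router(self, context, router_id):
            pass

        def update_metering_label_rules(self, context, routers):
            pass

        def add_metering_label(self, context, routers):
            pass

        def remove_metering_label(self, context, routers):
            pass

        def get_traffic_counters(self, context, routers):
            return {}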
diff --git a/neutron/services/metering/drivers/iptables/__init__.py b/neutron/services/metering/drivers/iptables/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py
deleted file mode 100644 (file)
index 7a8c8fd..0000000
+++ /dev/null
@@ -1,370 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-from oslo_utils import importutils
-import six
-
-from neutron._i18n import _, _LE, _LI
-from neutron.agent.common import config
-from neutron.agent.linux import interface
-from neutron.agent.linux import iptables_manager
-from neutron.common import constants as constants
-from neutron.common import ipv6_utils
-from neutron.services.metering.drivers import abstract_driver
-
-
-LOG = logging.getLogger(__name__)
-NS_PREFIX = 'qrouter-'
-WRAP_NAME = 'neutron-meter'
-EXTERNAL_DEV_PREFIX = 'qg-'
-TOP_CHAIN = WRAP_NAME + "-FORWARD"
-RULE = '-r-'
-LABEL = '-l-'
-
-config.register_interface_driver_opts_helper(cfg.CONF)
-cfg.CONF.register_opts(interface.OPTS)
-
-
-class IptablesManagerTransaction(object):
-    __transactions = {}
-
-    def __init__(self, im):
-        self.im = im
-
-        transaction = self.__transactions.get(im, 0)
-        transaction += 1
-        self.__transactions[im] = transaction
-
-    def __enter__(self):
-        return self.im
-
-    def __exit__(self, type, value, traceback):
-        transaction = self.__transactions.get(self.im)
-        if transaction == 1:
-            self.im.apply()
-            del self.__transactions[self.im]
-        else:
-            transaction -= 1
-            self.__transactions[self.im] = transaction
-
-
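A standalone sketch of the reference counting implemented above, with
a stub standing in for IptablesManager (names are illustrative):

    class FakeManager(object):
        def __init__(self):
            self.applied = 0

        def apply(self):
            self.applied += 1

    im = FakeManager()
    with IptablesManagerTransaction(im):
        with IptablesManagerTransaction(im):
            pass  # rule changes would be queued here
        assert im.applied == 0  # inner exit only decrements the count
    assert im.applied == 1  # outermost exit triggers a single apply()
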
-class RouterWithMetering(object):
-
-    def __init__(self, conf, router):
-        self.conf = conf
-        self.id = router['id']
-        self.router = router
-        # TODO(cbrandily): deduplicate ns_name generation in metering/l3
-        self.ns_name = NS_PREFIX + self.id
-        self.iptables_manager = iptables_manager.IptablesManager(
-            namespace=self.ns_name,
-            binary_name=WRAP_NAME,
-            use_ipv6=ipv6_utils.is_enabled())
-        self.metering_labels = {}
-
-
-class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver):
-
-    def __init__(self, plugin, conf):
-        self.plugin = plugin
-        self.conf = conf or cfg.CONF
-        self.routers = {}
-
-        if not self.conf.interface_driver:
-            raise SystemExit(_('An interface driver must be specified'))
-        LOG.info(_LI("Loading interface driver %s"),
-                 self.conf.interface_driver)
-        self.driver = importutils.import_object(self.conf.interface_driver,
-                                                self.conf)
-
-    def _update_router(self, router):
-        r = self.routers.get(router['id'],
-                             RouterWithMetering(self.conf, router))
-        r.router = router
-        self.routers[r.id] = r
-
-        return r
-
-    @log_helpers.log_method_call
-    def update_routers(self, context, routers):
-        # disassociate removed routers
-        router_ids = set(router['id'] for router in routers)
-        for router_id, rm in six.iteritems(self.routers):
-            if router_id not in router_ids:
-                self._process_disassociate_metering_label(rm.router)
-
-        for router in routers:
-            old_gw_port_id = None
-            old_rm = self.routers.get(router['id'])
-            if old_rm:
-                old_gw_port_id = old_rm.router['gw_port_id']
-            gw_port_id = router['gw_port_id']
-
-            if gw_port_id != old_gw_port_id:
-                if old_rm:
-                    with IptablesManagerTransaction(old_rm.iptables_manager):
-                        self._process_disassociate_metering_label(router)
-                        if gw_port_id:
-                            self._process_associate_metering_label(router)
-                elif gw_port_id:
-                    self._process_associate_metering_label(router)
-
-    @log_helpers.log_method_call
-    def remove_router(self, context, router_id):
-        if router_id in self.routers:
-            del self.routers[router_id]
-
-    def get_external_device_name(self, port_id):
-        return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
-
-    def _process_metering_label_rules(self, rm, rules, label_chain,
-                                      rules_chain):
-        im = rm.iptables_manager
-        ext_dev = self.get_external_device_name(rm.router['gw_port_id'])
-        if not ext_dev:
-            return
-
-        for rule in rules:
-            self._add_rule_to_chain(ext_dev, rule, im,
-                                    label_chain, rules_chain)
-
-    def _process_metering_label_rule_add(self, rm, rule, ext_dev,
-                                         label_chain, rules_chain):
-        im = rm.iptables_manager
-        self._add_rule_to_chain(ext_dev, rule, im, label_chain, rules_chain)
-
-    def _process_metering_label_rule_delete(self, rm, rule, ext_dev,
-                                            label_chain, rules_chain):
-        im = rm.iptables_manager
-        self._remove_rule_from_chain(ext_dev, rule, im,
-                                     label_chain, rules_chain)
-
-    def _add_rule_to_chain(self, ext_dev, rule, im,
-                           label_chain, rules_chain):
-        ipt_rule = self._prepare_rule(ext_dev, rule, label_chain)
-        if rule['excluded']:
-            im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
-                                       wrap=False, top=True)
-        else:
-            im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
-                                       wrap=False, top=False)
-
-    def _remove_rule_from_chain(self, ext_dev, rule, im,
-                                label_chain, rules_chain):
-        ipt_rule = self._prepare_rule(ext_dev, rule, label_chain)
-        if rule['excluded']:
-            im.ipv4['filter'].remove_rule(rules_chain, ipt_rule,
-                                          wrap=False, top=True)
-        else:
-            im.ipv4['filter'].remove_rule(rules_chain, ipt_rule,
-                                          wrap=False, top=False)
-
-    def _prepare_rule(self, ext_dev, rule, label_chain):
-        remote_ip = rule['remote_ip_prefix']
-        if rule['direction'] == 'egress':
-            dir_opt = '-o %s -d %s' % (ext_dev, remote_ip)
-        else:
-            dir_opt = '-i %s -s %s' % (ext_dev, remote_ip)
-
-        if rule['excluded']:
-            ipt_rule = '%s -j RETURN' % dir_opt
-        else:
-            ipt_rule = '%s -j %s' % (dir_opt, label_chain)
-        return ipt_rule
-
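A worked example of the strings _prepare_rule() produces (all inputs
are hypothetical):

    rule = {'remote_ip_prefix': '10.0.0.0/24',
            'direction': 'egress',
            'excluded': False}
    # With ext_dev='qg-abcdef00' and label_chain='neutron-meter-l-LABEL',
    # the method returns:
    #     '-o qg-abcdef00 -d 10.0.0.0/24 -j neutron-meter-l-LABEL'
    # and, if rule['excluded'] were True:
    #     '-o qg-abcdef00 -d 10.0.0.0/24 -j RETURN'
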
-    def _process_associate_metering_label(self, router):
-        self._update_router(router)
-        rm = self.routers.get(router['id'])
-
-        with IptablesManagerTransaction(rm.iptables_manager):
-            labels = router.get(constants.METERING_LABEL_KEY, [])
-            for label in labels:
-                label_id = label['id']
-
-                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
-                                                              LABEL + label_id,
-                                                              wrap=False)
-                rm.iptables_manager.ipv4['filter'].add_chain(label_chain,
-                                                             wrap=False)
-
-                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
-                                                              RULE + label_id,
-                                                              wrap=False)
-                rm.iptables_manager.ipv4['filter'].add_chain(rules_chain,
-                                                             wrap=False)
-                rm.iptables_manager.ipv4['filter'].add_rule(TOP_CHAIN, '-j ' +
-                                                            rules_chain,
-                                                            wrap=False)
-
-                rm.iptables_manager.ipv4['filter'].add_rule(label_chain,
-                                                            '',
-                                                            wrap=False)
-
-                rules = label.get('rules')
-                if rules:
-                    self._process_metering_label_rules(rm, rules,
-                                                       label_chain,
-                                                       rules_chain)
-
-                rm.metering_labels[label_id] = label
-
-    def _process_disassociate_metering_label(self, router):
-        rm = self.routers.get(router['id'])
-        if not rm:
-            return
-
-        with IptablesManagerTransaction(rm.iptables_manager):
-            labels = router.get(constants.METERING_LABEL_KEY, [])
-            for label in labels:
-                label_id = label['id']
-                if label_id not in rm.metering_labels:
-                    continue
-
-                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
-                                                              LABEL + label_id,
-                                                              wrap=False)
-                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
-                                                              RULE + label_id,
-                                                              wrap=False)
-
-                rm.iptables_manager.ipv4['filter'].remove_chain(label_chain,
-                                                                wrap=False)
-                rm.iptables_manager.ipv4['filter'].remove_chain(rules_chain,
-                                                                wrap=False)
-
-                del rm.metering_labels[label_id]
-
-    @log_helpers.log_method_call
-    def add_metering_label(self, context, routers):
-        for router in routers:
-            self._process_associate_metering_label(router)
-
-    @log_helpers.log_method_call
-    def add_metering_label_rule(self, context, routers):
-        for router in routers:
-            self._add_metering_label_rule(router)
-
-    @log_helpers.log_method_call
-    def remove_metering_label_rule(self, context, routers):
-        for router in routers:
-            self._remove_metering_label_rule(router)
-
-    @log_helpers.log_method_call
-    def update_metering_label_rules(self, context, routers):
-        for router in routers:
-            self._update_metering_label_rules(router)
-
-    def _add_metering_label_rule(self, router):
-        self._process_metering_rule_action(router, 'create')
-
-    def _remove_metering_label_rule(self, router):
-        self._process_metering_rule_action(router, 'delete')
-
-    def _process_metering_rule_action(self, router, action):
-        rm = self.routers.get(router['id'])
-        if not rm:
-            return
-        ext_dev = self.get_external_device_name(rm.router['gw_port_id'])
-        if not ext_dev:
-            return
-        with IptablesManagerTransaction(rm.iptables_manager):
-            labels = router.get(constants.METERING_LABEL_KEY, [])
-            for label in labels:
-                label_id = label['id']
-                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
-                                                              LABEL + label_id,
-                                                              wrap=False)
-
-                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
-                                                              RULE + label_id,
-                                                              wrap=False)
-                rule = label.get('rule')
-                if rule:
-                    if action == 'create':
-                        self._process_metering_label_rule_add(rm, rule,
-                                                              ext_dev,
-                                                              label_chain,
-                                                              rules_chain)
-                    elif action == 'delete':
-                        self._process_metering_label_rule_delete(rm, rule,
-                                                                 ext_dev,
-                                                                 label_chain,
-                                                                 rules_chain)
-
-    def _update_metering_label_rules(self, router):
-        rm = self.routers.get(router['id'])
-        if not rm:
-            return
-
-        with IptablesManagerTransaction(rm.iptables_manager):
-            labels = router.get(constants.METERING_LABEL_KEY, [])
-            for label in labels:
-                label_id = label['id']
-
-                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
-                                                              LABEL + label_id,
-                                                              wrap=False)
-                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
-                                                              RULE + label_id,
-                                                              wrap=False)
-                rm.iptables_manager.ipv4['filter'].empty_chain(rules_chain,
-                                                               wrap=False)
-
-                rules = label.get('rules')
-                if rules:
-                    self._process_metering_label_rules(rm, rules,
-                                                       label_chain,
-                                                       rules_chain)
-
-    @log_helpers.log_method_call
-    def remove_metering_label(self, context, routers):
-        for router in routers:
-            self._process_disassociate_metering_label(router)
-
-    @log_helpers.log_method_call
-    def get_traffic_counters(self, context, routers):
-        accs = {}
-        for router in routers:
-            rm = self.routers.get(router['id'])
-            if not rm:
-                continue
-
-            for label_id, label in rm.metering_labels.items():
-                try:
-                    chain = iptables_manager.get_chain_name(WRAP_NAME +
-                                                            LABEL +
-                                                            label_id,
-                                                            wrap=False)
-
-                    chain_acc = rm.iptables_manager.get_traffic_counters(
-                        chain, wrap=False, zero=True)
-                except RuntimeError:
-                    LOG.exception(_LE('Failed to get traffic counters, '
-                                      'router: %s'), router)
-                    continue
-
-                if not chain_acc:
-                    continue
-
-                acc = accs.get(label_id, {'pkts': 0, 'bytes': 0})
-
-                acc['pkts'] += chain_acc['pkts']
-                acc['bytes'] += chain_acc['bytes']
-
-                accs[label_id] = acc
-
-        return accs
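
Editorial note: each metering label in the driver above owns two iptables chains, a label chain whose packet/byte counters get_traffic_counters() reads back, and a rules chain holding the per-label match rules jumped to from the top chain. A minimal standalone sketch of the naming scheme, assuming get_chain_name() simply truncates to iptables' 28-character limit and that the WRAP_NAME/LABEL/RULE prefixes mirror the driver's constants (they are re-declared here, not imported):

    # Standalone illustration; the constants and the truncation limit are
    # assumptions mirroring the deleted driver, not imports from it.
    MAX_CHAIN_LEN_NOWRAP = 28          # iptables chain-name limit

    WRAP_NAME = 'neutron-meter'
    LABEL = '-l-'
    RULE = '-r-'

    def get_chain_name(chain_name, wrap=True):
        # Non-wrapped chains must fit the iptables limit themselves.
        return chain_name[:MAX_CHAIN_LEN_NOWRAP]

    label_id = 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83'
    print(get_chain_name(WRAP_NAME + LABEL + label_id, wrap=False))
    # -> neutron-meter-l-c5df2fe5-c60
    print(get_chain_name(WRAP_NAME + RULE + label_id, wrap=False))
    # -> neutron-meter-r-c5df2fe5-c60
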
diff --git a/neutron/services/metering/drivers/noop/__init__.py b/neutron/services/metering/drivers/noop/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/metering/drivers/noop/noop_driver.py b/neutron/services/metering/drivers/noop/noop_driver.py
deleted file mode 100644 (file)
index 25df069..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import helpers as log_helpers
-
-from neutron.services.metering.drivers import abstract_driver
-
-
-class NoopMeteringDriver(abstract_driver.MeteringAbstractDriver):
-
-    @log_helpers.log_method_call
-    def update_routers(self, context, routers):
-        pass
-
-    @log_helpers.log_method_call
-    def remove_router(self, context, router_id):
-        pass
-
-    @log_helpers.log_method_call
-    def update_metering_label_rules(self, context, routers):
-        pass
-
-    @log_helpers.log_method_call
-    def add_metering_label_rule(self, context, routers):
-        pass
-
-    @log_helpers.log_method_call
-    def remove_metering_label_rule(self, context, routers):
-        pass
-
-    @log_helpers.log_method_call
-    def add_metering_label(self, context, routers):
-        pass
-
-    @log_helpers.log_method_call
-    def remove_metering_label(self, context, routers):
-        pass
-
-    @log_helpers.log_method_call
-    def get_traffic_counters(self, context, routers):
-        pass
diff --git a/neutron/services/metering/metering_plugin.py b/neutron/services/metering/metering_plugin.py
deleted file mode 100644 (file)
index 11a9286..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.db.metering import metering_db
-from neutron.db.metering import metering_rpc
-
-
-class MeteringPlugin(metering_db.MeteringDbMixin):
-    """Implementation of the Neutron Metering Service Plugin."""
-    supported_extension_aliases = ["metering"]
-    path_prefix = "/metering"
-
-    def __init__(self):
-        super(MeteringPlugin, self).__init__()
-
-        self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()
-        self.start_rpc_listeners()
-
-    def start_rpc_listeners(self):
-        self.endpoints = [metering_rpc.MeteringRpcCallbacks(self)]
-        self.conn = n_rpc.create_connection()
-        self.conn.create_consumer(
-            topics.METERING_PLUGIN, self.endpoints, fanout=False)
-        return self.conn.consume_in_threads()
-
-    def create_metering_label(self, context, metering_label):
-        label = super(MeteringPlugin, self).create_metering_label(
-            context, metering_label)
-
-        data = self.get_sync_data_metering(context)
-        self.meter_rpc.add_metering_label(context, data)
-
-        return label
-
-    def delete_metering_label(self, context, label_id):
-        data = self.get_sync_data_metering(context, label_id)
-        label = super(MeteringPlugin, self).delete_metering_label(
-            context, label_id)
-
-        self.meter_rpc.remove_metering_label(context, data)
-
-        return label
-
-    def create_metering_label_rule(self, context, metering_label_rule):
-        rule = super(MeteringPlugin, self).create_metering_label_rule(
-            context, metering_label_rule)
-
-        data = self.get_sync_data_for_rule(context, rule)
-        self.meter_rpc.add_metering_label_rule(context, data)
-
-        return rule
-
-    def delete_metering_label_rule(self, context, rule_id):
-        rule = super(MeteringPlugin, self).delete_metering_label_rule(
-            context, rule_id)
-
-        data = self.get_sync_data_for_rule(context, rule)
-        self.meter_rpc.remove_metering_label_rule(context, data)
-        return rule
diff --git a/neutron/services/provider_configuration.py b/neutron/services/provider_configuration.py
deleted file mode 100644 (file)
index 722dddb..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import importlib
-import os
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import stevedore
-
-from neutron._i18n import _, _LW
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as n_exc
-
-LOG = logging.getLogger(__name__)
-
-SERVICE_PROVIDERS = 'neutron.service_providers'
-
-serviceprovider_opts = [
-    cfg.MultiStrOpt('service_provider', default=[],
-                    help=_('Defines providers for advanced services '
-                           'using the format: '
-                           '<service_type>:<name>:<driver>[:default]'))
-]
-
-cfg.CONF.register_opts(serviceprovider_opts, 'service_providers')
-
-
-class NeutronModule(object):
-    """A Neutron extension module."""
-
-    def __init__(self, service_module):
-        self.module_name = service_module
-        self.repo = {
-            'mod': self._import_or_none(),
-            'ini': None
-        }
-
-    def _import_or_none(self):
-        try:
-            return importlib.import_module(self.module_name)
-        except ImportError:
-            return None
-
-    def installed(self):
-        LOG.debug("NeutronModule installed = %s", self.module_name)
-        return self.module_name
-
-    def module(self):
-        return self.repo['mod']
-
-    # Return an INI parser for the child module
-    def ini(self, neutron_dir=None):
-        if self.repo['ini'] is None:
-            try:
-                neutron_dir = neutron_dir or cfg.CONF.config_dir
-            except cfg.NoSuchOptError:
-                pass
-            if neutron_dir is None:
-                neutron_dir = '/etc/neutron'
-
-            ini_file = cfg.ConfigOpts()
-            ini_file.register_opts(serviceprovider_opts, 'service_providers')
-            ini_path = os.path.join(neutron_dir, '%s.conf' % self.module_name)
-            if os.path.exists(ini_path):
-                ini_file(['--config-file', ini_path])
-            self.repo['ini'] = ini_file
-
-        return self.repo['ini']
-
-    def service_providers(self):
-        """Return the service providers for the extension module."""
-        providers = []
-        # Attempt to read the config from cfg.CONF first; when passing
-        # --config-dir, the option is merged from all the definitions
-        # made across all the imported config files
-        try:
-            providers = cfg.CONF.service_providers.service_provider
-        except cfg.NoSuchOptError:
-            pass
-
-        # Alternatively, if the option is not available, try to load
-        # it from the provider module's config file; this may be
-        # necessary, if modules are loaded on the fly (DevStack may
-        # be an example)
-        if not providers:
-            providers = self.ini().service_providers.service_provider
-
-        return providers
-
-
-# Global-scope function that should be used in service APIs
-def normalize_provider_name(name):
-    return name.lower()
-
-
-def get_provider_driver_class(driver, namespace=SERVICE_PROVIDERS):
-    """Return path to provider driver class
-
-    In order to keep backward compatibility with configs < Kilo, we need to
-    translate driver class paths after advanced services split. This is done by
-    defining old class path as entry point in neutron package.
-    """
-    try:
-        driver_manager = stevedore.driver.DriverManager(
-            namespace, driver).driver
-    except ImportError:
-        return driver
-    except RuntimeError:
-        return driver
-    new_driver = "%s.%s" % (driver_manager.__module__,
-                            driver_manager.__name__)
-    LOG.warning(_LW(
-        "The configured driver %(driver)s has been moved, automatically "
-        "using %(new_driver)s instead. Please update your config files, "
-        "as this automatic fixup will be removed in a future release."),
-        {'driver': driver, 'new_driver': new_driver})
-    return new_driver
-
-
-def parse_service_provider_opt(service_module='neutron'):
-    """Parse service definition opts and return the result."""
-
-    def validate_name(name):
-        if len(name) > attr.NAME_MAX_LEN:
-            raise n_exc.Invalid(
-                _("Provider name %(name)s is limited by %(len)s characters")
-                % {'name': name, 'len': attr.NAME_MAX_LEN})
-
-    neutron_mod = NeutronModule(service_module)
-    svc_providers_opt = neutron_mod.service_providers()
-
-    LOG.debug("Service providers = %s", svc_providers_opt)
-
-    res = []
-    for prov_def in svc_providers_opt:
-        split = prov_def.split(':')
-        try:
-            svc_type, name, driver = split[:3]
-        except ValueError:
-            raise n_exc.Invalid(_("Invalid service provider format"))
-        validate_name(name)
-        name = normalize_provider_name(name)
-        default = False
-        if len(split) == 4 and split[3]:
-            if split[3] == 'default':
-                default = True
-            else:
-                msg = (_("Invalid provider format. "
-                         "Last part should be 'default' or empty: %s") %
-                       prov_def)
-                LOG.error(msg)
-                raise n_exc.Invalid(msg)
-
-        driver = get_provider_driver_class(driver)
-        res.append({'service_type': svc_type,
-                    'name': name,
-                    'driver': driver,
-                    'default': default})
-    return res
-
-
-class ServiceProviderNotFound(n_exc.InvalidInput):
-    message = _("Service provider '%(provider)s' could not be found "
-                "for service type %(service_type)s")
-
-
-class DefaultServiceProviderNotFound(n_exc.InvalidInput):
-    message = _("Service type %(service_type)s does not have a default "
-                "service provider")
-
-
-class ServiceProviderAlreadyAssociated(n_exc.Conflict):
-    message = _("Resource '%(resource_id)s' is already associated with "
-                "provider '%(provider)s' for service type '%(service_type)s'")
-
-
-class ProviderConfiguration(object):
-
-    def __init__(self, svc_module='neutron'):
-        self.providers = {}
-        for prov in parse_service_provider_opt(svc_module):
-            self.add_provider(prov)
-
-    def _ensure_driver_unique(self, driver):
-        for k, v in self.providers.items():
-            if v['driver'] == driver:
-                msg = (_("Driver %s is not unique across providers") %
-                       driver)
-                LOG.error(msg)
-                raise n_exc.Invalid(msg)
-
-    def _ensure_default_unique(self, service_type, default):
-        if not default:
-            return
-        for k, v in self.providers.items():
-            if k[0] == service_type and v['default']:
-                msg = _("Multiple default providers "
-                        "for service %s") % service_type
-                LOG.error(msg)
-                raise n_exc.Invalid(msg)
-
-    def add_provider(self, provider):
-        self._ensure_driver_unique(provider['driver'])
-        self._ensure_default_unique(provider['service_type'],
-                                    provider['default'])
-        provider_type = (provider['service_type'], provider['name'])
-        if provider_type in self.providers:
-            msg = (_("Multiple providers specified for service "
-                     "%s") % provider['service_type'])
-            LOG.error(msg)
-            raise n_exc.Invalid(msg)
-        self.providers[provider_type] = {'driver': provider['driver'],
-                                         'default': provider['default']}
-
-    def _check_entry(self, k, v, filters):
-        # small helper to deal with query filters
-        if not filters:
-            return True
-        for index, key in enumerate(['service_type', 'name']):
-            if key in filters:
-                if k[index] not in filters[key]:
-                    return False
-
-        for key in ['driver', 'default']:
-            if key in filters:
-                if v[key] not in filters[key]:
-                    return False
-        return True
-
-    def _fields(self, resource, fields):
-        if fields:
-            return dict(((key, item) for key, item in resource.items()
-                         if key in fields))
-        return resource
-
-    def get_service_providers(self, filters=None, fields=None):
-        return [self._fields({'service_type': k[0],
-                              'name': k[1],
-                              'driver': v['driver'],
-                              'default': v['default']},
-                             fields)
-                for k, v in self.providers.items()
-                if self._check_entry(k, v, filters)]
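
Editorial note: the service_provider format parsed above is easiest to see with a concrete entry. A hedged sketch of the split logic on a made-up provider line (the driver path is purely illustrative):

    # Hypothetical [service_providers] entry; the driver path is invented.
    prov_def = ('LOADBALANCER:haproxy:'
                'neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyDriver'
                ':default')

    split = prov_def.split(':')
    svc_type, name, driver = split[:3]                 # first three fields
    default = len(split) == 4 and split[3] == 'default'
    print('%s %s default=%s' % (svc_type, name.lower(), default))
    # -> LOADBALANCER haproxy default=True
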
diff --git a/neutron/services/qos/__init__.py b/neutron/services/qos/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/qos/notification_drivers/__init__.py b/neutron/services/qos/notification_drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/qos/notification_drivers/manager.py b/neutron/services/qos/notification_drivers/manager.py
deleted file mode 100644 (file)
index 5a8c89a..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from neutron._i18n import _, _LI
-from neutron import manager
-
-QOS_DRIVER_NAMESPACE = 'neutron.qos.notification_drivers'
-QOS_PLUGIN_OPTS = [
-    cfg.ListOpt('notification_drivers',
-                default=['message_queue'],
-                help=_('List of drivers used to send update notifications')),
-]
-
-cfg.CONF.register_opts(QOS_PLUGIN_OPTS, "qos")
-
-LOG = logging.getLogger(__name__)
-
-
-class QosServiceNotificationDriverManager(object):
-
-    def __init__(self):
-        self.notification_drivers = []
-        self._load_drivers(cfg.CONF.qos.notification_drivers)
-
-    def update_policy(self, context, qos_policy):
-        for driver in self.notification_drivers:
-            driver.update_policy(context, qos_policy)
-
-    def delete_policy(self, context, qos_policy):
-        for driver in self.notification_drivers:
-            driver.delete_policy(context, qos_policy)
-
-    def create_policy(self, context, qos_policy):
-        for driver in self.notification_drivers:
-            driver.create_policy(context, qos_policy)
-
-    def _load_drivers(self, notification_drivers):
-        """Load all the instances of the configured QoS notification drivers
-
-        :param notification_drivers: comma separated string
-        """
-        if not notification_drivers:
-            raise SystemExit(_('A QoS driver must be specified'))
-        LOG.debug("Loading QoS notification drivers: %s", notification_drivers)
-        for notification_driver in notification_drivers:
-            driver_ins = self._load_driver_instance(notification_driver)
-            self.notification_drivers.append(driver_ins)
-
-    def _load_driver_instance(self, notification_driver):
-        """Returns an instance of the configured QoS notification driver
-
-        :returns: An instance of Driver for the QoS notification
-        """
-        mgr = manager.NeutronManager
-        driver = mgr.load_class_for_provider(QOS_DRIVER_NAMESPACE,
-                                             notification_driver)
-        driver_instance = driver()
-        LOG.info(
-            _LI("Loading %(name)s (%(description)s) notification driver "
-                "for QoS plugin"),
-            {"name": notification_driver,
-             "description": driver_instance.get_description()})
-        return driver_instance
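
Editorial note: load_class_for_provider() above ultimately resolves the driver name through a stevedore entry point. A minimal sketch of the equivalent direct lookup, assuming the 'message_queue' entry point is registered under the neutron.qos.notification_drivers namespace (as neutron's setup.cfg did at the time):

    from stevedore import driver

    # Entry-point lookup only; invoke_on_load=False returns the class,
    # which the manager above then instantiates itself.
    mgr = driver.DriverManager(
        namespace='neutron.qos.notification_drivers',
        name='message_queue',
        invoke_on_load=False)
    driver_cls = mgr.driver
    notification_driver = driver_cls()
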
diff --git a/neutron/services/qos/notification_drivers/message_queue.py b/neutron/services/qos/notification_drivers/message_queue.py
deleted file mode 100644 (file)
index 1ffeda0..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from neutron._i18n import _LW
-from neutron.api.rpc.callbacks import events
-from neutron.api.rpc.callbacks.producer import registry
-from neutron.api.rpc.callbacks import resources
-from neutron.api.rpc.handlers import resources_rpc
-from neutron.objects.qos import policy as policy_object
-from neutron.services.qos.notification_drivers import qos_base
-
-
-LOG = logging.getLogger(__name__)
-
-
-def _get_qos_policy_cb(resource, policy_id, **kwargs):
-    context = kwargs.get('context')
-    if context is None:
-        LOG.warning(_LW(
-            'Received %(resource)s %(policy_id)s without context'),
-            {'resource': resource, 'policy_id': policy_id}
-        )
-        return
-
-    policy = policy_object.QosPolicy.get_by_id(context, policy_id)
-    return policy
-
-
-class RpcQosServiceNotificationDriver(
-    qos_base.QosServiceNotificationDriverBase):
-    """RPC message queue service notification driver for QoS."""
-
-    def __init__(self):
-        self.notification_api = resources_rpc.ResourcesPushRpcApi()
-        registry.provide(_get_qos_policy_cb, resources.QOS_POLICY)
-
-    def get_description(self):
-        return "Message queue updates"
-
-    def create_policy(self, context, policy):
-        # No need to update agents on create
-        pass
-
-    def update_policy(self, context, policy):
-        self.notification_api.push(context, policy, events.UPDATED)
-
-    def delete_policy(self, context, policy):
-        self.notification_api.push(context, policy, events.DELETED)
diff --git a/neutron/services/qos/notification_drivers/qos_base.py b/neutron/services/qos/notification_drivers/qos_base.py
deleted file mode 100644 (file)
index 50f98f0..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class QosServiceNotificationDriverBase(object):
-    """QoS service notification driver base class."""
-
-    @abc.abstractmethod
-    def get_description(self):
-        """Get the notification driver description.
-        """
-
-    @abc.abstractmethod
-    def create_policy(self, context, policy):
-        """Create the QoS policy."""
-
-    @abc.abstractmethod
-    def update_policy(self, context, policy):
-        """Update the QoS policy.
-
-        Apply changes to the QoS policy.
-        """
-
-    @abc.abstractmethod
-    def delete_policy(self, context, policy):
-        """Delete the QoS policy.
-
-        Remove all rules for this policy and free up all the resources.
-        """
diff --git a/neutron/services/qos/qos_consts.py b/neutron/services/qos/qos_consts.py
deleted file mode 100644 (file)
index 3eb78d5..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-RULE_TYPE_BANDWIDTH_LIMIT = 'bandwidth_limit'
-VALID_RULE_TYPES = [RULE_TYPE_BANDWIDTH_LIMIT]
-
-QOS_POLICY_ID = 'qos_policy_id'
diff --git a/neutron/services/qos/qos_plugin.py b/neutron/services/qos/qos_plugin.py
deleted file mode 100644 (file)
index 1327a74..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import exceptions as n_exc
-from neutron.db import api as db_api
-from neutron.db import db_base_plugin_common
-from neutron.extensions import qos
-from neutron.objects.qos import policy as policy_object
-from neutron.objects.qos import rule as rule_object
-from neutron.objects.qos import rule_type as rule_type_object
-from neutron.services.qos.notification_drivers import manager as driver_mgr
-from neutron.services.qos import qos_consts
-
-
-class QoSPlugin(qos.QoSPluginBase):
-    """Implementation of the Neutron QoS Service Plugin.
-
-    This class implements a Quality of Service plugin that
-    provides quality of service parameters over ports and
-    networks.
-
-    """
-    supported_extension_aliases = ['qos']
-
-    def __init__(self):
-        super(QoSPlugin, self).__init__()
-        self.notification_driver_manager = (
-            driver_mgr.QosServiceNotificationDriverManager())
-
-    @db_base_plugin_common.convert_result_to_dict
-    def create_policy(self, context, policy):
-        policy = policy_object.QosPolicy(context, **policy['policy'])
-        policy.create()
-        self.notification_driver_manager.create_policy(context, policy)
-        return policy
-
-    @db_base_plugin_common.convert_result_to_dict
-    def update_policy(self, context, policy_id, policy):
-        policy = policy_object.QosPolicy(context, **policy['policy'])
-        policy.id = policy_id
-        policy.update()
-        self.notification_driver_manager.update_policy(context, policy)
-        return policy
-
-    def delete_policy(self, context, policy_id):
-        policy = policy_object.QosPolicy(context)
-        policy.id = policy_id
-        self.notification_driver_manager.delete_policy(context, policy)
-        policy.delete()
-
-    def _get_policy_obj(self, context, policy_id):
-        obj = policy_object.QosPolicy.get_by_id(context, policy_id)
-        if obj is None:
-            raise n_exc.QosPolicyNotFound(policy_id=policy_id)
-        return obj
-
-    @db_base_plugin_common.filter_fields
-    @db_base_plugin_common.convert_result_to_dict
-    def get_policy(self, context, policy_id, fields=None):
-        return self._get_policy_obj(context, policy_id)
-
-    @db_base_plugin_common.filter_fields
-    @db_base_plugin_common.convert_result_to_dict
-    def get_policies(self, context, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None,
-                     page_reverse=False):
-        return policy_object.QosPolicy.get_objects(context, **(filters or {}))
-
-    # TODO(QoS): Consider adding a proxy catch-all for rules, so we
-    #            capture the API function call and just pass the rule
-    #            type as a parameter, removing lots of future code
-    #            duplication when we have more rules.
-    @db_base_plugin_common.convert_result_to_dict
-    def create_policy_bandwidth_limit_rule(self, context, policy_id,
-                                           bandwidth_limit_rule):
-        # make sure we will have a policy object to push resource update
-        with db_api.autonested_transaction(context.session):
-            # first, validate that we have access to the policy
-            policy = self._get_policy_obj(context, policy_id)
-            rule = rule_object.QosBandwidthLimitRule(
-                context, qos_policy_id=policy_id,
-                **bandwidth_limit_rule['bandwidth_limit_rule'])
-            rule.create()
-            policy.reload_rules()
-        self.notification_driver_manager.update_policy(context, policy)
-        return rule
-
-    @db_base_plugin_common.convert_result_to_dict
-    def update_policy_bandwidth_limit_rule(self, context, rule_id, policy_id,
-                                           bandwidth_limit_rule):
-        # make sure we will have a policy object to push resource update
-        with db_api.autonested_transaction(context.session):
-            # first, validate that we have access to the policy
-            policy = self._get_policy_obj(context, policy_id)
-            # check that the rule belongs to the policy
-            policy.get_rule_by_id(rule_id)
-            rule = rule_object.QosBandwidthLimitRule(
-                context, **bandwidth_limit_rule['bandwidth_limit_rule'])
-            rule.id = rule_id
-            rule.update()
-            policy.reload_rules()
-        self.notification_driver_manager.update_policy(context, policy)
-        return rule
-
-    def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id):
-        # make sure we will have a policy object to push resource update
-        with db_api.autonested_transaction(context.session):
-            # first, validate that we have access to the policy
-            policy = self._get_policy_obj(context, policy_id)
-            rule = policy.get_rule_by_id(rule_id)
-            rule.delete()
-            policy.reload_rules()
-        self.notification_driver_manager.update_policy(context, policy)
-
-    @db_base_plugin_common.filter_fields
-    @db_base_plugin_common.convert_result_to_dict
-    def get_policy_bandwidth_limit_rule(self, context, rule_id,
-                                        policy_id, fields=None):
-        # make sure we have access to the policy when fetching the rule
-        with db_api.autonested_transaction(context.session):
-            # first, validate that we have access to the policy
-            self._get_policy_obj(context, policy_id)
-            rule = rule_object.QosBandwidthLimitRule.get_by_id(
-                context, rule_id)
-        if not rule:
-            raise n_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id)
-        return rule
-
-    @db_base_plugin_common.filter_fields
-    @db_base_plugin_common.convert_result_to_dict
-    def get_policy_bandwidth_limit_rules(self, context, policy_id,
-                                         filters=None, fields=None,
-                                         sorts=None, limit=None,
-                                         marker=None, page_reverse=False):
-        # make sure we have access to the policy when fetching rules
-        with db_api.autonested_transaction(context.session):
-            # first, validate that we have access to the policy
-            self._get_policy_obj(context, policy_id)
-            filters = filters or dict()
-            filters[qos_consts.QOS_POLICY_ID] = policy_id
-            return rule_object.QosBandwidthLimitRule.get_objects(context,
-                                                                 **filters)
-
-    # TODO(QoS): enforce rule types when accessing rule objects
-    @db_base_plugin_common.filter_fields
-    @db_base_plugin_common.convert_result_to_dict
-    def get_rule_types(self, context, filters=None, fields=None,
-                       sorts=None, limit=None,
-                       marker=None, page_reverse=False):
-        return rule_type_object.QosRuleType.get_objects(**(filters or {}))
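
Editorial note: each create/update entry point in the plugin above receives the API layer's envelope, a dict keyed by the resource name, and unpacks it with **policy['policy']. A tiny illustration of that convention (the field names are ordinary API attributes, shown here as an assumption):

    # The API layer wraps attributes under the resource name; the plugin
    # unpacks them as keyword arguments for the versioned object.
    request_body = {'policy': {'name': 'gold', 'shared': False}}

    def create_policy(context, policy):
        fields = policy['policy']           # strip the envelope
        return dict(fields)                 # stand-in for QosPolicy(**fields)

    print(create_policy(None, request_body)['name'])   # -> gold
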
diff --git a/neutron/services/rbac/__init__.py b/neutron/services/rbac/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/service_base.py b/neutron/services/service_base.py
deleted file mode 100644 (file)
index 5fa9bcf..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from oslo_log import log as logging
-from oslo_utils import excutils
-from oslo_utils import importutils
-import six
-
-from neutron._i18n import _, _LE, _LI
-from neutron.api import extensions
-from neutron.db import servicetype_db as sdb
-from neutron.services import provider_configuration as pconf
-
-LOG = logging.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class ServicePluginBase(extensions.PluginInterface):
-    """Define base interface for any Advanced Service plugin."""
-    supported_extension_aliases = []
-
-    @abc.abstractmethod
-    def get_plugin_type(self):
-        """Return one of predefined service types.
-
-        See neutron/plugins/common/constants.py
-        """
-        pass
-
-    @abc.abstractmethod
-    def get_plugin_description(self):
-        """Return string description of the plugin."""
-        pass
-
-    def get_workers(self):
-        """Returns a collection of NeutronWorkers"""
-        return ()
-
-
-def load_drivers(service_type, plugin):
-    """Loads drivers for specific service.
-
-    Passes plugin instance to driver's constructor
-    """
-    service_type_manager = sdb.ServiceTypeManager.get_instance()
-    providers = (service_type_manager.
-                 get_service_providers(
-                     None,
-                     filters={'service_type': [service_type]})
-                 )
-    if not providers:
-        msg = (_("No providers specified for '%s' service, exiting") %
-               service_type)
-        LOG.error(msg)
-        raise SystemExit(1)
-
-    drivers = {}
-    for provider in providers:
-        try:
-            drivers[provider['name']] = importutils.import_object(
-                provider['driver'], plugin
-            )
-            LOG.debug("Loaded '%(provider)s' provider for service "
-                      "%(service_type)s",
-                      {'provider': provider['driver'],
-                       'service_type': service_type})
-        except ImportError:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Error loading provider '%(provider)s' for "
-                                  "service %(service_type)s"),
-                              {'provider': provider['driver'],
-                               'service_type': service_type})
-
-    default_provider = None
-    try:
-        provider = service_type_manager.get_default_service_provider(
-            None, service_type)
-        default_provider = provider['name']
-    except pconf.DefaultServiceProviderNotFound:
-        LOG.info(_LI("Default provider is not specified for service type %s"),
-                 service_type)
-
-    return drivers, default_provider
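
Editorial note: the return shape of load_drivers() is worth spelling out: a dict of provider name to instantiated driver, plus the default provider's name (or None). A toy reduction with the service-type manager and importutils replaced by plain data (all names illustrative):

    # Toy reduction of load_drivers(); the provider records mimic what the
    # service-type manager returns, FakeDriver stands in for the classes
    # importutils would load.
    providers = [
        {'name': 'haproxy', 'driver': 'h.Driver', 'default': True},
        {'name': 'octavia', 'driver': 'o.Driver', 'default': False},
    ]

    class FakeDriver(object):
        def __init__(self, plugin):
            self.plugin = plugin

    drivers = dict((p['name'], FakeDriver(plugin=None)) for p in providers)
    default_provider = next(
        (p['name'] for p in providers if p['default']), None)
    print('%s %s' % (sorted(drivers), default_provider))
    # -> ['haproxy', 'octavia'] haproxy
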
diff --git a/neutron/services/vpn/__init__.py b/neutron/services/vpn/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/__init__.py b/neutron/tests/__init__.py
deleted file mode 100644 (file)
index 1850daa..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.common import eventlet_utils
-
-eventlet_utils.monkey_patch()
diff --git a/neutron/tests/api/__init__.py b/neutron/tests/api/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/api/admin/__init__.py b/neutron/tests/api/admin/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/api/admin/test_agent_management.py b/neutron/tests/api/admin/test_agent_management.py
deleted file mode 100644 (file)
index 228fdc6..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.tests.api import base
-from neutron.tests.tempest.common import tempest_fixtures as fixtures
-from neutron.tests.tempest import test
-
-
-class AgentManagementTestJSON(base.BaseAdminNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(AgentManagementTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('agent', 'network'):
-            msg = "agent extension not enabled."
-            raise cls.skipException(msg)
-        body = cls.admin_client.list_agents()
-        agents = body['agents']
-        cls.agent = agents[0]  # don't modify this agent
-        cls.dyn_agent = agents[1]
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('9c80f04d-11f3-44a4-8738-ed2f879b0ff4')
-    def test_list_agent(self):
-        body = self.admin_client.list_agents()
-        agents = body['agents']
-        # Heartbeats must be excluded from comparison
-        self.agent.pop('heartbeat_timestamp', None)
-        self.agent.pop('configurations', None)
-        for agent in agents:
-            agent.pop('heartbeat_timestamp', None)
-            agent.pop('configurations', None)
-        self.assertIn(self.agent, agents)
-
-    @test.attr(type=['smoke'])
-    @test.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
-    def test_list_agents_non_admin(self):
-        body = self.client.list_agents()
-        self.assertEqual(len(body["agents"]), 0)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('869bc8e8-0fda-4a30-9b71-f8a7cf58ca9f')
-    def test_show_agent(self):
-        body = self.admin_client.show_agent(self.agent['id'])
-        agent = body['agent']
-        self.assertEqual(agent['id'], self.agent['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('371dfc5b-55b9-4cb5-ac82-c40eadaac941')
-    def test_update_agent_status(self):
-        origin_status = self.agent['admin_state_up']
-        # Try to update the 'admin_state_up' to the original
-        # one to avoid the negative effect.
-        agent_status = {'admin_state_up': origin_status}
-        body = self.admin_client.update_agent(agent_id=self.agent['id'],
-                                              agent_info=agent_status)
-        updated_status = body['agent']['admin_state_up']
-        self.assertEqual(origin_status, updated_status)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('68a94a14-1243-46e6-83bf-157627e31556')
-    def test_update_agent_description(self):
-        self.useFixture(fixtures.LockFixture('agent_description'))
-        description = 'description for update agent.'
-        agent_description = {'description': description}
-        body = self.admin_client.update_agent(agent_id=self.dyn_agent['id'],
-                                              agent_info=agent_description)
-        self.addCleanup(self._restore_agent)
-        updated_description = body['agent']['description']
-        self.assertEqual(updated_description, description)
-
-    def _restore_agent(self):
-        """
-        Restore the agent description after update test.
-        """
-        description = self.dyn_agent['description']
-        origin_agent = {'description': description}
-        self.admin_client.update_agent(agent_id=self.dyn_agent['id'],
-                                       agent_info=origin_agent)
diff --git a/neutron/tests/api/admin/test_dhcp_agent_scheduler.py b/neutron/tests/api/admin/test_dhcp_agent_scheduler.py
deleted file mode 100644 (file)
index befd2ef..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.tests.api import base
-from neutron.tests.tempest import test
-
-
-class DHCPAgentSchedulersTestJSON(base.BaseAdminNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(DHCPAgentSchedulersTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'):
-            msg = "dhcp_agent_scheduler extension not enabled."
-            raise cls.skipException(msg)
-        # Create a network and make sure it will be hosted by a
-        # dhcp agent: this is done by creating a regular port
-        cls.network = cls.create_network()
-        cls.subnet = cls.create_subnet(cls.network)
-        cls.cidr = cls.subnet['cidr']
-        cls.port = cls.create_port(cls.network)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d')
-    def test_list_dhcp_agent_hosting_network(self):
-        self.admin_client.list_dhcp_agent_hosting_network(
-            self.network['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('30c48f98-e45d-4ffb-841c-b8aad57c7587')
-    def test_list_networks_hosted_by_one_dhcp(self):
-        body = self.admin_client.list_dhcp_agent_hosting_network(
-            self.network['id'])
-        agents = body['agents']
-        self.assertIsNotNone(agents)
-        agent = agents[0]
-        self.assertTrue(self._check_network_in_dhcp_agent(
-            self.network['id'], agent))
-
-    def _check_network_in_dhcp_agent(self, network_id, agent):
-        network_ids = []
-        body = self.admin_client.list_networks_hosted_by_one_dhcp_agent(
-            agent['id'])
-        networks = body['networks']
-        for network in networks:
-            network_ids.append(network['id'])
-        return network_id in network_ids
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('a0856713-6549-470c-a656-e97c8df9a14d')
-    def test_add_remove_network_from_dhcp_agent(self):
-        # The agent is now bound to the network, we can free the port
-        self.client.delete_port(self.port['id'])
-        self.ports.remove(self.port)
-        agent = {'agent_type': None}
-        body = self.admin_client.list_agents()
-        agents = body['agents']
-        for a in agents:
-            if a['agent_type'] == 'DHCP agent':
-                agent = a
-                break
-        self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '
-                         'DHCP agent in agent list though dhcp_agent_scheduler'
-                         ' is enabled.')
-        network = self.create_network()
-        network_id = network['id']
-        if self._check_network_in_dhcp_agent(network_id, agent):
-            self._remove_network_from_dhcp_agent(network_id, agent)
-            self._add_dhcp_agent_to_network(network_id, agent)
-        else:
-            self._add_dhcp_agent_to_network(network_id, agent)
-            self._remove_network_from_dhcp_agent(network_id, agent)
-
-    def _remove_network_from_dhcp_agent(self, network_id, agent):
-        self.admin_client.remove_network_from_dhcp_agent(
-            agent_id=agent['id'],
-            network_id=network_id)
-        self.assertFalse(self._check_network_in_dhcp_agent(
-            network_id, agent))
-
-    def _add_dhcp_agent_to_network(self, network_id, agent):
-        self.admin_client.add_dhcp_agent_to_network(agent['id'],
-                                                    network_id)
-        self.assertTrue(self._check_network_in_dhcp_agent(
-            network_id, agent))
diff --git a/neutron/tests/api/admin/test_extension_driver_port_security_admin.py b/neutron/tests/api/admin/test_extension_driver_port_security_admin.py
deleted file mode 100644 (file)
index 2e28371..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2015 Cisco Systems, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.api import base_security_groups as base_security
-from neutron.tests.tempest import test
-
-
-class PortSecurityAdminTests(base_security.BaseSecGroupTest,
-                             base.BaseAdminNetworkTest):
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('d39a96e2-2dea-4feb-8093-e7ac991ce6f8')
-    def test_create_port_security_false_on_shared_network(self):
-        network = self.create_shared_network()
-        self.assertTrue(network['shared'])
-        self.create_subnet(network, client=self.admin_client)
-        self.assertRaises(lib_exc.Forbidden, self.create_port,
-                          network, port_security_enabled=False)
diff --git a/neutron/tests/api/admin/test_external_network_extension.py b/neutron/tests/api/admin/test_external_network_extension.py
deleted file mode 100644 (file)
index b996dab..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base
-from neutron.tests.tempest import test
-
-
-class ExternalNetworksTestJSON(base.BaseAdminNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(ExternalNetworksTestJSON, cls).resource_setup()
-        cls.network = cls.create_network()
-
-    def _create_network(self, external=True):
-        post_body = {'name': data_utils.rand_name('network-')}
-        if external:
-            post_body['router:external'] = external
-        body = self.admin_client.create_network(**post_body)
-        network = body['network']
-        self.addCleanup(self.admin_client.delete_network, network['id'])
-        return network
-
-    @test.idempotent_id('462be770-b310-4df9-9c42-773217e4c8b1')
-    def test_create_external_network(self):
-        # Create a network as an admin user specifying the
-        # external network extension attribute
-        ext_network = self._create_network()
-        # Verifies router:external parameter
-        self.assertIsNotNone(ext_network['id'])
-        self.assertTrue(ext_network['router:external'])
-
-    @test.idempotent_id('4db5417a-e11c-474d-a361-af00ebef57c5')
-    def test_update_external_network(self):
-        # Update a network as an admin user specifying the
-        # external network extension attribute
-        network = self._create_network(external=False)
-        self.assertFalse(network.get('router:external', False))
-        update_body = {'router:external': True}
-        body = self.admin_client.update_network(network['id'],
-                                                **update_body)
-        updated_network = body['network']
-        # Verify that router:external parameter was updated
-        self.assertTrue(updated_network['router:external'])
-
-    @test.idempotent_id('39be4c9b-a57e-4ff9-b7c7-b218e209dfcc')
-    def test_list_external_networks(self):
-        # Create external_net
-        external_network = self._create_network()
-        # List networks as a normal user and confirm the external
-        # network extension attribute is returned for those networks
-        # that were created as external
-        body = self.client.list_networks()
-        networks_list = [net['id'] for net in body['networks']]
-        self.assertIn(external_network['id'], networks_list)
-        self.assertIn(self.network['id'], networks_list)
-        for net in body['networks']:
-            if net['id'] == self.network['id']:
-                self.assertFalse(net['router:external'])
-            elif net['id'] == external_network['id']:
-                self.assertTrue(net['router:external'])
-
-    @test.idempotent_id('2ac50ab2-7ebd-4e27-b3ce-a9e399faaea2')
-    def test_show_external_networks_attribute(self):
-        # Create external_net
-        external_network = self._create_network()
-        # Show an external network as a normal user and confirm the
-        # external network extension attribute is returned.
-        body = self.client.show_network(external_network['id'])
-        show_ext_net = body['network']
-        self.assertEqual(external_network['name'], show_ext_net['name'])
-        self.assertEqual(external_network['id'], show_ext_net['id'])
-        self.assertTrue(show_ext_net['router:external'])
-        body = self.client.show_network(self.network['id'])
-        show_net = body['network']
-        # Verify with show that router:external is False for network
-        self.assertEqual(self.network['name'], show_net['name'])
-        self.assertEqual(self.network['id'], show_net['id'])
-        self.assertFalse(show_net['router:external'])
-
-    @test.idempotent_id('82068503-2cf2-4ed4-b3be-ecb89432e4bb')
-    def test_delete_external_networks_with_floating_ip(self):
-        """Verifies external network can be deleted while still holding
-        (unassociated) floating IPs
-
-        """
-        # Set cls.client to admin to use base.create_subnet()
-        client = self.admin_client
-        body = client.create_network(**{'router:external': True})
-        external_network = body['network']
-        self.addCleanup(self._try_delete_resource,
-                        client.delete_network,
-                        external_network['id'])
-        subnet = self.create_subnet(external_network, client=client,
-                                    enable_dhcp=False)
-        body = client.create_floatingip(
-            floating_network_id=external_network['id'])
-        created_floating_ip = body['floatingip']
-        self.addCleanup(self._try_delete_resource,
-                        client.delete_floatingip,
-                        created_floating_ip['id'])
-        floatingip_list = client.list_floatingips(
-            network=external_network['id'])
-        self.assertIn(created_floating_ip['id'],
-                      (f['id'] for f in floatingip_list['floatingips']))
-        client.delete_network(external_network['id'])
-        # Verifies floating ip is deleted
-        floatingip_list = client.list_floatingips()
-        self.assertNotIn(created_floating_ip['id'],
-                         (f['id'] for f in floatingip_list['floatingips']))
-        # Verifies subnet is deleted
-        subnet_list = client.list_subnets()
-        self.assertNotIn(subnet['id'],
-                         (s['id'] for s in subnet_list))
-        # Removes subnet from the cleanup list
-        self.subnets.remove(subnet)
diff --git a/neutron/tests/api/admin/test_external_networks_negative.py b/neutron/tests/api/admin/test_external_networks_negative.py
deleted file mode 100644 (file)
index 35d12f0..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class ExternalNetworksAdminNegativeTestJSON(base.BaseAdminNetworkTest):
-
-    @test.attr(type=['negative'])
-    @test.idempotent_id('d402ae6c-0be0-4d8e-833b-a738895d98d0')
-    def test_create_port_with_precreated_floatingip_as_fixed_ip(self):
-        """
-        External networks can be used to create both floating-ip as well
-        as instance-ip. So, creating an instance-ip with a value of a
-        pre-created floating-ip should be denied.
-        """
-
-        # create a floating ip
-        client = self.admin_client
-        body = client.create_floatingip(
-            floating_network_id=CONF.network.public_network_id)
-        created_floating_ip = body['floatingip']
-        self.addCleanup(self._try_delete_resource,
-                        client.delete_floatingip,
-                        created_floating_ip['id'])
-        floating_ip_address = created_floating_ip['floating_ip_address']
-        self.assertIsNotNone(floating_ip_address)
-
-        # use the same value of floatingip as fixed-ip to create_port()
-        fixed_ips = [{'ip_address': floating_ip_address}]
-
-        # create a port which will internally create an instance-ip
-        self.assertRaises(lib_exc.Conflict,
-                          client.create_port,
-                          network_id=CONF.network.public_network_id,
-                          fixed_ips=fixed_ips)
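
The Conflict assertion above can equivalently be written with
testtools.ExpectedException, the context-manager style the shared-network
tests later in this diff use. A sketch under that assumption; client,
public_net_id and fip_address stand in for the objects built in the test:

    import testtools
    from tempest_lib import exceptions as lib_exc

    def assert_fixed_ip_reuse_denied(client, public_net_id, fip_address):
        # Reusing a floating IP address as a port's fixed IP must conflict.
        with testtools.ExpectedException(lib_exc.Conflict):
            client.create_port(
                network_id=public_net_id,
                fixed_ips=[{'ip_address': fip_address}])
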
diff --git a/neutron/tests/api/admin/test_floating_ips_admin_actions.py b/neutron/tests/api/admin/test_floating_ips_admin_actions.py
deleted file mode 100644 (file)
index b95cfe4..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import testtools
-
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.api import clients
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class FloatingIPAdminTestJSON(base.BaseAdminNetworkTest):
-
-    force_tenant_isolation = True
-
-    @classmethod
-    def resource_setup(cls):
-        super(FloatingIPAdminTestJSON, cls).resource_setup()
-        cls.ext_net_id = CONF.network.public_network_id
-        cls.floating_ip = cls.create_floatingip(cls.ext_net_id)
-        cls.alt_manager = clients.Manager(cls.isolated_creds.get_alt_creds())
-        admin_manager = clients.AdminManager()
-        cls.identity_admin_client = admin_manager.identity_client
-        cls.alt_client = cls.alt_manager.network_client
-        cls.network = cls.create_network()
-        cls.subnet = cls.create_subnet(cls.network)
-        cls.router = cls.create_router(data_utils.rand_name('router-'),
-                                       external_network_id=cls.ext_net_id)
-        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
-        cls.port = cls.create_port(cls.network)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('64f2100b-5471-4ded-b46c-ddeeeb4f231b')
-    def test_list_floating_ips_from_admin_and_nonadmin(self):
-        # Create floating ip from admin user
-        floating_ip_admin = self.admin_client.create_floatingip(
-            floating_network_id=self.ext_net_id)
-        self.addCleanup(self.admin_client.delete_floatingip,
-                        floating_ip_admin['floatingip']['id'])
-        # Create floating ip from alt user
-        body = self.alt_client.create_floatingip(
-            floating_network_id=self.ext_net_id)
-        floating_ip_alt = body['floatingip']
-        self.addCleanup(self.alt_client.delete_floatingip,
-                        floating_ip_alt['id'])
-        # List floating ips from admin
-        body = self.admin_client.list_floatingips()
-        floating_ip_ids_admin = [f['id'] for f in body['floatingips']]
-        # Check that admin sees all floating ips
-        self.assertIn(self.floating_ip['id'], floating_ip_ids_admin)
-        self.assertIn(floating_ip_admin['floatingip']['id'],
-                      floating_ip_ids_admin)
-        self.assertIn(floating_ip_alt['id'], floating_ip_ids_admin)
-        # List floating ips from nonadmin
-        body = self.client.list_floatingips()
-        floating_ip_ids = [f['id'] for f in body['floatingips']]
-        # Check that nonadmin user doesn't see floating ip created from admin
-        # and floating ip that is created in another tenant (alt user)
-        self.assertIn(self.floating_ip['id'], floating_ip_ids)
-        self.assertNotIn(floating_ip_admin['floatingip']['id'],
-                         floating_ip_ids)
-        self.assertNotIn(floating_ip_alt['id'], floating_ip_ids)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('32727cc3-abe2-4485-a16e-48f2d54c14f2')
-    def test_create_list_show_floating_ip_with_tenant_id_by_admin(self):
-        # Creates a floating IP
-        body = self.admin_client.create_floatingip(
-            floating_network_id=self.ext_net_id,
-            tenant_id=self.network['tenant_id'],
-            port_id=self.port['id'])
-        created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
-                        created_floating_ip['id'])
-        self.assertIsNotNone(created_floating_ip['id'])
-        self.assertIsNotNone(created_floating_ip['tenant_id'])
-        self.assertIsNotNone(created_floating_ip['floating_ip_address'])
-        self.assertEqual(created_floating_ip['port_id'], self.port['id'])
-        self.assertEqual(created_floating_ip['floating_network_id'],
-                         self.ext_net_id)
-        fixed_ips = self.port['fixed_ips']
-        self.assertEqual(created_floating_ip['fixed_ip_address'],
-                         fixed_ips[0]['ip_address'])
-        # Verifies the details of a floating_ip
-        floating_ip = self.admin_client.show_floatingip(
-            created_floating_ip['id'])
-        shown_floating_ip = floating_ip['floatingip']
-        self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
-        self.assertEqual(shown_floating_ip['floating_network_id'],
-                         self.ext_net_id)
-        self.assertEqual(shown_floating_ip['tenant_id'],
-                         self.network['tenant_id'])
-        self.assertEqual(shown_floating_ip['floating_ip_address'],
-                         created_floating_ip['floating_ip_address'])
-        self.assertEqual(shown_floating_ip['port_id'], self.port['id'])
-        # Verify the floating ip exists in the list of all floating_ips
-        floating_ips = self.admin_client.list_floatingips()
-        floatingip_id_list = [f['id'] for f in floating_ips['floatingips']]
-        self.assertIn(created_floating_ip['id'], floatingip_id_list)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('11116ee9-4e99-5b15-b8e1-aa7df92ca589')
-    def test_associate_floating_ip_with_port_from_another_tenant(self):
-        body = self.admin_client.create_floatingip(
-            floating_network_id=self.ext_net_id)
-        floating_ip = body['floatingip']
-        test_tenant = data_utils.rand_name('test_tenant_')
-        test_description = data_utils.rand_name('desc_')
-        tenant = self.identity_admin_client.create_tenant(
-            name=test_tenant, description=test_description)
-        tenant_id = tenant['id']
-        self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
-
-        port = self.admin_client.create_port(network_id=self.network['id'],
-                                             tenant_id=tenant_id)
-        self.addCleanup(self.admin_client.delete_port, port['port']['id'])
-        self.assertRaises(lib_exc.BadRequest,
-                          self.admin_client.update_floatingip,
-                          floating_ip['id'], port_id=port['port']['id'])
-
-    @testtools.skipUnless(
-        CONF.network_feature_enabled.specify_floating_ip_address_available,
-        "Feature for specifying floating IP address is disabled")
-    @test.attr(type='smoke')
-    @test.idempotent_id('332a8ae4-402e-4b98-bb6f-532e5a87b8e0')
-    def test_create_floatingip_with_specified_ip_address(self):
-        # Other tests may steal the IP before we can use it, since it
-        # lives on the shared external network, so retry if the address
-        # is already in use.
-        for i in range(100):
-            fip = self.get_unused_ip(self.ext_net_id, ip_version=4)
-            try:
-                body = self.admin_client.create_floatingip(
-                    floating_network_id=self.ext_net_id,
-                    floating_ip_address=fip)
-                break
-            except lib_exc.Conflict:
-                pass
-        else:
-            self.fail("Could not get an unused IP after 100 attempts")
-        created_floating_ip = body['floatingip']
-        self.addCleanup(self.admin_client.delete_floatingip,
-                        created_floating_ip['id'])
-        self.assertIsNotNone(created_floating_ip['id'])
-        self.assertIsNotNone(created_floating_ip['tenant_id'])
-        self.assertEqual(created_floating_ip['floating_ip_address'], fip)
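
The retry loop in the last test is a pattern worth naming: keep picking a
candidate until the create call stops raising Conflict. A generic helper,
hypothetical rather than part of the deleted code, could factor it out:

    from tempest_lib import exceptions as lib_exc

    def retry_on_conflict(attempt, attempts=100):
        # 'attempt' must pick a fresh candidate (here, an unused IP) on
        # every call and raise Conflict when another test grabbed it first.
        for _ in range(attempts):
            try:
                return attempt()
            except lib_exc.Conflict:
                continue
        raise AssertionError(
            'operation still conflicting after %d attempts' % attempts)
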
diff --git a/neutron/tests/api/admin/test_l3_agent_scheduler.py b/neutron/tests/api/admin/test_l3_agent_scheduler.py
deleted file mode 100644 (file)
index 8dfda7e..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base
-from neutron.tests.tempest import exceptions
-from neutron.tests.tempest import test
-
-AGENT_TYPE = 'L3 agent'
-AGENT_MODES = (
-    'legacy',
-    'dvr_snat'
-)
-
-
-class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest):
-    _agent_mode = 'legacy'
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        List routers that the given L3 agent is hosting.
-        List L3 agents hosting the given router.
-        Add a router to and remove a router from an L3 agent.
-
-    v2.0 of the Neutron API is assumed.
-
-    The l3_agent_scheduler extension is required for these tests.
-    """
-
-    @classmethod
-    def skip_checks(cls):
-        super(L3AgentSchedulerTestJSON, cls).skip_checks()
-        if not test.is_extension_enabled('l3_agent_scheduler', 'network'):
-            msg = "L3 Agent Scheduler Extension not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
-    def resource_setup(cls):
-        super(L3AgentSchedulerTestJSON, cls).resource_setup()
-        body = cls.admin_client.list_agents()
-        agents = body['agents']
-        for agent in agents:
-            # TODO(armax): falling back on default _agent_mode can be
-            # dropped as soon as Icehouse is dropped.
-            agent_mode = (
-                agent['configurations'].get('agent_mode', cls._agent_mode))
-            if agent['agent_type'] == AGENT_TYPE and agent_mode in AGENT_MODES:
-                cls.agent = agent
-                break
-        else:
-            msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found"
-            raise exceptions.InvalidConfiguration(msg)
-        cls.router = cls.create_router(data_utils.rand_name('router'))
-        # NOTE(armax): If DVR is an available extension, and the created router
-        # is indeed a distributed one, more resources need to be provisioned
-        # in order to bind the router to the L3 agent.
-        # That said, let's preserve the existing test logic, where the extra
-        # query and setup steps are only required if the extension is available
-        # and only if the router's default type is distributed.
-        if test.is_extension_enabled('dvr', 'network'):
-            is_dvr_router = cls.admin_client.show_router(
-                cls.router['id'])['router'].get('distributed', False)
-            if is_dvr_router:
-                cls.network = cls.create_network()
-                cls.create_subnet(cls.network)
-                cls.port = cls.create_port(cls.network)
-                cls.client.add_router_interface_with_port_id(
-                    cls.router['id'], cls.port['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a')
-    def test_list_routers_on_l3_agent(self):
-        self.admin_client.list_routers_on_l3_agent(self.agent['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66')
-    def test_add_list_remove_router_on_l3_agent(self):
-        l3_agent_ids = list()
-        self.admin_client.add_router_to_l3_agent(
-            self.agent['id'],
-            self.router['id'])
-        body = (
-            self.admin_client.list_l3_agents_hosting_router(self.router['id']))
-        for agent in body['agents']:
-            l3_agent_ids.append(agent['id'])
-            self.assertIn('agent_type', agent)
-            self.assertEqual('L3 agent', agent['agent_type'])
-        self.assertIn(self.agent['id'], l3_agent_ids)
-        body = self.admin_client.remove_router_from_l3_agent(
-            self.agent['id'],
-            self.router['id'])
-        # NOTE(afazekas): The removal is not asserted, because neutron
-        # is not forbidden from rescheduling the router to the same agent
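
Both resource_setup above and the floating-IP retry loop earlier rely on
Python's for/else: the else suite runs only when the loop finishes without
hitting break. A minimal illustration of the same search, with hypothetical
names:

    def find_l3_agent(agents, agent_type='L3 agent'):
        for agent in agents:
            if agent['agent_type'] == agent_type:
                found = agent
                break
        else:
            # Runs only if the loop completed without 'break'.
            raise LookupError('no %s found' % agent_type)
        return found
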
diff --git a/neutron/tests/api/admin/test_quotas.py b/neutron/tests/api/admin/test_quotas.py
deleted file mode 100644 (file)
index ab563ce..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import six
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base
-from neutron.tests.tempest import test
-
-
-class QuotasTest(base.BaseAdminNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        list quotas for tenants who have non-default quota values
-        show quotas for a specified tenant
-        update quotas for a specified tenant
-        reset quotas to default values for a specified tenant
-
-    v2.0 of the API is assumed.
-    It is also assumed that the per-tenant quota extension API is configured
-    in /etc/neutron/neutron.conf as follows:
-
-        quota_driver = neutron.db.driver.DbQuotaDriver
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(QuotasTest, cls).resource_setup()
-        if not test.is_extension_enabled('quotas', 'network'):
-            msg = "quotas extension not enabled."
-            raise cls.skipException(msg)
-        cls.identity_admin_client = cls.os_adm.identity_client
-
-    @test.attr(type='gate')
-    @test.idempotent_id('2390f766-836d-40ef-9aeb-e810d78207fb')
-    def test_quotas(self):
-        # Add a tenant to conduct the test
-        test_tenant = data_utils.rand_name('test_tenant_')
-        test_description = data_utils.rand_name('desc_')
-        tenant = self.identity_admin_client.create_tenant(
-            name=test_tenant,
-            description=test_description)
-        tenant_id = tenant['id']
-        self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
-
-        new_quotas = {'network': 0, 'security_group': 0}
-
-        # Change quotas for tenant
-        quota_set = self.admin_client.update_quotas(tenant_id,
-                                                    **new_quotas)
-        self.addCleanup(self.admin_client.reset_quotas, tenant_id)
-        for key, value in six.iteritems(new_quotas):
-            self.assertEqual(value, quota_set[key])
-
-        # Confirm our tenant is listed among tenants with non-default quotas
-        non_default_quotas = self.admin_client.list_quotas()
-        found = False
-        for qs in non_default_quotas['quotas']:
-            if qs['tenant_id'] == tenant_id:
-                found = True
-        self.assertTrue(found)
-
-        # Confirm via the API that the quotas were changed as requested
-        quota_set = self.admin_client.show_quotas(tenant_id)
-        quota_set = quota_set['quota']
-        for key, value in six.iteritems(new_quotas):
-            self.assertEqual(value, quota_set[key])
-
-        # Reset quotas to default and confirm
-        self.admin_client.reset_quotas(tenant_id)
-        non_default_quotas = self.admin_client.list_quotas()
-        for q in non_default_quotas['quotas']:
-            self.assertNotEqual(tenant_id, q['tenant_id'])
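
Condensed, the per-tenant quota round trip the test above performs is just
update, show, reset; a sketch using the same client method names that appear
in this diff:

    def quota_round_trip(admin_client, tenant_id):
        # Lower two quotas, read them back, then return to defaults.
        admin_client.update_quotas(tenant_id, network=0, security_group=0)
        quotas = admin_client.show_quotas(tenant_id)['quota']
        assert quotas['network'] == 0
        assert quotas['security_group'] == 0
        admin_client.reset_quotas(tenant_id)
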
diff --git a/neutron/tests/api/admin/test_routers_dvr.py b/neutron/tests/api/admin/test_routers_dvr.py
deleted file mode 100644 (file)
index 592fded..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base_routers as base
-from neutron.tests.tempest import test
-
-
-class RoutersTestDVR(base.BaseRouterTest):
-
-    @classmethod
-    def resource_setup(cls):
-        for ext in ['router', 'dvr']:
-            if not test.is_extension_enabled(ext, 'network'):
-                msg = "%s extension not enabled." % ext
-                raise cls.skipException(msg)
-        # The check above will pass if api_extensions=all, which does
-        # not mean the DVR extension itself is present.
-        # Instead, we verify that DVR is actually available by using
-        # admin credentials to create a router and checking whether the
-        # resulting router carries a 'distributed' attribute.
-        super(RoutersTestDVR, cls).resource_setup()
-        name = data_utils.rand_name('pretest-check')
-        router = cls.admin_client.create_router(name)
-        if 'distributed' not in router['router']:
-            msg = "'distributed' attribute not found. DVR Possibly not enabled"
-            raise cls.skipException(msg)
-        cls.admin_client.delete_router(router['router']['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('08a2a0a8-f1e4-4b34-8e30-e522e836c44e')
-    def test_distributed_router_creation(self):
-        """
-        Test uses administrative credentials to creates a
-        DVR (Distributed Virtual Routing) router using the
-        distributed=True.
-
-        Acceptance
-        The router is created and the "distributed" attribute is
-        set to True
-        """
-        name = data_utils.rand_name('router')
-        router = self.admin_client.create_router(name, distributed=True)
-        self.addCleanup(self.admin_client.delete_router,
-                        router['router']['id'])
-        self.assertTrue(router['router']['distributed'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('8a0a72b4-7290-4677-afeb-b4ffe37bc352')
-    def test_centralized_router_creation(self):
-        """
-        Test uses administrative credentials to creates a
-        CVR (Centralized Virtual Routing) router using the
-        distributed=False.
-
-        Acceptance
-        The router is created and the "distributed" attribute is
-        set to False, thus making it a "Centralized Virtual Router"
-        as opposed to a "Distributed Virtual Router"
-        """
-        name = data_utils.rand_name('router')
-        router = self.admin_client.create_router(name, distributed=False)
-        self.addCleanup(self.admin_client.delete_router,
-                        router['router']['id'])
-        self.assertFalse(router['router']['distributed'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('acd43596-c1fb-439d-ada8-31ad48ae3c2e')
-    def test_centralized_router_update_to_dvr(self):
-        """
-        Test uses administrative credentials to creates a
-        CVR (Centralized Virtual Routing) router using the
-        distributed=False.Then it will "update" the router
-        distributed attribute to True
-
-        Acceptance
-        The router is created and the "distributed" attribute is
-        set to False. Once the router is updated, the distributed
-        attribute will be set to True
-        """
-        name = data_utils.rand_name('router')
-        # The router must be administratively down (admin_state_up=False)
-        # in order to be upgraded to DVR
-        router = self.admin_client.create_router(name, distributed=False,
-                                                 admin_state_up=False)
-        self.addCleanup(self.admin_client.delete_router,
-                        router['router']['id'])
-        self.assertFalse(router['router']['distributed'])
-        router = self.admin_client.update_router(router['router']['id'],
-                                                 distributed=True)
-        self.assertTrue(router['router']['distributed'])
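
The last test encodes the CVR-to-DVR migration constraint: the distributed
flag can only be flipped while the router is administratively down. A
condensed sketch; bringing the router back up afterwards is an assumption,
not something the deleted test does:

    def migrate_router_to_dvr(admin_client, name):
        # Create the router centralized and down, then flip to distributed.
        router = admin_client.create_router(
            name, distributed=False, admin_state_up=False)['router']
        router = admin_client.update_router(
            router['id'], distributed=True)['router']
        # Assumption: a deployment would typically re-enable it here, e.g.
        # admin_client.update_router(router['id'], admin_state_up=True)
        return router
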
diff --git a/neutron/tests/api/admin/test_shared_network_extension.py b/neutron/tests/api/admin/test_shared_network_extension.py
deleted file mode 100644 (file)
index 04f7028..0000000
+++ /dev/null
@@ -1,419 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-# Copyright 2015 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import uuid
-
-from tempest_lib import exceptions as lib_exc
-import testtools
-
-from neutron.tests.api import base
-from neutron.tests.api import clients
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-from tempest_lib.common.utils import data_utils
-
-CONF = config.CONF
-
-
-class SharedNetworksTest(base.BaseAdminNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(SharedNetworksTest, cls).resource_setup()
-        cls.shared_network = cls.create_shared_network()
-
-    @test.idempotent_id('6661d219-b96d-4597-ad10-55766123421a')
-    def test_filtering_shared_networks(self):
-        # this test is necessary because the 'shared' column does not
-        # actually exist on networks, so the filter function has to
-        # translate it into queries against the RBAC table
-        self.create_network()
-        self._check_shared_correct(
-            self.client.list_networks(shared=True)['networks'], True)
-        self._check_shared_correct(
-            self.admin_client.list_networks(shared=True)['networks'], True)
-        self._check_shared_correct(
-            self.client.list_networks(shared=False)['networks'], False)
-        self._check_shared_correct(
-            self.admin_client.list_networks(shared=False)['networks'], False)
-
-    def _check_shared_correct(self, items, shared):
-        self.assertNotEmpty(items)
-        self.assertTrue(all(n['shared'] == shared for n in items))
-
-    @test.idempotent_id('6661d219-b96d-4597-ad10-51672353421a')
-    def test_filtering_shared_subnets(self):
-        # shared subnets need to be tested because their shared status isn't
-        # visible as a regular API attribute; it is derived solely from the
-        # parent network
-        reg = self.create_network()
-        priv = self.create_subnet(reg, client=self.client)
-        shared = self.create_subnet(self.shared_network,
-                                    client=self.admin_client)
-        self.assertIn(shared, self.client.list_subnets(shared=True)['subnets'])
-        self.assertIn(shared,
-            self.admin_client.list_subnets(shared=True)['subnets'])
-        self.assertNotIn(priv,
-            self.client.list_subnets(shared=True)['subnets'])
-        self.assertNotIn(priv,
-            self.admin_client.list_subnets(shared=True)['subnets'])
-        self.assertIn(priv, self.client.list_subnets(shared=False)['subnets'])
-        self.assertIn(priv,
-            self.admin_client.list_subnets(shared=False)['subnets'])
-        self.assertNotIn(shared,
-            self.client.list_subnets(shared=False)['subnets'])
-        self.assertNotIn(shared,
-            self.admin_client.list_subnets(shared=False)['subnets'])
-
-    @test.idempotent_id('6661d219-b96d-4597-ad10-55766ce4abf7')
-    def test_create_update_shared_network(self):
-        shared_network = self.create_shared_network()
-        net_id = shared_network['id']
-        self.assertEqual('ACTIVE', shared_network['status'])
-        self.assertIsNotNone(shared_network['id'])
-        self.assertTrue(shared_network['shared'])
-        new_name = "New_shared_network"
-        body = self.admin_client.update_network(net_id, name=new_name,
-                                                admin_state_up=False,
-                                                shared=False)
-        updated_net = body['network']
-        self.assertEqual(new_name, updated_net['name'])
-        self.assertFalse(updated_net['shared'])
-        self.assertFalse(updated_net['admin_state_up'])
-
-    @test.idempotent_id('9c31fabb-0181-464f-9ace-95144fe9ca77')
-    def test_create_port_shared_network_as_non_admin_tenant(self):
-        # create a port as non admin
-        body = self.client.create_port(network_id=self.shared_network['id'])
-        port = body['port']
-        self.addCleanup(self.admin_client.delete_port, port['id'])
-        # verify the tenant id of admin network and non admin port
-        self.assertNotEqual(self.shared_network['tenant_id'],
-                            port['tenant_id'])
-
-    @test.idempotent_id('3e39c4a6-9caf-4710-88f1-d20073c6dd76')
-    def test_create_bulk_shared_network(self):
-        # Creates 2 networks in one request
-        net_nm = [data_utils.rand_name('network'),
-                  data_utils.rand_name('network')]
-        body = self.admin_client.create_bulk_network(net_nm, shared=True)
-        created_networks = body['networks']
-        for net in created_networks:
-            self.addCleanup(self.admin_client.delete_network, net['id'])
-            self.assertIsNotNone(net['id'])
-            self.assertTrue(net['shared'])
-
-    def _list_shared_networks(self, user):
-        body = user.list_networks(shared=True)
-        networks_list = [net['id'] for net in body['networks']]
-        self.assertIn(self.shared_network['id'], networks_list)
-        self.assertTrue(self.shared_network['shared'])
-
-    @test.idempotent_id('a064a9fd-e02f-474a-8159-f828cd636a28')
-    def test_list_shared_networks(self):
-        # List the shared networks and confirm that the
-        # shared network extension attribute is returned for those networks
-        # that are created as shared
-        self._list_shared_networks(self.admin_client)
-        self._list_shared_networks(self.client)
-
-    def _show_shared_network(self, user):
-        body = user.show_network(self.shared_network['id'])
-        show_shared_net = body['network']
-        self.assertEqual(self.shared_network['name'], show_shared_net['name'])
-        self.assertEqual(self.shared_network['id'], show_shared_net['id'])
-        self.assertTrue(show_shared_net['shared'])
-
-    @test.idempotent_id('e03c92a2-638d-4bfa-b50a-b1f66f087e58')
-    def test_show_shared_networks_attribute(self):
-        # Show a shared network and confirm that the
-        # shared network extension attribute is returned.
-        self._show_shared_network(self.admin_client)
-        self._show_shared_network(self.client)
-
-
-class AllowedAddressPairSharedNetworkTest(base.BaseAdminNetworkTest):
-    allowed_address_pairs = [{'ip_address': '1.1.1.1'}]
-
-    @classmethod
-    def skip_checks(cls):
-        super(AllowedAddressPairSharedNetworkTest, cls).skip_checks()
-        if not test.is_extension_enabled('allowed-address-pairs', 'network'):
-            msg = "Allowed Address Pairs extension not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
-    def resource_setup(cls):
-        super(AllowedAddressPairSharedNetworkTest, cls).resource_setup()
-        cls.network = cls.create_shared_network()
-        cls.create_subnet(cls.network, client=cls.admin_client)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-ffffffff1fff')
-    def test_create_with_address_pair_blocked_on_other_network(self):
-        with testtools.ExpectedException(lib_exc.Forbidden):
-            self.create_port(self.network,
-                             allowed_address_pairs=self.allowed_address_pairs)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-ffffffff2fff')
-    def test_update_with_address_pair_blocked_on_other_network(self):
-        port = self.create_port(self.network)
-        with testtools.ExpectedException(lib_exc.Forbidden):
-            self.update_port(
-                port, allowed_address_pairs=self.allowed_address_pairs)
-
-
-class RBACSharedNetworksTest(base.BaseAdminNetworkTest):
-
-    force_tenant_isolation = True
-
-    @classmethod
-    def resource_setup(cls):
-        super(RBACSharedNetworksTest, cls).resource_setup()
-        if not test.is_extension_enabled('rbac_policies', 'network'):
-            msg = "rbac extension not enabled."
-            raise cls.skipException(msg)
-        creds = cls.isolated_creds.get_alt_creds()
-        cls.client2 = clients.Manager(credentials=creds).network_client
-
-    def _make_admin_net_and_subnet_shared_to_tenant_id(self, tenant_id):
-        net = self.admin_client.create_network(
-            name=data_utils.rand_name('test-network-'))['network']
-        self.addCleanup(self.admin_client.delete_network, net['id'])
-        subnet = self.create_subnet(net, client=self.admin_client)
-        # network is shared to the first unprivileged client by default
-        pol = self.admin_client.create_rbac_policy(
-            object_type='network', object_id=net['id'],
-            action='access_as_shared', target_tenant=tenant_id
-        )['rbac_policy']
-        return {'network': net, 'subnet': subnet, 'policy': pol}
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff')
-    def test_network_only_visible_to_policy_target(self):
-        net = self._make_admin_net_and_subnet_shared_to_tenant_id(
-            self.client.tenant_id)['network']
-        self.client.show_network(net['id'])
-        with testtools.ExpectedException(lib_exc.NotFound):
-            # client2 has not been granted access
-            self.client2.show_network(net['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff2fff')
-    def test_subnet_on_network_only_visible_to_policy_target(self):
-        sub = self._make_admin_net_and_subnet_shared_to_tenant_id(
-            self.client.tenant_id)['subnet']
-        self.client.show_subnet(sub['id'])
-        with testtools.ExpectedException(lib_exc.NotFound):
-            # client2 has not been granted access
-            self.client2.show_subnet(sub['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff2eee')
-    def test_policy_target_update(self):
-        res = self._make_admin_net_and_subnet_shared_to_tenant_id(
-            self.client.tenant_id)
-        # change to client2
-        update_res = self.admin_client.update_rbac_policy(
-                res['policy']['id'], target_tenant=self.client2.tenant_id)
-        self.assertEqual(self.client2.tenant_id,
-                         update_res['rbac_policy']['target_tenant'])
-        # make sure everything else stayed the same
-        res['policy'].pop('target_tenant')
-        update_res['rbac_policy'].pop('target_tenant')
-        self.assertEqual(res['policy'], update_res['rbac_policy'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff3fff')
-    def test_port_presence_prevents_network_rbac_policy_deletion(self):
-        res = self._make_admin_net_and_subnet_shared_to_tenant_id(
-            self.client.tenant_id)
-        port = self.client.create_port(network_id=res['network']['id'])['port']
-        # a port on the network should prevent the deletion of a policy
-        # required for it to exist
-        with testtools.ExpectedException(lib_exc.Conflict):
-            self.admin_client.delete_rbac_policy(res['policy']['id'])
-
-        # a wildcard policy should allow the specific policy to be deleted
-        # since it allows the remaining port
-        wild = self.admin_client.create_rbac_policy(
-            object_type='network', object_id=res['network']['id'],
-            action='access_as_shared', target_tenant='*')['rbac_policy']
-        self.admin_client.delete_rbac_policy(res['policy']['id'])
-
-        # now that the wildcard policy is the only one remaining, it should
-        # be subject to the same restriction
-        with testtools.ExpectedException(lib_exc.Conflict):
-            self.admin_client.delete_rbac_policy(wild['id'])
-        # similarly, we can't update the policy to a different tenant
-        with testtools.ExpectedException(lib_exc.Conflict):
-            self.admin_client.update_rbac_policy(
-                wild['id'], target_tenant=self.client2.tenant_id)
-
-        self.client.delete_port(port['id'])
-        # anchor is gone, delete should pass
-        self.admin_client.delete_rbac_policy(wild['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-beefbeefbeef')
-    def test_tenant_can_delete_port_on_own_network(self):
-        # TODO(kevinbenton): make adjustments to the db lookup to
-        # make this work.
-        msg = "Non-admin cannot currently delete other's ports."
-        raise self.skipException(msg)
-        # pylint: disable=unreachable
-        net = self.create_network()  # owned by self.client
-        self.client.create_rbac_policy(
-            object_type='network', object_id=net['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
-        port = self.client2.create_port(network_id=net['id'])['port']
-        self.client.delete_port(port['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff4fff')
-    def test_regular_client_shares_to_another_regular_client(self):
-        net = self.create_network()  # owned by self.client
-        with testtools.ExpectedException(lib_exc.NotFound):
-            self.client2.show_network(net['id'])
-        pol = self.client.create_rbac_policy(
-            object_type='network', object_id=net['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
-        self.client2.show_network(net['id'])
-
-        self.assertIn(pol['rbac_policy'],
-                      self.client.list_rbac_policies()['rbac_policies'])
-        # ensure that 'client2' can't see the policy sharing the network to it
-        # because the policy belongs to 'client'
-        self.assertNotIn(pol['rbac_policy']['id'],
-            [p['id']
-             for p in self.client2.list_rbac_policies()['rbac_policies']])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('bf5052b8-b11e-407c-8e43-113447404d3e')
-    def test_filter_fields(self):
-        net = self.create_network()
-        self.client.create_rbac_policy(
-            object_type='network', object_id=net['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
-        field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
-                      ('tenant_id', 'target_tenant'))
-        for fields in field_args:
-            res = self.client.list_rbac_policies(fields=fields)
-            self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff5fff')
-    def test_policy_show(self):
-        res = self._make_admin_net_and_subnet_shared_to_tenant_id(
-            self.client.tenant_id)
-        p1 = res['policy']
-        p2 = self.admin_client.create_rbac_policy(
-            object_type='network', object_id=res['network']['id'],
-            action='access_as_shared',
-            target_tenant='*')['rbac_policy']
-
-        self.assertEqual(
-            p1, self.admin_client.show_rbac_policy(p1['id'])['rbac_policy'])
-        self.assertEqual(
-            p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e7bcb1ea-4877-4266-87bb-76f68b421f31')
-    def test_filter_policies(self):
-        net = self.create_network()
-        pol1 = self.client.create_rbac_policy(
-            object_type='network', object_id=net['id'],
-            action='access_as_shared',
-            target_tenant=self.client2.tenant_id)['rbac_policy']
-        pol2 = self.client.create_rbac_policy(
-            object_type='network', object_id=net['id'],
-            action='access_as_shared',
-            target_tenant=self.client.tenant_id)['rbac_policy']
-        res1 = self.client.list_rbac_policies(id=pol1['id'])['rbac_policies']
-        res2 = self.client.list_rbac_policies(id=pol2['id'])['rbac_policies']
-        self.assertEqual(1, len(res1))
-        self.assertEqual(1, len(res2))
-        self.assertEqual(pol1['id'], res1[0]['id'])
-        self.assertEqual(pol2['id'], res2[0]['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff6fff')
-    def test_regular_client_blocked_from_sharing_anothers_network(self):
-        net = self._make_admin_net_and_subnet_shared_to_tenant_id(
-            self.client.tenant_id)['network']
-        with testtools.ExpectedException(lib_exc.BadRequest):
-            self.client.create_rbac_policy(
-                object_type='network', object_id=net['id'],
-                action='access_as_shared', target_tenant=self.client.tenant_id)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('c5f8f785-ce8d-4430-af7e-a236205862fb')
-    def test_rbac_policy_quota(self):
-        if not test.is_extension_enabled('quotas', 'network'):
-            msg = "quotas extension not enabled."
-            raise self.skipException(msg)
-        quota = self.client.show_quotas(self.client.tenant_id)['quota']
-        max_policies = quota['rbac_policy']
-        self.assertGreater(max_policies, 0)
-        net = self.client.create_network(
-            name=data_utils.rand_name('test-network-'))['network']
-        self.addCleanup(self.client.delete_network, net['id'])
-        with testtools.ExpectedException(lib_exc.Conflict):
-            for i in range(0, max_policies + 1):
-                self.admin_client.create_rbac_policy(
-                    object_type='network', object_id=net['id'],
-                    action='access_as_shared',
-                    target_tenant=str(uuid.uuid4()).replace('-', ''))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff7fff')
-    def test_regular_client_blocked_from_sharing_with_wildcard(self):
-        net = self.create_network()
-        with testtools.ExpectedException(lib_exc.Forbidden):
-            self.client.create_rbac_policy(
-                object_type='network', object_id=net['id'],
-                action='access_as_shared', target_tenant='*')
-        # ensure it works on update as well
-        pol = self.client.create_rbac_policy(
-            object_type='network', object_id=net['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
-        with testtools.ExpectedException(lib_exc.Forbidden):
-            self.client.update_rbac_policy(pol['rbac_policy']['id'],
-                                           target_tenant='*')
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-aeeeeeee7fff')
-    def test_filtering_works_with_rbac_records_present(self):
-        resp = self._make_admin_net_and_subnet_shared_to_tenant_id(
-            self.client.tenant_id)
-        net = resp['network']['id']
-        sub = resp['subnet']['id']
-        self.admin_client.create_rbac_policy(
-            object_type='network', object_id=net,
-            action='access_as_shared', target_tenant='*')
-        self._assert_shared_object_id_listing_presence('subnets', False, sub)
-        self._assert_shared_object_id_listing_presence('subnets', True, sub)
-        self._assert_shared_object_id_listing_presence('networks', False, net)
-        self._assert_shared_object_id_listing_presence('networks', True, net)
-
-    def _assert_shared_object_id_listing_presence(self, resource, shared, oid):
-        lister = getattr(self.admin_client, 'list_%s' % resource)
-        objects = [o['id'] for o in lister(shared=shared)[resource]]
-        if shared:
-            self.assertIn(oid, objects)
-        else:
-            self.assertNotIn(oid, objects)
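
The RBAC tests above all revolve around the lifecycle of an access_as_shared
policy: create it to grant a tenant access, update it to retarget the grant,
delete it to revoke (which conflicts while dependent ports exist). A
condensed sketch with the same client calls:

    def share_then_revoke(client, net_id, tenant_a, tenant_b):
        policy = client.create_rbac_policy(
            object_type='network', object_id=net_id,
            action='access_as_shared',
            target_tenant=tenant_a)['rbac_policy']
        # Retarget the grant from tenant_a to tenant_b.
        client.update_rbac_policy(policy['id'], target_tenant=tenant_b)
        # Revoke; raises Conflict while tenant_b still has ports that
        # depend on the policy (see the port-presence test above).
        client.delete_rbac_policy(policy['id'])
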
diff --git a/neutron/tests/api/base.py b/neutron/tests/api/base.py
deleted file mode 100644 (file)
index 076963b..0000000
+++ /dev/null
@@ -1,511 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import clients
-from neutron.tests.tempest import config
-from neutron.tests.tempest import exceptions
-import neutron.tests.tempest.test
-
-CONF = config.CONF
-
-
-class BaseNetworkTest(neutron.tests.tempest.test.BaseTestCase):
-
-    """
-    Base class for the Neutron tests that use the Tempest Neutron REST client
-
-    Per the Neutron API Guide, API v1.x was removed from the source code tree
-    (docs.openstack.org/api/openstack-network/2.0/content/Overview-d1e71.html)
-    Therefore, v2.x of the Neutron API is assumed. It is also assumed that the
-    following options are defined in the [network] section of etc/tempest.conf:
-
-        tenant_network_cidr with a block of CIDRs from which smaller blocks
-        can be allocated for tenant networks
-
-        tenant_network_mask_bits with the mask bits to be used to partition
-        the block defined by tenant_network_cidr
-
-    Finally, it is assumed that the following option is defined in the
-    [service_available] section of etc/tempest.conf
-
-        neutron as True
-    """
-
-    force_tenant_isolation = False
-
-    # Default to ipv4.
-    _ip_version = 4
-
-    @classmethod
-    def resource_setup(cls):
-        # Create no network resources for these tests.
-        cls.set_network_resources()
-        super(BaseNetworkTest, cls).resource_setup()
-        if not CONF.service_available.neutron:
-            raise cls.skipException("Neutron support is required")
-        if cls._ip_version == 6 and not CONF.network_feature_enabled.ipv6:
-            raise cls.skipException("IPv6 Tests are disabled.")
-
-        os = cls.get_client_manager()
-
-        cls.network_cfg = CONF.network
-        cls.client = os.network_client
-        cls.networks = []
-        cls.shared_networks = []
-        cls.subnets = []
-        cls.ports = []
-        cls.routers = []
-        cls.vpnservices = []
-        cls.ikepolicies = []
-        cls.floating_ips = []
-        cls.metering_labels = []
-        cls.service_profiles = []
-        cls.flavors = []
-        cls.metering_label_rules = []
-        cls.fw_rules = []
-        cls.fw_policies = []
-        cls.ipsecpolicies = []
-        cls.qos_rules = []
-        cls.qos_policies = []
-        cls.ethertype = "IPv" + str(cls._ip_version)
-        cls.address_scopes = []
-        cls.admin_address_scopes = []
-        cls.subnetpools = []
-        cls.admin_subnetpools = []
-
-    @classmethod
-    def resource_cleanup(cls):
-        if CONF.service_available.neutron:
-            # Clean up ipsec policies
-            for ipsecpolicy in cls.ipsecpolicies:
-                cls._try_delete_resource(cls.client.delete_ipsecpolicy,
-                                         ipsecpolicy['id'])
-            # Clean up firewall policies
-            for fw_policy in cls.fw_policies:
-                cls._try_delete_resource(cls.client.delete_firewall_policy,
-                                         fw_policy['id'])
-            # Clean up firewall rules
-            for fw_rule in cls.fw_rules:
-                cls._try_delete_resource(cls.client.delete_firewall_rule,
-                                         fw_rule['id'])
-            # Clean up ike policies
-            for ikepolicy in cls.ikepolicies:
-                cls._try_delete_resource(cls.client.delete_ikepolicy,
-                                         ikepolicy['id'])
-            # Clean up vpn services
-            for vpnservice in cls.vpnservices:
-                cls._try_delete_resource(cls.client.delete_vpnservice,
-                                         vpnservice['id'])
-            # Clean up QoS rules
-            for qos_rule in cls.qos_rules:
-                cls._try_delete_resource(cls.admin_client.delete_qos_rule,
-                                         qos_rule['id'])
-            # Clean up QoS policies
-            for qos_policy in cls.qos_policies:
-                cls._try_delete_resource(cls.admin_client.delete_qos_policy,
-                                         qos_policy['id'])
-            # Clean up floating IPs
-            for floating_ip in cls.floating_ips:
-                cls._try_delete_resource(cls.client.delete_floatingip,
-                                         floating_ip['id'])
-            # Clean up routers
-            for router in cls.routers:
-                cls._try_delete_resource(cls.delete_router,
-                                         router)
-            # Clean up metering label rules
-            for metering_label_rule in cls.metering_label_rules:
-                cls._try_delete_resource(
-                    cls.admin_client.delete_metering_label_rule,
-                    metering_label_rule['id'])
-            # Clean up metering labels
-            for metering_label in cls.metering_labels:
-                cls._try_delete_resource(
-                    cls.admin_client.delete_metering_label,
-                    metering_label['id'])
-            # Clean up flavors
-            for flavor in cls.flavors:
-                cls._try_delete_resource(
-                    cls.admin_client.delete_flavor,
-                    flavor['id'])
-            # Clean up service profiles
-            for service_profile in cls.service_profiles:
-                cls._try_delete_resource(
-                    cls.admin_client.delete_service_profile,
-                    service_profile['id'])
-            # Clean up ports
-            for port in cls.ports:
-                cls._try_delete_resource(cls.client.delete_port,
-                                         port['id'])
-            # Clean up subnets
-            for subnet in cls.subnets:
-                cls._try_delete_resource(cls.client.delete_subnet,
-                                         subnet['id'])
-            # Clean up networks
-            for network in cls.networks:
-                cls._try_delete_resource(cls.client.delete_network,
-                                         network['id'])
-
-            # Clean up shared networks
-            for network in cls.shared_networks:
-                cls._try_delete_resource(cls.admin_client.delete_network,
-                                         network['id'])
-
-            for subnetpool in cls.subnetpools:
-                cls._try_delete_resource(cls.client.delete_subnetpool,
-                                         subnetpool['id'])
-
-            for subnetpool in cls.admin_subnetpools:
-                cls._try_delete_resource(cls.admin_client.delete_subnetpool,
-                                         subnetpool['id'])
-
-            for address_scope in cls.address_scopes:
-                cls._try_delete_resource(cls.client.delete_address_scope,
-                                         address_scope['id'])
-
-            for address_scope in cls.admin_address_scopes:
-                cls._try_delete_resource(
-                    cls.admin_client.delete_address_scope,
-                    address_scope['id'])
-
-            cls.clear_isolated_creds()
-        super(BaseNetworkTest, cls).resource_cleanup()
-
-    @classmethod
-    def _try_delete_resource(cls, delete_callable, *args, **kwargs):
-        """Cleanup resources in case of test-failure
-
-        Some resources are explicitly deleted by the test.
-        If the test failed to delete a resource, this method will execute
-        the appropriate delete methods. Otherwise, the method ignores NotFound
-        exceptions thrown for resources that were correctly deleted by the
-        test.
-
-        :param delete_callable: delete method
-        :param args: arguments for delete method
-        :param kwargs: keyword arguments for delete method
-        """
-        try:
-            delete_callable(*args, **kwargs)
-        # if resource is not found, this means it was deleted in the test
-        except lib_exc.NotFound:
-            pass
-
-    @classmethod
-    def create_network(cls, network_name=None, **kwargs):
-        """Wrapper utility that returns a test network."""
-        network_name = network_name or data_utils.rand_name('test-network-')
-
-        body = cls.client.create_network(name=network_name, **kwargs)
-        network = body['network']
-        cls.networks.append(network)
-        return network
-
-    @classmethod
-    def create_shared_network(cls, network_name=None, **post_body):
-        network_name = network_name or data_utils.rand_name('sharednetwork-')
-        post_body.update({'name': network_name, 'shared': True})
-        body = cls.admin_client.create_network(**post_body)
-        network = body['network']
-        cls.shared_networks.append(network)
-        return network
-
-    @classmethod
-    def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
-                      ip_version=None, client=None, **kwargs):
-        """Wrapper utility that returns a test subnet."""
-
-        # allow tests to use admin client
-        if not client:
-            client = cls.client
-
-        # The cidr and mask_bits depend on the ip version.
-        ip_version = ip_version if ip_version is not None else cls._ip_version
-        gateway_not_set = gateway == ''
-        if ip_version == 4:
-            cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr)
-            mask_bits = mask_bits or CONF.network.tenant_network_mask_bits
-        elif ip_version == 6:
-            cidr = (
-                cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr))
-            mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits
-        # Find a cidr that is not in use yet and create a subnet with it
-        for subnet_cidr in cidr.subnet(mask_bits):
-            if gateway_not_set:
-                gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
-            else:
-                gateway_ip = gateway
-            try:
-                body = client.create_subnet(
-                    network_id=network['id'],
-                    cidr=str(subnet_cidr),
-                    ip_version=ip_version,
-                    gateway_ip=gateway_ip,
-                    **kwargs)
-                break
-            except lib_exc.BadRequest as e:
-                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
-                if not is_overlapping_cidr:
-                    raise
-        else:
-            message = 'Available CIDR for subnet creation could not be found'
-            raise exceptions.BuildErrorException(message)
-        subnet = body['subnet']
-        cls.subnets.append(subnet)
-        return subnet
-
-    @classmethod
-    def create_port(cls, network, **kwargs):
-        """Wrapper utility that returns a test port."""
-        body = cls.client.create_port(network_id=network['id'],
-                                      **kwargs)
-        port = body['port']
-        cls.ports.append(port)
-        return port
-
-    @classmethod
-    def update_port(cls, port, **kwargs):
-        """Wrapper utility that updates a test port."""
-        body = cls.client.update_port(port['id'],
-                                      **kwargs)
-        return body['port']
-
-    @classmethod
-    def create_router(cls, router_name=None, admin_state_up=False,
-                      external_network_id=None, enable_snat=None,
-                      **kwargs):
-        ext_gw_info = {}
-        if external_network_id:
-            ext_gw_info['network_id'] = external_network_id
-        if enable_snat:
-            ext_gw_info['enable_snat'] = enable_snat
-        body = cls.client.create_router(
-            router_name, external_gateway_info=ext_gw_info,
-            admin_state_up=admin_state_up, **kwargs)
-        router = body['router']
-        cls.routers.append(router)
-        return router
-
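-    # Illustrative call (config option assumed to be set): attach a router
-    # to an external network with SNAT explicitly disabled; enable_snat=False
-    # is only forwarded because the check above uses "is not None":
-    #
-    #     router = cls.create_router(
-    #         data_utils.rand_name('router-'), admin_state_up=True,
-    #         external_network_id=CONF.network.public_network_id,
-    #         enable_snat=False)
-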
-    @classmethod
-    def create_floatingip(cls, external_network_id):
-        """Wrapper utility that returns a test floating IP."""
-        body = cls.client.create_floatingip(
-            floating_network_id=external_network_id)
-        fip = body['floatingip']
-        cls.floating_ips.append(fip)
-        return fip
-
-    @classmethod
-    def create_router_interface(cls, router_id, subnet_id):
-        """Wrapper utility that returns a router interface."""
-        interface = cls.client.add_router_interface_with_subnet_id(
-            router_id, subnet_id)
-        return interface
-
-    @classmethod
-    def create_vpnservice(cls, subnet_id, router_id):
-        """Wrapper utility that returns a test vpn service."""
-        body = cls.client.create_vpnservice(
-            subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
-            name=data_utils.rand_name("vpnservice-"))
-        vpnservice = body['vpnservice']
-        cls.vpnservices.append(vpnservice)
-        return vpnservice
-
-    @classmethod
-    def create_ikepolicy(cls, name):
-        """Wrapper utility that returns a test ike policy."""
-        body = cls.client.create_ikepolicy(name=name)
-        ikepolicy = body['ikepolicy']
-        cls.ikepolicies.append(ikepolicy)
-        return ikepolicy
-
-    @classmethod
-    def create_firewall_rule(cls, action, protocol):
-        """Wrapper utility that returns a test firewall rule."""
-        body = cls.client.create_firewall_rule(
-            name=data_utils.rand_name("fw-rule"),
-            action=action,
-            protocol=protocol)
-        fw_rule = body['firewall_rule']
-        cls.fw_rules.append(fw_rule)
-        return fw_rule
-
-    @classmethod
-    def create_firewall_policy(cls):
-        """Wrapper utility that returns a test firewall policy."""
-        body = cls.client.create_firewall_policy(
-            name=data_utils.rand_name("fw-policy"))
-        fw_policy = body['firewall_policy']
-        cls.fw_policies.append(fw_policy)
-        return fw_policy
-
-    @classmethod
-    def create_qos_policy(cls, name, description, shared, tenant_id=None):
-        """Wrapper utility that returns a test QoS policy."""
-        body = cls.admin_client.create_qos_policy(
-            name, description, shared, tenant_id)
-        qos_policy = body['policy']
-        cls.qos_policies.append(qos_policy)
-        return qos_policy
-
-    @classmethod
-    def create_qos_bandwidth_limit_rule(cls, policy_id,
-                                       max_kbps, max_burst_kbps):
-        """Wrapper utility that returns a test QoS bandwidth limit rule."""
-        body = cls.admin_client.create_bandwidth_limit_rule(
-            policy_id, max_kbps, max_burst_kbps)
-        qos_rule = body['bandwidth_limit_rule']
-        cls.qos_rules.append(qos_rule)
-        return qos_rule
-
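-    # Illustrative QoS flow (values are examples only): create a policy,
-    # then cap bandwidth on it at 1 Mbps with a 1 Mbit burst:
-    #
-    #     policy = cls.create_qos_policy('bw-limit', 'example', shared=False)
-    #     rule = cls.create_qos_bandwidth_limit_rule(
-    #         policy['id'], max_kbps=1000, max_burst_kbps=1000)
-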
-    @classmethod
-    def delete_router(cls, router):
-        body = cls.client.list_router_interfaces(router['id'])
-        interfaces = body['ports']
-        for i in interfaces:
-            try:
-                cls.client.remove_router_interface_with_subnet_id(
-                    router['id'], i['fixed_ips'][0]['subnet_id'])
-            except lib_exc.NotFound:
-                pass
-        cls.client.delete_router(router['id'])
-
-    @classmethod
-    def create_ipsecpolicy(cls, name):
-        """Wrapper utility that returns a test ipsec policy."""
-        body = cls.client.create_ipsecpolicy(name=name)
-        ipsecpolicy = body['ipsecpolicy']
-        cls.ipsecpolicies.append(ipsecpolicy)
-        return ipsecpolicy
-
-    @classmethod
-    def create_address_scope(cls, name, is_admin=False, **kwargs):
-        if is_admin:
-            body = cls.admin_client.create_address_scope(name=name, **kwargs)
-            cls.admin_address_scopes.append(body['address_scope'])
-        else:
-            body = cls.client.create_address_scope(name=name, **kwargs)
-            cls.address_scopes.append(body['address_scope'])
-        return body['address_scope']
-
-    @classmethod
-    def create_subnetpool(cls, name, is_admin=False, **kwargs):
-        if is_admin:
-            body = cls.admin_client.create_subnetpool(name=name, **kwargs)
-            cls.admin_subnetpools.append(body['subnetpool'])
-        else:
-            body = cls.client.create_subnetpool(name=name, **kwargs)
-            cls.subnetpools.append(body['subnetpool'])
-        return body['subnetpool']
-
-
-class BaseAdminNetworkTest(BaseNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(BaseAdminNetworkTest, cls).resource_setup()
-
-        try:
-            creds = cls.isolated_creds.get_admin_creds()
-            cls.os_adm = clients.Manager(credentials=creds)
-        except NotImplementedError:
-            msg = ("Missing Administrative Network API credentials "
-                   "in configuration.")
-            raise cls.skipException(msg)
-        cls.admin_client = cls.os_adm.network_client
-
-    @classmethod
-    def create_metering_label(cls, name, description):
-        """Wrapper utility that returns a test metering label."""
-        body = cls.admin_client.create_metering_label(
-            description=description,
-            name=name)
-        metering_label = body['metering_label']
-        cls.metering_labels.append(metering_label)
-        return metering_label
-
-    @classmethod
-    def create_metering_label_rule(cls, remote_ip_prefix, direction,
-                                   metering_label_id):
-        """Wrapper utility that returns a test metering label rule."""
-        body = cls.admin_client.create_metering_label_rule(
-            remote_ip_prefix=remote_ip_prefix, direction=direction,
-            metering_label_id=metering_label_id)
-        metering_label_rule = body['metering_label_rule']
-        cls.metering_label_rules.append(metering_label_rule)
-        return metering_label_rule
-
-    @classmethod
-    def create_flavor(cls, name, description, service_type):
-        """Wrapper utility that returns a test flavor."""
-        body = cls.admin_client.create_flavor(
-            description=description, service_type=service_type,
-            name=name)
-        flavor = body['flavor']
-        cls.flavors.append(flavor)
-        return flavor
-
-    @classmethod
-    def create_service_profile(cls, description, metainfo, driver):
-        """Wrapper utility that returns a test service profile."""
-        body = cls.admin_client.create_service_profile(
-            driver=driver, metainfo=metainfo, description=description)
-        service_profile = body['service_profile']
-        cls.service_profiles.append(service_profile)
-        return service_profile
-
-    @classmethod
-    def get_unused_ip(cls, net_id, ip_version=None):
-        """Get an unused ip address in a allocaion pool of net"""
-        body = cls.admin_client.list_ports(network_id=net_id)
-        ports = body['ports']
-        used_ips = []
-        for port in ports:
-            used_ips.extend(
-                [fixed_ip['ip_address'] for fixed_ip in port['fixed_ips']])
-        body = cls.admin_client.list_subnets(network_id=net_id)
-        subnets = body['subnets']
-
-        for subnet in subnets:
-            if ip_version and subnet['ip_version'] != ip_version:
-                continue
-            cidr = subnet['cidr']
-            allocation_pools = subnet['allocation_pools']
-            iterators = []
-            if allocation_pools:
-                for allocation_pool in allocation_pools:
-                    iterators.append(netaddr.iter_iprange(
-                        allocation_pool['start'], allocation_pool['end']))
-            else:
-                net = netaddr.IPNetwork(cidr)
-
-                def _iterip():
-                    for ip in net:
-                        if ip not in (net.network, net.broadcast):
-                            yield ip
-                iterators.append(iter(_iterip()))
-
-            for iterator in iterators:
-                for ip in iterator:
-                    if str(ip) not in used_ips:
-                        return str(ip)
-
-        message = (
-            "net(%s) has no usable IP address in allocation pools" % net_id)
-        raise exceptions.InvalidConfiguration(message)
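-
-# Illustrative use of get_unused_ip() (identifiers assumed): reserve a free
-# address from the network's allocation pools for a port with a fixed IP:
-#
-#     free_ip = cls.get_unused_ip(network['id'], ip_version=4)
-#     port = cls.create_port(network,
-#                            fixed_ips=[{'ip_address': free_ip}])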
diff --git a/neutron/tests/api/base_routers.py b/neutron/tests/api/base_routers.py
deleted file mode 100644 (file)
index bbd069d..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.tests.api import base
-
-
-class BaseRouterTest(base.BaseAdminNetworkTest):
-    # NOTE(salv-orlando): This class inherits from BaseAdminNetworkTest
-    # as some router operations, such as enabling or disabling SNAT
-    # require admin credentials by default
-
-    @classmethod
-    def resource_setup(cls):
-        super(BaseRouterTest, cls).resource_setup()
-
-    def _cleanup_router(self, router):
-        self.delete_router(router)
-        self.routers.remove(router)
-
-    def _create_router(self, name, admin_state_up=False,
-                       external_network_id=None, enable_snat=None):
-        # associate a cleanup with created routers to avoid quota limits
-        router = self.create_router(name, admin_state_up,
-                                    external_network_id, enable_snat)
-        self.addCleanup(self._cleanup_router, router)
-        return router
-
-    def _delete_router(self, router_id, network_client=None):
-        client = network_client or self.client
-        client.delete_router(router_id)
-        # Asserting that the router is not found in the list
-        # after deletion
-        list_body = self.client.list_routers()
-        routers_list = list()
-        for router in list_body['routers']:
-            routers_list.append(router['id'])
-        self.assertNotIn(router_id, routers_list)
-
-    def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
-        interface = self.client.add_router_interface_with_subnet_id(
-            router_id, subnet_id)
-        self.addCleanup(self._remove_router_interface_with_subnet_id,
-                        router_id, subnet_id)
-        self.assertEqual(subnet_id, interface['subnet_id'])
-        return interface
-
-    def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
-        body = self.client.remove_router_interface_with_subnet_id(
-            router_id, subnet_id)
-        self.assertEqual(subnet_id, body['subnet_id'])
-
-    def _remove_router_interface_with_port_id(self, router_id, port_id):
-        body = self.client.remove_router_interface_with_port_id(router_id,
-                                                                port_id)
-        self.assertEqual(port_id, body['port_id'])
diff --git a/neutron/tests/api/base_security_groups.py b/neutron/tests/api/base_security_groups.py
deleted file mode 100644 (file)
index 37b0aaa..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base
-
-
-class BaseSecGroupTest(base.BaseNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(BaseSecGroupTest, cls).resource_setup()
-
-    def _create_security_group(self):
-        # Create a security group
-        name = data_utils.rand_name('secgroup-')
-        group_create_body = self.client.create_security_group(name=name)
-        self.addCleanup(self._delete_security_group,
-                        group_create_body['security_group']['id'])
-        self.assertEqual(group_create_body['security_group']['name'], name)
-        return group_create_body, name
-
-    def _delete_security_group(self, secgroup_id):
-        self.client.delete_security_group(secgroup_id)
-        # Asserting that the security group is not found in the list
-        # after deletion
-        list_body = self.client.list_security_groups()
-        secgroup_list = list()
-        for secgroup in list_body['security_groups']:
-            secgroup_list.append(secgroup['id'])
-        self.assertNotIn(secgroup_id, secgroup_list)
-
-    def _delete_security_group_rule(self, rule_id):
-        self.client.delete_security_group_rule(rule_id)
-        # Asserting that the security group rule is not found in the list
-        # after deletion
-        list_body = self.client.list_security_group_rules()
-        rules_list = list()
-        for rule in list_body['security_group_rules']:
-            rules_list.append(rule['id'])
-        self.assertNotIn(rule_id, rules_list)
diff --git a/neutron/tests/api/clients.py b/neutron/tests/api/clients.py
deleted file mode 100644 (file)
index 9fbdbac..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.services.identity.v2.token_client import TokenClient
-from tempest_lib.services.identity.v3.token_client import V3TokenClient
-
-from neutron.tests.tempest.common import cred_provider
-from neutron.tests.tempest import config
-from neutron.tests.tempest import manager
-from neutron.tests.tempest.services.identity.v2.json.identity_client import \
-    IdentityClientJSON
-from neutron.tests.tempest.services.identity.v3.json.credentials_client \
-     import CredentialsClientJSON
-from neutron.tests.tempest.services.identity.v3.json.endpoints_client import \
-    EndPointClientJSON
-from neutron.tests.tempest.services.identity.v3.json.identity_client import \
-    IdentityV3ClientJSON
-from neutron.tests.tempest.services.identity.v3.json.policy_client import \
-     PolicyClientJSON
-from neutron.tests.tempest.services.identity.v3.json.region_client import \
-     RegionClientJSON
-from neutron.tests.tempest.services.identity.v3.json.service_client import \
-    ServiceClientJSON
-from neutron.tests.tempest.services.network.json.network_client import \
-     NetworkClientJSON
-
-
-CONF = config.CONF
-
-
-class Manager(manager.Manager):
-
-    """
-    Top level manager for OpenStack tempest clients
-    """
-
-    default_params = {
-        'disable_ssl_certificate_validation':
-            CONF.identity.disable_ssl_certificate_validation,
-        'ca_certs': CONF.identity.ca_certificates_file,
-        'trace_requests': CONF.debug.trace_requests
-    }
-
-    # NOTE: Tempest falls back to the compute API timeout values when
-    # project-specific timeout values don't exist.
-    default_params_with_timeout_values = {
-        'build_interval': CONF.compute.build_interval,
-        'build_timeout': CONF.compute.build_timeout
-    }
-    default_params_with_timeout_values.update(default_params)
-
-    def __init__(self, credentials=None, service=None):
-        super(Manager, self).__init__(credentials=credentials)
-
-        self._set_identity_clients()
-
-        self.network_client = NetworkClientJSON(
-            self.auth_provider,
-            CONF.network.catalog_type,
-            CONF.network.region or CONF.identity.region,
-            endpoint_type=CONF.network.endpoint_type,
-            build_interval=CONF.network.build_interval,
-            build_timeout=CONF.network.build_timeout,
-            **self.default_params)
-
-    def _set_identity_clients(self):
-        params = {
-            'service': CONF.identity.catalog_type,
-            'region': CONF.identity.region,
-            'endpoint_type': 'adminURL'
-        }
-        params.update(self.default_params_with_timeout_values)
-
-        self.identity_client = IdentityClientJSON(self.auth_provider,
-                                                  **params)
-        self.identity_v3_client = IdentityV3ClientJSON(self.auth_provider,
-                                                       **params)
-        self.endpoints_client = EndPointClientJSON(self.auth_provider,
-                                                   **params)
-        self.service_client = ServiceClientJSON(self.auth_provider, **params)
-        self.policy_client = PolicyClientJSON(self.auth_provider, **params)
-        self.region_client = RegionClientJSON(self.auth_provider, **params)
-        self.credentials_client = CredentialsClientJSON(self.auth_provider,
-                                                        **params)
-        # Token clients do not use the catalog. They only need default_params.
-        self.token_client = TokenClient(CONF.identity.uri,
-                                        **self.default_params)
-        if CONF.identity_feature_enabled.api_v3:
-            self.token_v3_client = V3TokenClient(CONF.identity.uri_v3,
-                                                 **self.default_params)
-
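-
-# Construction sketch (the 'user' credential type is assumed from the
-# tempest-lib provider): a Manager bound to ordinary tenant credentials
-# exposes the network client used throughout these tests:
-#
-#     creds = cred_provider.get_configured_credentials('user')
-#     network_client = Manager(credentials=creds).network_client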
-
-class AdminManager(Manager):
-
-    """
-    Manager object that uses the admin credentials for its
-    managed client objects
-    """
-
-    def __init__(self, service=None):
-        super(AdminManager, self).__init__(
-            credentials=cred_provider.get_configured_credentials(
-                'identity_admin'),
-            service=service)
diff --git a/neutron/tests/api/test_address_scopes.py b/neutron/tests/api/test_address_scopes.py
deleted file mode 100644 (file)
index 8696e09..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.api import clients
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-ADDRESS_SCOPE_NAME = 'smoke-address-scope'
-
-
-class AddressScopeTestBase(base.BaseNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(AddressScopeTestBase, cls).resource_setup()
-        if not test.is_extension_enabled('address-scope', 'network'):
-            msg = "address-scope extension not enabled."
-            raise cls.skipException(msg)
-        try:
-            creds = cls.isolated_creds.get_admin_creds()
-            cls.os_adm = clients.Manager(credentials=creds)
-        except NotImplementedError:
-            msg = ("Missing Administrative Network API credentials "
-                   "in configuration.")
-            raise cls.skipException(msg)
-        cls.admin_client = cls.os_adm.network_client
-
-    def _create_address_scope(self, is_admin=False, **kwargs):
-        name = data_utils.rand_name(ADDRESS_SCOPE_NAME)
-        return self.create_address_scope(name=name, is_admin=is_admin,
-                                         **kwargs)
-
-    def _test_update_address_scope_helper(self, is_admin=False, shared=None):
-        address_scope = self._create_address_scope(is_admin=is_admin,
-                                                   ip_version=4)
-
-        if is_admin:
-            client = self.admin_client
-        else:
-            client = self.client
-
-        kwargs = {'name': 'new_name'}
-        if shared is not None:
-            kwargs['shared'] = shared
-
-        client.update_address_scope(address_scope['id'], **kwargs)
-        body = client.show_address_scope(address_scope['id'])
-        address_scope = body['address_scope']
-        self.assertEqual('new_name', address_scope['name'])
-        return address_scope
-
-
-class AddressScopeTest(AddressScopeTestBase):
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('045f9294-8b1a-4848-b6a8-edf1b41e9d06')
-    def test_tenant_create_list_address_scope(self):
-        address_scope = self._create_address_scope(ip_version=4)
-        body = self.client.list_address_scopes()
-        returned_address_scopes = body['address_scopes']
-        self.assertIn(address_scope['id'],
-                      [a_s['id'] for a_s in returned_address_scopes],
-                      "Created address scope id should be in the list")
-        self.assertIn(address_scope['name'],
-                      [a_s['name'] for a_s in returned_address_scopes],
-                      "Created address scope name should be in the list")
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('85e0326b-4c75-4b92-bd6e-7c7de6aaf05c')
-    def test_show_address_scope(self):
-        address_scope = self._create_address_scope(ip_version=4)
-        body = self.client.show_address_scope(
-            address_scope['id'])
-        returned_address_scope = body['address_scope']
-        self.assertEqual(address_scope['id'], returned_address_scope['id'])
-        self.assertEqual(address_scope['name'],
-                         returned_address_scope['name'])
-        self.assertFalse(returned_address_scope['shared'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('85a259b2-ace6-4e32-9657-a9a392b452aa')
-    def test_tenant_update_address_scope(self):
-        self._test_update_address_scope_helper()
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('22b3b600-72a8-4b60-bc94-0f29dd6271df')
-    def test_delete_address_scope(self):
-        address_scope = self._create_address_scope(ip_version=4)
-        self.client.delete_address_scope(address_scope['id'])
-        self.assertRaises(lib_exc.NotFound, self.client.show_address_scope,
-                          address_scope['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('5a06c287-8036-4d04-9d78-def8e06d43df')
-    def test_admin_create_shared_address_scope(self):
-        address_scope = self._create_address_scope(is_admin=True, shared=True,
-                                                   ip_version=4)
-        body = self.admin_client.show_address_scope(
-            address_scope['id'])
-        returned_address_scope = body['address_scope']
-        self.assertEqual(address_scope['name'],
-                         returned_address_scope['name'])
-        self.assertTrue(returned_address_scope['shared'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e9e1ccdd-9ccd-4076-9503-71820529508b')
-    def test_admin_update_shared_address_scope(self):
-        address_scope = self._test_update_address_scope_helper(is_admin=True,
-                                                               shared=True)
-        self.assertTrue(address_scope['shared'])
diff --git a/neutron/tests/api/test_address_scopes_negative.py b/neutron/tests/api/test_address_scopes_negative.py
deleted file mode 100644 (file)
index feed39b..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import test_address_scopes
-from neutron.tests.tempest import test
-
-
-class AddressScopeTestNegative(test_address_scopes.AddressScopeTestBase):
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('9c92ec34-0c50-4104-aa47-9ce98d5088df')
-    def test_tenant_create_shared_address_scope(self):
-        self.assertRaises(lib_exc.Forbidden, self._create_address_scope,
-                          shared=True, ip_version=4)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('a857b61e-bf53-4fab-b21a-b0daaf81b5bd')
-    def test_tenant_update_address_scope_shared_true(self):
-        self.assertRaises(lib_exc.Forbidden,
-                          self._test_update_address_scope_helper, shared=True)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('a859ef2f-9c76-4e2e-ba0f-e0339a489e8c')
-    def test_tenant_update_address_scope_shared_false(self):
-        self.assertRaises(lib_exc.Forbidden,
-                          self._test_update_address_scope_helper, shared=False)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('9b6dd7ad-cabb-4f55-bd5e-e61176ef41f6')
-    def test_get_non_existent_address_scope(self):
-        non_exist_id = data_utils.rand_name('address_scope')
-        self.assertRaises(lib_exc.NotFound, self.client.show_address_scope,
-                          non_exist_id)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('ef213552-f2da-487d-bf4a-e1705d115ff1')
-    def test_tenant_get_not_shared_admin_address_scope(self):
-        address_scope = self._create_address_scope(is_admin=True,
-                                                   ip_version=4)
-        # Non-shared admin address scopes cannot be retrieved by tenant users.
-        self.assertRaises(lib_exc.NotFound, self.client.show_address_scope,
-                          address_scope['id'])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('5c25dc6a-1e92-467a-9cc7-cda74b6003db')
-    def test_delete_non_existent_address_scope(self):
-        non_exist_id = data_utils.rand_name('address_scope')
-        self.assertRaises(lib_exc.NotFound, self.client.delete_address_scope,
-                          non_exist_id)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('47c25dc5-e886-4a84-88c3-ac5031969661')
-    def test_update_non_existent_address_scope(self):
-        non_exist_id = data_utils.rand_name('address_scope')
-        self.assertRaises(lib_exc.NotFound, self.client.update_address_scope,
-                          non_exist_id, name='foo-name')
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('702d0515-82cb-4207-b0d9-703336e54665')
-    def test_update_shared_address_scope_to_unshare(self):
-        address_scope = self._create_address_scope(is_admin=True, shared=True,
-                                                   ip_version=4)
-        self.assertRaises(lib_exc.BadRequest,
-                          self.admin_client.update_address_scope,
-                          address_scope['id'], name='new-name', shared=False)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('1e471e5c-6f9c-437a-9257-fd9bc4b6f0fb')
-    def test_delete_address_scope_associated_with_subnetpool(self):
-        address_scope = self._create_address_scope(ip_version=4)
-        prefixes = [u'10.11.12.0/24']
-        subnetpool_data = {
-            'name': 'foo-subnetpool',
-            'min_prefixlen': '29', 'prefixes': prefixes,
-            'address_scope_id': address_scope['id']}
-        self.create_subnetpool(**subnetpool_data)
-        self.assertRaises(lib_exc.Conflict, self.client.delete_address_scope,
-                          address_scope['id'])
diff --git a/neutron/tests/api/test_allowed_address_pair.py b/neutron/tests/api/test_allowed_address_pair.py
deleted file mode 100644 (file)
index 5209a8f..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-
-from neutron.tests.api import base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class AllowedAddressPairTestJSON(base.BaseNetworkTest):
-
-    """
-    Tests the Neutron Allowed Address Pair API extension using the Tempest
-    REST client. The following API operations are tested with this extension:
-
-        create port
-        list ports
-        update port
-        show port
-
-    v2.0 of the Neutron API is assumed. It is also assumed that the following
-    options are defined in the [network-feature-enabled] section of
-    etc/tempest.conf
-
-        api_extensions
-    """
-
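-    # Request shape exercised below (addresses illustrative):
-    #
-    #     client.create_port(
-    #         network_id=net_id,
-    #         allowed_address_pairs=[{'ip_address': '10.0.0.5',
-    #                                 'mac_address': 'fa:16:3e:00:00:01'}])
-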
-    @classmethod
-    def resource_setup(cls):
-        super(AllowedAddressPairTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('allowed-address-pairs', 'network'):
-            msg = "Allowed Address Pairs extension not enabled."
-            raise cls.skipException(msg)
-        cls.network = cls.create_network()
-        cls.create_subnet(cls.network)
-        port = cls.create_port(cls.network)
-        cls.ip_address = port['fixed_ips'][0]['ip_address']
-        cls.mac_address = port['mac_address']
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86c3529b-1231-40de-803c-00e40882f043')
-    def test_create_list_port_with_address_pair(self):
-        # Create port with allowed address pair attribute
-        allowed_address_pairs = [{'ip_address': self.ip_address,
-                                  'mac_address': self.mac_address}]
-        body = self.client.create_port(
-            network_id=self.network['id'],
-            allowed_address_pairs=allowed_address_pairs)
-        port_id = body['port']['id']
-        self.addCleanup(self.client.delete_port, port_id)
-
-        # Confirm port was created with allowed address pair attribute
-        body = self.client.list_ports()
-        ports = body['ports']
-        port = [p for p in ports if p['id'] == port_id]
-        msg = 'Created port not found in list of ports returned by Neutron'
-        self.assertTrue(port, msg)
-        self._confirm_allowed_address_pair(port[0], self.ip_address)
-
-    # Helper, not a test case, so it carries no test attribute decorators.
-    def _update_port_with_address(self, address, mac_address=None, **kwargs):
-        # Create a port without allowed address pair
-        body = self.client.create_port(network_id=self.network['id'])
-        port_id = body['port']['id']
-        self.addCleanup(self.client.delete_port, port_id)
-        if mac_address is None:
-            mac_address = self.mac_address
-
-        # Update allowed address pair attribute of port
-        allowed_address_pairs = [{'ip_address': address,
-                                  'mac_address': mac_address}]
-        if kwargs:
-            allowed_address_pairs.append(kwargs['allowed_address_pairs'])
-        body = self.client.update_port(
-            port_id, allowed_address_pairs=allowed_address_pairs)
-        allowed_address_pair = body['port']['allowed_address_pairs']
-        self.assertEqual(allowed_address_pair, allowed_address_pairs)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('9599b337-272c-47fd-b3cf-509414414ac4')
-    def test_update_port_with_address_pair(self):
-        # Update port with allowed address pair
-        self._update_port_with_address(self.ip_address)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('4d6d178f-34f6-4bff-a01c-0a2f8fe909e4')
-    def test_update_port_with_cidr_address_pair(self):
-        # Update allowed address pair with cidr
-        cidr = str(netaddr.IPNetwork(CONF.network.tenant_network_cidr))
-        self._update_port_with_address(cidr)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
-    def test_update_port_with_multiple_ip_mac_address_pair(self):
-        # Create an ip_address and mac_address through port create
-        resp = self.client.create_port(network_id=self.network['id'])
-        newportid = resp['port']['id']
-        self.addCleanup(self.client.delete_port, newportid)
-        ipaddress = resp['port']['fixed_ips'][0]['ip_address']
-        macaddress = resp['port']['mac_address']
-
-        # Update the port's allowed address pairs with multiple IPs and MACs
-        allowed_address_pairs = {'ip_address': ipaddress,
-                                 'mac_address': macaddress}
-        self._update_port_with_address(
-            self.ip_address, self.mac_address,
-            allowed_address_pairs=allowed_address_pairs)
-
-    def _confirm_allowed_address_pair(self, port, ip):
-        msg = 'Port allowed address pairs should not be empty'
-        self.assertTrue(port['allowed_address_pairs'], msg)
-        ip_address = port['allowed_address_pairs'][0]['ip_address']
-        mac_address = port['allowed_address_pairs'][0]['mac_address']
-        self.assertEqual(ip_address, ip)
-        self.assertEqual(mac_address, self.mac_address)
-
-
-class AllowedAddressPairIpV6TestJSON(AllowedAddressPairTestJSON):
-    _ip_version = 6
diff --git a/neutron/tests/api/test_dhcp_ipv6.py b/neutron/tests/api/test_dhcp_ipv6.py
deleted file mode 100644 (file)
index 0cc2d76..0000000
+++ /dev/null
@@ -1,408 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-import random
-
-import six
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.common import constants
-from neutron.tests.api import base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class NetworksTestDHCPv6(base.BaseNetworkTest):
-    """Test DHCPv6-specific features using SLAAC, stateless and stateful
-    settings for subnets, and check dual-stack functionality
-    (IPv4 + IPv6 together).
-    The tests include:
-        generating a SLAAC EUI-64 address in subnets with various settings
-        receiving SLAAC addresses in combinations of various subnets
-        receiving stateful IPv6 addresses
-        addressing in subnets with a router
-    """
-
-    _ip_version = 6
-
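-    # The modes under test map onto two subnet attributes; a stateless
-    # subnet, for example, would be created as (request shape illustrative):
-    #
-    #     cls.create_subnet(net, ip_version=6,
-    #                       ipv6_ra_mode='dhcpv6-stateless',
-    #                       ipv6_address_mode='dhcpv6-stateless')
-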
-    @classmethod
-    def skip_checks(cls):
-        msg = None
-        if not CONF.network_feature_enabled.ipv6:
-            msg = "IPv6 is not enabled"
-        elif not CONF.network_feature_enabled.ipv6_subnet_attributes:
-            msg = "DHCPv6 attributes are not enabled."
-        if msg:
-            raise cls.skipException(msg)
-
-    @classmethod
-    def resource_setup(cls):
-        super(NetworksTestDHCPv6, cls).resource_setup()
-        cls.network = cls.create_network()
-
-    def _remove_from_list_by_index(self, things_list, elem):
-        for index, i in enumerate(things_list):
-            if i['id'] == elem['id']:
-                break
-        del things_list[index]
-
-    def _clean_network(self):
-        body = self.client.list_ports()
-        ports = body['ports']
-        for port in ports:
-            if (port['device_owner'].startswith(
-                    constants.DEVICE_OWNER_ROUTER_INTF)
-                and port['device_id'] in [r['id'] for r in self.routers]):
-                self.client.remove_router_interface_with_port_id(
-                    port['device_id'], port['id']
-                )
-            else:
-                if port['id'] in [p['id'] for p in self.ports]:
-                    self.client.delete_port(port['id'])
-                    self._remove_from_list_by_index(self.ports, port)
-        body = self.client.list_subnets()
-        subnets = body['subnets']
-        for subnet in subnets:
-            if subnet['id'] in [s['id'] for s in self.subnets]:
-                self.client.delete_subnet(subnet['id'])
-                self._remove_from_list_by_index(self.subnets, subnet)
-        body = self.client.list_routers()
-        routers = body['routers']
-        for router in routers:
-            if router['id'] in [r['id'] for r in self.routers]:
-                self.client.delete_router(router['id'])
-                self._remove_from_list_by_index(self.routers, router)
-
-    def _get_ips_from_subnet(self, **kwargs):
-        subnet = self.create_subnet(self.network, **kwargs)
-        port_mac = data_utils.rand_mac_address()
-        port = self.create_port(self.network, mac_address=port_mac)
-        real_ip = next(iter(port['fixed_ips']), None)['ip_address']
-        eui_ip = data_utils.get_ipv6_addr_by_EUI64(subnet['cidr'],
-                                                   port_mac).format()
-        return real_ip, eui_ip
-
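-    # EUI-64 sketch (values illustrative): for prefix 2003::/64 and MAC
-    # fa:16:3e:aa:bb:cc, the interface id flips the universal/local bit
-    # (fa -> f8) and splices in ff:fe, so get_ipv6_addr_by_EUI64() yields
-    # 2003::f816:3eff:feaa:bbcc.
-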
-    @test.idempotent_id('e5517e62-6f16-430d-a672-f80875493d4c')
-    def test_dhcpv6_stateless_eui64(self):
-        """When subnets configured with IPv6 SLAAC (AOM=100) and DHCPv6
-        stateless (AOM=110) both for radvd and dnsmasq, port shall receive IP
-        address calculated from its MAC.
-        """
-        for ra_mode, add_mode in (
-                ('slaac', 'slaac'),
-                ('dhcpv6-stateless', 'dhcpv6-stateless'),
-        ):
-            kwargs = {'ipv6_ra_mode': ra_mode,
-                      'ipv6_address_mode': add_mode}
-            real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
-            self._clean_network()
-            self.assertEqual(eui_ip, real_ip,
-                             ('Real port IP is %s, but shall be %s when '
-                              'ipv6_ra_mode=%s and ipv6_address_mode=%s') % (
-                                 real_ip, eui_ip, ra_mode, add_mode))
-
-    @test.idempotent_id('ae2f4a5d-03ff-4c42-a3b0-ce2fcb7ea832')
-    def test_dhcpv6_stateless_no_ra(self):
-        """When subnets configured with IPv6 SLAAC and DHCPv6 stateless
-        and there is no radvd, port shall receive IP address calculated
-        from its MAC and mask of subnet.
-        """
-        for ra_mode, add_mode in (
-                (None, 'slaac'),
-                (None, 'dhcpv6-stateless'),
-        ):
-            kwargs = {'ipv6_ra_mode': ra_mode,
-                      'ipv6_address_mode': add_mode}
-            kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
-            real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
-            self._clean_network()
-            self.assertEqual(eui_ip, real_ip,
-                             ('Real port IP %s shall be equal to EUI-64 %s '
-                              'when ipv6_ra_mode=%s, ipv6_address_mode=%s') % (
-                                 real_ip, eui_ip,
-                                 ra_mode if ra_mode else "Off",
-                                 add_mode if add_mode else "Off"))
-
-    @test.idempotent_id('81f18ef6-95b5-4584-9966-10d480b7496a')
-    def test_dhcpv6_invalid_options(self):
-        """Different configurations for radvd and dnsmasq are not allowed"""
-        for ra_mode, add_mode in (
-                ('dhcpv6-stateless', 'dhcpv6-stateful'),
-                ('dhcpv6-stateless', 'slaac'),
-                ('slaac', 'dhcpv6-stateful'),
-                ('dhcpv6-stateful', 'dhcpv6-stateless'),
-                ('dhcpv6-stateful', 'slaac'),
-                ('slaac', 'dhcpv6-stateless'),
-        ):
-            kwargs = {'ipv6_ra_mode': ra_mode,
-                      'ipv6_address_mode': add_mode}
-            self.assertRaises(lib_exc.BadRequest,
-                              self.create_subnet,
-                              self.network,
-                              **kwargs)
-
-    @test.idempotent_id('21635b6f-165a-4d42-bf49-7d195e47342f')
-    def test_dhcpv6_stateless_no_ra_no_dhcp(self):
-        """If no radvd option and no dnsmasq option is configured
-        port shall receive IP from fixed IPs list of subnet.
-        """
-        real_ip, eui_ip = self._get_ips_from_subnet()
-        self._clean_network()
-        self.assertNotEqual(eui_ip, real_ip,
-                            ('Real port IP %s equal to EUI-64 %s when '
-                             'ipv6_ra_mode=Off and ipv6_address_mode=Off, '
-                             'but shall be taken from fixed IPs') % (
-                                real_ip, eui_ip))
-
-    @test.idempotent_id('4544adf7-bb5f-4bdc-b769-b3e77026cef2')
-    def test_dhcpv6_two_subnets(self):
-        """When one IPv6 subnet configured with IPv6 SLAAC or DHCPv6 stateless
-        and other IPv6 is with DHCPv6 stateful, port shall receive EUI-64 IP
-        addresses from first subnet and DHCPv6 address from second one.
-        Order of subnet creating should be unimportant.
-        """
-        for order in ("slaac_first", "dhcp_first"):
-            for ra_mode, add_mode in (
-                    ('slaac', 'slaac'),
-                    ('dhcpv6-stateless', 'dhcpv6-stateless'),
-            ):
-                kwargs = {'ipv6_ra_mode': ra_mode,
-                          'ipv6_address_mode': add_mode}
-                kwargs_dhcp = {'ipv6_address_mode': 'dhcpv6-stateful'}
-                if order == "slaac_first":
-                    subnet_slaac = self.create_subnet(self.network, **kwargs)
-                    subnet_dhcp = self.create_subnet(
-                        self.network, **kwargs_dhcp)
-                else:
-                    subnet_dhcp = self.create_subnet(
-                        self.network, **kwargs_dhcp)
-                    subnet_slaac = self.create_subnet(self.network, **kwargs)
-                port_mac = data_utils.rand_mac_address()
-                dhcp_ip = subnet_dhcp["allocation_pools"][0]["start"]
-                eui_ip = data_utils.get_ipv6_addr_by_EUI64(
-                    subnet_slaac['cidr'],
-                    port_mac
-                ).format()
-                # TODO(sergsh): remove this when 1219795 is fixed
-                dhcp_ip = [dhcp_ip, (netaddr.IPAddress(dhcp_ip) + 1).format()]
-                port = self.create_port(self.network, mac_address=port_mac)
-                real_ips = dict([(k['subnet_id'], k['ip_address'])
-                                 for k in port['fixed_ips']])
-                real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
-                                             for sub in (subnet_dhcp,
-                                                         subnet_slaac)]
-                self.client.delete_port(port['id'])
-                self.ports.pop()
-                body = self.client.list_ports()
-                ports_id_list = [i['id'] for i in body['ports']]
-                self.assertNotIn(port['id'], ports_id_list)
-                self._clean_network()
-                self.assertEqual(real_eui_ip,
-                                 eui_ip,
-                                 'Real IP is {0}, but shall be {1}'.format(
-                                     real_eui_ip,
-                                     eui_ip))
-                self.assertIn(
-                    real_dhcp_ip, dhcp_ip,
-                    'Real IP is {0}, but shall be one from {1}'.format(
-                        real_dhcp_ip,
-                        str(dhcp_ip)))
-
-    @test.idempotent_id('4256c61d-c538-41ea-9147-3c450c36669e')
-    def test_dhcpv6_64_subnets(self):
-        """When a Network contains two subnets, one being an IPv6 subnet
-        configured with ipv6_ra_mode either as slaac or dhcpv6-stateless,
-        and the other subnet being an IPv4 subnet, a port attached to the
-        network shall receive IP addresses from the subnets as follows: An
-        IPv6 address calculated using EUI-64 from the first subnet, and an
-        IPv4 address from the second subnet. The ordering of the subnets
-        that the port is associated with should not affect this behavior.
-        """
-        for order in ("slaac_first", "dhcp_first"):
-            for ra_mode, add_mode in (
-                    ('slaac', 'slaac'),
-                    ('dhcpv6-stateless', 'dhcpv6-stateless'),
-            ):
-                kwargs = {'ipv6_ra_mode': ra_mode,
-                          'ipv6_address_mode': add_mode}
-                if order == "slaac_first":
-                    subnet_slaac = self.create_subnet(self.network, **kwargs)
-                    subnet_dhcp = self.create_subnet(
-                        self.network, ip_version=4)
-                else:
-                    subnet_dhcp = self.create_subnet(
-                        self.network, ip_version=4)
-                    subnet_slaac = self.create_subnet(self.network, **kwargs)
-                port_mac = data_utils.rand_mac_address()
-                dhcp_ip = subnet_dhcp["allocation_pools"][0]["start"]
-                eui_ip = data_utils.get_ipv6_addr_by_EUI64(
-                    subnet_slaac['cidr'],
-                    port_mac
-                ).format()
-                # TODO(sergsh): remove this when 1219795 is fixed
-                dhcp_ip = [dhcp_ip, (netaddr.IPAddress(dhcp_ip) + 1).format()]
-                port = self.create_port(self.network, mac_address=port_mac)
-                real_ips = dict([(k['subnet_id'], k['ip_address'])
-                                 for k in port['fixed_ips']])
-                real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
-                                             for sub in (subnet_dhcp,
-                                                         subnet_slaac)]
-                self._clean_network()
-                self.assertTrue({real_eui_ip,
-                                 real_dhcp_ip}.issubset([eui_ip] + dhcp_ip))
-                self.assertEqual(real_eui_ip,
-                                 eui_ip,
-                                 'Real IP is {0}, but shall be {1}'.format(
-                                     real_eui_ip,
-                                     eui_ip))
-                self.assertIn(
-                    real_dhcp_ip, dhcp_ip,
-                    'Real IP is {0}, but shall be one from {1}'.format(
-                        real_dhcp_ip,
-                        str(dhcp_ip)))
-
-    @test.idempotent_id('4ab211a0-276f-4552-9070-51e27f58fecf')
-    def test_dhcp_stateful(self):
-        """With all options below, DHCPv6 shall allocate first
-        address from subnet pool to port.
-        """
-        for ra_mode, add_mode in (
-                ('dhcpv6-stateful', 'dhcpv6-stateful'),
-                ('dhcpv6-stateful', None),
-                (None, 'dhcpv6-stateful'),
-        ):
-            kwargs = {'ipv6_ra_mode': ra_mode,
-                      'ipv6_address_mode': add_mode}
-            kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
-            subnet = self.create_subnet(self.network, **kwargs)
-            port = self.create_port(self.network)
-            port_ip = next(iter(port['fixed_ips']), None)['ip_address']
-            dhcp_ip = subnet["allocation_pools"][0]["start"]
-            # TODO(sergsh): remove this when 1219795 is fixed
-            dhcp_ip = [dhcp_ip, (netaddr.IPAddress(dhcp_ip) + 1).format()]
-            self._clean_network()
-            self.assertIn(
-                port_ip, dhcp_ip,
-                'Real IP is {0}, but shall be one from {1}'.format(
-                    port_ip,
-                    str(dhcp_ip)))
-
-    @test.idempotent_id('51a5e97f-f02e-4e4e-9a17-a69811d300e3')
-    def test_dhcp_stateful_fixedips(self):
-        """With all options below, port shall be able to get
-        requested IP from fixed IP range not depending on
-        DHCPv6 stateful (not SLAAC!) settings configured.
-        """
-        for ra_mode, add_mode in (
-                ('dhcpv6-stateful', 'dhcpv6-stateful'),
-                ('dhcpv6-stateful', None),
-                (None, 'dhcpv6-stateful'),
-        ):
-            kwargs = {'ipv6_ra_mode': ra_mode,
-                      'ipv6_address_mode': add_mode}
-            kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
-            subnet = self.create_subnet(self.network, **kwargs)
-            ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
-                                       subnet["allocation_pools"][0]["end"])
-            ip = netaddr.IPAddress(random.randrange(ip_range.first,
-                                                    ip_range.last)).format()
-            port = self.create_port(self.network,
-                                    fixed_ips=[{'subnet_id': subnet['id'],
-                                                'ip_address': ip}])
-            port_ip = next(iter(port['fixed_ips']), None)['ip_address']
-            self._clean_network()
-            self.assertEqual(port_ip, ip,
-                             ("Port IP %s is not as fixed IP from "
-                              "port create request: %s") % (
-                                 port_ip, ip))
-
-    @test.idempotent_id('98244d88-d990-4570-91d4-6b25d70d08af')
-    def test_dhcp_stateful_fixedips_outrange(self):
-        """When port gets IP address from fixed IP range it
-        shall be checked if it's from subnets range.
-        """
-        kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
-                  'ipv6_address_mode': 'dhcpv6-stateful'}
-        subnet = self.create_subnet(self.network, **kwargs)
-        ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
-                                   subnet["allocation_pools"][0]["end"])
-        for i in range(1, 3):
-            ip = netaddr.IPAddress(ip_range.last + i).format()
-            self.assertRaises(lib_exc.BadRequest,
-                              self.create_port,
-                              self.network,
-                              fixed_ips=[{'subnet_id': subnet['id'],
-                                          'ip_address': ip}])
-
-    @test.idempotent_id('57b8302b-cba9-4fbb-8835-9168df029051')
-    def test_dhcp_stateful_fixedips_duplicate(self):
-        """When port gets IP address from fixed IP range it
-        shall be checked if it's not duplicate.
-        """
-        kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
-                  'ipv6_address_mode': 'dhcpv6-stateful'}
-        subnet = self.create_subnet(self.network, **kwargs)
-        ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
-                                   subnet["allocation_pools"][0]["end"])
-        ip = netaddr.IPAddress(random.randrange(
-            ip_range.first, ip_range.last)).format()
-        self.create_port(self.network,
-                         fixed_ips=[
-                             {'subnet_id': subnet['id'],
-                              'ip_address': ip}])
-        self.assertRaisesRegexp(lib_exc.Conflict,
-                                "object with that identifier already exists",
-                                self.create_port,
-                                self.network,
-                                fixed_ips=[{'subnet_id': subnet['id'],
-                                            'ip_address': ip}])
-
-    def _create_subnet_router(self, kwargs):
-        subnet = self.create_subnet(self.network, **kwargs)
-        router = self.create_router(
-            router_name=data_utils.rand_name("routerv6-"),
-            admin_state_up=True)
-        port = self.create_router_interface(router['id'],
-                                            subnet['id'])
-        body = self.client.show_port(port['port_id'])
-        return subnet, body['port']
-
-    @test.idempotent_id('e98f65db-68f4-4330-9fea-abd8c5192d4d')
-    def test_dhcp_stateful_router(self):
-        """With all options below the router interface shall
-        receive DHCPv6 IP address from allocation pool.
-        """
-        for ra_mode, add_mode in (
-                ('dhcpv6-stateful', 'dhcpv6-stateful'),
-                ('dhcpv6-stateful', None),
-        ):
-            kwargs = {'ipv6_ra_mode': ra_mode,
-                      'ipv6_address_mode': add_mode}
-            kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
-            subnet, port = self._create_subnet_router(kwargs)
-            port_ip = next(iter(port['fixed_ips']), None)['ip_address']
-            self._clean_network()
-            self.assertEqual(port_ip, subnet['gateway_ip'],
-                             ("Port IP %s is not as first IP from "
-                              "subnets allocation pool: %s") % (
-                                 port_ip, subnet['gateway_ip']))
-
-    def tearDown(self):
-        self._clean_network()
-        super(NetworksTestDHCPv6, self).tearDown()
diff --git a/neutron/tests/api/test_extension_driver_port_security.py b/neutron/tests/api/test_extension_driver_port_security.py
deleted file mode 100644 (file)
index 6e5d32e..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import ddt
-
-from neutron.tests.api import base
-from neutron.tests.api import base_security_groups as base_security
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-from tempest_lib import exceptions as lib_exc
-
-CONF = config.CONF
-FAKE_IP = '10.0.0.1'
-FAKE_MAC = '00:25:64:e8:19:dd'
-
-
-@ddt.ddt
-class PortSecTest(base_security.BaseSecGroupTest,
-                  base.BaseNetworkTest):
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('7c338ddf-e64e-4118-bd33-e49a1f2f1495')
-    @test.requires_ext(extension='port-security', service='network')
-    def test_port_sec_default_value(self):
-        # The default port-sec value is True; the port attribute inherits
-        # the network's port-sec value when it is not specified in the API
-        network = self.create_network()
-        self.assertTrue(network['port_security_enabled'])
-        self.create_subnet(network)
-        port = self.create_port(network)
-        self.assertTrue(port['port_security_enabled'])
-
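-    # ddt expands the data sets below into one test case each: an explicit
-    # port-level port_security_enabled always overrides the network default.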
-    @test.attr(type='smoke')
-    @test.idempotent_id('e60eafd2-31de-4c38-8106-55447d033b57')
-    @test.requires_ext(extension='port-security', service='network')
-    @ddt.unpack
-    @ddt.data({'port_sec_net': False, 'port_sec_port': True, 'expected': True},
-              {'port_sec_net': True, 'port_sec_port': False,
-               'expected': False})
-    def test_port_sec_specific_value(self, port_sec_net, port_sec_port,
-                                     expected):
-        network = self.create_network(port_security_enabled=port_sec_net)
-        self.create_subnet(network)
-        port = self.create_port(network, port_security_enabled=port_sec_port)
-        self.assertEqual(network['port_security_enabled'], port_sec_net)
-        self.assertEqual(port['port_security_enabled'], expected)
-
-    @test.attr(type=['smoke'])
-    @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60')
-    @test.requires_ext(extension='port-security', service='network')
-    def test_create_port_sec_with_security_group(self):
-        network = self.create_network(port_security_enabled=True)
-        self.create_subnet(network)
-
-        port = self.create_port(network, security_groups=[])
-        self.assertTrue(port['port_security_enabled'])
-        self.client.delete_port(port['id'])
-
-        port = self.create_port(network, security_groups=[],
-                                port_security_enabled=False)
-        self.assertFalse(port['port_security_enabled'])
-        self.assertEmpty(port['security_groups'])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60')
-    @test.requires_ext(extension='port-security', service='network')
-    def test_port_sec_update_port_failed(self):
-        network = self.create_network()
-        self.create_subnet(network)
-
-        sec_group_body, sec_group_name = self._create_security_group()
-        port = self.create_port(network)
-
-        # Disabling port security while a security group is attached
-        # must raise a Conflict
-        self.assertRaises(lib_exc.Conflict, self.update_port, port,
-                          port_security_enabled=False)
-
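-        # Clearing the security groups in the same request is what allows
-        # port security to be switched off here.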
-        port = self.update_port(port, security_groups=[],
-                                port_security_enabled=False)
-        self.assertEmpty(port['security_groups'])
-        self.assertFalse(port['port_security_enabled'])
-        port = self.update_port(
-            port, security_groups=[sec_group_body['security_group']['id']],
-            port_security_enabled=True)
-
-        self.assertNotEmpty(port['security_groups'])
-        self.assertTrue(port['port_security_enabled'])
-
-        # Remove security group from port before deletion on resource_cleanup
-        self.update_port(port, security_groups=[])
-
-    @test.attr(type=['smoke'])
-    @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60')
-    @test.requires_ext(extension='port-security', service='network')
-    def test_port_sec_update_pass(self):
-        network = self.create_network()
-        self.create_subnet(network)
-        sec_group, _ = self._create_security_group()
-        sec_group_id = sec_group['security_group']['id']
-        port = self.create_port(network, security_groups=[sec_group_id],
-                                port_security_enabled=True)
-
-        self.assertNotEmpty(port['security_groups'])
-        self.assertTrue(port['port_security_enabled'])
-
-        port = self.update_port(port, security_groups=[])
-        self.assertEmpty(port['security_groups'])
-        self.assertTrue(port['port_security_enabled'])
-
-        port = self.update_port(port, security_groups=[sec_group_id])
-        self.assertNotEmpty(port['security_groups'])
-        port = self.update_port(port, security_groups=[],
-                                port_security_enabled=False)
-        self.assertEmpty(port['security_groups'])
-        self.assertFalse(port['port_security_enabled'])
-
-    @test.attr(type=['smoke'])
-    @test.idempotent_id('2df6114b-b8c3-48a1-96e8-47f08159d35c')
-    @test.requires_ext(extension='port-security', service='network')
-    def test_delete_with_port_sec(self):
-        network = self.create_network(port_security_enabled=True)
-        port = self.create_port(network=network,
-                                port_security_enabled=True)
-        self.client.delete_port(port['id'])
-        self.assertTrue(self.client.is_resource_deleted('port', port['id']))
-        self.client.delete_network(network['id'])
-        self.assertTrue(
-            self.client.is_resource_deleted('network', network['id']))
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('ed93e453-3f8d-495e-8e7e-b0e268c2ebd9')
-    def test_allow_address_pairs(self):
-        network = self.create_network()
-        self.create_subnet(network)
-        port = self.create_port(network=network, port_security_enabled=False)
-        allowed_address_pairs = [{'ip_address': FAKE_IP,
-                                  'mac_address': FAKE_MAC}]
-
-        # Setting allowed-address-pairs while port security is disabled
-        # must raise a Conflict
-        self.assertRaises(lib_exc.Conflict,
-                          self.update_port, port,
-                          allowed_address_pairs=allowed_address_pairs)
diff --git a/neutron/tests/api/test_extensions.py b/neutron/tests/api/test_extensions.py
deleted file mode 100644 (file)
index 1e3d824..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-from neutron.tests.api import base
-from neutron.tests.tempest import test
-
-
-class ExtensionsTestJSON(base.BaseNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        List all available extensions
-
-    v2.0 of the Neutron API is assumed.
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(ExtensionsTestJSON, cls).resource_setup()
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('ef28c7e6-e646-4979-9d67-deb207bc5564')
-    def test_list_show_extensions(self):
-        # List available extensions for the tenant
-        expected_alias = ['security-group', 'l3_agent_scheduler',
-                          'ext-gw-mode', 'binding', 'quotas',
-                          'agent', 'dhcp_agent_scheduler', 'provider',
-                          'router', 'extraroute', 'external-net',
-                          'allowed-address-pairs', 'extra_dhcp_opt']
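-        # Keep only the aliases enabled in the tempest configuration;
-        # anything else may legitimately be absent from this deployment.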
-        expected_alias = [ext for ext in expected_alias if
-                          test.is_extension_enabled(ext, 'network')]
-        actual_alias = list()
-        extensions = self.client.list_extensions()
-        list_extensions = extensions['extensions']
-        # Show and verify the details of the available extensions
-        for ext in list_extensions:
-            ext_name = ext['name']
-            ext_alias = ext['alias']
-            actual_alias.append(ext_alias)
-            ext_details = self.client.show_extension(ext_alias)
-            ext_details = ext_details['extension']
-
-            self.assertIsNotNone(ext_details)
-            self.assertIn('updated', ext_details.keys())
-            self.assertIn('name', ext_details.keys())
-            self.assertIn('description', ext_details.keys())
-            self.assertIn('links', ext_details.keys())
-            self.assertIn('alias', ext_details.keys())
-            self.assertEqual(ext_details['name'], ext_name)
-            self.assertEqual(ext_details['alias'], ext_alias)
-            self.assertEqual(ext_details, ext)
-        # Verify that every expected extension is present in the actual
-        # list; expected_alias was already filtered above to the aliases
-        # enabled via configuration
-        for e in expected_alias:
-            self.assertIn(e, actual_alias)
diff --git a/neutron/tests/api/test_extra_dhcp_options.py b/neutron/tests/api/test_extra_dhcp_options.py
deleted file mode 100644 (file)
index 57e4329..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base
-from neutron.tests.tempest import test
-
-
-class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):
-
-    """
-    Tests the following operations with the Extra DHCP Options Neutron API
-    extension:
-
-        port create
-        port list
-        port show
-        port update
-
-    v2.0 of the Neutron API is assumed. It is also assumed that the Extra
-    DHCP Options extension is enabled in the [network-feature-enabled]
-    section of etc/tempest.conf
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(ExtraDHCPOptionsTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
-            msg = "Extra DHCP Options extension not enabled."
-            raise cls.skipException(msg)
-        cls.network = cls.create_network()
-        cls.subnet = cls.create_subnet(cls.network)
-        cls.port = cls.create_port(cls.network)
-        cls.ip_tftp = ('123.123.123.123' if cls._ip_version == 4
-                       else '2015::dead')
-        cls.ip_server = ('123.123.123.45' if cls._ip_version == 4
-                         else '2015::badd')
-        cls.extra_dhcp_opts = [
-            {'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'},
-            {'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'},
-            {'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'}
-        ]
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
-    def test_create_list_port_with_extra_dhcp_options(self):
-        # Create a port with Extra DHCP Options
-        body = self.client.create_port(
-            network_id=self.network['id'],
-            extra_dhcp_opts=self.extra_dhcp_opts)
-        port_id = body['port']['id']
-        self.addCleanup(self.client.delete_port, port_id)
-
-        # Confirm port created has Extra DHCP Options
-        body = self.client.list_ports()
-        ports = body['ports']
-        port = [p for p in ports if p['id'] == port_id]
-        self.assertTrue(port)
-        self._confirm_extra_dhcp_options(port[0], self.extra_dhcp_opts)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607')
-    def test_update_show_port_with_extra_dhcp_options(self):
-        # Update port with extra dhcp options
-        name = data_utils.rand_name('new-port-name')
-        body = self.client.update_port(
-            self.port['id'],
-            name=name,
-            extra_dhcp_opts=self.extra_dhcp_opts)
-        # Confirm extra dhcp options were added to the port
-        body = self.client.show_port(self.port['id'])
-        self._confirm_extra_dhcp_options(body['port'], self.extra_dhcp_opts)
-
-    def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
-        retrieved = port['extra_dhcp_opts']
-        self.assertEqual(len(retrieved), len(extra_dhcp_opts))
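-        # The inner loop breaks on the first match; the for/else fires
-        # only when a retrieved option matched none of the expected ones.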
-        for retrieved_option in retrieved:
-            for option in extra_dhcp_opts:
-                if (retrieved_option['opt_value'] == option['opt_value'] and
-                    retrieved_option['opt_name'] == option['opt_name']):
-                    break
-            else:
-                self.fail('Extra DHCP option not found in port %s' %
-                          str(retrieved_option))
-
-
-class ExtraDHCPOptionsIpV6TestJSON(ExtraDHCPOptionsTestJSON):
-    _ip_version = 6
diff --git a/neutron/tests/api/test_flavors_extensions.py b/neutron/tests/api/test_flavors_extensions.py
deleted file mode 100644 (file)
index 1ca68ef..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.tempest import test
-
-
-class TestFlavorsJson(base.BaseAdminNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        List, Show, Create, Update, Delete Flavors
-        List, Show, Create, Update, Delete service profiles
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(TestFlavorsJson, cls).resource_setup()
-        if not test.is_extension_enabled('flavors', 'network'):
-            msg = "flavors extension not enabled."
-            raise cls.skipException(msg)
-
-        # Use the FLAVORS service type since we know it is loaded
-        service_type = "FLAVORS"
-        description_flavor = "flavor is created by tempest"
-        name_flavor = "Best flavor created by tempest"
-
-        # The check above will pass if api_extensions=all, which does
-        # not mean flavors extension itself is present.
-        try:
-            cls.flavor = cls.create_flavor(name_flavor, description_flavor,
-                                           service_type)
-        except lib_exc.NotFound:
-            msg = "flavors plugin not enabled."
-            raise cls.skipException(msg)
-
-        description_sp = "service profile created by tempest"
-        # Both real drivers and an empty driver field are supported. Use an
-        # empty field for now, since otherwise the driver is validated against
-        # the servicetype configuration, which may differ in test scenarios.
-        driver = ""
-        metainfo = '{"data": "value"}'
-        cls.service_profile = cls.create_service_profile(
-            description=description_sp, metainfo=metainfo, driver=driver)
-
-    def _delete_service_profile(self, service_profile_id):
-        # Deletes a service profile and verifies if it is deleted or not
-        self.admin_client.delete_service_profile(service_profile_id)
-        # Asserting that service profile is not found in list after deletion
-        profiles = self.admin_client.list_service_profiles(
-            id=service_profile_id)
-        self.assertEqual(len(profiles['service_profiles']), 0)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
-    def test_create_update_delete_service_profile(self):
-        # Creates a service profile
-        description = "service_profile created by tempest"
-        driver = ""
-        metainfo = '{"data": "value"}'
-        body = self.admin_client.create_service_profile(
-            description=description, driver=driver, metainfo=metainfo)
-        service_profile = body['service_profile']
-        # Updates a service profile and checks the change took effect
-        body = self.admin_client.update_service_profile(service_profile['id'],
-                                                        enabled=False)
-        self.assertFalse(body['service_profile']['enabled'])
-        # Deletes a service profile
-        self.addCleanup(self._delete_service_profile,
-                        service_profile['id'])
-        # Assert that the created service profile is found when the list
-        # is filtered by its id
-        profiles = (self.admin_client.list_service_profiles(
-                    id=service_profile['id']))
-        self.assertEqual(len(profiles['service_profiles']), 1)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
-    def test_create_update_delete_flavor(self):
-        # Creates a flavor
-        description = "flavor created by tempest"
-        service = "FLAVORS"
-        name = "Best flavor created by tempest"
-        body = self.admin_client.create_flavor(name=name, service_type=service,
-                                               description=description)
-        flavor = body['flavor']
-        # Updates a flavor and checks the change took effect
-        body = self.admin_client.update_flavor(flavor['id'], enabled=False)
-        self.assertFalse(body['flavor']['enabled'])
-        # Deletes a flavor
-        self.addCleanup(self._delete_flavor, flavor['id'])
-        # Assert that the created flavor is found when the list is
-        # filtered by its id
-        flavors = self.admin_client.list_flavors(id=flavor['id'])
-        self.assertEqual(len(flavors['flavors']), 1)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
-    def test_show_service_profile(self):
-        # Verifies the details of a service profile
-        body = self.admin_client.show_service_profile(
-            self.service_profile['id'])
-        service_profile = body['service_profile']
-        self.assertEqual(self.service_profile['id'], service_profile['id'])
-        self.assertEqual(self.service_profile['description'],
-                         service_profile['description'])
-        self.assertEqual(self.service_profile['metainfo'],
-                         service_profile['metainfo'])
-        self.assertTrue(service_profile['enabled'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
-    def test_show_flavor(self):
-        # Verifies the details of a flavor
-        body = self.admin_client.show_flavor(self.flavor['id'])
-        flavor = body['flavor']
-        self.assertEqual(self.flavor['id'], flavor['id'])
-        self.assertEqual(self.flavor['description'], flavor['description'])
-        self.assertEqual(self.flavor['name'], flavor['name'])
-        self.assertTrue(flavor['enabled'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
-    def test_list_flavors(self):
-        # Verify that filtering flavors by a non-existent id returns nothing
-        body = self.admin_client.list_flavors(id=33)
-        flavors = body['flavors']
-        self.assertEqual(0, len(flavors))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
-    def test_list_service_profiles(self):
-        # Verify that filtering service profiles by a non-existent id
-        # returns nothing
-        body = self.admin_client.list_service_profiles(id=33)
-        service_profiles = body['service_profiles']
-        self.assertEqual(0, len(service_profiles))
-
-    def _delete_flavor(self, flavor_id):
-        # Deletes a flavor and verifies if it is deleted or not
-        self.admin_client.delete_flavor(flavor_id)
-        # Asserting that the flavor is not found in list after deletion
-        flavors = self.admin_client.list_flavors(id=flavor_id)
-        self.assertEqual(len(flavors['flavors']), 0)
-
-
-class TestFlavorsIpV6TestJSON(TestFlavorsJson):
-    _ip_version = 6
diff --git a/neutron/tests/api/test_floating_ips.py b/neutron/tests/api/test_floating_ips.py
deleted file mode 100644 (file)
index 78ea35b..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class FloatingIPTestJSON(base.BaseNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        Create a Floating IP
-        Update a Floating IP
-        Delete a Floating IP
-        List all Floating IPs
-        Show Floating IP details
-        Associate a Floating IP with a port and then delete that port
-        Associate a Floating IP with a port and then with a port on another
-        router
-
-    v2.0 of the Neutron API is assumed. It is also assumed that the following
-    options are defined in the [network] section of etc/tempest.conf:
-
-        public_network_id: the id of the existing external network
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(FloatingIPTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('router', 'network'):
-            msg = "router extension not enabled."
-            raise cls.skipException(msg)
-        cls.ext_net_id = CONF.network.public_network_id
-
-        # Create network, subnet, router and add interface
-        cls.network = cls.create_network()
-        cls.subnet = cls.create_subnet(cls.network)
-        cls.router = cls.create_router(data_utils.rand_name('router-'),
-                                       external_network_id=cls.ext_net_id)
-        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
-        # Create two ports, one each for creating and updating a floating IP;
-        # the base class collects them in cls.ports
-        for _ in range(2):
-            cls.create_port(cls.network)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e8718')
-    def test_create_list_show_update_delete_floating_ip(self):
-        # Creates a floating IP
-        body = self.client.create_floatingip(
-            floating_network_id=self.ext_net_id,
-            port_id=self.ports[0]['id'])
-        created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
-                        created_floating_ip['id'])
-        self.assertIsNotNone(created_floating_ip['id'])
-        self.assertIsNotNone(created_floating_ip['tenant_id'])
-        self.assertIsNotNone(created_floating_ip['floating_ip_address'])
-        self.assertEqual(created_floating_ip['port_id'], self.ports[0]['id'])
-        self.assertEqual(created_floating_ip['floating_network_id'],
-                         self.ext_net_id)
-        self.assertIn(created_floating_ip['fixed_ip_address'],
-                      [ip['ip_address'] for ip in self.ports[0]['fixed_ips']])
-        # Verifies the details of a floating_ip
-        floating_ip = self.client.show_floatingip(created_floating_ip['id'])
-        shown_floating_ip = floating_ip['floatingip']
-        self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
-        self.assertEqual(shown_floating_ip['floating_network_id'],
-                         self.ext_net_id)
-        self.assertEqual(shown_floating_ip['tenant_id'],
-                         created_floating_ip['tenant_id'])
-        self.assertEqual(shown_floating_ip['floating_ip_address'],
-                         created_floating_ip['floating_ip_address'])
-        self.assertEqual(shown_floating_ip['port_id'], self.ports[0]['id'])
-
-        # Verify the floating ip exists in the list of all floating_ips
-        floating_ips = self.client.list_floatingips()
-        floatingip_id_list = [f['id'] for f in floating_ips['floatingips']]
-        self.assertIn(created_floating_ip['id'], floatingip_id_list)
-        # Associate floating IP to the other port
-        floating_ip = self.client.update_floatingip(
-            created_floating_ip['id'],
-            port_id=self.ports[1]['id'])
-        updated_floating_ip = floating_ip['floatingip']
-        self.assertEqual(updated_floating_ip['port_id'], self.ports[1]['id'])
-        self.assertEqual(updated_floating_ip['fixed_ip_address'],
-                         self.ports[1]['fixed_ips'][0]['ip_address'])
-        self.assertEqual(updated_floating_ip['router_id'], self.router['id'])
-
-        # Disassociate floating IP from the port
-        floating_ip = self.client.update_floatingip(
-            created_floating_ip['id'],
-            port_id=None)
-        updated_floating_ip = floating_ip['floatingip']
-        self.assertIsNone(updated_floating_ip['port_id'])
-        self.assertIsNone(updated_floating_ip['fixed_ip_address'])
-        self.assertIsNone(updated_floating_ip['router_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
-    def test_floating_ip_delete_port(self):
-        # Create a floating IP
-        body = self.client.create_floatingip(
-            floating_network_id=self.ext_net_id)
-        created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
-                        created_floating_ip['id'])
-        # Create a port
-        port = self.client.create_port(network_id=self.network['id'])
-        created_port = port['port']
-        floating_ip = self.client.update_floatingip(
-            created_floating_ip['id'],
-            port_id=created_port['id'])
-        # Delete port
-        self.client.delete_port(created_port['id'])
-        # Verifies the details of the floating_ip
-        floating_ip = self.client.show_floatingip(created_floating_ip['id'])
-        shown_floating_ip = floating_ip['floatingip']
-        # Confirm the fields are back to None
-        self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
-        self.assertIsNone(shown_floating_ip['port_id'])
-        self.assertIsNone(shown_floating_ip['fixed_ip_address'])
-        self.assertIsNone(shown_floating_ip['router_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('1bb2f731-fe5a-4b8c-8409-799ade1bed4d')
-    def test_floating_ip_update_different_router(self):
-        # Associate a floating IP to a port on a router
-        body = self.client.create_floatingip(
-            floating_network_id=self.ext_net_id,
-            port_id=self.ports[1]['id'])
-        created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
-                        created_floating_ip['id'])
-        self.assertEqual(created_floating_ip['router_id'], self.router['id'])
-        network2 = self.create_network()
-        subnet2 = self.create_subnet(network2)
-        router2 = self.create_router(data_utils.rand_name('router-'),
-                                     external_network_id=self.ext_net_id)
-        self.create_router_interface(router2['id'], subnet2['id'])
-        port_other_router = self.create_port(network2)
-        # Associate floating IP to the other port on another router
-        floating_ip = self.client.update_floatingip(
-            created_floating_ip['id'],
-            port_id=port_other_router['id'])
-        updated_floating_ip = floating_ip['floatingip']
-        self.assertEqual(updated_floating_ip['router_id'], router2['id'])
-        self.assertEqual(updated_floating_ip['port_id'],
-                         port_other_router['id'])
-        self.assertIsNotNone(updated_floating_ip['fixed_ip_address'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('36de4bd0-f09c-43e3-a8e1-1decc1ffd3a5')
-    def test_create_floating_ip_specifying_a_fixed_ip_address(self):
-        body = self.client.create_floatingip(
-            floating_network_id=self.ext_net_id,
-            port_id=self.ports[1]['id'],
-            fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
-        created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
-                        created_floating_ip['id'])
-        self.assertIsNotNone(created_floating_ip['id'])
-        self.assertEqual(created_floating_ip['fixed_ip_address'],
-                         self.ports[1]['fixed_ips'][0]['ip_address'])
-        floating_ip = self.client.update_floatingip(
-            created_floating_ip['id'],
-            port_id=None)
-        self.assertIsNone(floating_ip['floatingip']['port_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('45c4c683-ea97-41ef-9c51-5e9802f2f3d7')
-    def test_create_update_floatingip_with_port_multiple_ip_address(self):
-        # Pick two of the highest addresses in the subnet, which are
-        # unlikely to have been allocated to a port yet
-        ips = list(netaddr.IPNetwork(self.subnet['cidr']))
-        list_ips = [str(ip) for ip in ips[-3:-1]]
-        fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
-        # Create port
-        body = self.client.create_port(network_id=self.network['id'],
-                                       fixed_ips=fixed_ips)
-        port = body['port']
-        self.addCleanup(self.client.delete_port, port['id'])
-        # Create floating ip
-        body = self.client.create_floatingip(
-            floating_network_id=self.ext_net_id,
-            port_id=port['id'],
-            fixed_ip_address=list_ips[0])
-        floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
-        self.assertIsNotNone(floating_ip['id'])
-        self.assertEqual(floating_ip['fixed_ip_address'], list_ips[0])
-        # Update floating ip
-        body = self.client.update_floatingip(floating_ip['id'],
-                                             port_id=port['id'],
-                                             fixed_ip_address=list_ips[1])
-        update_floating_ip = body['floatingip']
-        self.assertEqual(update_floating_ip['fixed_ip_address'],
-                         list_ips[1])
diff --git a/neutron/tests/api/test_floating_ips_negative.py b/neutron/tests/api/test_floating_ips_negative.py
deleted file mode 100644 (file)
index 0752888..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class FloatingIPNegativeTestJSON(base.BaseNetworkTest):
-
-    """
-    Tests the following negative operations for floating IPs:
-
-        Create a floating IP with a port that is unreachable from the
-        external network
-        Create a floating IP in a private network
-        Associate a floating IP with a port that is unreachable from the
-        external network
-        Associate a floating IP to a port that already has another floating IP
-        Associate a floating IP with a port from another tenant
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(FloatingIPNegativeTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('router', 'network'):
-            msg = "router extension not enabled."
-            raise cls.skipException(msg)
-        cls.ext_net_id = CONF.network.public_network_id
-        # Create a network with a subnet connected to a router.
-        cls.network = cls.create_network()
-        cls.subnet = cls.create_subnet(cls.network)
-        cls.router = cls.create_router(data_utils.rand_name('router'))
-        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
-        cls.port = cls.create_port(cls.network)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('22996ea8-4a81-4b27-b6e1-fa5df92fa5e8')
-    def test_create_floatingip_with_port_ext_net_unreachable(self):
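-        # The class-level router has no external gateway set, so the port
-        # is not reachable from the external network and the call must fail.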
-        self.assertRaises(lib_exc.NotFound, self.client.create_floatingip,
-                          floating_network_id=self.ext_net_id,
-                          port_id=self.port['id'],
-                          fixed_ip_address=self.port['fixed_ips'][0]
-                                                    ['ip_address'])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('50b9aeb4-9f0b-48ee-aa31-fa955a48ff54')
-    def test_create_floatingip_in_private_network(self):
-        self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_floatingip,
-                          floating_network_id=self.network['id'],
-                          port_id=self.port['id'],
-                          fixed_ip_address=self.port['fixed_ips'][0]
-                                                    ['ip_address'])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('6b3b8797-6d43-4191-985c-c48b773eb429')
-    def test_associate_floatingip_port_ext_net_unreachable(self):
-        # Create floating ip
-        body = self.client.create_floatingip(
-            floating_network_id=self.ext_net_id)
-        floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
-        # Associate floating IP to the other port
-        self.assertRaises(lib_exc.NotFound, self.client.update_floatingip,
-                          floating_ip['id'], port_id=self.port['id'],
-                          fixed_ip_address=self.port['fixed_ips'][0]
-                          ['ip_address'])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('0b5b8797-6de7-4191-905c-a48b888eb429')
-    def test_associate_floatingip_with_port_with_floatingip(self):
-        net = self.create_network()
-        subnet = self.create_subnet(net)
-        r = self.create_router('test')
-        self.create_router_interface(r['id'], subnet['id'])
-        self.client.update_router(
-            r['id'],
-            external_gateway_info={
-                'network_id': self.ext_net_id})
-        # Clear the gateway of the router configured just above, not the
-        # class-level router, which never had one set
-        self.addCleanup(self.client.update_router, r['id'],
-                        external_gateway_info={})
-        port = self.create_port(net)
-        body1 = self.client.create_floatingip(
-            floating_network_id=self.ext_net_id)
-        floating_ip1 = body1['floatingip']
-        self.addCleanup(self.client.delete_floatingip, floating_ip1['id'])
-        body2 = self.client.create_floatingip(
-            floating_network_id=self.ext_net_id)
-        floating_ip2 = body2['floatingip']
-        self.addCleanup(self.client.delete_floatingip, floating_ip2['id'])
-        self.client.update_floatingip(floating_ip1['id'],
-                                      port_id=port['id'])
-        self.assertRaises(lib_exc.Conflict, self.client.update_floatingip,
-                          floating_ip2['id'], port_id=port['id'])
diff --git a/neutron/tests/api/test_fwaas_extensions.py b/neutron/tests/api/test_fwaas_extensions.py
deleted file mode 100644 (file)
index 55745c4..0000000
+++ /dev/null
@@ -1,391 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import six
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import exceptions
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class FWaaSExtensionTestJSON(base.BaseNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        List firewall rules
-        Create firewall rule
-        Update firewall rule
-        Delete firewall rule
-        Show firewall rule
-        List firewall policies
-        Create firewall policy
-        Update firewall policy
-        Insert firewall rule to policy
-        Remove firewall rule from policy
-        Insert firewall rule after/before rule in policy
-        Update firewall policy audited attribute
-        Delete firewall policy
-        Show firewall policy
-        List firewalls
-        Create firewall
-        Update firewall
-        Delete firewall
-        Show firewall
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(FWaaSExtensionTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('fwaas', 'network'):
-            msg = "FWaaS Extension not enabled."
-            raise cls.skipException(msg)
-        cls.fw_rule = cls.create_firewall_rule("allow", "tcp")
-        cls.fw_policy = cls.create_firewall_policy()
-
-    def _try_delete_policy(self, policy_id):
-        # delete policy, if it exists
-        try:
-            self.client.delete_firewall_policy(policy_id)
-        # if policy is not found, this means it was deleted in the test
-        except lib_exc.NotFound:
-            pass
-
-    def _try_delete_rule(self, rule_id):
-        # delete rule, if it exists
-        try:
-            self.client.delete_firewall_rule(rule_id)
-        # if rule is not found, this means it was deleted in the test
-        except lib_exc.NotFound:
-            pass
-
-    def _try_delete_firewall(self, fw_id):
-        # delete firewall, if it exists
-        try:
-            self.client.delete_firewall(fw_id)
-        # if firewall is not found, this means it was deleted in the test
-        except lib_exc.NotFound:
-            pass
-
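-        # Firewall deletion is asynchronous; block until the resource is
-        # really gone so dependent resources can be cleaned up afterwards.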
-        self.client.wait_for_resource_deletion('firewall', fw_id)
-
-    def _wait_until_ready(self, fw_id):
-        target_states = ('ACTIVE', 'CREATED')
-
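-        # Predicate polled by call_until_true below: done once the
-        # firewall reports one of the target states.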
-        def _wait():
-            firewall = self.client.show_firewall(fw_id)
-            firewall = firewall['firewall']
-            return firewall['status'] in target_states
-
-        if not test.call_until_true(_wait, CONF.network.build_timeout,
-                                    CONF.network.build_interval):
-            m = ("Timed out waiting for firewall %s to reach %s state(s)" %
-                 (fw_id, target_states))
-            raise exceptions.TimeoutException(m)
-
-    @test.idempotent_id('1b84cf01-9c09-4ce7-bc72-b15e39076468')
-    def test_list_firewall_rules(self):
-        # List firewall rules
-        fw_rules = self.client.list_firewall_rules()
-        fw_rules = fw_rules['firewall_rules']
-        self.assertIn((self.fw_rule['id'],
-                       self.fw_rule['name'],
-                       self.fw_rule['action'],
-                       self.fw_rule['protocol'],
-                       self.fw_rule['ip_version'],
-                       self.fw_rule['enabled']),
-                      [(m['id'],
-                        m['name'],
-                        m['action'],
-                        m['protocol'],
-                        m['ip_version'],
-                        m['enabled']) for m in fw_rules])
-
-    @test.idempotent_id('563564f7-7077-4f5e-8cdc-51f37ae5a2b9')
-    def test_create_update_delete_firewall_rule(self):
-        # Create firewall rule
-        body = self.client.create_firewall_rule(
-            name=data_utils.rand_name("fw-rule"),
-            action="allow",
-            protocol="tcp")
-        fw_rule_id = body['firewall_rule']['id']
-
-        # Update firewall rule
-        body = self.client.update_firewall_rule(fw_rule_id,
-                                                shared=True)
-        self.assertTrue(body["firewall_rule"]['shared'])
-
-        # Delete firewall rule
-        self.client.delete_firewall_rule(fw_rule_id)
-        # Confirm deletion
-        fw_rules = self.client.list_firewall_rules()
-        self.assertNotIn(fw_rule_id,
-                         [m['id'] for m in fw_rules['firewall_rules']])
-
-    @test.idempotent_id('3ff8c08e-26ff-4034-ae48-810ed213a998')
-    def test_show_firewall_rule(self):
-        # show a created firewall rule
-        fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
-        for key, value in six.iteritems(fw_rule['firewall_rule']):
-            self.assertEqual(self.fw_rule[key], value)
-
-    @test.idempotent_id('1086dd93-a4c0-4bbb-a1bd-6d4bc62c199f')
-    def test_list_firewall_policies(self):
-        fw_policies = self.client.list_firewall_policies()
-        fw_policies = fw_policies['firewall_policies']
-        self.assertIn((self.fw_policy['id'],
-                       self.fw_policy['name'],
-                       self.fw_policy['firewall_rules']),
-                      [(m['id'],
-                        m['name'],
-                        m['firewall_rules']) for m in fw_policies])
-
-    @test.idempotent_id('bbf37b6c-498c-421e-9c95-45897d3ed775')
-    def test_create_update_delete_firewall_policy(self):
-        # Create firewall policy
-        body = self.client.create_firewall_policy(
-            name=data_utils.rand_name("fw-policy"))
-        fw_policy_id = body['firewall_policy']['id']
-        self.addCleanup(self._try_delete_policy, fw_policy_id)
-
-        # Update firewall policy
-        body = self.client.update_firewall_policy(fw_policy_id,
-                                                  shared=True,
-                                                  name="updated_policy")
-        updated_fw_policy = body["firewall_policy"]
-        self.assertTrue(updated_fw_policy['shared'])
-        self.assertEqual("updated_policy", updated_fw_policy['name'])
-
-        # Delete firewall policy
-        self.client.delete_firewall_policy(fw_policy_id)
-        # Confirm deletion
-        fw_policies = self.client.list_firewall_policies()
-        fw_policies = fw_policies['firewall_policies']
-        self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
-
-    @test.idempotent_id('1df59b3a-517e-41d4-96f6-fc31cf4ecff2')
-    def test_show_firewall_policy(self):
-        # show a created firewall policy
-        fw_policy = self.client.show_firewall_policy(self.fw_policy['id'])
-        fw_policy = fw_policy['firewall_policy']
-        for key, value in six.iteritems(fw_policy):
-            self.assertEqual(self.fw_policy[key], value)
-
-    @test.idempotent_id('02082a03-3cdd-4789-986a-1327dd80bfb7')
-    def test_create_show_delete_firewall(self):
-        # Create tenant network resources required for an ACTIVE firewall
-        network = self.create_network()
-        subnet = self.create_subnet(network)
-        router = self.create_router(
-            data_utils.rand_name('router-'),
-            admin_state_up=True)
-        self.client.add_router_interface_with_subnet_id(
-            router['id'], subnet['id'])
-
-        # Create firewall
-        body = self.client.create_firewall(
-            name=data_utils.rand_name("firewall"),
-            firewall_policy_id=self.fw_policy['id'])
-        created_firewall = body['firewall']
-        firewall_id = created_firewall['id']
-        self.addCleanup(self._try_delete_firewall, firewall_id)
-
-        # Wait for the firewall resource to become ready
-        self._wait_until_ready(firewall_id)
-
-        # show a created firewall
-        firewall = self.client.show_firewall(firewall_id)
-        firewall = firewall['firewall']
-
-        for key, value in six.iteritems(firewall):
-            if key == 'status':
-                continue
-            self.assertEqual(created_firewall[key], value)
-
-        # list firewall
-        firewalls = self.client.list_firewalls()
-        firewalls = firewalls['firewalls']
-        self.assertIn((created_firewall['id'],
-                       created_firewall['name'],
-                       created_firewall['firewall_policy_id']),
-                      [(m['id'],
-                        m['name'],
-                        m['firewall_policy_id']) for m in firewalls])
-
-        # Delete firewall
-        self.client.delete_firewall(firewall_id)
-
-    @test.idempotent_id('1355cf5c-77d4-4bb9-87d7-e50c194d08b5')
-    def test_firewall_insertion_mode_add_remove_router(self):
-        # Create routers
-        router1 = self.create_router(
-            data_utils.rand_name('router-'),
-            admin_state_up=True)
-        router2 = self.create_router(
-            data_utils.rand_name('router-'),
-            admin_state_up=True)
-
-        # Create firewall on a router1
-        body = self.client.create_firewall(
-            name=data_utils.rand_name("firewall"),
-            firewall_policy_id=self.fw_policy['id'],
-            router_ids=[router1['id']])
-        created_firewall = body['firewall']
-        firewall_id = created_firewall['id']
-        self.addCleanup(self._try_delete_firewall, firewall_id)
-
-        self.assertEqual([router1['id']], created_firewall['router_ids'])
-
-        # Wait for the firewall resource to become ready
-        self._wait_until_ready(firewall_id)
-
-        # Add router2 to the firewall
-        body = self.client.update_firewall(
-            firewall_id, router_ids=[router1['id'], router2['id']])
-        updated_firewall = body['firewall']
-        self.assertIn(router2['id'], updated_firewall['router_ids'])
-        self.assertEqual(2, len(updated_firewall['router_ids']))
-
-        # Wait for the firewall resource to become ready
-        self._wait_until_ready(firewall_id)
-
-        # Remove router1 from the firewall
-        body = self.client.update_firewall(
-            firewall_id, router_ids=[router2['id']])
-        updated_firewall = body['firewall']
-        self.assertNotIn(router1['id'], updated_firewall['router_ids'])
-        self.assertEqual(1, len(updated_firewall['router_ids']))
-
-    @test.idempotent_id('c60ceff5-d51f-451d-b6e6-cb983d16ab6b')
-    def test_firewall_insertion_mode_one_firewall_per_router(self):
-        # Create router required for an ACTIVE firewall
-        router = self.create_router(
-            data_utils.rand_name('router1-'),
-            admin_state_up=True)
-
-        # Create firewall
-        body = self.client.create_firewall(
-            name=data_utils.rand_name("firewall"),
-            firewall_policy_id=self.fw_policy['id'],
-            router_ids=[router['id']])
-        created_firewall = body['firewall']
-        self.addCleanup(self._try_delete_firewall, created_firewall['id'])
-
-        # Try to create firewall with the same router
-        self.assertRaisesRegexp(
-            lib_exc.Conflict,
-            "An object with that identifier already exists",
-            self.client.create_firewall,
-            name=data_utils.rand_name("firewall"),
-            firewall_policy_id=self.fw_policy['id'],
-            router_ids=[router['id']])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('53305b4b-9897-4e01-87c0-2ae386083180')
-    def test_firewall_rule_insertion_position_removal_rule_from_policy(self):
-        # Create firewall rule
-        body = self.client.create_firewall_rule(
-            name=data_utils.rand_name("fw-rule"),
-            action="allow",
-            protocol="tcp")
-        fw_rule_id1 = body['firewall_rule']['id']
-        self.addCleanup(self._try_delete_rule, fw_rule_id1)
-        # Create firewall policy
-        body = self.client.create_firewall_policy(
-            name=data_utils.rand_name("fw-policy"))
-        fw_policy_id = body['firewall_policy']['id']
-        self.addCleanup(self._try_delete_policy, fw_policy_id)
-
-        # Insert rule to firewall policy
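-        # (the two empty strings are the insert_after and insert_before
-        # rule ids; no anchor is needed since the policy has no rules yet)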
-        self.client.insert_firewall_rule_in_policy(
-            fw_policy_id, fw_rule_id1, '', '')
-
-        # Verify insertion of rule in policy
-        self.assertIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
-        # Create another firewall rule
-        body = self.client.create_firewall_rule(
-            name=data_utils.rand_name("fw-rule"),
-            action="allow",
-            protocol="icmp")
-        fw_rule_id2 = body['firewall_rule']['id']
-        self.addCleanup(self._try_delete_rule, fw_rule_id2)
-
-        # Insert rule to firewall policy after the first rule
-        self.client.insert_firewall_rule_in_policy(
-            fw_policy_id, fw_rule_id2, fw_rule_id1, '')
-
-        # Verify the position of rule after insertion
-        fw_rule = self.client.show_firewall_rule(
-            fw_rule_id2)
-
-        self.assertEqual(int(fw_rule['firewall_rule']['position']), 2)
-        # Remove rule from the firewall policy
-        self.client.remove_firewall_rule_from_policy(
-            fw_policy_id, fw_rule_id2)
-        # Insert rule to firewall policy before the first rule
-        self.client.insert_firewall_rule_in_policy(
-            fw_policy_id, fw_rule_id2, '', fw_rule_id1)
-        # Verify the position of rule after insertion
-        fw_rule = self.client.show_firewall_rule(
-            fw_rule_id2)
-        self.assertEqual(int(fw_rule['firewall_rule']['position']), 1)
-        # Remove rule from the firewall policy
-        self.client.remove_firewall_rule_from_policy(
-            fw_policy_id, fw_rule_id2)
-        # Verify removal of rule from firewall policy
-        self.assertNotIn(fw_rule_id2, self._get_list_fw_rule_ids(fw_policy_id))
-
-        # Remove rule from the firewall policy
-        self.client.remove_firewall_rule_from_policy(
-            fw_policy_id, fw_rule_id1)
-
-        # Verify removal of rule from firewall policy
-        self.assertNotIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
-
-    def _get_list_fw_rule_ids(self, fw_policy_id):
-        fw_policy = self.client.show_firewall_policy(fw_policy_id)
-        return fw_policy['firewall_policy']['firewall_rules']
-
-    @test.idempotent_id('8515ca8a-0d2f-4298-b5ff-6f924e4587ca')
-    def test_update_firewall_policy_audited_attribute(self):
-        # Create firewall rule
-        body = self.client.create_firewall_rule(
-            name=data_utils.rand_name("fw-rule"),
-            action="allow",
-            protocol="icmp")
-        fw_rule_id = body['firewall_rule']['id']
-        self.addCleanup(self._try_delete_rule, fw_rule_id)
-        # Create firewall policy
-        body = self.client.create_firewall_policy(
-            name=data_utils.rand_name('fw-policy'))
-        fw_policy_id = body['firewall_policy']['id']
-        self.addCleanup(self._try_delete_policy, fw_policy_id)
-        self.assertFalse(body['firewall_policy']['audited'])
-        # Update firewall policy audited attribute to true
-        self.client.update_firewall_policy(fw_policy_id,
-                                           audited=True)
-        # Insert the firewall rule into the firewall policy
-        self.client.insert_firewall_rule_in_policy(
-            fw_policy_id, fw_rule_id, '', '')
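-        # Inserting a rule modifies the policy, which resets the audited
-        # flag back to False; the show below verifies exactly that.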
-        body = self.client.show_firewall_policy(
-            fw_policy_id)
-        self.assertFalse(body['firewall_policy']['audited'])
diff --git a/neutron/tests/api/test_metering_extensions.py b/neutron/tests/api/test_metering_extensions.py
deleted file mode 100644 (file)
index 6c75948..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base
-from neutron.tests.tempest import test
-
-
-class MeteringTestJSON(base.BaseAdminNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        List, Show, Create, Delete Metering labels
-        List, Show, Create, Delete Metering labels rules
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(MeteringTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('metering', 'network'):
-            msg = "metering extension not enabled."
-            raise cls.skipException(msg)
-        description = "metering label created by tempest"
-        name = data_utils.rand_name("metering-label")
-        cls.metering_label = cls.create_metering_label(name, description)
-        remote_ip_prefix = ("10.0.0.0/24" if cls._ip_version == 4
-                            else "fd02::/64")
-        direction = "ingress"
-        cls.metering_label_rule = cls.create_metering_label_rule(
-            remote_ip_prefix, direction,
-            metering_label_id=cls.metering_label['id'])
-
-    def _delete_metering_label(self, metering_label_id):
-        # Deletes a label and verifies if it is deleted or not
-        self.admin_client.delete_metering_label(metering_label_id)
-        # Asserting that the label is not found in list after deletion
-        labels = self.admin_client.list_metering_labels(id=metering_label_id)
-        self.assertEqual(len(labels['metering_labels']), 0)
-
-    def _delete_metering_label_rule(self, metering_label_rule_id):
-        # Deletes a rule and verifies if it is deleted or not
-        self.admin_client.delete_metering_label_rule(
-            metering_label_rule_id)
-        # Asserting that the rule is not found in list after deletion
-        rules = (self.admin_client.list_metering_label_rules(
-                 id=metering_label_rule_id))
-        self.assertEqual(len(rules['metering_label_rules']), 0)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
-    def test_list_metering_labels(self):
-        # Verify label filtering
-        body = self.admin_client.list_metering_labels(id=33)
-        metering_labels = body['metering_labels']
-        self.assertEqual(0, len(metering_labels))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
-    def test_create_delete_metering_label_with_filters(self):
-        # Creates a label
-        name = data_utils.rand_name('metering-label-')
-        description = "label created by tempest"
-        body = self.admin_client.create_metering_label(name=name,
-                                                       description=description)
-        metering_label = body['metering_label']
-        self.addCleanup(self._delete_metering_label,
-                        metering_label['id'])
-        # Assert that the created label is present in the label list
-        labels = (self.admin_client.list_metering_labels(
-                  id=metering_label['id']))
-        self.assertEqual(len(labels['metering_labels']), 1)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
-    def test_show_metering_label(self):
-        # Verifies the details of a label
-        body = self.admin_client.show_metering_label(self.metering_label['id'])
-        metering_label = body['metering_label']
-        self.assertEqual(self.metering_label['id'], metering_label['id'])
-        self.assertEqual(self.metering_label['tenant_id'],
-                         metering_label['tenant_id'])
-        self.assertEqual(self.metering_label['name'], metering_label['name'])
-        self.assertEqual(self.metering_label['description'],
-                         metering_label['description'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('cc832399-6681-493b-9d79-0202831a1281')
-    def test_list_metering_label_rules(self):
-        # Verify rule filtering
-        body = self.admin_client.list_metering_label_rules(id=33)
-        metering_label_rules = body['metering_label_rules']
-        self.assertEqual(0, len(metering_label_rules))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f4d547cd-3aee-408f-bf36-454f8825e045')
-    def test_create_delete_metering_label_rule_with_filters(self):
-        # Creates a rule
-        remote_ip_prefix = ("10.0.1.0/24" if self._ip_version == 4
-                            else "fd03::/64")
-        body = (self.admin_client.create_metering_label_rule(
-                remote_ip_prefix=remote_ip_prefix,
-                direction="ingress",
-                metering_label_id=self.metering_label['id']))
-        metering_label_rule = body['metering_label_rule']
-        self.addCleanup(self._delete_metering_label_rule,
-                        metering_label_rule['id'])
-        # Assert that the created rule is present in the rule list
-        rules = (self.admin_client.list_metering_label_rules(
-                 id=metering_label_rule['id']))
-        self.assertEqual(len(rules['metering_label_rules']), 1)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('b7354489-96ea-41f3-9452-bace120fb4a7')
-    def test_show_metering_label_rule(self):
-        # Verifies the details of a rule
-        body = (self.admin_client.show_metering_label_rule(
-                self.metering_label_rule['id']))
-        metering_label_rule = body['metering_label_rule']
-        self.assertEqual(self.metering_label_rule['id'],
-                         metering_label_rule['id'])
-        self.assertEqual(self.metering_label_rule['remote_ip_prefix'],
-                         metering_label_rule['remote_ip_prefix'])
-        self.assertEqual(self.metering_label_rule['direction'],
-                         metering_label_rule['direction'])
-        self.assertEqual(self.metering_label_rule['metering_label_id'],
-                         metering_label_rule['metering_label_id'])
-        self.assertFalse(metering_label_rule['excluded'])
-
-
-class MeteringIpV6TestJSON(MeteringTestJSON):
-    _ip_version = 6
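The metering tests removed above all reduce to one admin-only lifecycle: create a label, attach a rule to it, and delete both. A condensed sketch of that flow, assuming `admin_client` is the same admin REST client the tests use (names and prefixes are example values):

    # Create a label, then a rule scoped to it.
    label = admin_client.create_metering_label(
        name='ingress-traffic', description='example')['metering_label']
    rule = admin_client.create_metering_label_rule(
        remote_ip_prefix='10.0.0.0/24', direction='ingress',
        metering_label_id=label['id'])['metering_label_rule']
    # Tear down in reverse order of creation, as the cleanups above do.
    admin_client.delete_metering_label_rule(rule['id'])
    admin_client.delete_metering_label(label['id'])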
diff --git a/neutron/tests/api/test_networks.py b/neutron/tests/api/test_networks.py
deleted file mode 100644 (file)
index a0cd963..0000000
+++ /dev/null
@@ -1,683 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import itertools
-
-import netaddr
-import six
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.tempest.common import custom_matchers
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class NetworksTestJSON(base.BaseNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        create a network for a tenant
-        list tenant's networks
-        show a tenant network details
-        create a subnet for a tenant
-        list tenant's subnets
-        show a tenant subnet details
-        network update
-        subnet update
-        delete a network also deletes its subnets
-        list external networks
-
-        All subnet tests are run once with ipv4 and once with ipv6.
-
-    v2.0 of the Neutron API is assumed. It is also assumed that the following
-    options are defined in the [network] section of etc/tempest.conf:
-
-        tenant_network_cidr with a block of CIDRs from which smaller blocks
-        can be allocated for tenant ipv4 subnets
-
-        tenant_network_v6_cidr is the equivalent for ipv6 subnets
-
-        tenant_network_mask_bits with the mask bits to be used to partition the
-        block defined by tenant_network_cidr
-
-        tenant_network_v6_mask_bits is the equivalent for ipv6 subnets
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(NetworksTestJSON, cls).resource_setup()
-        cls.network = cls.create_network()
-        cls.name = cls.network['name']
-        cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
-                                                               cls._ip_version)
-        cls.cidr = cls.subnet['cidr']
-        cls._subnet_data = {6: {'gateway':
-                                str(cls._get_gateway_from_tempest_conf(6)),
-                                'allocation_pools':
-                                cls._get_allocation_pools_from_gateway(6),
-                                'dns_nameservers': ['2001:4860:4860::8844',
-                                                    '2001:4860:4860::8888'],
-                                'host_routes': [{'destination': '2001::/64',
-                                                 'nexthop': '2003::1'}],
-                                'new_host_routes': [{'destination':
-                                                     '2001::/64',
-                                                     'nexthop': '2005::1'}],
-                                'new_dns_nameservers':
-                                ['2001:4860:4860::7744',
-                                 '2001:4860:4860::7888']},
-                            4: {'gateway':
-                                str(cls._get_gateway_from_tempest_conf(4)),
-                                'allocation_pools':
-                                cls._get_allocation_pools_from_gateway(4),
-                                'dns_nameservers': ['8.8.4.4', '8.8.8.8'],
-                                'host_routes': [{'destination': '10.20.0.0/32',
-                                                 'nexthop': '10.100.1.1'}],
-                                'new_host_routes': [{'destination':
-                                                     '10.20.0.0/32',
-                                                     'nexthop':
-                                                     '10.100.1.2'}],
-                                'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
-
-    @classmethod
-    def _create_subnet_with_last_subnet_block(cls, network, ip_version):
-        """Derive last subnet CIDR block from tenant CIDR and
-           create the subnet with that derived CIDR
-        """
-        if ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
-            mask_bits = CONF.network.tenant_network_mask_bits
-        elif ip_version == 6:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
-            mask_bits = CONF.network.tenant_network_v6_mask_bits
-
-        subnet_cidr = list(cidr.subnet(mask_bits))[-1]
-        gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
-        return cls.create_subnet(network, gateway=gateway_ip,
-                                 cidr=subnet_cidr, mask_bits=mask_bits)
-
-    @classmethod
-    def _get_gateway_from_tempest_conf(cls, ip_version):
-        """Return first subnet gateway for configured CIDR """
-        if ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
-            mask_bits = CONF.network.tenant_network_mask_bits
-        elif ip_version == 6:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
-            mask_bits = CONF.network.tenant_network_v6_mask_bits
-
-        if mask_bits >= cidr.prefixlen:
-            return netaddr.IPAddress(cidr) + 1
-        else:
-            for subnet in cidr.subnet(mask_bits):
-                return netaddr.IPAddress(subnet) + 1
-
-    @classmethod
-    def _get_allocation_pools_from_gateway(cls, ip_version):
-        """Return allocation range for subnet of given gateway"""
-        gateway = cls._get_gateway_from_tempest_conf(ip_version)
-        return [{'start': str(gateway + 2), 'end': str(gateway + 3)}]
-
-    def subnet_dict(self, include_keys):
-        """Return a subnet dict which has include_keys and their corresponding
-           value from self._subnet_data
-        """
-        return dict((key, self._subnet_data[self._ip_version][key])
-                    for key in include_keys)
-
-    def _compare_resource_attrs(self, actual, expected):
-        exclude_keys = set(actual).symmetric_difference(expected)
-        self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
-                        expected, exclude_keys))
-
-    def _delete_network(self, network):
-        # Deleting a network also deletes any subnets it contains
-        self.client.delete_network(network['id'])
-        if network in self.networks:
-            self.networks.remove(network)
-        for subnet in self.subnets:
-            if subnet['network_id'] == network['id']:
-                self.subnets.remove(subnet)
-
-    def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
-                                     **kwargs):
-        network = self.create_network()
-        net_id = network['id']
-        gateway = kwargs.pop('gateway', None)
-        subnet = self.create_subnet(network, gateway, cidr, mask_bits,
-                                    **kwargs)
-        compare_args_full = dict(gateway_ip=gateway, cidr=cidr,
-                                 mask_bits=mask_bits, **kwargs)
-        compare_args = dict((k, v) for k, v in six.iteritems(compare_args_full)
-                            if v is not None)
-
-        if 'dns_nameservers' in set(subnet).intersection(compare_args):
-            self.assertEqual(sorted(compare_args['dns_nameservers']),
-                             sorted(subnet['dns_nameservers']))
-            del subnet['dns_nameservers'], compare_args['dns_nameservers']
-
-        self._compare_resource_attrs(subnet, compare_args)
-        self.client.delete_network(net_id)
-        self.networks.pop()
-        self.subnets.pop()
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('0e269138-0da6-4efc-a46d-578161e7b221')
-    def test_create_update_delete_network_subnet(self):
-        # Create a network
-        name = data_utils.rand_name('network-')
-        network = self.create_network(network_name=name)
-        self.addCleanup(self._delete_network, network)
-        net_id = network['id']
-        self.assertEqual('ACTIVE', network['status'])
-        # Verify network update
-        new_name = "New_network"
-        body = self.client.update_network(net_id, name=new_name)
-        updated_net = body['network']
-        self.assertEqual(updated_net['name'], new_name)
-        # Find a cidr that is not in use yet and create a subnet with it
-        subnet = self.create_subnet(network)
-        subnet_id = subnet['id']
-        # Verify subnet update
-        new_name = "New_subnet"
-        body = self.client.update_subnet(subnet_id, name=new_name)
-        updated_subnet = body['subnet']
-        self.assertEqual(updated_subnet['name'], new_name)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('2bf13842-c93f-4a69-83ed-717d2ec3b44e')
-    def test_show_network(self):
-        # Verify the details of a network
-        body = self.client.show_network(self.network['id'])
-        network = body['network']
-        fields = ['id', 'name']
-        if test.is_extension_enabled('net-mtu', 'network'):
-            fields.append('mtu')
-        for key in fields:
-            self.assertEqual(network[key], self.network[key])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('867819bb-c4b6-45f7-acf9-90edcf70aa5e')
-    def test_show_network_fields(self):
-        # Verify specific fields of a network
-        fields = ['id', 'name']
-        if test.is_extension_enabled('net-mtu', 'network'):
-            fields.append('mtu')
-        body = self.client.show_network(self.network['id'],
-                                        fields=fields)
-        network = body['network']
-        self.assertEqual(sorted(network.keys()), sorted(fields))
-        for field_name in fields:
-            self.assertEqual(network[field_name], self.network[field_name])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f7ffdeda-e200-4a7a-bcbe-05716e86bf43')
-    def test_list_networks(self):
-        # Verify the network exists in the list of all networks
-        body = self.client.list_networks()
-        networks = [network['id'] for network in body['networks']
-                    if network['id'] == self.network['id']]
-        self.assertNotEmpty(networks, "Created network not found in the list")
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('6ae6d24f-9194-4869-9c85-c313cb20e080')
-    def test_list_networks_fields(self):
-        # Verify specific fields of the networks
-        fields = ['id', 'name']
-        if test.is_extension_enabled('net-mtu', 'network'):
-            fields.append('mtu')
-        body = self.client.list_networks(fields=fields)
-        networks = body['networks']
-        self.assertNotEmpty(networks, "Network list returned is empty")
-        for network in networks:
-            self.assertEqual(sorted(network.keys()), sorted(fields))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc')
-    def test_show_subnet(self):
-        # Verify the details of a subnet
-        body = self.client.show_subnet(self.subnet['id'])
-        subnet = body['subnet']
-        self.assertNotEmpty(subnet, "Subnet returned has no fields")
-        for key in ['id', 'cidr']:
-            self.assertIn(key, subnet)
-            self.assertEqual(subnet[key], self.subnet[key])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('270fff0b-8bfc-411f-a184-1e8fd35286f0')
-    def test_show_subnet_fields(self):
-        # Verify specific fields of a subnet
-        fields = ['id', 'network_id']
-        body = self.client.show_subnet(self.subnet['id'],
-                                       fields=fields)
-        subnet = body['subnet']
-        self.assertEqual(sorted(subnet.keys()), sorted(fields))
-        for field_name in fields:
-            self.assertEqual(subnet[field_name], self.subnet[field_name])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('db68ba48-f4ea-49e9-81d1-e367f6d0b20a')
-    def test_list_subnets(self):
-        # Verify the subnet exists in the list of all subnets
-        body = self.client.list_subnets()
-        subnets = [subnet['id'] for subnet in body['subnets']
-                   if subnet['id'] == self.subnet['id']]
-        self.assertNotEmpty(subnets, "Created subnet not found in the list")
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('842589e3-9663-46b0-85e4-7f01273b0412')
-    def test_list_subnets_fields(self):
-        # Verify specific fields of subnets
-        fields = ['id', 'network_id']
-        body = self.client.list_subnets(fields=fields)
-        subnets = body['subnets']
-        self.assertNotEmpty(subnets, "Subnet list returned is empty")
-        for subnet in subnets:
-            self.assertEqual(sorted(subnet.keys()), sorted(fields))
-
-    def _try_delete_network(self, net_id):
-        # delete network, if it exists
-        try:
-            self.client.delete_network(net_id)
-        # if network is not found, this means it was deleted in the test
-        except lib_exc.NotFound:
-            pass
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f04f61a9-b7f3-4194-90b2-9bcf660d1bfe')
-    def test_delete_network_with_subnet(self):
-        # Creates a network
-        name = data_utils.rand_name('network-')
-        body = self.client.create_network(name=name)
-        network = body['network']
-        net_id = network['id']
-        self.addCleanup(self._try_delete_network, net_id)
-
-        # Find a cidr that is not in use yet and create a subnet with it
-        subnet = self.create_subnet(network)
-        subnet_id = subnet['id']
-
-        # Delete network while the subnet still exists
-        body = self.client.delete_network(net_id)
-
-        # Verify that the subnet got automatically deleted.
-        self.assertRaises(lib_exc.NotFound, self.client.show_subnet,
-                          subnet_id)
-
-        # create_subnet added the subnet to the cleanup list, but it has
-        # already been deleted here; remove it from the list so cleanup
-        # does not try to delete it again.
-        self.subnets.pop()
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('d2d596e2-8e76-47a9-ac51-d4648009f4d3')
-    def test_create_delete_subnet_without_gateway(self):
-        self._create_verify_delete_subnet()
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('9393b468-186d-496d-aa36-732348cd76e7')
-    def test_create_delete_subnet_with_gw(self):
-        self._create_verify_delete_subnet(
-            **self.subnet_dict(['gateway']))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('bec949c4-3147-4ba6-af5f-cd2306118404')
-    def test_create_delete_subnet_with_allocation_pools(self):
-        self._create_verify_delete_subnet(
-            **self.subnet_dict(['allocation_pools']))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('8217a149-0c6c-4cfb-93db-0486f707d13f')
-    def test_create_delete_subnet_with_gw_and_allocation_pools(self):
-        self._create_verify_delete_subnet(**self.subnet_dict(
-            ['gateway', 'allocation_pools']))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('d830de0a-be47-468f-8f02-1fd996118289')
-    def test_create_delete_subnet_with_host_routes_and_dns_nameservers(self):
-        self._create_verify_delete_subnet(
-            **self.subnet_dict(['host_routes', 'dns_nameservers']))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('94ce038d-ff0a-4a4c-a56b-09da3ca0b55d')
-    def test_create_delete_subnet_with_dhcp_enabled(self):
-        self._create_verify_delete_subnet(enable_dhcp=True)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
-    def test_update_subnet_gw_dns_host_routes_dhcp(self):
-        network = self.create_network()
-        self.addCleanup(self._delete_network, network)
-
-        subnet = self.create_subnet(
-            network, **self.subnet_dict(['gateway', 'host_routes',
-                                        'dns_nameservers',
-                                         'allocation_pools']))
-        subnet_id = subnet['id']
-        new_gateway = str(netaddr.IPAddress(
-                          self._subnet_data[self._ip_version]['gateway']) + 1)
-        # Verify subnet update
-        new_host_routes = self._subnet_data[self._ip_version][
-            'new_host_routes']
-
-        new_dns_nameservers = self._subnet_data[self._ip_version][
-            'new_dns_nameservers']
-        kwargs = {'host_routes': new_host_routes,
-                  'dns_nameservers': new_dns_nameservers,
-                  'gateway_ip': new_gateway, 'enable_dhcp': True}
-
-        new_name = "New_subnet"
-        body = self.client.update_subnet(subnet_id, name=new_name,
-                                         **kwargs)
-        updated_subnet = body['subnet']
-        kwargs['name'] = new_name
-        self.assertEqual(sorted(updated_subnet['dns_nameservers']),
-                         sorted(kwargs['dns_nameservers']))
-        del subnet['dns_nameservers'], kwargs['dns_nameservers']
-
-        self._compare_resource_attrs(updated_subnet, kwargs)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('a4d9ec4c-0306-4111-a75c-db01a709030b')
-    def test_create_delete_subnet_all_attributes(self):
-        self._create_verify_delete_subnet(
-            enable_dhcp=True,
-            **self.subnet_dict(['gateway', 'host_routes', 'dns_nameservers']))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('af774677-42a9-4e4b-bb58-16fe6a5bc1ec')
-    def test_external_network_visibility(self):
-        """Verifies user can see external networks but not subnets."""
-        body = self.client.list_networks(**{'router:external': True})
-        networks = [network['id'] for network in body['networks']]
-        self.assertNotEmpty(networks, "No external networks found")
-
-        nonexternal = [net for net in body['networks'] if
-                       not net['router:external']]
-        self.assertEmpty(nonexternal, "Found non-external networks"
-                                      " in filtered list (%s)." % nonexternal)
-        self.assertIn(CONF.network.public_network_id, networks)
-
-        subnets_iter = (network['subnets'] for network in body['networks'])
-        # subnets_iter is a generator of per-network subnet-id lists;
-        # itertools.chain flattens it into a single iterator of UUIDs
-        public_subnets_iter = itertools.chain(*subnets_iter)
-        body = self.client.list_subnets()
-        subnets = [sub['id'] for sub in body['subnets']
-                   if sub['id'] in public_subnets_iter]
-        self.assertEmpty(subnets, "Public subnets visible")
-
-
-class BulkNetworkOpsTestJSON(base.BaseNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        bulk network creation
-        bulk subnet creation
-        bulk port creation
-        list tenant's networks
-
-    v2.0 of the Neutron API is assumed. It is also assumed that the following
-    options are defined in the [network] section of etc/tempest.conf:
-
-        tenant_network_cidr with a block of CIDRs from which smaller blocks
-        can be allocated for tenant networks
-
-        tenant_network_mask_bits with the mask bits to be used to partition the
-        block defined by tenant_network_cidr
-    """
-
-    def _delete_networks(self, created_networks):
-        for n in created_networks:
-            self.client.delete_network(n['id'])
-        # Asserting that the networks are not found in the list after deletion
-        body = self.client.list_networks()
-        networks_list = [network['id'] for network in body['networks']]
-        for n in created_networks:
-            self.assertNotIn(n['id'], networks_list)
-
-    def _delete_subnets(self, created_subnets):
-        for n in created_subnets:
-            self.client.delete_subnet(n['id'])
-        # Asserting that the subnets are not found in the list after deletion
-        body = self.client.list_subnets()
-        subnets_list = [subnet['id'] for subnet in body['subnets']]
-        for n in created_subnets:
-            self.assertNotIn(n['id'], subnets_list)
-
-    def _delete_ports(self, created_ports):
-        for n in created_ports:
-            self.client.delete_port(n['id'])
-        # Asserting that the ports are not found in the list after deletion
-        body = self.client.list_ports()
-        ports_list = [port['id'] for port in body['ports']]
-        for n in created_ports:
-            self.assertNotIn(n['id'], ports_list)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')
-    def test_bulk_create_delete_network(self):
-        # Creates 2 networks in one request
-        network_names = [data_utils.rand_name('network-'),
-                         data_utils.rand_name('network-')]
-        body = self.client.create_bulk_network(network_names)
-        created_networks = body['networks']
-        self.addCleanup(self._delete_networks, created_networks)
-        # Asserting that the networks are found in the list after creation
-        body = self.client.list_networks()
-        networks_list = [network['id'] for network in body['networks']]
-        for n in created_networks:
-            self.assertIsNotNone(n['id'])
-            self.assertIn(n['id'], networks_list)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('8936533b-c0aa-4f29-8e53-6cc873aec489')
-    def test_bulk_create_delete_subnet(self):
-        networks = [self.create_network(), self.create_network()]
-        # Creates 2 subnets in one request
-        if self._ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
-            mask_bits = CONF.network.tenant_network_mask_bits
-        else:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
-            mask_bits = CONF.network.tenant_network_v6_mask_bits
-
-        cidrs = list(cidr.subnet(mask_bits))
-
-        names = [data_utils.rand_name('subnet-') for i in range(len(networks))]
-        subnets_list = []
-        for i in range(len(names)):
-            p1 = {
-                'network_id': networks[i]['id'],
-                'cidr': str(cidrs[i]),
-                'name': names[i],
-                'ip_version': self._ip_version
-            }
-            subnets_list.append(p1)
-        del subnets_list[1]['name']
-        body = self.client.create_bulk_subnet(subnets_list)
-        created_subnets = body['subnets']
-        self.addCleanup(self._delete_subnets, created_subnets)
-        # Asserting that the subnets are found in the list after creation
-        body = self.client.list_subnets()
-        subnets_list = [subnet['id'] for subnet in body['subnets']]
-        for n in created_subnets:
-            self.assertIsNotNone(n['id'])
-            self.assertIn(n['id'], subnets_list)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('48037ff2-e889-4c3b-b86a-8e3f34d2d060')
-    def test_bulk_create_delete_port(self):
-        networks = [self.create_network(), self.create_network()]
-        # Creates 2 ports in one request
-        names = [data_utils.rand_name('port-') for i in range(len(networks))]
-        port_list = []
-        state = [True, False]
-        for i in range(len(names)):
-            p1 = {
-                'network_id': networks[i]['id'],
-                'name': names[i],
-                'admin_state_up': state[i],
-            }
-            port_list.append(p1)
-        del port_list[1]['name']
-        body = self.client.create_bulk_port(port_list)
-        created_ports = body['ports']
-        self.addCleanup(self._delete_ports, created_ports)
-        # Asserting that the ports are found in the list after creation
-        body = self.client.list_ports()
-        ports_list = [port['id'] for port in body['ports']]
-        for n in created_ports:
-            self.assertIsNotNone(n['id'])
-            self.assertIn(n['id'], ports_list)
-
-
-class BulkNetworkOpsIpV6TestJSON(BulkNetworkOpsTestJSON):
-    _ip_version = 6
-
-
-class NetworksIpV6TestJSON(NetworksTestJSON):
-    _ip_version = 6
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e41a4888-65a6-418c-a095-f7c2ef4ad59a')
-    def test_create_delete_subnet_with_gw(self):
-        net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
-        gateway = str(netaddr.IPAddress(net.first + 2))
-        name = data_utils.rand_name('network-')
-        network = self.create_network(network_name=name)
-        subnet = self.create_subnet(network, gateway)
-        # Verifies Subnet GW in IPv6
-        self.assertEqual(subnet['gateway_ip'], gateway)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('ebb4fd95-524f-46af-83c1-0305b239338f')
-    def test_create_delete_subnet_with_default_gw(self):
-        net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
-        gateway_ip = str(netaddr.IPAddress(net.first + 1))
-        name = data_utils.rand_name('network-')
-        network = self.create_network(network_name=name)
-        subnet = self.create_subnet(network)
-        # Verifies Subnet GW in IPv6
-        self.assertEqual(subnet['gateway_ip'], gateway_ip)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('a9653883-b2a4-469b-8c3c-4518430a7e55')
-    def test_create_list_subnet_with_no_gw64_one_network(self):
-        name = data_utils.rand_name('network-')
-        network = self.create_network(name)
-        ipv6_gateway = self.subnet_dict(['gateway'])['gateway']
-        subnet1 = self.create_subnet(network,
-                                     ip_version=6,
-                                     gateway=ipv6_gateway)
-        self.assertEqual(netaddr.IPNetwork(subnet1['cidr']).version, 6,
-                         'The created subnet is not IPv6')
-        subnet2 = self.create_subnet(network,
-                                     gateway=None,
-                                     ip_version=4)
-        self.assertEqual(netaddr.IPNetwork(subnet2['cidr']).version, 4,
-                         'The created subnet is not IPv4')
-        # Verifies Subnet GW is set in IPv6
-        self.assertEqual(subnet1['gateway_ip'], ipv6_gateway)
-        # Verifies Subnet GW is None in IPv4
-        self.assertIsNone(subnet2['gateway_ip'])
-        # Verify that both subnets belong to the same network
-        body = self.client.list_subnets()
-        subnets = [sub['id'] for sub in body['subnets']
-                   if sub['network_id'] == network['id']]
-        test_subnet_ids = [sub['id'] for sub in (subnet1, subnet2)]
-        self.assertItemsEqual(subnets,
-                              test_subnet_ids,
-                              'Subnets are not in the same network')
-
-
-class NetworksIpV6TestAttrs(NetworksIpV6TestJSON):
-
-    @classmethod
-    def resource_setup(cls):
-        if not CONF.network_feature_enabled.ipv6_subnet_attributes:
-            raise cls.skipException("IPv6 extended attributes for "
-                                    "subnets not available")
-        super(NetworksIpV6TestAttrs, cls).resource_setup()
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('da40cd1b-a833-4354-9a85-cd9b8a3b74ca')
-    def test_create_delete_subnet_with_v6_attributes_stateful(self):
-        self._create_verify_delete_subnet(
-            gateway=self._subnet_data[self._ip_version]['gateway'],
-            ipv6_ra_mode='dhcpv6-stateful',
-            ipv6_address_mode='dhcpv6-stateful')
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('176b030f-a923-4040-a755-9dc94329e60c')
-    def test_create_delete_subnet_with_v6_attributes_slaac(self):
-        self._create_verify_delete_subnet(
-            ipv6_ra_mode='slaac',
-            ipv6_address_mode='slaac')
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('7d410310-8c86-4902-adf9-865d08e31adb')
-    def test_create_delete_subnet_with_v6_attributes_stateless(self):
-        self._create_verify_delete_subnet(
-            ipv6_ra_mode='dhcpv6-stateless',
-            ipv6_address_mode='dhcpv6-stateless')
-
-    def _test_delete_subnet_with_ports(self, mode):
-        """Create subnet and delete it with existing ports"""
-        slaac_network = self.create_network()
-        subnet_slaac = self.create_subnet(slaac_network,
-                                          **{'ipv6_ra_mode': mode,
-                                             'ipv6_address_mode': mode})
-        port = self.create_port(slaac_network)
-        self.assertIsNotNone(port['fixed_ips'][0]['ip_address'])
-        self.client.delete_subnet(subnet_slaac['id'])
-        self.subnets.pop()
-        subnets = self.client.list_subnets()
-        subnet_ids = [subnet['id'] for subnet in subnets['subnets']]
-        self.assertNotIn(subnet_slaac['id'], subnet_ids,
-                         "Subnet wasn't deleted")
-        self.assertRaisesRegexp(
-            lib_exc.Conflict,
-            "There are one or more ports still in use on the network",
-            self.client.delete_network,
-            slaac_network['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('88554555-ebf8-41ef-9300-4926d45e06e9')
-    def test_create_delete_slaac_subnet_with_ports(self):
-        """Test deleting subnet with SLAAC ports
-
-        Create a subnet with SLAAC and create ports on the network;
-        the subnet can then be deleted without first deleting the
-        ports, but the network itself still cannot be deleted.
-        """
-        self._test_delete_subnet_with_ports("slaac")
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('2de6ab5a-fcf0-4144-9813-f91a940291f1')
-    def test_create_delete_stateless_subnet_with_ports(self):
-        """Test deleting subnet with DHCPv6 stateless ports
-
-        Create a subnet with DHCPv6 stateless and create ports on the
-        network; the subnet can then be deleted without first deleting
-        the ports, but the network itself still cannot be deleted.
-        """
-        self._test_delete_subnet_with_ports("dhcpv6-stateless")
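The helper _create_subnet_with_last_subnet_block in the file above partitions the tenant CIDR into equal blocks and picks the last one. The same arithmetic in isolation, a runnable netaddr sketch using example values rather than tempest.conf:

    import netaddr

    # Example tenant CIDR and mask bits, standing in for
    # CONF.network.tenant_network_cidr / tenant_network_mask_bits.
    tenant_cidr = netaddr.IPNetwork('10.100.0.0/16')
    mask_bits = 24
    # Last /24 carved out of the /16, plus its first-host gateway.
    subnet_cidr = list(tenant_cidr.subnet(mask_bits))[-1]
    gateway_ip = netaddr.IPAddress(subnet_cidr.first + 1)
    print(subnet_cidr, gateway_ip)   # 10.100.255.0/24 10.100.255.1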
diff --git a/neutron/tests/api/test_networks_negative.py b/neutron/tests/api/test_networks_negative.py
deleted file mode 100644 (file)
index c4a30f5..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2013 Huawei Technologies Co.,LTD.
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.tempest import test
-
-
-class NetworksNegativeTestJSON(base.BaseNetworkTest):
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('9293e937-824d-42d2-8d5b-e985ea67002a')
-    def test_show_non_existent_network(self):
-        non_exist_id = data_utils.rand_name('network')
-        self.assertRaises(lib_exc.NotFound, self.client.show_network,
-                          non_exist_id)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('d746b40c-5e09-4043-99f7-cba1be8b70df')
-    def test_show_non_existent_subnet(self):
-        non_exist_id = data_utils.rand_name('subnet')
-        self.assertRaises(lib_exc.NotFound, self.client.show_subnet,
-                          non_exist_id)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('a954861d-cbfd-44e8-b0a9-7fab111f235d')
-    def test_show_non_existent_port(self):
-        non_exist_id = data_utils.rand_name('port')
-        self.assertRaises(lib_exc.NotFound, self.client.show_port,
-                          non_exist_id)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('98bfe4e3-574e-4012-8b17-b2647063de87')
-    def test_update_non_existent_network(self):
-        non_exist_id = data_utils.rand_name('network')
-        self.assertRaises(lib_exc.NotFound, self.client.update_network,
-                          non_exist_id, name="new_name")
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('03795047-4a94-4120-a0a1-bd376e36fd4e')
-    def test_delete_non_existent_network(self):
-        non_exist_id = data_utils.rand_name('network')
-        self.assertRaises(lib_exc.NotFound, self.client.delete_network,
-                          non_exist_id)
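Every test in the negative file above follows the same pattern: call a show or delete method with a random identifier and expect NotFound. A sketch of that pattern factored into one helper (a hypothetical function; `test` is assumed to be a BaseNetworkTest instance as above):

    from tempest_lib.common.utils import data_utils
    from tempest_lib import exceptions as lib_exc

    def assert_not_found_for_random_id(test, calls):
        # Each call gets a random name that cannot match a real resource.
        for call in calls:
            test.assertRaises(lib_exc.NotFound, call,
                              data_utils.rand_name('missing'))

Invoked as, e.g., assert_not_found_for_random_id(self, [self.client.show_network, self.client.show_subnet, self.client.show_port, self.client.delete_network]).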
diff --git a/neutron/tests/api/test_ports.py b/neutron/tests/api/test_ports.py
deleted file mode 100644 (file)
index 5978856..0000000
+++ /dev/null
@@ -1,403 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import socket
-
-import netaddr
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base
-from neutron.tests.api import base_security_groups as sec_base
-from neutron.tests.tempest.common import custom_matchers
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class PortsTestJSON(sec_base.BaseSecGroupTest):
-
-    """
-    Test the following operations for ports:
-
-        port create
-        port delete
-        port list
-        port show
-        port update
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        super(PortsTestJSON, cls).resource_setup()
-        cls.network = cls.create_network()
-        cls.port = cls.create_port(cls.network)
-
-    def _delete_port(self, port_id):
-        self.client.delete_port(port_id)
-        body = self.client.list_ports()
-        ports_list = body['ports']
-        self.assertNotIn(port_id, [n['id'] for n in ports_list])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
-    def test_create_update_delete_port(self):
-        # Verify port creation
-        body = self.client.create_port(network_id=self.network['id'])
-        port = body['port']
-        # Schedule port deletion with verification upon test completion
-        self.addCleanup(self._delete_port, port['id'])
-        self.assertTrue(port['admin_state_up'])
-        # Verify port update
-        new_name = "New_Port"
-        body = self.client.update_port(port['id'],
-                                       name=new_name,
-                                       admin_state_up=False)
-        updated_port = body['port']
-        self.assertEqual(updated_port['name'], new_name)
-        self.assertFalse(updated_port['admin_state_up'])
-
-    @test.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
-    def test_create_bulk_port(self):
-        network1 = self.network
-        name = data_utils.rand_name('network-')
-        network2 = self.create_network(network_name=name)
-        network_list = [network1['id'], network2['id']]
-        port_list = [{'network_id': net_id} for net_id in network_list]
-        body = self.client.create_bulk_port(port_list)
-        created_ports = body['ports']
-        port1 = created_ports[0]
-        port2 = created_ports[1]
-        self.addCleanup(self._delete_port, port1['id'])
-        self.addCleanup(self._delete_port, port2['id'])
-        self.assertEqual(port1['network_id'], network1['id'])
-        self.assertEqual(port2['network_id'], network2['id'])
-        self.assertTrue(port1['admin_state_up'])
-        self.assertTrue(port2['admin_state_up'])
-
-    @classmethod
-    def _get_ipaddress_from_tempest_conf(cls):
-        """Return first subnet gateway for configured CIDR """
-        if cls._ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
-
-        elif cls._ip_version == 6:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
-
-        return netaddr.IPAddress(cidr)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
-    def test_create_port_in_allowed_allocation_pools(self):
-        network = self.create_network()
-        net_id = network['id']
-        address = self._get_ipaddress_from_tempest_conf()
-        allocation_pools = {'allocation_pools': [{'start': str(address + 4),
-                                                  'end': str(address + 6)}]}
-        subnet = self.create_subnet(network, **allocation_pools)
-        self.addCleanup(self.client.delete_subnet, subnet['id'])
-        body = self.client.create_port(network_id=net_id)
-        self.addCleanup(self.client.delete_port, body['port']['id'])
-        port = body['port']
-        ip_address = port['fixed_ips'][0]['ip_address']
-        start_ip_address = allocation_pools['allocation_pools'][0]['start']
-        end_ip_address = allocation_pools['allocation_pools'][0]['end']
-        ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
-        self.assertIn(ip_address, ip_range)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
-    def test_show_port(self):
-        # Verify the details of port
-        body = self.client.show_port(self.port['id'])
-        port = body['port']
-        self.assertIn('id', port)
-        # TODO(Santosh): temporary workaround to compare create_port and
-        # show_port dict elements. Remove this once the extra_dhcp_opts
-        # issue is fixed in neutron (bug 1365341).
-        self.assertThat(self.port,
-                        custom_matchers.MatchesDictExceptForKeys
-                        (port, excluded_keys=['extra_dhcp_opts']))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
-    def test_show_port_fields(self):
-        # Verify specific fields of a port
-        fields = ['id', 'mac_address']
-        body = self.client.show_port(self.port['id'],
-                                     fields=fields)
-        port = body['port']
-        self.assertEqual(sorted(port.keys()), sorted(fields))
-        for field_name in fields:
-            self.assertEqual(port[field_name], self.port[field_name])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
-    def test_list_ports(self):
-        # Verify the port exists in the list of all ports
-        body = self.client.list_ports()
-        ports = [port['id'] for port in body['ports']
-                 if port['id'] == self.port['id']]
-        self.assertNotEmpty(ports, "Created port not found in the list")
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
-    def test_port_list_filter_by_router_id(self):
-        # Create a router
-        network = self.create_network()
-        self.addCleanup(self.client.delete_network, network['id'])
-        subnet = self.create_subnet(network)
-        self.addCleanup(self.client.delete_subnet, subnet['id'])
-        router = self.create_router(data_utils.rand_name('router-'))
-        self.addCleanup(self.client.delete_router, router['id'])
-        port = self.client.create_port(network_id=network['id'])
-        # Add router interface to port created above
-        self.client.add_router_interface_with_port_id(
-            router['id'], port['port']['id'])
-        self.addCleanup(self.client.remove_router_interface_with_port_id,
-                        router['id'], port['port']['id'])
-        # List ports filtered by router_id
-        port_list = self.client.list_ports(device_id=router['id'])
-        ports = port_list['ports']
-        self.assertEqual(len(ports), 1)
-        self.assertEqual(ports[0]['id'], port['port']['id'])
-        self.assertEqual(ports[0]['device_id'], router['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('ff7f117f-f034-4e0e-abff-ccef05c454b4')
-    def test_list_ports_fields(self):
-        # Verify specific fields of ports
-        fields = ['id', 'mac_address']
-        body = self.client.list_ports(fields=fields)
-        ports = body['ports']
-        self.assertNotEmpty(ports, "Port list returned is empty")
-        # Asserting the fields returned are correct
-        for port in ports:
-            self.assertEqual(sorted(fields), sorted(port.keys()))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
-    def test_create_update_port_with_second_ip(self):
-        # Create a network with two subnets
-        network = self.create_network()
-        self.addCleanup(self.client.delete_network, network['id'])
-        subnet_1 = self.create_subnet(network)
-        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
-        subnet_2 = self.create_subnet(network)
-        self.addCleanup(self.client.delete_subnet, subnet_2['id'])
-        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
-        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
-
-        fixed_ips = fixed_ip_1 + fixed_ip_2
-
-        # Create a port with multiple IP addresses
-        port = self.create_port(network,
-                                fixed_ips=fixed_ips)
-        self.addCleanup(self.client.delete_port, port['id'])
-        self.assertEqual(2, len(port['fixed_ips']))
-        check_fixed_ips = [subnet_1['id'], subnet_2['id']]
-        for item in port['fixed_ips']:
-            self.assertIn(item['subnet_id'], check_fixed_ips)
-
-        # Update the port to return to a single IP address
-        port = self.update_port(port, fixed_ips=fixed_ip_1)
-        self.assertEqual(1, len(port['fixed_ips']))
-
-        # Update the port with a second IP address from second subnet
-        port = self.update_port(port, fixed_ips=fixed_ips)
-        self.assertEqual(2, len(port['fixed_ips']))
-
-    def _update_port_with_security_groups(self, security_groups_names):
-        subnet_1 = self.create_subnet(self.network)
-        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
-        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
-
-        security_groups_list = list()
-        for name in security_groups_names:
-            group_create_body = self.client.create_security_group(
-                name=name)
-            self.addCleanup(self.client.delete_security_group,
-                            group_create_body['security_group']['id'])
-            security_groups_list.append(group_create_body['security_group']
-                                        ['id'])
-        # Create a port
-        sec_grp_name = data_utils.rand_name('secgroup')
-        security_group = self.client.create_security_group(name=sec_grp_name)
-        self.addCleanup(self.client.delete_security_group,
-                        security_group['security_group']['id'])
-        post_body = {
-            "name": data_utils.rand_name('port-'),
-            "security_groups": [security_group['security_group']['id']],
-            "network_id": self.network['id'],
-            "admin_state_up": True,
-            "fixed_ips": fixed_ip_1}
-        body = self.client.create_port(**post_body)
-        self.addCleanup(self.client.delete_port, body['port']['id'])
-        port = body['port']
-
-        # Update the port with security groups
-        subnet_2 = self.create_subnet(self.network)
-        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
-        update_body = {"name": data_utils.rand_name('port-'),
-                       "admin_state_up": False,
-                       "fixed_ips": fixed_ip_2,
-                       "security_groups": security_groups_list}
-        body = self.client.update_port(port['id'], **update_body)
-        port_show = body['port']
-        # Verify the security groups and other attributes updated to port
-        exclude_keys = set(port_show).symmetric_difference(update_body)
-        exclude_keys.add('fixed_ips')
-        exclude_keys.add('security_groups')
-        self.assertThat(port_show, custom_matchers.MatchesDictExceptForKeys(
-                        update_body, exclude_keys))
-        self.assertEqual(fixed_ip_2[0]['subnet_id'],
-                         port_show['fixed_ips'][0]['subnet_id'])
-
-        for security_group in security_groups_list:
-            self.assertIn(security_group, port_show['security_groups'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('58091b66-4ff4-4cc1-a549-05d60c7acd1a')
-    def test_update_port_with_security_group_and_extra_attributes(self):
-        self._update_port_with_security_groups(
-            [data_utils.rand_name('secgroup')])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('edf6766d-3d40-4621-bc6e-2521a44c257d')
-    def test_update_port_with_two_security_groups_and_extra_attributes(self):
-        self._update_port_with_security_groups(
-            [data_utils.rand_name('secgroup'),
-             data_utils.rand_name('secgroup')])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
-    def test_create_show_delete_port_user_defined_mac(self):
-        # Create a port for a legal mac
-        body = self.client.create_port(network_id=self.network['id'])
-        old_port = body['port']
-        free_mac_address = old_port['mac_address']
-        self.client.delete_port(old_port['id'])
-        # Create a new port with user defined mac
-        body = self.client.create_port(network_id=self.network['id'],
-                                       mac_address=free_mac_address)
-        self.addCleanup(self.client.delete_port, body['port']['id'])
-        port = body['port']
-        body = self.client.show_port(port['id'])
-        show_port = body['port']
-        self.assertEqual(free_mac_address,
-                         show_port['mac_address'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('4179dcb9-1382-4ced-84fe-1b91c54f5735')
-    def test_create_port_with_no_securitygroups(self):
-        network = self.create_network()
-        self.addCleanup(self.client.delete_network, network['id'])
-        subnet = self.create_subnet(network)
-        self.addCleanup(self.client.delete_subnet, subnet['id'])
-        port = self.create_port(network, security_groups=[])
-        self.addCleanup(self.client.delete_port, port['id'])
-        self.assertIsNotNone(port['security_groups'])
-        self.assertEmpty(port['security_groups'])
-
-
-class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(PortsAdminExtendedAttrsTestJSON, cls).resource_setup()
-        cls.identity_client = cls._get_identity_admin_client()
-        cls.tenant = cls.identity_client.get_tenant_by_name(
-            CONF.identity.tenant_name)
-        cls.network = cls.create_network()
-        cls.host_id = socket.gethostname()
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
-    def test_create_port_binding_ext_attr(self):
-        post_body = {"network_id": self.network['id'],
-                     "binding:host_id": self.host_id}
-        body = self.admin_client.create_port(**post_body)
-        port = body['port']
-        self.addCleanup(self.admin_client.delete_port, port['id'])
-        host_id = port['binding:host_id']
-        self.assertIsNotNone(host_id)
-        self.assertEqual(self.host_id, host_id)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
-    def test_update_port_binding_ext_attr(self):
-        post_body = {"network_id": self.network['id']}
-        body = self.admin_client.create_port(**post_body)
-        port = body['port']
-        self.addCleanup(self.admin_client.delete_port, port['id'])
-        update_body = {"binding:host_id": self.host_id}
-        body = self.admin_client.update_port(port['id'], **update_body)
-        updated_port = body['port']
-        host_id = updated_port['binding:host_id']
-        self.assertIsNotNone(host_id)
-        self.assertEqual(self.host_id, host_id)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8')
-    def test_list_ports_binding_ext_attr(self):
-        # Create a new port
-        post_body = {"network_id": self.network['id']}
-        body = self.admin_client.create_port(**post_body)
-        port = body['port']
-        self.addCleanup(self.admin_client.delete_port, port['id'])
-
-        # Update the port's binding attributes so that it is now 'bound'
-        # to a host
-        update_body = {"binding:host_id": self.host_id}
-        self.admin_client.update_port(port['id'], **update_body)
-
-        # List all ports, ensure new port is part of list and its binding
-        # attributes are set and accurate
-        body = self.admin_client.list_ports()
-        ports_list = body['ports']
-        pids_list = [p['id'] for p in ports_list]
-        self.assertIn(port['id'], pids_list)
-        listed_port = [p for p in ports_list if p['id'] == port['id']]
-        self.assertEqual(1, len(listed_port),
-                         'Multiple ports listed with id %s in ports listing: '
-                         '%s' % (port['id'], ports_list))
-        self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
-    def test_show_port_binding_ext_attr(self):
-        body = self.admin_client.create_port(network_id=self.network['id'])
-        port = body['port']
-        self.addCleanup(self.admin_client.delete_port, port['id'])
-        body = self.admin_client.show_port(port['id'])
-        show_port = body['port']
-        self.assertEqual(port['binding:host_id'],
-                         show_port['binding:host_id'])
-        self.assertEqual(port['binding:vif_type'],
-                         show_port['binding:vif_type'])
-        self.assertEqual(port['binding:vif_details'],
-                         show_port['binding:vif_details'])
-
-
-class PortsIpV6TestJSON(PortsTestJSON):
-    _ip_version = 6
-    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
-    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
-
-
-class PortsAdminExtendedAttrsIpV6TestJSON(PortsAdminExtendedAttrsTestJSON):
-    _ip_version = 6
-    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
-    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
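The PortsAdminExtendedAttrs classes above verify that binding:host_id, set through the admin client, round-trips through create, update, list and show. A minimal sketch of that round trip, assuming `admin_client` and `network` from the surrounding tests:

    import socket

    # Bind the port to this host at creation time; the key must be
    # passed via **kwargs because of the colon in its name.
    host_id = socket.gethostname()
    port = admin_client.create_port(
        network_id=network['id'], **{'binding:host_id': host_id})['port']
    shown = admin_client.show_port(port['id'])['port']
    assert shown['binding:host_id'] == host_id
    admin_client.delete_port(port['id'])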
diff --git a/neutron/tests/api/test_qos.py b/neutron/tests/api/test_qos.py
deleted file mode 100644 (file)
index 5f26afd..0000000
+++ /dev/null
@@ -1,453 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib import exceptions
-import testtools
-
-from neutron.services.qos import qos_consts
-from neutron.tests.api import base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class QosTestJSON(base.BaseAdminNetworkTest):
-    @classmethod
-    def resource_setup(cls):
-        super(QosTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('qos', 'network'):
-            msg = "qos extension not enabled."
-            raise cls.skipException(msg)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('108fbdf7-3463-4e47-9871-d07f3dcf5bbb')
-    def test_create_policy(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy desc1',
-                                        shared=False)
-
-        # Test 'show policy'
-        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
-        retrieved_policy = retrieved_policy['policy']
-        self.assertEqual('test-policy', retrieved_policy['name'])
-        self.assertEqual('test policy desc1', retrieved_policy['description'])
-        self.assertFalse(retrieved_policy['shared'])
-
-        # Test 'list policies'
-        policies = self.admin_client.list_qos_policies()['policies']
-        policies_ids = [p['id'] for p in policies]
-        self.assertIn(policy['id'], policies_ids)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f8d20e92-f06d-4805-b54f-230f77715815')
-    def test_list_policy_filter_by_name(self):
-        self.create_qos_policy(name='test', description='test policy',
-                               shared=False)
-        self.create_qos_policy(name='test2', description='test policy',
-                               shared=False)
-
-        policies = (self.admin_client.
-                    list_qos_policies(name='test')['policies'])
-        self.assertEqual(1, len(policies))
-
-        retrieved_policy = policies[0]
-        self.assertEqual('test', retrieved_policy['name'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('8e88a54b-f0b2-4b7d-b061-a15d93c2c7d6')
-    def test_policy_update(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='',
-                                        shared=False)
-        self.admin_client.update_qos_policy(policy['id'],
-                                            description='test policy desc2',
-                                            shared=True)
-
-        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
-        retrieved_policy = retrieved_policy['policy']
-        self.assertEqual('test policy desc2', retrieved_policy['description'])
-        self.assertTrue(retrieved_policy['shared'])
-        self.assertEqual([], retrieved_policy['rules'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('1cb42653-54bd-4a9a-b888-c55e18199201')
-    def test_delete_policy(self):
-        policy = self.admin_client.create_qos_policy(
-            'test-policy', 'desc', True)['policy']
-
-        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
-        retrieved_policy = retrieved_policy['policy']
-        self.assertEqual('test-policy', retrieved_policy['name'])
-
-        self.admin_client.delete_qos_policy(policy['id'])
-        self.assertRaises(exceptions.NotFound,
-                          self.admin_client.show_qos_policy, policy['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('cf776f77-8d3d-49f2-8572-12d6a1557224')
-    def test_list_admin_rule_types(self):
-        self._test_list_rule_types(self.admin_client)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('49c8ea35-83a9-453a-bd23-239cf3b13929')
-    def test_list_regular_rule_types(self):
-        self._test_list_rule_types(self.client)
-
-    def _test_list_rule_types(self, client):
-        # List supported rule types
-        # TODO(QoS): since the gate runs both the ovs and linuxbridge ml2
-        # drivers, and since the Linux Bridge ml2 driver does not support
-        # QoS yet, the ml2 plugin reports that no rule types are supported.
-        # Once linuxbridge gains QoS support, the list of expected rule
-        # types will change.
-        #
-        # In theory, we could make the test conditional on which ml2 drivers
-        # are enabled in the gate (or, more specifically, on which qos rules
-        # the core plugin claims to support), but that option does not seem
-        # to be available through the tempest_lib framework.
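-        # As an illustration (hypothetical values), once a QoS-capable
-        # driver is enabled the response could look like:
-        #     {'rule_types': [{'type': 'bandwidth_limit'}]}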
-        expected_rule_types = []
-        expected_rule_details = ['type']
-
-        rule_types = client.list_qos_rule_types()
-        actual_list_rule_types = rule_types['rule_types']
-        actual_rule_types = [rule['type'] for rule in actual_list_rule_types]
-
-        # Verify that only the required fields are present in the rule details
-        for rule in actual_list_rule_types:
-            self.assertEqual(tuple(rule.keys()), tuple(expected_rule_details))
-
-        # Verify that the expected rules are present in the actual rule list
-        for rule in expected_rule_types:
-            self.assertIn(rule, actual_rule_types)
-
-    def _disassociate_network(self, client, network_id):
-        client.update_network(network_id, qos_policy_id=None)
-        updated_network = self.admin_client.show_network(network_id)
-        self.assertIsNone(updated_network['network']['qos_policy_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('65b9ef75-1911-406a-bbdb-ca1d68d528b0')
-    def test_policy_association_with_admin_network(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=False)
-        network = self.create_shared_network('test network',
-                                             qos_policy_id=policy['id'])
-
-        retrieved_network = self.admin_client.show_network(network['id'])
-        self.assertEqual(
-            policy['id'], retrieved_network['network']['qos_policy_id'])
-
-        self._disassociate_network(self.admin_client, network['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('1738de5d-0476-4163-9022-5e1b548c208e')
-    def test_policy_association_with_tenant_network(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=True)
-        network = self.create_network('test network',
-                                      qos_policy_id=policy['id'])
-
-        retrieved_network = self.admin_client.show_network(network['id'])
-        self.assertEqual(
-            policy['id'], retrieved_network['network']['qos_policy_id'])
-
-        self._disassociate_network(self.client, network['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('9efe63d0-836f-4cc2-b00c-468e63aa614e')
-    def test_policy_association_with_network_nonexistent_policy(self):
-        self.assertRaises(
-            exceptions.NotFound,
-            self.create_network,
-            'test network',
-            qos_policy_id='9efe63d0-836f-4cc2-b00c-468e63aa614e')
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('1aa55a79-324f-47d9-a076-894a8fc2448b')
-    def test_policy_association_with_network_non_shared_policy(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=False)
-        self.assertRaises(
-            exceptions.NotFound,
-            self.create_network,
-            'test network', qos_policy_id=policy['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('09a9392c-1359-4cbb-989f-fb768e5834a8')
-    def test_policy_update_association_with_admin_network(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=False)
-        network = self.create_shared_network('test network')
-        retrieved_network = self.admin_client.show_network(network['id'])
-        self.assertIsNone(retrieved_network['network']['qos_policy_id'])
-
-        self.admin_client.update_network(network['id'],
-                                         qos_policy_id=policy['id'])
-        retrieved_network = self.admin_client.show_network(network['id'])
-        self.assertEqual(
-            policy['id'], retrieved_network['network']['qos_policy_id'])
-
-        self._disassociate_network(self.admin_client, network['id'])
-
-    def _disassociate_port(self, port_id):
-        self.client.update_port(port_id, qos_policy_id=None)
-        updated_port = self.admin_client.show_port(port_id)
-        self.assertIsNone(updated_port['port']['qos_policy_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('98fcd95e-84cf-4746-860e-44692e674f2e')
-    def test_policy_association_with_port_shared_policy(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=True)
-        network = self.create_shared_network('test network')
-        port = self.create_port(network, qos_policy_id=policy['id'])
-
-        retrieved_port = self.admin_client.show_port(port['id'])
-        self.assertEqual(
-            policy['id'], retrieved_port['port']['qos_policy_id'])
-
-        self._disassociate_port(port['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('49e02f5a-e1dd-41d5-9855-cfa37f2d195e')
-    def test_policy_association_with_port_nonexistent_policy(self):
-        network = self.create_shared_network('test network')
-        self.assertRaises(
-            exceptions.NotFound,
-            self.create_port,
-            network,
-            qos_policy_id='49e02f5a-e1dd-41d5-9855-cfa37f2d195e')
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f53d961c-9fe5-4422-8b66-7add972c6031')
-    def test_policy_association_with_port_non_shared_policy(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=False)
-        network = self.create_shared_network('test network')
-        self.assertRaises(
-            exceptions.NotFound,
-            self.create_port,
-            network, qos_policy_id=policy['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f8163237-fba9-4db5-9526-bad6d2343c76')
-    def test_policy_update_association_with_port_shared_policy(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=True)
-        network = self.create_shared_network('test network')
-        port = self.create_port(network)
-        retrieved_port = self.admin_client.show_port(port['id'])
-        self.assertIsNone(retrieved_port['port']['qos_policy_id'])
-
-        self.client.update_port(port['id'], qos_policy_id=policy['id'])
-        retrieved_port = self.admin_client.show_port(port['id'])
-        self.assertEqual(
-            policy['id'], retrieved_port['port']['qos_policy_id'])
-
-        self._disassociate_port(port['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('18163237-8ba9-4db5-9525-bad6d2343c75')
-    def test_delete_not_allowed_if_policy_in_use_by_network(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=True)
-        network = self.create_shared_network(
-            'test network', qos_policy_id=policy['id'])
-        self.assertRaises(
-            exceptions.Conflict,
-            self.admin_client.delete_qos_policy, policy['id'])
-
-        self._disassociate_network(self.admin_client, network['id'])
-        self.admin_client.delete_qos_policy(policy['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('24153230-84a9-4dd5-9525-bad6d2343c75')
-    def test_delete_not_allowed_if_policy_in_use_by_port(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=True)
-        network = self.create_shared_network('test network')
-        port = self.create_port(network, qos_policy_id=policy['id'])
-        self.assertRaises(
-            exceptions.Conflict,
-            self.admin_client.delete_qos_policy, policy['id'])
-
-        self._disassociate_port(port['id'])
-        self.admin_client.delete_qos_policy(policy['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('a2a5849b-dd06-4b18-9664-0b6828a1fc27')
-    def test_qos_policy_delete_with_rules(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=False)
-        self.admin_client.create_bandwidth_limit_rule(
-            policy['id'], 200, 1337)
-
-        self.admin_client.delete_qos_policy(policy['id'])
-
-        with testtools.ExpectedException(exceptions.NotFound):
-            self.admin_client.show_qos_policy(policy['id'])
-
-
-class QosBandwidthLimitRuleTestJSON(base.BaseAdminNetworkTest):
-    @classmethod
-    def resource_setup(cls):
-        super(QosBandwidthLimitRuleTestJSON, cls).resource_setup()
-        if not test.is_extension_enabled('qos', 'network'):
-            msg = "qos extension not enabled."
-            raise cls.skipException(msg)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('8a59b00b-3e9c-4787-92f8-93a5cdf5e378')
-    def test_rule_create(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=False)
-        rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
-                                                    max_kbps=200,
-                                                    max_burst_kbps=1337)
-
-        # Test 'show rule'
-        retrieved_rule = self.admin_client.show_bandwidth_limit_rule(
-            policy['id'], rule['id'])
-        retrieved_rule = retrieved_rule['bandwidth_limit_rule']
-        self.assertEqual(rule['id'], retrieved_rule['id'])
-        self.assertEqual(200, retrieved_rule['max_kbps'])
-        self.assertEqual(1337, retrieved_rule['max_burst_kbps'])
-
-        # Test 'list rules'
-        rules = self.admin_client.list_bandwidth_limit_rules(policy['id'])
-        rules = rules['bandwidth_limit_rules']
-        rules_ids = [r['id'] for r in rules]
-        self.assertIn(rule['id'], rules_ids)
-
-        # Test 'show policy'
-        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
-        policy_rules = retrieved_policy['policy']['rules']
-        self.assertEqual(1, len(policy_rules))
-        self.assertEqual(rule['id'], policy_rules[0]['id'])
-        self.assertEqual(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT,
-                         policy_rules[0]['type'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('8a59b00b-ab01-4787-92f8-93a5cdf5e378')
-    def test_rule_create_fail_for_the_same_type(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=False)
-        self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
-                                             max_kbps=200,
-                                             max_burst_kbps=1337)
-
-        self.assertRaises(exceptions.Conflict,
-                          self.create_qos_bandwidth_limit_rule,
-                          policy_id=policy['id'],
-                          max_kbps=201, max_burst_kbps=1338)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('149a6988-2568-47d2-931e-2dbc858943b3')
-    def test_rule_update(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=False)
-        rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
-                                                    max_kbps=1,
-                                                    max_burst_kbps=1)
-
-        self.admin_client.update_bandwidth_limit_rule(policy['id'],
-                                                      rule['id'],
-                                                      max_kbps=200,
-                                                      max_burst_kbps=1337)
-
-        retrieved_policy = self.admin_client.show_bandwidth_limit_rule(
-            policy['id'], rule['id'])
-        retrieved_policy = retrieved_policy['bandwidth_limit_rule']
-        self.assertEqual(200, retrieved_policy['max_kbps'])
-        self.assertEqual(1337, retrieved_policy['max_burst_kbps'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('67ee6efd-7b33-4a68-927d-275b4f8ba958')
-    def test_rule_delete(self):
-        policy = self.create_qos_policy(name='test-policy',
-                                        description='test policy',
-                                        shared=False)
-        rule = self.admin_client.create_bandwidth_limit_rule(
-            policy['id'], 200, 1337)['bandwidth_limit_rule']
-
-        retrieved_policy = self.admin_client.show_bandwidth_limit_rule(
-            policy['id'], rule['id'])
-        retrieved_policy = retrieved_policy['bandwidth_limit_rule']
-        self.assertEqual(rule['id'], retrieved_policy['id'])
-
-        self.admin_client.delete_bandwidth_limit_rule(policy['id'], rule['id'])
-        self.assertRaises(exceptions.NotFound,
-                          self.admin_client.show_bandwidth_limit_rule,
-                          policy['id'], rule['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f211222c-5808-46cb-a961-983bbab6b852')
-    def test_rule_create_rule_nonexistent_policy(self):
-        self.assertRaises(
-            exceptions.NotFound,
-            self.create_qos_bandwidth_limit_rule,
-            'policy', 200, 1337)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('eed8e2a6-22da-421b-89b9-935a2c1a1b50')
-    def test_policy_create_forbidden_for_regular_tenants(self):
-        self.assertRaises(
-            exceptions.Forbidden,
-            self.client.create_qos_policy,
-            'test-policy', 'test policy', False)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('a4a2e7ad-786f-4927-a85a-e545a93bd274')
-    def test_rule_create_forbidden_for_regular_tenants(self):
-        self.assertRaises(
-            exceptions.Forbidden,
-            self.client.create_bandwidth_limit_rule,
-            'policy', 1, 2)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('ce0bd0c2-54d9-4e29-85f1-cfb36ac3ebe2')
-    def test_get_rules_by_policy(self):
-        policy1 = self.create_qos_policy(name='test-policy1',
-                                         description='test policy1',
-                                         shared=False)
-        rule1 = self.create_qos_bandwidth_limit_rule(policy_id=policy1['id'],
-                                                     max_kbps=200,
-                                                     max_burst_kbps=1337)
-
-        policy2 = self.create_qos_policy(name='test-policy2',
-                                         description='test policy2',
-                                         shared=False)
-        rule2 = self.create_qos_bandwidth_limit_rule(policy_id=policy2['id'],
-                                                     max_kbps=5000,
-                                                     max_burst_kbps=2523)
-
-        # Test 'list rules'
-        rules = self.admin_client.list_bandwidth_limit_rules(policy1['id'])
-        rules = rules['bandwidth_limit_rules']
-        rules_ids = [r['id'] for r in rules]
-        self.assertIn(rule1['id'], rules_ids)
-        self.assertNotIn(rule2['id'], rules_ids)
diff --git a/neutron/tests/api/test_routers.py b/neutron/tests/api/test_routers.py
deleted file mode 100644 (file)
index 064157e..0000000
+++ /dev/null
@@ -1,408 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-import six
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base_routers as base
-from neutron.tests.api import clients
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class RoutersTest(base.BaseRouterTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(RoutersTest, cls).resource_setup()
-        if not test.is_extension_enabled('router', 'network'):
-            msg = "router extension not enabled."
-            raise cls.skipException(msg)
-        admin_manager = clients.AdminManager()
-        cls.identity_admin_client = admin_manager.identity_client
-        cls.tenant_cidr = (CONF.network.tenant_network_cidr
-                           if cls._ip_version == 4 else
-                           CONF.network.tenant_network_v6_cidr)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f64403e2-8483-4b34-8ccd-b09a87bcc68c')
-    def test_create_show_list_update_delete_router(self):
-        # Create a router
-        # NOTE(salv-orlando): Do not invoke self.create_router
-        # as we need to check the response code
-        name = data_utils.rand_name('router-')
-        create_body = self.client.create_router(
-            name, external_gateway_info={
-                "network_id": CONF.network.public_network_id},
-            admin_state_up=False)
-        self.addCleanup(self._delete_router, create_body['router']['id'])
-        self.assertEqual(create_body['router']['name'], name)
-        self.assertEqual(
-            create_body['router']['external_gateway_info']['network_id'],
-            CONF.network.public_network_id)
-        self.assertFalse(create_body['router']['admin_state_up'])
-        # Show details of the created router
-        show_body = self.client.show_router(create_body['router']['id'])
-        self.assertEqual(show_body['router']['name'], name)
-        self.assertEqual(
-            show_body['router']['external_gateway_info']['network_id'],
-            CONF.network.public_network_id)
-        self.assertFalse(show_body['router']['admin_state_up'])
-        # List routers and verify that the created router is in the response
-        list_body = self.client.list_routers()
-        routers_list = list()
-        for router in list_body['routers']:
-            routers_list.append(router['id'])
-        self.assertIn(create_body['router']['id'], routers_list)
-        # Update the router's name and verify that it is updated
-        updated_name = 'updated ' + name
-        update_body = self.client.update_router(create_body['router']['id'],
-                                                name=updated_name)
-        self.assertEqual(update_body['router']['name'], updated_name)
-        show_body = self.client.show_router(
-            create_body['router']['id'])
-        self.assertEqual(show_body['router']['name'], updated_name)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e54dd3a3-4352-4921-b09d-44369ae17397')
-    def test_create_router_setting_tenant_id(self):
-        # Test creating a router as the admin user while setting tenant_id.
-        test_tenant = data_utils.rand_name('test_tenant_')
-        test_description = data_utils.rand_name('desc_')
-        tenant = self.identity_admin_client.create_tenant(
-            name=test_tenant, description=test_description)
-        tenant_id = tenant['id']
-        self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
-
-        name = data_utils.rand_name('router-')
-        create_body = self.admin_client.create_router(name,
-                                                      tenant_id=tenant_id)
-        self.addCleanup(self.admin_client.delete_router,
-                        create_body['router']['id'])
-        self.assertEqual(tenant_id, create_body['router']['tenant_id'])
-
-    @test.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8')
-    @test.requires_ext(extension='ext-gw-mode', service='network')
-    @test.attr(type='smoke')
-    def test_create_router_with_default_snat_value(self):
-        # Create a router with the default snat value
-        name = data_utils.rand_name('router')
-        router = self._create_router(
-            name, external_network_id=CONF.network.public_network_id)
-        self._verify_router_gateway(
-            router['id'], {'network_id': CONF.network.public_network_id,
-                           'enable_snat': True})
-
-    @test.idempotent_id('ea74068d-09e9-4fd7-8995-9b6a1ace920f')
-    @test.requires_ext(extension='ext-gw-mode', service='network')
-    @test.attr(type='smoke')
-    def test_create_router_with_snat_explicit(self):
-        name = data_utils.rand_name('snat-router')
-        # Create a router for each explicit snat setting
-        enable_snat_states = [False, True]
-        for enable_snat in enable_snat_states:
-            external_gateway_info = {
-                'network_id': CONF.network.public_network_id,
-                'enable_snat': enable_snat}
-            create_body = self.admin_client.create_router(
-                name, external_gateway_info=external_gateway_info)
-            self.addCleanup(self.admin_client.delete_router,
-                            create_body['router']['id'])
-            # Verify snat attributes after router creation
-            self._verify_router_gateway(create_body['router']['id'],
-                                        exp_ext_gw_info=external_gateway_info)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('b42e6e39-2e37-49cc-a6f4-8467e940900a')
-    def test_add_remove_router_interface_with_subnet_id(self):
-        network = self.create_network()
-        subnet = self.create_subnet(network)
-        router = self._create_router(data_utils.rand_name('router-'))
-        # Add router interface with subnet id
-        interface = self.client.add_router_interface_with_subnet_id(
-            router['id'], subnet['id'])
-        self.addCleanup(self._remove_router_interface_with_subnet_id,
-                        router['id'], subnet['id'])
-        self.assertIn('subnet_id', interface.keys())
-        self.assertIn('port_id', interface.keys())
-        # Verify router id is equal to device id in port details
-        show_port_body = self.client.show_port(
-            interface['port_id'])
-        self.assertEqual(show_port_body['port']['device_id'],
-                         router['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('2b7d2f37-6748-4d78-92e5-1d590234f0d5')
-    def test_add_remove_router_interface_with_port_id(self):
-        network = self.create_network()
-        self.create_subnet(network)
-        router = self._create_router(data_utils.rand_name('router-'))
-        port_body = self.client.create_port(
-            network_id=network['id'])
-        # Add a router interface to the port created above
-        interface = self.client.add_router_interface_with_port_id(
-            router['id'], port_body['port']['id'])
-        self.addCleanup(self._remove_router_interface_with_port_id,
-                        router['id'], port_body['port']['id'])
-        self.assertIn('subnet_id', interface.keys())
-        self.assertIn('port_id', interface.keys())
-        # Verify router id is equal to device id in port details
-        show_port_body = self.client.show_port(
-            interface['port_id'])
-        self.assertEqual(show_port_body['port']['device_id'],
-                         router['id'])
-
-    def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
-        show_body = self.admin_client.show_router(router_id)
-        actual_ext_gw_info = show_body['router']['external_gateway_info']
-        if exp_ext_gw_info is None:
-            self.assertIsNone(actual_ext_gw_info)
-            return
-        # Verify only the keys passed in exp_ext_gw_info
-        for k, v in six.iteritems(exp_ext_gw_info):
-            self.assertEqual(v, actual_ext_gw_info[k])
-
-    def _verify_gateway_port(self, router_id):
-        list_body = self.admin_client.list_ports(
-            network_id=CONF.network.public_network_id,
-            device_id=router_id)
-        self.assertEqual(len(list_body['ports']), 1)
-        gw_port = list_body['ports'][0]
-        fixed_ips = gw_port['fixed_ips']
-        self.assertGreaterEqual(len(fixed_ips), 1)
-        public_net_body = self.admin_client.show_network(
-            CONF.network.public_network_id)
-        public_subnet_id = public_net_body['network']['subnets'][0]
-        self.assertIn(public_subnet_id,
-                      [x['subnet_id'] for x in fixed_ips])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('6cc285d8-46bf-4f36-9b1a-783e3008ba79')
-    def test_update_router_set_gateway(self):
-        router = self._create_router(data_utils.rand_name('router-'))
-        self.client.update_router(
-            router['id'],
-            external_gateway_info={
-                'network_id': CONF.network.public_network_id})
-        # Verify the gateway was set on the router
-        self._verify_router_gateway(
-            router['id'],
-            {'network_id': CONF.network.public_network_id})
-        self._verify_gateway_port(router['id'])
-
-    @test.idempotent_id('b386c111-3b21-466d-880c-5e72b01e1a33')
-    @test.requires_ext(extension='ext-gw-mode', service='network')
-    @test.attr(type='smoke')
-    def test_update_router_set_gateway_with_snat_explicit(self):
-        router = self._create_router(data_utils.rand_name('router-'))
-        self.admin_client.update_router_with_snat_gw_info(
-            router['id'],
-            external_gateway_info={
-                'network_id': CONF.network.public_network_id,
-                'enable_snat': True})
-        self._verify_router_gateway(
-            router['id'],
-            {'network_id': CONF.network.public_network_id,
-             'enable_snat': True})
-        self._verify_gateway_port(router['id'])
-
-    @test.idempotent_id('96536bc7-8262-4fb2-9967-5c46940fa279')
-    @test.requires_ext(extension='ext-gw-mode', service='network')
-    @test.attr(type='smoke')
-    def test_update_router_set_gateway_without_snat(self):
-        router = self._create_router(data_utils.rand_name('router-'))
-        self.admin_client.update_router_with_snat_gw_info(
-            router['id'],
-            external_gateway_info={
-                'network_id': CONF.network.public_network_id,
-                'enable_snat': False})
-        self._verify_router_gateway(
-            router['id'],
-            {'network_id': CONF.network.public_network_id,
-             'enable_snat': False})
-        self._verify_gateway_port(router['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('ad81b7ee-4f81-407b-a19c-17e623f763e8')
-    def test_update_router_unset_gateway(self):
-        router = self._create_router(
-            data_utils.rand_name('router-'),
-            external_network_id=CONF.network.public_network_id)
-        self.client.update_router(router['id'], external_gateway_info={})
-        self._verify_router_gateway(router['id'])
-        # No gateway port expected
-        list_body = self.admin_client.list_ports(
-            network_id=CONF.network.public_network_id,
-            device_id=router['id'])
-        self.assertFalse(list_body['ports'])
-
-    @test.idempotent_id('f2faf994-97f4-410b-a831-9bc977b64374')
-    @test.requires_ext(extension='ext-gw-mode', service='network')
-    @test.attr(type='smoke')
-    def test_update_router_reset_gateway_without_snat(self):
-        router = self._create_router(
-            data_utils.rand_name('router-'),
-            external_network_id=CONF.network.public_network_id)
-        self.admin_client.update_router_with_snat_gw_info(
-            router['id'],
-            external_gateway_info={
-                'network_id': CONF.network.public_network_id,
-                'enable_snat': False})
-        self._verify_router_gateway(
-            router['id'],
-            {'network_id': CONF.network.public_network_id,
-             'enable_snat': False})
-        self._verify_gateway_port(router['id'])
-
-    @test.idempotent_id('c86ac3a8-50bd-4b00-a6b8-62af84a0765c')
-    @test.requires_ext(extension='extraroute', service='network')
-    @test.attr(type='smoke')
-    def test_update_extra_route(self):
-        self.network = self.create_network()
-        self.name = self.network['name']
-        self.subnet = self.create_subnet(self.network)
-        # Add router interface with subnet id
-        self.router = self._create_router(
-            data_utils.rand_name('router-'), True)
-        self.create_router_interface(self.router['id'], self.subnet['id'])
-        self.addCleanup(
-            self._delete_extra_routes,
-            self.router['id'])
-        # Update the router's extra route; the second ip of the range is
-        # used as the next hop
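-        # For example (hypothetical values), with a subnet cidr of
-        # 10.100.0.0/28, cidr[2] and thus the next hop is 10.100.0.2.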
-        cidr = netaddr.IPNetwork(self.subnet['cidr'])
-        next_hop = str(cidr[2])
-        destination = str(self.subnet['cidr'])
-        extra_route = self.client.update_extra_routes(self.router['id'],
-                                                      next_hop, destination)
-        self.assertEqual(1, len(extra_route['router']['routes']))
-        self.assertEqual(destination,
-                         extra_route['router']['routes'][0]['destination'])
-        self.assertEqual(next_hop,
-                         extra_route['router']['routes'][0]['nexthop'])
-        show_body = self.client.show_router(self.router['id'])
-        self.assertEqual(destination,
-                         show_body['router']['routes'][0]['destination'])
-        self.assertEqual(next_hop,
-                         show_body['router']['routes'][0]['nexthop'])
-
-    def _delete_extra_routes(self, router_id):
-        self.client.delete_extra_routes(router_id)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('a8902683-c788-4246-95c7-ad9c6d63a4d9')
-    def test_update_router_admin_state(self):
-        self.router = self._create_router(data_utils.rand_name('router-'))
-        self.assertFalse(self.router['admin_state_up'])
-        # Update router admin state
-        update_body = self.client.update_router(self.router['id'],
-                                                admin_state_up=True)
-        self.assertTrue(update_body['router']['admin_state_up'])
-        show_body = self.client.show_router(self.router['id'])
-        self.assertTrue(show_body['router']['admin_state_up'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('802c73c9-c937-4cef-824b-2191e24a6aab')
-    def test_add_multiple_router_interfaces(self):
-        network01 = self.create_network(
-            network_name=data_utils.rand_name('router-network01-'))
-        network02 = self.create_network(
-            network_name=data_utils.rand_name('router-network02-'))
-        subnet01 = self.create_subnet(network01)
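-        # netaddr's next() returns the adjacent cidr of the same size, so
-        # subnet02 cannot overlap subnet01 (for example, hypothetically,
-        # 10.100.0.0/28 -> 10.100.0.16/28)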
-        sub02_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
-        subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
-        router = self._create_router(data_utils.rand_name('router-'))
-        interface01 = self._add_router_interface_with_subnet_id(router['id'],
-                                                                subnet01['id'])
-        self._verify_router_interface(router['id'], subnet01['id'],
-                                      interface01['port_id'])
-        interface02 = self._add_router_interface_with_subnet_id(router['id'],
-                                                                subnet02['id'])
-        self._verify_router_interface(router['id'], subnet02['id'],
-                                      interface02['port_id'])
-
-    def _verify_router_interface(self, router_id, subnet_id, port_id):
-        show_port_body = self.client.show_port(port_id)
-        interface_port = show_port_body['port']
-        self.assertEqual(router_id, interface_port['device_id'])
-        self.assertEqual(subnet_id,
-                         interface_port['fixed_ips'][0]['subnet_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('01f185d1-d1a6-4cf9-abf7-e0e1384c169c')
-    def test_network_attached_with_two_routers(self):
-        network = self.create_network(data_utils.rand_name('network1'))
-        self.create_subnet(network)
-        port1 = self.create_port(network)
-        port2 = self.create_port(network)
-        router1 = self._create_router(data_utils.rand_name('router1'))
-        router2 = self._create_router(data_utils.rand_name('router2'))
-        self.client.add_router_interface_with_port_id(
-            router1['id'], port1['id'])
-        self.client.add_router_interface_with_port_id(
-            router2['id'], port2['id'])
-        self.addCleanup(self.client.remove_router_interface_with_port_id,
-                        router1['id'], port1['id'])
-        self.addCleanup(self.client.remove_router_interface_with_port_id,
-                        router2['id'], port2['id'])
-        body = self.client.show_port(port1['id'])
-        port_show1 = body['port']
-        body = self.client.show_port(port2['id'])
-        port_show2 = body['port']
-        self.assertEqual(port_show1['network_id'], network['id'])
-        self.assertEqual(port_show2['network_id'], network['id'])
-        self.assertEqual(port_show1['device_id'], router1['id'])
-        self.assertEqual(port_show2['device_id'], router2['id'])
-
-
-class RoutersIpV6Test(RoutersTest):
-    _ip_version = 6
-
-
-class DvrRoutersTest(base.BaseRouterTest):
-
-    @classmethod
-    def skip_checks(cls):
-        super(DvrRoutersTest, cls).skip_checks()
-        if not test.is_extension_enabled('dvr', 'network'):
-            msg = "DVR extension not enabled."
-            raise cls.skipException(msg)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('141297aa-3424-455d-aa8d-f2d95731e00a')
-    def test_create_distributed_router(self):
-        name = data_utils.rand_name('router')
-        create_body = self.admin_client.create_router(
-            name, distributed=True)
-        self.addCleanup(self._delete_router,
-                        create_body['router']['id'],
-                        self.admin_client)
-        self.assertTrue(create_body['router']['distributed'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('644d7a4a-01a1-4b68-bb8d-0c0042cb1729')
-    def test_convert_centralized_router(self):
-        router = self._create_router(data_utils.rand_name('router'))
-        self.assertNotIn('distributed', router)
-        update_body = self.admin_client.update_router(router['id'],
-                                                      distributed=True)
-        self.assertTrue(update_body['router']['distributed'])
-        show_body = self.admin_client.show_router(router['id'])
-        self.assertTrue(show_body['router']['distributed'])
-        show_body = self.client.show_router(router['id'])
-        self.assertNotIn('distributed', show_body['router'])
diff --git a/neutron/tests/api/test_routers_negative.py b/neutron/tests/api/test_routers_negative.py
deleted file mode 100644 (file)
index f87f4ae..0000000
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-import testtools
-
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base_routers as base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class RoutersNegativeTest(base.BaseRouterTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(RoutersNegativeTest, cls).resource_setup()
-        if not test.is_extension_enabled('router', 'network'):
-            msg = "router extension not enabled."
-            raise cls.skipException(msg)
-        cls.router = cls.create_router(data_utils.rand_name('router-'))
-        cls.network = cls.create_network()
-        cls.subnet = cls.create_subnet(cls.network)
-        cls.tenant_cidr = (CONF.network.tenant_network_cidr
-                           if cls._ip_version == 4 else
-                           CONF.network.tenant_network_v6_cidr)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
-    def test_router_add_gateway_invalid_network_returns_404(self):
-        self.assertRaises(lib_exc.NotFound,
-                          self.client.update_router,
-                          self.router['id'],
-                          external_gateway_info={
-                              'network_id': self.router['id']})
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
-    def test_router_add_gateway_net_not_external_returns_400(self):
-        alt_network = self.create_network(
-            network_name=data_utils.rand_name('router-negative-'))
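-        # Pick the adjacent same-sized cidr via netaddr's next() so the new
-        # subnet does not overlap the tenant cidr already in use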
-        sub_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
-        self.create_subnet(alt_network, cidr=sub_cidr)
-        self.assertRaises(lib_exc.BadRequest,
-                          self.client.update_router,
-                          self.router['id'],
-                          external_gateway_info={
-                              'network_id': alt_network['id']})
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('957751a3-3c68-4fa2-93b6-eb52ea10db6e')
-    def test_add_router_interfaces_on_overlapping_subnets_returns_400(self):
-        network01 = self.create_network(
-            network_name=data_utils.rand_name('router-network01-'))
-        network02 = self.create_network(
-            network_name=data_utils.rand_name('router-network02-'))
-        subnet01 = self.create_subnet(network01)
-        subnet02 = self.create_subnet(network02)
-        self._add_router_interface_with_subnet_id(self.router['id'],
-                                                  subnet01['id'])
-        self.assertRaises(lib_exc.BadRequest,
-                          self._add_router_interface_with_subnet_id,
-                          self.router['id'],
-                          subnet02['id'])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('04df80f9-224d-47f5-837a-bf23e33d1c20')
-    def test_router_remove_interface_in_use_returns_409(self):
-        self.client.add_router_interface_with_subnet_id(
-            self.router['id'], self.subnet['id'])
-        self.assertRaises(lib_exc.Conflict,
-                          self.client.delete_router,
-                          self.router['id'])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('c2a70d72-8826-43a7-8208-0209e6360c47')
-    def test_show_non_existent_router_returns_404(self):
-        router = data_utils.rand_name('non_exist_router')
-        self.assertRaises(lib_exc.NotFound, self.client.show_router,
-                          router)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('b23d1569-8b0c-4169-8d4b-6abd34fad5c7')
-    def test_update_non_existent_router_returns_404(self):
-        router = data_utils.rand_name('non_exist_router')
-        self.assertRaises(lib_exc.NotFound, self.client.update_router,
-                          router, name="new_name")
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4')
-    def test_delete_non_existent_router_returns_404(self):
-        router = data_utils.rand_name('non_exist_router')
-        self.assertRaises(lib_exc.NotFound, self.client.delete_router,
-                          router)
-
-
-class RoutersNegativeIpV6Test(RoutersNegativeTest):
-    _ip_version = 6
-
-
-class DvrRoutersNegativeTest(base.BaseRouterTest):
-
-    @classmethod
-    def skip_checks(cls):
-        super(DvrRoutersNegativeTest, cls).skip_checks()
-        if not test.is_extension_enabled('dvr', 'network'):
-            msg = "DVR extension not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
-    def resource_setup(cls):
-        super(DvrRoutersNegativeTest, cls).resource_setup()
-        cls.router = cls.create_router(data_utils.rand_name('router'))
-        cls.network = cls.create_network()
-        cls.subnet = cls.create_subnet(cls.network)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('4990b055-8fc7-48ab-bba7-aa28beaad0b9')
-    def test_router_create_tenant_distributed_returns_forbidden(self):
-        with testtools.ExpectedException(lib_exc.Forbidden):
-            self.create_router(
-                data_utils.rand_name('router'), distributed=True)
diff --git a/neutron/tests/api/test_security_groups.py b/neutron/tests/api/test_security_groups.py
deleted file mode 100644 (file)
index 1e4d7ce..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import six
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base_security_groups as base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class SecGroupTest(base.BaseSecGroupTest):
-
-    _tenant_network_cidr = CONF.network.tenant_network_cidr
-
-    @classmethod
-    def resource_setup(cls):
-        super(SecGroupTest, cls).resource_setup()
-        if not test.is_extension_enabled('security-group', 'network'):
-            msg = "security-group extension not enabled."
-            raise cls.skipException(msg)
-
-    def _create_verify_security_group_rule(self, sg_id, direction,
-                                           ethertype, protocol,
-                                           port_range_min,
-                                           port_range_max,
-                                           remote_group_id=None,
-                                           remote_ip_prefix=None):
-        # Create a security group rule with the input params and validate
-        # that the rule is created with the same parameters.
-        rule_create_body = self.client.create_security_group_rule(
-            security_group_id=sg_id,
-            direction=direction,
-            ethertype=ethertype,
-            protocol=protocol,
-            port_range_min=port_range_min,
-            port_range_max=port_range_max,
-            remote_group_id=remote_group_id,
-            remote_ip_prefix=remote_ip_prefix
-        )
-
-        sec_group_rule = rule_create_body['security_group_rule']
-        self.addCleanup(self._delete_security_group_rule,
-                        sec_group_rule['id'])
-
-        expected = {'direction': direction, 'protocol': protocol,
-                    'ethertype': ethertype, 'port_range_min': port_range_min,
-                    'port_range_max': port_range_max,
-                    'remote_group_id': remote_group_id,
-                    'remote_ip_prefix': remote_ip_prefix}
-        for key, value in six.iteritems(expected):
-            self.assertEqual(value, sec_group_rule[key],
-                             "Field %s of the created security group "
-                             "rule does not match with %s." %
-                             (key, value))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e30abd17-fef9-4739-8617-dc26da88e686')
-    def test_list_security_groups(self):
-        # Verify that the tenant's default security group exists in the list
-        body = self.client.list_security_groups()
-        security_groups = body['security_groups']
-        found = None
-        for n in security_groups:
-            if (n['name'] == 'default'):
-                found = n['id']
-        msg = "Security-group list doesn't contain default security-group"
-        self.assertIsNotNone(found, msg)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('bfd128e5-3c92-44b6-9d66-7fe29d22c802')
-    def test_create_list_update_show_delete_security_group(self):
-        group_create_body, name = self._create_security_group()
-
-        # List security groups and verify the created group is in the response
-        list_body = self.client.list_security_groups()
-        secgroup_list = list()
-        for secgroup in list_body['security_groups']:
-            secgroup_list.append(secgroup['id'])
-        self.assertIn(group_create_body['security_group']['id'], secgroup_list)
-        # Update the security group
-        new_name = data_utils.rand_name('security-')
-        new_description = data_utils.rand_name('security-description')
-        update_body = self.client.update_security_group(
-            group_create_body['security_group']['id'],
-            name=new_name,
-            description=new_description)
-        # Verify the security group is updated
-        self.assertEqual(update_body['security_group']['name'], new_name)
-        self.assertEqual(update_body['security_group']['description'],
-                         new_description)
-        # Show details of the updated security group
-        show_body = self.client.show_security_group(
-            group_create_body['security_group']['id'])
-        self.assertEqual(show_body['security_group']['name'], new_name)
-        self.assertEqual(show_body['security_group']['description'],
-                         new_description)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('cfb99e0e-7410-4a3d-8a0c-959a63ee77e9')
-    def test_create_show_delete_security_group_rule(self):
-        group_create_body, _ = self._create_security_group()
-
-        # Create rules for each protocol
-        protocols = ['tcp', 'udp', 'icmp']
-        for protocol in protocols:
-            rule_create_body = self.client.create_security_group_rule(
-                security_group_id=group_create_body['security_group']['id'],
-                protocol=protocol,
-                direction='ingress',
-                ethertype=self.ethertype
-            )
-
-            # Show details of the created security group rule
-            show_rule_body = self.client.show_security_group_rule(
-                rule_create_body['security_group_rule']['id']
-            )
-            create_dict = rule_create_body['security_group_rule']
-            for key, value in six.iteritems(create_dict):
-                self.assertEqual(value,
-                                 show_rule_body['security_group_rule'][key],
-                                 "%s does not match." % key)
-
-            # List rules and verify the created rule is in the response
-            rule_list_body = self.client.list_security_group_rules()
-            rule_list = [rule['id']
-                         for rule in rule_list_body['security_group_rules']]
-            self.assertIn(rule_create_body['security_group_rule']['id'],
-                          rule_list)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('87dfbcf9-1849-43ea-b1e4-efa3eeae9f71')
-    def test_create_security_group_rule_with_additional_args(self):
-        """Verify security group rule with additional arguments works.
-
-        direction:ingress, ethertype:[IPv4/IPv6],
-        protocol:tcp, port_range_min:77, port_range_max:77
-        """
-        group_create_body, _ = self._create_security_group()
-        sg_id = group_create_body['security_group']['id']
-        direction = 'ingress'
-        protocol = 'tcp'
-        port_range_min = 77
-        port_range_max = 77
-        self._create_verify_security_group_rule(sg_id, direction,
-                                                self.ethertype, protocol,
-                                                port_range_min,
-                                                port_range_max)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('c9463db8-b44d-4f52-b6c0-8dbda99f26ce')
-    def test_create_security_group_rule_with_icmp_type_code(self):
-        """Verify security group rule for icmp protocol works.
-
-        Specify icmp type (port_range_min) and icmp code
-        (port_range_max) with different values. A separate testcase
-        is added for icmp protocol as icmp validation would be
-        different from tcp/udp.
-        """
-        group_create_body, _ = self._create_security_group()
-
-        sg_id = group_create_body['security_group']['id']
-        direction = 'ingress'
-        protocol = 'icmp'
-        icmp_type_codes = [(3, 2), (3, 0), (8, 0), (0, 0), (11, None)]
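-        # e.g. (3, 2) is destination unreachable / protocol unreachable,
-        # (8, 0) is an echo request and (11, None) is time exceeded with
-        # no specific code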
-        for icmp_type, icmp_code in icmp_type_codes:
-            self._create_verify_security_group_rule(sg_id, direction,
-                                                    self.ethertype, protocol,
-                                                    icmp_type, icmp_code)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b')
-    def test_create_security_group_rule_with_remote_group_id(self):
-        # Verify creating a security group rule with remote_group_id works
-        sg1_body, _ = self._create_security_group()
-        sg2_body, _ = self._create_security_group()
-
-        sg_id = sg1_body['security_group']['id']
-        direction = 'ingress'
-        protocol = 'udp'
-        port_range_min = 50
-        port_range_max = 55
-        remote_id = sg2_body['security_group']['id']
-        self._create_verify_security_group_rule(sg_id, direction,
-                                                self.ethertype, protocol,
-                                                port_range_min,
-                                                port_range_max,
-                                                remote_group_id=remote_id)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('16459776-5da2-4634-bce4-4b55ee3ec188')
-    def test_create_security_group_rule_with_remote_ip_prefix(self):
-        # Verify creating a security group rule with remote_ip_prefix works
-        sg1_body, _ = self._create_security_group()
-
-        sg_id = sg1_body['security_group']['id']
-        direction = 'ingress'
-        protocol = 'tcp'
-        port_range_min = 76
-        port_range_max = 77
-        ip_prefix = self._tenant_network_cidr
-        self._create_verify_security_group_rule(sg_id, direction,
-                                                self.ethertype, protocol,
-                                                port_range_min,
-                                                port_range_max,
-                                                remote_ip_prefix=ip_prefix)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('0a307599-6655-4220-bebc-fd70c64f2290')
-    def test_create_security_group_rule_with_protocol_integer_value(self):
-        # Verify creating a security group rule with the
-        # protocol given as an integer value,
-        # e.g. "protocol": 17
-        group_create_body, _ = self._create_security_group()
-        direction = 'ingress'
-        protocol = 17
-        security_group_id = group_create_body['security_group']['id']
-        rule_create_body = self.client.create_security_group_rule(
-            security_group_id=security_group_id,
-            direction=direction,
-            protocol=protocol
-        )
-        sec_group_rule = rule_create_body['security_group_rule']
-        self.assertEqual(sec_group_rule['direction'], direction)
-        self.assertEqual(int(sec_group_rule['protocol']), protocol)
-
-
-class SecGroupIPv6Test(SecGroupTest):
-    _ip_version = 6
-    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
diff --git a/neutron/tests/api/test_security_groups_negative.py b/neutron/tests/api/test_security_groups_negative.py
deleted file mode 100644 (file)
index 2e40d7a..0000000
+++ /dev/null
@@ -1,237 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import uuid
-
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base_security_groups as base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class NegativeSecGroupTest(base.BaseSecGroupTest):
-
-    _tenant_network_cidr = CONF.network.tenant_network_cidr
-
-    @classmethod
-    def resource_setup(cls):
-        super(NegativeSecGroupTest, cls).resource_setup()
-        if not test.is_extension_enabled('security-group', 'network'):
-            msg = "security-group extension not enabled."
-            raise cls.skipException(msg)
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('424fd5c3-9ddc-486a-b45f-39bf0c820fc6')
-    def test_show_non_existent_security_group(self):
-        non_exist_id = str(uuid.uuid4())
-        self.assertRaises(lib_exc.NotFound, self.client.show_security_group,
-                          non_exist_id)
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('4c094c09-000b-4e41-8100-9617600c02a6')
-    def test_show_non_existent_security_group_rule(self):
-        non_exist_id = str(uuid.uuid4())
-        self.assertRaises(lib_exc.NotFound,
-                          self.client.show_security_group_rule,
-                          non_exist_id)
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('1f1bb89d-5664-4956-9fcd-83ee0fa603df')
-    def test_delete_non_existent_security_group(self):
-        non_exist_id = str(uuid.uuid4())
-        self.assertRaises(lib_exc.NotFound,
-                          self.client.delete_security_group,
-                          non_exist_id
-                          )
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('981bdc22-ce48-41ed-900a-73148b583958')
-    def test_create_security_group_rule_with_bad_protocol(self):
-        group_create_body, _ = self._create_security_group()
-
-        # Create rule with bad protocol name
-        pname = 'bad_protocol_name'
-        self.assertRaises(
-            lib_exc.BadRequest, self.client.create_security_group_rule,
-            security_group_id=group_create_body['security_group']['id'],
-            protocol=pname, direction='ingress', ethertype=self.ethertype)
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('5f8daf69-3c5f-4aaa-88c9-db1d66f68679')
-    def test_create_security_group_rule_with_bad_remote_ip_prefix(self):
-        group_create_body, _ = self._create_security_group()
-
-        # Create rule with bad remote_ip_prefix
-        prefix = ['192.168.1./24', '192.168.1.1/33', 'bad_prefix', '256']
-        for remote_ip_prefix in prefix:
-            self.assertRaises(
-                lib_exc.BadRequest, self.client.create_security_group_rule,
-                security_group_id=group_create_body['security_group']['id'],
-                protocol='tcp', direction='ingress', ethertype=self.ethertype,
-                remote_ip_prefix=remote_ip_prefix)
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('4bf786fd-2f02-443c-9716-5b98e159a49a')
-    def test_create_security_group_rule_with_non_existent_remote_groupid(self):
-        group_create_body, _ = self._create_security_group()
-        non_exist_id = str(uuid.uuid4())
-
-        # Create rule with a non-existent remote_group_id
-        group_ids = ['bad_group_id', non_exist_id]
-        for remote_group_id in group_ids:
-            self.assertRaises(
-                lib_exc.NotFound, self.client.create_security_group_rule,
-                security_group_id=group_create_body['security_group']['id'],
-                protocol='tcp', direction='ingress', ethertype=self.ethertype,
-                remote_group_id=remote_group_id)
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('b5c4b247-6b02-435b-b088-d10d45650881')
-    def test_create_security_group_rule_with_remote_ip_and_group(self):
-        sg1_body, _ = self._create_security_group()
-        sg2_body, _ = self._create_security_group()
-
-        # Create rule specifying both remote_ip_prefix and remote_group_id
-        prefix = self._tenant_network_cidr
-        self.assertRaises(
-            lib_exc.BadRequest, self.client.create_security_group_rule,
-            security_group_id=sg1_body['security_group']['id'],
-            protocol='tcp', direction='ingress',
-            ethertype=self.ethertype, remote_ip_prefix=prefix,
-            remote_group_id=sg2_body['security_group']['id'])
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('5666968c-fff3-40d6-9efc-df1c8bd01abb')
-    def test_create_security_group_rule_with_bad_ethertype(self):
-        group_create_body, _ = self._create_security_group()
-
-        # Create rule with bad ethertype
-        ethertype = 'bad_ethertype'
-        self.assertRaises(
-            lib_exc.BadRequest, self.client.create_security_group_rule,
-            security_group_id=group_create_body['security_group']['id'],
-            protocol='udp', direction='ingress', ethertype=ethertype)
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d')
-    def test_create_security_group_rule_with_invalid_ports(self):
-        group_create_body, _ = self._create_security_group()
-
-        # Create rule for tcp protocol with invalid ports
-        states = [(-16, 80, 'Invalid value for port -16'),
-                  (80, 79, 'port_range_min must be <= port_range_max'),
-                  (80, 65536, 'Invalid value for port 65536'),
-                  (None, 6, 'port_range_min must be <= port_range_max'),
-                  (-16, 65536, 'Invalid value for port')]
-        for pmin, pmax, msg in states:
-            ex = self.assertRaises(
-                lib_exc.BadRequest, self.client.create_security_group_rule,
-                security_group_id=group_create_body['security_group']['id'],
-                protocol='tcp', port_range_min=pmin, port_range_max=pmax,
-                direction='ingress', ethertype=self.ethertype)
-            self.assertIn(msg, str(ex))
-
-        # Create rule for icmp protocol with invalid ports
-        states = [(1, 256, 'Invalid value for ICMP code'),
-                  (-1, 25, 'Invalid value'),
-                  (None, 6, 'ICMP type (port-range-min) is missing'),
-                  (300, 1, 'Invalid value for ICMP type')]
-        for pmin, pmax, msg in states:
-            ex = self.assertRaises(
-                lib_exc.BadRequest, self.client.create_security_group_rule,
-                security_group_id=group_create_body['security_group']['id'],
-                protocol='icmp', port_range_min=pmin, port_range_max=pmax,
-                direction='ingress', ethertype=self.ethertype)
-            self.assertIn(msg, str(ex))
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('2323061e-9fbf-4eb0-b547-7e8fafc90849')
-    def test_create_additional_default_security_group_fails(self):
-        # Creating a security group named 'default' should fail.
-        name = 'default'
-        self.assertRaises(lib_exc.Conflict,
-                          self.client.create_security_group,
-                          name=name)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('8fde898f-ce88-493b-adc9-4e4692879fc5')
-    def test_create_duplicate_security_group_rule_fails(self):
-        # Creating a duplicate security group rule should fail.
-        body, _ = self._create_security_group()
-
-        min_port = 66
-        max_port = 67
-        # Create a rule with valid params
-        self.client.create_security_group_rule(
-            security_group_id=body['security_group']['id'],
-            direction='ingress',
-            ethertype=self.ethertype,
-            protocol='tcp',
-            port_range_min=min_port,
-            port_range_max=max_port
-        )
-
-        # Try creating the same security group rule, it should fail
-        self.assertRaises(
-            lib_exc.Conflict, self.client.create_security_group_rule,
-            security_group_id=body['security_group']['id'],
-            protocol='tcp', direction='ingress', ethertype=self.ethertype,
-            port_range_min=min_port, port_range_max=max_port)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('be308db6-a7cf-4d5c-9baf-71bafd73f35e')
-    def test_create_security_group_rule_with_non_existent_security_group(self):
-        # Creating a security group rule with a non-existent
-        # security group should fail.
-        non_existent_sg = str(uuid.uuid4())
-        self.assertRaises(lib_exc.NotFound,
-                          self.client.create_security_group_rule,
-                          security_group_id=non_existent_sg,
-                          direction='ingress', ethertype=self.ethertype)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('55100aa8-b24f-333c-0bef-64eefd85f15c')
-    def test_update_default_security_group_name(self):
-        sg_list = self.client.list_security_groups(name='default')
-        sg = sg_list['security_groups'][0]
-        self.assertRaises(lib_exc.Conflict, self.client.update_security_group,
-                          sg['id'], name='test')
-
-
-class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
-    _ip_version = 6
-    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
-
-    @test.attr(type=['negative', 'gate'])
-    @test.idempotent_id('7607439c-af73-499e-bf64-f687fd12a842')
-    def test_create_security_group_rule_wrong_ip_prefix_version(self):
-        group_create_body, _ = self._create_security_group()
-
-        # Create rules whose remote_ip_prefix version conflicts with
-        # the ethertype
-        pairs = ({'ethertype': 'IPv6',
-                  'ip_prefix': CONF.network.tenant_network_cidr},
-                 {'ethertype': 'IPv4',
-                  'ip_prefix': CONF.network.tenant_network_v6_cidr})
-        for pair in pairs:
-            self.assertRaisesRegexp(
-                lib_exc.BadRequest,
-                "Conflicting value ethertype",
-                self.client.create_security_group_rule,
-                security_group_id=group_create_body['security_group']['id'],
-                protocol='tcp', direction='ingress',
-                ethertype=pair['ethertype'],
-                remote_ip_prefix=pair['ip_prefix'])
diff --git a/neutron/tests/api/test_service_type_management.py b/neutron/tests/api/test_service_type_management.py
deleted file mode 100644 (file)
index 4bafb60..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.tests.api import base
-from neutron.tests.tempest import test
-
-
-class ServiceTypeManagementTest(base.BaseNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(ServiceTypeManagementTest, cls).resource_setup()
-        if not test.is_extension_enabled('service-type', 'network'):
-            msg = "Neutron Service Type Management not enabled."
-            raise cls.skipException(msg)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('2cbbeea9-f010-40f6-8df5-4eaa0c918ea6')
-    def test_service_provider_list(self):
-        body = self.client.list_service_providers()
-        self.assertIsInstance(body['service_providers'], list)
diff --git a/neutron/tests/api/test_subnetpools.py b/neutron/tests/api/test_subnetpools.py
deleted file mode 100644 (file)
index 252071d..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common.utils import data_utils
-
-from neutron.tests.api import base
-from neutron.tests.api import clients
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-SUBNETPOOL_NAME = 'smoke-subnetpool'
-SUBNET_NAME = 'smoke-subnet'
-
-
-class SubnetPoolsTestBase(base.BaseNetworkTest):
-
-    @classmethod
-    def resource_setup(cls):
-        super(SubnetPoolsTestBase, cls).resource_setup()
-        min_prefixlen = '29'
-        prefixes = [u'10.11.12.0/24']
-        cls._subnetpool_data = {'prefixes': prefixes,
-                                'min_prefixlen': min_prefixlen}
-        try:
-            creds = cls.isolated_creds.get_admin_creds()
-            cls.os_adm = clients.Manager(credentials=creds)
-        except NotImplementedError:
-            msg = ("Missing Administrative Network API credentials "
-                   "in configuration.")
-            raise cls.skipException(msg)
-        cls.admin_client = cls.os_adm.network_client
-
-    def _create_subnetpool(self, is_admin=False, **kwargs):
-        if 'name' not in kwargs:
-            name = data_utils.rand_name(SUBNETPOOL_NAME)
-        else:
-            name = kwargs.pop('name')
-
-        if 'prefixes' not in kwargs:
-            kwargs['prefixes'] = self._subnetpool_data['prefixes']
-
-        if 'min_prefixlen' not in kwargs:
-            kwargs['min_prefixlen'] = self._subnetpool_data['min_prefixlen']
-
-        return self.create_subnetpool(name=name, is_admin=is_admin, **kwargs)
-
-
-class SubnetPoolsTest(SubnetPoolsTestBase):
-
-    min_prefixlen = '28'
-    max_prefixlen = '31'
-    _ip_version = 4
-    subnet_cidr = u'10.11.12.0/31'
-    new_prefix = u'10.11.15.0/24'
-    larger_prefix = u'10.11.0.0/16'
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-
-        create a subnetpool for a tenant
-        list a tenant's subnetpools
-        show a tenant subnetpool's details
-        update a subnetpool
-        delete a subnetpool
-
-        All subnetpool tests are run once with ipv4 and once with ipv6.
-
-    v2.0 of the Neutron API is assumed.
-
-    """
-
-    def _new_subnetpool_attributes(self):
-        new_name = data_utils.rand_name(SUBNETPOOL_NAME)
-        return {'name': new_name, 'min_prefixlen': self.min_prefixlen,
-                'max_prefixlen': self.max_prefixlen}
-
-    def _check_equality_updated_subnetpool(self, expected_values,
-                                           updated_pool):
-        self.assertEqual(expected_values['name'],
-                         updated_pool['name'])
-        self.assertEqual(expected_values['min_prefixlen'],
-                         updated_pool['min_prefixlen'])
-        self.assertEqual(expected_values['max_prefixlen'],
-                         updated_pool['max_prefixlen'])
-        # expected_values may not contain all subnetpool values
-        if 'prefixes' in expected_values:
-            self.assertEqual(expected_values['prefixes'],
-                             updated_pool['prefixes'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('6e1781ec-b45b-4042-aebe-f485c022996e')
-    def test_create_list_subnetpool(self):
-        created_subnetpool = self._create_subnetpool()
-        body = self.client.list_subnetpools()
-        subnetpools = body['subnetpools']
-        self.assertIn(created_subnetpool['id'],
-                      [sp['id'] for sp in subnetpools],
-                      "Created subnetpool id should be in the list")
-        self.assertIn(created_subnetpool['name'],
-                      [sp['name'] for sp in subnetpools],
-                      "Created subnetpool name should be in the list")
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('741d08c2-1e3f-42be-99c7-0ea93c5b728c')
-    def test_get_subnetpool(self):
-        created_subnetpool = self._create_subnetpool()
-        prefixlen = self._subnetpool_data['min_prefixlen']
-        body = self.client.show_subnetpool(created_subnetpool['id'])
-        subnetpool = body['subnetpool']
-        self.assertEqual(created_subnetpool['name'], subnetpool['name'])
-        self.assertEqual(created_subnetpool['id'], subnetpool['id'])
-        self.assertEqual(prefixlen, subnetpool['min_prefixlen'])
-        self.assertEqual(prefixlen, subnetpool['default_prefixlen'])
-        self.assertFalse(subnetpool['shared'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('764f1b93-1c4a-4513-9e7b-6c2fc5e9270c')
-    def test_tenant_update_subnetpool(self):
-        created_subnetpool = self._create_subnetpool()
-        pool_id = created_subnetpool['id']
-        subnetpool_data = self._new_subnetpool_attributes()
-        self.client.update_subnetpool(created_subnetpool['id'],
-                                      **subnetpool_data)
-
-        body = self.client.show_subnetpool(pool_id)
-        subnetpool = body['subnetpool']
-        self._check_equality_updated_subnetpool(subnetpool_data,
-                                                subnetpool)
-        self.assertFalse(subnetpool['shared'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('4b496082-c992-4319-90be-d4a7ce646290')
-    def test_update_subnetpool_prefixes_append(self):
-        # We can append new prefixes to the subnetpool
-        created_subnetpool = self._create_subnetpool()
-        pool_id = created_subnetpool['id']
-        old_prefixes = self._subnetpool_data['prefixes']
-        new_prefixes = old_prefixes[:]
-        new_prefixes.append(self.new_prefix)
-        subnetpool_data = {'prefixes': new_prefixes}
-        self.client.update_subnetpool(pool_id, **subnetpool_data)
-        body = self.client.show_subnetpool(pool_id)
-        prefixes = body['subnetpool']['prefixes']
-        self.assertIn(self.new_prefix, prefixes)
-        self.assertIn(old_prefixes[0], prefixes)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('2cae5d6a-9d32-42d8-8067-f13970ae13bb')
-    def test_update_subnetpool_prefixes_extend(self):
-        # We can extend the current subnetpool prefixes
-        created_subnetpool = self._create_subnetpool()
-        pool_id = created_subnetpool['id']
-        old_prefixes = self._subnetpool_data['prefixes']
-        subnetpool_data = {'prefixes': [self.larger_prefix]}
-        self.client.update_subnetpool(pool_id, **subnetpool_data)
-        body = self.client.show_subnetpool(pool_id)
-        prefixes = body['subnetpool']['prefixes']
-        self.assertIn(self.larger_prefix, prefixes)
-        self.assertNotIn(old_prefixes[0], prefixes)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('d70c6c35-913b-4f24-909f-14cd0d29b2d2')
-    def test_admin_create_shared_subnetpool(self):
-        created_subnetpool = self._create_subnetpool(is_admin=True,
-                                                     shared=True)
-        pool_id = created_subnetpool['id']
-        # A shared subnetpool can be retrieved by a tenant user.
-        body = self.client.show_subnetpool(pool_id)
-        subnetpool = body['subnetpool']
-        self.assertEqual(created_subnetpool['name'], subnetpool['name'])
-        self.assertTrue(subnetpool['shared'])
-
-    def _create_subnet_from_pool(self, subnet_values=None, pool_values=None):
-        if pool_values is None:
-            pool_values = {}
-
-        created_subnetpool = self._create_subnetpool(**pool_values)
-        pool_id = created_subnetpool['id']
-        subnet_name = data_utils.rand_name(SUBNETPOOL_NAME)
-        network = self.create_network()
-        subnet_kwargs = {'name': subnet_name,
-                         'subnetpool_id': pool_id}
-        if subnet_values:
-            subnet_kwargs.update(subnet_values)
-        # The subnet is not created via base.create_subnet because that
-        # function would need to be enhanced to support subnet creation
-        # when prefixlen and subnetpool_id are specified.
-        body = self.client.create_subnet(
-            network_id=network['id'],
-            ip_version=self._ip_version,
-            **subnet_kwargs)
-        subnet = body['subnet']
-        return pool_id, subnet
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('1362ed7d-3089-42eb-b3a5-d6cb8398ee77')
-    def test_create_subnet_from_pool_with_prefixlen(self):
-        subnet_values = {"prefixlen": self.max_prefixlen}
-        pool_id, subnet = self._create_subnet_from_pool(
-            subnet_values=subnet_values)
-        cidr = str(subnet['cidr'])
-        self.assertEqual(pool_id, subnet['subnetpool_id'])
-        self.assertTrue(cidr.endswith(str(self.max_prefixlen)))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('86b86189-9789-4582-9c3b-7e2bfe5735ee')
-    def test_create_subnet_from_pool_with_subnet_cidr(self):
-        subnet_values = {"cidr": self.subnet_cidr}
-        pool_id, subnet = self._create_subnet_from_pool(
-            subnet_values=subnet_values)
-        cidr = str(subnet['cidr'])
-        self.assertEqual(pool_id, subnet['subnetpool_id'])
-        self.assertEqual(cidr, self.subnet_cidr)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('83f76e3a-9c40-40c2-a015-b7c5242178d8')
-    def test_create_subnet_from_pool_with_default_prefixlen(self):
-        # If neither cidr nor prefixlen is specified, the subnet will
-        # use the subnetpool's default_prefixlen for its cidr.
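-        # e.g. with the class defaults (prefixes=[u'10.11.12.0/24'],
-        # min_prefixlen='29') default_prefixlen falls back to
-        # min_prefixlen, so the allocated cidr should end in /29.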
-        pool_id, subnet = self._create_subnet_from_pool()
-        cidr = str(subnet['cidr'])
-        self.assertEqual(pool_id, subnet['subnetpool_id'])
-        prefixlen = self._subnetpool_data['min_prefixlen']
-        self.assertTrue(cidr.endswith(str(prefixlen)))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('a64af292-ec52-4bde-b654-a6984acaf477')
-    def test_create_subnet_from_pool_with_quota(self):
-        pool_values = {'default_quota': 4}
-        subnet_values = {"prefixlen": self.max_prefixlen}
-        pool_id, subnet = self._create_subnet_from_pool(
-            subnet_values=subnet_values, pool_values=pool_values)
-        cidr = str(subnet['cidr'])
-        self.assertEqual(pool_id, subnet['subnetpool_id'])
-        self.assertTrue(cidr.endswith(str(self.max_prefixlen)))
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('49b44c64-1619-4b29-b527-ffc3c3115dc4')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_create_subnetpool_associate_address_scope(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'),
-            ip_version=self._ip_version)
-        created_subnetpool = self._create_subnetpool(
-            address_scope_id=address_scope['id'])
-        body = self.client.show_subnetpool(created_subnetpool['id'])
-        self.assertEqual(address_scope['id'],
-                         body['subnetpool']['address_scope_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('910b6393-db24-4f6f-87dc-b36892ad6c8c')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_update_subnetpool_associate_address_scope(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'),
-            ip_version=self._ip_version)
-        created_subnetpool = self._create_subnetpool()
-        pool_id = created_subnetpool['id']
-        body = self.client.show_subnetpool(pool_id)
-        self.assertIsNone(body['subnetpool']['address_scope_id'])
-        self.client.update_subnetpool(pool_id,
-                                      address_scope_id=address_scope['id'])
-        body = self.client.show_subnetpool(pool_id)
-        self.assertEqual(address_scope['id'],
-                         body['subnetpool']['address_scope_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('18302e80-46a3-4563-82ac-ccd1dd57f652')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_update_subnetpool_associate_another_address_scope(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'),
-            ip_version=self._ip_version)
-        another_address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'),
-            ip_version=self._ip_version)
-        created_subnetpool = self._create_subnetpool(
-            address_scope_id=address_scope['id'])
-        pool_id = created_subnetpool['id']
-        body = self.client.show_subnetpool(pool_id)
-        self.assertEqual(address_scope['id'],
-                         body['subnetpool']['address_scope_id'])
-        self.client.update_subnetpool(
-            pool_id, address_scope_id=another_address_scope['id'])
-        body = self.client.show_subnetpool(pool_id)
-        self.assertEqual(another_address_scope['id'],
-                         body['subnetpool']['address_scope_id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f8970048-e41b-42d6-934b-a1297b07706a')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_update_subnetpool_disassociate_address_scope(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'),
-            ip_version=self._ip_version)
-        created_subnetpool = self._create_subnetpool(
-            address_scope_id=address_scope['id'])
-        pool_id = created_subnetpool['id']
-        body = self.client.show_subnetpool(pool_id)
-        self.assertEqual(address_scope['id'],
-                         body['subnetpool']['address_scope_id'])
-        self.client.update_subnetpool(pool_id,
-                                      address_scope_id=None)
-        body = self.client.show_subnetpool(pool_id)
-        self.assertIsNone(body['subnetpool']['address_scope_id'])
-
-
-class SubnetPoolsTestV6(SubnetPoolsTest):
-
-    min_prefixlen = '48'
-    max_prefixlen = '64'
-    _ip_version = 6
-    subnet_cidr = '2001:db8:3::/64'
-    new_prefix = u'2001:db8:5::/64'
-    larger_prefix = u'2001:db8::/32'
-
-    @classmethod
-    def resource_setup(cls):
-        super(SubnetPoolsTestV6, cls).resource_setup()
-        min_prefixlen = '64'
-        prefixes = [u'2001:db8:3::/48']
-        cls._subnetpool_data = {'min_prefixlen': min_prefixlen,
-                                'prefixes': prefixes}
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('f62d73dc-cf6f-4879-b94b-dab53982bf3b')
-    def test_create_dual_stack_subnets_from_subnetpools(self):
-        pool_id_v6, subnet_v6 = self._create_subnet_from_pool()
-        pool_values_v4 = {'prefixes': ['192.168.0.0/16'],
-                          'min_prefixlen': 21,
-                          'max_prefixlen': 32}
-        created_v4_subnetpool = self._create_subnetpool(**pool_values_v4)
-        pool_id_v4 = created_v4_subnetpool['id']
-        subnet_v4 = self.client.create_subnet(
-            network_id=subnet_v6['network_id'], ip_version=4,
-            subnetpool_id=pool_id_v4)['subnet']
-        self.assertEqual(subnet_v4['network_id'], subnet_v6['network_id'])
diff --git a/neutron/tests/api/test_subnetpools_negative.py b/neutron/tests/api/test_subnetpools_negative.py
deleted file mode 100644 (file)
index 9d94086..0000000
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-import uuid
-
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import test_subnetpools
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-SUBNETPOOL_NAME = 'smoke-subnetpool'
-
-
-class SubnetPoolsNegativeTestJSON(test_subnetpools.SubnetPoolsTestBase):
-
-    smaller_prefix = u'10.11.12.0/26'
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('0212a042-603a-4f46-99e0-e37de9374d30')
-    def test_get_non_existent_subnetpool(self):
-        non_exist_id = data_utils.rand_name('subnetpool')
-        self.assertRaises(lib_exc.NotFound, self.client.show_subnetpool,
-                          non_exist_id)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('dc9336e5-f28f-4658-a0b0-cc79e607007d')
-    def test_tenant_get_not_shared_admin_subnetpool(self):
-        created_subnetpool = self._create_subnetpool(is_admin=True)
-        # A non-shared admin subnetpool cannot be retrieved by a
-        # tenant user.
-        self.assertRaises(lib_exc.NotFound, self.client.show_subnetpool,
-                          created_subnetpool['id'])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('5e1f2f86-d81a-498c-82ed-32a49f4dc4d3')
-    def test_delete_non_existent_subnetpool(self):
-        non_exist_id = data_utils.rand_name('subnetpool')
-        self.assertRaises(lib_exc.NotFound, self.client.delete_subnetpool,
-                          non_exist_id)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('d1143fe2-212b-4e23-a308-d18f7d8d78d6')
-    def test_tenant_create_shared_subnetpool(self):
-        # A 'shared' subnetpool can only be created by an admin.
-        self.assertRaises(lib_exc.Forbidden, self._create_subnetpool,
-                          is_admin=False, shared=True)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('4be84d30-60ca-4bd3-8512-db5b36ce1378')
-    def test_update_non_existent_subnetpool(self):
-        non_exist_id = data_utils.rand_name('subnetpool')
-        self.assertRaises(lib_exc.NotFound, self.client.update_subnetpool,
-                          non_exist_id, name='foo-name')
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('e6cd6d87-6173-45dd-bf04-c18ea7ec7537')
-    def test_update_subnetpool_not_modifiable_shared(self):
-        # The 'shared' attribute can be specified during creation,
-        # but it is not modifiable after creation.
-        created_subnetpool = self._create_subnetpool(is_admin=True)
-        pool_id = created_subnetpool['id']
-        self.assertRaises(lib_exc.BadRequest, self.client.update_subnetpool,
-                          pool_id, shared=True)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('62f7c43b-bff1-4def-8bb7-4754b840aaad')
-    def test_update_subnetpool_prefixes_shrink(self):
-        # Shrinking the current subnetpool prefixes is not supported
-        created_subnetpool = self._create_subnetpool()
-        self.assertRaises(lib_exc.BadRequest,
-                          self.client.update_subnetpool,
-                          created_subnetpool['id'],
-                          prefixes=[self.smaller_prefix])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('fc011824-153e-4469-97ad-9808eb88cae1')
-    def test_create_subnet_different_pools_same_network(self):
-        network = self.create_network(network_name='smoke-network')
-        created_subnetpool = self._create_subnetpool(
-            is_admin=True, prefixes=['192.168.0.0/16'])
-        subnet = self.create_subnet(
-            network, cidr=netaddr.IPNetwork('10.10.10.0/24'), ip_version=4,
-            gateway=None, client=self.admin_client)
-        # add the subnet created by the admin to the cleanup because
-        # base.py doesn't delete it using the admin client
-        self.addCleanup(self.admin_client.delete_subnet, subnet['id'])
-        self.assertRaises(lib_exc.BadRequest, self.create_subnet, network,
-                          ip_version=4,
-                          subnetpool_id=created_subnetpool['id'],
-                          client=self.admin_client)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('9589e332-638e-476e-81bd-013d964aa3cb')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_create_subnetpool_associate_invalid_address_scope(self):
-        self.assertRaises(lib_exc.BadRequest, self._create_subnetpool,
-                          address_scope_id='foo-addr-scope')
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('3b6c5942-485d-4964-a560-55608af020b5')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_create_subnetpool_associate_non_exist_address_scope(self):
-        self.assertRaises(lib_exc.NotFound, self._create_subnetpool,
-                          address_scope_id=str(uuid.uuid4()))
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('2dfb4269-8657-485a-a053-b022e911456e')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_create_subnetpool_associate_address_scope_prefix_intersect(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'),
-            ip_version=4)
-        addr_scope_id = address_scope['id']
-        self._create_subnetpool(address_scope_id=addr_scope_id)
-        subnetpool_data = {'name': 'foo-subnetpool',
-                           'prefixes': [u'10.11.12.13/24'],
-                           'min_prefixlen': '29',
-                           'address_scope_id': addr_scope_id}
-        self.assertRaises(lib_exc.Conflict, self._create_subnetpool,
-                          **subnetpool_data)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('83a19a13-5384-42e2-b579-43fc69c80914')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_create_sp_associate_address_scope_multiple_prefix_intersect(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'),
-            ip_version=4)
-        addr_scope_id = address_scope['id']
-        self._create_subnetpool(prefixes=[u'20.0.0.0/18', u'30.0.0.0/18'],
-                                address_scope_id=addr_scope_id)
-        prefixes = [u'40.0.0.0/18', u'50.0.0.0/18', u'30.0.0.0/12']
-        subnetpool_data = {'name': 'foo-subnetpool',
-                           'prefixes': prefixes,
-                           'min_prefixlen': '29',
-                           'address_scope_id': addr_scope_id}
-        self.assertRaises(lib_exc.Conflict, self._create_subnetpool,
-                          **subnetpool_data)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('f06d8e7b-908b-4e94-b570-8156be6a4bf1')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_create_subnetpool_associate_address_scope_of_other_owner(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'), is_admin=True,
-            ip_version=4)
-        self.assertRaises(lib_exc.NotFound, self._create_subnetpool,
-                          address_scope_id=address_scope['id'])
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('3396ec6c-cb80-4ebe-b897-84e904580bdf')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_tenant_create_subnetpool_associate_shared_address_scope(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'), is_admin=True,
-            shared=True, ip_version=4)
-        self.assertRaises(lib_exc.BadRequest, self._create_subnetpool,
-                          address_scope_id=address_scope['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('6d3d9ad5-32d4-4d63-aa00-8c62f73e2881')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_update_subnetpool_associate_address_scope_of_other_owner(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'), is_admin=True,
-            ip_version=4)
-        address_scope_id = address_scope['id']
-        created_subnetpool = self._create_subnetpool()
-        self.assertRaises(lib_exc.NotFound, self.client.update_subnetpool,
-                          created_subnetpool['id'],
-                          address_scope_id=address_scope_id)
-
-    def _test_update_subnetpool_prefix_intersect_helper(
-            self, pool_1_prefixes, pool_2_prefixes, pool_1_updated_prefixes):
-        # Create two subnetpools associated with the same address scope.
-        # Updating the first subnetpool with a prefix that intersects
-        # the second pool's prefixes should fail.
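-        # For instance (see test_update_subnetpool_prefix_intersect
-        # below), pools built from 20.0.0.0/18 and 20.10.0.0/24 conflict
-        # once the first is widened to 20.0.0.0/12.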
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'), ip_version=4)
-        addr_scope_id = address_scope['id']
-        pool_values = {'address_scope_id': addr_scope_id,
-                       'prefixes': pool_1_prefixes}
-        created_subnetpool_1 = self._create_subnetpool(**pool_values)
-        pool_id_1 = created_subnetpool_1['id']
-        pool_values = {'address_scope_id': addr_scope_id,
-                       'prefixes': pool_2_prefixes}
-        self._create_subnetpool(**pool_values)
-        # now update the pool_id_1 with the prefix intersecting with
-        # pool_id_2
-        self.assertRaises(lib_exc.Conflict, self.client.update_subnetpool,
-                          pool_id_1, prefixes=pool_1_updated_prefixes)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('96006292-7214-40e0-a471-153fb76e6b31')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_update_subnetpool_prefix_intersect(self):
-        pool_1_prefix = [u'20.0.0.0/18']
-        pool_2_prefix = [u'20.10.0.0/24']
-        pool_1_updated_prefix = [u'20.0.0.0/12']
-        self._test_update_subnetpool_prefix_intersect_helper(
-            pool_1_prefix, pool_2_prefix, pool_1_updated_prefix)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('4d3f8a79-c530-4e59-9acf-6c05968adbfe')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_update_subnetpool_multiple_prefix_intersect(self):
-        pool_1_prefixes = [u'20.0.0.0/18', u'30.0.0.0/18']
-        pool_2_prefixes = [u'20.10.0.0/24', u'40.0.0.0/18', '50.0.0.0/18']
-        pool_1_updated_prefixes = [u'20.0.0.0/18', u'30.0.0.0/18',
-                                   u'50.0.0.0/12']
-        self._test_update_subnetpool_prefix_intersect_helper(
-            pool_1_prefixes, pool_2_prefixes, pool_1_updated_prefixes)
-
-    @test.attr(type=['negative', 'smoke'])
-    @test.idempotent_id('7438e49e-1351-45d8-937b-892059fb97f5')
-    @test.requires_ext(extension='address-scope', service='network')
-    def test_tenant_update_sp_prefix_associated_with_shared_addr_scope(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'), is_admin=True,
-            shared=True, ip_version=4)
-        addr_scope_id = address_scope['id']
-        pool_values = {'prefixes': [u'20.0.0.0/18', u'30.0.0.0/18']}
-
-        created_subnetpool = self._create_subnetpool(**pool_values)
-        pool_id = created_subnetpool['id']
-        # associate the subnetpool with the address scope as an admin
-        self.admin_client.update_subnetpool(pool_id,
-                                            address_scope_id=addr_scope_id)
-        body = self.admin_client.show_subnetpool(pool_id)
-        self.assertEqual(addr_scope_id,
-                         body['subnetpool']['address_scope_id'])
-
-        # updating the subnetpool prefixes as the tenant user should fail
-        # since the tenant is not the owner of the address scope
-        update_prefixes = [u'20.0.0.0/18', u'30.0.0.0/18', u'40.0.0.0/18']
-        self.assertRaises(lib_exc.BadRequest, self.client.update_subnetpool,
-                          pool_id, prefixes=update_prefixes)
-
-        # admin can update the prefixes
-        self.admin_client.update_subnetpool(pool_id, prefixes=update_prefixes)
-        body = self.admin_client.show_subnetpool(pool_id)
-        self.assertEqual(update_prefixes,
-                         body['subnetpool']['prefixes'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('648fee7d-a909-4ced-bad3-3a169444c0a8')
-    def test_update_subnetpool_associate_address_scope_wrong_ip_version(self):
-        address_scope = self.create_address_scope(
-            name=data_utils.rand_name('smoke-address-scope'),
-            ip_version=6)
-        created_subnetpool = self._create_subnetpool()
-        self.assertRaises(lib_exc.BadRequest, self.client.update_subnetpool,
-                          created_subnetpool['id'],
-                          address_scope_id=address_scope['id'])
diff --git a/neutron/tests/api/test_vpnaas_extensions.py b/neutron/tests/api/test_vpnaas_extensions.py
deleted file mode 100644 (file)
index e645454..0000000
+++ /dev/null
@@ -1,328 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import six
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import base
-from neutron.tests.tempest import config
-from neutron.tests.tempest import test
-
-CONF = config.CONF
-
-
-class VPNaaSTestJSON(base.BaseAdminNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
-        List, Show, Create, Delete, and Update VPN Service
-        List, Show, Create, Delete, and Update IKE policy
-        List, Show, Create, Delete, and Update IPSec policy
-    """
-
-    @classmethod
-    def resource_setup(cls):
-        if not test.is_extension_enabled('vpnaas', 'network'):
-            msg = "vpnaas extension not enabled."
-            raise cls.skipException(msg)
-        super(VPNaaSTestJSON, cls).resource_setup()
-        cls.ext_net_id = CONF.network.public_network_id
-        cls.network = cls.create_network()
-        cls.subnet = cls.create_subnet(cls.network)
-        cls.router = cls.create_router(
-            data_utils.rand_name("router"),
-            external_network_id=CONF.network.public_network_id)
-        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
-        cls.vpnservice = cls.create_vpnservice(cls.subnet['id'],
-                                               cls.router['id'])
-
-        cls.ikepolicy = cls.create_ikepolicy(
-            data_utils.rand_name("ike-policy-"))
-        cls.ipsecpolicy = cls.create_ipsecpolicy(
-            data_utils.rand_name("ipsec-policy-"))
-
-    def _delete_ike_policy(self, ike_policy_id):
-        # Deletes an IKE policy and verifies that it is deleted
-        ike_list = list()
-        all_ike = self.client.list_ikepolicies()
-        for ike in all_ike['ikepolicies']:
-            ike_list.append(ike['id'])
-        if ike_policy_id in ike_list:
-            self.client.delete_ikepolicy(ike_policy_id)
-            # Assert that the policy is not found in the list after deletion
-            ikepolicies = self.client.list_ikepolicies()
-            ike_id_list = list()
-            for i in ikepolicies['ikepolicies']:
-                ike_id_list.append(i['id'])
-            self.assertNotIn(ike_policy_id, ike_id_list)
-
-    def _delete_ipsec_policy(self, ipsec_policy_id):
-        # Deletes an IPsec policy if it exists
-        try:
-            self.client.delete_ipsecpolicy(ipsec_policy_id)
-
-        except lib_exc.NotFound:
-            pass
-
-    def _assertExpected(self, expected, actual):
-        # Check that the expected keys/values exist in the actual
-        # response body
-        for key, value in six.iteritems(expected):
-            self.assertIn(key, actual)
-            self.assertEqual(value, actual[key])
-
-    def _delete_vpn_service(self, vpn_service_id):
-        self.client.delete_vpnservice(vpn_service_id)
-        # Assert that the vpn service is no longer found in the list
-        body = self.client.list_vpnservices()
-        vpn_services = [vs['id'] for vs in body['vpnservices']]
-        self.assertNotIn(vpn_service_id, vpn_services)
-
-    def _get_tenant_id(self):
-        """
-        Returns the tenant_id of the client's current user
-        """
-        # TODO(jroovers) This is a temporary workaround to get the tenant_id
-        # of the current client. Replace this once tenant_isolation for
-        # neutron is fixed.
-        body = self.client.show_network(self.network['id'])
-        return body['network']['tenant_id']
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('14311574-0737-4e53-ac05-f7ae27742eed')
-    def test_admin_create_ipsec_policy_for_tenant(self):
-        tenant_id = self._get_tenant_id()
-        # Create an IPSec policy for the tenant
-        name = data_utils.rand_name('ipsec-policy')
-        body = (self.admin_client.
-                create_ipsecpolicy(name=name, tenant_id=tenant_id))
-        ipsecpolicy = body['ipsecpolicy']
-        self.assertIsNotNone(ipsecpolicy['id'])
-        self.addCleanup(self.admin_client.delete_ipsecpolicy,
-                        ipsecpolicy['id'])
-
-        # Assert that created ipsec policy is found in API list call
-        body = self.client.list_ipsecpolicies()
-        ipsecpolicies = [policy['id'] for policy in body['ipsecpolicies']]
-        self.assertIn(ipsecpolicy['id'], ipsecpolicies)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('b62acdc6-0c53-4d84-84aa-859b22b79799')
-    def test_admin_create_vpn_service_for_tenant(self):
-        tenant_id = self._get_tenant_id()
-
-        # Create a vpn service for the tenant
-        network2 = self.create_network()
-        subnet2 = self.create_subnet(network2)
-        router2 = self.create_router(data_utils.rand_name('router-'),
-                                     external_network_id=self.ext_net_id)
-        self.create_router_interface(router2['id'], subnet2['id'])
-        name = data_utils.rand_name('vpn-service')
-        body = self.admin_client.create_vpnservice(
-            subnet_id=subnet2['id'],
-            router_id=router2['id'],
-            name=name,
-            admin_state_up=True,
-            tenant_id=tenant_id)
-        vpnservice = body['vpnservice']
-        self.assertIsNotNone(vpnservice['id'])
-        self.addCleanup(self.admin_client.delete_vpnservice, vpnservice['id'])
-        # Assert that created vpnservice is found in API list call
-        body = self.client.list_vpnservices()
-        vpn_services = [vs['id'] for vs in body['vpnservices']]
-        self.assertIn(vpnservice['id'], vpn_services)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('58cc4a1c-443b-4f39-8fb6-c19d39f343ab')
-    def test_admin_create_ike_policy_for_tenant(self):
-        tenant_id = self._get_tenant_id()
-
-        # Create an IKE policy for the tenant
-        name = data_utils.rand_name('ike-policy')
-        body = (self.admin_client.
-                create_ikepolicy(name=name, ike_version="v1",
-                                 encryption_algorithm="aes-128",
-                                 auth_algorithm="sha1",
-                                 tenant_id=tenant_id))
-        ikepolicy = body['ikepolicy']
-        self.assertIsNotNone(ikepolicy['id'])
-        self.addCleanup(self.admin_client.delete_ikepolicy, ikepolicy['id'])
-
-        # Assert that created ike policy is found in API list call
-        body = self.client.list_ikepolicies()
-        ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
-        self.assertIn(ikepolicy['id'], ikepolicies)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('de5bb04c-3a1f-46b1-b329-7a8abba5c7f1')
-    def test_list_vpn_services(self):
-        # Verify the VPN service exists in the list of all VPN services
-        body = self.client.list_vpnservices()
-        vpnservices = body['vpnservices']
-        self.assertIn(self.vpnservice['id'], [v['id'] for v in vpnservices])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('aacb13b1-fdc7-41fd-bab2-32621aee1878')
-    def test_create_update_delete_vpn_service(self):
-        # Creates a VPN service and sets up deletion
-        network1 = self.create_network()
-        subnet1 = self.create_subnet(network1)
-        router1 = self.create_router(data_utils.rand_name('router-'),
-                                     external_network_id=self.ext_net_id)
-        self.create_router_interface(router1['id'], subnet1['id'])
-        name = data_utils.rand_name('vpn-service1')
-        body = self.client.create_vpnservice(subnet_id=subnet1['id'],
-                                             router_id=router1['id'],
-                                             name=name,
-                                             admin_state_up=True)
-        vpnservice = body['vpnservice']
-        self.addCleanup(self._delete_vpn_service, vpnservice['id'])
-        # Assert that the created vpnservice is found in the
-        # vpnservices list
-        body = self.client.list_vpnservices()
-        vpn_services = [vs['id'] for vs in body['vpnservices']]
-        self.assertIsNotNone(vpnservice['id'])
-        self.assertIn(vpnservice['id'], vpn_services)
-
-        # TODO(raies): implement logic to update the vpnservice.
-        # The VPNaaS client function for updating is implemented,
-        # but the precondition is that the current state of the
-        # vpnservice is "ACTIVE", not "PENDING*".
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('0dedfc1d-f8ee-4e2a-bfd4-7997b9dc17ff')
-    def test_show_vpn_service(self):
-        # Verifies the details of a vpn service
-        body = self.client.show_vpnservice(self.vpnservice['id'])
-        vpnservice = body['vpnservice']
-        self.assertEqual(self.vpnservice['id'], vpnservice['id'])
-        self.assertEqual(self.vpnservice['name'], vpnservice['name'])
-        self.assertEqual(self.vpnservice['description'],
-                         vpnservice['description'])
-        self.assertEqual(self.vpnservice['router_id'], vpnservice['router_id'])
-        self.assertEqual(self.vpnservice['subnet_id'], vpnservice['subnet_id'])
-        self.assertEqual(self.vpnservice['tenant_id'], vpnservice['tenant_id'])
-        valid_status = ["ACTIVE", "DOWN", "BUILD", "ERROR", "PENDING_CREATE",
-                        "PENDING_UPDATE", "PENDING_DELETE"]
-        self.assertIn(vpnservice['status'], valid_status)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('e0fb6200-da3d-4869-8340-a8c1956ca618')
-    def test_list_ike_policies(self):
-        # Verify the ike policy exists in the list of all IKE policies
-        body = self.client.list_ikepolicies()
-        ikepolicies = body['ikepolicies']
-        self.assertIn(self.ikepolicy['id'], [i['id'] for i in ikepolicies])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('d61f29a5-160c-487d-bc0d-22e32e731b44')
-    def test_create_update_delete_ike_policy(self):
-        # Creates an IKE policy
-        name = data_utils.rand_name('ike-policy')
-        body = (self.client.create_ikepolicy(
-                name=name,
-                ike_version="v1",
-                encryption_algorithm="aes-128",
-                auth_algorithm="sha1"))
-        ikepolicy = body['ikepolicy']
-        self.assertIsNotNone(ikepolicy['id'])
-        self.addCleanup(self._delete_ike_policy, ikepolicy['id'])
-
-        # Update IKE Policy
-        new_ike = {'name': data_utils.rand_name("New-IKE"),
-                   'description': "Updated ike policy",
-                   'encryption_algorithm': "aes-256",
-                   'ike_version': "v2",
-                   'pfs': "group14",
-                   'lifetime': {'units': "seconds", 'value': 2000}}
-        self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
-        # Confirm that update was successful by verifying using 'show'
-        body = self.client.show_ikepolicy(ikepolicy['id'])
-        ike_policy = body['ikepolicy']
-        for key, value in six.iteritems(new_ike):
-            self.assertIn(key, ike_policy)
-            self.assertEqual(value, ike_policy[key])
-
-        # Verification of ike policy delete
-        self.client.delete_ikepolicy(ikepolicy['id'])
-        body = self.client.list_ikepolicies()
-        ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
-        self.assertNotIn(ike_policy['id'], ikepolicies)
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('b5fcf3a3-9407-452d-b8a8-e7c6c32baea8')
-    def test_show_ike_policy(self):
-        # Verifies the details of an IKE policy
-        body = self.client.show_ikepolicy(self.ikepolicy['id'])
-        ikepolicy = body['ikepolicy']
-        self.assertEqual(self.ikepolicy['id'], ikepolicy['id'])
-        self.assertEqual(self.ikepolicy['name'], ikepolicy['name'])
-        self.assertEqual(self.ikepolicy['description'],
-                         ikepolicy['description'])
-        self.assertEqual(self.ikepolicy['encryption_algorithm'],
-                         ikepolicy['encryption_algorithm'])
-        self.assertEqual(self.ikepolicy['auth_algorithm'],
-                         ikepolicy['auth_algorithm'])
-        self.assertEqual(self.ikepolicy['tenant_id'],
-                         ikepolicy['tenant_id'])
-        self.assertEqual(self.ikepolicy['pfs'],
-                         ikepolicy['pfs'])
-        self.assertEqual(self.ikepolicy['phase1_negotiation_mode'],
-                         ikepolicy['phase1_negotiation_mode'])
-        self.assertEqual(self.ikepolicy['ike_version'],
-                         ikepolicy['ike_version'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('19ea0a2f-add9-44be-b732-ffd8a7b42f37')
-    def test_list_ipsec_policies(self):
-        # Verify the ipsec policy exists in the list of all ipsec policies
-        body = self.client.list_ipsecpolicies()
-        ipsecpolicies = body['ipsecpolicies']
-        self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('9c1701c9-329a-4e5d-930a-1ead1b3f86ad')
-    def test_create_update_delete_ipsec_policy(self):
-        # Creates an ipsec policy
-        ipsec_policy_body = {'name': data_utils.rand_name('ipsec-policy'),
-                             'pfs': 'group5',
-                             'encryption_algorithm': "aes-128",
-                             'auth_algorithm': 'sha1'}
-        resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
-        ipsecpolicy = resp_body['ipsecpolicy']
-        self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
-        self._assertExpected(ipsec_policy_body, ipsecpolicy)
-        # Verification of ipsec policy update
-        new_ipsec = {'description': 'Updated ipsec policy',
-                     'pfs': 'group2',
-                     'name': data_utils.rand_name("New-IPSec"),
-                     'encryption_algorithm': "aes-256",
-                     'lifetime': {'units': "seconds", 'value': '2000'}}
-        body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
-                                              **new_ipsec)
-        updated_ipsec_policy = body['ipsecpolicy']
-        self._assertExpected(new_ipsec, updated_ipsec_policy)
-        # Verification of ipsec policy delete
-        self.client.delete_ipsecpolicy(ipsecpolicy['id'])
-        self.assertRaises(lib_exc.NotFound,
-                          self.client.delete_ipsecpolicy, ipsecpolicy['id'])
-
-    @test.attr(type='smoke')
-    @test.idempotent_id('601f8a05-9d3c-4539-a400-1c4b3a21b03b')
-    def test_show_ipsec_policy(self):
-        # Verifies the details of an ipsec policy
-        body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
-        ipsecpolicy = body['ipsecpolicy']
-        self._assertExpected(self.ipsecpolicy, ipsecpolicy)
diff --git a/neutron/tests/base.py b/neutron/tests/base.py
deleted file mode 100644 (file)
index 6fe73f6..0000000
+++ /dev/null
@@ -1,407 +0,0 @@
-# Copyright 2010-2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Base test cases for all neutron tests.
-"""
-
-import contextlib
-import gc
-import os
-import os.path
-import random
-import weakref
-
-import eventlet.timeout
-import fixtures
-import mock
-from oslo_concurrency.fixture import lockutils
-from oslo_config import cfg
-from oslo_messaging import conffixture as messaging_conffixture
-from oslo_utils import strutils
-from oslotest import base
-import six
-
-from neutron._i18n import _
-from neutron.agent.linux import external_process
-from neutron.api.rpc.callbacks.consumer import registry as rpc_consumer_reg
-from neutron.callbacks import manager as registry_manager
-from neutron.callbacks import registry
-from neutron.common import config
-from neutron.common import constants
-from neutron.common import rpc as n_rpc
-from neutron.db import agentschedulers_db
-from neutron import manager
-from neutron import policy
-from neutron.tests import fake_notifier
-from neutron.tests import post_mortem_debug
-from neutron.tests import tools
-
-
-CONF = cfg.CONF
-CONF.import_opt('state_path', 'neutron.common.config')
-
-ROOTDIR = os.path.dirname(__file__)
-ETCDIR = os.path.join(ROOTDIR, 'etc')
-
-
-def etcdir(*p):
-    return os.path.join(ETCDIR, *p)
-
-
-def fake_use_fatal_exceptions(*args):
-    return True
-
-
-def fake_consume_in_threads(self):
-    return []
-
-
-def get_rand_name(max_length=None, prefix='test'):
-    """Return a random string.
-
-    The string will start with 'prefix' and will be exactly 'max_length'.
-    If 'max_length' is None, then exactly 8 random characters, each
-    hexadecimal, will be added. In case len(prefix) <= len(max_length),
-    ValueError will be raised to indicate the problem.
-    """
-
-    if max_length:
-        length = max_length - len(prefix)
-        if length <= 0:
-            raise ValueError("'max_length' must be bigger than 'len(prefix)'.")
-
-        suffix = ''.join(str(random.randint(0, 9)) for i in range(length))
-    else:
-        suffix = hex(random.randint(0x10000000, 0x7fffffff))[2:]
-    return prefix + suffix
-
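-# A minimal usage sketch of get_rand_name (the concrete values below are
-# hypothetical):
-#
-#     get_rand_name(max_length=12, prefix='test')  # e.g. 'test64208315'
-#     get_rand_name()                              # e.g. 'test3fa2c1b0'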
-
-def get_rand_device_name(prefix='test'):
-    return get_rand_name(
-        max_length=constants.DEVICE_NAME_MAX_LEN, prefix=prefix)
-
-
-def bool_from_env(key, strict=False, default=False):
-    value = os.environ.get(key)
-    return strutils.bool_from_string(value, strict=strict, default=default)
-
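-# A usage sketch (assuming OS_FAIL_ON_MISSING_DEPS=1 is exported in the
-# environment):
-#
-#     bool_from_env('OS_FAIL_ON_MISSING_DEPS')  # -> True
-#     bool_from_env('UNSET_VARIABLE')           # -> False (the default)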
-
-def sanitize_log_path(path):
-    # Sanitize the string so that the resulting log path is shell friendly
-    return path.replace(' ', '-').replace('(', '_').replace(')', '_')
-
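-# For example (hypothetical input):
-#
-#     sanitize_log_path('TestFoo.test_bar (some id)')
-#     # -> 'TestFoo.test_bar-_some-id_'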
-
-class AttributeDict(dict):
-
-    """
-    Provide attribute access (dict.key) to dictionary values.
-    """
-
-    def __getattr__(self, name):
-        """Allow attribute access for all keys in the dict."""
-        if name in self:
-            return self[name]
-        raise AttributeError(_("Unknown attribute '%s'.") % name)
-
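-# Illustrative behavior of AttributeDict (hypothetical values):
-#
-#     d = AttributeDict({'name': 'eth0'})
-#     d.name     # -> 'eth0'
-#     d.missing  # raises AttributeError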
-
-class DietTestCase(base.BaseTestCase):
-    """Same great taste, less filling.
-
-    BaseTestCase is responsible for doing lots of plugin-centric setup
-    that not all tests require (or can tolerate).  This class provides
-    only functionality that is common across all tests.
-    """
-
-    def setUp(self):
-        super(DietTestCase, self).setUp()
-
-        # FIXME(amuller): this must be called in the Neutron unit tests base
-        # class to initialize the DB connection string. Moving this may cause
-        # non-deterministic failures. Bug #1489098 for more info.
-        config.set_db_defaults()
-
-        # Configure this first to ensure pm debugging support for setUp()
-        debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
-        if debugger:
-            self.addOnException(post_mortem_debug.get_exception_handler(
-                debugger))
-
-        # Make sure we see all relevant deprecation warnings when running tests
-        self.useFixture(tools.WarningsFixture())
-
-        # NOTE(ihrachys): oslotest already sets stopall for cleanup, but it
-        # does it using six.moves.mock (the library was moved into
-        # unittest.mock in Python 3.3). So until we switch to six.moves.mock
-        # everywhere in unit tests, we can't remove this setup. The base class
-        # is used in third-party projects, so we would need to switch all of
-        # them to six before removing the cleanup callback from here.
-        self.addCleanup(mock.patch.stopall)
-
-        self.addOnException(self.check_for_systemexit)
-        self.orig_pid = os.getpid()
-
-    def check_for_systemexit(self, exc_info):
-        if isinstance(exc_info[1], SystemExit):
-            if os.getpid() != self.orig_pid:
-                # Subprocess - let it just exit
-                raise
-            # This makes sys.exit(0) still a failure
-            self.force_failure = True
-
-    @contextlib.contextmanager
-    def assert_max_execution_time(self, max_execution_time=5):
-        with eventlet.timeout.Timeout(max_execution_time, False):
-            yield
-            return
-        self.fail('Execution of this test timed out')
-
-    def assertOrderedEqual(self, expected, actual):
-        expect_val = self.sort_dict_lists(expected)
-        actual_val = self.sort_dict_lists(actual)
-        self.assertEqual(expect_val, actual_val)
-
-    def sort_dict_lists(self, dic):
-        for key, value in six.iteritems(dic):
-            if isinstance(value, list):
-                dic[key] = sorted(value)
-            elif isinstance(value, dict):
-                dic[key] = self.sort_dict_lists(value)
-        return dic
-
-    def assertDictSupersetOf(self, expected_subset, actual_superset):
-        """Checks that actual dict contains the expected dict.
-
-        After checking that the arguments are of the right type, this checks
-        that each item in expected_subset is in, and matches, what is in
-        actual_superset. Separate tests are done, so that detailed info can
-        be reported upon failure.
-        """
-        if not isinstance(expected_subset, dict):
-            self.fail("expected_subset (%s) is not an instance of dict" %
-                      type(expected_subset))
-        if not isinstance(actual_superset, dict):
-            self.fail("actual_superset (%s) is not an instance of dict" %
-                      type(actual_superset))
-        for k, v in expected_subset.items():
-            self.assertIn(k, actual_superset)
-            self.assertEqual(v, actual_superset[k],
-                             "Key %(key)s expected: %(exp)r, actual %(act)r" %
-                             {'key': k, 'exp': v, 'act': actual_superset[k]})
-
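-# A sketch of assertDictSupersetOf (hypothetical dicts): the following
-# assertion passes because the second dict contains every key/value pair
-# of the first:
-#
-#     self.assertDictSupersetOf({'a': 1}, {'a': 1, 'b': 2})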
-
-class ProcessMonitorFixture(fixtures.Fixture):
-    """Test fixture to capture and cleanup any spawn process monitor."""
-
-    def _setUp(self):
-        self.old_callable = (
-            external_process.ProcessMonitor._spawn_checking_thread)
-        p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor."
-                       "_spawn_checking_thread",
-                       new=lambda x: self.record_calls(x))
-        p.start()
-        self.instances = []
-        self.addCleanup(self.stop)
-
-    def stop(self):
-        for instance in self.instances:
-            instance.stop()
-
-    def record_calls(self, instance):
-        self.old_callable(instance)
-        self.instances.append(instance)
-
-
-class BaseTestCase(DietTestCase):
-
-    @staticmethod
-    def config_parse(conf=None, args=None):
-        """Create the default configurations."""
-        # neutron.conf includes rpc_backend which needs to be cleaned up
-        if args is None:
-            args = []
-        args += ['--config-file', etcdir('neutron.conf')]
-        if conf is None:
-            config.init(args=args)
-        else:
-            conf(args)
-
-    def setUp(self):
-        super(BaseTestCase, self).setUp()
-
-        self.useFixture(lockutils.ExternalLockFixture())
-
-        cfg.CONF.set_override('state_path', self.get_default_temp_dir().path)
-
-        self.addCleanup(CONF.reset)
-        self.useFixture(ProcessMonitorFixture())
-
-        self.useFixture(fixtures.MonkeyPatch(
-            'neutron.common.exceptions.NeutronException.use_fatal_exceptions',
-            fake_use_fatal_exceptions))
-
-        self.useFixture(fixtures.MonkeyPatch(
-            'oslo_config.cfg.find_config_files',
-            lambda project=None, prog=None, extension=None: []))
-
-        self.setup_rpc_mocks()
-        self.setup_config()
-        self.setup_test_registry_instance()
-
-        policy.init()
-        self.addCleanup(policy.reset)
-        self.addCleanup(rpc_consumer_reg.clear)
-
-    def get_new_temp_dir(self):
-        """Create a new temporary directory.
-
-        :returns fixtures.TempDir
-        """
-        return self.useFixture(fixtures.TempDir())
-
-    def get_default_temp_dir(self):
-        """Create a default temporary directory.
-
-        Returns the same directory during the whole test case.
-
-        :returns fixtures.TempDir
-        """
-        if not hasattr(self, '_temp_dir'):
-            self._temp_dir = self.get_new_temp_dir()
-        return self._temp_dir
-
-    def get_temp_file_path(self, filename, root=None):
-        """Returns an absolute path for a temporary file.
-
-        If root is None, the file is created in the default temporary
-        directory, creating that directory first if it is not initialized
-        yet.
-
-        If root is not None, the file is created inside the directory passed
-        as the root= argument.
-
-        :param filename: filename
-        :type filename: string
-        :param root: temporary directory to create a new file in
-        :type root: fixtures.TempDir
-        :returns absolute file path string
-        """
-        root = root or self.get_default_temp_dir()
-        return root.join(filename)
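-
-    # A usage sketch (the filename is hypothetical):
-    #
-    #     path = self.get_temp_file_path('dhcp.conf')
-    #     # -> '<default temp dir>/dhcp.conf'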
-
-    def setup_rpc_mocks(self):
-        # don't actually start RPC listeners when testing
-        self.useFixture(fixtures.MonkeyPatch(
-            'neutron.common.rpc.Connection.consume_in_threads',
-            fake_consume_in_threads))
-
-        self.useFixture(fixtures.MonkeyPatch(
-            'oslo_messaging.Notifier', fake_notifier.FakeNotifier))
-
-        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
-        self.messaging_conf.transport_driver = 'fake'
-        # NOTE(russellb) We want all calls to return immediately.
-        self.messaging_conf.response_timeout = 0
-        self.useFixture(self.messaging_conf)
-
-        self.addCleanup(n_rpc.clear_extra_exmods)
-        n_rpc.add_extra_exmods('neutron.test')
-
-        self.addCleanup(n_rpc.cleanup)
-        n_rpc.init(CONF)
-
-    def setup_test_registry_instance(self):
-        """Give a private copy of the registry to each test."""
-        self._callback_manager = registry_manager.CallbacksManager()
-        mock.patch.object(registry, '_get_callback_manager',
-                          return_value=self._callback_manager).start()
-
-    def setup_config(self, args=None):
-        """Tests that need a non-default config can override this method."""
-        self.config_parse(args=args)
-
-    def config(self, **kw):
-        """Override some configuration values.
-
-        The keyword arguments are the names of configuration options to
-        override and their values.
-
-        If a group argument is supplied, the overrides are applied to
-        the specified configuration option group.
-
-        All overrides are automatically cleared at the end of the current
-        test by the fixtures cleanup process.
-        """
-        group = kw.pop('group', None)
-        for k, v in six.iteritems(kw):
-            CONF.set_override(k, v, group)
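-
-    # A usage sketch (the option names are hypothetical):
-    #
-    #     self.config(debug=True)
-    #     self.config(report_interval=60, group='AGENT')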
-
-    def setup_coreplugin(self, core_plugin=None):
-        cp = PluginFixture(core_plugin)
-        self.useFixture(cp)
-        self.patched_dhcp_periodic = cp.patched_dhcp_periodic
-
-    def setup_notification_driver(self, notification_driver=None):
-        self.addCleanup(fake_notifier.reset)
-        if notification_driver is None:
-            notification_driver = [fake_notifier.__name__]
-        cfg.CONF.set_override("notification_driver", notification_driver)
-
-
-class PluginFixture(fixtures.Fixture):
-
-    def __init__(self, core_plugin=None):
-        super(PluginFixture, self).__init__()
-        self.core_plugin = core_plugin
-
-    def _setUp(self):
-        self.dhcp_periodic_p = mock.patch(
-            'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
-            'start_periodic_dhcp_agent_status_check')
-        self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
-        self.agent_health_check_p = mock.patch(
-            'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
-            'add_agent_status_check')
-        self.agent_health_check = self.agent_health_check_p.start()
-        # Plugin cleanup should be triggered last so that
-        # test-specific cleanup has a chance to release references.
-        self.addCleanup(self.cleanup_core_plugin)
-        if self.core_plugin is not None:
-            cfg.CONF.set_override('core_plugin', self.core_plugin)
-
-    def cleanup_core_plugin(self):
-        """Ensure that the core plugin is deallocated."""
-        nm = manager.NeutronManager
-        if not nm.has_instance():
-            return
-
-        # TODO(marun) Fix plugins that do not properly initialize notifiers
-        agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
-
-        # Perform a check for deallocation only if explicitly
-        # configured to do so since calling gc.collect() after every
-        # test increases test suite execution time by ~50%.
-        check_plugin_deallocation = (
-            bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
-        if check_plugin_deallocation:
-            plugin = weakref.ref(nm._instance.plugin)
-
-        nm.clear_instance()
-
-        if check_plugin_deallocation:
-            gc.collect()
-
-            # TODO(marun) Ensure that mocks are deallocated?
-            if plugin() and not isinstance(plugin(), mock.Base):
-                raise AssertionError(
-                    'The plugin for this test was not deallocated.')
diff --git a/neutron/tests/common/__init__.py b/neutron/tests/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/common/agents/__init__.py b/neutron/tests/common/agents/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/common/agents/l2_extensions.py b/neutron/tests/common/agents/l2_extensions.py
deleted file mode 100644 (file)
index 11b354e..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.linux import utils as agent_utils
-
-
-def wait_until_bandwidth_limit_rule_applied(bridge, port_vif, rule):
-    def _bandwidth_limit_rule_applied():
-        bw_rule = bridge.get_egress_bw_limit_for_port(port_vif)
-        expected = None, None
-        if rule:
-            expected = rule.max_kbps, rule.max_burst_kbps
-        return bw_rule == expected
-
-    agent_utils.wait_until_true(_bandwidth_limit_rule_applied)
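-
-# A usage sketch (the bridge, port and rule objects are hypothetical):
-#
-#     wait_until_bandwidth_limit_rule_applied(ovs_bridge, 'tap0', rule)
-#     # blocks until the port reports (rule.max_kbps, rule.max_burst_kbps)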
diff --git a/neutron/tests/common/agents/l3_agent.py b/neutron/tests/common/agents/l3_agent.py
deleted file mode 100755 (executable)
index 410cce6..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import sys
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.agent.l3 import agent
-from neutron.agent.l3 import namespaces
-from neutron.agent import l3_agent
-
-
-class L3NATAgentForTest(agent.L3NATAgentWithStateReport):
-    def __init__(self, host, conf=None):
-        ns_suffix = '@%s' % cfg.CONF.test_namespace_suffix
-
-        # Mock out building of namespace names
-        orig_build_ns_name = namespaces.build_ns_name
-
-        def build_ns_name(prefix, identifier):
-            return "%s%s" % (orig_build_ns_name(prefix, identifier), ns_suffix)
-
-        build_ns = mock.patch.object(namespaces, 'build_ns_name').start()
-        build_ns.side_effect = build_ns_name
-
-        # Mock the parsing prefix from namespace names
-        orig_get_prefix = namespaces.get_prefix_from_ns_name
-
-        def get_prefix_from_ns_name(ns_name):
-            if ns_name.endswith(ns_suffix):
-                return orig_get_prefix(ns_name[:-len(ns_suffix)])
-
-        parse_prefix = mock.patch.object(namespaces,
-                                         'get_prefix_from_ns_name').start()
-        parse_prefix.side_effect = get_prefix_from_ns_name
-
-        # Mock the parsing id from namespace names
-        orig_get_id = namespaces.get_id_from_ns_name
-
-        def get_id_from_ns_name(ns_name):
-            if ns_name.endswith(ns_suffix):
-                return orig_get_id(ns_name[:-len(ns_suffix)])
-
-        parse_id = mock.patch.object(namespaces, 'get_id_from_ns_name').start()
-        parse_id.side_effect = get_id_from_ns_name
-
-        super(L3NATAgentForTest, self).__init__(host, conf)
-
-
-OPTS = [
-    cfg.StrOpt('test_namespace_suffix', default='testprefix',
-               help=_("Suffix to append to all namespace names.")),
-]
-
-
-def register_opts(conf):
-    conf.register_opts(OPTS)
-
-
-def main(manager='neutron.tests.common.agents.l3_agent.L3NATAgentForTest'):
-    register_opts(cfg.CONF)
-    l3_agent.main(manager=manager)
-
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/neutron/tests/common/base.py b/neutron/tests/common/base.py
deleted file mode 100644 (file)
index eff4a0b..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import functools
-import unittest.case
-
-from oslo_db.sqlalchemy import test_base
-import testtools.testcase
-
-from neutron.common import constants as n_const
-from neutron.tests import base
-from neutron.tests import tools
-
-
-def create_resource(prefix, creation_func, *args, **kwargs):
-    """Create a new resource that does not already exist.
-
-    If 'prefix' is shorter than the maximum device name length, a random
-    suffix is appended to make name collisions unlikely. Otherwise, 'prefix'
-    is used as is.
-
-    :param prefix: The prefix for a randomly generated name
-    :param creation_func: A function taking the name of the resource
-           to be created as its first argument.  A RuntimeError is assumed
-           to indicate a name collision and triggers a retry.
-    :param *args *kwargs: These will be passed to the create function.
-    """
-
-    # Don't generate a random name if prefix is already full-length.
-    if len(prefix) == n_const.DEVICE_NAME_MAX_LEN:
-        return creation_func(prefix, *args, **kwargs)
-
-    while True:
-        name = base.get_rand_name(
-            max_length=n_const.DEVICE_NAME_MAX_LEN,
-            prefix=prefix)
-        try:
-            return creation_func(name, *args, **kwargs)
-        except RuntimeError:
-            pass
-
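-# A usage sketch (assuming a hypothetical bridge-creation helper):
-#
-#     bridge = create_resource('test-br', ovs_lib.OVSBridge)
-#     # a RuntimeError from the helper is treated as a name collision and
-#     # the call retries with a fresh random name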
-
-def no_skip_on_missing_deps(wrapped):
-    """Do not allow a method/test to skip on missing dependencies.
-
-    This decorator raises an error if a skip is raised by the wrapped method
-    while OS_FAIL_ON_MISSING_DEPS evaluates to True. It should be used only
-    for missing dependencies (including missing system requirements).
-    """
-
-    @functools.wraps(wrapped)
-    def wrapper(*args, **kwargs):
-        try:
-            return wrapped(*args, **kwargs)
-        except (testtools.TestCase.skipException, unittest.case.SkipTest) as e:
-            if base.bool_from_env('OS_FAIL_ON_MISSING_DEPS'):
-                tools.fail(
-                    '%s cannot be skipped because OS_FAIL_ON_MISSING_DEPS '
-                    'is enabled, skip reason: %s' % (wrapped.__name__, e))
-            raise
-    return wrapper
-
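-# A typical use sketch: decorate a test so that a missing dependency fails
-# loudly when OS_FAIL_ON_MISSING_DEPS is enabled instead of skipping:
-#
-#     @no_skip_on_missing_deps
-#     def test_feature_requiring_ovs(self):
-#         ...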
-
-class MySQLTestCase(test_base.MySQLOpportunisticTestCase):
-    """Base test class for MySQL tests.
-
-    If the MySQL db is unavailable then this test is skipped, unless
-    OS_FAIL_ON_MISSING_DEPS is enabled.
-    """
-    SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS')
-
-
-class PostgreSQLTestCase(test_base.PostgreSQLOpportunisticTestCase):
-    """Base test class for PostgreSQL tests.
-
-    If the PostgreSQL db is unavailable then this test is skipped, unless
-    OS_FAIL_ON_MISSING_DEPS is enabled.
-    """
-    SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS')
diff --git a/neutron/tests/common/config_fixtures.py b/neutron/tests/common/config_fixtures.py
deleted file mode 100644 (file)
index 2fce0e5..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os.path
-
-import fixtures
-import six
-
-from neutron.tests import base
-
-
-class ConfigDict(base.AttributeDict):
-    def update(self, other):
-        self.convert_to_attr_dict(other)
-        super(ConfigDict, self).update(other)
-
-    def convert_to_attr_dict(self, other):
-        """Convert nested dicts to AttributeDict.
-
-        :param other: dictionary to be directly modified.
-        """
-        for key, value in six.iteritems(other):
-            if isinstance(value, dict):
-                if not isinstance(value, base.AttributeDict):
-                    other[key] = base.AttributeDict(value)
-                self.convert_to_attr_dict(value)
-
-
-class ConfigFileFixture(fixtures.Fixture):
-    """A fixture that knows how to translate configurations to files.
-
-    :param base_filename: the filename to use on disk.
-    :param config: a ConfigDict instance.
-    :param temp_dir: an existing temporary directory to use for storage.
-    """
-
-    def __init__(self, base_filename, config, temp_dir):
-        super(ConfigFileFixture, self).__init__()
-        self.base_filename = base_filename
-        self.config = config
-        self.temp_dir = temp_dir
-
-    def _setUp(self):
-        config_parser = self.dict_to_config_parser(self.config)
-        # temp_dir is expected to be unique per test, so joining it with
-        # base_filename yields a path that will not collide across tests
-        self.filename = os.path.join(self.temp_dir, self.base_filename)
-        with open(self.filename, 'w') as f:
-            config_parser.write(f)
-            f.flush()
-
-    def dict_to_config_parser(self, config_dict):
-        config_parser = six.moves.configparser.SafeConfigParser()
-        for section, section_dict in six.iteritems(config_dict):
-            if section != 'DEFAULT':
-                config_parser.add_section(section)
-            for option, value in six.iteritems(section_dict):
-                config_parser.set(section, option, value)
-        return config_parser
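-
-# A usage sketch (the file name and contents are hypothetical):
-#
-#     config = ConfigDict({'DEFAULT': {'debug': 'True'},
-#                          'AGENT': {'report_interval': '60'}})
-#     self.useFixture(ConfigFileFixture('neutron.conf', config, temp_dir))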
diff --git a/neutron/tests/common/conn_testers.py b/neutron/tests/common/conn_testers.py
deleted file mode 100644 (file)
index 7d99ddb..0000000
+++ /dev/null
@@ -1,307 +0,0 @@
-# All Rights Reserved.
-#
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import functools
-
-import fixtures
-
-from neutron.agent import firewall
-from neutron.tests.common import machine_fixtures
-from neutron.tests.common import net_helpers
-
-
-class ConnectionTesterException(Exception):
-    pass
-
-
-def _validate_direction(f):
-    @functools.wraps(f)
-    def wrap(self, direction, *args, **kwargs):
-        if direction not in (firewall.INGRESS_DIRECTION,
-                             firewall.EGRESS_DIRECTION):
-            raise ConnectionTesterException('Unknown direction %s' % direction)
-        return f(self, direction, *args, **kwargs)
-    return wrap
-
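-# Note: any decorated method invoked with a direction other than
-# firewall.INGRESS_DIRECTION or firewall.EGRESS_DIRECTION raises
-# ConnectionTesterException before the wrapped method runs.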
-
-class ConnectionTester(fixtures.Fixture):
-    """Base class for testers
-
-    This class implements an API of various methods for testing connectivity.
-    The concrete behavior depends on how the encapsulated resources are
-    configured, so child classes must define the resources themselves
-    (e.g. endpoints connected through a Linux bridge or an OVS bridge).
-
-    """
-
-    UDP = net_helpers.NetcatTester.UDP
-    TCP = net_helpers.NetcatTester.TCP
-    ICMP = 'icmp'
-    ARP = 'arp'
-    INGRESS = firewall.INGRESS_DIRECTION
-    EGRESS = firewall.EGRESS_DIRECTION
-
-    def _setUp(self):
-        self._protocol_to_method = {
-            self.UDP: self._test_transport_connectivity,
-            self.TCP: self._test_transport_connectivity,
-            self.ICMP: self._test_icmp_connectivity,
-            self.ARP: self._test_arp_connectivity}
-        self._nc_testers = dict()
-        self._pingers = dict()
-        self.addCleanup(self.cleanup)
-
-    def cleanup(self):
-        for nc in self._nc_testers.values():
-            nc.stop_processes()
-        for pinger in self._pingers.values():
-            pinger.stop()
-
-    @property
-    def vm_namespace(self):
-        return self._vm.namespace
-
-    @property
-    def vm_ip_address(self):
-        return self._vm.ip
-
-    @property
-    def vm_ip_cidr(self):
-        return self._vm.ip_cidr
-
-    @vm_ip_cidr.setter
-    def vm_ip_cidr(self, ip_cidr):
-        self._vm.ip_cidr = ip_cidr
-
-    @property
-    def vm_mac_address(self):
-        return self._vm.port.link.address
-
-    @vm_mac_address.setter
-    def vm_mac_address(self, mac_address):
-        self._vm.mac_address = mac_address
-
-    @property
-    def peer_mac_address(self):
-        return self._peer.port.link.address
-
-    @peer_mac_address.setter
-    def peer_mac_address(self, mac_address):
-        self._peer.mac_address = mac_address
-
-    @property
-    def peer_namespace(self):
-        return self._peer.namespace
-
-    @property
-    def peer_ip_address(self):
-        return self._peer.ip
-
-    def flush_arp_tables(self):
-        """Flush arptables in all used namespaces"""
-        for machine in (self._peer, self._vm):
-            machine.port.neigh.flush(4, 'all')
-
-    def _test_transport_connectivity(self, direction, protocol, src_port,
-                                     dst_port):
-        nc_tester = self._create_nc_tester(direction, protocol, src_port,
-                                           dst_port)
-        try:
-            nc_tester.test_connectivity()
-        except RuntimeError as exc:
-            raise ConnectionTesterException(
-                "%s connection over %s protocol with %s source port and "
-                "%s destination port can't be established: %s" % (
-                    direction, protocol, src_port, dst_port, exc))
-
-    @_validate_direction
-    def _get_namespace_and_address(self, direction):
-        if direction == self.INGRESS:
-            return self.peer_namespace, self.vm_ip_address
-        return self.vm_namespace, self.peer_ip_address
-
-    def _test_icmp_connectivity(self, direction, protocol, src_port, dst_port):
-        src_namespace, ip_address = self._get_namespace_and_address(direction)
-        try:
-            net_helpers.assert_ping(src_namespace, ip_address)
-        except RuntimeError:
-            raise ConnectionTesterException(
-                "ICMP packets can't get from %s namespace to %s address" % (
-                    src_namespace, ip_address))
-
-    def _test_arp_connectivity(self, direction, protocol, src_port, dst_port):
-        src_namespace, ip_address = self._get_namespace_and_address(direction)
-        try:
-            net_helpers.assert_arping(src_namespace, ip_address)
-        except RuntimeError:
-            raise ConnectionTesterException(
-                "ARP queries to %s address have no response from %s namespace"
-                % (ip_address, src_namespace))
-
-    @_validate_direction
-    def assert_connection(self, direction, protocol, src_port=None,
-                          dst_port=None):
-        testing_method = self._protocol_to_method[protocol]
-        testing_method(direction, protocol, src_port, dst_port)
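-
-    # A usage sketch: verify that ingress TCP to port 80 is allowed:
-    #
-    #     self.assert_connection(self.INGRESS, self.TCP, dst_port=80)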
-
-    def assert_no_connection(self, direction, protocol, src_port=None,
-                             dst_port=None):
-        try:
-            self.assert_connection(direction, protocol, src_port, dst_port)
-        except ConnectionTesterException:
-            pass
-        else:
-            dst_port_info = str()
-            src_port_info = str()
-            if dst_port is not None:
-                dst_port_info = " and destination port %d" % dst_port
-            if src_port is not None:
-                src_port_info = " and source port %d" % src_port
-            raise ConnectionTesterException("%s connection with protocol %s, "
-                                            "source port %s, destination "
-                                            "port %s was established but it "
-                                            "shouldn't be possible" % (
-                                                direction, protocol,
-                                                src_port_info, dst_port_info))
-
-    @_validate_direction
-    def assert_established_connection(self, direction, protocol, src_port=None,
-                                      dst_port=None):
-        nc_params = (direction, protocol, src_port, dst_port)
-        nc_tester = self._nc_testers.get(nc_params)
-        if nc_tester:
-            if nc_tester.is_established:
-                nc_tester.test_connectivity()
-            else:
-                raise ConnectionTesterException(
-                    '%s connection with protocol %s, source port %s and '
-                    'destination port %s is not established' % nc_params)
-        else:
-            raise ConnectionTesterException(
-                "Attempting to test established %s connection with protocol %s"
-                ", source port %s and destination port %s that hasn't been "
-                "established yet by calling establish_connection()"
-                % nc_params)
-
-    def assert_no_established_connection(self, direction, protocol,
-                                         src_port=None, dst_port=None):
-        try:
-            self.assert_established_connection(direction, protocol, src_port,
-                                               dst_port)
-        except ConnectionTesterException:
-            pass
-        else:
-            raise ConnectionTesterException(
-                'Established %s connection with protocol %s, source port %s, '
-                'destination port %s can still send packets through' % (
-                    direction, protocol, src_port, dst_port))
-
-    @_validate_direction
-    def establish_connection(self, direction, protocol, src_port=None,
-                             dst_port=None):
-        nc_tester = self._create_nc_tester(direction, protocol, src_port,
-                                           dst_port)
-        nc_tester.establish_connection()
-        self.addCleanup(nc_tester.stop_processes)
-
-    def _create_nc_tester(self, direction, protocol, src_port, dst_port):
-        """Create netcat tester
-
-        If there already exists a netcat tester that has established
-        connection, exception is raised.
-        """
-        nc_key = (direction, protocol, src_port, dst_port)
-        nc_tester = self._nc_testers.get(nc_key)
-        if nc_tester and nc_tester.is_established:
-            raise ConnectionTesterException(
-                '%s connection using %s protocol, source port %s and '
-                'destination port %s is already established' % (
-                    direction, protocol, src_port, dst_port))
-
-        if direction == self.INGRESS:
-            client_ns = self.peer_namespace
-            server_ns = self.vm_namespace
-            server_addr = self.vm_ip_address
-        else:
-            client_ns = self.vm_namespace
-            server_ns = self.peer_namespace
-            server_addr = self.peer_ip_address
-
-        server_port = dst_port or net_helpers.get_free_namespace_port(
-            protocol, server_ns)
-        nc_tester = net_helpers.NetcatTester(client_namespace=client_ns,
-                                             server_namespace=server_ns,
-                                             address=server_addr,
-                                             protocol=protocol,
-                                             src_port=src_port,
-                                             dst_port=server_port)
-        self._nc_testers[nc_key] = nc_tester
-        return nc_tester
-
-    def _get_pinger(self, direction):
-        try:
-            pinger = self._pingers[direction]
-        except KeyError:
-            src_namespace, dst_address = self._get_namespace_and_address(
-                direction)
-            pinger = net_helpers.Pinger(src_namespace, dst_address)
-            self._pingers[direction] = pinger
-        return pinger
-
-    def start_sending_icmp(self, direction):
-        pinger = self._get_pinger(direction)
-        pinger.start()
-
-    def stop_sending_icmp(self, direction):
-        pinger = self._get_pinger(direction)
-        pinger.stop()
-
-    def get_sent_icmp_packets(self, direction):
-        pinger = self._get_pinger(direction)
-        return pinger.sent
-
-    def get_received_icmp_packets(self, direction):
-        pinger = self._get_pinger(direction)
-        return pinger.received
-
-
-class LinuxBridgeConnectionTester(ConnectionTester):
-    """Tester with linux bridge in the middle
-
-    Both endpoints are placed in their own separate namespaces, connected
-    to the bridge's namespace via veth pairs.
-
-    """
-
-    def _setUp(self):
-        super(LinuxBridgeConnectionTester, self)._setUp()
-        self._bridge = self.useFixture(net_helpers.LinuxBridgeFixture()).bridge
-        self._peer, self._vm = self.useFixture(
-            machine_fixtures.PeerMachines(self._bridge)).machines
-
-    @property
-    def bridge_namespace(self):
-        return self._bridge.namespace
-
-    @property
-    def vm_port_id(self):
-        return net_helpers.VethFixture.get_peer_name(self._vm.port.name)
-
-    @property
-    def peer_port_id(self):
-        return net_helpers.VethFixture.get_peer_name(self._peer.port.name)
-
-    def flush_arp_tables(self):
-        self._bridge.neigh.flush(4, 'all')
-        super(LinuxBridgeConnectionTester, self).flush_arp_tables()
diff --git a/neutron/tests/common/helpers.py b/neutron/tests/common/helpers.py
deleted file mode 100644 (file)
index b144454..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import datetime
-import os
-
-from oslo_utils import timeutils
-import six
-import testtools
-
-import neutron
-from neutron.common import constants
-from neutron.common import topics
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import common_db_mixin
-
-HOST = 'localhost'
-DEFAULT_AZ = 'nova'
-
-
-def find_file(filename, path):
-    """Find a file with name 'filename' located in 'path'."""
-    for root, _, files in os.walk(path):
-        if filename in files:
-            return os.path.abspath(os.path.join(root, filename))
-
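-# A usage sketch (the paths are hypothetical):
-#
-#     find_file('neutron.conf', '/etc/neutron')
-#     # -> '/etc/neutron/neutron.conf' if present, otherwise None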
-
-def find_sample_file(filename):
-    """Find a file with name 'filename' located in the sample directory."""
-    return find_file(
-        filename,
-        path=os.path.join(neutron.__path__[0], '..', 'etc'))
-
-
-class FakePlugin(common_db_mixin.CommonDbMixin,
-                 agents_db.AgentDbMixin):
-    pass
-
-
-def _get_l3_agent_dict(host, agent_mode, internal_only=True,
-                       ext_net_id='', ext_bridge='', router_id=None,
-                       az=DEFAULT_AZ):
-    return {
-        'agent_type': constants.AGENT_TYPE_L3,
-        'binary': 'neutron-l3-agent',
-        'host': host,
-        'topic': topics.L3_AGENT,
-        'availability_zone': az,
-        'configurations': {'agent_mode': agent_mode,
-                           'handle_internal_only_routers': internal_only,
-                           'external_network_bridge': ext_bridge,
-                           'gateway_external_network_id': ext_net_id,
-                           'router_id': router_id}}
-
-
-def _register_agent(agent):
-    plugin = FakePlugin()
-    admin_context = context.get_admin_context()
-    plugin.create_or_update_agent(admin_context, agent)
-    return plugin._get_agent_by_type_and_host(
-        admin_context, agent['agent_type'], agent['host'])
-
-
-def register_l3_agent(host=HOST, agent_mode=constants.L3_AGENT_MODE_LEGACY,
-                      internal_only=True, ext_net_id='', ext_bridge='',
-                      router_id=None, az=DEFAULT_AZ):
-    agent = _get_l3_agent_dict(host, agent_mode, internal_only, ext_net_id,
-                               ext_bridge, router_id, az)
-    return _register_agent(agent)
-
-
-def _get_dhcp_agent_dict(host, networks=0, az=DEFAULT_AZ):
-    agent = {
-        'binary': 'neutron-dhcp-agent',
-        'host': host,
-        'topic': topics.DHCP_AGENT,
-        'agent_type': constants.AGENT_TYPE_DHCP,
-        'availability_zone': az,
-        'configurations': {'dhcp_driver': 'dhcp_driver',
-                           'networks': networks}}
-    return agent
-
-
-def register_dhcp_agent(host=HOST, networks=0, admin_state_up=True,
-                        alive=True, az=DEFAULT_AZ):
-    agent = _register_agent(
-        _get_dhcp_agent_dict(host, networks, az=az))
-
-    if not admin_state_up:
-        set_agent_admin_state(agent['id'])
-    if not alive:
-        kill_agent(agent['id'])
-
-    return FakePlugin()._get_agent_by_type_and_host(
-        context.get_admin_context(), agent['agent_type'], agent['host'])
-
-
-def kill_agent(agent_id):
-    hour_ago = timeutils.utcnow() - datetime.timedelta(hours=1)
-    FakePlugin().update_agent(
-        context.get_admin_context(),
-        agent_id,
-        {'agent': {
-            'started_at': hour_ago,
-            'heartbeat_timestamp': hour_ago}})
-
-
-def set_agent_admin_state(agent_id, admin_state_up=False):
-    FakePlugin().update_agent(
-        context.get_admin_context(),
-        agent_id,
-        {'agent': {'admin_state_up': admin_state_up}})
-
-
-def _get_ovs_agent_dict(host, agent_type, binary, tunnel_types,
-                        tunneling_ip='20.0.0.1', interface_mappings=None,
-                        l2pop_network_types=None):
-    agent = {
-        'binary': binary,
-        'host': host,
-        'topic': constants.L2_AGENT_TOPIC,
-        'configurations': {'tunneling_ip': tunneling_ip,
-                           'tunnel_types': tunnel_types},
-        'agent_type': agent_type,
-        'tunnel_type': [],
-        'start_flag': True}
-
-    if interface_mappings is not None:
-        agent['configurations']['interface_mappings'] = interface_mappings
-    if l2pop_network_types is not None:
-        agent['configurations']['l2pop_network_types'] = l2pop_network_types
-    return agent
-
-
-def register_ovs_agent(host=HOST, agent_type=constants.AGENT_TYPE_OVS,
-                       binary='neutron-openvswitch-agent',
-                       tunnel_types=['vxlan'], tunneling_ip='20.0.0.1',
-                       interface_mappings=None,
-                       l2pop_network_types=None):
-    agent = _get_ovs_agent_dict(host, agent_type, binary, tunnel_types,
-                                tunneling_ip, interface_mappings,
-                                l2pop_network_types)
-    return _register_agent(agent)
-
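-# A usage sketch: register a fake OVS agent for a scheduling test
-# (the host name and tunnel types are hypothetical):
-#
-#     agent = register_ovs_agent(host='host-a', tunnel_types=['gre'])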
-
-def requires_py2(testcase):
-    return testtools.skipUnless(six.PY2, "requires python 2.x")(testcase)
-
-
-def requires_py3(testcase):
-    return testtools.skipUnless(six.PY3, "requires python 3.x")(testcase)
diff --git a/neutron/tests/common/l3_test_common.py b/neutron/tests/common/l3_test_common.py
deleted file mode 100644 (file)
index a81f2d6..0000000
+++ /dev/null
@@ -1,303 +0,0 @@
-# Copyright (c) 2015 Openstack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-import netaddr
-from oslo_utils import uuidutils
-from six import moves
-
-from neutron.common import constants as l3_constants
-
-_uuid = uuidutils.generate_uuid
-
-
-class FakeDev(object):
-    def __init__(self, name):
-        self.name = name
-
-
-def get_ha_interface(ip='169.254.192.1', mac='12:34:56:78:2b:5d'):
-    subnet_id = _uuid()
-    return {'admin_state_up': True,
-            'device_id': _uuid(),
-            'device_owner': l3_constants.DEVICE_OWNER_ROUTER_HA_INTF,
-            'fixed_ips': [{'ip_address': ip,
-                           'prefixlen': 18,
-                           'subnet_id': subnet_id}],
-            'id': _uuid(),
-            'mac_address': mac,
-            'name': u'L3 HA Admin port 0',
-            'network_id': _uuid(),
-            'status': u'ACTIVE',
-            'subnets': [{'cidr': '169.254.192.0/18',
-                         'gateway_ip': '169.254.255.254',
-                         'id': subnet_id}],
-            'tenant_id': '',
-            'agent_id': _uuid(),
-            'agent_host': 'aaa',
-            'priority': 1}
-
-
-def prepare_router_data(ip_version=4, enable_snat=None, num_internal_ports=1,
-                        enable_floating_ip=False, enable_ha=False,
-                        extra_routes=False, dual_stack=False,
-                        v6_ext_gw_with_sub=True, **kwargs):
-    fixed_ips = []
-    subnets = []
-    gateway_mac = kwargs.get('gateway_mac', 'ca:fe:de:ad:be:ee')
-    extra_subnets = []
-    for loop_version in (4, 6):
-        if loop_version == 4 and (ip_version == 4 or dual_stack):
-            ip_address = kwargs.get('ip_address', '19.4.4.4')
-            prefixlen = 24
-            subnet_cidr = kwargs.get('subnet_cidr', '19.4.4.0/24')
-            gateway_ip = kwargs.get('gateway_ip', '19.4.4.1')
-            _extra_subnet = {'cidr': '9.4.5.0/24'}
-        elif (loop_version == 6 and (ip_version == 6 or dual_stack) and
-              v6_ext_gw_with_sub):
-            ip_address = kwargs.get('ip_address', 'fd00::4')
-            prefixlen = 64
-            subnet_cidr = kwargs.get('subnet_cidr', 'fd00::/64')
-            gateway_ip = kwargs.get('gateway_ip', 'fd00::1')
-            _extra_subnet = {'cidr': 'fd01::/64'}
-        else:
-            continue
-        subnet_id = _uuid()
-        fixed_ips.append({'ip_address': ip_address,
-                          'subnet_id': subnet_id,
-                          'prefixlen': prefixlen})
-        subnets.append({'id': subnet_id,
-                        'cidr': subnet_cidr,
-                        'gateway_ip': gateway_ip})
-        extra_subnets.append(_extra_subnet)
-    if not fixed_ips and v6_ext_gw_with_sub:
-        raise ValueError("Invalid ip_version: %s" % ip_version)
-
-    router_id = _uuid()
-    ex_gw_port = {'id': _uuid(),
-                  'mac_address': gateway_mac,
-                  'network_id': _uuid(),
-                  'fixed_ips': fixed_ips,
-                  'subnets': subnets,
-                  'extra_subnets': extra_subnets}
-
-    routes = []
-    if extra_routes:
-        routes = [{'destination': '8.8.8.0/24', 'nexthop': '19.4.4.4'}]
-
-    router = {
-        'id': router_id,
-        'distributed': False,
-        l3_constants.INTERFACE_KEY: [],
-        'routes': routes,
-        'gw_port': ex_gw_port}
-
-    if enable_floating_ip:
-        router[l3_constants.FLOATINGIP_KEY] = [{
-            'id': _uuid(),
-            'port_id': _uuid(),
-            'status': 'DOWN',
-            'floating_ip_address': '19.4.4.2',
-            'fixed_ip_address': '10.0.0.1'}]
-
-    router_append_interface(router, count=num_internal_ports,
-                            ip_version=ip_version, dual_stack=dual_stack)
-    if enable_ha:
-        router['ha'] = True
-        router['ha_vr_id'] = 1
-        router[l3_constants.HA_INTERFACE_KEY] = (get_ha_interface())
-
-    if enable_snat is not None:
-        router['enable_snat'] = enable_snat
-    return router
-
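-# A usage sketch: build a dual-stack router dict with SNAT and a floating
-# IP for agent tests:
-#
-#     router = prepare_router_data(ip_version=4, dual_stack=True,
-#                                  enable_snat=True, enable_floating_ip=True)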
-
-def get_subnet_id(port):
-    return port['fixed_ips'][0]['subnet_id']
-
-
-def router_append_interface(router, count=1, ip_version=4, ra_mode=None,
-                            addr_mode=None, dual_stack=False):
-    interfaces = router[l3_constants.INTERFACE_KEY]
-    current = sum(
-        [netaddr.IPNetwork(subnet['cidr']).version == ip_version
-         for p in interfaces for subnet in p['subnets']])
-
-    mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
-    mac_address.dialect = netaddr.mac_unix
-    for i in range(current, current + count):
-        fixed_ips = []
-        subnets = []
-        for loop_version in (4, 6):
-            if loop_version == 4 and (ip_version == 4 or dual_stack):
-                ip_pool = '35.4.%i.4'
-                cidr_pool = '35.4.%i.0/24'
-                prefixlen = 24
-                gw_pool = '35.4.%i.1'
-            elif loop_version == 6 and (ip_version == 6 or dual_stack):
-                ip_pool = 'fd01:%x:1::6'
-                cidr_pool = 'fd01:%x:1::/64'
-                prefixlen = 64
-                gw_pool = 'fd01:%x:1::1'
-            else:
-                continue
-            subnet_id = _uuid()
-            fixed_ips.append({'ip_address': ip_pool % i,
-                              'subnet_id': subnet_id,
-                              'prefixlen': prefixlen})
-            subnets.append({'id': subnet_id,
-                            'cidr': cidr_pool % i,
-                            'gateway_ip': gw_pool % i,
-                            'ipv6_ra_mode': ra_mode,
-                            'ipv6_address_mode': addr_mode})
-        if not fixed_ips:
-            raise ValueError("Invalid ip_version: %s" % ip_version)
-
-        interfaces.append(
-            {'id': _uuid(),
-             'network_id': _uuid(),
-             'admin_state_up': True,
-             'fixed_ips': fixed_ips,
-             'mac_address': str(mac_address),
-             'subnets': subnets})
-        mac_address.value += 1
-
-
-def router_append_subnet(router, count=1, ip_version=4,
-                         ipv6_subnet_modes=None, interface_id=None,
-                         dns_nameservers=None):
-    if ip_version == 6:
-        subnet_mode_none = {'ra_mode': None, 'address_mode': None}
-        if not ipv6_subnet_modes:
-            ipv6_subnet_modes = [subnet_mode_none] * count
-        elif len(ipv6_subnet_modes) != count:
-            ipv6_subnet_modes.extend([subnet_mode_none for i in
-                                      moves.range(len(ipv6_subnet_modes),
-                                                  count)])
-
-    if ip_version == 4:
-        ip_pool = '35.4.%i.4'
-        cidr_pool = '35.4.%i.0/24'
-        prefixlen = 24
-        gw_pool = '35.4.%i.1'
-    elif ip_version == 6:
-        ip_pool = 'fd01:%x::6'
-        cidr_pool = 'fd01:%x::/64'
-        prefixlen = 64
-        gw_pool = 'fd01:%x::1'
-    else:
-        raise ValueError("Invalid ip_version: %s" % ip_version)
-
-    interfaces = copy.deepcopy(router.get(l3_constants.INTERFACE_KEY, []))
-    if interface_id:
-        try:
-            interface = next(i for i in interfaces
-                             if i['id'] == interface_id)
-        except StopIteration:
-            raise ValueError("interface_id not found")
-
-        fixed_ips, subnets = interface['fixed_ips'], interface['subnets']
-    else:
-        interface = None
-        fixed_ips, subnets = [], []
-
-    num_existing_subnets = len(subnets)
-    for i in moves.range(count):
-        subnet_id = _uuid()
-        fixed_ips.append(
-                {'ip_address': ip_pool % (i + num_existing_subnets),
-                 'subnet_id': subnet_id,
-                 'prefixlen': prefixlen})
-        subnets.append(
-                {'id': subnet_id,
-                 'cidr': cidr_pool % (i + num_existing_subnets),
-                 'gateway_ip': gw_pool % (i + num_existing_subnets),
-                 'dns_nameservers': dns_nameservers,
-                 'ipv6_ra_mode': ipv6_subnet_modes[i]['ra_mode'],
-                 'ipv6_address_mode': ipv6_subnet_modes[i]['address_mode']})
-
-    if interface:
-        # Update old interface
-        index = interfaces.index(interface)
-        interfaces[index].update({'fixed_ips': fixed_ips, 'subnets': subnets})
-    else:
-        # New interface appended to interfaces list
-        mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
-        mac_address.dialect = netaddr.mac_unix
-        interfaces.append(
-            {'id': _uuid(),
-             'network_id': _uuid(),
-             'admin_state_up': True,
-             'mac_address': str(mac_address),
-             'fixed_ips': fixed_ips,
-             'subnets': subnets})
-
-    router[l3_constants.INTERFACE_KEY] = interfaces
-
-
-def router_append_pd_enabled_subnet(router, count=1):
-    interfaces = router[l3_constants.INTERFACE_KEY]
-    current = sum(netaddr.IPNetwork(subnet['cidr']).version == 6
-                  for p in interfaces for subnet in p['subnets'])
-
-    mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
-    mac_address.dialect = netaddr.mac_unix
-    pd_intfs = []
-    for i in range(current, current + count):
-        subnet_id = _uuid()
-        intf = {'id': _uuid(),
-                'network_id': _uuid(),
-                'admin_state_up': True,
-                'fixed_ips': [{'ip_address': '::1',
-                               'prefixlen': 64,
-                               'subnet_id': subnet_id}],
-                'mac_address': str(mac_address),
-                'subnets': [{'id': subnet_id,
-                             'cidr': l3_constants.PROVISIONAL_IPV6_PD_PREFIX,
-                             'gateway_ip': '::1',
-                             'ipv6_ra_mode': l3_constants.IPV6_SLAAC,
-                             'subnetpool_id': l3_constants.IPV6_PD_POOL_ID}]}
-        interfaces.append(intf)
-        pd_intfs.append(intf)
-        mac_address.value += 1
-    return pd_intfs
-
-
-def prepare_ext_gw_test(context, ri, dual_stack=False):
-    subnet_id = _uuid()
-    fixed_ips = [{'subnet_id': subnet_id,
-                  'ip_address': '20.0.0.30',
-                  'prefixlen': 24}]
-    subnets = [{'id': subnet_id,
-                'cidr': '20.0.0.0/24',
-                'gateway_ip': '20.0.0.1'}]
-    if dual_stack:
-        subnet_id_v6 = _uuid()
-        fixed_ips.append({'subnet_id': subnet_id_v6,
-                          'ip_address': '2001:192:168:100::2',
-                          'prefixlen': 64})
-        subnets.append({'id': subnet_id_v6,
-                        'cidr': '2001:192:168:100::/64',
-                        'gateway_ip': '2001:192:168:100::1'})
-    ex_gw_port = {'fixed_ips': fixed_ips,
-                  'subnets': subnets,
-                  'extra_subnets': [{'cidr': '172.16.0.0/24'}],
-                  'id': _uuid(),
-                  'network_id': _uuid(),
-                  'mac_address': 'ca:fe:de:ad:be:ef'}
-    interface_name = ri.get_external_device_name(ex_gw_port['id'])
-
-    context.device_exists.return_value = True
-
-    return interface_name, ex_gw_port
diff --git a/neutron/tests/common/machine_fixtures.py b/neutron/tests/common/machine_fixtures.py
deleted file mode 100644 (file)
index c812e5e..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright (c) 2015 Thales Services SAS
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import functools
-
-import fixtures
-
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.tests.common import net_helpers
-
-
-class FakeMachineBase(fixtures.Fixture):
-    """Create a fake machine.
-
-    :ivar bridge: bridge on which the fake machine is bound
-    :ivar ip_cidr: fake machine ip_cidr
-    :type ip_cidr: str
-    :ivar ip: fake machine ip
-    :type ip: str
-    :ivar gateway_ip: fake machine gateway ip
-    :type gateway_ip: str
-
-    :ivar namespace: namespace emulating the machine
-    :type namespace: str
-    :ivar port: port binding the namespace to the bridge
-    :type port: IPDevice
-    """
-
-    def __init__(self):
-        self.port = None
-
-    def _setUp(self):
-        ns_fixture = self.useFixture(
-            net_helpers.NamespaceFixture())
-        self.namespace = ns_fixture.name
-
-    def execute(self, *args, **kwargs):
-        ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
-        return ns_ip_wrapper.netns.execute(*args, **kwargs)
-
-    def ping_predicate(self, dst_ip):
-        try:
-            self.assert_ping(dst_ip)
-        except RuntimeError:
-            return False
-        return True
-
-    def block_until_ping(self, dst_ip):
-        predicate = functools.partial(self.ping_predicate, dst_ip)
-        utils.wait_until_true(predicate)
-
-    def assert_ping(self, dst_ip):
-        net_helpers.assert_ping(self.namespace, dst_ip)
-
-    def assert_no_ping(self, dst_ip):
-        net_helpers.assert_no_ping(self.namespace, dst_ip)
-
-    @property
-    def ip(self):
-        raise NotImplementedError()
-
-    @property
-    def ip_cidr(self):
-        raise NotImplementedError()
-
-    @property
-    def mac_address(self):
-        return self.port.link.address
-
-
-class FakeMachine(FakeMachineBase):
-
-    def __init__(self, bridge, ip_cidr, gateway_ip=None):
-        super(FakeMachine, self).__init__()
-        self.bridge = bridge
-        self._ip_cidr = ip_cidr
-        self.gateway_ip = gateway_ip
-
-    def _setUp(self):
-        super(FakeMachine, self)._setUp()
-
-        self.port = self.useFixture(
-            net_helpers.PortFixture.get(self.bridge, self.namespace)).port
-        self.port.addr.add(self._ip_cidr)
-
-        if self.gateway_ip:
-            net_helpers.set_namespace_gateway(self.port, self.gateway_ip)
-
-    @property
-    def ip(self):
-        return self._ip_cidr.partition('/')[0]
-
-    @property
-    def ip_cidr(self):
-        return self._ip_cidr
-
-    @ip_cidr.setter
-    def ip_cidr(self, ip_cidr):
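-        # Add the new address before deleting the old one so the port keeps
-        # at least one address throughout the swap.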
-        self.port.addr.add(ip_cidr)
-        self.port.addr.delete(self._ip_cidr)
-        self._ip_cidr = ip_cidr
-
-    @FakeMachineBase.mac_address.setter
-    def mac_address(self, mac_address):
-        self.port.link.set_down()
-        self.port.link.set_address(mac_address)
-        self.port.link.set_up()
-
-
-class PeerMachines(fixtures.Fixture):
-    """Create 'amount' peered machines on an ip_cidr.
-
-    :ivar bridge: bridge on which peer machines are bound
-    :ivar ip_cidr: ip_cidr on which peer machines have ips
-    :type ip_cidr: str
-    :ivar machines: fake machines
-    :type machines: FakeMachine list
-    """
-
-    CIDR = '192.168.0.1/24'
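-    # Base address: machine number i receives this address incremented by i
-    # (via net_helpers.increment_ip_cidr in _setUp).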
-
-    def __init__(self, bridge, ip_cidr=None, gateway_ip=None, amount=2):
-        super(PeerMachines, self).__init__()
-        self.bridge = bridge
-        self.ip_cidr = ip_cidr or self.CIDR
-        self.gateway_ip = gateway_ip
-        self.amount = amount
-
-    def _setUp(self):
-        self.machines = []
-
-        for index in range(self.amount):
-            ip_cidr = net_helpers.increment_ip_cidr(self.ip_cidr, index)
-            self.machines.append(
-                self.useFixture(
-                    FakeMachine(self.bridge, ip_cidr, self.gateway_ip)))
diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py
deleted file mode 100644 (file)
index 3ccd8aa..0000000
+++ /dev/null
@@ -1,689 +0,0 @@
-# Copyright (c) 2015 Thales Services SAS
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import abc
-from concurrent import futures
-import contextlib
-import functools
-import os
-import random
-import re
-import select
-import shlex
-import signal
-import subprocess
-
-import fixtures
-import netaddr
-from oslo_config import cfg
-from oslo_utils import uuidutils
-import six
-
-from neutron.agent.common import config
-from neutron.agent.common import ovs_lib
-from neutron.agent.linux import bridge_lib
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import constants as n_const
-from neutron.db import db_base_plugin_common
-from neutron.tests import base as tests_base
-from neutron.tests.common import base as common_base
-from neutron.tests import tools
-
-UNDEFINED = object()
-
-NS_PREFIX = 'test-'
-BR_PREFIX = 'test-br'
-PORT_PREFIX = 'test-port'
-VETH0_PREFIX = 'test-veth0'
-VETH1_PREFIX = 'test-veth1'
-PATCH_PREFIX = 'patch'
-
-SS_SOURCE_PORT_PATTERN = re.compile(
-    r'^.*\s+\d+\s+.*:(?P<port>\d+)\s+[0-9:].*')
-
-# Cast to numbers: environment variables arrive as strings, and these values
-# are used directly as numeric timeouts below.
-READ_TIMEOUT = int(os.environ.get('OS_TEST_READ_TIMEOUT', 5))
-
-CHILD_PROCESS_TIMEOUT = int(os.environ.get('OS_TEST_CHILD_PROCESS_TIMEOUT', 20))
-CHILD_PROCESS_SLEEP = float(os.environ.get('OS_TEST_CHILD_PROCESS_SLEEP', 0.5))
-
-TRANSPORT_PROTOCOLS = (n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP)
-
-
-def increment_ip_cidr(ip_cidr, offset=1):
-    """Increment ip_cidr offset times.
-
-    example: increment_ip_cidr("1.2.3.4/24", 2) ==> "1.2.3.6/24"
-    """
-    net0 = netaddr.IPNetwork(ip_cidr)
-    net = netaddr.IPNetwork(ip_cidr)
-    net.value += offset
-    if not net0.network < net.ip < net0[-1]:
-        tools.fail(
-            'Incorrect ip_cidr,offset tuple (%s,%s): "incremented" ip_cidr is '
-            'outside ip_cidr' % (ip_cidr, offset))
-    return str(net)
-
-
-def set_namespace_gateway(port_dev, gateway_ip):
-    """Set gateway for the namespace associated to the port."""
-    if not port_dev.namespace:
-        tools.fail('tests should not change test machine gateway')
-    port_dev.route.add_gateway(gateway_ip)
-
-
-def assert_ping(src_namespace, dst_ip, timeout=1, count=1):
-    ipversion = netaddr.IPAddress(dst_ip).version
-    ping_command = 'ping' if ipversion == 4 else 'ping6'
-    ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)
-    ns_ip_wrapper.netns.execute([ping_command, '-c', count, '-W', timeout,
-                                 dst_ip])
-
-
-@contextlib.contextmanager
-def async_ping(namespace, ips):
-    with futures.ThreadPoolExecutor(max_workers=len(ips)) as executor:
-        fs = [executor.submit(assert_ping, namespace, ip, count=10)
-              for ip in ips]
-        yield lambda: all(f.done() for f in fs)
-        futures.wait(fs)
-        for f in fs:
-            f.result()
-
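-# Illustrative usage of async_ping (names are hypothetical): keep pinging two
-# addresses while other checks run; any ping failure is re-raised on exit:
-#
-#     with async_ping('test-ns', ['10.0.0.2', '10.0.0.3']) as all_done:
-#         run_other_checks()       # pings continue in the background
-#         finished = all_done()    # non-blocking completion check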
-
-def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1):
-    try:
-        assert_ping(src_namespace, dst_ip, timeout, count)
-    except RuntimeError:
-        pass
-    else:
-        tools.fail("destination ip %(destination)s is replying to ping from "
-                   "namespace %(ns)s, but it shouldn't" %
-                   {'ns': src_namespace, 'destination': dst_ip})
-
-
-def assert_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
-    """Send arp request using arping executable.
-
-    NOTE: ARP protocol is used in IPv4 only. IPv6 uses Neighbour Discovery
-    Protocol instead.
-    """
-    ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)
-    arping_cmd = ['arping', '-c', count, '-w', timeout]
-    if source:
-        arping_cmd.extend(['-s', source])
-    arping_cmd.append(dst_ip)
-    ns_ip_wrapper.netns.execute(arping_cmd)
-
-
-def assert_no_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
-    try:
-        assert_arping(src_namespace, dst_ip, source, timeout, count)
-    except RuntimeError:
-        pass
-    else:
-        tools.fail("destination ip %(destination)s is replying to arp from "
-                   "namespace %(ns)s, but it shouldn't" %
-                   {'ns': src_namespace, 'destination': dst_ip})
-
-
-def _get_source_ports_from_ss_output(output):
-    ports = set()
-    for line in output.splitlines():
-        match = SS_SOURCE_PORT_PATTERN.match(line)
-        if match:
-            # Cast to int so the set arithmetic in get_unused_port() works.
-            ports.add(int(match.group('port')))
-    return ports
-
-
-def get_unused_port(used, start=1024, end=65535):
-    candidates = set(range(start, end + 1))
-    return random.choice(list(candidates - used))
-
-
-def get_free_namespace_port(protocol, namespace=None):
-    """Return an unused port from given namespace
-
-    WARNING: This function returns a port that is free at the time this
-             function executes. If the port is later used for binding, there
-             is a risk that it will no longer be free. It is up to the caller
-             to handle the error if the port is already in use.
-
-    :param protocol: Return free port for given protocol. Supported protocols
-                     are 'tcp' and 'udp'.
-    """
-    if protocol == n_const.PROTO_NAME_TCP:
-        param = '-tna'
-    elif protocol == n_const.PROTO_NAME_UDP:
-        param = '-una'
-    else:
-        raise ValueError("Unsupported procotol %s" % protocol)
-
-    ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
-    output = ip_wrapper.netns.execute(['ss', param])
-    used_ports = _get_source_ports_from_ss_output(output)
-
-    return get_unused_port(used_ports)
-
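-# Illustrative usage (constant from neutron.common.constants):
-#     port = get_free_namespace_port(n_const.PROTO_NAME_TCP, namespace=None)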
-
-def create_patch_ports(source, destination):
-    """Hook up two OVS bridges.
-
-    The result is two patch ports, each end connected to a bridge.
-    The two patch port names will start with 'patch-', followed by identical
-    The two patch port names start with 'patch-', followed by the same four
-    random characters: for example, patch-xyzw-fedora and patch-xyzw-ubuntu,
-    where fedora and ubuntu are random strings.
-
-    :param source: Instance of OVSBridge
-    :param destination: Instance of OVSBridge
-    """
-    common = tests_base.get_rand_name(max_length=4, prefix='')
-    prefix = '%s-%s-' % (PATCH_PREFIX, common)
-
-    source_name = tests_base.get_rand_device_name(prefix=prefix)
-    destination_name = tests_base.get_rand_device_name(prefix=prefix)
-
-    source.add_patch_port(source_name, destination_name)
-    destination.add_patch_port(destination_name, source_name)
-
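-# Illustrative usage, assuming br1 and br2 are OVSBridge instances from
-# OVSBridgeFixture:
-#     create_patch_ports(br1, br2)   # traffic can now flow between bridges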
-
-class RootHelperProcess(subprocess.Popen):
-    def __init__(self, cmd, *args, **kwargs):
-        for arg in ('stdin', 'stdout', 'stderr'):
-            kwargs.setdefault(arg, subprocess.PIPE)
-        self.namespace = kwargs.pop('namespace', None)
-        self.cmd = cmd
-        if self.namespace is not None:
-            cmd = ['ip', 'netns', 'exec', self.namespace] + cmd
-        root_helper = config.get_root_helper(utils.cfg.CONF)
-        cmd = shlex.split(root_helper) + cmd
-        self.child_pid = None
-        super(RootHelperProcess, self).__init__(cmd, *args, **kwargs)
-        self._wait_for_child_process()
-
-    def kill(self, sig=signal.SIGKILL):
-        pid = self.child_pid or str(self.pid)
-        utils.execute(['kill', '-%d' % sig, pid], run_as_root=True)
-
-    def read_stdout(self, timeout=None):
-        return self._read_stream(self.stdout, timeout)
-
-    @staticmethod
-    def _read_stream(stream, timeout):
-        if timeout:
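-            # Block (via 1 ms poll slices retried by wait_until_true every
-            # 0.1 s) until the stream is readable or `timeout` elapses.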
-            poller = select.poll()
-            poller.register(stream.fileno())
-            poll_predicate = functools.partial(poller.poll, 1)
-            utils.wait_until_true(poll_predicate, timeout, 0.1,
-                                  RuntimeError(
-                                      'No output in %.2f seconds' % timeout))
-        return stream.readline()
-
-    def writeline(self, data):
-        self.stdin.write(data + os.linesep)
-        self.stdin.flush()
-
-    def _wait_for_child_process(self, timeout=CHILD_PROCESS_TIMEOUT,
-                                sleep=CHILD_PROCESS_SLEEP):
-        def child_is_running():
-            child_pid = utils.get_root_helper_child_pid(
-                self.pid, run_as_root=True)
-            if utils.pid_invoked_with_cmdline(child_pid, self.cmd):
-                return True
-
-        utils.wait_until_true(
-            child_is_running,
-            timeout,
-            exception=RuntimeError("Process %s hasn't been spawned "
-                                   "in %d seconds" % (self.cmd, timeout)))
-        self.child_pid = utils.get_root_helper_child_pid(
-            self.pid, run_as_root=True)
-
-    @property
-    def is_running(self):
-        return self.poll() is None
-
-
-class Pinger(object):
-    """Class for sending ICMP packets asynchronously
-
-    The aim is to keep sending ICMP packets in the background while executing
-    other code. Once the background 'ping' command is stopped, its statistics
-    become available.
-
-    The difference from the assert_(no_)ping() functions in this module is
-    that those functions send a given count of ICMP packets and wait for the
-    exit code of the 'ping' command.
-
-    >>> pinger = Pinger('pinger_test', '192.168.0.2')
-
-    >>> pinger.start(); time.sleep(5); pinger.stop()
-
-    >>> pinger.sent, pinger.received
-    (7, 7)
-
-    """
-
-    stats_pattern = re.compile(
-        r'^(?P<trans>\d+) packets transmitted,.*(?P<recv>\d+) received.*$')
-    TIMEOUT = 15
-
-    def __init__(self, namespace, address, count=None, timeout=1):
-        self.proc = None
-        self.namespace = namespace
-        self.address = address
-        self.count = count
-        self.timeout = timeout
-        self.sent = 0
-        self.received = 0
-
-    def _wait_for_death(self):
-        is_dead = lambda: self.proc.poll() is not None
-        utils.wait_until_true(
-            is_dead, timeout=self.TIMEOUT, exception=RuntimeError(
-                "Ping command hasn't ended after %d seconds." % self.TIMEOUT))
-
-    def _parse_stats(self):
-        for line in self.proc.stdout:
-            result = self.stats_pattern.match(line)
-            if result:
-                self.sent = int(result.group('trans'))
-                self.received = int(result.group('recv'))
-                break
-        else:
-            raise RuntimeError("Didn't find ping statistics.")
-
-    def start(self):
-        if self.proc and self.proc.is_running:
-            raise RuntimeError("This pinger has already a running process")
-        ip_version = ip_lib.get_ip_version(self.address)
-        ping_exec = 'ping' if ip_version == 4 else 'ping6'
-        cmd = [ping_exec, self.address, '-W', str(self.timeout)]
-        if self.count:
-            cmd.extend(['-c', str(self.count)])
-        self.proc = RootHelperProcess(cmd, namespace=self.namespace)
-
-    def stop(self):
-        if self.proc and self.proc.is_running:
-            self.proc.kill(signal.SIGINT)
-            self._wait_for_death()
-            self._parse_stats()
-
-
-class NetcatTester(object):
-    TCP = n_const.PROTO_NAME_TCP
-    UDP = n_const.PROTO_NAME_UDP
-
-    def __init__(self, client_namespace, server_namespace, address,
-                 dst_port, protocol, server_address='0.0.0.0', src_port=None):
-        """
-        Tool for testing connectivity at the transport layer using the
-        netcat executable.
-
-        The processes are spawned lazily.
-
-        :param client_namespace: Namespace in which netcat process that
-                                 connects to other netcat will be spawned
-        :param server_namespace: Namespace in which listening netcat process
-                                 will be spawned
-        :param address: Server address from client point of view
-        :param dst_port: Port on which netcat listens
-        :param protocol: Transport protocol, either 'tcp' or 'udp'
-        :param server_address: Address in server namespace on which netcat
-                               should listen
-        :param src_port: Source port of netcat process spawned in client
-                         namespace - packet will have src_port in TCP/UDP
-                         header with this value
-
-        """
-        self.client_namespace = client_namespace
-        self.server_namespace = server_namespace
-        self._client_process = None
-        self._server_process = None
-        self.address = address
-        self.server_address = server_address
-        self.dst_port = str(dst_port)
-        self.src_port = str(src_port) if src_port else None
-        if protocol not in TRANSPORT_PROTOCOLS:
-            raise ValueError("Unsupported protocol %s" % protocol)
-        self.protocol = protocol
-
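-    # Illustrative flow: both nc processes are spawned lazily; a typical test
-    # does something like
-    #     tester = NetcatTester(client_ns, server_ns, '10.0.0.1', 3333,
-    #                           NetcatTester.TCP)
-    #     assert tester.test_connectivity()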
-    @property
-    def client_process(self):
-        if not self._client_process:
-            self.establish_connection()
-        return self._client_process
-
-    @property
-    def server_process(self):
-        if not self._server_process:
-            self._spawn_server_process()
-        return self._server_process
-
-    def _spawn_server_process(self):
-        self._server_process = self._spawn_nc_in_namespace(
-            self.server_namespace,
-            address=self.server_address,
-            listen=True)
-
-    @property
-    def is_established(self):
-        return bool(self._client_process and not self._client_process.poll())
-
-    def establish_connection(self):
-        if self._client_process:
-            raise RuntimeError('%(proto)s connection to %(ip_addr)s is already'
-                               ' established' %
-                               {'proto': self.protocol,
-                                'ip_addr': self.address})
-
-        if not self._server_process:
-            self._spawn_server_process()
-        self._client_process = self._spawn_nc_in_namespace(
-            self.client_namespace,
-            address=self.address)
-        if self.protocol == self.UDP:
-            # Create an ASSURED entry in the conntrack table for UDP packets,
-            # which requires a 3-way exchange:
-            # 1st transmission creates UNREPLIED
-            # 2nd transmission removes UNREPLIED
-            # 3rd transmission creates ASSURED
-            data = 'foo'
-            self.client_process.writeline(data)
-            self.server_process.read_stdout(READ_TIMEOUT)
-            self.server_process.writeline(data)
-            self.client_process.read_stdout(READ_TIMEOUT)
-            self.client_process.writeline(data)
-            self.server_process.read_stdout(READ_TIMEOUT)
-
-    def test_connectivity(self, respawn=False):
-        testing_string = uuidutils.generate_uuid()
-        if respawn:
-            self.stop_processes()
-
-        self.client_process.writeline(testing_string)
-        message = self.server_process.read_stdout(READ_TIMEOUT).strip()
-        self.server_process.writeline(message)
-        message = self.client_process.read_stdout(READ_TIMEOUT).strip()
-
-        return message == testing_string
-
-    def _spawn_nc_in_namespace(self, namespace, address, listen=False):
-        cmd = ['nc', address, self.dst_port]
-        if self.protocol == self.UDP:
-            cmd.append('-u')
-        if listen:
-            cmd.append('-l')
-            if self.protocol == self.TCP:
-                cmd.append('-k')
-        else:
-            cmd.extend(['-w', '20'])
-            if self.src_port:
-                cmd.extend(['-p', self.src_port])
-        proc = RootHelperProcess(cmd, namespace=namespace)
-        return proc
-
-    def stop_processes(self):
-        for proc_attr in ('_client_process', '_server_process'):
-            proc = getattr(self, proc_attr)
-            if proc:
-                if proc.poll() is None:
-                    proc.kill()
-                    proc.wait()
-                setattr(self, proc_attr, None)
-
-
-class NamespaceFixture(fixtures.Fixture):
-    """Create a namespace.
-
-    :ivar ip_wrapper: IP wrapper for the created namespace
-    :type ip_wrapper: IPWrapper
-    :ivar name: created namespace name
-    :type name: str
-    """
-
-    def __init__(self, prefix=NS_PREFIX):
-        super(NamespaceFixture, self).__init__()
-        self.prefix = prefix
-
-    def _setUp(self):
-        ip = ip_lib.IPWrapper()
-        self.name = self.prefix + uuidutils.generate_uuid()
-        self.addCleanup(self.destroy)
-        self.ip_wrapper = ip.ensure_namespace(self.name)
-
-    def destroy(self):
-        if self.ip_wrapper.netns.exists(self.name):
-            self.ip_wrapper.netns.delete(self.name)
-
-
-class VethFixture(fixtures.Fixture):
-    """Create a veth.
-
-    :ivar ports: created veth ports
-    :type ports: IPDevice 2-tuple
-    """
-
-    def _setUp(self):
-        ip_wrapper = ip_lib.IPWrapper()
-
-        self.ports = common_base.create_resource(
-            VETH0_PREFIX,
-            lambda name: ip_wrapper.add_veth(name, self.get_peer_name(name)))
-
-        self.addCleanup(self.destroy)
-
-    def destroy(self):
-        for port in self.ports:
-            ip_wrapper = ip_lib.IPWrapper(port.namespace)
-            if (ip_wrapper.netns.exists(port.namespace) or
-                port.namespace is None):
-                try:
-                    ip_wrapper.del_veth(port.name)
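-                    # Removing one endpoint deletes the whole veth pair,
-                    # hence the break below.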
-                    break
-                except RuntimeError:
-                    # NOTE(cbrandily): It seems a veth is automagically deleted
-                    # when a namespace owning a veth endpoint is deleted.
-                    pass
-
-    @staticmethod
-    def get_peer_name(name):
-        if name.startswith(VETH0_PREFIX):
-            return name.replace(VETH0_PREFIX, VETH1_PREFIX)
-        elif name.startswith(VETH1_PREFIX):
-            return name.replace(VETH1_PREFIX, VETH0_PREFIX)
-        else:
-            tools.fail('%s is not a valid VethFixture veth endpoint' % name)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class PortFixture(fixtures.Fixture):
-    """Create a port.
-
-    :ivar port: created port
-    :type port: IPDevice
-    :ivar bridge: port bridge
-    """
-
-    def __init__(self, bridge=None, namespace=None, mac=None, port_id=None):
-        super(PortFixture, self).__init__()
-        self.bridge = bridge
-        self.namespace = namespace
-        self.mac = (
-            mac or db_base_plugin_common.DbBasePluginCommon._generate_mac())
-        self.port_id = port_id or uuidutils.generate_uuid()
-
-    @abc.abstractmethod
-    def _create_bridge_fixture(self):
-        pass
-
-    @abc.abstractmethod
-    def _setUp(self):
-        super(PortFixture, self)._setUp()
-        if not self.bridge:
-            self.bridge = self.useFixture(self._create_bridge_fixture()).bridge
-
-    @classmethod
-    def get(cls, bridge, namespace=None, mac=None, port_id=None):
-        """Deduce PortFixture class from bridge type and instantiate it."""
-        if isinstance(bridge, ovs_lib.OVSBridge):
-            return OVSPortFixture(bridge, namespace, mac, port_id)
-        if isinstance(bridge, bridge_lib.BridgeDevice):
-            return LinuxBridgePortFixture(bridge, namespace)
-        if isinstance(bridge, VethBridge):
-            return VethPortFixture(bridge, namespace)
-        tools.fail('Unexpected bridge type: %s' % type(bridge))
-
-
-class OVSBridgeFixture(fixtures.Fixture):
-    """Create an OVS bridge.
-
-    :ivar prefix: bridge name prefix
-    :type prefix: str
-    :ivar bridge: created bridge
-    :type bridge: OVSBridge
-    """
-
-    def __init__(self, prefix=BR_PREFIX):
-        super(OVSBridgeFixture, self).__init__()
-        self.prefix = prefix
-
-    def _setUp(self):
-        ovs = ovs_lib.BaseOVS()
-        self.bridge = common_base.create_resource(self.prefix, ovs.add_bridge)
-        self.addCleanup(self.bridge.destroy)
-
-
-class OVSPortFixture(PortFixture):
-
-    def _create_bridge_fixture(self):
-        return OVSBridgeFixture()
-
-    def _setUp(self):
-        super(OVSPortFixture, self)._setUp()
-
-        interface_config = cfg.ConfigOpts()
-        interface_config.register_opts(interface.OPTS)
-        ovs_interface = interface.OVSInterfaceDriver(interface_config)
-
-        port_name = tests_base.get_rand_device_name(PORT_PREFIX)
-        ovs_interface.plug_new(
-            None,
-            self.port_id,
-            port_name,
-            self.mac,
-            bridge=self.bridge.br_name,
-            namespace=self.namespace)
-        self.addCleanup(self.bridge.delete_port, port_name)
-        self.port = ip_lib.IPDevice(port_name, self.namespace)
-
-
-class LinuxBridgeFixture(fixtures.Fixture):
-    """Create a linux bridge.
-
-    :ivar bridge: created bridge
-    :type bridge: BridgeDevice
-    :ivar namespace: created bridge namespace
-    :type namespace: str
-    """
-
-    def __init__(self, prefix=BR_PREFIX, namespace=UNDEFINED):
-        super(LinuxBridgeFixture, self).__init__()
-        self.prefix = prefix
-        self.namespace = namespace
-
-    def _setUp(self):
-        if self.namespace is UNDEFINED:
-            self.namespace = self.useFixture(NamespaceFixture()).name
-        self.bridge = common_base.create_resource(
-            self.prefix,
-            bridge_lib.BridgeDevice.addbr,
-            namespace=self.namespace)
-        self.addCleanup(self.bridge.delbr)
-        self.bridge.link.set_up()
-        self.addCleanup(self.bridge.link.set_down)
-
-
-class LinuxBridgePortFixture(PortFixture):
-    """Create a linux bridge port.
-
-    :ivar port: created port
-    :type port: IPDevice
-    :ivar br_port: bridge side veth peer port
-    :type br_port: IPDevice
-    """
-
-    def _create_bridge_fixture(self):
-        return LinuxBridgeFixture()
-
-    def _setUp(self):
-        super(LinuxBridgePortFixture, self)._setUp()
-        self.port, self.br_port = self.useFixture(VethFixture()).ports
-
-        # bridge side
-        br_ip_wrapper = ip_lib.IPWrapper(self.bridge.namespace)
-        br_ip_wrapper.add_device_to_namespace(self.br_port)
-        self.bridge.addif(self.br_port)
-        self.br_port.link.set_up()
-
-        # port side
-        ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
-        ns_ip_wrapper.add_device_to_namespace(self.port)
-        self.port.link.set_up()
-
-
-class VethBridge(object):
-
-    def __init__(self, ports):
-        self.ports = ports
-        self.unallocated_ports = set(self.ports)
-
-    def allocate_port(self):
-        try:
-            return self.unallocated_ports.pop()
-        except KeyError:
-            tools.fail('All VethBridge ports (%s) are already allocated.' %
-                       len(self.ports))
-
-
-class VethBridgeFixture(fixtures.Fixture):
-    """Simulate a bridge with a veth.
-
-    :ivar bridge: created bridge
-    :type bridge: VethBridge
-    """
-
-    def _setUp(self):
-        ports = self.useFixture(VethFixture()).ports
-        self.bridge = VethBridge(ports)
-
-
-class VethPortFixture(PortFixture):
-    """Create a veth bridge port.
-
-    :ivar port: created port
-    :type port: IPDevice
-    """
-
-    def _create_bridge_fixture(self):
-        return VethBridgeFixture()
-
-    def _setUp(self):
-        super(VethPortFixture, self)._setUp()
-        self.port = self.bridge.allocate_port()
-
-        ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
-        ns_ip_wrapper.add_device_to_namespace(self.port)
-        self.port.link.set_up()
diff --git a/neutron/tests/contrib/README b/neutron/tests/contrib/README
deleted file mode 100644 (file)
index a73d75a..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-The files in this directory are intended for use by the
-Neutron infra jobs that run the various functional test
-suites in the gate.
diff --git a/neutron/tests/contrib/functional-testing.filters b/neutron/tests/contrib/functional-testing.filters
deleted file mode 100644 (file)
index 1b09f69..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# neutron-rootwrap command filters to support functional testing.  It
-# is NOT intended to be used outside of a test environment.
-#
-# This file should be owned by (and writable only by) the root user
-
-[Filters]
-# enable ping from namespace
-ping_filter: CommandFilter, ping, root
-ping6_filter: CommandFilter, ping6, root
-ping_kill: KillFilter, root, ping, -2
-
-# enable curl from namespace
-curl_filter: RegExpFilter, /usr/bin/curl, root, curl, --max-time, \d+, -D-, http://[0-9a-z:./-]+
-nc_filter: CommandFilter, nc, root
-# netcat has different binaries depending on the Linux distribution
-nc_kill: KillFilter, root, nc, -9
-ncbsd_kill: KillFilter, root, nc.openbsd, -9
-ncat_kill: KillFilter, root, ncat, -9
-ss_filter: CommandFilter, ss, root
-
-# enable neutron-linuxbridge-cleanup from namespace
-lb_cleanup_filter: RegExpFilter, neutron-linuxbridge-cleanup, root, neutron-linuxbridge-cleanup, --config-file, .*
-
-# enable dhclient from namespace
-dhclient_filter: CommandFilter, dhclient, root
-dhclient_kill: KillFilter, root, dhclient, -9
-
-# dhclient is used to test the dhcp-agent and runs in the dhcp-agent
-# namespace. If no resolv.conf file exists in that namespace, dhclient
-# will override the system /etc/resolv.conf.
-# The filters below restrict mkdir, rm and touch to creating and deleting
-# the resolv.conf file in that namespace only.
-mkdir_filter: RegExpFilter, /bin/mkdir, root, mkdir, -p, /etc/netns/qdhcp-[0-9a-z./-]+
-rm_filter: RegExpFilter, /bin/rm, root, rm, -r, /etc/netns/qdhcp-[0-9a-z./-]+
-touch_filter: RegExpFilter, /bin/touch, root, touch, /etc/netns/qdhcp-[0-9a-z./-]+/resolv.conf
-touch_filter2: RegExpFilter, /usr/bin/touch, root, touch, /etc/netns/qdhcp-[0-9a-z./-]+/resolv.conf
diff --git a/neutron/tests/contrib/gate_hook.sh b/neutron/tests/contrib/gate_hook.sh
deleted file mode 100644 (file)
index 4156452..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env bash
-
-set -ex
-
-VENV=${1:-"dsvm-functional"}
-
-GATE_DEST=$BASE/new
-DEVSTACK_PATH=$GATE_DEST/devstack
-
-if [ "$VENV" == "dsvm-functional" ] || [ "$VENV" == "dsvm-fullstack" ]
-then
-    # The following need to be set before sourcing
-    # configure_for_func_testing.
-    GATE_STACK_USER=stack
-    NEUTRON_PATH=$GATE_DEST/neutron
-    PROJECT_NAME=neutron
-    IS_GATE=True
-
-    source $NEUTRON_PATH/tools/configure_for_func_testing.sh
-
-    # Make the workspace owned by the stack user
-    sudo chown -R $STACK_USER:$STACK_USER $BASE
-
-    configure_host_for_func_testing
-elif [ "$VENV" == "api" -o "$VENV" == "api-pecan" -o "$VENV" == "full-pecan" ]
-then
-    cat > $DEVSTACK_PATH/local.conf <<EOF
-[[post-config|/etc/neutron/neutron_vpnaas.conf]]
-
-[service_providers]
-service_provider=VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-
-EOF
-
-    if [ "$VENV" == "api-pecan" -o "$VENV" == "full-pecan" ]
-    then
-        cat >> $DEVSTACK_PATH/local.conf <<EOF
-[[post-config|/etc/neutron/neutron.conf]]
-
-[default]
-web_framework=pecan
-
-EOF
-    fi
-
-    export DEVSTACK_LOCAL_CONFIG+="
-enable_plugin neutron-vpnaas git://git.openstack.org/openstack/neutron-vpnaas
-enable_plugin neutron git://git.openstack.org/openstack/neutron
-enable_service q-qos
-"
-
-    $BASE/new/devstack-gate/devstack-vm-gate.sh
-elif [ "$VENV" == "dsvm-plus" ]
-then
-    # We need the qos service enabled to add corresponding scenario tests to tempest
-    export DEVSTACK_LOCAL_CONFIG+="
-enable_plugin neutron git://git.openstack.org/openstack/neutron
-enable_service qos
-"
-
-    $BASE/new/devstack-gate/devstack-vm-gate.sh
-fi
diff --git a/neutron/tests/contrib/post_test_hook.sh b/neutron/tests/contrib/post_test_hook.sh
deleted file mode 100644 (file)
index 96bdc4d..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env bash
-
-set -xe
-
-NEUTRON_DIR="$BASE/new/neutron"
-TEMPEST_DIR="$BASE/new/tempest"
-SCRIPTS_DIR="/usr/os-testr-env/bin/"
-
-venv=${1:-"dsvm-functional"}
-
-function generate_test_logs {
-    local path="$1"
-    # Compress all $path/*.txt files and move the directories holding those
-    # files to /opt/stack/logs. Files with .log suffix have their
-    # suffix changed to .txt (so browsers will know to open the compressed
-    # files and not download them).
-    if [ -d "$path" ]
-    then
-        sudo find $path -iname "*.log" -type f -exec mv {} {}.txt \; -exec gzip -9 {}.txt \;
-        sudo mv $path/* /opt/stack/logs/
-    fi
-}
-
-function generate_testr_results {
-    # Give job user rights to access tox logs
-    sudo -H -u $owner chmod o+rw .
-    sudo -H -u $owner chmod o+rw -R .testrepository
-    if [ -f ".testrepository/0" ] ; then
-        .tox/$venv/bin/subunit-1to2 < .testrepository/0 > ./testrepository.subunit
-        $SCRIPTS_DIR/subunit2html ./testrepository.subunit testr_results.html
-        gzip -9 ./testrepository.subunit
-        gzip -9 ./testr_results.html
-        sudo mv ./*.gz /opt/stack/logs/
-    fi
-
-    if [[ "$venv" == dsvm-functional* ]] || [[ "$venv" == dsvm-fullstack* ]]
-    then
-        generate_test_logs $log_dir
-    fi
-}
-
-if [[ "$venv" == dsvm-functional* ]] || [[ "$venv" == dsvm-fullstack* ]]
-then
-    owner=stack
-    sudo_env=
-    log_dir="/tmp/${venv}-logs"
-elif [ "$venv" == "api" ]
-then
-    owner=tempest
-    # Configure the api tests to use the tempest.conf set by devstack.
-    sudo_env="TEMPEST_CONFIG_DIR=$TEMPEST_DIR/etc"
-fi
-
-# Set owner permissions according to job's requirements.
-cd $NEUTRON_DIR
-sudo chown -R $owner:stack $NEUTRON_DIR
-
-# NOTE(armax): this is a gate hook and we should run in a constrained env
-# to avoid breakage from uncontrolled upper constraints
-venv=$venv-constraints
-
-# Run tests
-echo "Running neutron $venv test suite"
-set +e
-sudo -H -u $owner $sudo_env tox -e $venv
-testr_exit_code=$?
-set -e
-
-# Collect and parse results
-generate_testr_results
-exit $testr_exit_code
diff --git a/neutron/tests/etc/api-paste.ini.test b/neutron/tests/etc/api-paste.ini.test
deleted file mode 100644 (file)
index 59dd915..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-[pipeline:extensions_app_with_filter]
-pipeline = extensions extensions_test_app
-
-[filter:extensions]
-paste.filter_factory = neutron.common.extensions:plugin_aware_extension_middleware_factory
-
-[app:extensions_test_app]
-paste.app_factory = neutron.tests.unit.api.test_extensions:app_factory
diff --git a/neutron/tests/etc/neutron.conf b/neutron/tests/etc/neutron.conf
deleted file mode 100644 (file)
index c636bd2..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-[DEFAULT]
-# Show more verbose log output (sets INFO log level output)
-verbose = True
-
-# Show debugging output in logs (sets DEBUG log level output)
-debug = False
-
-# Address to bind the API server
-bind_host = 0.0.0.0
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions
-api_extensions_path = neutron/tests/unit/extensions
-
-# Paste configuration file
-api_paste_config = api-paste.ini.test
-
-# The messaging module to use, defaults to kombu.
-rpc_backend = fake
-
-lock_path = $state_path/lock
-
-[database]
-connection = 'sqlite://'
-
diff --git a/neutron/tests/etc/neutron_test.conf b/neutron/tests/etc/neutron_test.conf
deleted file mode 100644 (file)
index cd4110d..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-[service_providers]
-service_provider=foo
-service_provider=bar
diff --git a/neutron/tests/etc/policy.json b/neutron/tests/etc/policy.json
deleted file mode 100644 (file)
index c551eb8..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-{
-    "context_is_admin":  "role:admin",
-    "owner": "tenant_id:%(tenant_id)s",
-    "admin_or_owner": "rule:context_is_admin or rule:owner",
-    "context_is_advsvc":  "role:advsvc",
-    "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
-    "admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner",
-    "admin_only": "rule:context_is_admin",
-    "regular_user": "",
-    "shared": "field:networks:shared=True",
-    "shared_firewalls": "field:firewalls:shared=True",
-    "shared_firewall_policies": "field:firewall_policies:shared=True",
-    "shared_subnetpools": "field:subnetpools:shared=True",
-    "shared_address_scopes": "field:address_scopes:shared=True",
-    "external": "field:networks:router:external=True",
-    "default": "rule:admin_or_owner",
-
-    "create_subnet": "rule:admin_or_network_owner",
-    "get_subnet": "rule:admin_or_owner or rule:shared",
-    "update_subnet": "rule:admin_or_network_owner",
-    "delete_subnet": "rule:admin_or_network_owner",
-
-    "create_subnetpool": "",
-    "create_subnetpool:shared": "rule:admin_only",
-    "create_subnetpool:is_default": "rule:admin_only",
-    "get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools",
-    "update_subnetpool": "rule:admin_or_owner",
-    "update_subnetpool:is_default": "rule:admin_only",
-    "delete_subnetpool": "rule:admin_or_owner",
-
-    "create_address_scope": "",
-    "create_address_scope:shared": "rule:admin_only",
-    "get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes",
-    "update_address_scope": "rule:admin_or_owner",
-    "update_address_scope:shared": "rule:admin_only",
-    "delete_address_scope": "rule:admin_or_owner",
-
-    "create_network": "",
-    "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
-    "get_network:router:external": "rule:regular_user",
-    "get_network:segments": "rule:admin_only",
-    "get_network:provider:network_type": "rule:admin_only",
-    "get_network:provider:physical_network": "rule:admin_only",
-    "get_network:provider:segmentation_id": "rule:admin_only",
-    "get_network:queue_id": "rule:admin_only",
-    "create_network:shared": "rule:admin_only",
-    "create_network:router:external": "rule:admin_only",
-    "create_network:segments": "rule:admin_only",
-    "create_network:provider:network_type": "rule:admin_only",
-    "create_network:provider:physical_network": "rule:admin_only",
-    "create_network:provider:segmentation_id": "rule:admin_only",
-    "update_network": "rule:admin_or_owner",
-    "update_network:segments": "rule:admin_only",
-    "update_network:shared": "rule:admin_only",
-    "update_network:provider:network_type": "rule:admin_only",
-    "update_network:provider:physical_network": "rule:admin_only",
-    "update_network:provider:segmentation_id": "rule:admin_only",
-    "update_network:router:external": "rule:admin_only",
-    "delete_network": "rule:admin_or_owner",
-
-    "network_device": "field:port:device_owner=~^network:",
-    "create_port": "",
-    "create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:binding:host_id": "rule:admin_only",
-    "create_port:binding:profile": "rule:admin_only",
-    "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:allowed_address_pairs": "rule:admin_or_network_owner",
-    "get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
-    "get_port:queue_id": "rule:admin_only",
-    "get_port:binding:vif_type": "rule:admin_only",
-    "get_port:binding:vif_details": "rule:admin_only",
-    "get_port:binding:host_id": "rule:admin_only",
-    "get_port:binding:profile": "rule:admin_only",
-    "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
-    "update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
-    "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:binding:host_id": "rule:admin_only",
-    "update_port:binding:profile": "rule:admin_only",
-    "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:allowed_address_pairs": "rule:admin_or_network_owner",
-    "delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
-
-    "get_router:ha": "rule:admin_only",
-    "create_router": "rule:regular_user",
-    "create_router:external_gateway_info:enable_snat": "rule:admin_only",
-    "create_router:distributed": "rule:admin_only",
-    "create_router:ha": "rule:admin_only",
-    "get_router": "rule:admin_or_owner",
-    "get_router:distributed": "rule:admin_only",
-    "update_router:external_gateway_info:enable_snat": "rule:admin_only",
-    "update_router:distributed": "rule:admin_only",
-    "update_router:ha": "rule:admin_only",
-    "delete_router": "rule:admin_or_owner",
-
-    "add_router_interface": "rule:admin_or_owner",
-    "remove_router_interface": "rule:admin_or_owner",
-
-    "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
-    "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
-
-    "create_firewall": "",
-    "get_firewall": "rule:admin_or_owner",
-    "create_firewall:shared": "rule:admin_only",
-    "get_firewall:shared": "rule:admin_only",
-    "update_firewall": "rule:admin_or_owner",
-    "update_firewall:shared": "rule:admin_only",
-    "delete_firewall": "rule:admin_or_owner",
-
-    "create_firewall_policy": "",
-    "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies",
-    "create_firewall_policy:shared": "rule:admin_or_owner",
-    "update_firewall_policy": "rule:admin_or_owner",
-    "delete_firewall_policy": "rule:admin_or_owner",
-
-    "insert_rule": "rule:admin_or_owner",
-    "remove_rule": "rule:admin_or_owner",
-
-    "create_firewall_rule": "",
-    "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
-    "update_firewall_rule": "rule:admin_or_owner",
-    "delete_firewall_rule": "rule:admin_or_owner",
-
-    "create_qos_queue": "rule:admin_only",
-    "get_qos_queue": "rule:admin_only",
-
-    "update_agent": "rule:admin_only",
-    "delete_agent": "rule:admin_only",
-    "get_agent": "rule:admin_only",
-
-    "create_dhcp-network": "rule:admin_only",
-    "delete_dhcp-network": "rule:admin_only",
-    "get_dhcp-networks": "rule:admin_only",
-    "create_l3-router": "rule:admin_only",
-    "delete_l3-router": "rule:admin_only",
-    "get_l3-routers": "rule:admin_only",
-    "get_dhcp-agents": "rule:admin_only",
-    "get_l3-agents": "rule:admin_only",
-    "get_loadbalancer-agent": "rule:admin_only",
-    "get_loadbalancer-pools": "rule:admin_only",
-    "get_agent-loadbalancers": "rule:admin_only",
-    "get_loadbalancer-hosting-agent": "rule:admin_only",
-
-    "create_floatingip": "rule:regular_user",
-    "create_floatingip:floating_ip_address": "rule:admin_only",
-    "update_floatingip": "rule:admin_or_owner",
-    "delete_floatingip": "rule:admin_or_owner",
-    "get_floatingip": "rule:admin_or_owner",
-
-    "create_network_profile": "rule:admin_only",
-    "update_network_profile": "rule:admin_only",
-    "delete_network_profile": "rule:admin_only",
-    "get_network_profiles": "",
-    "get_network_profile": "",
-    "update_policy_profiles": "rule:admin_only",
-    "get_policy_profiles": "",
-    "get_policy_profile": "",
-
-    "create_metering_label": "rule:admin_only",
-    "delete_metering_label": "rule:admin_only",
-    "get_metering_label": "rule:admin_only",
-
-    "create_metering_label_rule": "rule:admin_only",
-    "delete_metering_label_rule": "rule:admin_only",
-    "get_metering_label_rule": "rule:admin_only",
-
-    "get_service_provider": "rule:regular_user",
-    "get_lsn": "rule:admin_only",
-    "create_lsn": "rule:admin_only",
-
-    "create_flavor": "rule:admin_only",
-    "update_flavor": "rule:admin_only",
-    "delete_flavor": "rule:admin_only",
-    "get_flavors": "rule:regular_user",
-    "get_flavor": "rule:regular_user",
-    "create_service_profile": "rule:admin_only",
-    "update_service_profile": "rule:admin_only",
-    "delete_service_profile": "rule:admin_only",
-    "get_service_profiles": "rule:admin_only",
-    "get_service_profile": "rule:admin_only",
-
-    "get_policy": "rule:regular_user",
-    "create_policy": "rule:admin_only",
-    "update_policy": "rule:admin_only",
-    "delete_policy": "rule:admin_only",
-    "get_policy_bandwidth_limit_rule": "rule:regular_user",
-    "create_policy_bandwidth_limit_rule": "rule:admin_only",
-    "delete_policy_bandwidth_limit_rule": "rule:admin_only",
-    "update_policy_bandwidth_limit_rule": "rule:admin_only",
-    "get_rule_type": "rule:regular_user",
-
-    "restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only",
-    "create_rbac_policy": "",
-    "create_rbac_policy:target_tenant": "rule:restrict_wildcard",
-    "update_rbac_policy": "rule:admin_or_owner",
-    "update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner",
-    "get_rbac_policy": "rule:admin_or_owner",
-    "delete_rbac_policy": "rule:admin_or_owner",
-
-    "create_flavor_service_profile": "rule:admin_only",
-    "delete_flavor_service_profile": "rule:admin_only",
-    "get_flavor_service_profile": "rule:regular_user"
-}
diff --git a/neutron/tests/fake_notifier.py b/neutron/tests/fake_notifier.py
deleted file mode 100644 (file)
index 2972fd6..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import functools
-
-
-NOTIFICATIONS = []
-
-
-def reset():
-    del NOTIFICATIONS[:]
-
-
-FakeMessage = collections.namedtuple('Message',
-                                     ['publisher_id', 'priority',
-                                      'event_type', 'payload'])
-
-
-class FakeNotifier(object):
-
-    def __init__(self, transport, publisher_id=None,
-                 driver=None, topic=None,
-                 serializer=None, retry=None):
-        self.transport = transport
-        self.publisher_id = publisher_id
-        for priority in ('debug', 'info', 'warn', 'error', 'critical'):
-            setattr(self, priority,
-                    functools.partial(self._notify, priority=priority.upper()))
-
-    def prepare(self, publisher_id=None):
-        if publisher_id is None:
-            publisher_id = self.publisher_id
-        return self.__class__(self.transport, publisher_id)
-
-    def _notify(self, ctxt, event_type, payload, priority):
-        msg = dict(publisher_id=self.publisher_id,
-                   priority=priority,
-                   event_type=event_type,
-                   payload=payload)
-        NOTIFICATIONS.append(msg)
diff --git a/neutron/tests/fullstack/README b/neutron/tests/fullstack/README
deleted file mode 100644 (file)
index c172c70..0000000
+++ /dev/null
@@ -1 +0,0 @@
-Please see neutron/TESTING.rst.
\ No newline at end of file
diff --git a/neutron/tests/fullstack/__init__.py b/neutron/tests/fullstack/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/fullstack/base.py b/neutron/tests/fullstack/base.py
deleted file mode 100644 (file)
index 2e95ba4..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_db.sqlalchemy import test_base
-
-from neutron.db.migration import cli as migration
-from neutron.tests.common import base
-from neutron.tests.fullstack.resources import client as client_resource
-
-
-class BaseFullStackTestCase(base.MySQLTestCase):
-    """Base test class for full-stack tests."""
-
-    def setUp(self, environment):
-        super(BaseFullStackTestCase, self).setUp()
-        self.create_db_tables()
-        self.environment = environment
-        self.environment.test_name = self.get_name()
-        self.useFixture(self.environment)
-        self.client = self.environment.neutron_server.client
-        self.safe_client = self.useFixture(
-            client_resource.ClientFixture(self.client))
-
-    def get_name(self):
-        class_name, test_name = self.id().split(".")[-2:]
-        return "%s.%s" % (class_name, test_name)
-
-    def create_db_tables(self):
-        """Populate the new database.
-
-        MySQLTestCase creates a new database for each test, but each one
-        needs to be populated with the appropriate tables. Before we can do
-        that, we must change the 'connection' option, which is what the
-        Neutron code looks at.
-
-        Currently, the username and password options are hard-coded by
-        oslo.db and neutron/tests/functional/contrib/gate_hook.sh. Also,
-        we only support MySQL for now, but the groundwork for adding Postgres
-        is already laid.
-        """
-        conn = ("mysql+pymysql://%(username)s:%(password)s"
-                "@127.0.0.1/%(db_name)s" % {
-                    'username': test_base.DbFixture.USERNAME,
-                    'password': test_base.DbFixture.PASSWORD,
-                    'db_name': self.engine.url.database})
-
-        alembic_config = migration.get_neutron_config()
-        alembic_config.neutron_config = cfg.CONF
-        self.original_conn = cfg.CONF.database.connection
-        self.addCleanup(self._revert_connection_address)
-        cfg.CONF.set_override('connection', conn, group='database')
-
-        migration.do_alembic_command(alembic_config, 'upgrade', 'heads')
-
-    def _revert_connection_address(self):
-        cfg.CONF.set_override('connection',
-                              self.original_conn,
-                              group='database')
diff --git a/neutron/tests/fullstack/resources/__init__.py b/neutron/tests/fullstack/resources/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/fullstack/resources/client.py b/neutron/tests/fullstack/resources/client.py
deleted file mode 100644 (file)
index 449c353..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright (c) 2015 Thales Services SAS
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-import functools
-
-import fixtures
-from neutronclient.common import exceptions
-
-from neutron.extensions import portbindings
-from neutron.tests import base
-
-
-def _safe_method(f):
-    @functools.wraps(f)
-    def delete(*args, **kwargs):
-        try:
-            return f(*args, **kwargs)
-        except exceptions.NotFound:
-            pass
-    return delete
-
-
-class ClientFixture(fixtures.Fixture):
-    """Manage and cleanup neutron resources."""
-
-    def __init__(self, client):
-        super(ClientFixture, self).__init__()
-        self.client = client
-
-    def _create_resource(self, resource_type, spec):
-        create = getattr(self.client, 'create_%s' % resource_type)
-        delete = getattr(self.client, 'delete_%s' % resource_type)
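-        # e.g. resource_type 'network' resolves create_network/delete_network
-        # on the neutron client; cleanup tolerates already-deleted resources.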
-
-        body = {resource_type: spec}
-        resp = create(body=body)
-        data = resp[resource_type]
-        self.addCleanup(_safe_method(delete), data['id'])
-        return data
-
-    def create_router(self, tenant_id, name=None, ha=False):
-        resource_type = 'router'
-
-        name = name or base.get_rand_name(prefix=resource_type)
-        spec = {'tenant_id': tenant_id, 'name': name, 'ha': ha}
-
-        return self._create_resource(resource_type, spec)
-
-    def create_network(self, tenant_id, name=None):
-        resource_type = 'network'
-
-        name = name or base.get_rand_name(prefix=resource_type)
-        spec = {'tenant_id': tenant_id, 'name': name}
-
-        return self._create_resource(resource_type, spec)
-
-    def create_subnet(self, tenant_id, network_id,
-                      cidr, gateway_ip=None, ip_version=4,
-                      name=None, enable_dhcp=True):
-        resource_type = 'subnet'
-
-        name = name or base.get_rand_name(prefix=resource_type)
-        spec = {'tenant_id': tenant_id, 'network_id': network_id, 'name': name,
-                'cidr': cidr, 'ip_version': ip_version,
-                'enable_dhcp': enable_dhcp}
-        if gateway_ip:
-            spec['gateway_ip'] = gateway_ip
-
-        return self._create_resource(resource_type, spec)
-
-    def create_port(self, tenant_id, network_id, hostname, qos_policy_id=None):
-        spec = {
-            'network_id': network_id,
-            'tenant_id': tenant_id,
-            portbindings.HOST_ID: hostname,
-        }
-        if qos_policy_id:
-            spec['qos_policy_id'] = qos_policy_id
-        return self._create_resource('port', spec)
-
-    def add_router_interface(self, router_id, subnet_id):
-        body = {'subnet_id': subnet_id}
-        self.client.add_interface_router(router=router_id, body=body)
-        self.addCleanup(_safe_method(self.client.remove_interface_router),
-                        router=router_id, body=body)
-
-    def create_qos_policy(self, tenant_id, name, description, shared):
-        policy = self.client.create_qos_policy(
-            body={'policy': {'name': name,
-                             'description': description,
-                             'shared': shared,
-                             'tenant_id': tenant_id}})
-
-        def detach_and_delete_policy():
-            qos_policy_id = policy['policy']['id']
-            ports_with_policy = self.client.list_ports(
-                qos_policy_id=qos_policy_id)['ports']
-            for port in ports_with_policy:
-                self.client.update_port(
-                    port['id'],
-                    body={'port': {'qos_policy_id': None}})
-            self.client.delete_qos_policy(qos_policy_id)
-
-        # NOTE: We'll need to add support for detaching from network once
-        # create_network() supports qos_policy_id.
-        self.addCleanup(_safe_method(detach_and_delete_policy))
-
-        return policy['policy']
-
-    def create_bandwidth_limit_rule(self, tenant_id, qos_policy_id, limit=None,
-                                    burst=None):
-        rule = {'tenant_id': tenant_id}
-        if limit:
-            rule['max_kbps'] = limit
-        if burst:
-            rule['max_burst_kbps'] = burst
-        rule = self.client.create_bandwidth_limit_rule(
-            policy=qos_policy_id,
-            body={'bandwidth_limit_rule': rule})
-
-        self.addCleanup(_safe_method(self.client.delete_bandwidth_limit_rule),
-                        rule['bandwidth_limit_rule']['id'],
-                        qos_policy_id)
-
-        return rule['bandwidth_limit_rule']
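The client fixture above pairs every create_* call with a cleanup wrapped in _safe_method, so deleting a resource that a test already removed does not fail teardown. A minimal usage sketch, assuming a test that already holds a neutronclient instance as self.client (tenant id illustrative):

    safe_client = self.useFixture(ClientFixture(self.client))
    network = safe_client.create_network(tenant_id='some-tenant-id')
    safe_client.create_subnet('some-tenant-id', network['id'],
                              cidr='10.0.0.0/24')
    # Both resources are deleted automatically at cleanup time; a
    # NotFound raised during deletion is swallowed by _safe_method.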
diff --git a/neutron/tests/fullstack/resources/config.py b/neutron/tests/fullstack/resources/config.py
deleted file mode 100644 (file)
index 94d95e0..0000000
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import tempfile
-
-import fixtures
-
-from neutron.common import constants
-from neutron.plugins.ml2.extensions import qos as qos_ext
-from neutron.tests import base
-from neutron.tests.common import config_fixtures
-from neutron.tests.common import helpers as c_helpers
-from neutron.tests.common import net_helpers
-
-
-def _generate_port():
-    """Get a free TCP port from the Operating System and return it.
-
-    This might fail if some other process occupies this port after this
-    function finished but before the neutron-server process started.
-    """
-    return str(net_helpers.get_free_namespace_port(
-        constants.PROTO_NAME_TCP))
-
-
-class ConfigFixture(fixtures.Fixture):
-    """A fixture that holds an actual Neutron configuration.
-
-    Note that 'self.config' is intended to be updated only once, in the
-    constructor, so if this fixture is re-used (setUp is called twice),
-    the dynamic configuration values won't change. The correct usage is
-    to initialize a new instance of the class.
-    """
-    def __init__(self, env_desc, host_desc, temp_dir, base_filename):
-        super(ConfigFixture, self).__init__()
-        self.config = config_fixtures.ConfigDict()
-        self.env_desc = env_desc
-        self.host_desc = host_desc
-        self.temp_dir = temp_dir
-        self.base_filename = base_filename
-
-    def _setUp(self):
-        cfg_fixture = config_fixtures.ConfigFileFixture(
-            self.base_filename, self.config, self.temp_dir)
-        self.useFixture(cfg_fixture)
-        self.filename = cfg_fixture.filename
-
-
-class NeutronConfigFixture(ConfigFixture):
-
-    def __init__(self, env_desc, host_desc, temp_dir,
-                 connection, rabbitmq_environment):
-        super(NeutronConfigFixture, self).__init__(
-            env_desc, host_desc, temp_dir, base_filename='neutron.conf')
-
-        service_plugins = ['router']
-        if env_desc.qos:
-            service_plugins.append('qos')
-
-        self.config.update({
-            'DEFAULT': {
-                'host': self._generate_host(),
-                'state_path': self._generate_state_path(self.temp_dir),
-                'lock_path': '$state_path/lock',
-                'bind_port': _generate_port(),
-                'api_paste_config': self._generate_api_paste(),
-                'policy_file': self._generate_policy_json(),
-                'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin',
-                'service_plugins': ','.join(service_plugins),
-                'auth_strategy': 'noauth',
-                'verbose': 'True',
-                'debug': 'True',
-            },
-            'database': {
-                'connection': connection,
-            },
-            'oslo_messaging_rabbit': {
-                'rabbit_userid': rabbitmq_environment.user,
-                'rabbit_password': rabbitmq_environment.password,
-                'rabbit_hosts': '127.0.0.1',
-                'rabbit_virtual_host': rabbitmq_environment.vhost,
-            }
-        })
-
-    def _generate_host(self):
-        return base.get_rand_name(prefix='host-')
-
-    def _generate_state_path(self, temp_dir):
-        # Assume that temp_dir will be removed by the caller
-        self.state_path = tempfile.mkdtemp(prefix='state_path', dir=temp_dir)
-        return self.state_path
-
-    def _generate_api_paste(self):
-        return c_helpers.find_sample_file('api-paste.ini')
-
-    def _generate_policy_json(self):
-        return c_helpers.find_sample_file('policy.json')
-
-
-class ML2ConfigFixture(ConfigFixture):
-
-    def __init__(self, env_desc, host_desc, temp_dir, tenant_network_types):
-        super(ML2ConfigFixture, self).__init__(
-            env_desc, host_desc, temp_dir, base_filename='ml2_conf.ini')
-
-        mechanism_drivers = 'openvswitch'
-        if self.env_desc.l2_pop:
-            mechanism_drivers += ',l2population'
-
-        self.config.update({
-            'ml2': {
-                'tenant_network_types': tenant_network_types,
-                'mechanism_drivers': mechanism_drivers,
-            },
-            'ml2_type_vlan': {
-                'network_vlan_ranges': 'physnet1:1000:2999',
-            },
-            'ml2_type_gre': {
-                'tunnel_id_ranges': '1:1000',
-            },
-            'ml2_type_vxlan': {
-                'vni_ranges': '1001:2000',
-            },
-        })
-
-        if env_desc.qos:
-            self.config['ml2']['extension_drivers'] = (
-                qos_ext.QOS_EXT_DRIVER_ALIAS)
-
-
-class OVSConfigFixture(ConfigFixture):
-
-    def __init__(self, env_desc, host_desc, temp_dir, local_ip):
-        super(OVSConfigFixture, self).__init__(
-            env_desc, host_desc, temp_dir,
-            base_filename='openvswitch_agent.ini')
-
-        self.tunneling_enabled = self.env_desc.tunneling_enabled
-        self.config.update({
-            'ovs': {
-                'local_ip': local_ip,
-                'integration_bridge': self._generate_integration_bridge(),
-                'of_interface': host_desc.of_interface,
-            },
-            'securitygroup': {
-                'firewall_driver': ('neutron.agent.linux.iptables_firewall.'
-                                    'OVSHybridIptablesFirewallDriver'),
-            },
-            'agent': {
-                'l2_population': str(self.env_desc.l2_pop),
-            }
-        })
-
-        if self.config['ovs']['of_interface'] == 'native':
-            self.config['ovs'].update({
-                'of_listen_port': _generate_port()})
-
-        if self.tunneling_enabled:
-            self.config['agent'].update({
-                'tunnel_types': self.env_desc.network_type})
-            self.config['ovs'].update({
-                'tunnel_bridge': self._generate_tunnel_bridge(),
-                'int_peer_patch_port': self._generate_int_peer(),
-                'tun_peer_patch_port': self._generate_tun_peer()})
-        else:
-            self.config['ovs']['bridge_mappings'] = (
-                self._generate_bridge_mappings())
-
-        if env_desc.qos:
-            self.config['agent']['extensions'] = 'qos'
-
-    def _generate_bridge_mappings(self):
-        return 'physnet1:%s' % base.get_rand_device_name(prefix='br-eth')
-
-    def _generate_integration_bridge(self):
-        return base.get_rand_device_name(prefix='br-int')
-
-    def _generate_tunnel_bridge(self):
-        return base.get_rand_device_name(prefix='br-tun')
-
-    def _generate_int_peer(self):
-        return base.get_rand_device_name(prefix='patch-tun')
-
-    def _generate_tun_peer(self):
-        return base.get_rand_device_name(prefix='patch-int')
-
-    def get_br_int_name(self):
-        return self.config.ovs.integration_bridge
-
-    def get_br_phys_name(self):
-        return self.config.ovs.bridge_mappings.split(':')[1]
-
-    def get_br_tun_name(self):
-        return self.config.ovs.tunnel_bridge
-
-
-class L3ConfigFixture(ConfigFixture):
-
-    def __init__(self, env_desc, host_desc, temp_dir, integration_bridge):
-        super(L3ConfigFixture, self).__init__(
-            env_desc, host_desc, temp_dir, base_filename='l3_agent.ini')
-
-        self.config.update({
-            'DEFAULT': {
-                'l3_agent_manager': ('neutron.agent.l3_agent.'
-                                     'L3NATAgentWithStateReport'),
-                'interface_driver': ('neutron.agent.linux.interface.'
-                                     'OVSInterfaceDriver'),
-                'ovs_integration_bridge': integration_bridge,
-                'external_network_bridge': self._generate_external_bridge(),
-                'debug': 'True',
-                'verbose': 'True',
-                'test_namespace_suffix': self._generate_namespace_suffix(),
-            }
-        })
-
-    def _generate_external_bridge(self):
-        return base.get_rand_device_name(prefix='br-ex')
-
-    def get_external_bridge(self):
-        return self.config.DEFAULT.external_network_bridge
-
-    def _generate_namespace_suffix(self):
-        return base.get_rand_name(prefix='test')
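Each ConfigFixture subclass above only populates a nested dict; ConfigFileFixture then writes it to disk as an oslo.config-style ini file. A rough, hypothetical sketch of that dict-to-ini mapping (section names and values illustrative):

    config = {
        'DEFAULT': {'host': 'host-abc123', 'auth_strategy': 'noauth'},
        'database': {'connection': 'sqlite://'},
    }
    for section, options in config.items():
        print('[%s]' % section)
        for key, value in options.items():
            print('%s = %s' % (key, value))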
diff --git a/neutron/tests/fullstack/resources/environment.py b/neutron/tests/fullstack/resources/environment.py
deleted file mode 100644 (file)
index 1105e8c..0000000
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import random
-
-import fixtures
-import netaddr
-from neutronclient.common import exceptions as nc_exc
-from oslo_config import cfg
-
-from neutron.agent.linux import utils
-from neutron.common import utils as common_utils
-from neutron.tests.common import net_helpers
-from neutron.tests.fullstack.resources import config
-from neutron.tests.fullstack.resources import process
-
-
-class EnvironmentDescription(object):
-    """A set of characteristics of an environment setup.
-
-    Does the setup, as a whole, support tunneling? How about l2pop?
-    """
-    def __init__(self, network_type='vxlan', l2_pop=True, qos=False):
-        self.network_type = network_type
-        self.l2_pop = l2_pop
-        self.qos = qos
-
-    @property
-    def tunneling_enabled(self):
-        return self.network_type in ('vxlan', 'gre')
-
-
-class HostDescription(object):
-    """A set of characteristics of an environment Host.
-
-    What agents should the host spawn? What mode should each agent operate
-    under?
-    """
-    def __init__(self, l3_agent=False, of_interface='ovs-ofctl'):
-        self.l3_agent = l3_agent
-        self.of_interface = of_interface
-
-
-class Host(fixtures.Fixture):
-    """The Host class models a physical host running agents, all reporting with
-    the same hostname.
-
-    OpenStack installers or administrators connect compute nodes to the
-    physical tenant network by connecting the provider bridges to their
-    respective physical NICs or, when tunneling is used, by configuring an
-    IP address on the appropriate physical NIC. The Host class does the
-    same with the connect_* methods.
-
-    TODO(amuller): Add start/stop/restart methods that will start/stop/restart
-    all of the agents on this host. Add a kill method that stops all agents
-    and disconnects the host from other hosts.
-    """
-
-    def __init__(self, env_desc, host_desc,
-                 test_name, neutron_config,
-                 central_data_bridge, central_external_bridge):
-        super(Host, self).__init__()
-        self.env_desc = env_desc
-        self.host_desc = host_desc
-        self.test_name = test_name
-        self.neutron_config = neutron_config
-        # Use reserved class E addresses
-        self.local_ip = self.get_random_ip('240.0.0.1', '255.255.255.254')
-        self.central_data_bridge = central_data_bridge
-        self.central_external_bridge = central_external_bridge
-        self.agents = {}
-
-    def _setUp(self):
-        agent_cfg_fixture = config.OVSConfigFixture(
-            self.env_desc, self.host_desc, self.neutron_config.temp_dir,
-            self.local_ip)
-        self.useFixture(agent_cfg_fixture)
-
-        if self.env_desc.tunneling_enabled:
-            self.useFixture(
-                net_helpers.OVSBridgeFixture(
-                    agent_cfg_fixture.get_br_tun_name())).bridge
-            self.connect_to_internal_network_via_tunneling()
-        else:
-            br_phys = self.useFixture(
-                net_helpers.OVSBridgeFixture(
-                    agent_cfg_fixture.get_br_phys_name())).bridge
-            self.connect_to_internal_network_via_vlans(br_phys)
-
-        self.ovs_agent = self.useFixture(
-            process.OVSAgentFixture(
-                self.env_desc, self.host_desc,
-                self.test_name, self.neutron_config, agent_cfg_fixture))
-
-        if self.host_desc.l3_agent:
-            l3_agent_cfg_fixture = self.useFixture(
-                config.L3ConfigFixture(
-                    self.env_desc, self.host_desc,
-                    self.neutron_config.temp_dir,
-                    self.ovs_agent.agent_cfg_fixture.get_br_int_name()))
-            br_ex = self.useFixture(
-                net_helpers.OVSBridgeFixture(
-                    l3_agent_cfg_fixture.get_external_bridge())).bridge
-            self.connect_to_external_network(br_ex)
-            self.l3_agent = self.useFixture(
-                process.L3AgentFixture(
-                    self.env_desc, self.host_desc,
-                    self.test_name,
-                    self.neutron_config,
-                    l3_agent_cfg_fixture))
-
-    def connect_to_internal_network_via_tunneling(self):
-        veth_1, veth_2 = self.useFixture(
-            net_helpers.VethFixture()).ports
-
-        # NOTE: This sets an IP address in the host's root namespace,
-        # which is cleaned up when the device is deleted.
-        veth_1.addr.add(common_utils.ip_to_cidr(self.local_ip, 32))
-
-        veth_1.link.set_up()
-        veth_2.link.set_up()
-
-    def connect_to_internal_network_via_vlans(self, host_data_bridge):
-        # When using VLANs for segmentation, the provider bridge has to
-        # be connected to a centralized, shared bridge.
-        net_helpers.create_patch_ports(
-            self.central_data_bridge, host_data_bridge)
-
-    def connect_to_external_network(self, host_external_bridge):
-        net_helpers.create_patch_ports(
-            self.central_external_bridge, host_external_bridge)
-
-    @staticmethod
-    def get_random_ip(low, high):
-        parent_range = netaddr.IPRange(low, high)
-        return str(random.choice(parent_range))
-
-    @property
-    def hostname(self):
-        return self.neutron_config.config.DEFAULT.host
-
-    @property
-    def l3_agent(self):
-        return self.agents['l3']
-
-    @l3_agent.setter
-    def l3_agent(self, agent):
-        self.agents['l3'] = agent
-
-    @property
-    def ovs_agent(self):
-        return self.agents['ovs']
-
-    @ovs_agent.setter
-    def ovs_agent(self, agent):
-        self.agents['ovs'] = agent
-
-
-class Environment(fixtures.Fixture):
-    """Represents a deployment topology.
-
-    Environment is a collection of hosts. It starts a Neutron server
-    and a parametrized number of Hosts, each a collection of agents.
-    The Environment accepts a collection of HostDescription instances,
-    each describing the type of Host to create.
-    """
-
-    def __init__(self, env_desc, hosts_desc):
-        """
-        :param env_desc: An EnvironmentDescription instance.
-        :param hosts_desc: A list of HostDescription instances.
-        """
-
-        super(Environment, self).__init__()
-        self.env_desc = env_desc
-        self.hosts_desc = hosts_desc
-        self.hosts = []
-
-    def wait_until_env_is_up(self):
-        utils.wait_until_true(self._processes_are_ready)
-
-    def _processes_are_ready(self):
-        try:
-            running_agents = self.neutron_server.client.list_agents()['agents']
-            agents_count = sum(len(host.agents) for host in self.hosts)
-            return len(running_agents) == agents_count
-        except nc_exc.NeutronClientException:
-            return False
-
-    def _create_host(self, host_desc):
-        temp_dir = self.useFixture(fixtures.TempDir()).path
-        neutron_config = config.NeutronConfigFixture(
-            self.env_desc, host_desc, temp_dir,
-            cfg.CONF.database.connection, self.rabbitmq_environment)
-        self.useFixture(neutron_config)
-
-        return self.useFixture(
-            Host(self.env_desc,
-                 host_desc,
-                 self.test_name,
-                 neutron_config,
-                 self.central_data_bridge,
-                 self.central_external_bridge))
-
-    def _setUp(self):
-        self.temp_dir = self.useFixture(fixtures.TempDir()).path
-
-        self.rabbitmq_environment = self.useFixture(
-            process.RabbitmqEnvironmentFixture())
-
-        plugin_cfg_fixture = self.useFixture(
-            config.ML2ConfigFixture(
-                self.env_desc, None, self.temp_dir,
-                self.env_desc.network_type))
-        neutron_cfg_fixture = self.useFixture(
-            config.NeutronConfigFixture(
-                self.env_desc, None, self.temp_dir,
-                cfg.CONF.database.connection, self.rabbitmq_environment))
-        self.neutron_server = self.useFixture(
-            process.NeutronServerFixture(
-                self.env_desc, None,
-                self.test_name, neutron_cfg_fixture, plugin_cfg_fixture))
-
-        self.central_data_bridge = self.useFixture(
-            net_helpers.OVSBridgeFixture('cnt-data')).bridge
-        self.central_external_bridge = self.useFixture(
-            net_helpers.OVSBridgeFixture('cnt-ex')).bridge
-
-        self.hosts = [self._create_host(desc) for desc in self.hosts_desc]
-
-        self.wait_until_env_is_up()
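The readiness check above defines "environment up" as "every agent spawned by the hosts has registered with the Neutron server". A condensed usage sketch, presumably what BaseFullStackTestCase does with the Environment handed to its setUp:

    env = Environment(
        EnvironmentDescription(network_type='vlan', l2_pop=False),
        [HostDescription(l3_agent=True), HostDescription()])
    self.useFixture(env)  # _setUp() blocks in wait_until_env_is_up()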
diff --git a/neutron/tests/fullstack/resources/machine.py b/neutron/tests/fullstack/resources/machine.py
deleted file mode 100644 (file)
index 170678a..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-
-from neutron.agent.linux import utils
-from neutron.tests.common import machine_fixtures
-from neutron.tests.common import net_helpers
-
-
-class FakeFullstackMachine(machine_fixtures.FakeMachineBase):
-    def __init__(self, host, network_id, tenant_id, safe_client,
-                 neutron_port=None):
-        super(FakeFullstackMachine, self).__init__()
-        self.bridge = host.ovs_agent.br_int
-        self.host_binding = host.hostname
-        self.tenant_id = tenant_id
-        self.network_id = network_id
-        self.safe_client = safe_client
-        self.neutron_port = neutron_port
-
-    def _setUp(self):
-        super(FakeFullstackMachine, self)._setUp()
-
-        if not self.neutron_port:
-            self.neutron_port = self.safe_client.create_port(
-                network_id=self.network_id,
-                tenant_id=self.tenant_id,
-                hostname=self.host_binding)
-        self.neutron_port_id = self.neutron_port['id']
-        mac_address = self.neutron_port['mac_address']
-
-        self.port = self.useFixture(
-            net_helpers.PortFixture.get(
-                self.bridge, self.namespace, mac_address,
-                self.neutron_port_id)).port
-
-        self._ip = self.neutron_port['fixed_ips'][0]['ip_address']
-        subnet_id = self.neutron_port['fixed_ips'][0]['subnet_id']
-        subnet = self.safe_client.client.show_subnet(subnet_id)
-        prefixlen = netaddr.IPNetwork(subnet['subnet']['cidr']).prefixlen
-        self._ip_cidr = '%s/%s' % (self._ip, prefixlen)
-
-        # TODO(amuller): Support DHCP
-        self.port.addr.add(self.ip_cidr)
-
-        self.gateway_ip = subnet['subnet']['gateway_ip']
-        if self.gateway_ip:
-            net_helpers.set_namespace_gateway(self.port, self.gateway_ip)
-
-    @property
-    def ip(self):
-        return self._ip
-
-    @property
-    def ip_cidr(self):
-        return self._ip_cidr
-
-    def block_until_boot(self):
-        utils.wait_until_true(
-            lambda: (self.safe_client.client.show_port(self.neutron_port_id)
-                     ['port']['status'] == 'ACTIVE'),
-            sleep=3)
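The _ip_cidr assembled in _setUp simply joins the port's fixed IP with the subnet's prefix length; for example:

    import netaddr

    ip = '20.0.0.5'
    prefixlen = netaddr.IPNetwork('20.0.0.0/24').prefixlen  # 24
    ip_cidr = '%s/%s' % (ip, prefixlen)  # '20.0.0.5/24'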
diff --git a/neutron/tests/fullstack/resources/process.py b/neutron/tests/fullstack/resources/process.py
deleted file mode 100644 (file)
index 3c1a4db..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import datetime
-from distutils import spawn
-import os
-
-import fixtures
-from neutronclient.common import exceptions as nc_exc
-from neutronclient.v2_0 import client
-from oslo_log import log as logging
-
-from neutron.agent.linux import async_process
-from neutron.agent.linux import utils
-from neutron.common import utils as common_utils
-from neutron.tests import base
-from neutron.tests.common import net_helpers
-
-LOG = logging.getLogger(__name__)
-
-# This is the directory from which infra fetches log files for fullstack tests
-DEFAULT_LOG_DIR = '/tmp/dsvm-fullstack-logs/'
-
-
-class ProcessFixture(fixtures.Fixture):
-    def __init__(self, test_name, process_name, exec_name, config_filenames):
-        super(ProcessFixture, self).__init__()
-        self.test_name = test_name
-        self.process_name = process_name
-        self.exec_name = exec_name
-        self.config_filenames = config_filenames
-        self.process = None
-
-    def _setUp(self):
-        self.start()
-        self.addCleanup(self.stop)
-
-    def start(self):
-        test_name = base.sanitize_log_path(self.test_name)
-
-        log_dir = os.path.join(DEFAULT_LOG_DIR, test_name)
-        common_utils.ensure_dir(log_dir)
-
-        timestamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S-%f")
-        log_file = "%s--%s.log" % (self.process_name, timestamp)
-        cmd = [spawn.find_executable(self.exec_name),
-               '--log-dir', log_dir,
-               '--log-file', log_file]
-        for filename in self.config_filenames:
-            cmd += ['--config-file', filename]
-        self.process = async_process.AsyncProcess(cmd)
-        self.process.start(block=True)
-
-    def stop(self):
-        self.process.stop(block=True)
-
-
-class RabbitmqEnvironmentFixture(fixtures.Fixture):
-
-    def _setUp(self):
-        self.user = base.get_rand_name(prefix='user')
-        self.password = base.get_rand_name(prefix='pass')
-        self.vhost = base.get_rand_name(prefix='vhost')
-
-        self._execute('add_user', self.user, self.password)
-        self.addCleanup(self._execute, 'delete_user', self.user)
-
-        self._execute('add_vhost', self.vhost)
-        self.addCleanup(self._execute, 'delete_vhost', self.vhost)
-
-        self._execute('set_permissions', '-p', self.vhost, self.user,
-                      '.*', '.*', '.*')
-
-    def _execute(self, *args):
-        cmd = ['rabbitmqctl']
-        cmd.extend(args)
-        utils.execute(cmd, run_as_root=True)
-
-
-class NeutronServerFixture(fixtures.Fixture):
-
-    NEUTRON_SERVER = "neutron-server"
-
-    def __init__(self, env_desc, host_desc,
-                 test_name, neutron_cfg_fixture, plugin_cfg_fixture):
-        super(NeutronServerFixture, self).__init__()
-        self.env_desc = env_desc
-        self.host_desc = host_desc
-        self.test_name = test_name
-        self.neutron_cfg_fixture = neutron_cfg_fixture
-        self.plugin_cfg_fixture = plugin_cfg_fixture
-
-    def _setUp(self):
-        config_filenames = [self.neutron_cfg_fixture.filename,
-                            self.plugin_cfg_fixture.filename]
-
-        self.process_fixture = self.useFixture(ProcessFixture(
-            test_name=self.test_name,
-            process_name=self.NEUTRON_SERVER,
-            exec_name=self.NEUTRON_SERVER,
-            config_filenames=config_filenames))
-
-        utils.wait_until_true(self.server_is_live)
-
-    def server_is_live(self):
-        try:
-            self.client.list_networks()
-            return True
-        except nc_exc.NeutronClientException:
-            return False
-
-    @property
-    def client(self):
-        url = ("http://127.0.0.1:%s" %
-               self.neutron_cfg_fixture.config.DEFAULT.bind_port)
-        return client.Client(auth_strategy="noauth", endpoint_url=url)
-
-
-class OVSAgentFixture(fixtures.Fixture):
-
-    NEUTRON_OVS_AGENT = "neutron-openvswitch-agent"
-
-    def __init__(self, env_desc, host_desc,
-                 test_name, neutron_cfg_fixture, agent_cfg_fixture):
-        super(OVSAgentFixture, self).__init__()
-        self.env_desc = env_desc
-        self.host_desc = host_desc
-        self.test_name = test_name
-        self.neutron_cfg_fixture = neutron_cfg_fixture
-        self.neutron_config = self.neutron_cfg_fixture.config
-        self.agent_cfg_fixture = agent_cfg_fixture
-        self.agent_config = agent_cfg_fixture.config
-
-    def _setUp(self):
-        self.br_int = self.useFixture(
-            net_helpers.OVSBridgeFixture(
-                self.agent_cfg_fixture.get_br_int_name())).bridge
-
-        config_filenames = [self.neutron_cfg_fixture.filename,
-                            self.agent_cfg_fixture.filename]
-
-        self.process_fixture = self.useFixture(ProcessFixture(
-            test_name=self.test_name,
-            process_name=self.NEUTRON_OVS_AGENT,
-            exec_name=self.NEUTRON_OVS_AGENT,
-            config_filenames=config_filenames))
-
-
-class L3AgentFixture(fixtures.Fixture):
-
-    NEUTRON_L3_AGENT = "neutron-l3-agent"
-
-    def __init__(self, env_desc, host_desc,
-                 test_name, neutron_cfg_fixture, l3_agent_cfg_fixture):
-        super(L3AgentFixture, self).__init__()
-        self.env_desc = env_desc
-        self.host_desc = host_desc
-        self.test_name = test_name
-        self.neutron_cfg_fixture = neutron_cfg_fixture
-        self.l3_agent_cfg_fixture = l3_agent_cfg_fixture
-
-    def _setUp(self):
-        self.plugin_config = self.l3_agent_cfg_fixture.config
-
-        config_filenames = [self.neutron_cfg_fixture.filename,
-                            self.l3_agent_cfg_fixture.filename]
-
-        self.process_fixture = self.useFixture(ProcessFixture(
-            test_name=self.test_name,
-            process_name=self.NEUTRON_L3_AGENT,
-            exec_name=spawn.find_executable(
-                'l3_agent.py',
-                path=os.path.join(base.ROOTDIR, 'common', 'agents')),
-            config_filenames=config_filenames))
-
-    def get_namespace_suffix(self):
-        return self.plugin_config.DEFAULT.test_namespace_suffix
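ProcessFixture.start() assembles a plain argv from the executable (resolved to a full path by distutils' find_executable) plus one --config-file flag per configuration file. For the server fixture it comes out roughly like this (paths and timestamp illustrative):

    cmd = ['neutron-server',
           '--log-dir', '/tmp/dsvm-fullstack-logs/TestFoo.test_bar',
           '--log-file', 'neutron-server--2015-07-01--12-00-00-000000.log',
           '--config-file', '/tmp/tmpXXXXXX/neutron.conf',
           '--config-file', '/tmp/tmpXXXXXX/ml2_conf.ini']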
diff --git a/neutron/tests/fullstack/test_connectivity.py b/neutron/tests/fullstack/test_connectivity.py
deleted file mode 100644 (file)
index 31ad8f1..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import testscenarios
-
-from oslo_utils import uuidutils
-
-from neutron.tests.fullstack import base
-from neutron.tests.fullstack.resources import environment
-from neutron.tests.fullstack.resources import machine
-
-
-load_tests = testscenarios.load_tests_apply_scenarios
-
-
-class TestConnectivitySameNetwork(base.BaseFullStackTestCase):
-
-    network_scenarios = [
-        ('VXLAN', {'network_type': 'vxlan',
-                   'l2_pop': False}),
-        ('GRE and l2pop', {'network_type': 'gre',
-                           'l2_pop': True}),
-        ('VLANs', {'network_type': 'vlan',
-                   'l2_pop': False})]
-    interface_scenarios = [
-        ('Ofctl', {'of_interface': 'ovs-ofctl'}),
-        ('Native', {'of_interface': 'native'})]
-    scenarios = testscenarios.multiply_scenarios(
-        network_scenarios, interface_scenarios)
-
-    def setUp(self):
-        host_descriptions = [
-            # There's value in enabling L3 agent registration when l2pop
-            # is enabled, because the l2pop code makes assumptions about
-            # the agent types present on machines.
-            environment.HostDescription(
-                l3_agent=self.l2_pop,
-                of_interface=self.of_interface) for _ in range(2)]
-        env = environment.Environment(
-            environment.EnvironmentDescription(
-                network_type=self.network_type,
-                l2_pop=self.l2_pop),
-            host_descriptions)
-        super(TestConnectivitySameNetwork, self).setUp(env)
-
-    def test_connectivity(self):
-        tenant_uuid = uuidutils.generate_uuid()
-
-        network = self.safe_client.create_network(tenant_uuid)
-        self.safe_client.create_subnet(
-            tenant_uuid, network['id'], '20.0.0.0/24')
-
-        vms = [
-            self.useFixture(
-                machine.FakeFullstackMachine(
-                    self.environment.hosts[i],
-                    network['id'],
-                    tenant_uuid,
-                    self.safe_client))
-            for i in range(2)]
-
-        for vm in vms:
-            vm.block_until_boot()
-
-        vms[0].block_until_ping(vms[1].ip)
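testscenarios.multiply_scenarios crosses the two lists above, so this class runs 3 x 2 = 6 variants, and each scenario's dict entries become instance attributes (network_type, l2_pop, of_interface) before setUp executes. A small sketch of the mechanism; per the testscenarios documentation the combined names are joined with a comma:

    import testscenarios

    combined = testscenarios.multiply_scenarios(
        [('VXLAN', {'network_type': 'vxlan'})],
        [('Native', {'of_interface': 'native'})])
    # Expected: [('VXLAN,Native', {'network_type': 'vxlan',
    #                              'of_interface': 'native'})]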
diff --git a/neutron/tests/fullstack/test_l3_agent.py b/neutron/tests/fullstack/test_l3_agent.py
deleted file mode 100644 (file)
index 64bea30..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-from oslo_utils import uuidutils
-
-from neutron.agent.l3 import agent as l3_agent
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.tests.fullstack import base
-from neutron.tests.fullstack.resources import environment
-
-
-class TestLegacyL3Agent(base.BaseFullStackTestCase):
-
-    def setUp(self):
-        host_descriptions = [environment.HostDescription(l3_agent=True)]
-        env = environment.Environment(
-            environment.EnvironmentDescription(
-                network_type='vlan', l2_pop=False),
-            host_descriptions)
-        super(TestLegacyL3Agent, self).setUp(env)
-
-    def _get_namespace(self, router_id):
-        return namespaces.build_ns_name(l3_agent.NS_PREFIX, router_id)
-
-    def _assert_namespace_exists(self, ns_name):
-        ip = ip_lib.IPWrapper(ns_name)
-        utils.wait_until_true(lambda: ip.netns.exists(ns_name))
-
-    def test_namespace_exists(self):
-        tenant_id = uuidutils.generate_uuid()
-
-        router = self.safe_client.create_router(tenant_id)
-        network = self.safe_client.create_network(tenant_id)
-        subnet = self.safe_client.create_subnet(
-            tenant_id, network['id'], '20.0.0.0/24', gateway_ip='20.0.0.1')
-        self.safe_client.add_router_interface(router['id'], subnet['id'])
-
-        namespace = "%s@%s" % (
-            self._get_namespace(router['id']),
-            self.environment.hosts[0].l3_agent.get_namespace_suffix(), )
-        self._assert_namespace_exists(namespace)
-
-
-class TestHAL3Agent(base.BaseFullStackTestCase):
-
-    def setUp(self):
-        host_descriptions = [
-            environment.HostDescription(l3_agent=True) for _ in range(2)]
-        env = environment.Environment(
-            environment.EnvironmentDescription(
-                network_type='vxlan', l2_pop=True),
-            host_descriptions)
-        super(TestHAL3Agent, self).setUp(env)
-
-    def _is_ha_router_active_on_one_agent(self, router_id):
-        agents = self.client.list_l3_agent_hosting_routers(router_id)
-        return (
-            agents['agents'][0]['ha_state'] != agents['agents'][1]['ha_state'])
-
-    def test_ha_router(self):
-        # TODO(amuller): Test external connectivity before and after a
-        # failover, see: https://review.openstack.org/#/c/196393/
-
-        tenant_id = uuidutils.generate_uuid()
-        router = self.safe_client.create_router(tenant_id, ha=True)
-        agents = self.client.list_l3_agent_hosting_routers(router['id'])
-        self.assertEqual(2, len(agents['agents']),
-                         'HA router must be scheduled to both nodes')
-
-        utils.wait_until_true(
-            functools.partial(
-                self._is_ha_router_active_on_one_agent,
-                router['id']),
-            timeout=90)
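The namespace asserted in TestLegacyL3Agent combines the agent's usual router namespace (l3_agent.NS_PREFIX plus the router UUID, i.e. 'qrouter-<uuid>') with the per-test suffix injected through test_namespace_suffix. For example (UUID and suffix hypothetical):

    router_id = 'c0ffee00-0000-4000-8000-000000000001'
    namespace = 'qrouter-%s@%s' % (router_id, 'test123456')
    # -> 'qrouter-c0ffee00-0000-4000-8000-000000000001@test123456'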
diff --git a/neutron/tests/fullstack/test_qos.py b/neutron/tests/fullstack/test_qos.py
deleted file mode 100644 (file)
index 9e10336..0000000
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_utils import uuidutils
-
-from neutron.agent.linux import utils
-from neutron.services.qos import qos_consts
-from neutron.tests.fullstack import base
-from neutron.tests.fullstack.resources import environment
-from neutron.tests.fullstack.resources import machine
-
-from neutron.plugins.ml2.drivers.openvswitch.mech_driver import \
-    mech_openvswitch as mech_ovs
-
-
-BANDWIDTH_LIMIT = 500
-BANDWIDTH_BURST = 100
-
-
-def _wait_for_rule_applied(vm, limit, burst):
-    utils.wait_until_true(
-        lambda: vm.bridge.get_egress_bw_limit_for_port(
-            vm.port.name) == (limit, burst))
-
-
-def _wait_for_rule_removed(vm):
-    # (None, None) is returned when the port has no QoS policy
-    _wait_for_rule_applied(vm, None, None)
-
-
-class TestQoSWithOvsAgent(base.BaseFullStackTestCase):
-
-    def setUp(self):
-        host_desc = [environment.HostDescription(l3_agent=False)]
-        env_desc = environment.EnvironmentDescription(qos=True)
-        env = environment.Environment(env_desc, host_desc)
-        super(TestQoSWithOvsAgent, self).setUp(env)
-
-    def _create_qos_policy(self):
-        return self.safe_client.create_qos_policy(
-            self.tenant_id, 'fs_policy', 'Fullstack testing policy',
-            shared=False)
-
-    def _prepare_vm_with_qos_policy(self, limit, burst):
-        qos_policy = self._create_qos_policy()
-        qos_policy_id = qos_policy['id']
-
-        rule = self.safe_client.create_bandwidth_limit_rule(
-            self.tenant_id, qos_policy_id, limit, burst)
-        # Make it consistent with GET reply
-        qos_policy['rules'].append(rule)
-        rule['type'] = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
-        rule['qos_policy_id'] = qos_policy_id
-
-        port = self.safe_client.create_port(
-            self.tenant_id, self.network['id'],
-            self.environment.hosts[0].hostname,
-            qos_policy_id)
-
-        vm = self.useFixture(
-            machine.FakeFullstackMachine(
-                self.environment.hosts[0],
-                self.network['id'],
-                self.tenant_id,
-                self.safe_client,
-                neutron_port=port))
-
-        return vm, qos_policy
-
-    def test_qos_policy_rule_lifecycle(self):
-        new_limit = BANDWIDTH_LIMIT + 100
-        new_burst = BANDWIDTH_BURST + 50
-
-        self.tenant_id = uuidutils.generate_uuid()
-        self.network = self.safe_client.create_network(self.tenant_id,
-                                                       'network-test')
-        self.subnet = self.safe_client.create_subnet(
-            self.tenant_id, self.network['id'],
-            cidr='10.0.0.0/24',
-            gateway_ip='10.0.0.1',
-            name='subnet-test',
-            enable_dhcp=False)
-
-        # Create port with qos policy attached
-        vm, qos_policy = self._prepare_vm_with_qos_policy(BANDWIDTH_LIMIT,
-                                                          BANDWIDTH_BURST)
-        _wait_for_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
-        qos_policy_id = qos_policy['id']
-        rule = qos_policy['rules'][0]
-
-        # Remove rule from qos policy
-        self.client.delete_bandwidth_limit_rule(rule['id'], qos_policy_id)
-        _wait_for_rule_removed(vm)
-
-        # Create new rule
-        new_rule = self.safe_client.create_bandwidth_limit_rule(
-            self.tenant_id, qos_policy_id, new_limit, new_burst)
-        _wait_for_rule_applied(vm, new_limit, new_burst)
-
-        # Update qos policy rule id
-        self.client.update_bandwidth_limit_rule(
-            new_rule['id'], qos_policy_id,
-            body={'bandwidth_limit_rule': {'max_kbps': BANDWIDTH_LIMIT,
-                                           'max_burst_kbps': BANDWIDTH_BURST}})
-        _wait_for_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
-
-        # Remove qos policy from port
-        self.client.update_port(
-            vm.neutron_port['id'],
-            body={'port': {'qos_policy_id': None}})
-        _wait_for_rule_removed(vm)
-
-
-class TestQoSWithL2Population(base.BaseFullStackTestCase):
-
-    def setUp(self):
-        host_desc = []  # No need to register agents for this test case
-        env_desc = environment.EnvironmentDescription(qos=True, l2_pop=True)
-        env = environment.Environment(env_desc, host_desc)
-        super(TestQoSWithL2Population, self).setUp(env)
-
-    def test_supported_qos_rule_types(self):
-        res = self.client.list_qos_rule_types()
-        rule_types = {t['type'] for t in res['rule_types']}
-        expected_rules = (
-            set(mech_ovs.OpenvswitchMechanismDriver.supported_qos_rule_types))
-        self.assertEqual(expected_rules, rule_types)
diff --git a/neutron/tests/functional/__init__.py b/neutron/tests/functional/__init__.py
deleted file mode 100644 (file)
index a2a87e2..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-To save gate resources, test paths whose environmental requirements
-are similar to those of the functional path are marked here for
-discovery.
-"""
-
-import os.path
-
-
-def load_tests(loader, tests, pattern):
-    this_dir = os.path.dirname(__file__)
-    parent_dir = os.path.dirname(this_dir)
-    target_dirs = [
-        this_dir,
-        os.path.join(parent_dir, 'retargetable'),
-    ]
-    for start_dir in target_dirs:
-        new_tests = loader.discover(start_dir=start_dir, pattern=pattern)
-        tests.addTests(new_tests)
-    return tests
diff --git a/neutron/tests/functional/agent/__init__.py b/neutron/tests/functional/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/agent/l2/__init__.py b/neutron/tests/functional/agent/l2/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/agent/l2/base.py b/neutron/tests/functional/agent/l2/base.py
deleted file mode 100644 (file)
index c2db452..0000000
+++ /dev/null
@@ -1,317 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# Copyright (c) 2015 SUSE Linux Products GmbH
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import random
-
-import eventlet
-import mock
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from neutron.agent.common import config as agent_config
-from neutron.agent.common import ovs_lib
-from neutron.agent.l2.extensions import manager as ext_manager
-from neutron.agent.linux import interface
-from neutron.agent.linux import polling
-from neutron.agent.linux import utils as agent_utils
-from neutron.common import config as common_config
-from neutron.common import constants as n_const
-from neutron.common import utils
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import config \
-    as ovs_config
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import br_int
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import br_phys
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
-    import br_tun
-from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \
-    as ovs_agent
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.linux import base
-
-
-class OVSAgentTestFramework(base.BaseOVSLinuxTestCase):
-
-    def setUp(self):
-        super(OVSAgentTestFramework, self).setUp()
-        agent_rpc = ('neutron.plugins.ml2.drivers.openvswitch.agent.'
-                     'ovs_neutron_agent.OVSPluginApi')
-        mock.patch(agent_rpc).start()
-        mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
-        self.br_int = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN,
-                                         prefix='br-int')
-        self.br_tun = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN,
-                                         prefix='br-tun')
-        self.br_phys = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN,
-                                          prefix='br-phys')
-        patch_name_len = n_const.DEVICE_NAME_MAX_LEN - len("-patch-tun")
-        self.patch_tun = "%s-patch-tun" % self.br_int[patch_name_len:]
-        self.patch_int = "%s-patch-int" % self.br_tun[patch_name_len:]
-        self.ovs = ovs_lib.BaseOVS()
-        self.config = self._configure_agent()
-        self.driver = interface.OVSInterfaceDriver(self.config)
-        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
-
-    def _get_config_opts(self):
-        config = cfg.ConfigOpts()
-        config.register_opts(common_config.core_opts)
-        config.register_opts(interface.OPTS)
-        config.register_opts(ovs_config.ovs_opts, "OVS")
-        config.register_opts(ovs_config.agent_opts, "AGENT")
-        agent_config.register_interface_driver_opts_helper(config)
-        agent_config.register_agent_state_opts_helper(config)
-        ext_manager.register_opts(config)
-        return config
-
-    def _configure_agent(self):
-        config = self._get_config_opts()
-        config.set_override(
-            'interface_driver',
-            'neutron.agent.linux.interface.OVSInterfaceDriver')
-        config.set_override('integration_bridge', self.br_int, "OVS")
-        config.set_override('ovs_integration_bridge', self.br_int)
-        config.set_override('tunnel_bridge', self.br_tun, "OVS")
-        config.set_override('int_peer_patch_port', self.patch_tun, "OVS")
-        config.set_override('tun_peer_patch_port', self.patch_int, "OVS")
-        config.set_override('host', 'ovs-agent')
-        return config
-
-    def _bridge_classes(self):
-        return {
-            'br_int': br_int.OVSIntegrationBridge,
-            'br_phys': br_phys.OVSPhysicalBridge,
-            'br_tun': br_tun.OVSTunnelBridge
-        }
-
-    def create_agent(self, create_tunnels=True):
-        if create_tunnels:
-            tunnel_types = [p_const.TYPE_VXLAN]
-        else:
-            tunnel_types = None
-        bridge_mappings = ['physnet:%s' % self.br_phys]
-        self.config.set_override('tunnel_types', tunnel_types, "AGENT")
-        self.config.set_override('polling_interval', 1, "AGENT")
-        self.config.set_override('prevent_arp_spoofing', False, "AGENT")
-        self.config.set_override('local_ip', '192.168.10.1', "OVS")
-        self.config.set_override('bridge_mappings', bridge_mappings, "OVS")
-        # Physical bridges should be created before the agent starts.
-        self._bridge_classes()['br_phys'](self.br_phys).create()
-        agent = ovs_agent.OVSNeutronAgent(self._bridge_classes(),
-                                          self.config)
-        self.addCleanup(self.ovs.delete_bridge, self.br_int)
-        if tunnel_types:
-            self.addCleanup(self.ovs.delete_bridge, self.br_tun)
-        self.addCleanup(self.ovs.delete_bridge, self.br_phys)
-        agent.sg_agent = mock.Mock()
-        agent.ancillary_brs = []
-        return agent
-
-    def _mock_get_events(self, agent, polling_manager, ports):
-        get_events = polling_manager.get_events
-        p_ids = [p['id'] for p in ports]
-
-        def filter_events():
-            events = get_events()
-            filtered_ports = []
-            for dev in events['added']:
-                iface_id = agent.int_br.portid_from_external_ids(
-                    dev.get('external_ids', []))
-                if iface_id in p_ids:
-                    # if the event is not about a port that was created by
-                    # this test, we filter the event out. Since these tests are
-                    # not run in isolation processing all the events might make
-                    # some test fail ( e.g. the agent might keep resycing
-                    # because it keeps finding not ready ports that are created
-                    # by other tests)
-                    filtered_ports.append(dev)
-            return {'added': filtered_ports, 'removed': events['removed']}
-        polling_manager.get_events = mock.Mock(side_effect=filter_events)
-
-    def start_agent(self, agent, ports=None, unplug_ports=None):
-        if unplug_ports is None:
-            unplug_ports = []
-        if ports is None:
-            ports = []
-        self.setup_agent_rpc_mocks(agent, unplug_ports)
-        polling_manager = polling.InterfacePollingMinimizer()
-        self._mock_get_events(agent, polling_manager, ports)
-        self.addCleanup(polling_manager.stop)
-        polling_manager.start()
-        agent_utils.wait_until_true(
-            polling_manager._monitor.is_active)
-        agent.check_ovs_status = mock.Mock(
-            return_value=constants.OVS_NORMAL)
-        t = eventlet.spawn(agent.rpc_loop, polling_manager)
-
-        def stop_agent(agent, rpc_loop_thread):
-            agent.run_daemon_loop = False
-            rpc_loop_thread.wait()
-
-        self.addCleanup(stop_agent, agent, t)
-        return polling_manager
-
-    def _create_test_port_dict(self):
-        return {'id': uuidutils.generate_uuid(),
-                'mac_address': utils.get_random_mac(
-                    'fa:16:3e:00:00:00'.split(':')),
-                'fixed_ips': [{
-                    'ip_address': '10.%d.%d.%d' % (
-                         random.randint(3, 254),
-                         random.randint(3, 254),
-                         random.randint(3, 254))}],
-                'vif_name': base.get_rand_name(
-                    self.driver.DEV_NAME_LEN, self.driver.DEV_NAME_PREFIX)}
-
-    def _create_test_network_dict(self):
-        return {'id': uuidutils.generate_uuid(),
-                'tenant_id': uuidutils.generate_uuid()}
-
-    def _plug_ports(self, network, ports, agent, ip_len=24):
-        for port in ports:
-            self.driver.plug(
-                network.get('id'), port.get('id'), port.get('vif_name'),
-                port.get('mac_address'),
-                agent.int_br.br_name, namespace=self.namespace)
-            ip_cidrs = ["%s/%s" % (port.get('fixed_ips')[0][
-                'ip_address'], ip_len)]
-            self.driver.init_l3(port.get('vif_name'), ip_cidrs,
-                                namespace=self.namespace)
-
-    def _unplug_ports(self, ports, agent):
-        for port in ports:
-            self.driver.unplug(
-                port.get('vif_name'), agent.int_br.br_name, self.namespace)
-
-    def _get_device_details(self, port, network):
-        dev = {'device': port['id'],
-               'port_id': port['id'],
-               'network_id': network['id'],
-               'network_type': 'vlan',
-               'physical_network': 'physnet',
-               'segmentation_id': 1,
-               'fixed_ips': port['fixed_ips'],
-               'device_owner': 'compute',
-               'port_security_enabled': True,
-               'security_groups': ['default'],
-               'admin_state_up': True}
-        return dev
-
-    def assert_bridge(self, br, exists=True):
-        self.assertEqual(exists, self.ovs.bridge_exists(br))
-
-    def assert_patch_ports(self, agent):
-
-        def get_peer(port):
-            return agent.int_br.db_get_val(
-                'Interface', port, 'options', check_error=True)
-
-        agent_utils.wait_until_true(
-            lambda: get_peer(self.patch_int) == {'peer': self.patch_tun})
-        agent_utils.wait_until_true(
-            lambda: get_peer(self.patch_tun) == {'peer': self.patch_int})
-
-    def assert_bridge_ports(self):
-        for port in [self.patch_tun, self.patch_int]:
-            self.assertTrue(self.ovs.port_exists(port))
-
-    def assert_vlan_tags(self, ports, agent):
-        for port in ports:
-            res = agent.int_br.db_get_val('Port', port.get('vif_name'), 'tag')
-            self.assertTrue(res)
-
-    def _expected_plugin_rpc_call(self, call, expected_devices, is_up=True):
-        """Helper to check expected rpc call are received
-
-        :param call: The call to check
-        :param expected_devices: The device for which call is expected
-        :param is_up: True if expected_devices are devices that are set up,
-               False if expected_devices are devices that are set down
-        """
-        if is_up:
-            rpc_devices = [
-                dev for args in call.call_args_list for dev in args[0][1]]
-        else:
-            rpc_devices = [
-                dev for args in call.call_args_list for dev in args[0][2]]
-        return not (set(expected_devices) - set(rpc_devices))
-
-    def create_test_ports(self, amount=3, **kwargs):
-        ports = []
-        for x in range(amount):
-            ports.append(self._create_test_port_dict(**kwargs))
-        return ports
-
-    def _mock_update_device(self, context, devices_up, devices_down, agent_id,
-                            host=None):
-        dev_up = []
-        dev_down = []
-        for port in self.ports:
-            if devices_up and port['id'] in devices_up:
-                dev_up.append(port['id'])
-            if devices_down and port['id'] in devices_down:
-                dev_down.append({'device': port['id'], 'exists': True})
-        return {'devices_up': dev_up,
-                'failed_devices_up': [],
-                'devices_down': dev_down,
-                'failed_devices_down': []}
-
-    def setup_agent_rpc_mocks(self, agent, unplug_ports):
-        def mock_device_details(context, devices, agent_id, host=None):
-            details = []
-            for port in self.ports:
-                if port['id'] in devices:
-                    dev = self._get_device_details(
-                        port, self.network)
-                    details.append(dev)
-            ports_to_unplug = [x for x in unplug_ports if x['id'] in devices]
-            if ports_to_unplug:
-                self._unplug_ports(ports_to_unplug, self.agent)
-            return {'devices': details, 'failed_devices': []}
-
-        (agent.plugin_rpc.get_devices_details_list_and_failed_devices.
-            side_effect) = mock_device_details
-        agent.plugin_rpc.update_device_list.side_effect = (
-            self._mock_update_device)
-
-    def _prepare_resync_trigger(self, agent):
-        def mock_device_raise_exception(context, devices_up, devices_down,
-                                        agent_id, host=None):
-            agent.plugin_rpc.update_device_list.side_effect = (
-                self._mock_update_device)
-            raise Exception('Exception to trigger resync')
-
-        self.agent.plugin_rpc.update_device_list.side_effect = (
-            mock_device_raise_exception)
-
-    def wait_until_ports_state(self, ports, up, timeout=60):
-        port_ids = [p['id'] for p in ports]
-        agent_utils.wait_until_true(
-            lambda: self._expected_plugin_rpc_call(
-                self.agent.plugin_rpc.update_device_list, port_ids, up),
-            timeout=timeout)
-
-    def setup_agent_and_ports(self, port_dicts, create_tunnels=True,
-                              trigger_resync=False):
-        self.ports = port_dicts
-        self.agent = self.create_agent(create_tunnels=create_tunnels)
-        self.polling_manager = self.start_agent(self.agent, ports=self.ports)
-        self.network = self._create_test_network_dict()
-        if trigger_resync:
-            self._prepare_resync_trigger(self.agent)
-        self._plug_ports(self.network, self.ports, self.agent)
diff --git a/neutron/tests/functional/agent/l2/extensions/__init__.py b/neutron/tests/functional/agent/l2/extensions/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py b/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py
deleted file mode 100644 (file)
index 9b73e73..0000000
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.api.rpc.callbacks.consumer import registry as consumer_reg
-from neutron.api.rpc.callbacks import events
-from neutron.api.rpc.callbacks import resources
-from neutron.objects.qos import policy
-from neutron.objects.qos import rule
-from neutron.tests.common.agents import l2_extensions
-from neutron.tests.functional.agent.l2 import base
-
-
-TEST_POLICY_ID1 = "a2d72369-4246-4f19-bd3c-af51ec8d70cd"
-TEST_POLICY_ID2 = "46ebaec0-0570-43ac-82f6-60d2b03168c5"
-TEST_BW_LIMIT_RULE_1 = rule.QosBandwidthLimitRule(
-        context=None,
-        qos_policy_id=TEST_POLICY_ID1,
-        id="5f126d84-551a-4dcf-bb01-0e9c0df0c793",
-        max_kbps=1000,
-        max_burst_kbps=10)
-TEST_BW_LIMIT_RULE_2 = rule.QosBandwidthLimitRule(
-        context=None,
-        qos_policy_id=TEST_POLICY_ID2,
-        id="fa9128d9-44af-49b2-99bb-96548378ad42",
-        max_kbps=900,
-        max_burst_kbps=9)
-
-
-class OVSAgentQoSExtensionTestFramework(base.OVSAgentTestFramework):
-    def setUp(self):
-        super(OVSAgentQoSExtensionTestFramework, self).setUp()
-        self.config.set_override('extensions', ['qos'], 'agent')
-        self._set_pull_mock()
-        self.set_test_qos_rules(TEST_POLICY_ID1, [TEST_BW_LIMIT_RULE_1])
-        self.set_test_qos_rules(TEST_POLICY_ID2, [TEST_BW_LIMIT_RULE_2])
-
-    def _set_pull_mock(self):
-
-        self.qos_policies = {}
-
-        def _pull_mock(context, resource_type, resource_id):
-            return self.qos_policies[resource_id]
-
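-        # The qos agent extension pulls QosPolicy objects over RPC; this
-        # mock serves them straight from the local self.qos_policies dict
-        # instead of a neutron server.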
-        self.pull = mock.patch(
-            'neutron.api.rpc.handlers.resources_rpc.'
-            'ResourcesPullRpcApi.pull').start()
-        self.pull.side_effect = _pull_mock
-
-    def set_test_qos_rules(self, policy_id, policy_rules):
-        """This function sets the policy test rules to be exposed."""
-
-        qos_policy = policy.QosPolicy(
-            context=None,
-            tenant_id=uuidutils.generate_uuid(),
-            id=policy_id,
-            name="Test Policy Name",
-            description="This is a policy for testing purposes",
-            shared=False,
-            rules=policy_rules)
-
-        qos_policy.obj_reset_changes()
-        self.qos_policies[policy_id] = qos_policy
-
-    def _create_test_port_dict(self, policy_id=None):
-        port_dict = super(OVSAgentQoSExtensionTestFramework,
-                          self)._create_test_port_dict()
-        port_dict['qos_policy_id'] = policy_id
-        port_dict['network_qos_policy_id'] = None
-        return port_dict
-
-    def _get_device_details(self, port, network):
-        dev = super(OVSAgentQoSExtensionTestFramework,
-                    self)._get_device_details(port, network)
-        dev['qos_policy_id'] = port['qos_policy_id']
-        return dev
-
-    def _assert_bandwidth_limit_rule_is_set(self, port, rule):
-        max_rate, burst = (
-            self.agent.int_br.get_egress_bw_limit_for_port(port['vif_name']))
-        self.assertEqual(max_rate, rule.max_kbps)
-        self.assertEqual(burst, rule.max_burst_kbps)
-
-    def _assert_bandwidth_limit_rule_not_set(self, port):
-        max_rate, burst = (
-            self.agent.int_br.get_egress_bw_limit_for_port(port['vif_name']))
-        self.assertIsNone(max_rate)
-        self.assertIsNone(burst)
-
-    def wait_until_bandwidth_limit_rule_applied(self, port, rule):
-        l2_extensions.wait_until_bandwidth_limit_rule_applied(
-            self.agent.int_br, port['vif_name'], rule)
-
-    def _create_port_with_qos(self):
-        port_dict = self._create_test_port_dict()
-        port_dict['qos_policy_id'] = TEST_POLICY_ID1
-        self.setup_agent_and_ports([port_dict])
-        self.wait_until_ports_state(self.ports, up=True)
-        self.wait_until_bandwidth_limit_rule_applied(port_dict,
-                                                     TEST_BW_LIMIT_RULE_1)
-        return port_dict
-
-
-class TestOVSAgentQosExtension(OVSAgentQoSExtensionTestFramework):
-
-    def test_port_creation_with_bandwidth_limit(self):
-        """Make sure bandwidth limit rules are set in low level to ports."""
-
-        self.setup_agent_and_ports(
-            port_dicts=self.create_test_ports(amount=1,
-                                              policy_id=TEST_POLICY_ID1))
-        self.wait_until_ports_state(self.ports, up=True)
-
-        for port in self.ports:
-            self._assert_bandwidth_limit_rule_is_set(
-                port, TEST_BW_LIMIT_RULE_1)
-
-    def test_port_creation_with_different_bandwidth_limits(self):
-        """Make sure different types of policies end on the right ports."""
-
-        port_dicts = self.create_test_ports(amount=3)
-
-        port_dicts[0]['qos_policy_id'] = TEST_POLICY_ID1
-        port_dicts[1]['qos_policy_id'] = TEST_POLICY_ID2
-
-        self.setup_agent_and_ports(port_dicts)
-        self.wait_until_ports_state(self.ports, up=True)
-
-        self._assert_bandwidth_limit_rule_is_set(self.ports[0],
-                                                 TEST_BW_LIMIT_RULE_1)
-
-        self._assert_bandwidth_limit_rule_is_set(self.ports[1],
-                                                 TEST_BW_LIMIT_RULE_2)
-
-        self._assert_bandwidth_limit_rule_not_set(self.ports[2])
-
-    def test_simple_port_policy_update(self):
-        self.setup_agent_and_ports(
-            port_dicts=self.create_test_ports(amount=1,
-                                              policy_id=TEST_POLICY_ID1))
-        self.wait_until_ports_state(self.ports, up=True)
-        policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1])
-        policy_copy.rules[0].max_kbps = 500
-        policy_copy.rules[0].max_burst_kbps = 5
-        consumer_reg.push(resources.QOS_POLICY, policy_copy, events.UPDATED)
-        self.wait_until_bandwidth_limit_rule_applied(self.ports[0],
-                                                     policy_copy.rules[0])
-        self._assert_bandwidth_limit_rule_is_set(self.ports[0],
-                                                 policy_copy.rules[0])
-
-    def test_port_qos_disassociation(self):
-        """Test that qos_policy_id set to None will remove all qos rules from
-           given port.
-        """
-        port_dict = self._create_port_with_qos()
-
-        port_dict['qos_policy_id'] = None
-        self.agent.port_update(None, port=port_dict)
-
-        self.wait_until_bandwidth_limit_rule_applied(port_dict, None)
-
-    def test_port_qos_update_policy_id(self):
-        """Test that change of qos policy id on given port refreshes all its
-           rules.
-        """
-        port_dict = self._create_port_with_qos()
-
-        port_dict['qos_policy_id'] = TEST_POLICY_ID2
-        self.agent.port_update(None, port=port_dict)
-
-        self.wait_until_bandwidth_limit_rule_applied(port_dict,
-                                                     TEST_BW_LIMIT_RULE_2)
-
-    def test_policy_rule_delete(self):
-        port_dict = self._create_port_with_qos()
-
-        policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1])
-        policy_copy.rules = list()
-        consumer_reg.push(resources.QOS_POLICY, policy_copy, events.UPDATED)
-
-        self.wait_until_bandwidth_limit_rule_applied(port_dict, None)
diff --git a/neutron/tests/functional/agent/l3/__init__.py b/neutron/tests/functional/agent/l3/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/agent/l3/framework.py b/neutron/tests/functional/agent/l3/framework.py
deleted file mode 100644 (file)
index bbe7cfb..0000000
+++ /dev/null
@@ -1,484 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-import mock
-import netaddr
-import testtools
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-
-from neutron.agent.common import config as agent_config
-from neutron.agent.common import ovs_lib
-from neutron.agent.l3 import agent as neutron_l3_agent
-from neutron.agent import l3_agent as l3_agent_main
-from neutron.agent.linux import external_process
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import config as common_config
-from neutron.common import constants as l3_constants
-from neutron.common import utils as common_utils
-from neutron.tests.common import l3_test_common
-from neutron.tests.common import net_helpers
-from neutron.tests.functional import base
-
-
-_uuid = uuidutils.generate_uuid
-
-
-def get_ovs_bridge(br_name):
-    return ovs_lib.OVSBridge(br_name)
-
-
-class L3AgentTestFramework(base.BaseSudoTestCase):
-    def setUp(self):
-        super(L3AgentTestFramework, self).setUp()
-        self.mock_plugin_api = mock.patch(
-            'neutron.agent.l3.agent.L3PluginApi').start().return_value
-        mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
-        self.conf = self._configure_agent('agent1')
-        self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1',
-                                                                self.conf)
-
-    def _get_config_opts(self):
-        config = cfg.ConfigOpts()
-        config.register_opts(common_config.core_opts)
-        config.register_opts(common_config.core_cli_opts)
-        logging.register_options(config)
-        agent_config.register_process_monitor_opts(config)
-        return config
-
-    def _configure_agent(self, host, agent_mode='dvr_snat'):
-        conf = self._get_config_opts()
-        l3_agent_main.register_opts(conf)
-        conf.set_override(
-            'interface_driver',
-            'neutron.agent.linux.interface.OVSInterfaceDriver')
-
-        br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
-        br_ex = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
-        conf.set_override('ovs_integration_bridge', br_int.br_name)
-        conf.set_override('external_network_bridge', br_ex.br_name)
-
-        temp_dir = self.get_new_temp_dir()
-        get_temp_file_path = functools.partial(self.get_temp_file_path,
-                                               root=temp_dir)
-        conf.set_override('state_path', temp_dir.path)
-        # NOTE(cbrandily): log_file or log_dir must be set otherwise
-        # metadata_proxy_watch_log has no effect
-        conf.set_override('log_file',
-                          get_temp_file_path('log_file'))
-        conf.set_override('metadata_proxy_socket',
-                          get_temp_file_path('metadata_proxy'))
-        conf.set_override('ha_confs_path',
-                          get_temp_file_path('ha_confs'))
-        conf.set_override('external_pids',
-                          get_temp_file_path('external/pids'))
-        conf.set_override('host', host)
-        conf.set_override('agent_mode', agent_mode)
-
-        return conf
-
-    def _get_agent_ovs_integration_bridge(self, agent):
-        return get_ovs_bridge(agent.conf.ovs_integration_bridge)
-
-    def generate_router_info(self, enable_ha, ip_version=4, extra_routes=True,
-                             enable_fip=True, enable_snat=True,
-                             dual_stack=False, v6_ext_gw_with_sub=True):
-        if ip_version == 6 and not dual_stack:
-            enable_snat = False
-            enable_fip = False
-            extra_routes = False
-
-        return l3_test_common.prepare_router_data(ip_version=ip_version,
-                                                 enable_snat=enable_snat,
-                                                 enable_floating_ip=enable_fip,
-                                                 enable_ha=enable_ha,
-                                                 extra_routes=extra_routes,
-                                                 dual_stack=dual_stack,
-                                                 v6_ext_gw_with_sub=(
-                                                     v6_ext_gw_with_sub))
-
-    def _test_conntrack_disassociate_fip(self, ha):
-        '''Test that conntrack immediately drops a stateful connection
-           using a floating IP once that IP is disassociated.
-        '''
-        router_info = self.generate_router_info(enable_ha=ha)
-        router = self.manage_router(self.agent, router_info)
-
-        port = net_helpers.get_free_namespace_port(l3_constants.PROTO_NAME_TCP,
-                                                   router.ns_name)
-        client_address = '19.4.4.3'
-        server_address = '35.4.0.4'
-
-        def clean_fips(router):
-            router.router[l3_constants.FLOATINGIP_KEY] = []
-
-        clean_fips(router)
-        self._add_fip(router, client_address, fixed_address=server_address)
-        router.process(self.agent)
-
-        router_ns = ip_lib.IPWrapper(namespace=router.ns_name)
-        netcat = net_helpers.NetcatTester(
-            router.ns_name, router.ns_name, client_address, port,
-            protocol=net_helpers.NetcatTester.TCP)
-        self.addCleanup(netcat.stop_processes)
-
-        def assert_num_of_conntrack_rules(n):
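-            # 'conntrack -L --orig-src <ip>' lists the flow entries whose
-            # original source address matches; one entry per line.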
-            out = router_ns.netns.execute(["conntrack", "-L",
-                                           "--orig-src", client_address])
-            self.assertEqual(
-                n, len([line for line in out.strip().split('\n') if line]))
-
-        if ha:
-            utils.wait_until_true(lambda: router.ha_state == 'master')
-
-        with self.assert_max_execution_time(100):
-            assert_num_of_conntrack_rules(0)
-
-            self.assertTrue(netcat.test_connectivity())
-            assert_num_of_conntrack_rules(1)
-
-            clean_fips(router)
-            router.process(self.agent)
-            assert_num_of_conntrack_rules(0)
-
-            with testtools.ExpectedException(RuntimeError):
-                netcat.test_connectivity()
-
-    def _gateway_check(self, gateway_ip, external_device):
-        expected_gateway = gateway_ip
-        ip_vers = netaddr.IPAddress(expected_gateway).version
-        existing_gateway = (external_device.route.get_gateway(
-            ip_version=ip_vers).get('gateway'))
-        self.assertEqual(expected_gateway, existing_gateway)
-
-    def _assert_ha_device(self, router):
-        def ha_router_dev_name_getter(not_used):
-            return router.get_ha_device_name()
-        self.assertTrue(self.device_exists_with_ips_and_mac(
-            router.router[l3_constants.HA_INTERFACE_KEY],
-            ha_router_dev_name_getter, router.ns_name))
-
-    def _assert_gateway(self, router, v6_ext_gw_with_sub=True):
-        external_port = router.get_ex_gw_port()
-        external_device_name = router.get_external_device_name(
-            external_port['id'])
-        external_device = ip_lib.IPDevice(external_device_name,
-                                          namespace=router.ns_name)
-        for subnet in external_port['subnets']:
-            self._gateway_check(subnet['gateway_ip'], external_device)
-        if not v6_ext_gw_with_sub:
-            self._gateway_check(self.agent.conf.ipv6_gateway,
-                                external_device)
-
-    def _assert_external_device(self, router):
-        external_port = router.get_ex_gw_port()
-        self.assertTrue(self.device_exists_with_ips_and_mac(
-            external_port, router.get_external_device_name,
-            router.ns_name))
-
-    def _router_lifecycle(self, enable_ha, ip_version=4,
-                          dual_stack=False, v6_ext_gw_with_sub=True):
-        router_info = self.generate_router_info(enable_ha, ip_version,
-                                                dual_stack=dual_stack,
-                                                v6_ext_gw_with_sub=(
-                                                    v6_ext_gw_with_sub))
-        router = self.manage_router(self.agent, router_info)
-
-        # Add multiple-IPv6-prefix internal router port
-        slaac = l3_constants.IPV6_SLAAC
-        slaac_mode = {'ra_mode': slaac, 'address_mode': slaac}
-        subnet_modes = [slaac_mode] * 2
-        self._add_internal_interface_by_subnet(router.router,
-                                               count=2,
-                                               ip_version=6,
-                                               ipv6_subnet_modes=subnet_modes)
-        router.process(self.agent)
-
-        if enable_ha:
-            port = router.get_ex_gw_port()
-            interface_name = router.get_external_device_name(port['id'])
-            self._assert_no_ip_addresses_on_interface(router.ns_name,
-                                                      interface_name)
-            utils.wait_until_true(lambda: router.ha_state == 'master')
-
-            # Keepalived notifies of a state transition when it starts,
-            # not when it ends. Thus, we have to wait until keepalived finishes
-            # configuring everything. We verify this by waiting until the last
-            # device has an IP address.
-            device = router.router[l3_constants.INTERFACE_KEY][-1]
-            device_exists = functools.partial(
-                self.device_exists_with_ips_and_mac,
-                device,
-                router.get_internal_device_name,
-                router.ns_name)
-            utils.wait_until_true(device_exists)
-
-        self.assertTrue(self._namespace_exists(router.ns_name))
-        utils.wait_until_true(
-            lambda: self._metadata_proxy_exists(self.agent.conf, router))
-        self._assert_internal_devices(router)
-        self._assert_external_device(router)
-        if not (enable_ha and (ip_version == 6 or dual_stack)):
-            # Note(SridharG): enable the assert_gateway for IPv6 once
-            # keepalived on Ubuntu14.04 (i.e., check-neutron-dsvm-functional
-            # platform) is updated to 1.2.10 (or above).
-            # For more details: https://review.openstack.org/#/c/151284/
-            self._assert_gateway(router, v6_ext_gw_with_sub)
-            self.assertTrue(self.floating_ips_configured(router))
-            self._assert_snat_chains(router)
-            self._assert_floating_ip_chains(router)
-            self._assert_extra_routes(router)
-            ip_versions = [4, 6] if (ip_version == 6 or dual_stack) else [4]
-            self._assert_onlink_subnet_routes(router, ip_versions)
-        self._assert_metadata_chains(router)
-
-        # Verify that the router gateway interface is configured to receive
-        # Router Advertisements when IPv6 is enabled and no IPv6 gateway is
-        # configured.
-        if router.use_ipv6 and not v6_ext_gw_with_sub:
-            if not self.agent.conf.ipv6_gateway:
-                external_port = router.get_ex_gw_port()
-                external_device_name = router.get_external_device_name(
-                    external_port['id'])
-                ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name)
-                ra_state = ip_wrapper.netns.execute(['sysctl', '-b',
-                    'net.ipv6.conf.%s.accept_ra' % external_device_name])
-                self.assertEqual('2', ra_state)
-
-        if enable_ha:
-            self._assert_ha_device(router)
-            self.assertTrue(router.keepalived_manager.get_process().active)
-
-        self._delete_router(self.agent, router.router_id)
-
-        self._assert_interfaces_deleted_from_ovs()
-        self._assert_router_does_not_exist(router)
-        if enable_ha:
-            self.assertFalse(router.keepalived_manager.get_process().active)
-
-    def manage_router(self, agent, router):
-        self.addCleanup(agent._safe_router_removed, router['id'])
-        agent._process_added_router(router)
-        return agent.router_info[router['id']]
-
-    def _delete_router(self, agent, router_id):
-        agent._router_removed(router_id)
-
-    def _add_fip(self, router, fip_address, fixed_address='10.0.0.2',
-                 host=None):
-        fip = {'id': _uuid(),
-               'port_id': _uuid(),
-               'floating_ip_address': fip_address,
-               'fixed_ip_address': fixed_address,
-               'host': host}
-        router.router[l3_constants.FLOATINGIP_KEY].append(fip)
-
-    def _add_internal_interface_by_subnet(self, router, count=1,
-                                          ip_version=4,
-                                          ipv6_subnet_modes=None,
-                                          interface_id=None):
-        return l3_test_common.router_append_subnet(router, count,
-                ip_version, ipv6_subnet_modes, interface_id)
-
-    def _namespace_exists(self, namespace):
-        ip = ip_lib.IPWrapper(namespace=namespace)
-        return ip.netns.exists(namespace)
-
-    def _metadata_proxy_exists(self, conf, router):
-        pm = external_process.ProcessManager(
-            conf,
-            router.router_id,
-            router.ns_name)
-        return pm.active
-
-    def device_exists_with_ips_and_mac(self, expected_device, name_getter,
-                                       namespace):
-        ip_cidrs = common_utils.fixed_ip_cidrs(expected_device['fixed_ips'])
-        return ip_lib.device_exists_with_ips_and_mac(
-            name_getter(expected_device['id']), ip_cidrs,
-            expected_device['mac_address'], namespace)
-
-    @staticmethod
-    def _port_first_ip_cidr(port):
-        fixed_ip = port['fixed_ips'][0]
-        return common_utils.ip_to_cidr(fixed_ip['ip_address'],
-                                       fixed_ip['prefixlen'])
-
-    def get_device_mtu(self, target_device, name_getter, namespace):
-        device = ip_lib.IPDevice(name_getter(target_device), namespace)
-        return device.link.mtu
-
-    def get_expected_keepalive_configuration(self, router):
-        ha_device_name = router.get_ha_device_name()
-        external_port = router.get_ex_gw_port()
-        ex_port_ipv6 = ip_lib.get_ipv6_lladdr(external_port['mac_address'])
-        external_device_name = router.get_external_device_name(
-            external_port['id'])
-        external_device_cidr = self._port_first_ip_cidr(external_port)
-        internal_port = router.router[l3_constants.INTERFACE_KEY][0]
-        int_port_ipv6 = ip_lib.get_ipv6_lladdr(internal_port['mac_address'])
-        internal_device_name = router.get_internal_device_name(
-            internal_port['id'])
-        internal_device_cidr = self._port_first_ip_cidr(internal_port)
-        floating_ip_cidr = common_utils.ip_to_cidr(
-            router.get_floating_ips()[0]['floating_ip_address'])
-        default_gateway_ip = external_port['subnets'][0].get('gateway_ip')
-        extra_subnet_cidr = external_port['extra_subnets'][0].get('cidr')
-        return """vrrp_instance VR_1 {
-    state BACKUP
-    interface %(ha_device_name)s
-    virtual_router_id 1
-    priority 50
-    garp_master_repeat 5
-    garp_master_refresh 10
-    nopreempt
-    advert_int 2
-    track_interface {
-        %(ha_device_name)s
-    }
-    virtual_ipaddress {
-        169.254.0.1/24 dev %(ha_device_name)s
-    }
-    virtual_ipaddress_excluded {
-        %(floating_ip_cidr)s dev %(external_device_name)s
-        %(external_device_cidr)s dev %(external_device_name)s
-        %(internal_device_cidr)s dev %(internal_device_name)s
-        %(ex_port_ipv6)s dev %(external_device_name)s scope link
-        %(int_port_ipv6)s dev %(internal_device_name)s scope link
-    }
-    virtual_routes {
-        0.0.0.0/0 via %(default_gateway_ip)s dev %(external_device_name)s
-        8.8.8.0/24 via 19.4.4.4
-        %(extra_subnet_cidr)s dev %(external_device_name)s scope link
-    }
-}""" % {
-            'ha_device_name': ha_device_name,
-            'external_device_name': external_device_name,
-            'external_device_cidr': external_device_cidr,
-            'internal_device_name': internal_device_name,
-            'internal_device_cidr': internal_device_cidr,
-            'floating_ip_cidr': floating_ip_cidr,
-            'default_gateway_ip': default_gateway_ip,
-            'int_port_ipv6': int_port_ipv6,
-            'ex_port_ipv6': ex_port_ipv6,
-            'extra_subnet_cidr': extra_subnet_cidr,
-        }
-
-    def _get_rule(self, iptables_manager, table, chain, predicate):
-        rules = iptables_manager.get_chain(table, chain)
-        result = next(rule for rule in rules if predicate(rule))
-        return result
-
-    def _assert_router_does_not_exist(self, router):
-        # If the namespace assertion succeeds
-        # then the devices and iptable rules have also been deleted,
-        # so there's no need to check that explicitly.
-        self.assertFalse(self._namespace_exists(router.ns_name))
-        utils.wait_until_true(
-            lambda: not self._metadata_proxy_exists(self.agent.conf, router))
-
-    def _assert_snat_chains(self, router):
-        self.assertFalse(router.iptables_manager.is_chain_empty(
-            'nat', 'snat'))
-        self.assertFalse(router.iptables_manager.is_chain_empty(
-            'nat', 'POSTROUTING'))
-
-    def _assert_floating_ip_chains(self, router):
-        self.assertFalse(router.iptables_manager.is_chain_empty(
-            'nat', 'float-snat'))
-
-    def _assert_metadata_chains(self, router):
-        def metadata_port_filter(rule):
-            return str(self.agent.conf.metadata_port) in rule.rule
-        self.assertTrue(self._get_rule(router.iptables_manager,
-                                       'nat',
-                                       'PREROUTING',
-                                       metadata_port_filter))
-        self.assertTrue(self._get_rule(router.iptables_manager,
-                                       'filter',
-                                       'INPUT',
-                                       metadata_port_filter))
-
-    def _assert_internal_devices(self, router):
-        internal_devices = router.router[l3_constants.INTERFACE_KEY]
-        self.assertTrue(len(internal_devices))
-        for device in internal_devices:
-            self.assertTrue(self.device_exists_with_ips_and_mac(
-                device, router.get_internal_device_name, router.ns_name))
-
-    def _assert_extra_routes(self, router, namespace=None):
-        if namespace is None:
-            namespace = router.ns_name
-        routes = ip_lib.get_routing_table(4, namespace=namespace)
-        routes = [{'nexthop': route['nexthop'],
-                   'destination': route['destination']} for route in routes]
-
-        for extra_route in router.router['routes']:
-            self.assertIn(extra_route, routes)
-
-    def _assert_onlink_subnet_routes(
-            self, router, ip_versions, namespace=None):
-        ns_name = namespace or router.ns_name
-        routes = []
-        for ip_version in ip_versions:
-            _routes = ip_lib.get_routing_table(ip_version,
-                                               namespace=ns_name)
-            routes.extend(_routes)
-        routes = set(route['destination'] for route in routes)
-        extra_subnets = router.get_ex_gw_port()['extra_subnets']
-        for extra_subnet in (route['cidr'] for route in extra_subnets):
-            self.assertIn(extra_subnet, routes)
-
-    def _assert_interfaces_deleted_from_ovs(self):
-
-        def assert_ovs_bridge_empty(bridge_name):
-            bridge = ovs_lib.OVSBridge(bridge_name)
-            self.assertFalse(bridge.get_port_name_list())
-
-        assert_ovs_bridge_empty(self.agent.conf.ovs_integration_bridge)
-        assert_ovs_bridge_empty(self.agent.conf.external_network_bridge)
-
-    def floating_ips_configured(self, router):
-        floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
-        external_port = router.get_ex_gw_port()
-        return len(floating_ips) and all(
-            ip_lib.device_exists_with_ips_and_mac(
-                router.get_external_device_name(external_port['id']),
-                ['%s/32' % fip['floating_ip_address']],
-                external_port['mac_address'],
-                namespace=router.ns_name) for fip in floating_ips)
-
-    def fail_ha_router(self, router):
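-        # Bringing the HA (VRRP) interface down makes keepalived lose
-        # mastership for this router, simulating a failover.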
-        device_name = router.get_ha_device_name()
-        ha_device = ip_lib.IPDevice(device_name, router.ha_namespace)
-        ha_device.link.set_down()
-
-    @classmethod
-    def _get_addresses_on_device(cls, namespace, interface):
-        return [address['cidr'] for address in
-                ip_lib.IPDevice(interface, namespace=namespace).addr.list()]
-
-    def _assert_no_ip_addresses_on_interface(self, namespace, interface):
-        self.assertEqual(
-            [], self._get_addresses_on_device(namespace, interface))
-
-    def _assert_ip_address_on_interface(self,
-                                        namespace, interface, ip_address):
-        self.assertIn(
-            ip_address, self._get_addresses_on_device(namespace, interface))
diff --git a/neutron/tests/functional/agent/l3/test_dvr_router.py b/neutron/tests/functional/agent/l3/test_dvr_router.py
deleted file mode 100644 (file)
index e7c5beb..0000000
+++ /dev/null
@@ -1,680 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-import mock
-import netaddr
-
-from neutron.agent.l3 import agent as neutron_l3_agent
-from neutron.agent.l3 import dvr_fip_ns
-from neutron.agent.l3 import dvr_snat_ns
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import constants as l3_constants
-from neutron.extensions import portbindings
-from neutron.tests.common import l3_test_common
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.l3 import framework
-
-
-DEVICE_OWNER_COMPUTE = l3_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-
-
-class TestDvrRouter(framework.L3AgentTestFramework):
-    def manage_router(self, agent, router):
-        def _safe_fipnamespace_delete_on_ext_net(ext_net_id):
-            try:
-                agent.fipnamespace_delete_on_ext_net(None, ext_net_id)
-            except RuntimeError:
-                pass
-        self.addCleanup(
-            _safe_fipnamespace_delete_on_ext_net,
-            router['gw_port']['network_id'])
-
-        return super(TestDvrRouter, self).manage_router(agent, router)
-
-    def test_dvr_router_lifecycle_without_ha_without_snat_with_fips(self):
-        self._dvr_router_lifecycle(enable_ha=False, enable_snat=False)
-
-    def test_dvr_router_lifecycle_without_ha_with_snat_with_fips(self):
-        self._dvr_router_lifecycle(enable_ha=False, enable_snat=True)
-
-    def test_dvr_router_lifecycle_ha_with_snat_with_fips(self):
-        self._dvr_router_lifecycle(enable_ha=True, enable_snat=True)
-
-    def _helper_create_dvr_router_fips_for_ext_network(
-            self, agent_mode, **dvr_router_kwargs):
-        self.agent.conf.agent_mode = agent_mode
-        router_info = self.generate_dvr_router_info(**dvr_router_kwargs)
-        self.mock_plugin_api.get_external_network_id.return_value = (
-            router_info['_floatingips'][0]['floating_network_id'])
-        router = self.manage_router(self.agent, router_info)
-        fip_ns = router.fip_ns.get_name()
-        return router, fip_ns
-
-    def _validate_fips_for_external_network(self, router, fip_ns):
-        self.assertTrue(self._namespace_exists(router.ns_name))
-        self.assertTrue(self._namespace_exists(fip_ns))
-        self._assert_dvr_floating_ips(router)
-        self._assert_snat_namespace_does_not_exist(router)
-
-    def test_dvr_router_fips_for_multiple_ext_networks(self):
-        agent_mode = 'dvr'
-        # Create the first router fip with external net1
-        dvr_router1_kwargs = {'ip_address': '19.4.4.3',
-                              'subnet_cidr': '19.4.4.0/24',
-                              'gateway_ip': '19.4.4.1',
-                              'gateway_mac': 'ca:fe:de:ab:cd:ef'}
-        router1, fip1_ns = (
-            self._helper_create_dvr_router_fips_for_ext_network(
-                agent_mode, **dvr_router1_kwargs))
-        # Validate the fip with external net1
-        self._validate_fips_for_external_network(router1, fip1_ns)
-
-        # Create the second router fip with external net2
-        dvr_router2_kwargs = {'ip_address': '19.4.5.3',
-                              'subnet_cidr': '19.4.5.0/24',
-                              'gateway_ip': '19.4.5.1',
-                              'gateway_mac': 'ca:fe:de:ab:cd:fe'}
-        router2, fip2_ns = (
-            self._helper_create_dvr_router_fips_for_ext_network(
-                agent_mode, **dvr_router2_kwargs))
-        # Validate the fip with external net2
-        self._validate_fips_for_external_network(router2, fip2_ns)
-
-    def _dvr_router_lifecycle(self, enable_ha=False, enable_snat=False,
-                              custom_mtu=2000,
-                              ip_version=4,
-                              dual_stack=False):
-        '''Test the DVR router lifecycle.
-
-        :param enable_ha: sets the ha value for the router.
-        :param enable_snat: the value of enable_snat is used
-        to set the agent_mode.
-        '''
-
-        # The value of agent_mode can be dvr, dvr_snat, or legacy.
-        # Since this is by definition a DVR router (distributed = True),
-        # only dvr and dvr_snat are applicable.
-        self.agent.conf.agent_mode = 'dvr_snat' if enable_snat else 'dvr'
-        self.agent.conf.network_device_mtu = custom_mtu
-
-        # We get the router info particular to a dvr router
-        router_info = self.generate_dvr_router_info(
-            enable_ha, enable_snat, extra_routes=True)
-
-        # We need to mock the get_agent_gateway_port return value
-        # because the whole L3PluginApi is mocked and we need the port
-        # gateway_port information before the l3_agent will create it.
-        # The port returned needs to have the same information as
-        # router_info['gw_port']
-        self.mock_plugin_api.get_agent_gateway_port.return_value = router_info[
-            'gw_port']
-
-        # We also need to mock the get_external_network_id method to
-        # get the correct fip namespace.
-        self.mock_plugin_api.get_external_network_id.return_value = (
-            router_info['_floatingips'][0]['floating_network_id'])
-
-        # With all that set we can now ask the l3_agent to
-        # manage the router (create it, create namespaces,
-        # attach interfaces, etc...)
-        router = self.manage_router(self.agent, router_info)
-        if enable_ha:
-            port = router.get_ex_gw_port()
-            interface_name = router.get_external_device_name(port['id'])
-            self._assert_no_ip_addresses_on_interface(router.ha_namespace,
-                                                      interface_name)
-            utils.wait_until_true(lambda: router.ha_state == 'master')
-
-            # Keepalived notifies of a state transition when it starts,
-            # not when it ends. Thus, we have to wait until keepalived finishes
-            # configuring everything. We verify this by waiting until the last
-            # device has an IP address.
-            device = router.router[l3_constants.INTERFACE_KEY][-1]
-            device_exists = functools.partial(
-                self.device_exists_with_ips_and_mac,
-                device,
-                router.get_internal_device_name,
-                router.ns_name)
-            utils.wait_until_true(device_exists)
-
-        ext_gateway_port = router_info['gw_port']
-        self.assertTrue(self._namespace_exists(router.ns_name))
-        utils.wait_until_true(
-            lambda: self._metadata_proxy_exists(self.agent.conf, router))
-        self._assert_internal_devices(router)
-        self._assert_dvr_external_device(router)
-        self._assert_dvr_gateway(router)
-        self._assert_dvr_floating_ips(router)
-        self._assert_snat_chains(router)
-        self._assert_floating_ip_chains(router)
-        self._assert_metadata_chains(router)
-        self._assert_rfp_fpr_mtu(router, custom_mtu)
-        if enable_snat:
-            ip_versions = [4, 6] if (ip_version == 6 or dual_stack) else [4]
-            snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
-                router.router_id)
-            self._assert_onlink_subnet_routes(
-                router, ip_versions, snat_ns_name)
-            self._assert_extra_routes(router, namespace=snat_ns_name)
-
-        # During normal operation, a router-gateway-clear followed by
-        # a router delete results in two notifications to the agent.  This
-        # code flow simulates the exceptional case where the notification of
-        # the clearing of the gateway has been missed, so we are checking
-        # that the L3 agent is robust enough to handle that case and delete
-        # the router correctly.
-        self._delete_router(self.agent, router.router_id)
-        self._assert_fip_namespace_deleted(ext_gateway_port)
-        self._assert_router_does_not_exist(router)
-        self._assert_snat_namespace_does_not_exist(router)
-
-    def generate_dvr_router_info(self,
-                                 enable_ha=False,
-                                 enable_snat=False,
-                                 agent=None,
-                                 extra_routes=False,
-                                 **kwargs):
-        if not agent:
-            agent = self.agent
-        router = l3_test_common.prepare_router_data(
-            enable_snat=enable_snat,
-            enable_floating_ip=True,
-            enable_ha=enable_ha,
-            extra_routes=extra_routes,
-            num_internal_ports=2,
-            **kwargs)
-        internal_ports = router.get(l3_constants.INTERFACE_KEY, [])
-        router['distributed'] = True
-        router['gw_port_host'] = agent.conf.host
-        router['gw_port'][portbindings.HOST_ID] = agent.conf.host
-        floating_ip = router['_floatingips'][0]
-        floating_ip['floating_network_id'] = router['gw_port']['network_id']
-        floating_ip['host'] = agent.conf.host
-        floating_ip['port_id'] = internal_ports[0]['id']
-        floating_ip['status'] = 'ACTIVE'
-
-        self._add_snat_port_info_to_router(router, internal_ports)
-        # A FIP depends on the external gateway, so we need to create
-        # the snat_port info and fip_agent_gw_port_info irrespective of
-        # the agent type the DVR supports. Only the namespace creation
-        # depends on the agent_type.
-        external_gw_port = router['gw_port']
-        self._add_fip_agent_gw_port_info_to_router(router, external_gw_port)
-        return router
-
-    def _add_fip_agent_gw_port_info_to_router(self, router, external_gw_port):
-        # Add fip agent gateway port information to the router_info
-        fip_gw_port_list = router.get(
-            l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
-        if not fip_gw_port_list and external_gw_port:
-            # Get values from external gateway port
-            fixed_ip = external_gw_port['fixed_ips'][0]
-            float_subnet = external_gw_port['subnets'][0]
-            port_ip = fixed_ip['ip_address']
-            # Pick an ip address which is not the same as port_ip
-            fip_gw_port_ip = str(netaddr.IPAddress(port_ip) + 5)
-            # Add floatingip agent gateway port info to router
-            prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen
-            router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [
-                {'subnets': [
-                    {'cidr': float_subnet['cidr'],
-                     'gateway_ip': float_subnet['gateway_ip'],
-                     'id': fixed_ip['subnet_id']}],
-                 'network_id': external_gw_port['network_id'],
-                 'device_owner': l3_constants.DEVICE_OWNER_AGENT_GW,
-                 'mac_address': 'fa:16:3e:80:8d:89',
-                 portbindings.HOST_ID: self.agent.conf.host,
-                 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'],
-                                'ip_address': fip_gw_port_ip,
-                                'prefixlen': prefixlen}],
-                 'id': framework._uuid(),
-                 'device_id': framework._uuid()}
-            ]
-
-    def _add_snat_port_info_to_router(self, router, internal_ports):
-        # Add snat port information to the router
-        snat_port_list = router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
-        if not snat_port_list and internal_ports:
-            # Get values from internal port
-            port = internal_ports[0]
-            fixed_ip = port['fixed_ips'][0]
-            snat_subnet = port['subnets'][0]
-            port_ip = fixed_ip['ip_address']
-            # Pick an ip address which is not the same as port_ip
-            snat_ip = str(netaddr.IPAddress(port_ip) + 5)
-            # Add the info to router as the first snat port
-            # in the list of snat ports
-            prefixlen = netaddr.IPNetwork(snat_subnet['cidr']).prefixlen
-            router[l3_constants.SNAT_ROUTER_INTF_KEY] = [
-                {'subnets': [
-                    {'cidr': snat_subnet['cidr'],
-                     'gateway_ip': snat_subnet['gateway_ip'],
-                     'id': fixed_ip['subnet_id']}],
-                 'network_id': port['network_id'],
-                 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_SNAT,
-                 'mac_address': 'fa:16:3e:80:8d:89',
-                 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'],
-                                'ip_address': snat_ip,
-                                'prefixlen': prefixlen}],
-                 'id': framework._uuid(),
-                 'device_id': framework._uuid()}
-            ]
-
-    def _assert_dvr_external_device(self, router):
-        external_port = router.get_ex_gw_port()
-        snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
-            router.router_id)
-
-        # if the agent is in dvr_snat mode, then we have to check
-        # that the correct ports and ip addresses exist in the
-        # snat_ns_name namespace
-        if self.agent.conf.agent_mode == 'dvr_snat':
-            device_exists = functools.partial(
-                self.device_exists_with_ips_and_mac,
-                external_port,
-                router.get_external_device_name,
-                snat_ns_name)
-            utils.wait_until_true(device_exists)
-        # if the agent is in dvr mode then the snat_ns_name namespace
-        # should not be present at all:
-        elif self.agent.conf.agent_mode == 'dvr':
-            self.assertFalse(
-                self._namespace_exists(snat_ns_name),
-                "namespace %s was found but agent is in dvr mode not dvr_snat"
-                % (str(snat_ns_name))
-            )
-        # if the agent is anything else, the test is misconfigured,
-        # so we force a test failure with a message
-        else:
-            self.fail("agent not configured for dvr or dvr_snat")
-
-    def _assert_dvr_gateway(self, router):
-        gateway_expected_in_snat_namespace = (
-            self.agent.conf.agent_mode == 'dvr_snat'
-        )
-        if gateway_expected_in_snat_namespace:
-            self._assert_dvr_snat_gateway(router)
-            self._assert_removal_of_already_deleted_gateway_device(router)
-
-        snat_namespace_should_not_exist = (
-            self.agent.conf.agent_mode == 'dvr'
-        )
-        if snat_namespace_should_not_exist:
-            self._assert_snat_namespace_does_not_exist(router)
-
-    def _assert_dvr_snat_gateway(self, router):
-        namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
-            router.router_id)
-        external_port = router.get_ex_gw_port()
-        external_device_name = router.get_external_device_name(
-            external_port['id'])
-        external_device = ip_lib.IPDevice(external_device_name,
-                                          namespace=namespace)
-        existing_gateway = (
-            external_device.route.get_gateway().get('gateway'))
-        expected_gateway = external_port['subnets'][0]['gateway_ip']
-        self.assertEqual(expected_gateway, existing_gateway)
-
-    def _assert_removal_of_already_deleted_gateway_device(self, router):
-        namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
-            router.router_id)
-        device = ip_lib.IPDevice("fakedevice",
-                                 namespace=namespace)
-
-        # Assert that no exception is raised in this case
-        self.assertIsNone(router._delete_gateway_device_if_exists(
-                          device, "192.168.0.1", 0))
-
-    def _assert_snat_namespace_does_not_exist(self, router):
-        namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
-            router.router_id)
-        self.assertFalse(self._namespace_exists(namespace))
-
-    def _assert_dvr_floating_ips(self, router):
-        # in the fip namespace:
-        # Check that the fg-<port-id> (floatingip_agent_gateway)
-        # is created with the ip address of the external gateway port
-        floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
-        self.assertTrue(floating_ips)
-        # We need to fetch the floatingip agent gateway port info
-        # from the router_info
-        floating_agent_gw_port = (
-            router.router[l3_constants.FLOATINGIP_AGENT_INTF_KEY])
-        self.assertTrue(floating_agent_gw_port)
-
-        external_gw_port = floating_agent_gw_port[0]
-        fip_ns = self.agent.get_fip_ns(floating_ips[0]['floating_network_id'])
-        fip_ns_name = fip_ns.get_name()
-        fg_port_created_successfully = ip_lib.device_exists_with_ips_and_mac(
-            fip_ns.get_ext_device_name(external_gw_port['id']),
-            [self._port_first_ip_cidr(external_gw_port)],
-            external_gw_port['mac_address'],
-            namespace=fip_ns_name)
-        self.assertTrue(fg_port_created_successfully)
-        # Check fpr-router device has been created
-        device_name = fip_ns.get_int_device_name(router.router_id)
-        fpr_router_device_created_successfully = ip_lib.device_exists(
-            device_name, namespace=fip_ns_name)
-        self.assertTrue(fpr_router_device_created_successfully)
-
-        # In the router namespace
-        # Check rfp-<router-id> is created correctly
-        for fip in floating_ips:
-            device_name = fip_ns.get_rtr_ext_device_name(router.router_id)
-            self.assertTrue(ip_lib.device_exists(
-                device_name, namespace=router.ns_name))
-
-    def test_dvr_router_rem_fips_on_restarted_agent(self):
-        self.agent.conf.agent_mode = 'dvr_snat'
-        router_info = self.generate_dvr_router_info()
-        router1 = self.manage_router(self.agent, router_info)
-        fip_ns = router1.fip_ns.get_name()
-        self.assertTrue(self._namespace_exists(fip_ns))
-        restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport(
-            self.agent.host, self.agent.conf)
-        router1.router[l3_constants.FLOATINGIP_KEY] = []
-        self.manage_router(restarted_agent, router1.router)
-        self._assert_dvr_snat_gateway(router1)
-        self.assertTrue(self._namespace_exists(fip_ns))
-
-    def test_dvr_router_add_fips_on_restarted_agent(self):
-        self.agent.conf.agent_mode = 'dvr'
-        router_info = self.generate_dvr_router_info()
-        router = self.manage_router(self.agent, router_info)
-        floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
-        router_ns = router.ns_name
-        fip_rule_prio_1 = self._get_fixed_ip_rule_priority(
-            router_ns, floating_ips[0]['fixed_ip_address'])
-        restarted_agent = neutron_l3_agent.L3NATAgent(
-            self.agent.host, self.agent.conf)
-        floating_ips[0]['floating_ip_address'] = '21.4.4.2'
-        floating_ips[0]['fixed_ip_address'] = '10.0.0.2'
-        self.manage_router(restarted_agent, router_info)
-        fip_rule_prio_2 = self._get_fixed_ip_rule_priority(
-            router_ns, floating_ips[0]['fixed_ip_address'])
-        self.assertNotEqual(fip_rule_prio_1, fip_rule_prio_2)
-
-    def _get_fixed_ip_rule_priority(self, namespace, fip):
-        iprule = ip_lib.IPRule(namespace)
-        lines = iprule.rule._as_root([4], ['show']).splitlines()
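-        # 'ip rule show' prints lines like '32768: from 10.0.0.3 lookup 16';
-        # find the rule for the fixed IP and return its parsed priority.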
-        for line in lines:
-            if fip in line:
-                info = iprule.rule._parse_line(4, line)
-                return info['priority']
-
-    def test_dvr_router_add_internal_network_set_arp_cache(self):
-        # Check that, when the router is set up and there are
-        # existing ports on the uplinked subnet, the ARP
-        # cache is properly populated.
-        self.agent.conf.agent_mode = 'dvr_snat'
-        router_info = l3_test_common.prepare_router_data()
-        router_info['distributed'] = True
-        expected_neighbor = '35.4.1.10'
-        port_data = {
-            'fixed_ips': [{'ip_address': expected_neighbor}],
-            'mac_address': 'fa:3e:aa:bb:cc:dd',
-            'device_owner': DEVICE_OWNER_COMPUTE
-        }
-        self.agent.plugin_rpc.get_ports_by_subnet.return_value = [port_data]
-        router1 = self.manage_router(self.agent, router_info)
-        internal_device = router1.get_internal_device_name(
-            router_info['_interfaces'][0]['id'])
-        neighbors = ip_lib.IPDevice(internal_device, router1.ns_name).neigh
-        self.assertEqual(expected_neighbor,
-                         neighbors.show(ip_version=4).split()[0])
-
-    def _assert_rfp_fpr_mtu(self, router, expected_mtu=1500):
-        dev_mtu = self.get_device_mtu(
-            router.router_id, router.fip_ns.get_rtr_ext_device_name,
-            router.ns_name)
-        self.assertEqual(expected_mtu, dev_mtu)
-        dev_mtu = self.get_device_mtu(
-            router.router_id, router.fip_ns.get_int_device_name,
-            router.fip_ns.get_name())
-        self.assertEqual(expected_mtu, dev_mtu)
-
-    def test_dvr_router_fip_agent_mismatch(self):
-        """Test to validate the floatingip agent mismatch.
-
-        This test validates the condition where floatingip agent
-        gateway port host mismatches with the agent and so the
-        binding will not be there.
-
-        """
-        self.agent.conf.agent_mode = 'dvr'
-        router_info = self.generate_dvr_router_info()
-        floating_ip = router_info['_floatingips'][0]
-        floating_ip['host'] = 'my_new_host'
-        # In this case the floatingip binding is different and so it
-        # should not create the floatingip namespace on the given agent.
-        # This is also like there is no current binding.
-        router1 = self.manage_router(self.agent, router_info)
-        fip_ns = router1.fip_ns.get_name()
-        self.assertTrue(self._namespace_exists(router1.ns_name))
-        self.assertFalse(self._namespace_exists(fip_ns))
-        self._assert_snat_namespace_does_not_exist(router1)
-
-    def test_dvr_router_fip_late_binding(self):
-        """Test to validate the floatingip migration or latebinding.
-
-        This test validates the condition where floatingip private
-        port changes while migration or when the private port host
-        binding is done later after floatingip association.
-
-        """
-        self.agent.conf.agent_mode = 'dvr'
-        router_info = self.generate_dvr_router_info()
-        fip_agent_gw_port = router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY]
-        # Now let us not pass the FLOATINGIP_AGENT_INTF_KEY, to emulate
-        # that the server did not create the port, since there was no valid
-        # host binding.
-        router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = []
-        self.mock_plugin_api.get_agent_gateway_port.return_value = (
-            fip_agent_gw_port[0])
-        router1 = self.manage_router(self.agent, router_info)
-        fip_ns = router1.fip_ns.get_name()
-        self.assertTrue(self._namespace_exists(router1.ns_name))
-        self.assertTrue(self._namespace_exists(fip_ns))
-        self._assert_snat_namespace_does_not_exist(router1)
-
-    def _assert_snat_namespace_exists(self, router):
-        namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
-            router.router_id)
-        self.assertTrue(self._namespace_exists(namespace))
-
-    def _get_dvr_snat_namespace_device_status(
-        self, router, internal_dev_name=None):
-        """Function returns the internal and external device status."""
-        snat_ns = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
-            router.router_id)
-        external_port = router.get_ex_gw_port()
-        external_device_name = router.get_external_device_name(
-            external_port['id'])
-        qg_device_created_successfully = ip_lib.device_exists(
-            external_device_name, namespace=snat_ns)
-        sg_device_created_successfully = ip_lib.device_exists(
-            internal_dev_name, namespace=snat_ns)
-        return qg_device_created_successfully, sg_device_created_successfully
-
-    def test_dvr_router_snat_namespace_with_interface_remove(self):
-        """Test to validate the snat namespace with interface remove.
-
-        This test validates the snat namespace for all the external
-        and internal devices. It also validates if the internal
-        device corresponding to the router interface is removed
-        when the router interface is deleted.
-        """
-        self.agent.conf.agent_mode = 'dvr_snat'
-        router_info = self.generate_dvr_router_info()
-        snat_internal_port = router_info[l3_constants.SNAT_ROUTER_INTF_KEY]
-        router1 = self.manage_router(self.agent, router_info)
-        csnat_internal_port = (
-            router1.router[l3_constants.SNAT_ROUTER_INTF_KEY])
-        # Now save the internal device name to verify later
-        internal_device_name = router1._get_snat_int_device_name(
-            csnat_internal_port[0]['id'])
-        self._assert_snat_namespace_exists(router1)
-        qg_device, sg_device = self._get_dvr_snat_namespace_device_status(
-            router1, internal_dev_name=internal_device_name)
-        self.assertTrue(qg_device)
-        self.assertTrue(sg_device)
-        self.assertEqual(router1.snat_ports, snat_internal_port)
-        # Clear INTERFACE_KEY to emulate the interface having been removed.
-        router1.router[l3_constants.INTERFACE_KEY] = []
-        # Clear SNAT_ROUTER_INTF_KEY to emulate the server no longer sending
-        # it, since the interface has been removed.
-        router1.router[l3_constants.SNAT_ROUTER_INTF_KEY] = []
-        self.agent._process_updated_router(router1.router)
-        router_updated = self.agent.router_info[router_info['id']]
-        self._assert_snat_namespace_exists(router_updated)
-        qg_device, sg_device = self._get_dvr_snat_namespace_device_status(
-            router_updated, internal_dev_name=internal_device_name)
-        self.assertFalse(sg_device)
-        self.assertTrue(qg_device)
-
-    def _mocked_dvr_ha_router(self, agent):
-        r_info = self.generate_dvr_router_info(enable_ha=True,
-                                               enable_snat=True,
-                                               agent=agent)
-
-        r_snat_ns_name = namespaces.build_ns_name(dvr_snat_ns.SNAT_NS_PREFIX,
-                                                  r_info['id'])
-
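-        # Both "agents" run on the same machine in functional tests, so the
-        # namespace names are suffixed with '@<agent.host>' to keep the two
-        # routers' namespaces distinct.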
-        mocked_r_snat_ns_name = r_snat_ns_name + '@' + agent.host
-        r_ns_name = namespaces.build_ns_name(namespaces.NS_PREFIX,
-                                             r_info['id'])
-
-        mocked_r_ns_name = r_ns_name + '@' + agent.host
-
-        return r_info, mocked_r_ns_name, mocked_r_snat_ns_name
-
-    def _setup_dvr_ha_agents(self):
-        self.agent.conf.agent_mode = 'dvr_snat'
-
-        conf = self._configure_agent('agent2')
-        self.failover_agent = neutron_l3_agent.L3NATAgentWithStateReport(
-            'agent2', conf)
-        self.failover_agent.conf.agent_mode = 'dvr_snat'
-
-    def _setup_dvr_ha_bridges(self):
-        br_int_1 = self._get_agent_ovs_integration_bridge(self.agent)
-        br_int_2 = self._get_agent_ovs_integration_bridge(self.failover_agent)
-
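-        # Connect the two agents' integration bridges with a veth pair so
-        # that keepalived VRRP traffic can flow between the HA router
-        # instances.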
-        veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports
-        br_int_1.add_port(veth1.name)
-        br_int_2.add_port(veth2.name)
-
-    def _create_dvr_ha_router(self, agent):
-        get_ns_name = mock.patch.object(namespaces.RouterNamespace,
-                                        '_get_ns_name').start()
-        get_snat_ns_name = mock.patch.object(dvr_snat_ns.SnatNamespace,
-                                             'get_snat_ns_name').start()
-        (r_info,
-         mocked_r_ns_name,
-         mocked_r_snat_ns_name) = self._mocked_dvr_ha_router(agent)
-        get_ns_name.return_value = mocked_r_ns_name
-        get_snat_ns_name.return_value = mocked_r_snat_ns_name
-        router = self.manage_router(agent, r_info)
-        return router
-
-    def _assert_ip_addresses_in_dvr_ha_snat_namespace(self, router):
-        namespace = router.ha_namespace
-        ex_gw_port = router.get_ex_gw_port()
-        snat_port = router.get_snat_interfaces()[0]
-        ex_gw_port_name = router.get_external_device_name(
-            ex_gw_port['id'])
-        snat_port_name = router._get_snat_int_device_name(
-            snat_port['id'])
-
-        ip = ex_gw_port["fixed_ips"][0]['ip_address']
-        prefix_len = ex_gw_port["fixed_ips"][0]['prefixlen']
-        ex_gw_port_cidr = ip + "/" + str(prefix_len)
-        ip = snat_port["fixed_ips"][0]['ip_address']
-        prefix_len = snat_port["fixed_ips"][0]['prefixlen']
-        snat_port_cidr = ip + "/" + str(prefix_len)
-
-        self._assert_ip_address_on_interface(namespace,
-                                             ex_gw_port_name,
-                                             ex_gw_port_cidr)
-        self._assert_ip_address_on_interface(namespace,
-                                             snat_port_name,
-                                             snat_port_cidr)
-
-    def _assert_no_ip_addresses_in_dvr_ha_snat_namespace(self, router):
-        namespace = router.ha_namespace
-        ex_gw_port = router.get_ex_gw_port()
-        snat_port = router.get_snat_interfaces()[0]
-        ex_gw_port_name = router.get_external_device_name(
-            ex_gw_port['id'])
-        snat_port_name = router._get_snat_int_device_name(
-            snat_port['id'])
-
-        self._assert_no_ip_addresses_on_interface(namespace,
-                                                  snat_port_name)
-        self._assert_no_ip_addresses_on_interface(namespace,
-                                                  ex_gw_port_name)
-
-    def test_dvr_ha_router_failover(self):
-        self._setup_dvr_ha_agents()
-        self._setup_dvr_ha_bridges()
-
-        router1 = self._create_dvr_ha_router(self.agent)
-        router2 = self._create_dvr_ha_router(self.failover_agent)
-
-        utils.wait_until_true(lambda: router1.ha_state == 'master')
-        utils.wait_until_true(lambda: router2.ha_state == 'backup')
-
-        self._assert_ip_addresses_in_dvr_ha_snat_namespace(router1)
-        self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(router2)
-
-        self.fail_ha_router(router1)
-
-        utils.wait_until_true(lambda: router2.ha_state == 'master')
-        utils.wait_until_true(lambda: router1.ha_state == 'backup')
-
-        self._assert_ip_addresses_in_dvr_ha_snat_namespace(router2)
-        self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(router1)
-
-    def test_dvr_router_static_routes(self):
-        """Test to validate the extra routes on dvr routers."""
-        self.agent.conf.agent_mode = 'dvr_snat'
-        router_info = self.generate_dvr_router_info(enable_snat=True)
-        router1 = self.manage_router(self.agent, router_info)
-        self.assertTrue(self._namespace_exists(router1.ns_name))
-        self._assert_snat_namespace_exists(router1)
-        snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
-            router1.router_id)
-        # Now try to add routes that are suitable for both the
-        # router namespace and the snat namespace.
-        router1.router['routes'] = [{'destination': '8.8.4.0/24',
-                                     'nexthop': '35.4.0.20'}]
-        self.agent._process_updated_router(router1.router)
-        router_updated = self.agent.router_info[router_info['id']]
-        self._assert_extra_routes(router_updated, namespace=snat_ns_name)
-        self._assert_extra_routes(router_updated)
-
-    def _assert_fip_namespace_deleted(self, ext_gateway_port):
-        ext_net_id = ext_gateway_port['network_id']
-        self.agent.fipnamespace_delete_on_ext_net(
-            self.agent.context, ext_net_id)
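-        # Deleting the FIP namespace for the external network should also
-        # remove the agent gateway interfaces from the OVS bridge.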
-        self._assert_interfaces_deleted_from_ovs()
-        fip_ns_name = dvr_fip_ns.FipNamespace._get_ns_name(ext_net_id)
-        self.assertFalse(self._namespace_exists(fip_ns_name))
diff --git a/neutron/tests/functional/agent/l3/test_ha_router.py b/neutron/tests/functional/agent/l3/test_ha_router.py
deleted file mode 100644 (file)
index 1bdffa6..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-import mock
-import six
-
-from neutron.agent.l3 import agent as neutron_l3_agent
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import constants as l3_constants
-from neutron.common import utils as common_utils
-from neutron.tests.common import l3_test_common
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.l3 import framework
-
-
-class L3HATestCase(framework.L3AgentTestFramework):
-
-    def test_keepalived_state_change_notification(self):
-        enqueue_mock = mock.patch.object(
-            self.agent, 'enqueue_state_change').start()
-        router_info = self.generate_router_info(enable_ha=True)
-        router = self.manage_router(self.agent, router_info)
-        utils.wait_until_true(lambda: router.ha_state == 'master')
-
-        self.fail_ha_router(router)
-        utils.wait_until_true(lambda: router.ha_state == 'backup')
-
-        utils.wait_until_true(lambda: enqueue_mock.call_count == 3)
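-        # Three transitions are expected: the initial 'backup' state on
-        # spawn, 'master' once keepalived elects this router, and 'backup'
-        # again after the induced failure.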
-        calls = [args[0] for args in enqueue_mock.call_args_list]
-        self.assertEqual((router.router_id, 'backup'), calls[0])
-        self.assertEqual((router.router_id, 'master'), calls[1])
-        self.assertEqual((router.router_id, 'backup'), calls[2])
-
-    def _expected_rpc_report(self, expected):
-        calls = (args[0][1] for args in
-                 self.agent.plugin_rpc.update_ha_routers_states.call_args_list)
-
-        # Get the last state reported for each router
-        actual_router_states = {}
-        for call in calls:
-            for router_id, state in six.iteritems(call):
-                actual_router_states[router_id] = state
-
-        return actual_router_states == expected
-
-    def test_keepalived_state_change_bulk_rpc(self):
-        router_info = self.generate_router_info(enable_ha=True)
-        router1 = self.manage_router(self.agent, router_info)
-        self.fail_ha_router(router1)
-        router_info = self.generate_router_info(enable_ha=True)
-        router2 = self.manage_router(self.agent, router_info)
-
-        utils.wait_until_true(lambda: router1.ha_state == 'backup')
-        utils.wait_until_true(lambda: router2.ha_state == 'master')
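-        # The agent reports keepalived states to the server translated as
-        # 'master' -> 'active' and 'backup' -> 'standby'.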
-        utils.wait_until_true(
-            lambda: self._expected_rpc_report(
-                {router1.router_id: 'standby', router2.router_id: 'active'}))
-
-    def test_ha_router_lifecycle(self):
-        self._router_lifecycle(enable_ha=True)
-
-    def test_conntrack_disassociate_fip_ha_router(self):
-        self._test_conntrack_disassociate_fip(ha=True)
-
-    def test_ipv6_ha_router_lifecycle(self):
-        self._router_lifecycle(enable_ha=True, ip_version=6)
-
-    def test_ipv6_ha_router_lifecycle_with_no_gw_subnet(self):
-        self.agent.conf.set_override('ipv6_gateway',
-                                     'fe80::f816:3eff:fe2e:1')
-        self._router_lifecycle(enable_ha=True, ip_version=6,
-                               v6_ext_gw_with_sub=False)
-
-    def test_ipv6_ha_router_lifecycle_with_no_gw_subnet_for_router_advts(self):
-        # Verify that the router gw interface is configured to receive Router
-        # Advts from the upstream router when no IPv6 gateway subnet is
-        # configured.
-        self._router_lifecycle(enable_ha=True, dual_stack=True,
-                               v6_ext_gw_with_sub=False)
-
-    def test_keepalived_configuration(self):
-        router_info = self.generate_router_info(enable_ha=True)
-        router = self.manage_router(self.agent, router_info)
-        expected = self.get_expected_keepalive_configuration(router)
-
-        self.assertEqual(expected,
-                         router.keepalived_manager.get_conf_on_disk())
-
-        # Add a new FIP and change the GW IP address
-        router.router = copy.deepcopy(router.router)
-        existing_fip = '19.4.4.2'
-        new_fip = '19.4.4.3'
-        self._add_fip(router, new_fip)
-        subnet_id = framework._uuid()
-        fixed_ips = [{'ip_address': '19.4.4.10',
-                      'prefixlen': 24,
-                      'subnet_id': subnet_id}]
-        subnets = [{'id': subnet_id,
-                    'cidr': '19.4.4.0/24',
-                    'gateway_ip': '19.4.4.5'}]
-        router.router['gw_port']['subnets'] = subnets
-        router.router['gw_port']['fixed_ips'] = fixed_ips
-
-        router.process(self.agent)
-
-        # Get the updated configuration and assert that both FIPs are in,
-        # and that the GW IP address was updated.
-        new_config = router.keepalived_manager.config.get_config_str()
-        old_gw = '0.0.0.0/0 via 19.4.4.1'
-        new_gw = '0.0.0.0/0 via 19.4.4.5'
-        old_external_device_ip = '19.4.4.4'
-        new_external_device_ip = '19.4.4.10'
-        self.assertIn(existing_fip, new_config)
-        self.assertIn(new_fip, new_config)
-        self.assertNotIn(old_gw, new_config)
-        self.assertIn(new_gw, new_config)
-        external_port = router.get_ex_gw_port()
-        external_device_name = router.get_external_device_name(
-            external_port['id'])
-        self.assertNotIn('%s/24 dev %s' %
-                         (old_external_device_ip, external_device_name),
-                         new_config)
-        self.assertIn('%s/24 dev %s' %
-                      (new_external_device_ip, external_device_name),
-                      new_config)
-
-    def test_ha_router_conf_on_restarted_agent(self):
-        router_info = self.generate_router_info(enable_ha=True)
-        router1 = self.manage_router(self.agent, router_info)
-        self._add_fip(router1, '192.168.111.12')
-        restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport(
-            self.agent.host, self.agent.conf)
-        self.manage_router(restarted_agent, router1.router)
-        utils.wait_until_true(lambda: self.floating_ips_configured(router1))
-        self.assertIn(
-            router1._get_primary_vip(),
-            self._get_addresses_on_device(
-                router1.ns_name,
-                router1.get_ha_device_name()))
-
-    def test_ha_router_ipv6_radvd_status(self):
-        router_info = self.generate_router_info(ip_version=6, enable_ha=True)
-        router1 = self.manage_router(self.agent, router_info)
-        utils.wait_until_true(lambda: router1.ha_state == 'master')
-        utils.wait_until_true(lambda: router1.radvd.enabled)
-
-        def _check_lla_status(router, expected):
-            internal_devices = router.router[l3_constants.INTERFACE_KEY]
-            for device in internal_devices:
-                lladdr = ip_lib.get_ipv6_lladdr(device['mac_address'])
-                exists = ip_lib.device_exists_with_ips_and_mac(
-                    router.get_internal_device_name(device['id']), [lladdr],
-                    device['mac_address'], router.ns_name)
-                self.assertEqual(expected, exists)
-
-        _check_lla_status(router1, True)
-
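-        # Take the HA (VRRP) device down to force a transition to 'backup';
-        # radvd should then be disabled and the LLAs removed.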
-        device_name = router1.get_ha_device_name()
-        ha_device = ip_lib.IPDevice(device_name, namespace=router1.ns_name)
-        ha_device.link.set_down()
-
-        utils.wait_until_true(lambda: router1.ha_state == 'backup')
-        utils.wait_until_true(lambda: not router1.radvd.enabled, timeout=10)
-        _check_lla_status(router1, False)
-
-    def test_ha_router_process_ipv6_subnets_to_existing_port(self):
-        router_info = self.generate_router_info(enable_ha=True, ip_version=6)
-        router = self.manage_router(self.agent, router_info)
-
-        def verify_ip_in_keepalived_config(router, iface):
-            config = router.keepalived_manager.config.get_config_str()
-            ip_cidrs = common_utils.fixed_ip_cidrs(iface['fixed_ips'])
-            for ip_addr in ip_cidrs:
-                self.assertIn(ip_addr, config)
-
-        interface_id = router.router[l3_constants.INTERFACE_KEY][0]['id']
-        slaac = l3_constants.IPV6_SLAAC
-        slaac_mode = {'ra_mode': slaac, 'address_mode': slaac}
-
-        # Add a second IPv6 subnet to the router internal interface.
-        self._add_internal_interface_by_subnet(router.router, count=1,
-                ip_version=6, ipv6_subnet_modes=[slaac_mode],
-                interface_id=interface_id)
-        router.process(self.agent)
-        utils.wait_until_true(lambda: router.ha_state == 'master')
-
-        # Verify that router internal interface is present and is configured
-        # with IP address from both the subnets.
-        internal_iface = router.router[l3_constants.INTERFACE_KEY][0]
-        self.assertEqual(2, len(internal_iface['fixed_ips']))
-        self._assert_internal_devices(router)
-
-        # Verify that keepalived config is properly updated.
-        verify_ip_in_keepalived_config(router, internal_iface)
-
-        # Remove one subnet from the router internal iface
-        interfaces = copy.deepcopy(router.router.get(
-            l3_constants.INTERFACE_KEY, []))
-        fixed_ips, subnets = [], []
-        fixed_ips.append(interfaces[0]['fixed_ips'][0])
-        subnets.append(interfaces[0]['subnets'][0])
-        interfaces[0].update({'fixed_ips': fixed_ips, 'subnets': subnets})
-        router.router[l3_constants.INTERFACE_KEY] = interfaces
-        router.process(self.agent)
-
-        # Verify that router internal interface has a single ipaddress
-        internal_iface = router.router[l3_constants.INTERFACE_KEY][0]
-        self.assertEqual(1, len(internal_iface['fixed_ips']))
-        self._assert_internal_devices(router)
-
-        # Verify that keepalived config is properly updated.
-        verify_ip_in_keepalived_config(router, internal_iface)
-
-    def test_delete_external_gateway_on_standby_router(self):
-        router_info = self.generate_router_info(enable_ha=True)
-        router = self.manage_router(self.agent, router_info)
-
-        self.fail_ha_router(router)
-        utils.wait_until_true(lambda: router.ha_state == 'backup')
-
-        # The purpose of the test is simply to make sure no exception is raised
-        port = router.get_ex_gw_port()
-        interface_name = router.get_external_device_name(port['id'])
-        router.external_gateway_removed(port, interface_name)
-
-
-class L3HATestFailover(framework.L3AgentTestFramework):
-
-    NESTED_NAMESPACE_SEPARATOR = '@'
-
-    def setUp(self):
-        super(L3HATestFailover, self).setUp()
-        conf = self._configure_agent('agent2')
-        self.failover_agent = neutron_l3_agent.L3NATAgentWithStateReport(
-            'agent2', conf)
-
-        br_int_1 = self._get_agent_ovs_integration_bridge(self.agent)
-        br_int_2 = self._get_agent_ovs_integration_bridge(self.failover_agent)
-
-        veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports
-        br_int_1.add_port(veth1.name)
-        br_int_2.add_port(veth2.name)
-
-    def test_ha_router_failover(self):
-        router_info = self.generate_router_info(enable_ha=True)
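-        # Mock the namespace name to embed the agent host, since both agents
-        # run on the same machine in this test.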
-        get_ns_name = mock.patch.object(
-            namespaces.RouterNamespace, '_get_ns_name').start()
-        get_ns_name.return_value = "%s%s%s" % (
-            'qrouter-' + router_info['id'],
-            self.NESTED_NAMESPACE_SEPARATOR, self.agent.host)
-        router1 = self.manage_router(self.agent, router_info)
-
-        router_info_2 = copy.deepcopy(router_info)
-        router_info_2[l3_constants.HA_INTERFACE_KEY] = (
-            l3_test_common.get_ha_interface(ip='169.254.192.2',
-                                            mac='22:22:22:22:22:22'))
-
-        get_ns_name.return_value = "%s%s%s" % (
-            namespaces.RouterNamespace._get_ns_name(router_info_2['id']),
-            self.NESTED_NAMESPACE_SEPARATOR, self.failover_agent.host)
-        router2 = self.manage_router(self.failover_agent, router_info_2)
-
-        utils.wait_until_true(lambda: router1.ha_state == 'master')
-        utils.wait_until_true(lambda: router2.ha_state == 'backup')
-
-        self.fail_ha_router(router1)
-
-        utils.wait_until_true(lambda: router2.ha_state == 'master')
-        utils.wait_until_true(lambda: router1.ha_state == 'backup')
diff --git a/neutron/tests/functional/agent/l3/test_keepalived_state_change.py b/neutron/tests/functional/agent/l3/test_keepalived_state_change.py
deleted file mode 100644 (file)
index fa08461..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-import mock
-from oslo_config import cfg
-from oslo_config import fixture as fixture_config
-from oslo_utils import uuidutils
-
-from neutron._i18n import _
-from neutron.agent.l3 import keepalived_state_change
-from neutron.tests.functional import base
-
-
-class TestKeepalivedStateChange(base.BaseSudoTestCase):
-    def setUp(self):
-        super(TestKeepalivedStateChange, self).setUp()
-        self.conf_fixture = self.useFixture(fixture_config.Config())
-        self.conf_fixture.register_opt(
-            cfg.StrOpt('metadata_proxy_socket',
-                       default='$state_path/metadata_proxy',
-                       help=_('Location of Metadata Proxy UNIX domain '
-                              'socket')))
-
-        self.router_id = uuidutils.generate_uuid()
-        self.conf_dir = self.get_default_temp_dir().path
-        self.cidr = '169.254.128.1/24'
-        self.interface_name = 'interface'
-        self.monitor = keepalived_state_change.MonitorDaemon(
-            self.get_temp_file_path('monitor.pid'),
-            self.router_id,
-            1,
-            2,
-            'namespace',
-            self.conf_dir,
-            self.interface_name,
-            self.cidr)
-        mock.patch.object(self.monitor, 'notify_agent').start()
-        self.line = '1: %s    inet %s' % (self.interface_name, self.cidr)
-
-    def test_parse_and_handle_event_wrong_device_completes_without_error(self):
-        self.monitor.parse_and_handle_event(
-            '1: wrong_device    inet wrong_cidr')
-
-    def _get_state(self):
-        with open(os.path.join(self.monitor.conf_dir, 'state')) as state_file:
-            return state_file.read()
-
-    def test_parse_and_handle_event_writes_to_file(self):
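-        # The monitor parses address change events (as produced by
-        # 'ip monitor'): a 'Deleted <cidr>' event means the VIP left the
-        # interface (backup), while the address reappearing means this
-        # router became master.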
-        self.monitor.parse_and_handle_event('Deleted %s' % self.line)
-        self.assertEqual('backup', self._get_state())
-
-        self.monitor.parse_and_handle_event(self.line)
-        self.assertEqual('master', self._get_state())
-
-    def test_parse_and_handle_event_fails_writing_state(self):
-        with mock.patch.object(
-                self.monitor, 'write_state_change', side_effect=OSError):
-            self.monitor.parse_and_handle_event(self.line)
-
-    def test_parse_and_handle_event_fails_notifying_agent(self):
-        with mock.patch.object(
-                self.monitor, 'notify_agent', side_effect=Exception):
-            self.monitor.parse_and_handle_event(self.line)
diff --git a/neutron/tests/functional/agent/l3/test_legacy_router.py b/neutron/tests/functional/agent/l3/test_legacy_router.py
deleted file mode 100644 (file)
index 926927a..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import mock
-
-from neutron.agent.l3 import namespace_manager
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.callbacks import events
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants as l3_constants
-from neutron.tests.common import machine_fixtures
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.l3 import framework
-
-
-class L3AgentTestCase(framework.L3AgentTestFramework):
-
-    def test_agent_notifications_for_router_events(self):
-        """Test notifications for router create, update, and delete.
-
-        Make sure that when the agent sends notifications of router events
-        for router create, update, and delete, the correct handler is
-        called with the right resource, event, and router information.
-        """
-        event_handler = mock.Mock()
-        registry.subscribe(event_handler,
-                           resources.ROUTER, events.BEFORE_CREATE)
-        registry.subscribe(event_handler,
-                           resources.ROUTER, events.AFTER_CREATE)
-        registry.subscribe(event_handler,
-                           resources.ROUTER, events.BEFORE_UPDATE)
-        registry.subscribe(event_handler,
-                           resources.ROUTER, events.AFTER_UPDATE)
-        registry.subscribe(event_handler,
-                           resources.ROUTER, events.BEFORE_DELETE)
-        registry.subscribe(event_handler,
-                           resources.ROUTER, events.AFTER_DELETE)
-
-        router_info = self.generate_router_info(enable_ha=False)
-        router = self.manage_router(self.agent, router_info)
-        self.agent._process_updated_router(router.router)
-        self._delete_router(self.agent, router.router_id)
-
-        expected_calls = [
-            mock.call('router', 'before_create', self.agent, router=router),
-            mock.call('router', 'after_create', self.agent, router=router),
-            mock.call('router', 'before_update', self.agent, router=router),
-            mock.call('router', 'after_update', self.agent, router=router),
-            mock.call('router', 'before_delete', self.agent, router=router),
-            mock.call('router', 'after_delete', self.agent, router=router)]
-        event_handler.assert_has_calls(expected_calls)
-
-    def test_legacy_router_lifecycle(self):
-        self._router_lifecycle(enable_ha=False, dual_stack=True)
-
-    def test_legacy_router_lifecycle_with_no_gateway_subnet(self):
-        self.agent.conf.set_override('ipv6_gateway',
-                                     'fe80::f816:3eff:fe2e:1')
-        self._router_lifecycle(enable_ha=False, dual_stack=True,
-                               v6_ext_gw_with_sub=False)
-
-    def test_legacy_router_ns_rebuild(self):
-        router_info = self.generate_router_info(False)
-        router = self.manage_router(self.agent, router_info)
-        gw_port = router.router['gw_port']
-        gw_inf_name = router.get_external_device_name(gw_port['id'])
-        gw_device = ip_lib.IPDevice(gw_inf_name, namespace=router.ns_name)
-        router_ports = [gw_device]
-        for i_port in router_info.get(l3_constants.INTERFACE_KEY, []):
-            interface_name = router.get_internal_device_name(i_port['id'])
-            router_ports.append(
-                ip_lib.IPDevice(interface_name, namespace=router.ns_name))
-
-        namespaces.Namespace.delete(router.router_namespace)
-
-        # The l3 agent should be able to rebuild the namespace when it has
-        # been deleted
-        self.manage_router(self.agent, router_info)
-        # Assert that the router ports are present in the namespace
-        self.assertTrue(all([port.exists() for port in router_ports]))
-
-        self._delete_router(self.agent, router.router_id)
-
-    def test_conntrack_disassociate_fip_legacy_router(self):
-        self._test_conntrack_disassociate_fip(ha=False)
-
-    def _test_periodic_sync_routers_task(self,
-                                         routers_to_keep,
-                                         routers_deleted,
-                                         routers_deleted_during_resync):
-        ns_names_to_retrieve = set()
-        deleted_routers_info = []
-        for r in routers_to_keep:
-            ri = self.manage_router(self.agent, r)
-            ns_names_to_retrieve.add(ri.ns_name)
-        for r in routers_deleted + routers_deleted_during_resync:
-            ri = self.manage_router(self.agent, r)
-            deleted_routers_info.append(ri)
-            ns_names_to_retrieve.add(ri.ns_name)
-
-        mocked_get_router_ids = self.mock_plugin_api.get_router_ids
-        mocked_get_router_ids.return_value = [r['id'] for r in
-                                              routers_to_keep +
-                                              routers_deleted_during_resync]
-        mocked_get_routers = self.mock_plugin_api.get_routers
-        mocked_get_routers.return_value = (routers_to_keep +
-                                           routers_deleted_during_resync)
-        # clear agent router_info as it will be after restart
-        self.agent.router_info = {}
-
-        # Synchronize the agent with the plug-in
-        with mock.patch.object(namespace_manager.NamespaceManager, 'list_all',
-                               return_value=ns_names_to_retrieve):
-            self.agent.periodic_sync_routers_task(self.agent.context)
-
-        # Mock the plugin RPC API so a known external network id is returned
-        # when the router updates are processed by the agent
-        external_network_id = framework._uuid()
-        self.mock_plugin_api.get_external_network_id.return_value = (
-            external_network_id)
-
-        # Plug external_gateway_info in the routers that are not going to be
-        # deleted by the agent when it processes the updates. Otherwise,
-        # _process_router_if_compatible in the agent fails
-        for r in routers_to_keep:
-            r['external_gateway_info'] = {'network_id': external_network_id}
-
-        # While the sync updates are still in the queue, higher-priority
-        # router_deleted events may be added there as well
-        for r in routers_deleted_during_resync:
-            self.agent.router_deleted(self.agent.context, r['id'])
-
-        # make sure all events are processed
-        while not self.agent._queue._queue.empty():
-            self.agent._process_router_update()
-
-        for r in routers_to_keep:
-            self.assertIn(r['id'], self.agent.router_info)
-            self.assertTrue(self._namespace_exists(namespaces.NS_PREFIX +
-                                                   r['id']))
-        for ri in deleted_routers_info:
-            self.assertNotIn(ri.router_id,
-                             self.agent.router_info)
-            self._assert_router_does_not_exist(ri)
-
-    def test_periodic_sync_routers_task(self):
-        routers_to_keep = []
-        for i in range(2):
-            routers_to_keep.append(self.generate_router_info(False))
-        self._test_periodic_sync_routers_task(routers_to_keep,
-                                              routers_deleted=[],
-                                              routers_deleted_during_resync=[])
-
-    def test_periodic_sync_routers_task_routers_deleted_while_agent_down(self):
-        routers_to_keep = []
-        routers_deleted = []
-        for i in range(2):
-            routers_to_keep.append(self.generate_router_info(False))
-        for i in range(2):
-            routers_deleted.append(self.generate_router_info(False))
-        self._test_periodic_sync_routers_task(routers_to_keep,
-                                              routers_deleted,
-                                              routers_deleted_during_resync=[])
-
-    def test_periodic_sync_routers_task_routers_deleted_while_agent_sync(self):
-        routers_to_keep = []
-        routers_deleted_during_resync = []
-        for i in range(2):
-            routers_to_keep.append(self.generate_router_info(False))
-        for i in range(2):
-            routers_deleted_during_resync.append(
-                self.generate_router_info(False))
-        self._test_periodic_sync_routers_task(
-            routers_to_keep,
-            routers_deleted=[],
-            routers_deleted_during_resync=routers_deleted_during_resync)
-
-    def test_fip_connection_from_same_subnet(self):
-        '''Test connection to a floating IP associated with a fixed IP on
-           the same subnet as the source fixed IP.
-           In other words, it confirms that return packets actually go
-           through the router.
-        '''
-        router_info = self.generate_router_info(enable_ha=False)
-        router = self.manage_router(self.agent, router_info)
-        router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0])
-        router_ip = router_ip_cidr.partition('/')[0]
-
-        br_int = framework.get_ovs_bridge(
-            self.agent.conf.ovs_integration_bridge)
-
-        src_machine, dst_machine = self.useFixture(
-            machine_fixtures.PeerMachines(
-                br_int,
-                net_helpers.increment_ip_cidr(router_ip_cidr),
-                router_ip)).machines
-
-        dst_fip = '19.4.4.10'
-        router.router[l3_constants.FLOATINGIP_KEY] = []
-        self._add_fip(router, dst_fip, fixed_address=dst_machine.ip)
-        router.process(self.agent)
-
-        protocol_port = net_helpers.get_free_namespace_port(
-            l3_constants.PROTO_NAME_TCP, dst_machine.namespace)
-        # client sends to fip
-        netcat = net_helpers.NetcatTester(
-            src_machine.namespace, dst_machine.namespace,
-            dst_fip, protocol_port,
-            protocol=net_helpers.NetcatTester.TCP)
-        self.addCleanup(netcat.stop_processes)
-        self.assertTrue(netcat.test_connectivity())
diff --git a/neutron/tests/functional/agent/l3/test_metadata_proxy.py b/neutron/tests/functional/agent/l3/test_metadata_proxy.py
deleted file mode 100644 (file)
index fb0aa97..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os.path
-import time
-
-import webob
-import webob.dec
-import webob.exc
-
-from neutron.agent.linux import dhcp
-from neutron.agent.linux import utils
-from neutron.tests.common import machine_fixtures
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.l3 import framework
-from neutron.tests.functional.agent.linux import helpers
-
-
-METADATA_REQUEST_TIMEOUT = 60
-METADATA_REQUEST_SLEEP = 5
-
-
-class MetadataFakeProxyHandler(object):
-
-    def __init__(self, status):
-        self.status = status
-
-    @webob.dec.wsgify()
-    def __call__(self, req):
-        return webob.Response(status=self.status)
-
-
-class MetadataL3AgentTestCase(framework.L3AgentTestFramework):
-
-    SOCKET_MODE = 0o644
-
-    def _create_metadata_fake_server(self, status):
-        server = utils.UnixDomainWSGIServer('metadata-fake-server')
-        self.addCleanup(server.stop)
-
-        # NOTE(cbrandily): TempDir fixture creates a folder with 0o700
-        # permissions but metadata_proxy_socket folder must be readable by all
-        # users
-        self.useFixture(
-            helpers.RecursivePermDirFixture(
-                os.path.dirname(self.agent.conf.metadata_proxy_socket), 0o555))
-        server.start(MetadataFakeProxyHandler(status),
-                     self.agent.conf.metadata_proxy_socket,
-                     workers=0, backlog=4096, mode=self.SOCKET_MODE)
-
-    def _query_metadata_proxy(self, machine):
-        url = 'http://%(host)s:%(port)s' % {'host': dhcp.METADATA_DEFAULT_IP,
-                                            'port': dhcp.METADATA_PORT}
-        cmd = 'curl', '--max-time', METADATA_REQUEST_TIMEOUT, '-D-', url
-        i = 0
-        CONNECTION_REFUSED_TIMEOUT = METADATA_REQUEST_TIMEOUT // 2
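-        # Retry while the proxy socket is not yet listening ('Connection
-        # refused'), for at most half of the overall request timeout.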
-        while i <= CONNECTION_REFUSED_TIMEOUT:
-            try:
-                raw_headers = machine.execute(cmd)
-                break
-            except RuntimeError as e:
-                if 'Connection refused' in str(e):
-                    time.sleep(METADATA_REQUEST_SLEEP)
-                    i += METADATA_REQUEST_SLEEP
-                else:
-                    self.fail('metadata proxy unreachable '
-                              'on %s before timeout' % url)
-
-        if i > CONNECTION_REFUSED_TIMEOUT:
-            self.fail('Timed out waiting for the metadata proxy to become '
-                      'available')
-        return raw_headers.splitlines()[0]
-
-    def test_access_to_metadata_proxy(self):
-        """Test access to the l3-agent metadata proxy.
-
-        The test creates:
-         * A l3-agent metadata service:
-           * A router (which creates a metadata proxy in the router namespace),
-           * A fake metadata server
-         * A "client" namespace (simulating a vm) with a port on router
-           internal subnet.
-
-        The test queries from the "client" namespace the metadata proxy on
-        http://169.254.169.254 and asserts that the metadata proxy added
-        the X-Forwarded-For and X-Neutron-Router-Id headers to the request
-        and forwarded the http request to the fake metadata server and the
-        response to the "client" namespace.
-        """
-        router_info = self.generate_router_info(enable_ha=False)
-        router = self.manage_router(self.agent, router_info)
-        self._create_metadata_fake_server(webob.exc.HTTPOk.code)
-
-        # Create and configure client namespace
-        router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0])
-        br_int = framework.get_ovs_bridge(
-            self.agent.conf.ovs_integration_bridge)
-
-        machine = self.useFixture(
-            machine_fixtures.FakeMachine(
-                br_int,
-                net_helpers.increment_ip_cidr(router_ip_cidr),
-                router_ip_cidr.partition('/')[0]))
-
-        # Query metadata proxy
-        firstline = self._query_metadata_proxy(machine)
-
-        # Check status code
-        self.assertIn(str(webob.exc.HTTPOk.code), firstline.split())
-
-
-class UnprivilegedUserMetadataL3AgentTestCase(MetadataL3AgentTestCase):
-    """Test metadata proxy with least privileged user.
-
-    The least privileged user has uid=65534 and is commonly, but not
-    always, named 'nobody'; that is why we use its uid.
-    """
-
-    SOCKET_MODE = 0o664
-
-    def setUp(self):
-        super(UnprivilegedUserMetadataL3AgentTestCase, self).setUp()
-        self.agent.conf.set_override('metadata_proxy_user', '65534')
-        self.agent.conf.set_override('metadata_proxy_watch_log', False)
-
-
-class UnprivilegedUserGroupMetadataL3AgentTestCase(MetadataL3AgentTestCase):
-    """Test metadata proxy with least privileged user/group.
-
-    The least privileged user has uid=65534 and is commonly, but not
-    always, named 'nobody'; that is why we use its uid.
-    Its group has gid=65534 and is commonly named 'nobody' or 'nogroup';
-    that is why we use its gid.
-    """
-
-    SOCKET_MODE = 0o666
-
-    def setUp(self):
-        super(UnprivilegedUserGroupMetadataL3AgentTestCase, self).setUp()
-        self.agent.conf.set_override('metadata_proxy_user', '65534')
-        self.agent.conf.set_override('metadata_proxy_group', '65534')
-        self.agent.conf.set_override('metadata_proxy_watch_log', False)
diff --git a/neutron/tests/functional/agent/l3/test_namespace_manager.py b/neutron/tests/functional/agent/l3/test_namespace_manager.py
deleted file mode 100644 (file)
index 331e53c..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (c) 2015 Rackspace
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.agent.l3 import dvr_snat_ns
-from neutron.agent.l3 import namespace_manager
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.tests.functional import base
-
-_uuid = uuidutils.generate_uuid
-
-
-class NamespaceManagerTestFramework(base.BaseSudoTestCase):
-
-    def setUp(self):
-        super(NamespaceManagerTestFramework, self).setUp()
-        self.agent_conf = mock.MagicMock()
-        self.metadata_driver_mock = mock.Mock()
-        self.namespace_manager = namespace_manager.NamespaceManager(
-            self.agent_conf, driver=None,
-            metadata_driver=self.metadata_driver_mock)
-
-    def _create_namespace(self, router_id, ns_class):
-        namespace = ns_class(router_id, self.agent_conf, driver=None,
-                             use_ipv6=False)
-        namespace.create()
-        self.addCleanup(self._delete_namespace, namespace)
-        return namespace.name
-
-    def _delete_namespace(self, namespace):
-        try:
-            namespace.delete()
-        except RuntimeError as e:
-            # If the namespace didn't exist when delete was attempted, mission
-            # accomplished. Otherwise, re-raise the exception
-            if 'No such file or directory' not in str(e):
-                raise e
-
-    def _namespace_exists(self, namespace):
-        ip = ip_lib.IPWrapper(namespace=namespace)
-        return ip.netns.exists(namespace)
-
-
-class NamespaceManagerTestCase(NamespaceManagerTestFramework):
-
-    def test_namespace_manager(self):
-        router_id = _uuid()
-        router_id_to_delete = _uuid()
-        to_keep = set()
-        to_delete = set()
-        to_retrieve = set()
-        to_keep.add(self._create_namespace(router_id,
-                                           namespaces.RouterNamespace))
-        to_keep.add(self._create_namespace(router_id,
-                                           dvr_snat_ns.SnatNamespace))
-        to_delete.add(self._create_namespace(router_id_to_delete,
-                                             dvr_snat_ns.SnatNamespace))
-        to_retrieve = to_keep | to_delete
-
-        with mock.patch.object(namespace_manager.NamespaceManager, 'list_all',
-                               return_value=to_retrieve):
-            with self.namespace_manager as ns_manager:
-                for ns_name in to_keep:
-                    id_to_keep = ns_manager.get_prefix_and_id(ns_name)[1]
-                    ns_manager.keep_router(id_to_keep)
-
-        for ns_name in to_keep:
-            self.assertTrue(self._namespace_exists(ns_name))
-        for ns_name in to_delete:
-            (self.metadata_driver_mock.destroy_monitored_metadata_proxy.
-             assert_called_once_with(mock.ANY,
-                                     router_id_to_delete,
-                                     self.agent_conf))
-            self.assertFalse(self._namespace_exists(ns_name))
diff --git a/neutron/tests/functional/agent/linux/__init__.py b/neutron/tests/functional/agent/linux/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/agent/linux/base.py b/neutron/tests/functional/agent/linux/base.py
deleted file mode 100644 (file)
index 8ed1d47..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2014 Cisco Systems, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import testscenarios
-
-from neutron.tests import base as tests_base
-from neutron.tests.functional import base
-
-
-MARK_VALUE = '0x1'
-MARK_MASK = '0xffffffff'
-ICMP_MARK_RULE = ('-j MARK --set-xmark %(value)s/%(mask)s'
-                  % {'value': MARK_VALUE, 'mask': MARK_MASK})
-MARKED_BLOCK_RULE = '-m mark --mark %s -j DROP' % MARK_VALUE
-ICMP_BLOCK_RULE = '-p icmp -j DROP'
-
-
-# TODO(jschwarz): Move this helper to neutron/tests/common/
-get_rand_name = tests_base.get_rand_name
-
-
-# Regarding MRO, it goes BaseOVSLinuxTestCase, WithScenarios,
-# BaseSudoTestCase, ..., UnitTest, object. setUp is not defined in
-# WithScenarios, so it will correctly be found in BaseSudoTestCase.
-class BaseOVSLinuxTestCase(testscenarios.WithScenarios, base.BaseSudoTestCase):
-    scenarios = [
-        ('vsctl', dict(ovsdb_interface='vsctl')),
-        ('native', dict(ovsdb_interface='native')),
-    ]
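-    # testscenarios runs each test in this class once per scenario above,
-    # exercising both the vsctl CLI and the native OVSDB backends.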
-
-    def setUp(self):
-        super(BaseOVSLinuxTestCase, self).setUp()
-        self.config(group='OVS', ovsdb_interface=self.ovsdb_interface)
diff --git a/neutron/tests/functional/agent/linux/bin/__init__.py b/neutron/tests/functional/agent/linux/bin/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/agent/linux/bin/ipt_binname.py b/neutron/tests/functional/agent/linux/bin/ipt_binname.py
deleted file mode 100755 (executable)
index 79bd9be..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#! /usr/bin/env python
-
-# Copyright (C) 2014 VA Linux Systems Japan K.K.
-# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from __future__ import print_function
-import sys
-
-import eventlet
-
-
-def print_binary_name():
-    # NOTE(yamamoto): Don't move this import to module-level.
-    # The aim is to test importing from eventlet non-main thread.
-    # See Bug #1367075 for details.
-    from neutron.agent.linux import iptables_manager
-
-    print(iptables_manager.binary_name)
-
-if __name__ == "__main__":
-    if 'spawn' in sys.argv:
-        eventlet.spawn(print_binary_name).wait()
-    else:
-        print_binary_name()
diff --git a/neutron/tests/functional/agent/linux/helpers.py b/neutron/tests/functional/agent/linux/helpers.py
deleted file mode 100644 (file)
index 4cd40f5..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import os
-
-import fixtures
-
-from neutron.agent.linux import utils
-from neutron.tests import tools
-
-
-class RecursivePermDirFixture(fixtures.Fixture):
-    """Ensure at least perms permissions on directory and ancestors."""
-
-    def __init__(self, directory, perms):
-        super(RecursivePermDirFixture, self).__init__()
-        self.directory = directory
-        self.least_perms = perms
-
-    def _setUp(self):
-        previous_directory = None
-        current_directory = self.directory
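-        # Walk up the tree OR-ing in the required bits at each level; the
-        # loop terminates at the root, where dirname() returns its input
-        # unchanged.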
-        while previous_directory != current_directory:
-            perms = os.stat(current_directory).st_mode
-            if perms & self.least_perms != self.least_perms:
-                os.chmod(current_directory, perms | self.least_perms)
-            previous_directory = current_directory
-            current_directory = os.path.dirname(current_directory)
-
-
-class AdminDirFixture(fixtures.Fixture):
-    """Handle directory create/delete with admin permissions required"""
-
-    def __init__(self, directory):
-        super(AdminDirFixture, self).__init__()
-        self.directory = directory
-
-    def _setUp(self):
-        # NOTE(cbrandily): Ensure we will not delete a directory existing
-        # before test run during cleanup.
-        if os.path.exists(self.directory):
-            tools.fail('%s already exists' % self.directory)
-
-        create_cmd = ['mkdir', '-p', self.directory]
-        delete_cmd = ['rm', '-r', self.directory]
-        utils.execute(create_cmd, run_as_root=True)
-        self.addCleanup(utils.execute, delete_cmd, run_as_root=True)
diff --git a/neutron/tests/functional/agent/linux/simple_daemon.py b/neutron/tests/functional/agent/linux/simple_daemon.py
deleted file mode 100644 (file)
index fa8b047..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.agent.linux import daemon
-
-
-def main():
-
-    class SimpleDaemon(daemon.Daemon):
-        """The purpose of this daemon is to serve as an example, and also as
-        a dummy daemon, which can be invoked by functional testing, it
-        does nothing but setting the pid file, and staying detached in the
-        background.
-        """
-
-        def run(self):
-            while True:
-                time.sleep(10)
-
-    opts = [
-        cfg.StrOpt('uuid',
-                   help=_('uuid provided from the command line '
-                          'so external_process can track us via the '
-                          '/proc/cmdline interface.'),
-                   required=True),
-        cfg.StrOpt('pid_file',
-                   help=_('Location of pid file of this process.'),
-                   required=True)
-    ]
-
-    cfg.CONF.register_cli_opts(opts)
-    # Don't get the default configuration file
-    cfg.CONF(project='neutron', default_config_files=[])
-    simple_daemon = SimpleDaemon(cfg.CONF.pid_file,
-                                 uuid=cfg.CONF.uuid)
-    simple_daemon.start()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/neutron/tests/functional/agent/linux/test_async_process.py b/neutron/tests/functional/agent/linux/test_async_process.py
deleted file mode 100644 (file)
index 07225c9..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import eventlet
-import six
-
-from neutron._i18n import _
-from neutron.agent.linux import async_process
-from neutron.agent.linux import utils
-from neutron.tests import base
-
-
-class AsyncProcessTestFramework(base.BaseTestCase):
-
-    def setUp(self):
-        super(AsyncProcessTestFramework, self).setUp()
-        self.test_file_path = self.get_temp_file_path('test_async_process.tmp')
-        self.data = [six.text_type(x) for x in range(4)]
-        with open(self.test_file_path, 'w') as f:
-            f.writelines('%s\n' % item for item in self.data)
-
-    def _check_stdout(self, proc):
-        # Ensure that all the output from the file is read
-        output = []
-        while output != self.data:
-            new_output = list(proc.iter_stdout())
-            if new_output:
-                output += new_output
-            eventlet.sleep(0.01)
-
-
-class TestAsyncProcess(AsyncProcessTestFramework):
-    def _safe_stop(self, proc):
-        try:
-            proc.stop()
-        except async_process.AsyncProcessException:
-            pass
-
-    def test_stopping_async_process_lifecycle(self):
-        proc = async_process.AsyncProcess(['tail', '-f',
-                                           self.test_file_path])
-        self.addCleanup(self._safe_stop, proc)
-        proc.start(block=True)
-        self._check_stdout(proc)
-        proc.stop(block=True)
-
-        # Ensure that the process and greenthreads have stopped
-        proc._process.wait()
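-        # A return code of -9 means the process was terminated by SIGKILL.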
-        self.assertEqual(proc._process.returncode, -9)
-        for watcher in proc._watchers:
-            watcher.wait()
-
-    def test_async_process_respawns(self):
-        proc = async_process.AsyncProcess(['tail', '-f',
-                                           self.test_file_path],
-                                          respawn_interval=0)
-        self.addCleanup(self._safe_stop, proc)
-        proc.start()
-
-        # Ensure that the same output is read twice
-        self._check_stdout(proc)
-        pid = proc.pid
-        utils.execute(['kill', '-9', pid])
-        utils.wait_until_true(
-            lambda: proc.is_active() and pid != proc.pid,
-            timeout=5,
-            sleep=0.01,
-            exception=RuntimeError(_("Async process didn't respawn")))
-        self._check_stdout(proc)
diff --git a/neutron/tests/functional/agent/linux/test_bridge_lib.py b/neutron/tests/functional/agent/linux/test_bridge_lib.py
deleted file mode 100644 (file)
index 5eb90d1..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2015 Thales Services SAS
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-from neutron.agent.linux import bridge_lib
-from neutron.tests.common import net_helpers
-from neutron.tests.functional import base
-
-
-class BridgeLibTestCase(base.BaseSudoTestCase):
-
-    def setUp(self):
-        super(BridgeLibTestCase, self).setUp()
-        self.bridge, self.port_fixture = self.create_bridge_port_fixture()
-
-    def create_bridge_port_fixture(self):
-        bridge = self.useFixture(
-            net_helpers.LinuxBridgeFixture(namespace=None)).bridge
-        port_fixture = self.useFixture(
-            net_helpers.LinuxBridgePortFixture(bridge))
-        return bridge, port_fixture
-
-    def test_is_bridged_interface(self):
-        self.assertTrue(
-            bridge_lib.is_bridged_interface(self.port_fixture.br_port.name))
-
-    def test_is_not_bridged_interface(self):
-        self.assertFalse(
-            bridge_lib.is_bridged_interface(self.port_fixture.port.name))
-
-    def test_get_bridge_names(self):
-        self.assertIn(self.bridge.name, bridge_lib.get_bridge_names())
-
-    def test_get_interface_bridge(self):
-        bridge = bridge_lib.BridgeDevice.get_interface_bridge(
-            self.port_fixture.br_port.name)
-        self.assertEqual(self.bridge.name, bridge.name)
-
-    def test_get_interface_no_bridge(self):
-        bridge = bridge_lib.BridgeDevice.get_interface_bridge(
-            self.port_fixture.port.name)
-        self.assertIsNone(bridge)
-
-    def test_get_interfaces(self):
-        self.assertEqual(
-            [self.port_fixture.br_port.name], self.bridge.get_interfaces())
-
-    def test_get_interfaces_no_bridge(self):
-        bridge = bridge_lib.BridgeDevice('--fake--')
-        self.assertEqual([], bridge.get_interfaces())
-
-    def test_disable_ipv6(self):
-        sysfs_path = ("/proc/sys/net/ipv6/conf/%s/disable_ipv6" %
-                      self.bridge.name)
-
-        # first, make sure IPv6 is enabled (disable_ipv6 reads "0")
-        with open(sysfs_path, 'r') as sysfs_disable_ipv6_file:
-            sysfs_disable_ipv6 = sysfs_disable_ipv6_file.read()
-            self.assertEqual("0\n", sysfs_disable_ipv6)
-
-        self.assertEqual(0, self.bridge.disable_ipv6())
-        with open(sysfs_path, 'r') as sysfs_disable_ipv6_file:
-            sysfs_disable_ipv6 = sysfs_disable_ipv6_file.read()
-            self.assertEqual("1\n", sysfs_disable_ipv6)
diff --git a/neutron/tests/functional/agent/linux/test_dhcp.py b/neutron/tests/functional/agent/linux/test_dhcp.py
deleted file mode 100644
index dbd4de5..0000000
--- a/neutron/tests/functional/agent/linux/test_dhcp.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from oslo_config import cfg
-
-from neutron.agent.common import config
-from neutron.agent.dhcp import config as dhcp_conf
-from neutron.agent.linux import dhcp
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.common import config as common_conf
-from neutron.tests import base as tests_base
-from neutron.tests.common import net_helpers
-from neutron.tests.functional import base as functional_base
-
-
-class TestDhcp(functional_base.BaseSudoTestCase):
-    def setUp(self):
-        super(TestDhcp, self).setUp()
-        conf = cfg.ConfigOpts()
-        conf.register_opts(config.INTERFACE_DRIVER_OPTS)
-        conf.register_opts(interface.OPTS)
-        conf.register_opts(common_conf.core_opts)
-        conf.register_opts(dhcp_conf.DHCP_AGENT_OPTS)
-        conf.set_override('interface_driver', 'openvswitch')
-        conf.set_override('host', 'foo_host')
-        self.conf = conf
-        br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
-        self.conf.set_override('ovs_integration_bridge', br_int.br_name)
-
-    def test_cleanup_stale_devices(self):
-        plugin = mock.MagicMock()
-        dev_mgr = dhcp.DeviceManager(self.conf, plugin)
-        network = {
-            'id': 'foo_id',
-            'tenant_id': 'foo_tenant',
-            'namespace': 'qdhcp-foo_id',
-            'ports': [],
-            'subnets': [tests_base.AttributeDict({'id': 'subnet_foo_id',
-                                                  'enable_dhcp': True,
-                                                  'ipv6_address_mode': None,
-                                                  'ipv6_ra_mode': None,
-                                                  'cidr': '10.0.0.0/24',
-                                                  'ip_version': 4,
-                                                  'gateway_ip': '10.0.0.1'})]}
-        dhcp_port = {
-            'id': 'foo_port_id',
-            'mac_address': '10:22:33:44:55:67',
-            'fixed_ips': [tests_base.AttributeDict(
-                {'subnet_id': 'subnet_foo_id', 'ip_address': '10.0.0.1'})]
-        }
-        plugin.create_dhcp_port.return_value = tests_base.AttributeDict(
-            dhcp_port)
-        dev_mgr.driver.plug("foo_id",
-                            "foo_id2",
-                            "tapfoo_id2",
-                            "10:22:33:44:55:68",
-                            namespace="qdhcp-foo_id")
-        dev_mgr.driver.plug("foo_id",
-                            "foo_id3",
-                            "tapfoo_id3",
-                            "10:22:33:44:55:69",
-                            namespace="qdhcp-foo_id")
-        ipw = ip_lib.IPWrapper(namespace="qdhcp-foo_id")
-        devices = ipw.get_devices(exclude_loopback=True)
-        self.addCleanup(ipw.netns.delete, 'qdhcp-foo_id')
-        self.assertEqual(2, len(devices))
-        # setting up DHCP for the network triggers cleanup of stale devices
-        dev_mgr.setup(tests_base.AttributeDict(network))
-        devices = ipw.get_devices(exclude_loopback=True)
-        # only one non-loopback device should remain
-        self.assertEqual(1, len(devices))
-        self.assertEqual("tapfoo_port_id", devices[0].name)
diff --git a/neutron/tests/functional/agent/linux/test_interface.py b/neutron/tests/functional/agent/linux/test_interface.py
deleted file mode 100644
index 588341e..0000000
--- a/neutron/tests/functional/agent/linux/test_interface.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-import testtools
-
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.common import exceptions
-from neutron.common import utils
-from neutron.tests import base as tests_base
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.linux import base
-
-
-class OVSInterfaceDriverTestCase(base.BaseOVSLinuxTestCase):
-    def setUp(self):
-        super(OVSInterfaceDriverTestCase, self).setUp()
-        conf = cfg.ConfigOpts()
-        conf.register_opts(interface.OPTS)
-        self.interface = interface.OVSInterfaceDriver(conf)
-
-    def test_plug_checks_if_bridge_exists(self):
-        with testtools.ExpectedException(exceptions.BridgeDoesNotExist):
-            self.interface.plug(network_id=42,
-                                port_id=71,
-                                device_name='not_a_device',
-                                mac_address='',
-                                bridge='not_a_bridge',
-                                namespace='not_a_namespace')
-
-    def test_plug_succeeds(self):
-        device_name = tests_base.get_rand_name()
-        mac_address = utils.get_random_mac('fa:16:3e:00:00:00'.split(':'))
-        namespace = self.useFixture(net_helpers.NamespaceFixture()).name
-        bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
-
-        self.assertFalse(bridge.get_port_name_list())
-        self.interface.plug(network_id=uuidutils.generate_uuid(),
-                            port_id=uuidutils.generate_uuid(),
-                            device_name=device_name,
-                            mac_address=mac_address,
-                            bridge=bridge.br_name,
-                            namespace=namespace)
-        self.assertIn(device_name, bridge.get_port_name_list())
-        self.assertTrue(ip_lib.device_exists(device_name, namespace))
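test_plug_succeeds derives a MAC from the fa:16:3e base; a hedged sketch of a get_random_mac-style helper, assuming the behavior is to randomize only the zero octets, could be:

    import random

    def get_random_mac(base_mac):
        # Keep the vendor prefix and randomize the zero octets, e.g.
        # ['fa', '16', '3e', '00', '00', '00'] -> 'fa:16:3e:5d:02:9c'.
        return ':'.join(octet if octet != '00'
                        else '%02x' % random.getrandbits(8)
                        for octet in base_mac)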
diff --git a/neutron/tests/functional/agent/linux/test_ip_lib.py b/neutron/tests/functional/agent/linux/test_ip_lib.py
deleted file mode 100644
index 3389bcc..0000000
--- a/neutron/tests/functional/agent/linux/test_ip_lib.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import importutils
-
-from neutron.agent.common import config
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.common import utils
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.linux import base
-from neutron.tests.functional import base as functional_base
-
-LOG = logging.getLogger(__name__)
-Device = collections.namedtuple('Device',
-                                'name ip_cidrs mac_address namespace')
-
-WRONG_IP = '0.0.0.0'
-TEST_IP = '240.0.0.1'
-
-
-class IpLibTestFramework(functional_base.BaseSudoTestCase):
-    def setUp(self):
-        super(IpLibTestFramework, self).setUp()
-        self._configure()
-
-    def _configure(self):
-        config.register_interface_driver_opts_helper(cfg.CONF)
-        cfg.CONF.set_override(
-            'interface_driver',
-            'neutron.agent.linux.interface.OVSInterfaceDriver')
-        cfg.CONF.register_opts(interface.OPTS)
-        self.driver = importutils.import_object(cfg.CONF.interface_driver,
-                                                cfg.CONF)
-
-    def generate_device_details(self, name=None, ip_cidrs=None,
-                                mac_address=None, namespace=None):
-        return Device(name or base.get_rand_name(),
-                      ip_cidrs or ["%s/24" % TEST_IP],
-                      mac_address or
-                      utils.get_random_mac('fa:16:3e:00:00:00'.split(':')),
-                      namespace or base.get_rand_name())
-
-    def _safe_delete_device(self, device):
-        try:
-            device.link.delete()
-        except RuntimeError:
-            LOG.debug('Could not delete %s, was it already deleted?', device)
-
-    def manage_device(self, attr):
-        """Create a tuntap with the specified attributes.
-
-        The device is cleaned up at the end of the test.
-
-        :param attr: A Device namedtuple
-        :return: A tuntap ip_lib.IPDevice
-        """
-        ip = ip_lib.IPWrapper(namespace=attr.namespace)
-        if attr.namespace:
-            ip.netns.add(attr.namespace)
-            self.addCleanup(ip.netns.delete, attr.namespace)
-        tap_device = ip.add_tuntap(attr.name)
-        self.addCleanup(self._safe_delete_device, tap_device)
-        tap_device.link.set_address(attr.mac_address)
-        self.driver.init_l3(attr.name, attr.ip_cidrs,
-                            namespace=attr.namespace)
-        tap_device.link.set_up()
-        return tap_device
-
-
-class IpLibTestCase(IpLibTestFramework):
-    def test_namespace_exists(self):
-        namespace = self.useFixture(net_helpers.NamespaceFixture())
-        self.assertTrue(namespace.ip_wrapper.netns.exists(namespace.name))
-        namespace.destroy()
-        self.assertFalse(namespace.ip_wrapper.netns.exists(namespace.name))
-
-    def test_device_exists(self):
-        attr = self.generate_device_details()
-
-        self.assertFalse(
-            ip_lib.device_exists(attr.name, namespace=attr.namespace))
-
-        device = self.manage_device(attr)
-
-        self.assertTrue(
-            ip_lib.device_exists(device.name, namespace=attr.namespace))
-
-        device.link.delete()
-
-        self.assertFalse(
-            ip_lib.device_exists(attr.name, namespace=attr.namespace))
-
-    def test_ipdevice_exists(self):
-        attr = self.generate_device_details()
-        device = self.manage_device(attr)
-        self.assertTrue(device.exists())
-        device.link.delete()
-        self.assertFalse(device.exists())
-
-    def test_vxlan_exists(self):
-        attr = self.generate_device_details()
-        ip = ip_lib.IPWrapper(namespace=attr.namespace)
-        ip.netns.add(attr.namespace)
-        self.addCleanup(ip.netns.delete, attr.namespace)
-        self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace))
-        device = ip.add_vxlan(attr.name, 9999)
-        self.addCleanup(self._safe_delete_device, device)
-        self.assertTrue(ip_lib.vxlan_in_use(9999, namespace=attr.namespace))
-        device.link.delete()
-        self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace))
-
-    def test_ipwrapper_get_device_by_ip_None(self):
-        ip_wrapper = ip_lib.IPWrapper(namespace=None)
-        self.assertIsNone(ip_wrapper.get_device_by_ip(ip=None))
-
-    def test_ipwrapper_get_device_by_ip(self):
-        attr = self.generate_device_details()
-        self.manage_device(attr)
-        ip_wrapper = ip_lib.IPWrapper(namespace=attr.namespace)
-        self.assertEqual(attr.name, ip_wrapper.get_device_by_ip(TEST_IP).name)
-        self.assertIsNone(ip_wrapper.get_device_by_ip(WRONG_IP))
-
-    def test_device_exists_with_ips_and_mac(self):
-        attr = self.generate_device_details()
-        device = self.manage_device(attr)
-        self.assertTrue(
-            ip_lib.device_exists_with_ips_and_mac(*attr))
-
-        wrong_ip_cidr = '10.0.0.1/8'
-        wrong_mac_address = 'aa:aa:aa:aa:aa:aa'
-
-        attr = self.generate_device_details(name='wrong_name')
-        self.assertFalse(
-            ip_lib.device_exists_with_ips_and_mac(*attr))
-
-        attr = self.generate_device_details(ip_cidrs=[wrong_ip_cidr])
-        self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr))
-
-        attr = self.generate_device_details(mac_address=wrong_mac_address)
-        self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr))
-
-        attr = self.generate_device_details(namespace='wrong_namespace')
-        self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr))
-
-        device.link.delete()
-
-    def test_get_routing_table(self):
-        attr = self.generate_device_details()
-        device = self.manage_device(attr)
-        device_ip = attr.ip_cidrs[0].split('/')[0]
-        destination = '8.8.8.0/24'
-        device.route.add_route(destination, device_ip)
-
-        expected_routes = [{'nexthop': device_ip,
-                            'device': attr.name,
-                            'destination': destination,
-                            'scope': None},
-                           {'nexthop': None,
-                            'device': attr.name,
-                            'destination': str(
-                                netaddr.IPNetwork(attr.ip_cidrs[0]).cidr),
-                            'scope': 'link'}]
-
-        routes = ip_lib.get_routing_table(4, namespace=attr.namespace)
-        self.assertEqual(expected_routes, routes)
-
-    def _check_for_device_name(self, ip, name, should_exist):
-        exist = any(d for d in ip.get_devices() if d.name == name)
-        self.assertEqual(should_exist, exist)
-
-    def test_dummy_exists(self):
-        namespace = self.useFixture(net_helpers.NamespaceFixture())
-        dev_name = base.get_rand_name()
-        device = namespace.ip_wrapper.add_dummy(dev_name)
-        self.addCleanup(self._safe_delete_device, device)
-        self._check_for_device_name(namespace.ip_wrapper, dev_name, True)
-        device.link.delete()
-        self._check_for_device_name(namespace.ip_wrapper, dev_name, False)
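The tests above pass a Device namedtuple around and unpack it with *attr; for illustration, with hypothetical values:

    import collections

    Device = collections.namedtuple('Device',
                                    'name ip_cidrs mac_address namespace')
    attr = Device('tap-test', ['240.0.0.1/24'], 'fa:16:3e:00:00:01', 'ns-x')
    # *attr expands to (name, ip_cidrs, mac_address, namespace), which is
    # why calls like device_exists_with_ips_and_mac(*attr) work above.
    name, ip_cidrs, mac_address, namespace = attr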
diff --git a/neutron/tests/functional/agent/linux/test_ip_monitor.py b/neutron/tests/functional/agent/linux/test_ip_monitor.py
deleted file mode 100644
index f497a40..0000000
--- a/neutron/tests/functional/agent/linux/test_ip_monitor.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.linux import async_process
-from neutron.agent.linux import ip_monitor
-from neutron.tests.functional.agent.linux import test_ip_lib
-
-
-class TestIPMonitor(test_ip_lib.IpLibTestFramework):
-    def setUp(self):
-        super(TestIPMonitor, self).setUp()
-        attr = self.generate_device_details()
-        self.device = self.manage_device(attr)
-        self.monitor = ip_monitor.IPMonitor(attr.namespace)
-        self.addCleanup(self._safe_stop_monitor)
-
-    def _safe_stop_monitor(self):
-        try:
-            self.monitor.stop()
-        except async_process.AsyncProcessException:
-            pass
-
-    def test_ip_monitor_lifecycle(self):
-        self.assertFalse(self.monitor.is_active())
-        self.monitor.start()
-        self.assertTrue(self.monitor.is_active())
-        self.monitor.stop()
-        self.assertFalse(self.monitor.is_active())
-
-    def test_ip_monitor_events(self):
-        self.monitor.start()
-
-        cidr = '169.254.128.1/24'
-        self.device.addr.add(cidr)
-        self._assert_event(expected_name=self.device.name,
-                           expected_cidr=cidr,
-                           expected_added=True,
-                           event=ip_monitor.IPMonitorEvent.from_text(
-                               next(self.monitor.iter_stdout(block=True))))
-
-        self.device.addr.delete(cidr)
-        self._assert_event(expected_name=self.device.name,
-                           expected_cidr=cidr,
-                           expected_added=False,
-                           event=ip_monitor.IPMonitorEvent.from_text(
-                               next(self.monitor.iter_stdout(block=True))))
-
-    def _assert_event(self,
-                      expected_name,
-                      expected_cidr,
-                      expected_added,
-                      event):
-        self.assertEqual(expected_name, event.interface)
-        self.assertEqual(expected_added, event.added)
-        self.assertEqual(expected_cidr, event.cidr)
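IPMonitorEvent.from_text parses one line of `ip monitor` output; a rough sketch of such a parser, assuming the usual one-line `ip -o monitor address` format, might be:

    def parse_address_event(line):
        # Assumed format, e.g.
        #   "2: tap0    inet 169.254.128.1/24 scope global tap0 ..."
        # with deletions prefixed by the word "Deleted".
        added = not line.startswith('Deleted')
        fields = line.split()
        if not added:
            fields = fields[1:]
        return {'interface': fields[1], 'cidr': fields[3], 'added': added}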
diff --git a/neutron/tests/functional/agent/linux/test_ipset.py b/neutron/tests/functional/agent/linux/test_ipset.py
deleted file mode 100644
index 2a77943..0000000
--- a/neutron/tests/functional/agent/linux/test_ipset.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import ipset_manager
-from neutron.agent.linux import iptables_manager
-from neutron.tests.common import machine_fixtures
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.linux import base
-from neutron.tests.functional import base as functional_base
-
-MAX_IPSET_NAME_LENGTH = 28
-IPSET_ETHERTYPE = 'IPv4'
-UNRELATED_IP = '1.1.1.1'
-
-
-class IpsetBase(functional_base.BaseSudoTestCase):
-
-    def setUp(self):
-        super(IpsetBase, self).setUp()
-
-        bridge = self.useFixture(net_helpers.VethBridgeFixture()).bridge
-        self.source, self.destination = self.useFixture(
-            machine_fixtures.PeerMachines(bridge)).machines
-
-        self.ipset_name = base.get_rand_name(MAX_IPSET_NAME_LENGTH, 'set-')
-        self.icmp_accept_rule = ('-p icmp -m set --match-set %s src -j ACCEPT'
-                                 % self.ipset_name)
-        self.ipset = self._create_ipset_manager_and_set(
-            ip_lib.IPWrapper(self.destination.namespace), self.ipset_name)
-        self.addCleanup(self.ipset._destroy, self.ipset_name)
-        self.dst_iptables = iptables_manager.IptablesManager(
-            namespace=self.destination.namespace)
-
-        self._add_iptables_ipset_rules()
-        self.addCleanup(self._remove_iptables_ipset_rules)
-
-    def _create_ipset_manager_and_set(self, dst_ns, set_name):
-        ipset = ipset_manager.IpsetManager(
-            namespace=dst_ns.namespace)
-
-        ipset._create_set(set_name, IPSET_ETHERTYPE)
-        return ipset
-
-    def _remove_iptables_ipset_rules(self):
-        self.dst_iptables.ipv4['filter'].remove_rule(
-            'INPUT', base.ICMP_BLOCK_RULE)
-        self.dst_iptables.ipv4['filter'].remove_rule(
-            'INPUT', self.icmp_accept_rule)
-        self.dst_iptables.apply()
-
-    def _add_iptables_ipset_rules(self):
-        self.dst_iptables.ipv4['filter'].add_rule(
-            'INPUT', self.icmp_accept_rule)
-        self.dst_iptables.ipv4['filter'].add_rule(
-            'INPUT', base.ICMP_BLOCK_RULE)
-        self.dst_iptables.apply()
-
-
-class IpsetManagerTestCase(IpsetBase):
-
-    def test_add_member_allows_ping(self):
-        self.source.assert_no_ping(self.destination.ip)
-        self.ipset._add_member_to_set(self.ipset_name, self.source.ip)
-        self.source.assert_ping(self.destination.ip)
-
-    def test_del_member_denies_ping(self):
-        self.ipset._add_member_to_set(self.ipset_name, self.source.ip)
-        self.source.assert_ping(self.destination.ip)
-
-        self.ipset._del_member_from_set(self.ipset_name, self.source.ip)
-        self.source.assert_no_ping(self.destination.ip)
-
-    def test_refresh_ipset_allows_ping(self):
-        self.ipset._refresh_set(
-            self.ipset_name, [UNRELATED_IP], IPSET_ETHERTYPE)
-        self.source.assert_no_ping(self.destination.ip)
-
-        self.ipset._refresh_set(
-            self.ipset_name, [UNRELATED_IP, self.source.ip], IPSET_ETHERTYPE)
-        self.source.assert_ping(self.destination.ip)
-
-        self.ipset._refresh_set(
-            self.ipset_name, [self.source.ip, UNRELATED_IP], IPSET_ETHERTYPE)
-        self.source.assert_ping(self.destination.ip)
-
-    def test_destroy_ipset_set(self):
-        self._remove_iptables_ipset_rules()
-        self.ipset._destroy(self.ipset_name)
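The _refresh_set calls above replace a set's membership in place; one common way to get that atomically with the ipset CLI (a sketch of the idea, not Neutron's implementation) is to build a temporary set and swap it in:

    import subprocess

    def refresh_set(name, member_ips):
        # Build a temporary set, then atomically swap it with the live one
        # so there is no window where the set is empty.
        tmp = name + '-tmp'
        subprocess.check_call(['ipset', 'create', tmp, 'hash:ip'])
        for ip in member_ips:
            subprocess.check_call(['ipset', 'add', tmp, ip])
        subprocess.check_call(['ipset', 'swap', tmp, name])
        subprocess.check_call(['ipset', 'destroy', tmp])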
diff --git a/neutron/tests/functional/agent/linux/test_iptables.py b/neutron/tests/functional/agent/linux/test_iptables.py
deleted file mode 100644
index 2bbbedd..0000000
--- a/neutron/tests/functional/agent/linux/test_iptables.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import os.path
-
-import testtools
-
-from neutron.agent.linux import iptables_manager
-from neutron.agent.linux import utils
-from neutron.common import constants
-from neutron.tests import base
-from neutron.tests.common import machine_fixtures
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.linux import base as linux_base
-from neutron.tests.functional.agent.linux.bin import ipt_binname
-from neutron.tests.functional import base as functional_base
-
-
-class IptablesManagerTestCase(functional_base.BaseSudoTestCase):
-    DIRECTION_CHAIN_MAPPER = {'ingress': 'INPUT',
-                              'egress': 'OUTPUT'}
-    PROTOCOL_BLOCK_RULE = '-p %s -j DROP'
-    PROTOCOL_PORT_BLOCK_RULE = '-p %s --dport %d -j DROP'
-
-    def setUp(self):
-        super(IptablesManagerTestCase, self).setUp()
-
-        bridge = self.useFixture(net_helpers.VethBridgeFixture()).bridge
-        self.client, self.server = self.useFixture(
-            machine_fixtures.PeerMachines(bridge)).machines
-
-        self.client_fw, self.server_fw = self.create_firewalls()
-        # The port is used in an isolated namespace, which precludes the
-        # possibility of port conflicts
-        self.port = net_helpers.get_free_namespace_port(
-            constants.PROTO_NAME_TCP, self.server.namespace)
-
-    def create_firewalls(self):
-        client_iptables = iptables_manager.IptablesManager(
-            namespace=self.client.namespace)
-        server_iptables = iptables_manager.IptablesManager(
-            namespace=self.server.namespace)
-
-        return client_iptables, server_iptables
-
-    def filter_add_rule(self, fw_manager, address, direction, protocol, port):
-        self._ipv4_filter_execute(fw_manager, 'add_rule', direction, protocol,
-                                  port)
-
-    def filter_remove_rule(self, fw_manager, address, direction, protocol,
-                           port):
-        self._ipv4_filter_execute(fw_manager, 'remove_rule', direction,
-                                  protocol, port)
-
-    def _ipv4_filter_execute(self, fw_manager, method, direction, protocol,
-                             port):
-        chain, rule = self._get_chain_and_rule(direction, protocol, port)
-        method = getattr(fw_manager.ipv4['filter'], method)
-        method(chain, rule)
-        fw_manager.apply()
-
-    def _get_chain_and_rule(self, direction, protocol, port):
-        chain = self.DIRECTION_CHAIN_MAPPER[direction]
-        if port:
-            rule = self.PROTOCOL_PORT_BLOCK_RULE % (protocol, port)
-        else:
-            rule = self.PROTOCOL_BLOCK_RULE % protocol
-        return chain, rule
-
-    def _test_with_nc(self, fw_manager, direction, port, protocol):
-        netcat = net_helpers.NetcatTester(
-            self.client.namespace, self.server.namespace,
-            self.server.ip, self.port, protocol)
-        self.addCleanup(netcat.stop_processes)
-        filter_params = 'direction %s, port %s and protocol %s' % (
-            direction, port, protocol)
-        self.assertTrue(netcat.test_connectivity(),
-                        'Failed connectivity check before applying a filter '
-                        'with %s' % filter_params)
-        # REVISIT(jlibosva): Make sure we have ASSURED conntrack entry for
-        #                    given connection
-        self.filter_add_rule(
-            fw_manager, self.server.ip, direction, protocol, port)
-        with testtools.ExpectedException(
-                RuntimeError,
-                msg='Wrongfully passed a connectivity check after applying '
-                    'a filter with %s' % filter_params):
-            netcat.test_connectivity()
-        self.filter_remove_rule(
-            fw_manager, self.server.ip, direction, protocol, port)
-        # With TCP, packets get through once the firewall rule is removed,
-        # so we would read stale data from the socket; with UDP, the process
-        # died. Either way, respawn the processes to get clean sockets.
-        self.assertTrue(netcat.test_connectivity(True),
-                        'Failed connectivity check after removing a filter '
-                        'with %s' % filter_params)
-
-    def test_icmp(self):
-        self.client.assert_ping(self.server.ip)
-        self.server_fw.ipv4['filter'].add_rule('INPUT',
-                                               linux_base.ICMP_BLOCK_RULE)
-        self.server_fw.apply()
-        self.client.assert_no_ping(self.server.ip)
-        self.server_fw.ipv4['filter'].remove_rule('INPUT',
-                                                  linux_base.ICMP_BLOCK_RULE)
-        self.server_fw.apply()
-        self.client.assert_ping(self.server.ip)
-
-    def test_mangle_icmp(self):
-        self.client.assert_ping(self.server.ip)
-        self.server_fw.ipv4['mangle'].add_rule('INPUT',
-                                               linux_base.ICMP_MARK_RULE)
-        self.server_fw.ipv4['filter'].add_rule('INPUT',
-                                               linux_base.MARKED_BLOCK_RULE)
-        self.server_fw.apply()
-        self.client.assert_no_ping(self.server.ip)
-        self.server_fw.ipv4['mangle'].remove_rule('INPUT',
-                                                  linux_base.ICMP_MARK_RULE)
-        self.server_fw.ipv4['filter'].remove_rule('INPUT',
-                                                  linux_base.MARKED_BLOCK_RULE)
-        self.server_fw.apply()
-        self.client.assert_ping(self.server.ip)
-
-    def test_tcp_input_port(self):
-        self._test_with_nc(self.server_fw, 'ingress', self.port,
-                           protocol=net_helpers.NetcatTester.TCP)
-
-    def test_tcp_output_port(self):
-        self._test_with_nc(self.client_fw, 'egress', self.port,
-                           protocol=net_helpers.NetcatTester.TCP)
-
-    def test_tcp_input(self):
-        self._test_with_nc(self.server_fw, 'ingress', port=None,
-                           protocol=net_helpers.NetcatTester.TCP)
-
-    def test_tcp_output(self):
-        self._test_with_nc(self.client_fw, 'egress', port=None,
-                           protocol=net_helpers.NetcatTester.TCP)
-
-    def test_udp_input_port(self):
-        self._test_with_nc(self.server_fw, 'ingress', self.port,
-                           protocol=net_helpers.NetcatTester.UDP)
-
-    def test_udp_output_port(self):
-        self._test_with_nc(self.client_fw, 'egress', self.port,
-                           protocol=net_helpers.NetcatTester.UDP)
-
-    def test_udp_input(self):
-        self._test_with_nc(self.server_fw, 'ingress', port=None,
-                           protocol=net_helpers.NetcatTester.UDP)
-
-    def test_udp_output(self):
-        self._test_with_nc(self.client_fw, 'egress', port=None,
-                           protocol=net_helpers.NetcatTester.UDP)
-
-
-class IptablesManagerNonRootTestCase(base.BaseTestCase):
-    @staticmethod
-    def _normalize_module_name(name):
-        for suf in ['.pyc', '.pyo']:
-            if name.endswith(suf):
-                return name[:-len(suf)] + '.py'
-        return name
-
-    def _test_binary_name(self, module, *extra_options):
-        executable = self._normalize_module_name(module.__file__)
-        expected = os.path.basename(executable)[:16]
-        observed = utils.execute([executable] + list(extra_options)).rstrip()
-        self.assertEqual(expected, observed)
-
-    def test_binary_name(self):
-        self._test_binary_name(ipt_binname)
-
-    def test_binary_name_eventlet_spawn(self):
-        self._test_binary_name(ipt_binname, 'spawn')
diff --git a/neutron/tests/functional/agent/linux/test_keepalived.py b/neutron/tests/functional/agent/linux/test_keepalived.py
deleted file mode 100644
index eacbbaf..0000000
--- a/neutron/tests/functional/agent/linux/test_keepalived.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron._i18n import _
-from neutron.agent.linux import external_process
-from neutron.agent.linux import keepalived
-from neutron.agent.linux import utils
-from neutron.tests import base
-from neutron.tests.unit.agent.linux import test_keepalived
-
-
-class KeepalivedManagerTestCase(base.BaseTestCase,
-                                test_keepalived.KeepalivedConfBaseMixin):
-
-    def setUp(self):
-        super(KeepalivedManagerTestCase, self).setUp()
-        cfg.CONF.set_override('check_child_processes_interval', 1, 'AGENT')
-
-        self.expected_config = self._get_config()
-        self.process_monitor = external_process.ProcessMonitor(cfg.CONF,
-                                                               'router')
-        self.manager = keepalived.KeepalivedManager(
-            'router1', self.expected_config, self.process_monitor,
-            conf_path=cfg.CONF.state_path)
-        self.addCleanup(self.manager.disable)
-
-    def test_keepalived_spawn(self):
-        self.manager.spawn()
-        process = external_process.ProcessManager(
-            cfg.CONF,
-            'router1',
-            namespace=None,
-            pids_path=cfg.CONF.state_path)
-        self.assertTrue(process.active)
-
-        self.assertEqual(self.expected_config.get_config_str(),
-                         self.manager.get_conf_on_disk())
-
-    def _test_keepalived_respawns(self, normal_exit=True):
-        self.manager.spawn()
-        process = self.manager.get_process()
-        pid = process.pid
-        utils.wait_until_true(
-            lambda: process.active,
-            timeout=5,
-            sleep=0.01,
-            exception=RuntimeError(_("Keepalived didn't spawn")))
-
-        exit_code = '-15' if normal_exit else '-9'
-
-        # Kill the process, and check that when it comes back
-        # it is indeed a different process
-        utils.execute(['kill', exit_code, pid], run_as_root=True)
-        utils.wait_until_true(
-            lambda: process.active and pid != process.pid,
-            timeout=5,
-            sleep=0.01,
-            exception=RuntimeError(_("Keepalived didn't respawn")))
-
-    def test_keepalived_respawns(self):
-        self._test_keepalived_respawns()
-
-    def test_keepalived_respawn_with_unexpected_exit(self):
-        self._test_keepalived_respawns(False)
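The exit_code values above are signal numbers for kill(1): -15 is SIGTERM (the normal-exit case) and -9 is SIGKILL. Python's subprocess reports death-by-signal as a negative return code, which is also what the earlier async-process test asserts:

    import signal
    import subprocess

    proc = subprocess.Popen(['sleep', '60'])
    proc.send_signal(signal.SIGTERM)   # SIGKILL for the unexpected-exit case
    proc.wait()
    assert proc.returncode == -signal.SIGTERM   # -15; SIGKILL gives -9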
diff --git a/neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py b/neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py
deleted file mode 100644
index dd4052c..0000000
--- a/neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import constants
-from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect
-from neutron.tests.common import machine_fixtures
-from neutron.tests.common import net_helpers
-from neutron.tests.functional import base as functional_base
-
-no_arping = net_helpers.assert_no_arping
-arping = net_helpers.assert_arping
-
-
-class LinuxBridgeARPSpoofTestCase(functional_base.BaseSudoTestCase):
-
-    def setUp(self):
-        super(LinuxBridgeARPSpoofTestCase, self).setUp()
-
-        lbfixture = self.useFixture(net_helpers.LinuxBridgeFixture())
-        self.addCleanup(setattr, arp_protect, 'NAMESPACE', None)
-        arp_protect.NAMESPACE = lbfixture.namespace
-        bridge = lbfixture.bridge
-        self.source, self.destination, self.observer = self.useFixture(
-            machine_fixtures.PeerMachines(bridge, amount=3)).machines
-
-    def _add_arp_protection(self, machine, addresses, extra_port_dict=None):
-        port_dict = {'fixed_ips': [{'ip_address': a} for a in addresses],
-                     'device_owner': 'nobody'}
-        if extra_port_dict:
-            port_dict.update(extra_port_dict)
-        name = net_helpers.VethFixture.get_peer_name(machine.port.name)
-        arp_protect.setup_arp_spoofing_protection(name, port_dict)
-        self.addCleanup(arp_protect.delete_arp_spoofing_protection,
-                        [name])
-
-    def test_arp_no_protection(self):
-        arping(self.source.namespace, self.destination.ip)
-        arping(self.destination.namespace, self.source.ip)
-
-    def test_arp_correct_protection(self):
-        self._add_arp_protection(self.source, [self.source.ip])
-        self._add_arp_protection(self.destination, [self.destination.ip])
-        arping(self.source.namespace, self.destination.ip)
-        arping(self.destination.namespace, self.source.ip)
-
-    def test_arp_fails_incorrect_protection(self):
-        self._add_arp_protection(self.source, ['1.1.1.1'])
-        self._add_arp_protection(self.destination, ['2.2.2.2'])
-        no_arping(self.source.namespace, self.destination.ip)
-        no_arping(self.destination.namespace, self.source.ip)
-
-    def test_arp_protection_removal(self):
-        self._add_arp_protection(self.source, ['1.1.1.1'])
-        self._add_arp_protection(self.destination, ['2.2.2.2'])
-        no_arping(self.observer.namespace, self.destination.ip)
-        no_arping(self.observer.namespace, self.source.ip)
-        name = net_helpers.VethFixture.get_peer_name(self.source.port.name)
-        arp_protect.delete_arp_spoofing_protection([name])
-        # protection should have been removed from source, but not from dest
-        arping(self.observer.namespace, self.source.ip)
-        no_arping(self.observer.namespace, self.destination.ip)
-
-    def test_arp_protection_update(self):
-        self._add_arp_protection(self.source, ['1.1.1.1'])
-        self._add_arp_protection(self.destination, ['2.2.2.2'])
-        no_arping(self.observer.namespace, self.destination.ip)
-        no_arping(self.observer.namespace, self.source.ip)
-        self._add_arp_protection(self.source, ['192.0.0.0/1'])
-        # protection should have been updated on source, but not on dest
-        arping(self.observer.namespace, self.source.ip)
-        no_arping(self.observer.namespace, self.destination.ip)
-
-    def test_arp_protection_port_security_disabled(self):
-        self._add_arp_protection(self.source, ['1.1.1.1'])
-        no_arping(self.observer.namespace, self.source.ip)
-        self._add_arp_protection(self.source, ['1.1.1.1'],
-                                 {'port_security_enabled': False})
-        arping(self.observer.namespace, self.source.ip)
-
-    def test_arp_protection_network_owner(self):
-        self._add_arp_protection(self.source, ['1.1.1.1'])
-        no_arping(self.observer.namespace, self.source.ip)
-        self._add_arp_protection(self.source, ['1.1.1.1'],
-                                 {'device_owner':
-                                  constants.DEVICE_OWNER_ROUTER_GW})
-        arping(self.observer.namespace, self.source.ip)
-
-    def test_arp_protection_dead_reference_removal(self):
-        self._add_arp_protection(self.source, ['1.1.1.1'])
-        self._add_arp_protection(self.destination, ['2.2.2.2'])
-        no_arping(self.observer.namespace, self.destination.ip)
-        no_arping(self.observer.namespace, self.source.ip)
-        name = net_helpers.VethFixture.get_peer_name(self.source.port.name)
-        # This should remove all ARP protection rules except the source port's
-        arp_protect.delete_unreferenced_arp_protection([name])
-        no_arping(self.observer.namespace, self.source.ip)
-        arping(self.observer.namespace, self.destination.ip)
diff --git a/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py b/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py
deleted file mode 100644
index d2e206a..0000000
--- a/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Tests in this module will be skipped unless:
-
- - ovsdb-client is installed
-
- - ovsdb-client can be invoked password-less via the configured root helper
-
- - sudo testing is enabled (see neutron.tests.functional.base for details)
-"""
-
-from oslo_config import cfg
-
-from neutron.agent.linux import ovsdb_monitor
-from neutron.agent.linux import utils
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.linux import base as linux_base
-from neutron.tests.functional import base as functional_base
-
-
-class BaseMonitorTest(linux_base.BaseOVSLinuxTestCase):
-
-    def setUp(self):
-        super(BaseMonitorTest, self).setUp()
-
-        rootwrap_not_configured = (cfg.CONF.AGENT.root_helper ==
-                                   functional_base.SUDO_CMD)
-        if rootwrap_not_configured:
-            # The monitor tests require a nested invocation that has
-            # to be emulated by double sudo if rootwrap is not
-            # configured.
-            self.config(group='AGENT',
-                        root_helper=" ".join([functional_base.SUDO_CMD] * 2))
-
-        self._check_test_requirements()
-        # ovsdb-client monitor needs a bridge to exist to produce any output
-        self.useFixture(net_helpers.OVSBridgeFixture())
-
-    def _check_test_requirements(self):
-        self.check_command(['ovsdb-client', 'list-dbs'],
-                           'Exit code: 1',
-                           'password-less sudo not granted for ovsdb-client',
-                           run_as_root=True)
-
-
-class TestOvsdbMonitor(BaseMonitorTest):
-
-    def setUp(self):
-        super(TestOvsdbMonitor, self).setUp()
-
-        self.monitor = ovsdb_monitor.OvsdbMonitor('Bridge')
-        self.addCleanup(self.monitor.stop)
-        self.monitor.start()
-
-    def collect_monitor_output(self):
-        output = list(self.monitor.iter_stdout())
-        if output:
-            # output[0] is the header row, with spaces separating columns;
-            # use 'other_config' as an indication of the table header.
-            self.assertIn('other_config', output[0])
-            return True
-
-    def test_monitor_generates_initial_output(self):
-        utils.wait_until_true(self.collect_monitor_output, timeout=30)
-
-
-class TestSimpleInterfaceMonitor(BaseMonitorTest):
-
-    def setUp(self):
-        super(TestSimpleInterfaceMonitor, self).setUp()
-
-        self.monitor = ovsdb_monitor.SimpleInterfaceMonitor()
-        self.addCleanup(self.monitor.stop)
-        self.monitor.start(block=True, timeout=60)
-
-    def test_has_updates(self):
-        utils.wait_until_true(lambda: self.monitor.has_updates)
-        # clear the event list
-        self.monitor.get_events()
-        self.useFixture(net_helpers.OVSPortFixture())
-        # has_updates after port addition should become True
-        utils.wait_until_true(lambda: self.monitor.has_updates is True)
-
-    def _expected_devices_events(self, devices, state):
-        """Helper to check that events are received for expected devices.
-
-        :param devices: The list of expected devices. WARNING: This list
-          is modified by this method
-        :param state: The state of the devices (added or removed)
-        """
-        events = self.monitor.get_events()
-        event_devices = [
-            (dev['name'], dev['external_ids']) for dev in events.get(state)]
-        for dev in event_devices:
-            if dev[0] in devices:
-                devices.remove(dev[0])
-                self.assertEqual(dev[1].get('iface-status'), 'active')
-            if not devices:
-                return True
-
-    def test_get_events(self):
-        utils.wait_until_true(lambda: self.monitor.has_updates)
-        devices = self.monitor.get_events()
-        self.assertTrue(devices.get('added'),
-                        'Initial call should always be true')
-        br = self.useFixture(net_helpers.OVSBridgeFixture())
-        p1 = self.useFixture(net_helpers.OVSPortFixture(br.bridge))
-        p2 = self.useFixture(net_helpers.OVSPortFixture(br.bridge))
-        added_devices = [p1.port.name, p2.port.name]
-        utils.wait_until_true(
-            lambda: self._expected_devices_events(added_devices, 'added'))
-        br.bridge.delete_port(p1.port.name)
-        br.bridge.delete_port(p2.port.name)
-        removed_devices = [p1.port.name, p2.port.name]
-        utils.wait_until_true(
-            lambda: self._expected_devices_events(removed_devices, 'removed'))
-        # restart the monitor; the first get_events() reports additions again
-        self.monitor.stop(block=True)
-        self.monitor.start(block=True, timeout=60)
-        devices = self.monitor.get_events()
-        self.assertTrue(devices.get('added'),
-                        'Initial call should always be true')
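The monitor classes wrap a long-running ovsdb-client process; a bare-bones sketch of the underlying invocation (assuming the `ovsdb-client monitor TABLE` form used in the requirements check above):

    import subprocess

    proc = subprocess.Popen(['ovsdb-client', 'monitor', 'Bridge'],
                            stdout=subprocess.PIPE, universal_newlines=True)
    header = proc.stdout.readline()   # header row; contains 'other_config'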
diff --git a/neutron/tests/functional/agent/linux/test_process_monitor.py b/neutron/tests/functional/agent/linux/test_process_monitor.py
deleted file mode 100644
index ba64d46..0000000
--- a/neutron/tests/functional/agent/linux/test_process_monitor.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from oslo_config import cfg
-from six import moves
-
-from neutron.agent.linux import external_process
-from neutron.agent.linux import utils
-from neutron.tests import base
-from neutron.tests.functional.agent.linux import simple_daemon
-
-
-UUID_FORMAT = "test-uuid-%d"
-SERVICE_NAME = "service"
-
-
-class BaseTestProcessMonitor(base.BaseTestCase):
-
-    def setUp(self):
-        super(BaseTestProcessMonitor, self).setUp()
-        cfg.CONF.set_override('check_child_processes_interval', 1, 'AGENT')
-        self._child_processes = []
-        self._process_monitor = None
-        self.create_child_processes_manager('respawn')
-        self.addCleanup(self.cleanup_spawned_children)
-
-    def create_child_processes_manager(self, action):
-        cfg.CONF.set_override('check_child_processes_action', action, 'AGENT')
-        self._process_monitor = self.build_process_monitor()
-
-    def build_process_monitor(self):
-        return external_process.ProcessMonitor(
-            config=cfg.CONF,
-            resource_type='test')
-
-    def _make_cmdline_callback(self, uuid):
-        def _cmdline_callback(pidfile):
-            cmdline = ["python", simple_daemon.__file__,
-                       "--uuid=%s" % uuid,
-                       "--pid_file=%s" % pidfile]
-            return cmdline
-        return _cmdline_callback
-
-    def spawn_n_children(self, n, service=None):
-        self._child_processes = []
-        for child_number in moves.range(n):
-            uuid = self._child_uuid(child_number)
-            _callback = self._make_cmdline_callback(uuid)
-            pm = external_process.ProcessManager(
-                conf=cfg.CONF,
-                uuid=uuid,
-                default_cmd_callback=_callback,
-                service=service)
-            pm.enable()
-            self._process_monitor.register(uuid, SERVICE_NAME, pm)
-
-            self._child_processes.append(pm)
-
-    @staticmethod
-    def _child_uuid(child_number):
-        return UUID_FORMAT % child_number
-
-    def _kill_last_child(self):
-        self._child_processes[-1].disable()
-
-    def wait_for_all_children_respawned(self):
-        def all_children_active():
-            return all(pm.active for pm in self._child_processes)
-
-        for pm in self._child_processes:
-            directory = os.path.dirname(pm.get_pid_file_name())
-            self.assertEqual(0o755, os.stat(directory).st_mode & 0o777)
-
-        # we need to allow extra time for the periodic check to run and
-        # to act properly on the processes that have gone away, under
-        # high load conditions
-        max_wait_time = (
-            cfg.CONF.AGENT.check_child_processes_interval + 5)
-        utils.wait_until_true(
-            all_children_active,
-            timeout=max_wait_time,
-            sleep=0.01,
-            exception=RuntimeError('Not all children respawned.'))
-
-    def cleanup_spawned_children(self):
-        self._process_monitor.stop()
-        for pm in self._child_processes:
-            pm.disable()
-
-
-class TestProcessMonitor(BaseTestProcessMonitor):
-
-    def test_respawn_handler(self):
-        self.spawn_n_children(2)
-        self._kill_last_child()
-        self.wait_for_all_children_respawned()
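ProcessManager.active above is pidfile-based; a sketch of how such a check can work, assuming the usual read-pidfile-then-probe-/proc approach:

    import os

    def process_active(pid_file):
        # Read the recorded pid and probe /proc to see if it still exists.
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return False
        return os.path.exists('/proc/%d' % pid)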
diff --git a/neutron/tests/functional/agent/linux/test_utils.py b/neutron/tests/functional/agent/linux/test_utils.py
deleted file mode 100644
index 5508457..0000000
--- a/neutron/tests/functional/agent/linux/test_utils.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import eventlet
-import testtools
-
-from neutron.agent.linux import async_process
-from neutron.agent.linux import utils
-from neutron.tests.functional.agent.linux import test_async_process
-
-
-class TestPIDHelpers(test_async_process.AsyncProcessTestFramework):
-    def test_get_cmdline_from_pid_and_pid_invoked_with_cmdline(self):
-        cmd = ['tail', '-f', self.test_file_path]
-        proc = async_process.AsyncProcess(cmd)
-        proc.start(block=True)
-        self.addCleanup(proc.stop)
-
-        pid = proc.pid
-        self.assertEqual(cmd, utils.get_cmdline_from_pid(pid))
-        self.assertTrue(utils.pid_invoked_with_cmdline(pid, cmd))
-        self.assertEqual([], utils.get_cmdline_from_pid(-1))
-
-    def test_wait_until_true_predicate_succeeds(self):
-        utils.wait_until_true(lambda: True)
-
-    def test_wait_until_true_predicate_fails(self):
-        with testtools.ExpectedException(eventlet.timeout.Timeout):
-            utils.wait_until_true(lambda: False, 2)
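get_cmdline_from_pid presumably reads /proc/<pid>/cmdline, where argv is NUL-separated; a hedged sketch consistent with the assertions above (including the [] result for a bad pid):

    def get_cmdline_from_pid(pid):
        # /proc/<pid>/cmdline holds argv NUL-separated with a trailing NUL;
        # a missing pid (e.g. -1) yields [] rather than an exception.
        try:
            with open('/proc/%d/cmdline' % pid) as f:
                return f.read().split('\0')[:-1]
        except IOError:
            return []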
diff --git a/neutron/tests/functional/agent/test_dhcp_agent.py b/neutron/tests/functional/agent/test_dhcp_agent.py
deleted file mode 100644
index 8dcdc03..0000000
--- a/neutron/tests/functional/agent/test_dhcp_agent.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os.path
-
-import eventlet
-import fixtures
-import mock
-import netaddr
-from oslo_config import fixture as fixture_config
-from oslo_utils import uuidutils
-
-from neutron.agent.common import config
-from neutron.agent.common import ovs_lib
-from neutron.agent.dhcp import agent
-from neutron.agent import dhcp_agent
-from neutron.agent.linux import dhcp
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import constants
-from neutron.common import utils as common_utils
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.linux import helpers
-from neutron.tests.functional import base
-
-
-class DHCPAgentOVSTestFramework(base.BaseSudoTestCase):
-
-    _DHCP_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:4c")
-    _DHCP_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix
-    _TENANT_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:3a")
-    _TENANT_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix
-
-    _IP_ADDRS = {
-        4: {'addr': '192.168.10.11',
-            'cidr': '192.168.10.0/24',
-            'gateway': '192.168.10.1'},
-        6: {'addr': '0:0:0:0:0:ffff:c0a8:a0b',
-            'cidr': '0:0:0:0:0:ffff:c0a8:a00/120',
-            'gateway': '0:0:0:0:0:ffff:c0a8:a01'}, }
-
-    def setUp(self):
-        super(DHCPAgentOVSTestFramework, self).setUp()
-        config.setup_logging()
-        self.conf_fixture = self.useFixture(fixture_config.Config())
-        self.conf = self.conf_fixture.conf
-        dhcp_agent.register_options(self.conf)
-
-        # NOTE(cbrandily): TempDir fixture creates a folder with 0o700
-        # permissions but agent dir must be readable by dnsmasq user (nobody)
-        agent_config_dir = self.useFixture(fixtures.TempDir()).path
-        self.useFixture(
-            helpers.RecursivePermDirFixture(agent_config_dir, 0o555))
-
-        self.conf.set_override("dhcp_confs", agent_config_dir)
-        self.conf.set_override(
-            'interface_driver',
-            'neutron.agent.linux.interface.OVSInterfaceDriver')
-        self.conf.set_override('report_interval', 0, 'AGENT')
-        br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
-        self.conf.set_override('ovs_integration_bridge', br_int.br_name)
-
-        self.mock_plugin_api = mock.patch(
-            'neutron.agent.dhcp.agent.DhcpPluginApi').start().return_value
-        mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
-        self.agent = agent.DhcpAgentWithStateReport('localhost')
-
-        self.ovs_driver = interface.OVSInterfaceDriver(self.conf)
-
-    def network_dict_for_dhcp(self, dhcp_enabled=True, ip_version=4):
-        net_id = uuidutils.generate_uuid()
-        subnet_dict = self.create_subnet_dict(
-            net_id, dhcp_enabled, ip_version)
-        port_dict = self.create_port_dict(
-            net_id, subnet_dict.id,
-            mac_address=str(self._DHCP_PORT_MAC_ADDRESS),
-            ip_version=ip_version)
-        port_dict.device_id = common_utils.get_dhcp_agent_device_id(
-            net_id, self.conf.host)
-        net_dict = self.create_network_dict(
-            net_id, [subnet_dict], [port_dict])
-        return net_dict
-
-    def create_subnet_dict(self, net_id, dhcp_enabled=True, ip_version=4):
-        sn_dict = dhcp.DictModel({
-            "id": uuidutils.generate_uuid(),
-            "network_id": net_id,
-            "ip_version": ip_version,
-            "cidr": self._IP_ADDRS[ip_version]['cidr'],
-            "gateway_ip": (self.
-                _IP_ADDRS[ip_version]['gateway']),
-            "enable_dhcp": dhcp_enabled,
-            "dns_nameservers": [],
-            "host_routes": [],
-            "ipv6_ra_mode": None,
-            "ipv6_address_mode": None})
-        if ip_version == 6:
-            sn_dict['ipv6_address_mode'] = constants.DHCPV6_STATEFUL
-        return sn_dict
-
-    def create_port_dict(self, network_id, subnet_id, mac_address,
-                         ip_version=4, ip_address=None):
-        ip_address = ip_address or self._IP_ADDRS[ip_version]['addr']
-        port_dict = dhcp.DictModel({
-            "id": uuidutils.generate_uuid(),
-            "name": "foo",
-            "mac_address": mac_address,
-            "network_id": network_id,
-            "admin_state_up": True,
-            "device_id": uuidutils.generate_uuid(),
-            "device_owner": "foo",
-            "fixed_ips": [{"subnet_id": subnet_id,
-                           "ip_address": ip_address}], })
-        return port_dict
-
-    def create_network_dict(self, net_id, subnets=None, ports=None):
-        subnets = subnets or []
-        ports = ports or []
-        net_dict = dhcp.NetModel(d={
-            "id": net_id,
-            "subnets": subnets,
-            "ports": ports,
-            "admin_state_up": True,
-            "tenant_id": uuidutils.generate_uuid(), })
-        return net_dict
-
-    def get_interface_name(self, network, port):
-        device_manager = dhcp.DeviceManager(conf=self.conf, plugin=mock.Mock())
-        return device_manager.get_interface_name(network, port)
-
-    def configure_dhcp_for_network(self, network, dhcp_enabled=True):
-        self.agent.configure_dhcp_for_network(network)
-        self.addCleanup(self._cleanup_network, network, dhcp_enabled)
-
-    def _cleanup_network(self, network, dhcp_enabled):
-        self.mock_plugin_api.release_dhcp_port.return_value = None
-        if dhcp_enabled:
-            self.agent.call_driver('disable', network)
-
-    def assert_dhcp_resources(self, network, dhcp_enabled):
-        ovs = ovs_lib.BaseOVS()
-        port = network.ports[0]
-        iface_name = self.get_interface_name(network, port)
-        self.assertEqual(dhcp_enabled, ovs.port_exists(iface_name))
-        self.assert_dhcp_namespace(network.namespace, dhcp_enabled)
-        self.assert_dhcp_device(network.namespace, iface_name, dhcp_enabled)
-
-    def assert_dhcp_namespace(self, namespace, dhcp_enabled):
-        ip = ip_lib.IPWrapper()
-        self.assertEqual(dhcp_enabled, ip.netns.exists(namespace))
-
-    def assert_dhcp_device(self, namespace, dhcp_iface_name, dhcp_enabled):
-        dev = ip_lib.IPDevice(dhcp_iface_name, namespace)
-        self.assertEqual(dhcp_enabled, ip_lib.device_exists(
-            dhcp_iface_name, namespace))
-        if dhcp_enabled:
-            self.assertEqual(self._DHCP_PORT_MAC_ADDRESS, dev.link.address)
-
-    def _plug_port_for_dhcp_request(self, network, port):
-        namespace = network.namespace
-        vif_name = self.get_interface_name(network.id, port)
-
-        self.ovs_driver.plug(network.id, port.id, vif_name, port.mac_address,
-                             self.conf['ovs_integration_bridge'],
-                             namespace=namespace)
-
-    def _ip_list_for_vif(self, vif_name, namespace):
-        ip_device = ip_lib.IPDevice(vif_name, namespace)
-        return ip_device.addr.list(ip_version=4)
-
-    def _get_network_port_for_allocation_test(self):
-        network = self.network_dict_for_dhcp()
-        ip_addr = netaddr.IPNetwork(network.subnets[0].cidr)[1]
-        port = self.create_port_dict(
-            network.id, network.subnets[0].id,
-            mac_address=str(self._TENANT_PORT_MAC_ADDRESS),
-            ip_address=str(ip_addr))
-        return network, port
-
-    def assert_good_allocation_for_port(self, network, port):
-        vif_name = self.get_interface_name(network.id, port)
-        self._run_dhclient(vif_name, network)
-
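-        # wait until dhclient has configured at least one IPv4 address on
-        # the interface (an empty address list is falsy)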
-        predicate = lambda: len(
-            self._ip_list_for_vif(vif_name, network.namespace))
-        utils.wait_until_true(predicate, 10)
-
-        ip_list = self._ip_list_for_vif(vif_name, network.namespace)
-        cidr = ip_list[0].get('cidr')
-        ip_addr = str(netaddr.IPNetwork(cidr).ip)
-        self.assertEqual(port.fixed_ips[0].ip_address, ip_addr)
-
-    def assert_bad_allocation_for_port(self, network, port):
-        vif_name = self.get_interface_name(network.id, port)
-        self._run_dhclient(vif_name, network)
-        # we need to wait some time (10 seconds is enough) and check that
-        # dhclient has not configured an IP address for the interface
-        eventlet.sleep(10)
-
-        ip_list = self._ip_list_for_vif(vif_name, network.namespace)
-        self.assertEqual([], ip_list)
-
-    def _run_dhclient(self, vif_name, network):
-        # NOTE: Before running dhclient we have to create a resolv.conf file
-        # in the namespace where dhclient will run while testing address
-        # allocation for the port; otherwise dhclient overrides the system
-        # /etc/resolv.conf.
-        # By default the directory for the dhcp-agent's namespace doesn't
-        # exist, so we use AdminDirFixture to create a directory with admin
-        # permissions in /etc/netns/ and touch resolv.conf in it.
-        etc_dir = '/etc/netns/%s' % network.namespace
-        self.useFixture(helpers.AdminDirFixture(etc_dir))
-        cmd = ['touch', os.path.join(etc_dir, 'resolv.conf')]
-        utils.execute(cmd, run_as_root=True)
-        dhclient_cmd = ['dhclient', '--no-pid', '-d', '-1', vif_name]
-        proc = net_helpers.RootHelperProcess(
-            cmd=dhclient_cmd, namespace=network.namespace)
-        self.addCleanup(proc.wait)
-        self.addCleanup(proc.kill)
-
-
-class DHCPAgentOVSTestCase(DHCPAgentOVSTestFramework):
-
-    def test_create_subnet_with_dhcp(self):
-        dhcp_enabled = True
-        for version in [4, 6]:
-            network = self.network_dict_for_dhcp(
-                dhcp_enabled, ip_version=version)
-            self.configure_dhcp_for_network(network=network,
-                                            dhcp_enabled=dhcp_enabled)
-            self.assert_dhcp_resources(network, dhcp_enabled)
-
-    def test_good_address_allocation(self):
-        network, port = self._get_network_port_for_allocation_test()
-        network.ports.append(port)
-        self.configure_dhcp_for_network(network=network)
-        self._plug_port_for_dhcp_request(network, port)
-        self.assert_good_allocation_for_port(network, port)
-
-    def test_bad_address_allocation(self):
-        network, port = self._get_network_port_for_allocation_test()
-        network.ports.append(port)
-        self.configure_dhcp_for_network(network=network)
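-        # a MAC with no matching static lease in dnsmasq's host file should
-        # get no address allocated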
-        bad_mac_address = netaddr.EUI(self._TENANT_PORT_MAC_ADDRESS.value + 1)
-        bad_mac_address.dialect = netaddr.mac_unix
-        port.mac_address = str(bad_mac_address)
-        self._plug_port_for_dhcp_request(network, port)
-        self.assert_bad_allocation_for_port(network, port)
diff --git a/neutron/tests/functional/agent/test_firewall.py b/neutron/tests/functional/agent/test_firewall.py
deleted file mode 100644 (file)
index a376156..0000000
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2015 Intel Corporation.
-# Copyright 2015 Isaku Yamahata <isaku.yamahata at intel com>
-#                               <isaku.yamahata at gmail com>
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import copy
-import testscenarios
-
-import netaddr
-from oslo_config import cfg
-
-from neutron.agent import firewall
-from neutron.agent.linux import iptables_firewall
-from neutron.agent import securitygroups_rpc as sg_cfg
-from neutron.common import constants
-from neutron.tests.common import conn_testers
-from neutron.tests.functional import base
-
-
-load_tests = testscenarios.load_tests_apply_scenarios
-
-
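-# lookup tables used by _test_rule to assert that the opposite direction
-# and transport protocol remain blocked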
-reverse_direction = {
-    conn_testers.ConnectionTester.INGRESS:
-        conn_testers.ConnectionTester.EGRESS,
-    conn_testers.ConnectionTester.EGRESS:
-        conn_testers.ConnectionTester.INGRESS}
-reverse_transport_protocol = {
-    conn_testers.ConnectionTester.TCP: conn_testers.ConnectionTester.UDP,
-    conn_testers.ConnectionTester.UDP: conn_testers.ConnectionTester.TCP}
-
-
-DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-
-
-def _add_rule(sg_rules, base, port_range_min=None, port_range_max=None):
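-    """Append a copy of base to sg_rules, optionally bounding the ports."""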
-    rule = copy.copy(base)
-    if port_range_min:
-        rule['port_range_min'] = port_range_min
-    if port_range_max:
-        rule['port_range_max'] = port_range_max
-    sg_rules.append(rule)
-
-
-class FirewallTestCase(base.BaseSudoTestCase):
-    FAKE_SECURITY_GROUP_ID = 'fake_sg_id'
-    MAC_SPOOFED = "fa:16:3e:9a:2f:48"
-    scenarios = [('IptablesFirewallDriver without ipset',
-                  {'enable_ipset': False}),
-                 ('IptablesFirewallDriver with ipset',
-                  {'enable_ipset': True})]
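-    # testscenarios runs each test in this class once per scenario above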
-
-    def create_iptables_firewall(self):
-        cfg.CONF.set_override('enable_ipset', self.enable_ipset,
-                              'SECURITYGROUP')
-        return iptables_firewall.IptablesFirewallDriver(
-            namespace=self.tester.bridge_namespace)
-
-    @staticmethod
-    def _create_port_description(port_id, ip_addresses, mac_address, sg_ids):
-        return {'admin_state_up': True,
-                'device': port_id,
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'fixed_ips': ip_addresses,
-                'mac_address': mac_address,
-                'port_security_enabled': True,
-                'security_groups': sg_ids,
-                'status': 'ACTIVE'}
-
-    def setUp(self):
-        cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
-        super(FirewallTestCase, self).setUp()
-        self.tester = self.useFixture(
-            conn_testers.LinuxBridgeConnectionTester())
-        self.firewall = self.create_iptables_firewall()
-        vm_mac = self.tester.vm_mac_address
-        vm_port_id = self.tester.vm_port_id
-        self.src_port_desc = self._create_port_description(
-            vm_port_id, [self.tester.vm_ip_address], vm_mac,
-            [self.FAKE_SECURITY_GROUP_ID])
-        self.firewall.prepare_port_filter(self.src_port_desc)
-
-    def _apply_security_group_rules(self, sg_id, sg_rules):
-        with self.firewall.defer_apply():
-            self.firewall.update_security_group_rules(sg_id, sg_rules)
-
-    def test_rule_application_converges(self):
-        sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress'},
-                    {'ethertype': 'IPv6', 'direction': 'egress'},
-                    {'ethertype': 'IPv4', 'direction': 'ingress',
-                     'source_ip_prefix': '0.0.0.0/0', 'protocol': 'icmp'},
-                    {'ethertype': 'IPv6', 'direction': 'ingress',
-                     'source_ip_prefix': '0::0/0', 'protocol': 'ipv6-icmp'}]
-        # make sure port ranges converge on all protocols with and without
-        # port ranges (prevents regression of bug 1502924)
-        for proto in ('tcp', 'udp', 'icmp'):
-            for version in ('IPv4', 'IPv6'):
-                if proto == 'icmp' and version == 'IPv6':
-                    proto = 'ipv6-icmp'
-                base = {'ethertype': version, 'direction': 'ingress',
-                        'protocol': proto}
-                sg_rules.append(copy.copy(base))
-                _add_rule(sg_rules, base, port_range_min=50,
-                          port_range_max=50)
-                _add_rule(sg_rules, base, port_range_max=55)
-                _add_rule(sg_rules, base, port_range_min=60,
-                          port_range_max=60)
-                _add_rule(sg_rules, base, port_range_max=65)
-
-        # add some single-host rules to prevent regression of bug 1502917
-        sg_rules.append({'ethertype': 'IPv4', 'direction': 'ingress',
-                         'source_ip_prefix': '77.77.77.77/32'})
-        sg_rules.append({'ethertype': 'IPv6', 'direction': 'ingress',
-                         'source_ip_prefix': 'fe80::1/128'})
-        self.firewall.update_security_group_rules(
-            self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        self.firewall.prepare_port_filter(self.src_port_desc)
-        # after one prepare call, another apply should be a NOOP
-        self.assertEqual([], self.firewall.iptables._apply())
-
-        orig_sg_rules = copy.copy(sg_rules)
-        for proto in ('tcp', 'udp', 'icmp'):
-            for version in ('IPv4', 'IPv6'):
-                if proto == 'icmp' and version == 'IPv6':
-                    proto = 'ipv6-icmp'
-                # make sure firewall is in converged state
-                self.firewall.update_security_group_rules(
-                    self.FAKE_SECURITY_GROUP_ID, orig_sg_rules)
-                self.firewall.update_port_filter(self.src_port_desc)
-                sg_rules = copy.copy(orig_sg_rules)
-
-                # remove one rule and add another to make sure it results in
-                # exactly one delete and insert
-                sg_rules.pop(0 if version == 'IPv4' else 1)
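-                # (index 0 is the IPv4 egress rule, index 1 the IPv6 one)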
-                sg_rules.append({'ethertype': version, 'direction': 'egress',
-                                 'protocol': proto})
-                self.firewall.update_security_group_rules(
-                    self.FAKE_SECURITY_GROUP_ID, sg_rules)
-                result = self.firewall.update_port_filter(self.src_port_desc)
-                deletes = [r for r in result if r.startswith('-D ')]
-                creates = [r for r in result if r.startswith('-I ')]
-                self.assertEqual(1, len(deletes))
-                self.assertEqual(1, len(creates))
-                # quick sanity check to make sure the insert was for the
-                # correct proto
-                self.assertIn('-p %s' % proto, creates[0])
-                # another apply should be a NOOP if the right rule was removed
-                # and the new one was inserted in the correct position
-                self.assertEqual([], self.firewall.iptables._apply())
-
-    def test_rule_ordering_correct(self):
-        sg_rules = [
-            {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp',
-             'port_range_min': i, 'port_range_max': i}
-            for i in range(50, 61)
-        ]
-        self.firewall.update_security_group_rules(
-            self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        self.firewall.prepare_port_filter(self.src_port_desc)
-        self._assert_sg_out_tcp_rules_appear_in_order(sg_rules)
-        # remove a rule and add a new one
-        sg_rules.pop(5)
-        sg_rules.insert(8, {'ethertype': 'IPv4', 'direction': 'egress',
-                            'protocol': 'tcp', 'port_range_min': 400,
-                            'port_range_max': 400})
-        self.firewall.update_security_group_rules(
-            self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        self.firewall.prepare_port_filter(self.src_port_desc)
-        self._assert_sg_out_tcp_rules_appear_in_order(sg_rules)
-
-        # reverse all of the rules (requires lots of deletes and inserts)
-        sg_rules = list(reversed(sg_rules))
-        self.firewall.update_security_group_rules(
-            self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        self.firewall.prepare_port_filter(self.src_port_desc)
-        self._assert_sg_out_tcp_rules_appear_in_order(sg_rules)
-
-    def _assert_sg_out_tcp_rules_appear_in_order(self, sg_rules):
-        outgoing_rule_pref = '-A %s-o%s' % (self.firewall.iptables.wrap_name,
-                                            self.src_port_desc['device'][3:13])
-        rules = [
-            r for r in self.firewall.iptables.get_rules_for_table('filter')
-            if r.startswith(outgoing_rule_pref)
-        ]
-        # ensure the rules were inserted in the same order we sent them
-        indexes = [rules.index('%s -p tcp -m tcp --dport %s -j RETURN' %
-                               (outgoing_rule_pref, i['port_range_min']))
-                   for i in sg_rules]
-        # all indexes should be in order with no unexpected rules in between
-        self.assertEqual(range(indexes[0], indexes[-1] + 1), indexes)
-
-    def test_ingress_icmp_secgroup(self):
-        # update the sg_group to make ping pass
-        sg_rules = [{'ethertype': constants.IPv4,
-                     'direction': firewall.INGRESS_DIRECTION,
-                     'protocol': constants.PROTO_NAME_ICMP},
-                    {'ethertype': constants.IPv4,
-                     'direction': firewall.EGRESS_DIRECTION}]
-
-        self.tester.assert_no_connection(protocol=self.tester.ICMP,
-                                         direction=self.tester.INGRESS)
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-
-    def test_mac_spoofing(self):
-        sg_rules = [{'ethertype': constants.IPv4,
-                     'direction': firewall.INGRESS_DIRECTION,
-                     'protocol': constants.PROTO_NAME_ICMP},
-                    {'ethertype': constants.IPv4,
-                     'direction': firewall.EGRESS_DIRECTION}]
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-        self.tester.vm_mac_address = self.MAC_SPOOFED
-        self.tester.flush_arp_tables()
-        self.tester.assert_no_connection(protocol=self.tester.ICMP,
-                                         direction=self.tester.INGRESS)
-        self.tester.assert_no_connection(protocol=self.tester.ICMP,
-                                         direction=self.tester.EGRESS)
-
-    def test_mac_spoofing_works_without_port_security_enabled(self):
-        self.src_port_desc['port_security_enabled'] = False
-        self.firewall.update_port_filter(self.src_port_desc)
-
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-        self.tester.vm_mac_address = self.MAC_SPOOFED
-        self.tester.flush_arp_tables()
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.EGRESS)
-
-    def test_port_security_enabled_set_to_false(self):
-        self.tester.assert_no_connection(protocol=self.tester.ICMP,
-                                         direction=self.tester.INGRESS)
-        self.src_port_desc['port_security_enabled'] = False
-        self.firewall.update_port_filter(self.src_port_desc)
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-
-    def test_dhcp_requests_from_vm(self):
-        # a DHCPv4 client sends from source port 68 to destination port 67
-        self.tester.assert_connection(direction=self.tester.EGRESS,
-                                      protocol=self.tester.UDP,
-                                      src_port=68, dst_port=67)
-
-    def test_dhcp_server_forbidden_on_vm(self):
-        self.tester.assert_no_connection(direction=self.tester.EGRESS,
-                                         protocol=self.tester.UDP,
-                                         src_port=67, dst_port=68)
-        self.tester.assert_no_connection(direction=self.tester.INGRESS,
-                                         protocol=self.tester.UDP,
-                                         src_port=68, dst_port=67)
-
-    def test_ip_spoofing(self):
-        sg_rules = [{'ethertype': constants.IPv4,
-                     'direction': firewall.INGRESS_DIRECTION,
-                     'protocol': constants.PROTO_NAME_ICMP}]
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
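-        # an address one above the port's fixed IP is not in fixed_ips, so
-        # the anti-spoofing rules should drop traffic using it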
-        not_allowed_ip = "%s/24" % (
-            netaddr.IPAddress(self.tester.vm_ip_address) + 1)
-
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-        self.tester.vm_ip_cidr = not_allowed_ip
-        self.tester.assert_no_connection(protocol=self.tester.ICMP,
-                                         direction=self.tester.INGRESS)
-        self.tester.assert_no_connection(protocol=self.tester.ICMP,
-                                         direction=self.tester.EGRESS)
-
-    def test_ip_spoofing_works_without_port_security_enabled(self):
-        self.src_port_desc['port_security_enabled'] = False
-        self.firewall.update_port_filter(self.src_port_desc)
-
-        sg_rules = [{'ethertype': constants.IPv4,
-                     'direction': firewall.INGRESS_DIRECTION,
-                     'protocol': constants.PROTO_NAME_ICMP}]
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        not_allowed_ip = "%s/24" % (
-            netaddr.IPAddress(self.tester.vm_ip_address) + 1)
-
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-        self.tester.vm_ip_cidr = not_allowed_ip
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.EGRESS)
-
-    def test_allowed_address_pairs(self):
-        sg_rules = [{'ethertype': constants.IPv4,
-                     'direction': firewall.INGRESS_DIRECTION,
-                     'protocol': constants.PROTO_NAME_ICMP},
-                    {'ethertype': constants.IPv4,
-                     'direction': firewall.EGRESS_DIRECTION}]
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-
-        port_mac = self.tester.vm_mac_address
-        allowed_ip = netaddr.IPAddress(self.tester.vm_ip_address) + 1
-        not_allowed_ip = "%s/24" % (allowed_ip + 1)
-        self.src_port_desc['allowed_address_pairs'] = [
-            {'mac_address': port_mac,
-             'ip_address': allowed_ip}]
-        allowed_ip = "%s/24" % allowed_ip
-
-        self.firewall.update_port_filter(self.src_port_desc)
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-        self.tester.vm_ip_cidr = allowed_ip
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-        self.tester.vm_ip_cidr = not_allowed_ip
-        self.tester.assert_no_connection(protocol=self.tester.ICMP,
-                                         direction=self.tester.INGRESS)
-
-    def test_arp_is_allowed(self):
-        self.tester.assert_connection(protocol=self.tester.ARP,
-                                      direction=self.tester.EGRESS)
-        self.tester.assert_connection(protocol=self.tester.ARP,
-                                      direction=self.tester.INGRESS)
-
-    def _test_rule(self, direction, protocol):
-        sg_rules = [{'ethertype': constants.IPv4, 'direction': direction,
-                     'protocol': protocol}]
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        not_allowed_direction = reverse_direction[direction]
-        not_allowed_protocol = reverse_transport_protocol[protocol]
-
-        self.tester.assert_connection(protocol=protocol,
-                                      direction=direction)
-        self.tester.assert_no_connection(protocol=not_allowed_protocol,
-                                         direction=direction)
-        self.tester.assert_no_connection(protocol=protocol,
-                                         direction=not_allowed_direction)
-
-    def test_ingress_tcp_rule(self):
-        self._test_rule(self.tester.INGRESS, self.tester.TCP)
-
-    def test_ingress_udp_rule(self):
-        self._test_rule(self.tester.INGRESS, self.tester.UDP)
-
-    def test_egress_tcp_rule(self):
-        self._test_rule(self.tester.EGRESS, self.tester.TCP)
-
-    def test_egress_udp_rule(self):
-        self._test_rule(self.tester.EGRESS, self.tester.UDP)
-
-    def test_connection_with_destination_port_range(self):
-        port_min = 12345
-        port_max = 12346
-        sg_rules = [{'ethertype': constants.IPv4,
-                     'direction': firewall.INGRESS_DIRECTION,
-                     'protocol': constants.PROTO_NAME_TCP,
-                     'port_range_min': port_min,
-                     'port_range_max': port_max}]
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-
-        self.tester.assert_connection(protocol=self.tester.TCP,
-                                      direction=self.tester.INGRESS,
-                                      dst_port=port_min)
-        self.tester.assert_connection(protocol=self.tester.TCP,
-                                      direction=self.tester.INGRESS,
-                                      dst_port=port_max)
-        self.tester.assert_no_connection(protocol=self.tester.TCP,
-                                         direction=self.tester.INGRESS,
-                                         dst_port=port_min - 1)
-        self.tester.assert_no_connection(protocol=self.tester.TCP,
-                                         direction=self.tester.INGRESS,
-                                         dst_port=port_max + 1)
-
-    def test_connection_with_source_port_range(self):
-        source_port_min = 12345
-        source_port_max = 12346
-        sg_rules = [{'ethertype': constants.IPv4,
-                     'direction': firewall.EGRESS_DIRECTION,
-                     'protocol': constants.PROTO_NAME_TCP,
-                     'source_port_range_min': source_port_min,
-                     'source_port_range_max': source_port_max}]
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-
-        self.tester.assert_connection(protocol=self.tester.TCP,
-                                      direction=self.tester.EGRESS,
-                                      src_port=source_port_min)
-        self.tester.assert_connection(protocol=self.tester.TCP,
-                                      direction=self.tester.EGRESS,
-                                      src_port=source_port_max)
-        self.tester.assert_no_connection(protocol=self.tester.TCP,
-                                         direction=self.tester.EGRESS,
-                                         src_port=source_port_min - 1)
-        self.tester.assert_no_connection(protocol=self.tester.TCP,
-                                         direction=self.tester.EGRESS,
-                                         src_port=source_port_max + 1)
-
-    def test_established_connection_is_not_cut(self):
-        port = 12345
-        sg_rules = [{'ethertype': constants.IPv4,
-                     'direction': firewall.INGRESS_DIRECTION,
-                     'protocol': constants.PROTO_NAME_TCP,
-                     'port_range_min': port,
-                     'port_range_max': port}]
-        connection = {'protocol': self.tester.TCP,
-                      'direction': self.tester.INGRESS,
-                      'dst_port': port}
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        self.tester.establish_connection(**connection)
-
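-        # removing every rule must not cut the already-established
-        # connection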
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, list())
-        self.tester.assert_established_connection(**connection)
-
-    def test_preventing_firewall_blink(self):
-        direction = self.tester.INGRESS
-        sg_rules = [{'ethertype': 'IPv4', 'direction': 'ingress',
-                     'protocol': 'tcp'}]
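-        # churn the rules while ICMP is flowing: if the firewall "blinks"
-        # (momentarily clears rules while rebuilding), ICMP would leak
-        # through and packets_received would be non-zero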
-        self.tester.start_sending_icmp(direction)
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, {})
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
-        self.tester.stop_sending_icmp(direction)
-        packets_sent = self.tester.get_sent_icmp_packets(direction)
-        packets_received = self.tester.get_received_icmp_packets(direction)
-        self.assertGreater(packets_sent, 0)
-        self.assertEqual(0, packets_received)
-
-    def test_remote_security_groups(self):
-        remote_sg_id = 'remote_sg_id'
-        peer_port_desc = self._create_port_description(
-            self.tester.peer_port_id,
-            [self.tester.peer_ip_address],
-            self.tester.peer_mac_address,
-            [remote_sg_id])
-        self.firewall.prepare_port_filter(peer_port_desc)
-
-        peer_sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress',
-                          'protocol': 'icmp'}]
-        self._apply_security_group_rules(remote_sg_id, peer_sg_rules)
-
-        vm_sg_rules = [{'ethertype': 'IPv4', 'direction': 'ingress',
-                        'protocol': 'icmp', 'remote_group_id': remote_sg_id}]
-        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID,
-                                         vm_sg_rules)
-
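-        # registering the peer's IP as a member of the remote group is what
-        # makes the remote_group_id rule match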
-        vm_sg_members = {'IPv4': [self.tester.peer_ip_address]}
-        with self.firewall.defer_apply():
-            self.firewall.update_security_group_members(
-                remote_sg_id, vm_sg_members)
-
-        self.tester.assert_connection(protocol=self.tester.ICMP,
-                                      direction=self.tester.INGRESS)
-        self.tester.assert_no_connection(protocol=self.tester.TCP,
-                                         direction=self.tester.INGRESS)
-        self.tester.assert_no_connection(protocol=self.tester.ICMP,
-                                         direction=self.tester.EGRESS)
diff --git a/neutron/tests/functional/agent/test_l2_lb_agent.py b/neutron/tests/functional/agent/test_l2_lb_agent.py
deleted file mode 100644 (file)
index 74981ad..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-import testtools
-
-from neutron.plugins.ml2.drivers.linuxbridge.agent import \
-    linuxbridge_neutron_agent
-from neutron.tests.functional.agent.linux import test_ip_lib
-
-lba = linuxbridge_neutron_agent
-
-
-class LinuxBridgeAgentTests(test_ip_lib.IpLibTestFramework):
-
-    def setUp(self):
-        super(LinuxBridgeAgentTests, self).setUp()
-        agent_rpc = ('neutron.agent.rpc.PluginApi')
-        mock.patch(agent_rpc).start()
-        mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
-        cfg.CONF.set_override('enable_vxlan', False, 'VXLAN')
-
-    def test_validate_interface_mappings(self):
-        mappings = {'physnet1': 'int1', 'physnet2': 'int2'}
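-        # constructing the manager must abort (SystemExit) while any mapped
-        # physical interface is missing; creating the devices one at a time
-        # lets the final construction succeed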
-        with testtools.ExpectedException(SystemExit):
-            lba.LinuxBridgeManager({}, mappings)
-        self.manage_device(
-            self.generate_device_details()._replace(namespace=None,
-                                                    name='int1'))
-        with testtools.ExpectedException(SystemExit):
-            lba.LinuxBridgeManager({}, mappings)
-        self.manage_device(
-            self.generate_device_details()._replace(namespace=None,
-                                                    name='int2'))
-        lba.LinuxBridgeManager({}, mappings)
-
-    def test_validate_bridge_mappings(self):
-        mappings = {'physnet1': 'br-eth1'}
-        with testtools.ExpectedException(SystemExit):
-            lba.LinuxBridgeManager(mappings, {})
-        self.manage_device(
-            self.generate_device_details()._replace(namespace=None,
-                                                    name='br-eth1'))
-        lba.LinuxBridgeManager(mappings, {})
diff --git a/neutron/tests/functional/agent/test_l2_ovs_agent.py b/neutron/tests/functional/agent/test_l2_ovs_agent.py
deleted file mode 100644 (file)
index 763b26a..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# Copyright (c) 2015 SUSE Linux Products GmbH
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-
-from eventlet.timeout import Timeout
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.l2 import base
-
-
-class TestOVSAgent(base.OVSAgentTestFramework):
-
-    def test_port_creation_and_deletion(self):
-        self.setup_agent_and_ports(
-            port_dicts=self.create_test_ports())
-        self.wait_until_ports_state(self.ports, up=True)
-
-        for port in self.ports:
-            self.agent.int_br.delete_port(port['vif_name'])
-
-        self.wait_until_ports_state(self.ports, up=False)
-
-    def _check_datapath_type_netdev(self, expected, default=False):
-        if not default:
-            self.config.set_override('datapath_type',
-                                     expected,
-                                     "OVS")
-        agent = self.create_agent()
-        self.start_agent(agent)
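-        # all three agent bridges should report the configured datapath_type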
-        for br_name in (getattr(self, br) for br in
-                        ('br_int', 'br_tun', 'br_phys')):
-            actual = self.ovs.db_get_val('Bridge', br_name, 'datapath_type')
-            self.assertEqual(expected, actual)
-
-    def test_datapath_type_change(self):
-        self._check_datapath_type_netdev('system')
-        self._check_datapath_type_netdev('netdev')
-
-    def test_datapath_type_netdev(self):
-        self._check_datapath_type_netdev(
-            constants.OVS_DATAPATH_NETDEV)
-
-    def test_datapath_type_system(self):
-        self._check_datapath_type_netdev(
-            constants.OVS_DATAPATH_SYSTEM)
-
-    def test_datapath_type_default(self):
-        self._check_datapath_type_netdev(
-            constants.OVS_DATAPATH_SYSTEM, default=True)
-
-    def test_resync_devices_set_up_after_exception(self):
-        self.setup_agent_and_ports(
-            port_dicts=self.create_test_ports(),
-            trigger_resync=True)
-        self.wait_until_ports_state(self.ports, up=True)
-
-    def test_reprocess_port_when_ovs_restarts(self):
-        self.setup_agent_and_ports(
-            port_dicts=self.create_test_ports())
-        self.wait_until_ports_state(self.ports, up=True)
-        self.agent.check_ovs_status.return_value = constants.OVS_RESTARTED
-        # OVS restarted, the agent should reprocess all the ports
-        self.agent.plugin_rpc.update_device_list.reset_mock()
-        self.wait_until_ports_state(self.ports, up=True)
-
-    def test_port_vlan_tags(self):
-        self.setup_agent_and_ports(
-            port_dicts=self.create_test_ports(),
-            trigger_resync=True)
-        self.wait_until_ports_state(self.ports, up=True)
-        self.assert_vlan_tags(self.ports, self.agent)
-
-    def test_assert_bridges_ports_vxlan(self):
-        agent = self.create_agent()
-        self.assertTrue(self.ovs.bridge_exists(self.br_int))
-        self.assertTrue(self.ovs.bridge_exists(self.br_tun))
-        self.assert_bridge_ports()
-        self.assert_patch_ports(agent)
-
-    def test_assert_bridges_ports_no_tunnel(self):
-        self.create_agent(create_tunnels=False)
-        self.assertTrue(self.ovs.bridge_exists(self.br_int))
-        self.assertFalse(self.ovs.bridge_exists(self.br_tun))
-
-    def test_assert_pings_during_br_int_setup_not_lost(self):
-        self.setup_agent_and_ports(port_dicts=self.create_test_ports(),
-                                   create_tunnels=False)
-        self.wait_until_ports_state(self.ports, up=True)
-        ips = [port['fixed_ips'][0]['ip_address'] for port in self.ports]
-        with net_helpers.async_ping(self.namespace, ips) as done:
-            while not done():
-                self.agent.setup_integration_br()
-                time.sleep(0.25)
-
-    def test_noresync_after_port_gone(self):
-        '''Test the scenario where a port is removed after listing it but
-        before getting vif info about it.
-        '''
-        self.ports = self.create_test_ports(amount=2)
-        self.agent = self.create_agent(create_tunnels=False)
-        self.network = self._create_test_network_dict()
-        self._plug_ports(self.network, self.ports, self.agent)
-        self.start_agent(self.agent, ports=self.ports,
-                         unplug_ports=[self.ports[1]])
-        self.wait_until_ports_state([self.ports[0]], up=True)
-        self.assertRaises(
-            Timeout, self.wait_until_ports_state, [self.ports[1]], up=True,
-            timeout=10)
-
-
-class TestOVSAgentExtensionConfig(base.OVSAgentTestFramework):
-    def setUp(self):
-        super(TestOVSAgentExtensionConfig, self).setUp()
-        self.config.set_override('extensions', ['qos'], 'agent')
-        self.agent = self.create_agent(create_tunnels=False)
-
-    def test_report_loaded_extension(self):
-        self.agent._report_state()
-        agent_state = self.agent.state_rpc.report_state.call_args[0][1]
-        self.assertEqual(['qos'], agent_state['configurations']['extensions'])
diff --git a/neutron/tests/functional/agent/test_ovs_flows.py b/neutron/tests/functional/agent/test_ovs_flows.py
deleted file mode 100644 (file)
index e36addf..0000000
+++ /dev/null
@@ -1,279 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import eventlet
-import fixtures
-import mock
-
-from oslo_config import cfg
-from oslo_utils import importutils
-
-from neutron.agent.linux import ip_lib
-from neutron.cmd.sanity import checks
-from neutron.common import constants as n_const
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent \
-    import ovs_neutron_agent as ovsagt
-from neutron.tests.common import base as common_base
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent import test_ovs_lib
-from neutron.tests.functional import base
-from neutron.tests import tools
-
-
-cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
-                      'common.config')
-
-
-class _OVSAgentTestBase(test_ovs_lib.OVSBridgeTestBase,
-                        base.BaseSudoTestCase):
-    def setUp(self):
-        super(_OVSAgentTestBase, self).setUp()
-        self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
-        self.of_interface_mod = importutils.import_module(self._MAIN_MODULE)
-        self.br_int_cls = None
-        self.br_tun_cls = None
-        self.br_phys_cls = None
-        self.br_int = None
-        self.init_done = False
-        self.init_done_ev = eventlet.event.Event()
-        self.addCleanup(self._kill_main)
-        retry_count = 3
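-        # the free port picked for of_listen_port may be taken by another
-        # process before the controller binds it, hence the retry loop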
-        while True:
-            cfg.CONF.set_override('of_listen_port',
-                                  net_helpers.get_free_namespace_port(
-                                      n_const.PROTO_NAME_TCP),
-                                  group='OVS')
-            self.of_interface_mod.init_config()
-            self._main_thread = eventlet.spawn(self._kick_main)
-
-            # Wait for _kick_main -> of_interface main -> _agent_main
-            # NOTE(yamamoto): This complexity comes from how "native"
-            # of_interface runs its openflow controller: its main routine
-            # blocks while the embedded openflow controller runs, in which
-            # case the agent rpc_loop runs in another thread.  However, for
-            # functional tests we need to run setUp() and test_xxx() in the
-            # same thread, so of_interface's main runs in a separate thread
-            # here instead.
-            try:
-                while not self.init_done:
-                    self.init_done_ev.wait()
-                break
-            except fixtures.TimeoutException:
-                self._kill_main()
-            retry_count -= 1
-            if retry_count < 0:
-                raise Exception('port allocation failed')
-
-    def _kick_main(self):
-        with mock.patch.object(ovsagt, 'main', self._agent_main):
-            self.of_interface_mod.main()
-
-    def _kill_main(self):
-        self._main_thread.kill()
-        self._main_thread.wait()
-
-    def _agent_main(self, bridge_classes):
-        self.br_int_cls = bridge_classes['br_int']
-        self.br_phys_cls = bridge_classes['br_phys']
-        self.br_tun_cls = bridge_classes['br_tun']
-        self.br_int = self.br_int_cls(self.br.br_name)
-        self.br_int.set_secure_mode()
-        self.br_int.setup_controllers(cfg.CONF)
-        self.br_int.setup_default_table()
-
-        # signal to setUp()
-        self.init_done = True
-        self.init_done_ev.send()
-
-
-class _OVSAgentOFCtlTestBase(_OVSAgentTestBase):
-    _MAIN_MODULE = ('neutron.plugins.ml2.drivers.openvswitch.agent.'
-                    'openflow.ovs_ofctl.main')
-
-
-class _OVSAgentNativeTestBase(_OVSAgentTestBase):
-    _MAIN_MODULE = ('neutron.plugins.ml2.drivers.openvswitch.agent.'
-                    'openflow.native.main')
-
-
-class _ARPSpoofTestCase(object):
-    def setUp(self):
-        # NOTE(kevinbenton): it would be way cooler to use scapy for
-        # these but scapy requires the python process to be running as
-        # root to bind to the ports.
-        super(_ARPSpoofTestCase, self).setUp()
-        self.skip_without_arp_support()
-        self.src_addr = '192.168.0.1'
-        self.dst_addr = '192.168.0.2'
-        self.src_namespace = self.useFixture(
-            net_helpers.NamespaceFixture()).name
-        self.dst_namespace = self.useFixture(
-            net_helpers.NamespaceFixture()).name
-        self.src_p = self.useFixture(
-            net_helpers.OVSPortFixture(self.br, self.src_namespace)).port
-        self.dst_p = self.useFixture(
-            net_helpers.OVSPortFixture(self.br, self.dst_namespace)).port
-        # wait to add IPs until after the anti-spoof rules are in place to
-        # ensure no ARP traffic happens before they are installed
-
-    @common_base.no_skip_on_missing_deps
-    def skip_without_arp_support(self):
-        if not checks.arp_header_match_supported():
-            self.skipTest("ARP header matching not supported")
-
-    def test_arp_spoof_doesnt_block_normal_traffic(self):
-        self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr])
-        self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr])
-        self.src_p.addr.add('%s/24' % self.src_addr)
-        self.dst_p.addr.add('%s/24' % self.dst_addr)
-        net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2)
-
-    def test_arp_spoof_doesnt_block_ipv6(self):
-        self.src_addr = '2000::1'
-        self.dst_addr = '2000::2'
-        self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr])
-        self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr])
-        self.src_p.addr.add('%s/64' % self.src_addr)
-        self.dst_p.addr.add('%s/64' % self.dst_addr)
-        # make sure the IPv6 addresses are ready before pinging
-        self.src_p.addr.wait_until_address_ready(self.src_addr)
-        self.dst_p.addr.wait_until_address_ready(self.dst_addr)
-        net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2)
-
-    def test_arp_spoof_blocks_response(self):
-        # this will prevent the destination from responding to the ARP
-        # request for its own address
-        self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3'])
-        self.src_p.addr.add('%s/24' % self.src_addr)
-        self.dst_p.addr.add('%s/24' % self.dst_addr)
-        net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2)
-
-    def test_arp_spoof_blocks_icmpv6_neigh_advt(self):
-        self.src_addr = '2000::1'
-        self.dst_addr = '2000::2'
-        # this will prevent the destination from responding (i.e., sending
-        # an icmpv6 neighbour advertisement) to the icmpv6 neighbour
-        # solicitation for its own address (2000::2), as the spoofing rules
-        # added below only allow '2000::3'.
-        self._setup_arp_spoof_for_port(self.dst_p.name, ['2000::3'])
-        self.src_p.addr.add('%s/64' % self.src_addr)
-        self.dst_p.addr.add('%s/64' % self.dst_addr)
-        # make sure the IPv6 addresses are ready before pinging
-        self.src_p.addr.wait_until_address_ready(self.src_addr)
-        self.dst_p.addr.wait_until_address_ready(self.dst_addr)
-        net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2)
-
-    def test_arp_spoof_blocks_request(self):
-        # this will prevent the source from sending an ARP request using
-        # its own (now disallowed) address
-        self._setup_arp_spoof_for_port(self.src_p.name, ['192.168.0.3'])
-        self.src_p.addr.add('%s/24' % self.src_addr)
-        self.dst_p.addr.add('%s/24' % self.dst_addr)
-        ns_ip_wrapper = ip_lib.IPWrapper(self.src_namespace)
-        try:
-            ns_ip_wrapper.netns.execute(['arping', '-I', self.src_p.name,
-                                         '-c1', self.dst_addr])
-            tools.fail("arping should have failed. The arp request should "
-                       "have been blocked.")
-        except RuntimeError:
-            pass
-
-    def test_arp_spoof_allowed_address_pairs(self):
-        self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3',
-                                                         self.dst_addr])
-        self.src_p.addr.add('%s/24' % self.src_addr)
-        self.dst_p.addr.add('%s/24' % self.dst_addr)
-        net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2)
-
-    def test_arp_spoof_icmpv6_neigh_advt_allowed_address_pairs(self):
-        self.src_addr = '2000::1'
-        self.dst_addr = '2000::2'
-        self._setup_arp_spoof_for_port(self.dst_p.name, ['2000::3',
-                                                         self.dst_addr])
-        self.src_p.addr.add('%s/64' % self.src_addr)
-        self.dst_p.addr.add('%s/64' % self.dst_addr)
-        # make sure the IPv6 addresses are ready before pinging
-        self.src_p.addr.wait_until_address_ready(self.src_addr)
-        self.dst_p.addr.wait_until_address_ready(self.dst_addr)
-        net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2)
-
-    def test_arp_spoof_allowed_address_pairs_0cidr(self):
-        self._setup_arp_spoof_for_port(self.dst_p.name, ['9.9.9.9/0',
-                                                         '1.2.3.4'])
-        self.src_p.addr.add('%s/24' % self.src_addr)
-        self.dst_p.addr.add('%s/24' % self.dst_addr)
-        net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2)
-
-    def test_arp_spoof_disable_port_security(self):
-        # block first and then disable port security to make sure old rules
-        # are cleared
-        self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3'])
-        self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3'],
-                                       psec=False)
-        self.src_p.addr.add('%s/24' % self.src_addr)
-        self.dst_p.addr.add('%s/24' % self.dst_addr)
-        net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2)
-
-    def test_arp_spoof_disable_network_port(self):
-        # block first and then flag the port as a network port (router
-        # gateway) to make sure the old rules are cleared
-        self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3'])
-        self._setup_arp_spoof_for_port(
-            self.dst_p.name, ['192.168.0.3'],
-            device_owner=n_const.DEVICE_OWNER_ROUTER_GW)
-        self.src_p.addr.add('%s/24' % self.src_addr)
-        self.dst_p.addr.add('%s/24' % self.dst_addr)
-        net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2)
-
-    def _setup_arp_spoof_for_port(self, port, addrs, psec=True,
-                                  device_owner='nobody'):
-        vif = next(
-            vif for vif in self.br.get_vif_ports() if vif.port_name == port)
-        ip_addr = addrs.pop()
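-        # the last address becomes the port's fixed IP; any remaining
-        # addresses are wired up as allowed address pairs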
-        details = {'port_security_enabled': psec,
-                   'fixed_ips': [{'ip_address': ip_addr}],
-                   'device_owner': device_owner,
-                   'allowed_address_pairs': [
-                        dict(ip_address=ip) for ip in addrs]}
-        ovsagt.OVSNeutronAgent.setup_arp_spoofing_protection(
-            self.br_int, vif, details)
-
-
-class ARPSpoofOFCtlTestCase(_ARPSpoofTestCase, _OVSAgentOFCtlTestBase):
-    pass
-
-
-class ARPSpoofNativeTestCase(_ARPSpoofTestCase, _OVSAgentNativeTestBase):
-    pass
-
-
-class _CanaryTableTestCase(object):
-    def test_canary_table(self):
-        self.br_int.delete_flows()
-        self.assertEqual(constants.OVS_RESTARTED,
-                         self.br_int.check_canary_table())
-        self.br_int.setup_canary_table()
-        self.assertEqual(constants.OVS_NORMAL,
-                         self.br_int.check_canary_table())
-
-
-class CanaryTableOFCtlTestCase(_CanaryTableTestCase, _OVSAgentOFCtlTestBase):
-    pass
-
-
-class CanaryTableNativeTestCase(_CanaryTableTestCase, _OVSAgentNativeTestBase):
-    pass
diff --git a/neutron/tests/functional/agent/test_ovs_lib.py b/neutron/tests/functional/agent/test_ovs_lib.py
deleted file mode 100644 (file)
index 7229367..0000000
+++ /dev/null
@@ -1,358 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import mock
-import uuid
-
-from neutron.agent.common import ovs_lib
-from neutron.agent.linux import ip_lib
-from neutron.tests import base as tests_base
-from neutron.tests.common import net_helpers
-from neutron.tests.functional.agent.linux import base
-
-
-class OVSBridgeTestBase(base.BaseOVSLinuxTestCase):
-    # TODO(twilson) So far, only ovsdb-related tests are written. It would be
-    # good to also add the openflow-related functions
-    def setUp(self):
-        super(OVSBridgeTestBase, self).setUp()
-        self.ovs = ovs_lib.BaseOVS()
-        self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
-
-    def create_ovs_port(self, *interface_attrs):
-        # Convert ((a, b), (c, d)) to {a: b, c: d} and add 'type' by default
-        attrs = collections.OrderedDict(interface_attrs)
-        attrs.setdefault('type', 'internal')
-        port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
-        return (port_name, self.br.add_port(port_name, *attrs.items()))
-
-    def create_ovs_vif_port(self, iface_id=None, mac=None,
-                            iface_field='iface-id'):
-        if iface_id is None:
-            iface_id = base.get_rand_name()
-        if mac is None:
-            mac = base.get_rand_name()
-        attrs = ('external_ids', {iface_field: iface_id, 'attached-mac': mac})
-        port_name, ofport = self.create_ovs_port(attrs)
-        return ovs_lib.VifPort(port_name, ofport, iface_id, mac, self.br)
-
-
-class OVSBridgeTestCase(OVSBridgeTestBase):
-
-    def test_port_lifecycle(self):
-        (port_name, ofport) = self.create_ovs_port(('type', 'internal'))
-        # ofport should always be an integer string with value -1 or > 0.
-        self.assertTrue(int(ofport))
-        self.assertTrue(int(self.br.get_port_ofport(port_name)))
-        self.assertTrue(self.br.port_exists(port_name))
-        self.assertEqual(self.br.br_name,
-                         self.br.get_bridge_for_iface(port_name))
-        self.br.delete_port(port_name)
-        self.assertFalse(self.br.port_exists(port_name))
-
-    def test_duplicate_port_may_exist_false(self):
-        port_name, ofport = self.create_ovs_port(('type', 'internal'))
-        cmd = self.br.ovsdb.add_port(self.br.br_name,
-                                     port_name, may_exist=False)
-        self.assertRaises(RuntimeError, cmd.execute, check_error=True)
-
-    def test_delete_port_if_exists_false(self):
-        cmd = self.br.ovsdb.del_port('nonexistantport', if_exists=False)
-        self.assertRaises(RuntimeError, cmd.execute, check_error=True)
-
-    def test_replace_port(self):
-        port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
-        self.br.replace_port(port_name, ('type', 'internal'))
-        self.assertTrue(self.br.port_exists(port_name))
-        self.assertEqual('internal',
-                         self.br.db_get_val('Interface', port_name, 'type'))
-        self.br.replace_port(port_name, ('type', 'internal'),
-                             ('external_ids', {'test': 'test'}))
-        self.assertTrue(self.br.port_exists(port_name))
-        self.assertEqual('test', self.br.db_get_val('Interface', port_name,
-                                                    'external_ids')['test'])
-
-    def test_attribute_lifecycle(self):
-        (port_name, ofport) = self.create_ovs_port()
-        tag = 42
-        self.ovs.set_db_attribute('Port', port_name, 'tag', tag)
-        self.assertEqual(tag, self.ovs.db_get_val('Port', port_name, 'tag'))
-        self.assertEqual(tag, self.br.get_port_tag_dict()[port_name])
-        self.ovs.clear_db_attribute('Port', port_name, 'tag')
-        self.assertEqual([], self.ovs.db_get_val('Port', port_name, 'tag'))
-        self.assertEqual([], self.br.get_port_tag_dict()[port_name])
-
-    def test_get_bridge_external_bridge_id(self):
-        self.ovs.set_db_attribute('Bridge', self.br.br_name,
-                                  'external_ids',
-                                  {'bridge-id': self.br.br_name})
-        self.assertEqual(
-            self.br.br_name,
-            self.ovs.get_bridge_external_bridge_id(self.br.br_name))
-
-    def test_controller_lifecycle(self):
-        controllers = {'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:55'}
-        self.br.set_controller(controllers)
-        self.assertSetEqual(controllers, set(self.br.get_controller()))
-        self.br.del_controller()
-        self.assertEqual([], self.br.get_controller())
-
-    def test_non_index_queries(self):
-        controllers = ['tcp:127.0.0.1:6633']
-        self.br.set_controller(controllers)
-        cmd = self.br.ovsdb.db_set('Controller', self.br.br_name,
-                                   ('connection_mode', 'out-of-band'))
-        cmd.execute(check_error=True)
-        self.assertEqual('out-of-band',
-                         self.br.db_get_val('Controller', self.br.br_name,
-                                            'connection_mode'))
-
-    def test_set_fail_mode_secure(self):
-        self.br.set_secure_mode()
-        self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
-
-    def test_set_fail_mode_standalone(self):
-        self.br.set_standalone_mode()
-        self._assert_br_fail_mode(ovs_lib.FAILMODE_STANDALONE)
-
-    def _assert_br_fail_mode(self, fail_mode):
-        self.assertEqual(
-            self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'),
-            fail_mode)
-
-    def test_set_protocols(self):
-        self.br.set_protocols('OpenFlow10')
-        self.assertEqual(
-            self.br.db_get_val('Bridge', self.br.br_name, 'protocols'),
-            "OpenFlow10")
-
-    def test_get_datapath_id(self):
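-        # OVS derives a bridge's default datapath ID from its MAC address,
-        # so the colon-stripped link/ether value below is a plausible
-        # datapath_id to set and then look for.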
-        brdev = ip_lib.IPDevice(self.br.br_name)
-        dpid = brdev.link.attributes['link/ether'].replace(':', '')
-        self.br.set_db_attribute('Bridge',
-                                 self.br.br_name, 'datapath_id', dpid)
-        self.assertIn(dpid, self.br.get_datapath_id())
-
-    def test_add_tunnel_port(self):
-        attrs = {
-            'remote_ip': '192.0.2.1',  # RFC 5737 TEST-NET-1
-            'local_ip': '198.51.100.1',  # RFC 5737 TEST-NET-2
-        }
-        port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
-        self.br.add_tunnel_port(port_name, attrs['remote_ip'],
-                                attrs['local_ip'])
-        self.assertEqual(self.ovs.db_get_val('Interface', port_name, 'type'),
-                         'gre')
-        options = self.ovs.db_get_val('Interface', port_name, 'options')
-        for attr, val in attrs.items():
-            self.assertEqual(val, options[attr])
-
-    def test_add_patch_port(self):
-        local = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
-        peer = 'remotepeer'
-        self.br.add_patch_port(local, peer)
-        self.assertEqual(self.ovs.db_get_val('Interface', local, 'type'),
-                         'patch')
-        options = self.ovs.db_get_val('Interface', local, 'options')
-        self.assertEqual(peer, options['peer'])
-
-    def test_get_port_name_list(self):
-        # Note that ovs-vsctl's list-ports does not include the port created
-        # with the same name as the bridge
-        ports = {self.create_ovs_port()[0] for i in range(5)}
-        self.assertSetEqual(ports, set(self.br.get_port_name_list()))
-
-    def test_get_iface_name_list(self):
-        ifaces = {self.create_ovs_port()[0] for i in range(5)}
-        self.assertSetEqual(ifaces, set(self.br.get_iface_name_list()))
-
-    def test_get_port_stats(self):
-        # Nothing seems to use this function?
-        (port_name, ofport) = self.create_ovs_port()
-        stats = set(self.br.get_port_stats(port_name).keys())
-        self.assertTrue(set(['rx_packets', 'tx_packets']).issubset(stats))
-
-    def test_get_vif_ports(self):
-        for i in range(2):
-            self.create_ovs_port()
-        vif_ports = [self.create_ovs_vif_port() for i in range(3)]
-        ports = self.br.get_vif_ports()
-        self.assertEqual(3, len(ports))
-        self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
-        self.assertEqual(sorted([x.port_name for x in vif_ports]),
-                         sorted([x.port_name for x in ports]))
-
-    def test_get_vif_ports_with_bond(self):
-        for i in range(2):
-            self.create_ovs_port()
-        vif_ports = [self.create_ovs_vif_port() for i in range(3)]
-        # bond ports don't have records in the Interface table but they do in
-        # the Port table
-        orig = self.br.get_port_name_list
-        new_port_name_list = lambda: orig() + ['bondport']
-        mock.patch.object(self.br, 'get_port_name_list',
-                          new=new_port_name_list).start()
-        ports = self.br.get_vif_ports()
-        self.assertEqual(3, len(ports))
-        self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
-        self.assertEqual(sorted([x.port_name for x in vif_ports]),
-                         sorted([x.port_name for x in ports]))
-
-    def test_get_vif_port_set(self):
-        for i in range(2):
-            self.create_ovs_port()
-        vif_ports = [self.create_ovs_vif_port() for i in range(2)]
-        ports = self.br.get_vif_port_set()
-        expected = set([x.vif_id for x in vif_ports])
-        self.assertEqual(expected, ports)
-
-    def test_get_vif_port_set_with_missing_port(self):
-        self.create_ovs_port()
-        vif_ports = [self.create_ovs_vif_port()]
-
-        # return an extra port to make sure the db list ignores it
-        orig = self.br.get_port_name_list
-        new_port_name_list = lambda: orig() + ['anotherport']
-        mock.patch.object(self.br, 'get_port_name_list',
-                          new=new_port_name_list).start()
-        ports = self.br.get_vif_port_set()
-        expected = set([vif_ports[0].vif_id])
-        self.assertEqual(expected, ports)
-
-    def test_get_vif_port_set_on_empty_bridge_returns_empty_set(self):
-        # Create a port on self.br
-        self.create_ovs_vif_port()
-
-        # Create another, empty bridge
-        br_2 = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
-
-        # Assert that get_vif_port_set on an empty bridge returns an empty set,
-        # and does not return the other bridge's ports.
-        self.assertEqual(set(), br_2.get_vif_port_set())
-
-    def test_get_ports_attributes(self):
-        port_names = [self.create_ovs_port()[0], self.create_ovs_port()[0]]
-        db_ports = self.br.get_ports_attributes('Interface', columns=['name'])
-        db_ports_names = [p['name'] for p in db_ports]
-        self.assertEqual(sorted(port_names), sorted(db_ports_names))
-
-    def test_get_port_tag_dict(self):
-        # Simple case tested in port test_set_get_clear_db_val
-        pass
-
-    def test_get_vif_port_by_id(self):
-        for i in range(2):
-            self.create_ovs_port()
-        vif_ports = [self.create_ovs_vif_port() for i in range(3)]
-        for vif in vif_ports:
-            self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id,
-                             vif.vif_id)
-
-    def test_get_vifs_by_ids(self):
-        for i in range(2):
-            self.create_ovs_port()
-        vif_ports = [self.create_ovs_vif_port() for i in range(3)]
-        by_id = self.br.get_vifs_by_ids([v.vif_id for v in vif_ports])
-        # convert to str for comparison of VifPorts
-        by_id = {vid: str(vport) for vid, vport in by_id.items()}
-        self.assertEqual({v.vif_id: str(v) for v in vif_ports}, by_id)
-
-    def test_delete_ports(self):
-        # TODO(twilson) I intensely dislike the current delete_ports function:
-        # its default behavior is really delete_vif_ports(), and it only acts
-        # the way a delete_ports() seemingly should when all_ports=True is
-        # passed
-        # Create 2 non-vif ports and 2 vif ports
-        nonvifs = {self.create_ovs_port()[0] for i in range(2)}
-        vifs = {self.create_ovs_vif_port().port_name for i in range(2)}
-        self.assertSetEqual(nonvifs.union(vifs),
-                            set(self.br.get_port_name_list()))
-        self.br.delete_ports()
-        self.assertSetEqual(nonvifs, set(self.br.get_port_name_list()))
-        self.br.delete_ports(all_ports=True)
-        self.assertEqual(len(self.br.get_port_name_list()), 0)
-
-    def test_set_controller_connection_mode(self):
-        controllers = ['tcp:192.0.2.0:6633']
-        self._set_controllers_connection_mode(controllers)
-
-    def test_set_multi_controllers_connection_mode(self):
-        controllers = ['tcp:192.0.2.0:6633', 'tcp:192.0.2.1:55']
-        self._set_controllers_connection_mode(controllers)
-
-    def _set_controllers_connection_mode(self, controllers):
-        self.br.set_controller(controllers)
-        self.assertEqual(sorted(controllers), sorted(self.br.get_controller()))
-        self.br.set_controllers_connection_mode('out-of-band')
-        self._assert_controllers_connection_mode('out-of-band')
-        self.br.del_controller()
-        self.assertEqual([], self.br.get_controller())
-
-    def _assert_controllers_connection_mode(self, connection_mode):
-        controllers = self.br.db_get_val('Bridge', self.br.br_name,
-                                         'controller')
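-        # db_get_val returns a bare UUID when a single controller is set and
-        # a list of UUIDs otherwise; normalize to a list before iterating.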
-        controllers = [controllers] if isinstance(
-            controllers, uuid.UUID) else controllers
-        for controller in controllers:
-            self.assertEqual(connection_mode,
-                             self.br.db_get_val('Controller',
-                                                controller,
-                                                'connection_mode'))
-
-    def test_egress_bw_limit(self):
-        port_name, _ = self.create_ovs_port()
-        self.br.create_egress_bw_limit_for_port(port_name, 700, 70)
-        max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name)
-        self.assertEqual(700, max_rate)
-        self.assertEqual(70, burst)
-        self.br.delete_egress_bw_limit_for_port(port_name)
-        max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name)
-        self.assertIsNone(max_rate)
-        self.assertIsNone(burst)
-
-
-class OVSLibTestCase(base.BaseOVSLinuxTestCase):
-
-    def setUp(self):
-        super(OVSLibTestCase, self).setUp()
-        self.ovs = ovs_lib.BaseOVS()
-
-    def test_bridge_lifecycle_baseovs(self):
-        name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
-        self.addCleanup(self.ovs.delete_bridge, name)
-        br = self.ovs.add_bridge(name)
-        self.assertEqual(br.br_name, name)
-        self.assertTrue(self.ovs.bridge_exists(name))
-        self.ovs.delete_bridge(name)
-        self.assertFalse(self.ovs.bridge_exists(name))
-
-    def test_get_bridges(self):
-        bridges = {
-            self.useFixture(net_helpers.OVSBridgeFixture()).bridge.br_name
-            for i in range(5)}
-        self.assertTrue(set(self.ovs.get_bridges()).issuperset(bridges))
-
-    def test_bridge_lifecycle_ovsbridge(self):
-        name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
-        br = ovs_lib.OVSBridge(name)
-        self.assertEqual(br.br_name, name)
-        # Make sure that instantiating an OVSBridge does not actually create
-        # the bridge on the system
-        self.assertFalse(self.ovs.bridge_exists(name))
-        self.addCleanup(self.ovs.delete_bridge, name)
-        br.create()
-        self.assertTrue(self.ovs.bridge_exists(name))
-        br.destroy()
-        self.assertFalse(self.ovs.bridge_exists(name))
diff --git a/neutron/tests/functional/api/__init__.py b/neutron/tests/functional/api/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/api/test_policies.py b/neutron/tests/functional/api/test_policies.py
deleted file mode 100644 (file)
index 1912f1d..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os.path
-
-from neutron import context
-from neutron import policy
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-
-from neutron.tests import base
-from neutron.tests import tools
-
-TEST_PATH = os.path.dirname(os.path.abspath(__file__))
-
-
-class APIPolicyTestCase(base.BaseTestCase):
-    """
-    Tests for REST API policy checks. Ideally this would be done against an
-    environment with an instantiated plugin, but there appear to be problems
-    with instantiating a plugin against an SQLite environment; as yet, there
-    is no precedent for running a functional test against an actual database
-    backend.
-    """
-
-    api_version = "2.0"
-
-    def setUp(self):
-        super(APIPolicyTestCase, self).setUp()
-        self.useFixture(tools.AttributeMapMemento())
-        self.extension_path = os.path.abspath(os.path.join(
-            TEST_PATH, "../../../extensions"))
-        policy.reset()
-
-    def _network_definition(self):
-        return {'name': 'test_network',
-                'ports': [],
-                'subnets': [],
-                'status': 'up',
-                'admin_state_up': True,
-                'shared': False,
-                'tenant_id': 'admin',
-                'id': 'test_network',
-                'router:external': True}
-
-    def _check_external_router_policy(self, context):
-        return policy.check(context, 'get_network', self._network_definition())
-
-    def test_premature_loading(self):
-        """
-        Verifies that loading policies by way of the admin context before
-        populating extensions and extending the resource map results in
-        networks with router:external set to True being invisible to regular
-        tenants.
-        """
-        extension_manager = extensions.ExtensionManager(self.extension_path)
-        admin_context = context.get_admin_context()
-        tenant_context = context.Context('test_user', 'test_tenant_id', False)
-        extension_manager.extend_resources(self.api_version,
-                                           attributes.RESOURCE_ATTRIBUTE_MAP)
-        self.assertTrue(self._check_external_router_policy(admin_context))
-        self.assertFalse(self._check_external_router_policy(tenant_context))
-
-    def test_proper_load_order(self):
-        """
-        Verifies that loading policies by way of the admin context after
-        populating extensions and extending the resource map results in
-        networks with router:external set to True being visible to regular
-        tenants.
-        """
-        extension_manager = extensions.ExtensionManager(self.extension_path)
-        extension_manager.extend_resources(self.api_version,
-                                           attributes.RESOURCE_ATTRIBUTE_MAP)
-        admin_context = context.get_admin_context()
-        tenant_context = context.Context('test_user', 'test_tenant_id', False)
-        self.assertTrue(self._check_external_router_policy(admin_context))
-        self.assertTrue(self._check_external_router_policy(tenant_context))
-
-    def tearDown(self):
-        policy.reset()
-        super(APIPolicyTestCase, self).tearDown()
diff --git a/neutron/tests/functional/base.py b/neutron/tests/functional/base.py
deleted file mode 100644 (file)
index 2a489a3..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from oslo_config import cfg
-
-from neutron.agent.common import config
-from neutron.agent.linux import utils
-from neutron.common import utils as common_utils
-from neutron.tests import base
-from neutron.tests.common import base as common_base
-
-SUDO_CMD = 'sudo -n'
-
-# This is the directory from which infra fetches log files for functional tests
-DEFAULT_LOG_DIR = '/tmp/dsvm-functional-logs/'
-
-
-class BaseSudoTestCase(base.BaseTestCase):
-    """
-    Base class for tests requiring invocation of commands via a root helper.
-
-    This class skips (during setUp) its tests unless sudo is enabled, i.e.
-    OS_SUDO_TESTING is set to '1' or 'True' in the test execution environment.
-    This is intended to allow developers to run the functional suite (e.g. tox
-    -e functional) without test failures if sudo invocations are not allowed.
-
-    Running sudo tests in the upstream gate jobs
-    (*-neutron-dsvm-functional) requires the additional step of
-    setting OS_ROOTWRAP_CMD to the rootwrap command configured by
-    devstack, e.g.
-
-      sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
-
-    Gate jobs do not allow invocations of sudo without rootwrap to
-    ensure that rootwrap configuration gets as much testing as
-    possible.
-    """
-
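-    # A typical local run might then look like this (the rootwrap path is
-    # the devstack example from the docstring; adjust to your environment):
-    #
-    #   export OS_SUDO_TESTING=1
-    #   export OS_ROOTWRAP_CMD="sudo /usr/local/bin/neutron-rootwrap \
-    #       /etc/neutron/rootwrap.conf"
-    #   tox -e functional
-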
-    def setUp(self):
-        super(BaseSudoTestCase, self).setUp()
-        if not base.bool_from_env('OS_SUDO_TESTING'):
-            self.skipTest('Testing with sudo is not enabled')
-
-        # Have each test log into its own log file
-        cfg.CONF.set_override('debug', True)
-        common_utils.ensure_dir(DEFAULT_LOG_DIR)
-        log_file = base.sanitize_log_path(
-            os.path.join(DEFAULT_LOG_DIR, "%s.log" % self.id()))
-        cfg.CONF.set_override('log_file', log_file)
-        config.setup_logging()
-
-        config.register_root_helper(cfg.CONF)
-        self.config(group='AGENT',
-                    root_helper=os.environ.get('OS_ROOTWRAP_CMD', SUDO_CMD))
-        self.config(group='AGENT',
-                    root_helper_daemon=os.environ.get(
-                        'OS_ROOTWRAP_DAEMON_CMD'))
-
-    @common_base.no_skip_on_missing_deps
-    def check_command(self, cmd, error_text, skip_msg, run_as_root=False):
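-        # Run cmd and turn a known failure (error_text appearing in the
-        # exception message) into a test skip; any other error propagates.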
-        try:
-            utils.execute(cmd, run_as_root=run_as_root)
-        except RuntimeError as e:
-            if error_text in str(e):
-                self.skipTest(skip_msg)
-            raise
diff --git a/neutron/tests/functional/cmd/__init__.py b/neutron/tests/functional/cmd/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/cmd/test_linuxbridge_cleanup.py b/neutron/tests/functional/cmd/test_linuxbridge_cleanup.py
deleted file mode 100644 (file)
index 74af2bc..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (c) 2015 Thales Services SAS
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import fixtures
-import mock
-
-from neutron.agent.linux import ip_lib
-from neutron.common import constants
-from neutron.plugins.ml2.drivers.linuxbridge.agent import \
-    linuxbridge_neutron_agent as lb_agent
-from neutron.tests.common import config_fixtures
-from neutron.tests.common import net_helpers
-from neutron.tests.functional import base
-from neutron.tests import tools
-
-
-class LinuxbridgeCleanupTest(base.BaseSudoTestCase):
-
-    def _test_linuxbridge_cleanup(self, bridge_exists, callback):
-        br_fixture = self.useFixture(
-            tools.SafeCleanupFixture(
-                net_helpers.LinuxBridgeFixture(
-                    prefix=lb_agent.BRIDGE_NAME_PREFIX))).fixture
-
-        config = callback(br_fixture)
-        config.update({'VXLAN': {'enable_vxlan': 'False'}})
-
-        temp_dir = self.useFixture(fixtures.TempDir()).path
-        conf = self.useFixture(config_fixtures.ConfigFileFixture(
-            base_filename='neutron.conf',
-            config=config,
-            temp_dir=temp_dir))
-
-        cmd = 'neutron-linuxbridge-cleanup', '--config-file', conf.filename
-        ip_wrapper = ip_lib.IPWrapper(br_fixture.namespace)
-        ip_wrapper.netns.execute(cmd)
-
-        self.assertEqual(bridge_exists, ip_lib.device_exists(
-            br_fixture.bridge.name, br_fixture.namespace))
-
-    def test_cleanup_empty_bridge(self):
-
-        def callback(br_fixture):
-            return config_fixtures.ConfigDict()
-
-        self._test_linuxbridge_cleanup(False, callback)
-
-    def test_no_cleanup_bridge_with_tap(self):
-
-        def callback(br_fixture):
-            # TODO(cbrandily): refactor net_helpers to avoid mocking it
-            mock.patch.object(
-                net_helpers, 'VETH0_PREFIX',
-                new_callable=mock.PropertyMock(
-                    return_value=constants.TAP_DEVICE_PREFIX + '0')).start()
-            mock.patch.object(
-                net_helpers, 'VETH1_PREFIX',
-                new_callable=mock.PropertyMock(
-                    return_value=constants.TAP_DEVICE_PREFIX + '1')).start()
-
-            self.useFixture(
-                tools.SafeCleanupFixture(
-                    net_helpers.LinuxBridgePortFixture(
-                        br_fixture.bridge, br_fixture.namespace)))
-            return config_fixtures.ConfigDict()
-
-        self._test_linuxbridge_cleanup(True, callback)
-
-    def test_no_cleanup_bridge_in_bridge_mappings(self):
-
-        def callback(br_fixture):
-            br_name = br_fixture.bridge.name
-            conf = config_fixtures.ConfigDict()
-            conf.update(
-                {'LINUX_BRIDGE': {'bridge_mappings': 'physnet:%s' % br_name}})
-            return conf
-
-        self._test_linuxbridge_cleanup(True, callback)
diff --git a/neutron/tests/functional/cmd/test_netns_cleanup.py b/neutron/tests/functional/cmd/test_netns_cleanup.py
deleted file mode 100644 (file)
index 92ca83f..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.agent.l3 import agent as l3_agent
-from neutron.agent.linux import dhcp
-from neutron.agent.linux import ip_lib
-from neutron.cmd import netns_cleanup
-from neutron.tests.common import net_helpers
-from neutron.tests.functional import base
-
-GET_NAMESPACES = 'neutron.agent.linux.ip_lib.IPWrapper.get_namespaces'
-TEST_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver'
-
-
-class NetnsCleanupTest(base.BaseSudoTestCase):
-    def setUp(self):
-        super(NetnsCleanupTest, self).setUp()
-
-        self.get_namespaces_p = mock.patch(GET_NAMESPACES)
-        self.get_namespaces = self.get_namespaces_p.start()
-
-    def setup_config(self, args=None):
-        if args is None:
-            args = []
-        # force option enabled to make sure non-empty namespaces are
-        # cleaned up and deleted
-        args.append('--force')
-
-        self.conf = netns_cleanup.setup_conf()
-        self.conf.set_override('interface_driver', TEST_INTERFACE_DRIVER)
-        self.config_parse(conf=self.conf, args=args)
-
-    def test_cleanup_network_namespaces_cleans_dhcp_and_l3_namespaces(self):
-        dhcp_namespace = self.useFixture(
-            net_helpers.NamespaceFixture(dhcp.NS_PREFIX)).name
-        l3_namespace = self.useFixture(
-            net_helpers.NamespaceFixture(l3_agent.NS_PREFIX)).name
-        bridge = self.useFixture(
-            net_helpers.VethPortFixture(namespace=dhcp_namespace)).bridge
-        self.useFixture(
-            net_helpers.VethPortFixture(bridge, l3_namespace))
-
-        # we scope get_namespaces to our own namespaces so as not to affect
-        # other tests; otherwise the cleanup would kill them all
-        self.get_namespaces.return_value = [l3_namespace, dhcp_namespace]
-
-        netns_cleanup.cleanup_network_namespaces(self.conf)
-
-        self.get_namespaces_p.stop()
-        namespaces_now = ip_lib.IPWrapper.get_namespaces()
-        self.assertNotIn(l3_namespace, namespaces_now)
-        self.assertNotIn(dhcp_namespace, namespaces_now)
diff --git a/neutron/tests/functional/common/__init__.py b/neutron/tests/functional/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/common/test_utils.py b/neutron/tests/functional/common/test_utils.py
deleted file mode 100644 (file)
index 8515963..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os.path
-import stat
-
-from neutron.common import utils
-from neutron.tests import base
-
-
-class TestReplaceFile(base.BaseTestCase):
-    def setUp(self):
-        super(TestReplaceFile, self).setUp()
-        temp_dir = self.get_default_temp_dir().path
-        self.file_name = os.path.join(temp_dir, "new_file")
-        self.data = "data to copy"
-
-    def _verify_result(self, file_mode):
-        self.assertTrue(os.path.exists(self.file_name))
-        with open(self.file_name) as f:
-            content = f.read()
-        self.assertEqual(self.data, content)
-        mode = os.stat(self.file_name).st_mode
-        self.assertEqual(file_mode, stat.S_IMODE(mode))
-
-    def test_replace_file_default_mode(self):
-        file_mode = 0o644
-        utils.replace_file(self.file_name, self.data)
-        self._verify_result(file_mode)
-
-    def test_replace_file_custom_mode(self):
-        file_mode = 0o722
-        utils.replace_file(self.file_name, self.data, file_mode)
-        self._verify_result(file_mode)
-
-    def test_replace_file_custom_mode_twice(self):
-        file_mode = 0o722
-        utils.replace_file(self.file_name, self.data, file_mode)
-        self.data = "new data to copy"
-        file_mode = 0o777
-        utils.replace_file(self.file_name, self.data, file_mode)
-        self._verify_result(file_mode)
diff --git a/neutron/tests/functional/db/__init__.py b/neutron/tests/functional/db/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/db/test_ipam.py b/neutron/tests/functional/db/test_ipam.py
deleted file mode 100644 (file)
index a1dd846..0000000
+++ /dev/null
@@ -1,251 +0,0 @@
-# Copyright 2015 SUSE Linux Products GmbH
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_db.sqlalchemy import session
-import testtools
-
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.db import db_base_plugin_v2 as base_plugin
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.ipam.drivers.neutrondb_ipam import db_models as ipam_models
-from neutron.tests import base
-from neutron.tests.common import base as common_base
-
-
-def get_admin_test_context(db_url):
-    """
-    get_admin_test_context is used to provide a test context. A new session
-    is created using the specified db url.
-    """
-    ctx = context.Context(user_id=None,
-                          tenant_id=None,
-                          is_admin=True,
-                          overwrite=False)
-    facade = session.EngineFacade(db_url, mysql_sql_mode='STRICT_ALL_TABLES')
-    ctx._session = facade.get_session(autocommit=False, expire_on_commit=True)
-    return ctx
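-
-# Illustrative use (this mirrors configure_test() below):
-#
-#   ctx = get_admin_test_context(engine.url)
-#   ctx.session.query(models_v2.IPAllocation).all()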
-
-
-class IpamTestCase(object):
-    """
-    Base class for tests that aim to test ip allocation.
-    """
-
-    def configure_test(self, use_pluggable_ipam=False):
-        model_base.BASEV2.metadata.create_all(self.engine)
-        cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
-        if use_pluggable_ipam:
-            self._turn_on_pluggable_ipam()
-        else:
-            self._turn_off_pluggable_ipam()
-        self.plugin = base_plugin.NeutronDbPluginV2()
-        self.cxt = get_admin_test_context(self.engine.url)
-        self.addCleanup(self.cxt._session.close)
-        self.tenant_id = 'test_tenant'
-        self.network_id = 'test_net_id'
-        self.subnet_id = 'test_sub_id'
-        self.port_id = 'test_p_id'
-        self._create_network()
-        self._create_subnet()
-
-    def _turn_off_pluggable_ipam(self):
-        cfg.CONF.set_override('ipam_driver', None)
-        self.ip_availability_range = models_v2.IPAvailabilityRange
-
-    def _turn_on_pluggable_ipam(self):
-        cfg.CONF.set_override('ipam_driver', 'internal')
-        DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
-        self.setup_coreplugin(DB_PLUGIN_KLASS)
-        self.ip_availability_range = ipam_models.IpamAvailabilityRange
-
-    def result_set_to_dicts(self, resultset, keys):
-        dicts = []
-        for item in resultset:
-            item_dict = dict((x, item[x]) for x in keys)
-            dicts.append(item_dict)
-        return dicts
-
-    def assert_ip_alloc_matches(self, expected):
-        result_set = self.cxt.session.query(models_v2.IPAllocation).all()
-        keys = ['port_id', 'ip_address', 'subnet_id', 'network_id']
-        actual = self.result_set_to_dicts(result_set, keys)
-        self.assertEqual(expected, actual)
-
-    def assert_ip_avail_range_matches(self, expected):
-        result_set = self.cxt.session.query(
-            self.ip_availability_range).all()
-        keys = ['first_ip', 'last_ip']
-        actual = self.result_set_to_dicts(result_set, keys)
-        self.assertEqual(expected, actual)
-
-    def assert_ip_alloc_pool_matches(self, expected):
-        result_set = self.cxt.session.query(models_v2.IPAllocationPool).all()
-        keys = ['first_ip', 'last_ip', 'subnet_id']
-        actual = self.result_set_to_dicts(result_set, keys)
-        self.assertEqual(expected, actual)
-
-    def _create_network(self):
-        network = {'tenant_id': self.tenant_id,
-                   'id': self.network_id,
-                   'name': 'test-net',
-                   'admin_state_up': True,
-                   'shared': False,
-                   'status': constants.NET_STATUS_ACTIVE}
-        return self.plugin.create_network(self.cxt, {'network': network})
-
-    def _create_subnet(self):
-        subnet = {'tenant_id': self.tenant_id,
-                  'id': self.subnet_id,
-                  'name': 'test_sub',
-                  'network_id': self.network_id,
-                  'ip_version': 4,
-                  'cidr': '10.10.10.0/29',
-                  'enable_dhcp': False,
-                  'gateway_ip': '10.10.10.1',
-                  'shared': False,
-                  'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
-                  'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
-                  'host_routes': attributes.ATTR_NOT_SPECIFIED}
-        return self.plugin.create_subnet(self.cxt, {'subnet': subnet})
-
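-    # With cidr 10.10.10.0/29 and gateway 10.10.10.1, the default allocation
-    # pool is 10.10.10.2 - 10.10.10.6: the /29 holds 8 addresses, and the
-    # network, gateway and broadcast addresses are excluded. The expected
-    # values in the tests below all follow from this arithmetic.
-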
-    def _create_port(self, port_id, fixed_ips=None):
-        port_fixed_ips = (fixed_ips if fixed_ips else
-                          attributes.ATTR_NOT_SPECIFIED)
-        port = {'tenant_id': self.tenant_id,
-                'name': 'test_port',
-                'id': port_id,
-                'network_id': self.network_id,
-                'mac_address': attributes.ATTR_NOT_SPECIFIED,
-                'admin_state_up': True,
-                'status': constants.PORT_STATUS_ACTIVE,
-                'device_id': 'test_dev_id',
-                'device_owner': 'compute',
-                'fixed_ips': port_fixed_ips}
-        self.plugin.create_port(self.cxt, {'port': port})
-
-    def test_allocate_fixed_ip(self):
-        fixed_ip = [{'ip_address': "10.10.10.3", 'subnet_id': self.subnet_id}]
-        self._create_port(self.port_id, fixed_ip)
-
-        ip_alloc_expected = [{'port_id': self.port_id,
-                              'ip_address': fixed_ip[0].get('ip_address'),
-                              'subnet_id': self.subnet_id,
-                              'network_id': self.network_id}]
-        ip_avail_ranges_expected = [{'first_ip': '10.10.10.2',
-                                     'last_ip': '10.10.10.2'},
-                                    {'first_ip': '10.10.10.4',
-                                     'last_ip': '10.10.10.6'}]
-        ip_alloc_pool_expected = [{'first_ip': '10.10.10.2',
-                                   'last_ip': '10.10.10.6',
-                                   'subnet_id': self.subnet_id}]
-        self.assert_ip_alloc_matches(ip_alloc_expected)
-        self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected)
-        self.assert_ip_avail_range_matches(
-            ip_avail_ranges_expected)
-
-    def test_allocate_first_available_ip(self):
-        self._create_port(self.port_id)
-        ip_alloc_expected = [{'port_id': self.port_id,
-                              'ip_address': '10.10.10.2',
-                              'subnet_id': self.subnet_id,
-                              'network_id': self.network_id}]
-        ip_avail_ranges_expected = [{'first_ip': '10.10.10.3',
-                                     'last_ip': '10.10.10.6'}]
-        ip_alloc_pool_expected = [{'first_ip': '10.10.10.2',
-                                   'last_ip': '10.10.10.6',
-                                   'subnet_id': self.subnet_id}]
-        self.assert_ip_alloc_matches(ip_alloc_expected)
-        self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected)
-        self.assert_ip_avail_range_matches(
-            ip_avail_ranges_expected)
-
-    def test_allocate_ip_exhausted_pool(self):
-        # 5 addresses are available, from .2 up to .6; allocate all of them
-        for i in range(1, 6):
-            self._create_port(self.port_id + str(i))
-
-        ip_avail_ranges_expected = []
-        ip_alloc_pool_expected = [{'first_ip': '10.10.10.2',
-                                   'last_ip': '10.10.10.6',
-                                   'subnet_id': self.subnet_id}]
-        self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected)
-        self.assert_ip_avail_range_matches(
-            ip_avail_ranges_expected)
-        # Create another port
-        with testtools.ExpectedException(n_exc.IpAddressGenerationFailure):
-            self._create_port(self.port_id)
-
-    def test_rebuild_availability_range(self):
-        for i in range(1, 6):
-            self._create_port(self.port_id + str(i))
-
-        ip_avail_ranges_expected = []
-        ip_alloc_pool_expected = [{'first_ip': '10.10.10.2',
-                                   'last_ip': '10.10.10.6',
-                                   'subnet_id': self.subnet_id}]
-        self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected)
-        self.assert_ip_avail_range_matches(
-            ip_avail_ranges_expected)
-        # Delete some ports; this will free the first two IPs
-        for i in range(1, 3):
-            self.plugin.delete_port(self.cxt, self.port_id + str(i))
-        # Create another port; this will trigger the rebuilding of the
-        # availability ranges
-        self._create_port(self.port_id)
-        ip_avail_ranges_expected = [{'first_ip': '10.10.10.3',
-                                     'last_ip': '10.10.10.3'}]
-
-        ip_alloc = self.cxt.session.query(models_v2.IPAllocation).all()
-        self.assertEqual(4, len(ip_alloc))
-        self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected)
-        self.assert_ip_avail_range_matches(
-            ip_avail_ranges_expected)
-
-
-class TestIpamMySql(common_base.MySQLTestCase, base.BaseTestCase,
-                    IpamTestCase):
-
-    def setUp(self):
-        super(TestIpamMySql, self).setUp()
-        self.configure_test()
-
-
-class TestIpamPsql(common_base.PostgreSQLTestCase,
-                   base.BaseTestCase, IpamTestCase):
-
-    def setUp(self):
-        super(TestIpamPsql, self).setUp()
-        self.configure_test()
-
-
-class TestPluggableIpamMySql(common_base.MySQLTestCase,
-                             base.BaseTestCase, IpamTestCase):
-
-    def setUp(self):
-        super(TestPluggableIpamMySql, self).setUp()
-        self.configure_test(use_pluggable_ipam=True)
-
-
-class TestPluggableIpamPsql(common_base.PostgreSQLTestCase,
-                            base.BaseTestCase, IpamTestCase):
-
-    def setUp(self):
-        super(TestPluggableIpamPsql, self).setUp()
-        self.configure_test(use_pluggable_ipam=True)
diff --git a/neutron/tests/functional/db/test_migrations.py b/neutron/tests/functional/db/test_migrations.py
deleted file mode 100644 (file)
index 79b8785..0000000
+++ /dev/null
@@ -1,311 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import six
-
-from alembic import script as alembic_script
-from contextlib import contextmanager
-import mock
-from oslo_config import cfg
-from oslo_config import fixture as config_fixture
-from oslo_db.sqlalchemy import test_base
-from oslo_db.sqlalchemy import test_migrations
-import sqlalchemy
-from sqlalchemy import event
-import sqlalchemy.types as types
-
-import neutron.db.migration as migration_help
-from neutron.db.migration.alembic_migrations import external
-from neutron.db.migration import cli as migration
-from neutron.db.migration.models import head as head_models
-from neutron.tests.common import base
-
-cfg.CONF.import_opt('core_plugin', 'neutron.common.config')
-
-CORE_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-
-
-class _TestModelsMigrations(test_migrations.ModelsMigrationsSync):
-    '''Test for checking equality of the models state and migrations.
-
-    For the opportunistic testing you need to set up a db named
-    'openstack_citest' with user 'openstack_citest' and password
-    'openstack_citest' on localhost.
-    The test will then use that db and user/password combo to run the tests.
-
-    For PostgreSQL on Ubuntu this can be done with the following commands::
-
-        sudo -u postgres psql
-        postgres=# create user openstack_citest with createdb login password
-                  'openstack_citest';
-        postgres=# create database openstack_citest with owner
-                   openstack_citest;
-
-    For MySQL on Ubuntu this can be done with the following commands::
-
-        mysql -u root
-        >create database openstack_citest;
-        >grant all privileges on openstack_citest.* to
-         openstack_citest@localhost identified by 'openstack_citest';
-
-    Output is a list that contains information about differences between db and
-    models. Output example::
-
-       [('add_table',
-         Table('bat', MetaData(bind=None),
-               Column('info', String(), table=<bat>), schema=None)),
-        ('remove_table',
-         Table(u'bar', MetaData(bind=None),
-               Column(u'data', VARCHAR(), table=<bar>), schema=None)),
-        ('add_column',
-         None,
-         'foo',
-         Column('data', Integer(), table=<foo>)),
-        ('remove_column',
-         None,
-         'foo',
-         Column(u'old_data', VARCHAR(), table=None)),
-        [('modify_nullable',
-          None,
-          'foo',
-          u'x',
-          {'existing_server_default': None,
-          'existing_type': INTEGER()},
-          True,
-          False)]]
-
-    * ``remove_*`` means that there is extra table/column/constraint in db;
-
-    * ``add_*`` means that it is missing in db;
-
-    * ``modify_*`` means that a column in the db has the wrong
-      type/nullable/server_default set. Each element contains:
-
-        - what should be modified,
-        - schema,
-        - table,
-        - column,
-        - existing correct column parameters,
-        - right value,
-        - wrong value.
-    '''
-
-    def setUp(self):
-        patch = mock.patch.dict('sys.modules', {
-            'heleosapi': mock.MagicMock(),
-        })
-        patch.start()
-        self.addCleanup(patch.stop)
-        super(_TestModelsMigrations, self).setUp()
-        self.cfg = self.useFixture(config_fixture.Config())
-        self.cfg.config(core_plugin=CORE_PLUGIN)
-        self.alembic_config = migration.get_neutron_config()
-        self.alembic_config.neutron_config = cfg.CONF
-
-    def db_sync(self, engine):
-        cfg.CONF.set_override('connection', engine.url, group='database')
-        migration.do_alembic_command(self.alembic_config, 'upgrade', 'heads')
-
-    def get_engine(self):
-        return self.engine
-
-    def get_metadata(self):
-        return head_models.get_metadata()
-
-    def include_object(self, object_, name, type_, reflected, compare_to):
-        if type_ == 'table' and (name == 'alembic_version'
-                                 or name in external.TABLES):
-            return False
-
-        return super(_TestModelsMigrations, self).include_object(
-            object_, name, type_, reflected, compare_to)
-
-    def filter_metadata_diff(self, diff):
-        return list(filter(self.remove_unrelated_errors, diff))
-
-    # TODO(akamyshikova): remove this method as soon as comparison with
-    # Variant is implemented in oslo.db or alembic
-    def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type):
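-        # Variant columns wrap a dialect-specific impl type; unwrap it and
-        # compare against the impl so Variants are not reported as spurious
-        # differences.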
-        if isinstance(meta_type, types.Variant):
-            orig_type = meta_col.type
-            meta_col.type = meta_type.impl
-            try:
-                return self.compare_type(ctxt, insp_col, meta_col, insp_type,
-                                         meta_type.impl)
-            finally:
-                meta_col.type = orig_type
-        else:
-            ret = super(_TestModelsMigrations, self).compare_type(
-                ctxt, insp_col, meta_col, insp_type, meta_type)
-            if ret is not None:
-                return ret
-            return ctxt.impl.compare_type(insp_col, meta_col)
-
-    # Remove some differences that are not mistakes, just dialect-specific
-    # quirks, etc.
-    def remove_unrelated_errors(self, element):
-        insp = sqlalchemy.engine.reflection.Inspector.from_engine(
-            self.get_engine())
-        dialect = self.get_engine().dialect.name
-        if isinstance(element, tuple):
-            if dialect == 'mysql' and element[0] == 'remove_index':
-                table_name = element[1].table.name
-                for fk in insp.get_foreign_keys(table_name):
-                    if fk['name'] == element[1].name:
-                        return False
-                cols = [c.name for c in element[1].expressions]
-                for col in cols:
-                    if col in insp.get_pk_constraint(
-                            table_name)['constrained_columns']:
-                        return False
-        else:
-            for modified, _, table, column, _, _, new in element:
-                if modified == 'modify_default' and dialect == 'mysql':
-                    constrained = insp.get_pk_constraint(table)
-                    if column in constrained['constrained_columns']:
-                        return False
-        return True
-
-
-class TestModelsMigrationsMysql(_TestModelsMigrations,
-                                base.MySQLTestCase):
-    @contextmanager
-    def _listener(self, engine, listener_func):
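-        # Temporarily attach a SQLAlchemy before_execute hook so that every
-        # statement issued during the migration run can be inspected.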
-        try:
-            event.listen(engine, 'before_execute', listener_func)
-            yield
-        finally:
-            event.remove(engine, 'before_execute',
-                         listener_func)
-
-    # There is no point in running this against both dialects, so this test
-    # is added to the MySQL tests only
-    def test_external_tables_not_changed(self):
-
-        def block_external_tables(conn, clauseelement, multiparams, params):
-            if isinstance(clauseelement, sqlalchemy.sql.selectable.Select):
-                return
-
-            if (isinstance(clauseelement, six.string_types) and
-                    any(name in clauseelement for name in external.TABLES)):
-                self.fail("External table referenced by neutron core "
-                          "migration.")
-
-            if hasattr(clauseelement, 'element'):
-                element = clauseelement.element
-                if (element.name in external.TABLES or
-                        (hasattr(clauseelement, 'table') and
-                            element.table.name in external.TABLES)):
-                    # Table 'nsxv_vdr_dhcp_bindings' was created in Liberty,
-                    # before NSXV moved to a separate repo.
-                    if ((isinstance(clauseelement,
-                                    sqlalchemy.sql.ddl.CreateTable) and
-                            element.name == 'nsxv_vdr_dhcp_bindings')):
-                        return
-                    self.fail("External table referenced by neutron core "
-                              "migration.")
-
-        engine = self.get_engine()
-        cfg.CONF.set_override('connection', engine.url, group='database')
-        with engine.begin() as connection:
-            self.alembic_config.attributes['connection'] = connection
-            migration.do_alembic_command(self.alembic_config, 'upgrade',
-                                         'kilo')
-
-            with self._listener(engine,
-                                block_external_tables):
-                migration.do_alembic_command(self.alembic_config, 'upgrade',
-                                             'heads')
-
-    def test_branches(self):
-
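-        # Neutron keeps migrations on two branches: "expand" (additive
-        # operations only) and "contract" (destructive operations only).
-        # Each listener fails the test if a statement crosses that boundary.
-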
-        def check_expand_branch(conn, clauseelement, multiparams, params):
-            if isinstance(clauseelement, migration_help.DROP_OPERATIONS):
-                self.fail("Migration from expand branch contains drop command")
-
-        def check_contract_branch(conn, clauseelement, multiparams, params):
-            if isinstance(clauseelement, migration_help.CREATION_OPERATIONS):
-                # Skip tables that were created by mistake in contract branch
-                if hasattr(clauseelement, 'element'):
-                    element = clauseelement.element
-                    if any([
-                        isinstance(element, sqlalchemy.Table) and
-                        element.name in ['ml2_geneve_allocations',
-                                         'ml2_geneve_endpoints'],
-                        isinstance(element, sqlalchemy.Index) and
-                        element.table.name == 'ml2_geneve_allocations'
-                    ]):
-                        return
-                self.fail("Migration from contract branch contains create "
-                          "command")
-
-        engine = self.get_engine()
-        cfg.CONF.set_override('connection', engine.url, group='database')
-        with engine.begin() as connection:
-            self.alembic_config.attributes['connection'] = connection
-            migration.do_alembic_command(self.alembic_config, 'upgrade',
-                                         'kilo')
-
-            with self._listener(engine, check_expand_branch):
-                migration.do_alembic_command(
-                    self.alembic_config, 'upgrade',
-                    '%s@head' % migration.EXPAND_BRANCH)
-
-            with self._listener(engine, check_contract_branch):
-                migration.do_alembic_command(
-                    self.alembic_config, 'upgrade',
-                    '%s@head' % migration.CONTRACT_BRANCH)
-
-    def test_check_mysql_engine(self):
-        engine = self.get_engine()
-        cfg.CONF.set_override('connection', engine.url, group='database')
-        with engine.begin() as connection:
-            self.alembic_config.attributes['connection'] = connection
-            migration.do_alembic_command(self.alembic_config, 'upgrade',
-                                         'heads')
-            insp = sqlalchemy.engine.reflection.Inspector.from_engine(engine)
-            # Test that table creation on MySQL only builds InnoDB tables
-            tables = insp.get_table_names()
-            self.assertTrue(len(tables) > 0,
-                            "No tables found. Wrong schema?")
-            res = [table for table in tables if
-                   insp.get_table_options(table)['mysql_engine'] != 'InnoDB'
-                   and table != 'alembic_version']
-            self.assertEqual(0, len(res), "%s non InnoDB tables created" % res)
-
-
-class TestModelsMigrationsPsql(_TestModelsMigrations,
-                               base.PostgreSQLTestCase):
-    pass
-
-
-class TestWalkMigrations(test_base.DbTestCase):
-
-    def setUp(self):
-        super(TestWalkMigrations, self).setUp()
-        self.alembic_config = migration.get_neutron_config()
-        self.alembic_config.neutron_config = cfg.CONF
-
-    def test_no_downgrade(self):
-        script_dir = alembic_script.ScriptDirectory.from_config(
-            self.alembic_config)
-        versions = [v for v in script_dir.walk_revisions(base='base',
-                                                         head='heads')]
-        failed_revisions = []
-        for version in versions:
-            if hasattr(version.module, 'downgrade'):
-                failed_revisions.append(version.revision)
-
-        if failed_revisions:
-            self.fail('Migrations %s have downgrade' % failed_revisions)
diff --git a/neutron/tests/functional/db/test_models.py b/neutron/tests/functional/db/test_models.py
deleted file mode 100644 (file)
index a05c5a5..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy
-
-from neutron.tests import base
-
-
-class TestDBCreation(base.BaseTestCase):
-    """Check database schema can be created without conflicts.
-
-    An in-memory SQLite database is created for each test case.
-
-    """
-
-    def setUp(self):
-        super(TestDBCreation, self).setUp()
-        self.engine = sqlalchemy.create_engine('sqlite://')
-
-    def _test_creation(self, module):
-        metadata = module.get_metadata()
-        metadata.create_all(self.engine)
-
-    def test_head_creation(self):
-        from neutron.db.migration.models import head
-        self._test_creation(head)
diff --git a/neutron/tests/functional/pecan_wsgi/__init__.py b/neutron/tests/functional/pecan_wsgi/__init__.py
deleted file mode 100644 (file)
index 045c268..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import os
-from pecan import set_config
-from pecan.testing import load_test_app
-from unittest import TestCase
-
-
-__all__ = ['FunctionalTest']
-
-
-class FunctionalTest(TestCase):
-    """
-    Used for functional tests where you need to test your
-    literal application and its integration with the framework.
-    """
-
-    def setUp(self):
-        self.app = load_test_app(os.path.join(
-            os.path.dirname(__file__),
-            'config.py'
-        ))
-
-    def tearDown(self):
-        set_config({}, overwrite=True)
diff --git a/neutron/tests/functional/pecan_wsgi/config.py b/neutron/tests/functional/pecan_wsgi/config.py
deleted file mode 100644 (file)
index 988961b..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# use main app settings except for the port number so testing doesn't need to
-# listen on the main neutron port
-app = {
-    'root': 'neutron.pecan_wsgi.controllers.root.RootController',
-    'modules': ['neutron.pecan_wsgi'],
-    'errors': {
-        400: '/error',
-        '__force_dict__': True
-    }
-}
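
pecan's load_test_app() also accepts a plain configuration dictionary instead of a path to a config module such as the one above, which can be convenient for one-off tests. A minimal sketch (simplified: the real service additionally installs hooks and middleware elsewhere):

    from pecan.testing import load_test_app

    app = load_test_app({
        'app': {
            'root': 'neutron.pecan_wsgi.controllers.root.RootController',
            'modules': ['neutron.pecan_wsgi'],
            'errors': {400: '/error', '__force_dict__': True},
        }
    })
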
diff --git a/neutron/tests/functional/pecan_wsgi/test_functional.py b/neutron/tests/functional/pecan_wsgi/test_functional.py
deleted file mode 100644 (file)
index 0f0aafc..0000000
+++ /dev/null
@@ -1,580 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from collections import namedtuple
-import mock
-from oslo_config import cfg
-from oslo_policy import policy as oslo_policy
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-import pecan
-from pecan import request
-from pecan import set_config
-from pecan.testing import load_test_app
-import testtools
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron import manager
-from neutron.pecan_wsgi.controllers import root as controllers
-from neutron import policy
-from neutron.tests.unit import testlib_api
-
-_SERVICE_PLUGIN_RESOURCE = 'serviceplugin'
-_SERVICE_PLUGIN_COLLECTION = _SERVICE_PLUGIN_RESOURCE + 's'
-_SERVICE_PLUGIN_INDEX_BODY = {_SERVICE_PLUGIN_COLLECTION: []}
-
-
-class FakeServicePluginController(object):
-    resource = _SERVICE_PLUGIN_RESOURCE
-
-    @pecan.expose(generic=True,
-                  content_type='application/json',
-                  template='json')
-    def index(self):
-        return _SERVICE_PLUGIN_INDEX_BODY
-
-
-class PecanFunctionalTest(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
-        super(PecanFunctionalTest, self).setUp()
-        self.addCleanup(extensions.PluginAwareExtensionManager.clear_instance)
-        self.addCleanup(set_config, {}, overwrite=True)
-        self.set_config_overrides()
-        self.setup_app()
-        self.setup_service_plugin()
-
-    def setup_app(self):
-        self.app = load_test_app(os.path.join(
-            os.path.dirname(__file__),
-            'config.py'
-        ))
-        self._gen_port()
-
-    def _gen_port(self):
-        pl = manager.NeutronManager.get_plugin()
-        network_id = pl.create_network(context.get_admin_context(), {
-            'network':
-            {'name': 'pecannet', 'tenant_id': 'tenid', 'shared': False,
-             'admin_state_up': True, 'status': 'ACTIVE'}})['id']
-        self.port = pl.create_port(context.get_admin_context(), {
-            'port':
-            {'tenant_id': 'tenid', 'network_id': network_id,
-             'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
-             'mac_address': '00:11:22:33:44:55',
-             'admin_state_up': True, 'device_id': 'FF',
-             'device_owner': 'pecan', 'name': 'pecan'}})
-
-    def set_config_overrides(self):
-        cfg.CONF.set_override('auth_strategy', 'noauth')
-
-    def setup_service_plugin(self):
-        manager.NeutronManager.set_controller_for_resource(
-            _SERVICE_PLUGIN_COLLECTION, FakeServicePluginController())
-
-
-class TestV2Controller(PecanFunctionalTest):
-
-    def test_get(self):
-        response = self.app.get('/v2.0/ports.json')
-        self.assertEqual(response.status_int, 200)
-
-    def test_post(self):
-        response = self.app.post_json('/v2.0/ports.json',
-            params={'port': {'network_id': self.port['network_id'],
-                             'admin_state_up': True,
-                             'tenant_id': 'tenid'}},
-            headers={'X-Project-Id': 'tenid'})
-        self.assertEqual(response.status_int, 201)
-
-    def test_put(self):
-        response = self.app.put_json('/v2.0/ports/%s.json' % self.port['id'],
-                                     params={'port': {'name': 'test'}},
-                                     headers={'X-Project-Id': 'tenid'})
-        self.assertEqual(response.status_int, 200)
-
-    def test_delete(self):
-        response = self.app.delete('/v2.0/ports/%s.json' % self.port['id'],
-                                   headers={'X-Project-Id': 'tenid'})
-        self.assertEqual(response.status_int, 204)
-
-    def test_plugin_initialized(self):
-        self.assertIsNotNone(manager.NeutronManager._instance)
-
-    def test_get_extensions(self):
-        response = self.app.get('/v2.0/extensions.json')
-        self.assertEqual(response.status_int, 200)
-
-    def test_get_specific_extension(self):
-        response = self.app.get('/v2.0/extensions/allowed-address-pairs.json')
-        self.assertEqual(response.status_int, 200)
-
-    def test_service_plugin_uri(self):
-        service_plugin = namedtuple('DummyServicePlugin', 'path_prefix')
-        service_plugin.path_prefix = 'dummy'
-        nm = manager.NeutronManager.get_instance()
-        nm.service_plugins['dummy_sp'] = service_plugin
-        response = self.app.get('/v2.0/dummy/serviceplugins.json')
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(_SERVICE_PLUGIN_INDEX_BODY, response.json_body)
-
-
-class TestErrors(PecanFunctionalTest):
-
-    def test_404(self):
-        response = self.app.get('/assert_called_once', expect_errors=True)
-        self.assertEqual(response.status_int, 404)
-
-    def test_bad_method(self):
-        response = self.app.patch('/v2.0/ports/44.json',
-                                  expect_errors=True)
-        self.assertEqual(response.status_int, 405)
-
-
-class TestRequestID(PecanFunctionalTest):
-
-    def test_request_id(self):
-        response = self.app.get('/')
-        self.assertIn('x-openstack-request-id', response.headers)
-        self.assertTrue(
-            response.headers['x-openstack-request-id'].startswith('req-'))
-        id_part = response.headers['x-openstack-request-id'].split('req-')[1]
-        self.assertTrue(uuidutils.is_uuid_like(id_part))
-
-
-class TestKeystoneAuth(PecanFunctionalTest):
-
-    def set_config_overrides(self):
-        # default auth strategy is keystone so we pass
-        pass
-
-    def test_auth_enforced(self):
-        response = self.app.get('/', expect_errors=True)
-        self.assertEqual(response.status_int, 401)
-
-
-class TestInvalidAuth(PecanFunctionalTest):
-    def setup_app(self):
-        # disable normal app setup since it will fail
-        pass
-
-    def test_invalid_auth_strategy(self):
-        cfg.CONF.set_override('auth_strategy', 'badvalue')
-        with testtools.ExpectedException(n_exc.InvalidConfigurationOption):
-            load_test_app(os.path.join(os.path.dirname(__file__), 'config.py'))
-
-
-class TestExceptionTranslationHook(PecanFunctionalTest):
-
-    def test_neutron_nonfound_to_webob_exception(self):
-        # This endpoint raises a Neutron NotFound exception; make sure it
-        # gets translated into a 404 error.
-        with mock.patch(
-            'neutron.pecan_wsgi.controllers.root.CollectionsController.get',
-            side_effect=n_exc.NotFound()
-        ):
-            response = self.app.get('/v2.0/ports.json', expect_errors=True)
-            self.assertEqual(response.status_int, 404)
-
-    def test_unexpected_exception(self):
-        with mock.patch(
-            'neutron.pecan_wsgi.controllers.root.CollectionsController.get',
-            side_effect=ValueError('secretpassword')
-        ):
-            response = self.app.get('/v2.0/ports.json', expect_errors=True)
-            self.assertNotIn('secretpassword', response.body)
-            self.assertEqual(response.status_int, 500)
-
-
-class TestRequestProcessing(PecanFunctionalTest):
-
-    def setUp(self):
-        super(TestRequestProcessing, self).setUp()
-
-        # request.context is thread-local storage, so it has to be accessed
-        # from within the controller. We capture it here via the mocked side
-        # effects so we can assert on it after the request finishes.
-
-        def capture_request_details(*args, **kwargs):
-            self.captured_context = request.context
-
-        mock.patch('neutron.pecan_wsgi.controllers.root.'
-                   'CollectionsController.get',
-                   side_effect=capture_request_details).start()
-        mock.patch('neutron.pecan_wsgi.controllers.root.'
-                   'CollectionsController.create',
-                   side_effect=capture_request_details).start()
-        mock.patch('neutron.pecan_wsgi.controllers.root.ItemController.get',
-                   side_effect=capture_request_details).start()
-    # TODO(kevinbenton): add context tests for X-Roles etc
-
-    def test_context_set_in_request(self):
-        self.app.get('/v2.0/ports.json',
-                     headers={'X-Project-Id': 'tenant_id'})
-        self.assertEqual('tenant_id',
-                         self.captured_context['neutron_context'].tenant_id)
-
-    def test_core_resource_identified(self):
-        self.app.get('/v2.0/ports.json')
-        self.assertEqual('port', self.captured_context['resource'])
-        self.assertEqual('ports', self.captured_context['collection'])
-
-    def test_lookup_identifies_resource_id(self):
-        # We know this will return a 404, but that's not the point, since
-        # the controller is mocked.
-        self.app.get('/v2.0/ports/reina.json')
-        self.assertEqual('port', self.captured_context['resource'])
-        self.assertEqual('ports', self.captured_context['collection'])
-        self.assertEqual('reina', self.captured_context['resource_id'])
-
-    def test_resource_processing_post(self):
-        self.app.post_json(
-            '/v2.0/ports.json',
-            params={'port': {'network_id': self.port['network_id'],
-                             'name': 'the_port',
-                             'admin_state_up': True}},
-            headers={'X-Project-Id': 'tenid'})
-        self.assertEqual('port', self.captured_context['resource'])
-        self.assertEqual('ports', self.captured_context['collection'])
-        resources = self.captured_context['resources']
-        self.assertEqual(1, len(resources))
-        self.assertEqual(self.port['network_id'],
-                         resources[0]['network_id'])
-        self.assertEqual('the_port', resources[0]['name'])
-
-    def test_resource_processing_post_bulk(self):
-        self.app.post_json(
-            '/v2.0/ports.json',
-            params={'ports': [{'network_id': self.port['network_id'],
-                               'name': 'the_port_1',
-                               'admin_state_up': True},
-                              {'network_id': self.port['network_id'],
-                               'name': 'the_port_2',
-                               'admin_state_up': True}]},
-            headers={'X-Project-Id': 'tenid'})
-        resources = self.captured_context['resources']
-        self.assertEqual(2, len(resources))
-        self.assertEqual(self.port['network_id'],
-                         resources[0]['network_id'])
-        self.assertEqual('the_port_1', resources[0]['name'])
-        self.assertEqual(self.port['network_id'],
-                         resources[1]['network_id'])
-        self.assertEqual('the_port_2', resources[1]['name'])
-
-    def test_resource_processing_post_unknown_attribute_returns_400(self):
-        response = self.app.post_json(
-            '/v2.0/ports.json',
-            params={'port': {'network_id': self.port['network_id'],
-                             'name': 'the_port',
-                             'alien': 'E.T.',
-                             'admin_state_up': True}},
-            headers={'X-Project-Id': 'tenid'},
-            expect_errors=True)
-        self.assertEqual(400, response.status_int)
-
-    def test_resource_processing_post_validation_error_returns_400(self):
-        response = self.app.post_json(
-            '/v2.0/ports.json',
-            params={'port': {'network_id': self.port['network_id'],
-                             'name': 'the_port',
-                             'admin_state_up': 'invalid_value'}},
-            headers={'X-Project-Id': 'tenid'},
-            expect_errors=True)
-        self.assertEqual(400, response.status_int)
-
-    def test_service_plugin_identified(self):
-        # TODO(kevinbenton): fix the unit test setup to include an l3 plugin
-        self.skipTest("A dummy l3 plugin needs to be set up")
-        self.app.get('/v2.0/routers.json')
-        self.assertEqual('router', self.req_stash['resource_type'])
-        # make sure the core plugin was identified as the handler for ports
-        self.assertEqual(
-            manager.NeutronManager.get_service_plugins()['L3_ROUTER_NAT'],
-            self.req_stash['plugin'])
-
-
-class TestEnforcementHooks(PecanFunctionalTest):
-
-    def test_network_ownership_check(self):
-        response = self.app.post_json(
-            '/v2.0/ports.json',
-            params={'port': {'network_id': self.port['network_id'],
-                             'admin_state_up': True}},
-            headers={'X-Project-Id': 'tenid'})
-        self.assertEqual(201, response.status_int)
-
-    def test_quota_enforcement(self):
-        # TODO(kevinbenton): this test should do something
-        pass
-
-
-class TestPolicyEnforcementHook(PecanFunctionalTest):
-
-    FAKE_RESOURCE = {
-        'mehs': {
-            'id': {'allow_post': False, 'allow_put': False,
-                   'is_visible': True, 'primary_key': True},
-            'attr': {'allow_post': True, 'allow_put': True,
-                     'is_visible': True, 'default': ''},
-            'restricted_attr': {'allow_post': True, 'allow_put': True,
-                                'is_visible': True, 'default': ''},
-            'tenant_id': {'allow_post': True, 'allow_put': False,
-                          'required_by_policy': True,
-                          'validate': {'type:string':
-                                       attributes.TENANT_ID_MAX_LEN},
-                          'is_visible': True}
-        }
-    }
-
-    def setUp(self):
-        # Create a controller for a fake resource. This makes the tests
-        # independent of the evolution of the API (so if one changes the API
-        # or the default policies there should be no risk of breaking these
-        # tests, or at least so we hope).
-        super(TestPolicyEnforcementHook, self).setUp()
-        self.mock_plugin = mock.Mock()
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(self.FAKE_RESOURCE)
-        attributes.PLURALS['mehs'] = 'meh'
-        manager.NeutronManager.set_plugin_for_resource('meh', self.mock_plugin)
-        fake_controller = controllers.CollectionsController('mehs', 'meh')
-        manager.NeutronManager.set_controller_for_resource(
-            'mehs', fake_controller)
-        # Inject policies for the fake resource
-        policy.init()
-        policy._ENFORCER.set_rules(
-            oslo_policy.Rules.from_dict(
-                {'create_meh': '',
-                 'update_meh': 'rule:admin_only',
-                 'delete_meh': 'rule:admin_only',
-                 'get_meh': 'rule:admin_only or field:mehs:id=xxx',
-                 'get_meh:restricted_attr': 'rule:admin_only'}),
-            overwrite=False)
-
-    def test_before_on_create_authorized(self):
-        # Mock a return value for a hypothetical create operation
-        self.mock_plugin.create_meh.return_value = {
-            'id': 'xxx',
-            'attr': 'meh',
-            'restricted_attr': '',
-            'tenant_id': 'tenid'}
-        response = self.app.post_json('/v2.0/mehs.json',
-                                      params={'meh': {'attr': 'meh'}},
-                                      headers={'X-Project-Id': 'tenid'})
-        # We expect this operation to succeed
-        self.assertEqual(201, response.status_int)
-        self.assertEqual(0, self.mock_plugin.get_meh.call_count)
-        self.assertEqual(1, self.mock_plugin.create_meh.call_count)
-
-    def test_before_on_put_not_authorized(self):
-        # The policy hook here should load the resource, and therefore we must
-        # mock a get response
-        self.mock_plugin.get_meh.return_value = {
-            'id': 'xxx',
-            'attr': 'meh',
-            'restricted_attr': '',
-            'tenant_id': 'tenid'}
-        # The policy engine should trigger an exception in 'before', and the
-        # plugin method should not be called at all
-        response = self.app.put_json('/v2.0/mehs/xxx.json',
-                                     params={'meh': {'attr': 'meh'}},
-                                     headers={'X-Project-Id': 'tenid'},
-                                     expect_errors=True)
-        self.assertEqual(403, response.status_int)
-        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
-        self.assertEqual(0, self.mock_plugin.update_meh.call_count)
-
-    def test_before_on_delete_not_authorized(self):
-        # The policy hook here should load the resource, and therefore we must
-        # mock a get response
-        self.mock_plugin.delete_meh.return_value = None
-        self.mock_plugin.get_meh.return_value = {
-            'id': 'xxx',
-            'attr': 'meh',
-            'restricted_attr': '',
-            'tenant_id': 'tenid'}
-        # The policy engine should trigger an exception in 'before', and the
-        # plugin method should not be called
-        response = self.app.delete_json('/v2.0/mehs/xxx.json',
-                                        headers={'X-Project-Id': 'tenid'},
-                                        expect_errors=True)
-        self.assertEqual(403, response.status_int)
-        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
-        self.assertEqual(0, self.mock_plugin.delete_meh.call_count)
-
-    def test_after_on_get_not_authorized(self):
-        # The GET test policy will deny access to anything whose id is not
-        # 'xxx', so the following request should be forbidden
-        self.mock_plugin.get_meh.return_value = {
-            'id': 'yyy',
-            'attr': 'meh',
-            'restricted_attr': '',
-            'tenant_id': 'tenid'}
-        # The policy engine should trigger an exception in 'after', and the
-        # plugin method should be called
-        response = self.app.get('/v2.0/mehs/yyy.json',
-                                headers={'X-Project-Id': 'tenid'},
-                                expect_errors=True)
-        self.assertEqual(403, response.status_int)
-        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
-
-    def test_after_on_get_excludes_admin_attribute(self):
-        self.mock_plugin.get_meh.return_value = {
-            'id': 'xxx',
-            'attr': 'meh',
-            'restricted_attr': '',
-            'tenant_id': 'tenid'}
-        response = self.app.get('/v2.0/mehs/xxx.json',
-                                headers={'X-Project-Id': 'tenid'})
-        self.assertEqual(200, response.status_int)
-        json_response = jsonutils.loads(response.body)
-        self.assertNotIn('restricted_attr', json_response['meh'])
-
-    def test_after_on_list_excludes_admin_attribute(self):
-        self.mock_plugin.get_mehs.return_value = [{
-            'id': 'xxx',
-            'attr': 'meh',
-            'restricted_attr': '',
-            'tenant_id': 'tenid'}]
-        response = self.app.get('/v2.0/mehs',
-                                headers={'X-Project-Id': 'tenid'})
-        self.assertEqual(200, response.status_int)
-        json_response = jsonutils.loads(response.body)
-        self.assertNotIn('restricted_attr', json_response['mehs'][0])
-
-
-class TestRootController(PecanFunctionalTest):
-    """Test version listing on root URI."""
-
-    def test_get(self):
-        response = self.app.get('/')
-        self.assertEqual(response.status_int, 200)
-        json_body = jsonutils.loads(response.body)
-        versions = json_body.get('versions')
-        self.assertEqual(1, len(versions))
-        for (attr, value) in controllers.V2Controller.version_info.items():
-            self.assertIn(attr, versions[0])
-            self.assertEqual(value, versions[0][attr])
-
-    def _test_method_returns_405(self, method):
-        api_method = getattr(self.app, method)
-        response = api_method('/', expect_errors=True)
-        self.assertEqual(response.status_int, 405)
-
-    def test_post(self):
-        self._test_method_returns_405('post')
-
-    def test_put(self):
-        self._test_method_returns_405('put')
-
-    def test_patch(self):
-        self._test_method_returns_405('patch')
-
-    def test_delete(self):
-        self._test_method_returns_405('delete')
-
-    def test_head(self):
-        self._test_method_returns_405('head')
-
-
-class TestQuotasController(TestRootController):
-    """Test quota management API controller."""
-
-    base_url = '/v2.0/quotas'
-    default_expected_limits = {
-        'network': 10,
-        'port': 50,
-        'subnet': 10}
-
-    def _verify_limits(self, response, limits):
-        for resource, limit in limits.items():
-            self.assertEqual(limit, response['quota'][resource])
-
-    def _verify_default_limits(self, response):
-        self._verify_limits(response, self.default_expected_limits)
-
-    def _verify_after_update(self, response, updated_limits):
-        expected_limits = self.default_expected_limits.copy()
-        expected_limits.update(updated_limits)
-        self._verify_limits(response, expected_limits)
-
-    def test_index_admin(self):
-        # NOTE(salv-orlando): The quota controller has a hardcoded check for
-        # admin-ness for this operation, which is supposed to return quotas
-        # for all tenants. This check is "vestigial" from the home-grown WSGI
-        # framework and should be removed.
-        response = self.app.get('%s.json' % self.base_url,
-                                headers={'X-Project-Id': 'admin',
-                                         'X-Roles': 'admin'})
-        self.assertEqual(200, response.status_int)
-
-    def test_index(self):
-        response = self.app.get('%s.json' % self.base_url, expect_errors=True)
-        self.assertEqual(403, response.status_int)
-
-    def test_get_admin(self):
-        response = self.app.get('%s/foo.json' % self.base_url,
-                                headers={'X-Project-Id': 'admin',
-                                         'X-Roles': 'admin'})
-        self.assertEqual(200, response.status_int)
-        # As quota limits have not been updated, expect default values
-        json_body = jsonutils.loads(response.body)
-        self._verify_default_limits(json_body)
-
-    def test_get(self):
-        # It is not ok to access another tenant's limits
-        url = '%s/foo.json' % self.base_url
-        response = self.app.get(url, expect_errors=True)
-        self.assertEqual(403, response.status_int)
-        # It is however ok to retrieve your own limits
-        response = self.app.get(url, headers={'X-Project-Id': 'foo'})
-        self.assertEqual(200, response.status_int)
-        json_body = jsonutils.loads(response.body)
-        self._verify_default_limits(json_body)
-
-    def test_put_get_delete(self):
-        # PUT and DELETE actions are in the same test, since a meaningful
-        # DELETE test would require a PUT anyway.
-        url = '%s/foo.json' % self.base_url
-        response = self.app.put_json(url,
-                                     params={'quota': {'network': 99}},
-                                     headers={'X-Project-Id': 'admin',
-                                              'X-Roles': 'admin'})
-        self.assertEqual(200, response.status_int)
-        json_body = jsonutils.loads(response.body)
-        self._verify_after_update(json_body, {'network': 99})
-
-        response = self.app.get(url, headers={'X-Project-Id': 'foo'})
-        self.assertEqual(200, response.status_int)
-        json_body = jsonutils.loads(response.body)
-        self._verify_after_update(json_body, {'network': 99})
-
-        response = self.app.delete(url, headers={'X-Project-Id': 'admin',
-                                                 'X-Roles': 'admin'})
-        self.assertEqual(204, response.status_int)
-        # As DELETE does not return a body we need another GET
-        response = self.app.get(url, headers={'X-Project-Id': 'foo'})
-        self.assertEqual(200, response.status_int)
-        json_body = jsonutils.loads(response.body)
-        self._verify_default_limits(json_body)
-
-    def test_delete(self):
-        # TODO(salv-orlando)
-        pass
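
The policy-hook tests above inject rules through neutron.policy, but the underlying machinery is plain oslo.policy. A minimal standalone sketch of that pattern, with hypothetical credentials (neutron builds the real ones from the request context):

    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.set_rules(oslo_policy.Rules.from_dict({
        'admin_only': 'role:admin',
        'update_meh': 'rule:admin_only',
    }))

    creds = {'roles': ['member'], 'project_id': 'tenid'}  # hypothetical
    target = {'tenant_id': 'tenid'}
    # False for a non-admin caller; with do_raise=True this raises
    # PolicyNotAuthorized instead, which the hook turns into a 403.
    allowed = enforcer.enforce('update_meh', target, creds)
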
diff --git a/neutron/tests/functional/requirements.txt b/neutron/tests/functional/requirements.txt
deleted file mode 100644 (file)
index 62285e9..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-# Additional requirements for functional tests
-
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
-psutil>=1.1.1,<2.0.0
-psycopg2
diff --git a/neutron/tests/functional/sanity/__init__.py b/neutron/tests/functional/sanity/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/sanity/test_sanity.py b/neutron/tests/functional/sanity/test_sanity.py
deleted file mode 100644 (file)
index b6aec47..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.cmd.sanity import checks
-from neutron.tests import base
-from neutron.tests.functional import base as functional_base
-
-
-class SanityTestCase(base.BaseTestCase):
-    """Sanity checks that do not require root access.
-
-    Tests that just call checks.some_function() exist to ensure that
-    neutron-sanity-check runs without throwing an exception, as in the case
-    where someone modifies the API without updating the check script.
-    """
-
-    def setUp(self):
-        super(SanityTestCase, self).setUp()
-
-    def test_nova_notify_runs(self):
-        checks.nova_notify_supported()
-
-    def test_dnsmasq_version(self):
-        checks.dnsmasq_version_supported()
-
-    def test_dibbler_version(self):
-        checks.dibbler_version_supported()
-
-    def test_ipset_support(self):
-        checks.ipset_supported()
-
-    def test_ip6tables_support(self):
-        checks.ip6tables_supported()
-
-
-class SanityTestCaseRoot(functional_base.BaseSudoTestCase):
-    """Sanity checks that require root access.
-
-    Tests that just call checks.some_function() exist to ensure that
-    neutron-sanity-check runs without throwing an exception, as in the case
-    where someone modifies the API without updating the check script.
-    """
-
-    def test_ovs_vxlan_support_runs(self):
-        checks.ovs_vxlan_supported()
-
-    def test_ovs_geneve_support_runs(self):
-        checks.ovs_geneve_supported()
-
-    def test_iproute2_vxlan_support_runs(self):
-        checks.iproute2_vxlan_supported()
-
-    def test_ovs_patch_support_runs(self):
-        checks.patch_supported()
-
-    def test_arp_responder_runs(self):
-        checks.arp_responder_supported()
-
-    def test_arp_header_match_runs(self):
-        checks.arp_header_match_supported()
-
-    def test_icmpv6_header_match_runs(self):
-        checks.icmpv6_header_match_supported()
-
-    def test_vf_management_runs(self):
-        checks.vf_management_supported()
-
-    def test_namespace_root_read_detection_runs(self):
-        checks.netns_read_requires_helper()
-
-    def test_ovsdb_native_supported_runs(self):
-        checks.ovsdb_native_supported()
-
-    def test_keepalived_ipv6_support(self):
-        checks.keepalived_ipv6_supported()
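
Each checks.* function called above probes the host for a single capability and returns a boolean; the tests only assert that the call itself does not raise. A hypothetical sketch of that shape (not neutron's actual API; shutil.which needs Python 3):

    import shutil


    def dnsmasq_installed():
        """Return True if a dnsmasq binary is available on PATH."""
        return shutil.which('dnsmasq') is not None
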
diff --git a/neutron/tests/functional/scheduler/__init__.py b/neutron/tests/functional/scheduler/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py b/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py
deleted file mode 100644 (file)
index 7037af0..0000000
+++ /dev/null
@@ -1,561 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import six
-import testscenarios
-
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import common_db_mixin
-from neutron.scheduler import dhcp_agent_scheduler
-from neutron.tests.unit.scheduler import (test_dhcp_agent_scheduler as
-                                          test_dhcp_sch)
-from operator import attrgetter
-
-# Required to generate tests from scenarios. Not compatible with nose.
-load_tests = testscenarios.load_tests_apply_scenarios
-
-
-class BaseTestScheduleNetwork(object):
-    """Base class which defines scenarios for schedulers.
-
-        agent_count
-            Number of DHCP agents (also the number of hosts).
-
-        max_agents_per_network
-            Maximum number of DHCP agents that can be scheduled for a
-            network.
-
-        scheduled_agent_count
-            Number of agents the network has previously been scheduled to.
-
-        down_agent_count
-            Number of DHCP agents which are down.
-
-        expected_scheduled_agent_count
-            Number of scheduled agents schedule() should return, or None
-            if schedule() cannot schedule the network.
-    """
-
-    scenarios = [
-        ('No agents scheduled if no agents are present',
-         dict(agent_count=0,
-              max_agents_per_network=1,
-              scheduled_agent_count=0,
-              down_agent_count=0,
-              expected_scheduled_agent_count=None)),
-
-        ('No agents scheduled if network already hosted and'
-         ' max_agents_per_network reached',
-         dict(agent_count=1,
-              max_agents_per_network=1,
-              scheduled_agent_count=1,
-              down_agent_count=0,
-              expected_scheduled_agent_count=None)),
-
-        ('No agents scheduled if all agents are down',
-         dict(agent_count=2,
-              max_agents_per_network=1,
-              scheduled_agent_count=0,
-              down_agent_count=2,
-              expected_scheduled_agent_count=None)),
-
-        ('Agent scheduled to the network if network is not yet hosted',
-         dict(agent_count=1,
-              max_agents_per_network=1,
-              scheduled_agent_count=0,
-              down_agent_count=0,
-              expected_scheduled_agent_count=1)),
-
-        ('Additional agents scheduled to the network if max_agents_per_network'
-         ' is not yet reached',
-         dict(agent_count=3,
-              max_agents_per_network=3,
-              scheduled_agent_count=1,
-              down_agent_count=0,
-              expected_scheduled_agent_count=2)),
-
-        ('No agent scheduled if agent is dead',
-         dict(agent_count=3,
-              max_agents_per_network=3,
-              scheduled_agent_count=1,
-              down_agent_count=1,
-              expected_scheduled_agent_count=1)),
-    ]
-
-
-class TestChanceScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
-                                agentschedulers_db.DhcpAgentSchedulerDbMixin,
-                                agents_db.AgentDbMixin,
-                                common_db_mixin.CommonDbMixin,
-                                BaseTestScheduleNetwork):
-    """Test various scenarios for ChanceScheduler.schedule."""
-
-    def test_schedule_network(self):
-        self.config(dhcp_agents_per_network=self.max_agents_per_network)
-        scheduler = dhcp_agent_scheduler.ChanceScheduler()
-
-        # create dhcp agents
-        hosts = ['host-%s' % i for i in range(self.agent_count)]
-        dhcp_agents = self._create_and_set_agents_down(
-            hosts, down_agent_count=self.down_agent_count)
-
-        active_agents = dhcp_agents[self.down_agent_count:]
-
-        # schedule some agents before calling schedule
-        if self.scheduled_agent_count:
-            # schedule the network
-            schedule_agents = active_agents[:self.scheduled_agent_count]
-            scheduler.resource_filter.bind(self.ctx,
-                                           schedule_agents, self.network_id)
-        actual_scheduled_agents = scheduler.schedule(self, self.ctx,
-                                                     self.network)
-        if self.expected_scheduled_agent_count:
-            self.assertEqual(self.expected_scheduled_agent_count,
-                             len(actual_scheduled_agents))
-            hosted_agents = self.list_dhcp_agents_hosting_network(
-                self.ctx, self.network_id)
-            self.assertEqual(self.scheduled_agent_count +
-                             len(actual_scheduled_agents),
-                             len(hosted_agents['agents']))
-        else:
-            self.assertEqual([], actual_scheduled_agents)
-
-
-class TestWeightScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
-                                agentschedulers_db.DhcpAgentSchedulerDbMixin,
-                                agents_db.AgentDbMixin,
-                                common_db_mixin.CommonDbMixin,
-                                BaseTestScheduleNetwork):
-    """Test various scenarios for WeightScheduler.schedule."""
-
-    def test_weight_schedule_network(self):
-        self.config(dhcp_agents_per_network=self.max_agents_per_network)
-        scheduler = dhcp_agent_scheduler.WeightScheduler()
-
-        # create dhcp agents
-        hosts = ['host-%s' % i for i in range(self.agent_count)]
-        dhcp_agents = self._create_and_set_agents_down(
-            hosts, down_agent_count=self.down_agent_count)
-
-        active_agents = dhcp_agents[self.down_agent_count:]
-
-        unscheduled_active_agents = list(active_agents)
-        # schedule some agents before calling schedule
-        if self.scheduled_agent_count:
-            # schedule the network
-            schedule_agents = active_agents[:self.scheduled_agent_count]
-            scheduler.resource_filter.bind(self.ctx,
-                                           schedule_agents, self.network_id)
-            for agent in schedule_agents:
-                unscheduled_active_agents.remove(agent)
-        actual_scheduled_agents = scheduler.schedule(self, self.ctx,
-                                                     self.network)
-        if self.expected_scheduled_agent_count:
-            sorted_unscheduled_active_agents = sorted(
-                unscheduled_active_agents,
-                key=attrgetter('load'))[0:self.expected_scheduled_agent_count]
-            self.assertItemsEqual(
-                (agent['id'] for agent in actual_scheduled_agents),
-                (agent['id'] for agent in sorted_unscheduled_active_agents))
-            self.assertEqual(self.expected_scheduled_agent_count,
-                             len(actual_scheduled_agents))
-            hosted_agents = self.list_dhcp_agents_hosting_network(
-                self.ctx, self.network_id)
-            self.assertEqual(self.scheduled_agent_count +
-                             len(actual_scheduled_agents),
-                             len(hosted_agents['agents']))
-        else:
-            self.assertEqual([], actual_scheduled_agents)
-
-
-class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
-                       agentschedulers_db.DhcpAgentSchedulerDbMixin,
-                       agents_db.AgentDbMixin,
-                       common_db_mixin.CommonDbMixin):
-    """Test various scenarios for ChanceScheduler.auto_schedule_networks.
-
-        Below is a brief description of the scenario variables
-        --------------------------------------------------------
-        agent_count
-            number of DHCP agents (also number of hosts).
-
-        max_agents_per_network
-            Maximum number of DHCP agents that can be scheduled for a
-            network.
-
-        network_count
-            Number of networks.
-
-        networks_with_dhcp_disabled
-            List of networks with DHCP disabled.
-
-        hosted_networks
-            A mapping of agent id to the ids of the networks that they
-            should be initially hosting.
-
-        expected_auto_schedule_return_value
-            Expected return value of 'auto_schedule_networks'.
-
-        expected_hosted_networks
-            This stores the expected networks that should have been scheduled
-            (or that could have already been scheduled) for each agent
-            after the 'auto_schedule_networks' function is called.
-
-        no_network_with_az_match
-            If this parameter is True, there is no unscheduled network whose
-            availability_zone_hints match an availability zone of the agents
-            to be scheduled. The default is False.
-    """
-
-    scenarios = [
-        ('Agent scheduled to the network if network is not yet hosted',
-         dict(agent_count=1,
-              max_agents_per_network=1,
-              network_count=1,
-              networks_with_dhcp_disabled=[],
-              hosted_networks={},
-              expected_auto_schedule_return_value=True,
-              expected_hosted_networks={'agent-0': ['network-0']})),
-
-        ('No agent scheduled if no networks are present',
-         dict(agent_count=1,
-              max_agents_per_network=1,
-              network_count=0,
-              networks_with_dhcp_disabled=[],
-              hosted_networks={},
-              expected_auto_schedule_return_value=False,
-              expected_hosted_networks={'agent-0': []})),
-
-        ('Agents scheduled to the networks if networks are not yet hosted',
-         dict(agent_count=2,
-              max_agents_per_network=3,
-              network_count=2,
-              networks_with_dhcp_disabled=[],
-              hosted_networks={},
-              expected_auto_schedule_return_value=True,
-              expected_hosted_networks={'agent-0': ['network-0',
-                                                    'network-1'],
-                                        'agent-1': ['network-0',
-                                                    'network-1']})),
-
-        ('No new agents scheduled if networks are already hosted',
-         dict(agent_count=2,
-              max_agents_per_network=3,
-              network_count=2,
-              networks_with_dhcp_disabled=[],
-              hosted_networks={'agent-0': ['network-0', 'network-1'],
-                               'agent-1': ['network-0', 'network-1']},
-              expected_auto_schedule_return_value=True,
-              expected_hosted_networks={'agent-0': ['network-0',
-                                                    'network-1'],
-                                        'agent-1': ['network-0',
-                                                    'network-1']})),
-
-        ('Additional agents scheduled to the networks if'
-         ' max_agents_per_network is not yet reached',
-         dict(agent_count=4,
-              max_agents_per_network=3,
-              network_count=4,
-              networks_with_dhcp_disabled=[],
-              hosted_networks={'agent-0': ['network-0', 'network-1'],
-                               'agent-1': ['network-0'],
-                               'agent-2': ['network-2'],
-                               'agent-3': ['network-0', 'network-2']},
-              expected_auto_schedule_return_value=True,
-              expected_hosted_networks={'agent-0': ['network-0',
-                                                    'network-1',
-                                                    'network-2',
-                                                    'network-3'],
-                                        'agent-1': ['network-0',
-                                                    'network-1',
-                                                    'network-2',
-                                                    'network-3'],
-                                        'agent-2': ['network-1',
-                                                    'network-2',
-                                                    'network-3'],
-                                        'agent-3': ['network-0',
-                                                    'network-1',
-                                                    'network-2',
-                                                    'network-3']})),
-
-        ('No agents scheduled if networks already hosted and'
-         ' max_agents_per_network reached',
-         dict(agent_count=4,
-              max_agents_per_network=1,
-              network_count=4,
-              networks_with_dhcp_disabled=[],
-              hosted_networks={'agent-0': ['network-0'],
-                               'agent-1': ['network-2'],
-                               'agent-2': ['network-1'],
-                               'agent-3': ['network-3']},
-              expected_auto_schedule_return_value=True,
-              expected_hosted_networks={'agent-0': ['network-0'],
-                                        'agent-1': ['network-2'],
-                                        'agent-2': ['network-1'],
-                                        'agent-3': ['network-3']})),
-
-        ('No agents scheduled to the network with dhcp disabled',
-         dict(agent_count=2,
-              max_agents_per_network=3,
-              network_count=2,
-              networks_with_dhcp_disabled=['network-1'],
-              hosted_networks={},
-              expected_auto_schedule_return_value=True,
-              expected_hosted_networks={'agent-0': ['network-0'],
-                                        'agent-1': ['network-0']})),
-
-        ('No agents scheduled if all networks have dhcp disabled',
-         dict(agent_count=2,
-              max_agents_per_network=3,
-              network_count=2,
-              networks_with_dhcp_disabled=['network-0', 'network-1'],
-              hosted_networks={},
-              expected_auto_schedule_return_value=False,
-              expected_hosted_networks={'agent-0': [],
-                                        'agent-1': []})),
-
-        ('No agents scheduled if unscheduled network does not match AZ',
-         dict(agent_count=1,
-              max_agents_per_network=1,
-              network_count=1,
-              networks_with_dhcp_disabled=[],
-              hosted_networks={},
-              expected_auto_schedule_return_value=True,
-              expected_hosted_networks={'agent-0': []},
-              no_network_with_az_match=True)),
-    ]
-
-    def _strip_host_index(self, name):
-        """Strips the host index.
-
-        E.g., if name = '2-agent-3', then 'agent-3' is returned.
-        """
-        return name[name.find('-') + 1:]
-
-    def _extract_index(self, name):
-        """Extracts the index number and returns.
-
-        Eg. if name = '2-agent-3', then 3 is returned
-        """
-        return int(name.split('-')[-1])
-
-    def get_subnets(self, context, fields=None):
-        subnets = []
-        for net_id in self._networks:
-            enable_dhcp = (self._strip_host_index(net_id) not in
-                           self.networks_with_dhcp_disabled)
-            subnets.append({'network_id': net_id,
-                            'enable_dhcp': enable_dhcp})
-        return subnets
-
-    def get_network(self, context, net_id):
-        az_hints = []
-        if getattr(self, 'no_network_with_az_match', False):
-            az_hints = ['not-match']
-        return {'availability_zone_hints': az_hints}
-
-    def _get_hosted_networks_on_dhcp_agent(self, agent_id):
-        query = self.ctx.session.query(
-            agentschedulers_db.NetworkDhcpAgentBinding.network_id)
-        query = query.filter(
-            agentschedulers_db.NetworkDhcpAgentBinding.dhcp_agent_id ==
-            agent_id)
-
-        return [item[0] for item in query]
-
-    def _test_auto_schedule(self, host_index):
-        self.config(dhcp_agents_per_network=self.max_agents_per_network)
-        scheduler = dhcp_agent_scheduler.ChanceScheduler()
-        self.ctx = context.get_admin_context()
-        msg = 'host_index = %s' % host_index
-
-        # create dhcp agents
-        hosts = ['%s-agent-%s' % (host_index, i)
-                 for i in range(self.agent_count)]
-        dhcp_agents = self._create_and_set_agents_down(hosts)
-
-        # create networks
-        self._networks = ['%s-network-%s' % (host_index, i)
-                          for i in range(self.network_count)]
-        self._save_networks(self._networks)
-
-        # pre schedule the networks to the agents defined in
-        # self.hosted_networks before calling auto_schedule_network
-        for agent, networks in six.iteritems(self.hosted_networks):
-            agent_index = self._extract_index(agent)
-            for net in networks:
-                net_index = self._extract_index(net)
-                scheduler.resource_filter.bind(self.ctx,
-                                               [dhcp_agents[agent_index]],
-                                               self._networks[net_index])
-
-        retval = scheduler.auto_schedule_networks(self, self.ctx,
-                                                  hosts[host_index])
-        self.assertEqual(self.expected_auto_schedule_return_value, retval,
-                         message=msg)
-
-        agent_id = dhcp_agents[host_index].id
-        hosted_networks = self._get_hosted_networks_on_dhcp_agent(agent_id)
-        hosted_net_ids = [self._strip_host_index(net)
-                          for net in hosted_networks]
-        expected_hosted_networks = self.expected_hosted_networks['agent-%s' %
-                                                                 host_index]
-        self.assertItemsEqual(hosted_net_ids, expected_hosted_networks, msg)
-
-    def test_auto_schedule(self):
-        for i in range(self.agent_count):
-            self._test_auto_schedule(i)
-
-
-class TestAZAwareWeightScheduler(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
-                                 agentschedulers_db.DhcpAgentSchedulerDbMixin,
-                                 agents_db.AgentDbMixin,
-                                 common_db_mixin.CommonDbMixin):
-    """Test various scenarios for AZAwareWeightScheduler.schedule.
-
-        az_count
-            Number of AZs.
-
-        network_az_hints
-            Number of AZs in availability_zone_hints of the network.
-
-        agent_count[each az]
-            Number of dhcp agents (also number of hosts).
-
-        max_agents_per_network
-            Maximum number of DHCP agents that can be scheduled for a
-            network.
-
-        scheduled_agent_count[each az]
-            Number of agents the network has previously been scheduled to.
-
-        down_agent_count[each az]
-            Number of DHCP agents which are down.
-
-        expected_scheduled_agent_count[each az]
-            Number of scheduled agents schedule() should return, or None
-            if schedule() cannot schedule the network.
-    """
-
-    scenarios = [
-        ('Single hint, Single agent, Scheduled an agent of the specified AZ',
-         dict(az_count=2,
-              network_az_hints=1,
-              agent_count=[1, 1],
-              max_agents_per_network=1,
-              scheduled_agent_count=[0, 0],
-              down_agent_count=[0, 0],
-              expected_scheduled_agent_count=[1, 0])),
-
-        ('Multi hints, Multi agents, Scheduled agents of the specified AZs',
-         dict(az_count=3,
-              network_az_hints=2,
-              agent_count=[1, 1, 1],
-              max_agents_per_network=2,
-              scheduled_agent_count=[0, 0, 0],
-              down_agent_count=[0, 0, 0],
-              expected_scheduled_agent_count=[1, 1, 0])),
-
-        ('Single hint, Multi agents, Scheduled agents of the specified AZ',
-         dict(az_count=2,
-              network_az_hints=1,
-              agent_count=[2, 1],
-              max_agents_per_network=2,
-              scheduled_agent_count=[0, 0],
-              down_agent_count=[0, 0],
-              expected_scheduled_agent_count=[2, 0])),
-
-        ('Multi hints, Multi agents, Only single AZ available',
-         dict(az_count=2,
-              network_az_hints=2,
-              agent_count=[2, 1],
-              max_agents_per_network=2,
-              scheduled_agent_count=[0, 0],
-              down_agent_count=[0, 1],
-              expected_scheduled_agent_count=[2, 0])),
-
-        ('Multi hints, Multi agents, Not enough agents',
-         dict(az_count=3,
-              network_az_hints=3,
-              agent_count=[1, 1, 1],
-              max_agents_per_network=3,
-              scheduled_agent_count=[0, 0, 0],
-              down_agent_count=[0, 1, 0],
-              expected_scheduled_agent_count=[1, 0, 1])),
-
-        ('Multi hints, Multi agents, Partially scheduled, Another AZ selected',
-         dict(az_count=3,
-              network_az_hints=2,
-              agent_count=[1, 1, 1],
-              max_agents_per_network=2,
-              scheduled_agent_count=[1, 0, 0],
-              down_agent_count=[0, 0, 0],
-              expected_scheduled_agent_count=[0, 1, 0])),
-
-        ('No hint, Scheduled independent to AZ',
-         dict(az_count=3,
-              network_az_hints=0,
-              agent_count=[1, 1, 1],
-              max_agents_per_network=3,
-              scheduled_agent_count=[0, 0, 0],
-              down_agent_count=[0, 0, 0],
-              expected_scheduled_agent_count=[1, 1, 1])),
-    ]
-
-    def _set_network_az_hints(self):
-        self.network['availability_zone_hints'] = []
-        for i in range(self.network_az_hints):
-            self.network['availability_zone_hints'].append('az%s' % i)
-
-    def test_schedule_network(self):
-        self.config(dhcp_agents_per_network=self.max_agents_per_network)
-        scheduler = dhcp_agent_scheduler.AZAwareWeightScheduler()
-        self._set_network_az_hints()
-
-        # create dhcp agents
-        for i in range(self.az_count):
-            az = 'az%s' % i
-            hosts = ['%s-host-%s' % (az, j)
-                     for j in range(self.agent_count[i])]
-            dhcp_agents = self._create_and_set_agents_down(
-                hosts, down_agent_count=self.down_agent_count[i], az=az)
-
-            active_agents = dhcp_agents[self.down_agent_count[i]:]
-
-            # schedule some agents before calling schedule
-            if self.scheduled_agent_count[i]:
-                # schedule the network
-                schedule_agents = active_agents[:self.scheduled_agent_count[i]]
-                scheduler.resource_filter.bind(
-                    self.ctx, schedule_agents, self.network_id)
-
-        actual_scheduled_agents = scheduler.schedule(self, self.ctx,
-                                                     self.network)
-        scheduled_azs = collections.defaultdict(int)
-        for agent in actual_scheduled_agents:
-            scheduled_azs[agent['availability_zone']] += 1
-
-        hosted_agents = self.list_dhcp_agents_hosting_network(
-                            self.ctx, self.network_id)
-        hosted_azs = collections.defaultdict(int)
-        for agent in hosted_agents['agents']:
-            hosted_azs[agent['availability_zone']] += 1
-
-        for i in range(self.az_count):
-            self.assertEqual(self.expected_scheduled_agent_count[i],
-                             scheduled_azs.get('az%s' % i, 0))
-            self.assertEqual(self.scheduled_agent_count[i] +
-                             scheduled_azs.get('az%s' % i, 0),
-                             hosted_azs.get('az%s' % i, 0))
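
The scheduler tests above rely on testscenarios: assigning load_tests = testscenarios.load_tests_apply_scenarios makes the test loader clone every test method once per entry in the class-level scenarios list, applying that entry's dict as instance attributes. A minimal sketch of the mechanism with illustrative names:

    import unittest

    import testscenarios

    load_tests = testscenarios.load_tests_apply_scenarios


    class TestAdd(unittest.TestCase):

        scenarios = [
            ('zero', dict(a=0, b=0, expected=0)),
            ('positive', dict(a=1, b=2, expected=3)),
        ]

        def test_add(self):
            # Runs once per scenario; a, b and expected come from the dict.
            self.assertEqual(self.expected, self.a + self.b)
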
diff --git a/neutron/tests/functional/scheduler/test_l3_agent_scheduler.py b/neutron/tests/functional/scheduler/test_l3_agent_scheduler.py
deleted file mode 100644 (file)
index 2f94b8d..0000000
+++ /dev/null
@@ -1,550 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import random
-import testscenarios
-
-from neutron import context
-from neutron.scheduler import l3_agent_scheduler
-from neutron.services.l3_router import l3_router_plugin
-from neutron.tests.common import helpers
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-# Required to generate tests from scenarios. Not compatible with nose.
-load_tests = testscenarios.load_tests_apply_scenarios
-
-
-class L3SchedulerBaseTest(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    """Base class for functional test of L3 schedulers.
-       Provides basic setup and utility functions.
-    """
-
-    def setUp(self):
-        super(L3SchedulerBaseTest, self).setUp()
-
-        self.l3_plugin = l3_router_plugin.L3RouterPlugin()
-        self.adminContext = context.get_admin_context()
-        self.adminContext.tenant_id = '_func_test_tenant_'
-
-    def _create_l3_agent(self, host, context, agent_mode='legacy', plugin=None,
-                         state=True):
-        agent = helpers.register_l3_agent(host, agent_mode)
-        helpers.set_agent_admin_state(agent.id, state)
-        return agent
-
-    def _create_router(self, name):
-        router = {'name': name, 'admin_state_up': True,
-                  'tenant_id': self.adminContext.tenant_id}
-        return self.l3_plugin.create_router(
-            self.adminContext, {'router': router})
-
-    def _create_legacy_agents(self, agent_count, down_agent_count):
-        # Creates legacy l3 agents and sets admin state based on the
-        # down agent count.
-        self.hosts = ['host-%s' % i for i in range(agent_count)]
-        self.l3_agents = [self._create_l3_agent(self.hosts[i],
-               self.adminContext, 'legacy', self.l3_plugin,
-               (i >= down_agent_count)) for i in range(agent_count)]
-
-    def _create_routers(self, scheduled_router_count,
-                        expected_scheduled_router_count):
-        routers = []
-        if (scheduled_router_count + expected_scheduled_router_count):
-            for i in range(scheduled_router_count +
-                           expected_scheduled_router_count):
-                router = self._create_router('schd_rtr' + str(i))
-                routers.append(router)
-        else:
-            # create at least one router to test scheduling
-            routers.append(self._create_router('schd_rtr0'))
-
-        return routers
-
-    def _pre_scheduler_routers(self, scheduler, count):
-        hosting_agents = []
-        # Bind the first `count` routers to random agents before the
-        # scheduler under test runs.
-        for i in range(count):
-            router = self.routers[i]
-            agent = random.choice(self.l3_agents)
-            scheduler.bind_router(self.adminContext, router['id'], agent)
-            hosting_agents.append(agent)
-        return hosting_agents
-
-    def _test_auto_schedule(self, expected_count):
-        router_ids = [rtr['id'] for rtr in self.routers]
-
-        did_it_schedule = False
-
-        # Try scheduling on each host
-        for host in self.hosts:
-            did_it_schedule = self.scheduler.auto_schedule_routers(
-                self.l3_plugin,
-                self.adminContext,
-                host,
-                router_ids)
-            if did_it_schedule:
-                break
-
-        if expected_count:
-            self.assertTrue(did_it_schedule, 'Failed to schedule agent')
-        else:
-            self.assertFalse(did_it_schedule, 'Agent scheduled, not expected')
-
-
-class L3ChanceSchedulerTestCase(L3SchedulerBaseTest):
-
-    """Test various scenarios for chance scheduler.
-
-        agent_count
-            Number of l3 agents (also number of hosts).
-
-        down_agent_count
-            Number of l3 agents which are down.
-
-        scheduled_router_count
-            Number of routers that have been previously scheduled.
-
-        expected_scheduled_router_count
-            Number of newly scheduled routers.
-    """
-
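-    # Worked example (illustrative): the scenario
-    # dict(agent_count=2, down_agent_count=2, scheduled_router_count=0,
-    #      expected_scheduled_router_count=0)
-    # means two agents exist but both are down, nothing is pre-bound, and
-    # schedule() is therefore expected to place no router at all.
-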
-    scenarios = [
-        ('No routers scheduled if no agents are present',
-         dict(agent_count=0,
-              down_agent_count=0,
-              scheduled_router_count=0,
-              expected_scheduled_router_count=0)),
-
-        ('No routers scheduled if it is already hosted',
-         dict(agent_count=1,
-              down_agent_count=0,
-              scheduled_router_count=1,
-              expected_scheduled_router_count=0)),
-
-        ('No routers scheduled if all agents are down',
-         dict(agent_count=2,
-              down_agent_count=2,
-              scheduled_router_count=0,
-              expected_scheduled_router_count=0)),
-
-        ('Router scheduled to the agent if router is not yet hosted',
-         dict(agent_count=1,
-              down_agent_count=0,
-              scheduled_router_count=0,
-              expected_scheduled_router_count=1)),
-
-        ('Router scheduled to the agent even if it already hosts a router',
-         dict(agent_count=1,
-              down_agent_count=0,
-              scheduled_router_count=1,
-              expected_scheduled_router_count=1)),
-    ]
-
-    def setUp(self):
-        super(L3ChanceSchedulerTestCase, self).setUp()
-        self._create_legacy_agents(self.agent_count, self.down_agent_count)
-        self.routers = self._create_routers(self.scheduled_router_count,
-                             self.expected_scheduled_router_count)
-        self.scheduler = l3_agent_scheduler.ChanceScheduler()
-
-    def test_chance_schedule_router(self):
-        # Pre-schedule routers.
-        self._pre_scheduler_routers(self.scheduler,
-                                    self.scheduled_router_count)
-        # Schedule the last, still unscheduled, router.
-        actual_scheduled_agent = self.scheduler.schedule(
-            self.l3_plugin, self.adminContext, self.routers[-1]['id'])
-
-        if self.expected_scheduled_router_count:
-            self.assertIsNotNone(actual_scheduled_agent,
-                                 message='Failed to schedule agent')
-        else:
-            self.assertIsNone(actual_scheduled_agent,
-                              message='Agent scheduled but not expected')
-
-    def test_auto_schedule_routers(self):
-        # Pre-schedule routers.
-        self._pre_scheduler_routers(self.scheduler,
-                                    self.scheduled_router_count)
-        # Exercise auto-scheduling and verify the expected outcome.
-        self._test_auto_schedule(self.expected_scheduled_router_count)
-
-
-class L3LeastRoutersSchedulerTestCase(L3SchedulerBaseTest):
-
-    """Test various scenarios for least router scheduler.
-
-        agent_count
-            Number of l3 agents (also number of hosts).
-
-        down_agent_count
-            Number of l3 agents which are down.
-
-        scheduled_router_count
-            Number of routers that have been previously scheduled.
-
-        expected_scheduled_router_count
-            Number of newly scheduled routers.
-    """
-
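-    # Sketch of the property under test (illustrative only, not the
-    # scheduler's actual implementation): among live agents, pick the one
-    # hosting the fewest routers, e.g.
-    #
-    #     def _pick_least_loaded(agents, routers_per_agent):
-    #         return min(agents, key=lambda a: routers_per_agent[a.id])
-    #
-    # The last scenario below exercises exactly this: with two agents and
-    # one router already bound, the new router must land on the idle agent.
-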
-    scenarios = [
-        ('No routers scheduled if no agents are present',
-         dict(agent_count=0,
-              down_agent_count=0,
-              scheduled_router_count=0,
-              expected_scheduled_router_count=0)),
-
-        ('No routers scheduled if it is already hosted',
-         dict(agent_count=1,
-              down_agent_count=0,
-              scheduled_router_count=1,
-              expected_scheduled_router_count=0)),
-
-        ('No routers scheduled if all agents are down',
-         dict(agent_count=2,
-              down_agent_count=2,
-              scheduled_router_count=0,
-              expected_scheduled_router_count=0)),
-
-        ('Router scheduled to the agent if router is not yet hosted',
-         dict(agent_count=1,
-              down_agent_count=0,
-              scheduled_router_count=0,
-              expected_scheduled_router_count=1)),
-
-        ('Router scheduled to the agent even if it already hosts a router',
-         dict(agent_count=1,
-              down_agent_count=0,
-              scheduled_router_count=1,
-              expected_scheduled_router_count=1)),
-
-        ('Router is scheduled to agent hosting least routers',
-         dict(agent_count=2,
-              down_agent_count=0,
-              scheduled_router_count=1,
-              expected_scheduled_router_count=1)),
-    ]
-
-    def setUp(self):
-        super(L3LeastRoutersSchedulerTestCase, self).setUp()
-        self._create_legacy_agents(self.agent_count, self.down_agent_count)
-        self.routers = self._create_routers(self.scheduled_router_count,
-                             self.expected_scheduled_router_count)
-        self.scheduler = l3_agent_scheduler.LeastRoutersScheduler()
-
-    def test_least_routers_schedule(self):
-        # Pre-schedule routers.
-        hosting_agents = self._pre_scheduler_routers(
-            self.scheduler, self.scheduled_router_count)
-
-        actual_scheduled_agent = self.scheduler.schedule(
-            self.l3_plugin, self.adminContext, self.routers[-1]['id'])
-
-        if self.expected_scheduled_router_count:
-            # For the case where there is just one agent:
-            if self.agent_count == 1:
-                self.assertEqual(actual_scheduled_agent.id,
-                                 self.l3_agents[0].id)
-            else:
-                self.assertNotIn(
-                    actual_scheduled_agent.id,
-                    [x.id for x in hosting_agents],
-                    message='The expected agent was not scheduled')
-        else:
-            self.assertIsNone(actual_scheduled_agent,
-                              message='Expected no agent to be scheduled,'
-                                      ' but it got scheduled')
-
-    def test_auto_schedule_routers(self):
-        # Pre-schedule routers.
-        self._pre_scheduler_routers(self.scheduler,
-                                    self.scheduled_router_count)
-        # Exercise auto-scheduling and verify the expected outcome.
-        self._test_auto_schedule(self.expected_scheduled_router_count)
-
-
-class L3AZSchedulerBaseTest(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    def setUp(self):
-        core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-        super(L3AZSchedulerBaseTest, self).setUp(plugin=core_plugin)
-
-        self.l3_plugin = l3_router_plugin.L3RouterPlugin()
-        self.adminContext = context.get_admin_context()
-        self.adminContext.tenant_id = '_func_test_tenant_'
-
-    def _create_l3_agent(self, host, context, agent_mode='legacy', plugin=None,
-                         state=True, az='nova'):
-        agent = helpers.register_l3_agent(host, agent_mode, az=az)
-        helpers.set_agent_admin_state(agent.id, state)
-        return agent
-
-    def _create_legacy_agents(self, agent_count, down_agent_count, az):
-        # Create legacy l3 agents in the given AZ and set their admin
-        # state: the first down_agent_count agents are marked down.
-        hosts = ['%s-host-%s' % (az, i) for i in range(agent_count)]
-        l3_agents = [
-            self._create_l3_agent(hosts[i], self.adminContext, 'legacy',
-                                  self.l3_plugin, (i >= down_agent_count),
-                                  az=az)
-            for i in range(agent_count)]
-        return l3_agents
-
-    def _create_router(self, az_hints, ha):
-        router = {'name': 'router1', 'admin_state_up': True,
-                  'availability_zone_hints': az_hints,
-                  'tenant_id': self._tenant_id}
-        if ha:
-            router['ha'] = True
-        return self.l3_plugin.create_router(
-            self.adminContext, {'router': router})
-
-
-class L3AZLeastRoutersSchedulerTestCase(L3AZSchedulerBaseTest):
-
-    """Test various scenarios for AZ router scheduler.
-
-        az_count
-            Number of AZs.
-
-        router_az_hints
-            Number of AZs in availability_zone_hints of the router.
-
-        agent_count[each az]
-            Number of l3 agents (also number of hosts).
-
-        max_l3_agents_per_router
-            Maximum number of agents on which a router will be scheduled.
-            0 means the test uses a regular (non-HA) router.
-
-        min_l3_agents_per_router
-            Minimum number of agents on which a router will be scheduled.
-            N/A for regular router test.
-
-        down_agent_count[each az]
-            Number of l3 agents which are down.
-
-        expected_scheduled_agent_count[each az]
-            Number of newly scheduled l3 agents.
-    """
-
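-    # Worked example for the first scenario below: with az_count=2 and
-    # router_az_hints=1 the router carries availability_zone_hints=['az0'],
-    # so of the two live agents (one per AZ) only the az0 agent is a
-    # candidate; hence expected_scheduled_agent_count=[1, 0].
-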
-    scenarios = [
-        ('Regular router, Scheduled specified AZ',
-         dict(az_count=2,
-              router_az_hints=1,
-              agent_count=[1, 1],
-              max_l3_agents_per_router=0,
-              min_l3_agents_per_router=0,
-              down_agent_count=[0, 0],
-              expected_scheduled_agent_count=[1, 0])),
-
-        ('HA router, Scheduled specified AZs',
-         dict(az_count=3,
-              router_az_hints=2,
-              agent_count=[1, 1, 1],
-              max_l3_agents_per_router=2,
-              min_l3_agents_per_router=2,
-              down_agent_count=[0, 0, 0],
-              expected_scheduled_agent_count=[1, 1, 0])),
-
-        ('HA router, max_l3_agents_per_router > az_hints',
-         dict(az_count=2,
-              router_az_hints=2,
-              agent_count=[2, 1],
-              max_l3_agents_per_router=3,
-              min_l3_agents_per_router=2,
-              down_agent_count=[0, 0],
-              expected_scheduled_agent_count=[2, 1])),
-
-        ('HA router, not enough agents',
-         dict(az_count=3,
-              router_az_hints=2,
-              agent_count=[2, 2, 2],
-              max_l3_agents_per_router=3,
-              min_l3_agents_per_router=2,
-              down_agent_count=[1, 1, 0],
-              expected_scheduled_agent_count=[1, 1, 0])),
-    ]
-
-    def test_schedule_router(self):
-        scheduler = l3_agent_scheduler.AZLeastRoutersScheduler()
-        ha = False
-        if self.max_l3_agents_per_router:
-            self.config(max_l3_agents_per_router=self.max_l3_agents_per_router)
-            self.config(min_l3_agents_per_router=self.min_l3_agents_per_router)
-            ha = True
-
-        # create l3 agents
-        for i in range(self.az_count):
-            az = 'az%s' % i
-            self._create_legacy_agents(self.agent_count[i],
-                                       self.down_agent_count[i], az)
-
-        # Create the router. Note that an HA router needs enough agents
-        # to exist beforehand.
-        az_hints = ['az%s' % i for i in range(self.router_az_hints)]
-        router = self._create_router(az_hints, ha)
-
-        scheduler.schedule(self.l3_plugin, self.adminContext, router['id'])
-        # schedule() returns only one agent, so fetch all scheduled agents.
-        scheduled_agents = self.l3_plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [router['id']])
-
-        scheduled_azs = collections.defaultdict(int)
-        for agent in scheduled_agents:
-            scheduled_azs[agent['availability_zone']] += 1
-
-        for i in range(self.az_count):
-            self.assertEqual(self.expected_scheduled_agent_count[i],
-                             scheduled_azs.get('az%s' % i, 0))
-
-
-class L3AZAutoScheduleTestCaseBase(L3AZSchedulerBaseTest):
-
-    """Test various scenarios for AZ router scheduler.
-
-        az_count
-            Number of AZs.
-
-        router_az_hints
-            Number of AZs in availability_zone_hints of the router.
-
-        agent_az
-            AZ of newly activated l3 agent.
-
-        agent_count[each az]
-            Number of l3 agents (also number of hosts).
-
-        max_l3_agents_per_router
-            Maximum number of agents on which a router will be scheduled.
-            0 means the test uses a regular (non-HA) router.
-
-        min_l3_agents_per_router
-            Minimum number of agents on which a router will be scheduled.
-            N/A for regular router test.
-
-        down_agent_count[each az]
-            Number of l3 agents which are down.
-
-        scheduled_agent_count[each az]
-            Number of l3 agents that have been previously scheduled.
-
-        expected_scheduled_agent_count[each az]
-            Number of newly scheduled l3 agents.
-    """
-
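-    # Reading guide for the scenarios below: within each AZ the first
-    # down_agent_count agents are created down, the agent at index 0 in
-    # agent_az is then re-activated, and auto_schedule_routers() runs for
-    # that agent's host; the per-AZ totals of agents hosting the router
-    # must then match expected_scheduled_agent_count.
-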
-    scenarios = [
-        ('Regular router, not scheduled, agent in specified AZ activated',
-         dict(az_count=2,
-              router_az_hints=1,
-              agent_az='az0',
-              agent_count=[1, 1],
-              max_l3_agents_per_router=0,
-              min_l3_agents_per_router=0,
-              down_agent_count=[1, 1],
-              scheduled_agent_count=[0, 0],
-              expected_scheduled_agent_count=[1, 0])),
-
-        ('Regular router, not scheduled, agent not in specified AZ activated',
-         dict(az_count=2,
-              router_az_hints=1,
-              agent_az='az1',
-              agent_count=[1, 1],
-              max_l3_agents_per_router=0,
-              min_l3_agents_per_router=0,
-              down_agent_count=[1, 1],
-              scheduled_agent_count=[0, 0],
-              expected_scheduled_agent_count=[0, 0])),
-
-        ('HA router, not scheduled, agent in specified AZ activated',
-         dict(az_count=3,
-              router_az_hints=2,
-              agent_az='az1',
-              agent_count=[1, 1, 1],
-              max_l3_agents_per_router=2,
-              min_l3_agents_per_router=2,
-              down_agent_count=[0, 1, 0],
-              scheduled_agent_count=[0, 0, 0],
-              expected_scheduled_agent_count=[0, 1, 0])),
-
-        ('HA router, not scheduled, agent not in specified AZ activated',
-         dict(az_count=3,
-              router_az_hints=2,
-              agent_az='az2',
-              agent_count=[1, 1, 1],
-              max_l3_agents_per_router=2,
-              min_l3_agents_per_router=2,
-              down_agent_count=[0, 0, 1],
-              scheduled_agent_count=[0, 0, 0],
-              expected_scheduled_agent_count=[0, 0, 0])),
-
-        ('HA router, partial scheduled, agent in specified AZ activated',
-         dict(az_count=3,
-              router_az_hints=2,
-              agent_az='az1',
-              agent_count=[1, 1, 1],
-              max_l3_agents_per_router=2,
-              min_l3_agents_per_router=2,
-              down_agent_count=[0, 1, 0],
-              scheduled_agent_count=[1, 0, 0],
-              expected_scheduled_agent_count=[1, 1, 0])),
-    ]
-
-    def test_auto_schedule_router(self):
-        scheduler = l3_agent_scheduler.AZLeastRoutersScheduler()
-        ha = False
-        if self.max_l3_agents_per_router:
-            self.config(max_l3_agents_per_router=self.max_l3_agents_per_router)
-            self.config(min_l3_agents_per_router=self.min_l3_agents_per_router)
-            ha = True
-
-        # create l3 agents
-        l3_agents = {}
-        for i in range(self.az_count):
-            az = 'az%s' % i
-            l3_agents[az] = self._create_legacy_agents(
-                self.agent_count[i], self.down_agent_count[i], az)
-
-        # Create the router. Note that an HA router needs enough agents
-        # to exist beforehand.
-        az_hints = ['az%s' % i for i in range(self.router_az_hints)]
-        router = self._create_router(az_hints, ha)
-
-        # Bind the router to some agents before calling auto-schedule.
-        for i in range(self.az_count):
-            az = 'az%s' % i
-            for j in range(self.scheduled_agent_count[i]):
-                agent = l3_agents[az][j + self.down_agent_count[i]]
-                scheduler.bind_router(self.adminContext, router['id'], agent)
-
-        # activate down agent and call auto_schedule_routers
-        activate_agent = l3_agents[self.agent_az][0]
-        helpers.set_agent_admin_state(activate_agent['id'],
-                                      admin_state_up=True)
-
-        scheduler.auto_schedule_routers(self.l3_plugin, self.adminContext,
-                                        activate_agent['host'], None)
-
-        scheduled_agents = self.l3_plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [router['id']])
-
-        scheduled_azs = collections.defaultdict(int)
-        for agent in scheduled_agents:
-            scheduled_azs[agent['availability_zone']] += 1
-
-        for i in range(self.az_count):
-            self.assertEqual(self.expected_scheduled_agent_count[i],
-                             scheduled_azs.get('az%s' % i, 0))
diff --git a/neutron/tests/functional/services/__init__.py b/neutron/tests/functional/services/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/services/l3_router/__init__.py b/neutron/tests/functional/services/l3_router/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py b/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
deleted file mode 100644 (file)
index 630b66b..0000000
+++ /dev/null
@@ -1,647 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import topics
-from neutron import context
-from neutron.extensions import external_net
-from neutron.extensions import portbindings
-from neutron.tests.common import helpers
-from neutron.tests.unit.plugins.ml2 import base as ml2_test_base
-
-
-DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-
-
-class L3DvrTestCase(ml2_test_base.ML2TestFramework):
-    def setUp(self):
-        super(L3DvrTestCase, self).setUp()
-        self.l3_agent = helpers.register_l3_agent(
-            agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
-
-    def _create_router(self, distributed=True):
-        return super(L3DvrTestCase, self)._create_router(
-            distributed=distributed)
-
-    def test_update_router_db_centralized_to_distributed(self):
-        router = self._create_router(distributed=False)
-        # The router must be admin_state_up=False before it can be
-        # upgraded to distributed (DVR).
-        self.l3_plugin.update_router(
-            self.context, router['id'], {'router': {'admin_state_up': False}})
-        self.assertFalse(router['distributed'])
-        self.l3_plugin.update_router(
-            self.context, router['id'], {'router': {'distributed': True}})
-        router = self.l3_plugin.get_router(self.context, router['id'])
-        self.assertTrue(router['distributed'])
-
-    def test_get_device_owner_distributed_router_object(self):
-        router = self._create_router()
-        self.assertEqual(
-            constants.DEVICE_OWNER_DVR_INTERFACE,
-            self.l3_plugin._get_device_owner(self.context, router))
-
-    def test_get_device_owner_distributed_router_id(self):
-        router = self._create_router()
-        self.assertEqual(
-            constants.DEVICE_OWNER_DVR_INTERFACE,
-            self.l3_plugin._get_device_owner(self.context, router['id']))
-
-    def test_get_device_owner_centralized(self):
-        router = self._create_router(distributed=False)
-        self.assertEqual(
-            constants.DEVICE_OWNER_ROUTER_INTF,
-            self.l3_plugin._get_device_owner(self.context, router['id']))
-
-    def test_get_agent_gw_ports_exist_for_network_no_port(self):
-        self.assertIsNone(
-            self.l3_plugin._get_agent_gw_ports_exist_for_network(
-                self.context, 'network_id', 'host', 'agent_id'))
-
-    def _test_remove_router_interface_leaves_snat_intact(self, by_subnet):
-        with self.subnet() as subnet1, \
-                self.subnet(cidr='20.0.0.0/24') as subnet2:
-            kwargs = {'arg_list': (external_net.EXTERNAL,),
-                      external_net.EXTERNAL: True}
-            with self.network(**kwargs) as ext_net, \
-                    self.subnet(network=ext_net,
-                                cidr='30.0.0.0/24'):
-                router = self._create_router()
-                self.l3_plugin.add_router_interface(
-                    self.context, router['id'],
-                    {'subnet_id': subnet1['subnet']['id']})
-                self.l3_plugin.add_router_interface(
-                    self.context, router['id'],
-                    {'subnet_id': subnet2['subnet']['id']})
-                self.l3_plugin._update_router_gw_info(
-                    self.context, router['id'],
-                    {'network_id': ext_net['network']['id']})
-
-                snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
-                    self.context, [router['id']])
-                self.assertEqual(
-                    2, len(snat_router_intfs[router['id']]))
-
-                if by_subnet:
-                    self.l3_plugin.remove_router_interface(
-                        self.context, router['id'],
-                        {'subnet_id': subnet1['subnet']['id']})
-                else:
-                    port = self.core_plugin.get_ports(
-                        self.context, filters={
-                            'network_id': [subnet1['subnet']['network_id']],
-                            'device_owner':
-                                [constants.DEVICE_OWNER_DVR_INTERFACE]})[0]
-                    self.l3_plugin.remove_router_interface(
-                        self.context, router['id'],
-                        {'port_id': port['id']})
-
-                self.assertEqual(
-                    1, len(self.l3_plugin._get_snat_sync_interfaces(
-                        self.context, [router['id']])))
-
-    def test_remove_router_interface_by_subnet_leaves_snat_intact(self):
-        self._test_remove_router_interface_leaves_snat_intact(by_subnet=True)
-
-    def test_remove_router_interface_by_port_leaves_snat_intact(self):
-        self._test_remove_router_interface_leaves_snat_intact(
-            by_subnet=False)
-
-    def setup_create_agent_gw_port_for_network(self, network=None):
-        if not network:
-            network = self._make_network(self.fmt, '', True)
-        network_id = network['network']['id']
-        port = self.core_plugin.create_port(
-            self.context,
-            {'port': {'tenant_id': '',
-                      'network_id': network_id,
-                      'mac_address': attributes.ATTR_NOT_SPECIFIED,
-                      'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
-                      'device_id': self.l3_agent['id'],
-                      'device_owner': constants.DEVICE_OWNER_AGENT_GW,
-                      portbindings.HOST_ID: '',
-                      'admin_state_up': True,
-                      'name': ''}})
-        return network_id, port
-
-    def test_get_agent_gw_port_for_network(self):
-        network_id, port = (
-            self.setup_create_agent_gw_port_for_network())
-
-        self.assertEqual(
-            port['id'],
-            self.l3_plugin._get_agent_gw_ports_exist_for_network(
-                self.context, network_id, None, self.l3_agent['id'])['id'])
-
-    def test_delete_agent_gw_port_for_network(self):
-        network_id, port = (
-            self.setup_create_agent_gw_port_for_network())
-
-        self.l3_plugin.delete_floatingip_agent_gateway_port(
-            self.context, "", network_id)
-        self.assertIsNone(
-            self.l3_plugin._get_agent_gw_ports_exist_for_network(
-                self.context, network_id, "", self.l3_agent['id']))
-
-    def test_get_fip_sync_interfaces(self):
-        self.setup_create_agent_gw_port_for_network()
-
-        self.assertEqual(
-            1, len(self.l3_plugin._get_fip_sync_interfaces(
-                self.context, self.l3_agent['id'])))
-
-    def test_process_routers(self):
-        router = self._create_router()
-        result = self.l3_plugin._process_routers(self.context, [router])
-        self.assertEqual(
-            router['id'], result[router['id']]['id'])
-
-    def test_agent_gw_port_delete_when_last_gateway_for_ext_net_removed(self):
-        kwargs = {'arg_list': (external_net.EXTERNAL,),
-                  external_net.EXTERNAL: True}
-        net1 = self._make_network(self.fmt, 'net1', True)
-        net2 = self._make_network(self.fmt, 'net2', True)
-        subnet1 = self._make_subnet(
-            self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
-        subnet2 = self._make_subnet(
-            self.fmt, net2, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
-        ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
-        self._make_subnet(
-            self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True)
-        # Create first router and add an interface
-        router1 = self._create_router()
-        ext_net_id = ext_net['network']['id']
-        self.l3_plugin.add_router_interface(
-            self.context, router1['id'],
-            {'subnet_id': subnet1['subnet']['id']})
-        # Set gateway to first router
-        self.l3_plugin._update_router_gw_info(
-            self.context, router1['id'],
-            {'network_id': ext_net_id})
-        # Create second router and add an interface
-        router2 = self._create_router()
-        self.l3_plugin.add_router_interface(
-            self.context, router2['id'],
-            {'subnet_id': subnet2['subnet']['id']})
-        # Set gateway to second router
-        self.l3_plugin._update_router_gw_info(
-            self.context, router2['id'],
-            {'network_id': ext_net_id})
-        # Create an agent gateway port for the external network
-        net_id, agent_gw_port = (
-            self.setup_create_agent_gw_port_for_network(network=ext_net))
-        # Check for agent gateway ports
-        self.assertIsNotNone(
-            self.l3_plugin._get_agent_gw_ports_exist_for_network(
-                self.context, ext_net_id, "", self.l3_agent['id']))
-        self.l3_plugin._update_router_gw_info(
-            self.context, router1['id'], {})
-        # Check for agent gateway port after deleting one of the gw
-        self.assertIsNotNone(
-            self.l3_plugin._get_agent_gw_ports_exist_for_network(
-                self.context, ext_net_id, "", self.l3_agent['id']))
-        self.l3_plugin._update_router_gw_info(
-            self.context, router2['id'], {})
-        # Check for agent gateway port after deleting last gw
-        self.assertIsNone(
-            self.l3_plugin._get_agent_gw_ports_exist_for_network(
-                self.context, ext_net_id, "", self.l3_agent['id']))
-
-    def _test_create_floating_ip_agent_notification(self, dvr=True):
-        with self.subnet() as ext_subnet,\
-                self.subnet(cidr='20.0.0.0/24') as int_subnet,\
-                self.port(subnet=int_subnet,
-                          device_owner=DEVICE_OWNER_COMPUTE) as int_port:
-            # make net external
-            ext_net_id = ext_subnet['subnet']['network_id']
-            self._update('networks', ext_net_id,
-                         {'network': {external_net.EXTERNAL: True}})
-
-            router = self._create_router(distributed=dvr)
-            self.l3_plugin.update_router(
-                self.context, router['id'],
-                {'router': {
-                    'external_gateway_info': {'network_id': ext_net_id}}})
-            self.l3_plugin.add_router_interface(
-                self.context, router['id'],
-                {'subnet_id': int_subnet['subnet']['id']})
-
-            floating_ip = {'floating_network_id': ext_net_id,
-                           'router_id': router['id'],
-                           'port_id': int_port['port']['id'],
-                           'tenant_id': int_port['port']['tenant_id']}
-            with mock.patch.object(
-                    self.l3_plugin, '_l3_rpc_notifier') as l3_notif:
-                self.l3_plugin.create_floatingip(
-                    self.context, {'floatingip': floating_ip})
-                if dvr:
-                    l3_notif.routers_updated_on_host.assert_called_once_with(
-                        self.context, [router['id']],
-                        int_port['port'][portbindings.HOST_ID])
-                    self.assertFalse(l3_notif.routers_updated.called)
-                else:
-                    l3_notif.routers_updated.assert_called_once_with(
-                        self.context, [router['id']], None)
-                    self.assertFalse(
-                        l3_notif.routers_updated_on_host.called)
-
-    def test_create_floating_ip_agent_notification(self):
-        self._test_create_floating_ip_agent_notification()
-
-    def test_create_floating_ip_agent_notification_non_dvr(self):
-        self._test_create_floating_ip_agent_notification(dvr=False)
-
-    def _test_update_floating_ip_agent_notification(self, dvr=True):
-        with self.subnet() as ext_subnet,\
-                self.subnet(cidr='20.0.0.0/24') as int_subnet1,\
-                self.subnet(cidr='30.0.0.0/24') as int_subnet2,\
-                self.port(subnet=int_subnet1,
-                          device_owner=DEVICE_OWNER_COMPUTE) as int_port1,\
-                self.port(subnet=int_subnet2,
-                          device_owner=DEVICE_OWNER_COMPUTE) as int_port2:
-            # locate internal ports on different hosts
-            self.core_plugin.update_port(
-                self.context, int_port1['port']['id'],
-                {'port': {portbindings.HOST_ID: 'host1'}})
-            self.core_plugin.update_port(
-                self.context, int_port2['port']['id'],
-                {'port': {portbindings.HOST_ID: 'host2'}})
-            # and create l3 agents on corresponding hosts
-            helpers.register_l3_agent(host='host1',
-                agent_mode=constants.L3_AGENT_MODE_DVR)
-            helpers.register_l3_agent(host='host2',
-                agent_mode=constants.L3_AGENT_MODE_DVR)
-
-            # make net external
-            ext_net_id = ext_subnet['subnet']['network_id']
-            self._update('networks', ext_net_id,
-                         {'network': {external_net.EXTERNAL: True}})
-
-            router1 = self._create_router(distributed=dvr)
-            router2 = self._create_router(distributed=dvr)
-            for router in (router1, router2):
-                self.l3_plugin.update_router(
-                    self.context, router['id'],
-                    {'router': {
-                        'external_gateway_info': {'network_id': ext_net_id}}})
-            self.l3_plugin.add_router_interface(
-                self.context, router1['id'],
-                {'subnet_id': int_subnet1['subnet']['id']})
-            self.l3_plugin.add_router_interface(
-                self.context, router2['id'],
-                {'subnet_id': int_subnet2['subnet']['id']})
-
-            floating_ip = {'floating_network_id': ext_net_id,
-                           'router_id': router1['id'],
-                           'port_id': int_port1['port']['id'],
-                           'tenant_id': int_port1['port']['tenant_id']}
-            floating_ip = self.l3_plugin.create_floatingip(
-                self.context, {'floatingip': floating_ip})
-
-            with mock.patch.object(
-                    self.l3_plugin, '_l3_rpc_notifier') as l3_notif:
-                updated_floating_ip = {'router_id': router2['id'],
-                                       'port_id': int_port2['port']['id']}
-                self.l3_plugin.update_floatingip(
-                    self.context, floating_ip['id'],
-                    {'floatingip': updated_floating_ip})
-                if dvr:
-                    self.assertEqual(
-                        2, l3_notif.routers_updated_on_host.call_count)
-                    expected_calls = [
-                        mock.call(self.context, [router1['id']], 'host1'),
-                        mock.call(self.context, [router2['id']], 'host2')]
-                    l3_notif.routers_updated_on_host.assert_has_calls(
-                        expected_calls)
-                    self.assertFalse(l3_notif.routers_updated.called)
-                else:
-                    self.assertEqual(
-                        2, l3_notif.routers_updated.call_count)
-                    expected_calls = [
-                        mock.call(self.context, [router1['id']], None),
-                        mock.call(self.context, [router2['id']], None)]
-                    l3_notif.routers_updated.assert_has_calls(
-                        expected_calls)
-                    self.assertFalse(l3_notif.routers_updated_on_host.called)
-
-    def test_update_floating_ip_agent_notification(self):
-        self._test_update_floating_ip_agent_notification()
-
-    def test_update_floating_ip_agent_notification_non_dvr(self):
-        self._test_update_floating_ip_agent_notification(dvr=False)
-
-    def _test_delete_floating_ip_agent_notification(self, dvr=True):
-        with self.subnet() as ext_subnet,\
-                self.subnet(cidr='20.0.0.0/24') as int_subnet,\
-                self.port(subnet=int_subnet,
-                          device_owner=DEVICE_OWNER_COMPUTE) as int_port:
-            # make net external
-            ext_net_id = ext_subnet['subnet']['network_id']
-            self._update('networks', ext_net_id,
-                         {'network': {external_net.EXTERNAL: True}})
-
-            router = self._create_router(distributed=dvr)
-            self.l3_plugin.update_router(
-                self.context, router['id'],
-                {'router': {
-                    'external_gateway_info': {'network_id': ext_net_id}}})
-            self.l3_plugin.add_router_interface(
-                self.context, router['id'],
-                {'subnet_id': int_subnet['subnet']['id']})
-
-            floating_ip = {'floating_network_id': ext_net_id,
-                           'router_id': router['id'],
-                           'port_id': int_port['port']['id'],
-                           'tenant_id': int_port['port']['tenant_id']}
-            floating_ip = self.l3_plugin.create_floatingip(
-                self.context, {'floatingip': floating_ip})
-            with mock.patch.object(
-                    self.l3_plugin, '_l3_rpc_notifier') as l3_notif:
-                self.l3_plugin.delete_floatingip(
-                    self.context, floating_ip['id'])
-                if dvr:
-                    l3_notif.routers_updated_on_host.assert_called_once_with(
-                        self.context, [router['id']],
-                        int_port['port'][portbindings.HOST_ID])
-                    self.assertFalse(l3_notif.routers_updated.called)
-                else:
-                    l3_notif.routers_updated.assert_called_once_with(
-                        self.context, [router['id']], None)
-                    self.assertFalse(
-                        l3_notif.routers_updated_on_host.called)
-
-    def test_delete_floating_ip_agent_notification(self):
-        self._test_delete_floating_ip_agent_notification()
-
-    def test_delete_floating_ip_agent_notification_non_dvr(self):
-        self._test_delete_floating_ip_agent_notification(dvr=False)
-
-    def test_router_with_ipv4_and_multiple_ipv6_on_same_network(self):
-        kwargs = {'arg_list': (external_net.EXTERNAL,),
-                  external_net.EXTERNAL: True}
-        ext_net = self._make_network(self.fmt, '', True, **kwargs)
-        self._make_subnet(
-            self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24',
-            ip_version=4, enable_dhcp=True)
-        self._make_subnet(
-            self.fmt, ext_net, '2001:db8::1', '2001:db8::/64',
-            ip_version=6, enable_dhcp=True)
-        router1 = self._create_router()
-        self.l3_plugin._update_router_gw_info(
-            self.context, router1['id'],
-            {'network_id': ext_net['network']['id']})
-        snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
-            self.context, [router1['id']])
-        self.assertEqual(0, len(snat_router_intfs[router1['id']]))
-        private_net1 = self._make_network(self.fmt, 'net1', True)
-        private_ipv6_subnet1 = self._make_subnet(self.fmt,
-            private_net1, 'fd00::1',
-            cidr='fd00::1/64', ip_version=6,
-            ipv6_ra_mode='slaac',
-            ipv6_address_mode='slaac')
-        private_ipv6_subnet2 = self._make_subnet(self.fmt,
-            private_net1, 'fd01::1',
-            cidr='fd01::1/64', ip_version=6,
-            ipv6_ra_mode='slaac',
-            ipv6_address_mode='slaac')
-        # Add the first IPv6 subnet to the router
-        self.l3_plugin.add_router_interface(
-            self.context, router1['id'],
-            {'subnet_id': private_ipv6_subnet1['subnet']['id']})
-        # Check for the internal snat port interfaces
-        snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
-            self.context, [router1['id']])
-        self.assertEqual(1, len(snat_router_intfs[router1['id']]))
-        # Add the second IPv6 subnet to the router
-        self.l3_plugin.add_router_interface(
-            self.context, router1['id'],
-            {'subnet_id': private_ipv6_subnet2['subnet']['id']})
-        # Check for the internal snat port interfaces
-        snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
-            self.context, [router1['id']])
-        snat_intf_list = snat_router_intfs[router1['id']]
-        fixed_ips = snat_intf_list[0]['fixed_ips']
-        self.assertEqual(1, len(snat_router_intfs[router1['id']]))
-        self.assertEqual(2, len(fixed_ips))
-        # Now delete the router interface; this should update the SNAT
-        # port with the right fixed_ips instead of deleting it.
-        self.l3_plugin.remove_router_interface(
-            self.context, router1['id'],
-            {'subnet_id': private_ipv6_subnet2['subnet']['id']})
-        # Check for the internal snat port interfaces
-        snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
-            self.context, [router1['id']])
-        snat_intf_list = snat_router_intfs[router1['id']]
-        fixed_ips = snat_intf_list[0]['fixed_ips']
-        self.assertEqual(1, len(snat_router_intfs[router1['id']]))
-        self.assertEqual(1, len(fixed_ips))
-
-    def test_update_vm_port_host_router_update(self):
-        # register l3 agents in dvr mode in addition to existing dvr_snat agent
-        HOST1 = 'host1'
-        dvr_agent1 = helpers.register_l3_agent(
-            host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR)
-        HOST2 = 'host2'
-        dvr_agent2 = helpers.register_l3_agent(
-            host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR)
-        router = self._create_router()
-        with self.subnet() as subnet:
-            self.l3_plugin.add_router_interface(
-                self.context, router['id'],
-                {'subnet_id': subnet['subnet']['id']})
-
-            # Since there are no VM ports on any host and the router has
-            # no external gateway at this point, the router should be
-            # scheduled neither to dvr nor to dvr_snat agents.
-            agents = self.l3_plugin.list_l3_agents_hosting_router(
-                self.context, router['id'])['agents']
-            self.assertEqual(0, len(agents))
-            with mock.patch.object(self.l3_plugin,
-                                   '_l3_rpc_notifier') as l3_notifier,\
-                    self.port(subnet=subnet,
-                              device_owner=DEVICE_OWNER_COMPUTE) as port:
-                self.l3_plugin.agent_notifiers[
-                    constants.AGENT_TYPE_L3] = l3_notifier
-                self.core_plugin.update_port(
-                    self.context, port['port']['id'],
-                    {'port': {portbindings.HOST_ID: HOST1}})
-
-                # now router should be scheduled to agent on HOST1
-                agents = self.l3_plugin.list_l3_agents_hosting_router(
-                    self.context, router['id'])['agents']
-                self.assertEqual(1, len(agents))
-                self.assertEqual(dvr_agent1['id'], agents[0]['id'])
-                # and notification should only be sent to the agent on HOST1
-                l3_notifier.routers_updated_on_host.assert_called_once_with(
-                    self.context, {router['id']}, HOST1)
-                self.assertFalse(l3_notifier.routers_updated.called)
-
-                # updating port's host (instance migration)
-                l3_notifier.reset_mock()
-                self.core_plugin.update_port(
-                    self.context, port['port']['id'],
-                    {'port': {portbindings.HOST_ID: HOST2}})
-                # now router should only be scheduled to dvr agent on host2
-                agents = self.l3_plugin.list_l3_agents_hosting_router(
-                    self.context, router['id'])['agents']
-                self.assertEqual(1, len(agents))
-                self.assertEqual(dvr_agent2['id'], agents[0]['id'])
-                l3_notifier.routers_updated_on_host.assert_called_once_with(
-                    self.context, {router['id']}, HOST2)
-                l3_notifier.router_removed_from_agent.assert_called_once_with(
-                    mock.ANY, router['id'], HOST1)
-
-    def _test_router_remove_from_agent_on_vm_port_deletion(
-            self, non_admin_port=False):
-        # register l3 agent in dvr mode in addition to existing dvr_snat agent
-        HOST = 'host1'
-        non_admin_tenant = 'tenant1'
-        dvr_agent = helpers.register_l3_agent(
-            host=HOST, agent_mode=constants.L3_AGENT_MODE_DVR)
-        router = self._create_router()
-        with self.network(shared=True) as net,\
-                self.subnet(network=net) as subnet,\
-                self.port(subnet=subnet,
-                          device_owner=DEVICE_OWNER_COMPUTE,
-                          tenant_id=non_admin_tenant,
-                          set_context=non_admin_port) as port:
-            self.core_plugin.update_port(
-                self.context, port['port']['id'],
-                {'port': {portbindings.HOST_ID: HOST}})
-            self.l3_plugin.add_router_interface(
-                self.context, router['id'],
-                {'subnet_id': subnet['subnet']['id']})
-
-            # router should be scheduled to agent on HOST
-            agents = self.l3_plugin.list_l3_agents_hosting_router(
-                self.context, router['id'])
-            self.assertEqual(1, len(agents['agents']))
-            self.assertEqual(dvr_agent['id'], agents['agents'][0]['id'])
-
-            notifier = self.l3_plugin.agent_notifiers[
-                constants.AGENT_TYPE_L3]
-            with mock.patch.object(
-                    notifier, 'router_removed_from_agent') as remove_mock:
-                ctx = context.Context(
-                    '', non_admin_tenant) if non_admin_port else self.context
-                self._delete('ports', port['port']['id'], neutron_context=ctx)
-                # now when port is deleted the router should be unscheduled
-                agents = self.l3_plugin.list_l3_agents_hosting_router(
-                    self.context, router['id'])
-                self.assertEqual(0, len(agents['agents']))
-                remove_mock.assert_called_once_with(
-                    mock.ANY, router['id'], HOST)
-
-    def test_router_remove_from_agent_on_vm_port_deletion(self):
-        self._test_router_remove_from_agent_on_vm_port_deletion()
-
-    def test_admin_router_remove_from_agent_on_vm_port_deletion(self):
-        self._test_router_remove_from_agent_on_vm_port_deletion(
-            non_admin_port=True)
-
-    def test_dvr_router_notifications(self):
-        """Check that notifications go to the right hosts in different
-        conditions
-        """
-        # register l3 agents in dvr mode in addition to existing dvr_snat agent
-        HOST1, HOST2, HOST3 = 'host1', 'host2', 'host3'
-        for host in [HOST1, HOST2, HOST3]:
-            helpers.register_l3_agent(
-                host=host, agent_mode=constants.L3_AGENT_MODE_DVR)
-
-        router = self._create_router()
-        arg_list = (portbindings.HOST_ID,)
-        with self.subnet() as ext_subnet,\
-                self.subnet(cidr='20.0.0.0/24') as subnet1,\
-                self.subnet(cidr='30.0.0.0/24') as subnet2,\
-                self.subnet(cidr='40.0.0.0/24') as subnet3,\
-                self.port(subnet=subnet1,
-                          device_owner=DEVICE_OWNER_COMPUTE,
-                          arg_list=arg_list,
-                          **{portbindings.HOST_ID: HOST1}),\
-                self.port(subnet=subnet2,
-                          device_owner=constants.DEVICE_OWNER_DHCP,
-                          arg_list=arg_list,
-                          **{portbindings.HOST_ID: HOST2}),\
-                self.port(subnet=subnet3,
-                          device_owner=constants.DEVICE_OWNER_NEUTRON_PREFIX,
-                          arg_list=arg_list,
-                          **{portbindings.HOST_ID: HOST3}):
-            # make net external
-            ext_net_id = ext_subnet['subnet']['network_id']
-            self._update('networks', ext_net_id,
-                         {'network': {external_net.EXTERNAL: True}})
-
-            with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client,
-                                   'prepare') as mock_prepare:
-                # add external gateway to router
-                self.l3_plugin.update_router(
-                    self.context, router['id'],
-                    {'router': {
-                        'external_gateway_info': {'network_id': ext_net_id}}})
-                # The router has no interfaces, so the notification goes
-                # only to the dvr_snat agent.
-                mock_prepare.assert_called_once_with(
-                    server=self.l3_agent['host'],
-                    topic=topics.L3_AGENT,
-                    version='1.1')
-
-                mock_prepare.reset_mock()
-                self.l3_plugin.add_router_interface(
-                    self.context, router['id'],
-                    {'subnet_id': subnet1['subnet']['id']})
-                self.assertEqual(2, mock_prepare.call_count)
-                expected = [mock.call(server=self.l3_agent['host'],
-                                      topic=topics.L3_AGENT,
-                                      version='1.1'),
-                            mock.call(server=HOST1,
-                                      topic=topics.L3_AGENT,
-                                      version='1.1')]
-                mock_prepare.assert_has_calls(expected, any_order=True)
-
-                mock_prepare.reset_mock()
-                self.l3_plugin.add_router_interface(
-                    self.context, router['id'],
-                    {'subnet_id': subnet2['subnet']['id']})
-                self.assertEqual(3, mock_prepare.call_count)
-                expected = [mock.call(server=self.l3_agent['host'],
-                                      topic=topics.L3_AGENT,
-                                      version='1.1'),
-                            mock.call(server=HOST1,
-                                      topic=topics.L3_AGENT,
-                                      version='1.1'),
-                            mock.call(server=HOST2,
-                                      topic=topics.L3_AGENT,
-                                      version='1.1')]
-                mock_prepare.assert_has_calls(expected, any_order=True)
-
-                mock_prepare.reset_mock()
-                self.l3_plugin.add_router_interface(
-                    self.context, router['id'],
-                    {'subnet_id': subnet3['subnet']['id']})
-                # There are no DVR-serviceable ports on HOST3, so the
-                # notification goes to the same hosts as before.
-                self.assertEqual(3, mock_prepare.call_count)
-                expected = [mock.call(server=self.l3_agent['host'],
-                                      topic=topics.L3_AGENT,
-                                      version='1.1'),
-                            mock.call(server=HOST1,
-                                      topic=topics.L3_AGENT,
-                                      version='1.1'),
-                            mock.call(server=HOST2,
-                                      topic=topics.L3_AGENT,
-                                      version='1.1')]
-                mock_prepare.assert_has_calls(expected, any_order=True)
diff --git a/neutron/tests/functional/test_server.py b/neutron/tests/functional/test_server.py
deleted file mode 100644 (file)
index 6b88d3a..0000000
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright 2015 Mirantis Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import signal
-import socket
-import time
-import traceback
-
-import httplib2
-import mock
-
-from oslo_config import cfg
-import psutil
-
-from neutron.agent.linux import utils
-from neutron import service
-from neutron.tests import base
-from neutron import worker
-from neutron import wsgi
-
-
-CONF = cfg.CONF
-
-# This message is appended to a temporary file each time a worker's
-# start method is called.
-FAKE_START_MSG = "start".encode("utf-8")
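-
-# Worked example of the bookkeeping below: each (re)start of a worker
-# appends FAKE_START_MSG to the temp file, so after one SIGHUP a service
-# with N workers is expected to have written len("start") * N * 2 bytes,
-# e.g. 20 bytes for N=2.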
-
-TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-
-
-class TestNeutronServer(base.BaseTestCase):
-    def setUp(self):
-        super(TestNeutronServer, self).setUp()
-        self.service_pid = None
-        self.workers = None
-        self.temp_file = self.get_temp_file_path("test_server.tmp")
-        self.health_checker = self._check_active
-        self.pipein, self.pipeout = os.pipe()
-        self.addCleanup(self._destroy_workers)
-
-    def _destroy_workers(self):
-        if self.service_pid:
-            # Make sure all processes are stopped
-            os.kill(self.service_pid, signal.SIGKILL)
-
-    def _start_server(self, callback, workers):
-        """Run a given service.
-
-        :param callback: callback that will start the required service
-        :param workers: number of service workers
-        :returns: list of spawned workers' pids
-        """
-
-        self.workers = workers
-
-        # Fork a new process in which server will be started
-        pid = os.fork()
-        if pid == 0:
-            status = 0
-            try:
-                callback(workers)
-            except SystemExit as exc:
-                status = exc.code
-            except BaseException:
-                traceback.print_exc()
-                status = 2
-
-            # Really exit
-            os._exit(status)
-
-        self.service_pid = pid
-
-        # If the number of workers is 1, the service is assumed to run
-        # in the current (forked) process.
-        if self.workers > 1:
-            # Wait at most 10 seconds to spawn workers
-            condition = lambda: self.workers == len(self._get_workers())
-
-            utils.wait_until_true(
-                condition, timeout=10, sleep=0.1,
-                exception=RuntimeError(
-                    "Failed to start %d workers." % self.workers))
-
-            workers = self._get_workers()
-            self.assertEqual(self.workers, len(workers))
-            return workers
-
-        # Wait for a service to start.
-        utils.wait_until_true(self.health_checker, timeout=10, sleep=0.1,
-                              exception=RuntimeError(
-                                  "Failed to start service."))
-
-        return [self.service_pid]
-
-    def _get_workers(self):
-        """Get the list of processes in which WSGI server is running."""
-
-        def safe_ppid(proc):
-            try:
-                return proc.ppid
-            except psutil.NoSuchProcess:
-                return None
-
-        if self.workers > 1:
-            return [proc.pid for proc in psutil.process_iter()
-                    if safe_ppid(proc) == self.service_pid]
-        else:
-            return [proc.pid for proc in psutil.process_iter()
-                    if proc.pid == self.service_pid]
-
-    def _check_active(self):
-        """Dummy service activity check."""
-        time.sleep(5)
-        return True
-
-    def _fake_start(self):
-        with open(self.temp_file, 'a') as f:
-            f.write(FAKE_START_MSG)
-
-    def _test_restart_service_on_sighup(self, service, workers=1):
-        """Test that a service correctly (re)starts on receiving SIGHUP.
-
-        1. Start a service with a given number of workers.
-        2. Send SIGHUP to the service.
-        3. Wait for workers (if any) to (re)start.
-        """
-
-        self._start_server(callback=service, workers=workers)
-        os.kill(self.service_pid, signal.SIGHUP)
-
-        expected_msg = FAKE_START_MSG * workers * 2
-
-        # Wait for the temp file to be created and for its size to reach
-        # the expected value.
-        expected_size = len(expected_msg)
-        condition = lambda: (os.path.isfile(self.temp_file)
-                             and os.stat(self.temp_file).st_size ==
-                             expected_size)
-
-        utils.wait_until_true(
-            condition, timeout=5, sleep=0.1,
-            exception=RuntimeError(
-                "Timed out waiting for file %(filename)s to be created and "
-                "its size become equal to %(size)s." %
-                {'filename': self.temp_file,
-                 'size': expected_size}))
-
-        # Verify that start has been called twice for each worker (once for
-        # the initial start, and a second time on SIGHUP after the children
-        # were terminated).
-        with open(self.temp_file, 'r') as f:
-            res = f.readline()
-            self.assertEqual(expected_msg, res)
-
-
-class TestWsgiServer(TestNeutronServer):
-    """Tests for neutron.wsgi.Server."""
-
-    def setUp(self):
-        super(TestWsgiServer, self).setUp()
-        self.health_checker = self._check_active
-        self.port = None
-
-    @staticmethod
-    def application(environ, start_response):
-        """A primitive test application."""
-
-        response_body = 'Response'
-        status = '200 OK'
-        response_headers = [('Content-Type', 'text/plain'),
-                            ('Content-Length', str(len(response_body)))]
-        start_response(status, response_headers)
-        return [response_body]
-
-    def _check_active(self):
-        """Check a wsgi service is active by making a GET request."""
-        port = int(os.read(self.pipein, 5))
-        conn = httplib2.HTTPConnectionWithTimeout("localhost", port)
-        try:
-            conn.request("GET", "/")
-            resp = conn.getresponse()
-            return resp.status == 200
-        except socket.error:
-            return False
-
-    def _run_wsgi(self, workers=1):
-        """Start WSGI server with a test application."""
-
-        # Mock start method to check that children are started again on
-        # receiving SIGHUP.
-        with mock.patch("neutron.wsgi.WorkerService.start") as start_method:
-            start_method.side_effect = self._fake_start
-
-            server = wsgi.Server("Test")
-            server.start(self.application, 0, "0.0.0.0",
-                         workers=workers)
-
-            # Remember the port that was chosen for the service
-            self.port = server.port
-            os.write(self.pipeout, str(self.port))
-
-            server.wait()
-
-    def test_restart_wsgi_on_sighup_multiple_workers(self):
-        self._test_restart_service_on_sighup(service=self._run_wsgi,
-                                             workers=2)
-
-
-class TestRPCServer(TestNeutronServer):
-    """Tests for neutron RPC server."""
-
-    def setUp(self):
-        super(TestRPCServer, self).setUp()
-        self.setup_coreplugin(TARGET_PLUGIN)
-        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
-        self.plugin = self._plugin_patcher.start()
-        self.plugin.return_value.rpc_workers_supported = True
-
-    def _serve_rpc(self, workers=1):
-        """Start RPC server with a given number of workers."""
-
-        # Mock start method to check that children are started again on
-        # receiving SIGHUP.
-        with mock.patch("neutron.service.RpcWorker.start") as start_method:
-            with mock.patch(
-                    "neutron.manager.NeutronManager.get_plugin"
-            ) as get_plugin:
-                start_method.side_effect = self._fake_start
-                get_plugin.return_value = self.plugin
-
-                CONF.set_override("rpc_workers", workers)
-                # not interested in state report workers specifically
-                CONF.set_override("rpc_state_report_workers", 0)
-
-                launcher = service.serve_rpc()
-                launcher.wait()
-
-    def test_restart_rpc_on_sighup_multiple_workers(self):
-        self._test_restart_service_on_sighup(service=self._serve_rpc,
-                                             workers=2)
-
-
-class TestPluginWorker(TestNeutronServer):
-    """Ensure that a plugin returning Workers spawns workers"""
-
-    def setUp(self):
-        super(TestPluginWorker, self).setUp()
-        self.setup_coreplugin(TARGET_PLUGIN)
-        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
-        self.plugin = self._plugin_patcher.start()
-
-    def _start_plugin(self, workers=1):
-        with mock.patch('neutron.manager.NeutronManager.get_plugin') as gp:
-            gp.return_value = self.plugin
-            launchers = service.start_plugin_workers()
-            for launcher in launchers:
-                launcher.wait()
-
-    def test_start(self):
-        class FakeWorker(worker.NeutronWorker):
-            def start(self):
-                pass
-
-            def wait(self):
-                pass
-
-            def stop(self):
-                pass
-
-            def reset(self):
-                pass
-
-        # Make both ABC happy and ensure 'self' is correct
-        FakeWorker.start = self._fake_start
-        workers = [FakeWorker()]
-        self.plugin.return_value.get_workers.return_value = workers
-        self._test_restart_service_on_sighup(service=self._start_plugin,
-                                             workers=len(workers))
diff --git a/neutron/tests/functional/test_service.py b/neutron/tests/functional/test_service.py
deleted file mode 100644 (file)
index 9206aef..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_concurrency import processutils
-from oslo_config import cfg
-from oslo_service import service
-
-from neutron import service as neutron_service
-from neutron.tests import base
-from neutron.tests.functional import test_server
-
-
-class TestService(base.BaseTestCase):
-
-    def test_api_workers_default(self):
-        self.assertEqual(processutils.get_worker_count(),
-                         neutron_service._get_api_workers())
-
-    def test_api_workers_from_config(self):
-        cfg.CONF.set_override('api_workers', 1234)
-        self.assertEqual(1234,
-                         neutron_service._get_api_workers())
-
-
-class TestServiceRestart(test_server.TestNeutronServer):
-
-    def _start_service(self, host, binary, topic, manager, workers,
-                       *args, **kwargs):
-        server = neutron_service.Service(host, binary, topic, manager,
-                                         *args, **kwargs)
-        service.launch(cfg.CONF, server, workers).wait()
diff --git a/neutron/tests/post_mortem_debug.py b/neutron/tests/post_mortem_debug.py
deleted file mode 100644 (file)
index cecf44f..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-import traceback
-
-
-def get_exception_handler(debugger_name):
-    debugger = _get_debugger(debugger_name)
-    return functools.partial(_exception_handler, debugger)
-
-
-def _get_debugger(debugger_name):
-    try:
-        debugger = __import__(debugger_name)
-    except ImportError:
-        raise ValueError("can't import %s module as a post mortem debugger" %
-                         debugger_name)
-    if 'post_mortem' in dir(debugger):
-        return debugger
-    else:
-        raise ValueError("%s is not a supported post mortem debugger" %
-                         debugger_name)
-
-
-def _exception_handler(debugger, exc_info):
-    """Exception handler enabling post-mortem debugging.
-
-    A class extending testtools.TestCase can add this handler in setUp():
-
-        self.addOnException(post_mortem_debug.exception_handler)
-
-    When an exception occurs, the user will be dropped into a debugger
-    session in the execution environment of the failure.
-
-    Frames associated with the testing framework are excluded so that
-    the post-mortem session for an assertion failure will start at the
-    assertion call (e.g. self.assertTrue) rather than the framework code
-    that raises the failure exception (e.g. the assertTrue method).
-    """
-    tb = exc_info[2]
-    ignored_traceback = get_ignored_traceback(tb)
-    if ignored_traceback:
-        tb = FilteredTraceback(tb, ignored_traceback)
-    traceback.print_exception(exc_info[0], exc_info[1], tb)
-    debugger.post_mortem(tb)
-
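-# A minimal wiring sketch (assumption: a testtools-based test case; 'pdb'
-# is just an illustrative debugger name):
-#
-#     import testtools
-#     from neutron.tests import post_mortem_debug
-#
-#     class ExampleTestCase(testtools.TestCase):
-#         def setUp(self):
-#             super(ExampleTestCase, self).setUp()
-#             self.addOnException(
-#                 post_mortem_debug.get_exception_handler('pdb'))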
-
-def get_ignored_traceback(tb):
-    """Retrieve the first traceback of an ignored trailing chain.
-
-    Given an initial traceback, find the first traceback of a trailing
-    chain of tracebacks that should be ignored.  The criterion for
-    whether a traceback should be ignored is whether its frame's
-    globals include the __unittest marker variable. This criterion is
-    culled from:
-
-        unittest.TestResult._is_relevant_tb_level
-
-    For example:
-
-       tb.tb_next => tb0.tb_next => tb1.tb_next
-
-    - If no tracebacks were to be ignored, None would be returned.
-    - If only tb1 was to be ignored, tb1 would be returned.
-    - If tb0 and tb1 were to be ignored, tb0 would be returned.
-    - If only tb or only tb0 was to be ignored, None would
-      be returned because neither tb nor tb0 would be part of a
-      trailing chain of ignored tracebacks.
-    """
-    # Turn the traceback chain into a list
-    tb_list = []
-    while tb:
-        tb_list.append(tb)
-        tb = tb.tb_next
-
-    # Find all members of an ignored trailing chain
-    ignored_tracebacks = []
-    for tb in reversed(tb_list):
-        if '__unittest' in tb.tb_frame.f_globals:
-            ignored_tracebacks.append(tb)
-        else:
-            break
-
-    # Return the first member of the ignored trailing chain
-    if ignored_tracebacks:
-        return ignored_tracebacks[-1]
-
-
-class FilteredTraceback(object):
-    """Wraps a traceback to filter unwanted frames."""
-
-    def __init__(self, tb, filtered_traceback):
-        """Constructor.
-
-        :param tb: The start of the traceback chain to filter.
-        :param filtered_traceback: The first traceback of a trailing
-               chain that is to be filtered.
-        """
-        self._tb = tb
-        self.tb_lasti = self._tb.tb_lasti
-        self.tb_lineno = self._tb.tb_lineno
-        self.tb_frame = self._tb.tb_frame
-        self._filtered_traceback = filtered_traceback
-
-    @property
-    def tb_next(self):
-        tb_next = self._tb.tb_next
-        if tb_next and tb_next != self._filtered_traceback:
-            return FilteredTraceback(tb_next, self._filtered_traceback)
diff --git a/neutron/tests/retargetable/__init__.py b/neutron/tests/retargetable/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/retargetable/base.py b/neutron/tests/retargetable/base.py
deleted file mode 100644 (file)
index 6b5c5f0..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License. You may
-# obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License.
-
-"""
-This module defines a base test case that uses testscenarios to
-parametrize the test methods of subclasses by varying the client
-fixture used to target the Neutron API.
-
-PluginClientFixture targets the Neutron API directly via the plugin
-api, and will be executed by default.  testscenarios will ensure that
-each test is run against all plugins defined in plugin_configurations.
-
-RestClientFixture targets a deployed Neutron daemon, and will be used
-instead of PluginClientFixture only if OS_TEST_API_WITH_REST is set to 1.
-
-Reference: https://pypi.python.org/pypi/testscenarios/
-"""
-
-import testscenarios
-
-from neutron.tests import base as tests_base
-from neutron.tests.retargetable import client_fixtures
-from neutron.tests.unit.plugins.ml2 import test_plugin
-
-
-# Each plugin must add a class to plugin_configurations that can configure the
-# plugin for use with PluginClient.  For a given plugin, the setup
-# used for NeutronDbPluginV2TestCase can usually be reused.  See the
-# configuration classes listed below for examples of this reuse.
-
-# TODO(marun) Discover plugin conf via a metaclass
-plugin_configurations = [
-    test_plugin.Ml2ConfFixture(),
-]
-
-
-def rest_enabled():
-    return tests_base.bool_from_env('OS_TEST_API_WITH_REST')
-
-
-def get_plugin_scenarios():
-    scenarios = []
-    for conf in plugin_configurations:
-        name = conf.plugin_name
-        class_name = name.rsplit('.', 1)[-1]
-        client = client_fixtures.PluginClientFixture(conf)
-        scenarios.append((class_name, {'client': client}))
-    return scenarios
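-
-# For illustration, get_plugin_scenarios() returns tuples shaped like
-# ('Ml2Plugin', {'client': <PluginClientFixture>}); testscenarios applies
-# each scenario's attribute dict to a copy of every test method.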
-
-
-def get_scenarios():
-    if rest_enabled():
-        # FIXME(marun) Remove local import once tempest config is safe
-        # to import alongside neutron config
-        from neutron.tests.retargetable import rest_fixture
-        return [('tempest', {'client': rest_fixture.RestClientFixture()})]
-    else:
-        return get_plugin_scenarios()
-
-
-class RetargetableApiTest(testscenarios.WithScenarios,
-                          tests_base.BaseTestCase):
-
-    scenarios = get_scenarios()
-
-    def setUp(self):
-        super(RetargetableApiTest, self).setUp()
-        if rest_enabled():
-            raise self.skipException(
-                'Tempest fixture requirements prevent this test from running')
-        self.useFixture(self.client)
diff --git a/neutron/tests/retargetable/client_fixtures.py b/neutron/tests/retargetable/client_fixtures.py
deleted file mode 100644 (file)
index 4c4e9a2..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License. You may
-# obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License.
-
-"""
-This module defines client fixtures that can be used to target the
-Neutron API via different methods.
-"""
-
-import abc
-
-import fixtures
-import six
-
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron import manager
-from neutron.tests import base
-from neutron.tests.unit import testlib_api
-
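-# An illustrative client-fixture workflow (test_example.py in this
-# package exercises the same calls; Ml2ConfFixture is wired up in the
-# retargetable base module's plugin_configurations):
-#
-#     client = PluginClientFixture(test_plugin.Ml2ConfFixture())
-#     self.useFixture(client)
-#     net = client.create_network(name='example')
-#     client.delete_network(net.id)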
-
-@six.add_metaclass(abc.ABCMeta)
-class AbstractClientFixture(fixtures.Fixture):
-    """
-    Base class for a client that can interact with the Neutron API in
-    some manner.
-    """
-
-    @abc.abstractproperty
-    def NotFound(self):
-        """The exception that indicates a resource could not be found.
-
-        Tests can use this property to assert for a missing resource
-        in a client-agnostic way.
-        """
-
-    @abc.abstractmethod
-    def create_network(self, **kwargs):
-        pass
-
-    @abc.abstractmethod
-    def update_network(self, id_, **kwargs):
-        pass
-
-    @abc.abstractmethod
-    def get_network(self, id_, fields=None):
-        pass
-
-    @abc.abstractmethod
-    def get_networks(self, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None, page_reverse=False):
-        pass
-
-    @abc.abstractmethod
-    def delete_network(self, id_):
-        pass
-
-
-class PluginClientFixture(AbstractClientFixture):
-    """Targets the Neutron API via the plugin API"""
-
-    def __init__(self, plugin_conf):
-        super(PluginClientFixture, self).__init__()
-        self.plugin_conf = plugin_conf
-
-    def _setUp(self):
-        super(PluginClientFixture, self)._setUp()
-        self.useFixture(testlib_api.SqlFixture())
-        self.useFixture(self.plugin_conf)
-        self.useFixture(base.PluginFixture(self.plugin_conf.plugin_name))
-
-    @property
-    def ctx(self):
-        if not hasattr(self, '_ctx'):
-            self._ctx = context.Context('', 'test-tenant')
-        return self._ctx
-
-    @property
-    def plugin(self):
-        return manager.NeutronManager.get_plugin()
-
-    @property
-    def NotFound(self):
-        return n_exc.NetworkNotFound
-
-    def create_network(self, **kwargs):
-        # Supply defaults that are expected to be set by the api
-        # framework
-        kwargs.setdefault('admin_state_up', True)
-        kwargs.setdefault('shared', False)
-        kwargs.setdefault('tenant_id', self.ctx.tenant_id)
-        data = dict(network=kwargs)
-        result = self.plugin.create_network(self.ctx, data)
-        return base.AttributeDict(result)
-
-    def update_network(self, id_, **kwargs):
-        data = dict(network=kwargs)
-        result = self.plugin.update_network(self.ctx, id_, data)
-        return base.AttributeDict(result)
-
-    def get_network(self, *args, **kwargs):
-        result = self.plugin.get_network(self.ctx, *args, **kwargs)
-        return base.AttributeDict(result)
-
-    def get_networks(self, *args, **kwargs):
-        result = self.plugin.get_networks(self.ctx, *args, **kwargs)
-        return [base.AttributeDict(x) for x in result]
-
-    def delete_network(self, id_):
-        self.plugin.delete_network(self.ctx, id_)
diff --git a/neutron/tests/retargetable/rest_fixture.py b/neutron/tests/retargetable/rest_fixture.py
deleted file mode 100644 (file)
index 9255459..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License. You may
-# obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License.
-
-"""
-This module defines a client fixture that can be used to target a
-deployed neutron daemon.  The potential for conflict between Tempest
-configuration and Neutron configuration requires that
-neutron.tests.tempest imports be isolated in this module for now.
-"""
-
-from tempest_lib import exceptions as tlib_exceptions
-
-from neutron.tests import base
-from neutron.tests.retargetable import client_fixtures
-from neutron.tests.tempest import test as t_test
-
-
-class RestClientFixture(client_fixtures.AbstractClientFixture):
-    """Targets the Neutron API via REST."""
-
-    @property
-    def client(self):
-        if not hasattr(self, '_client'):
-            manager = t_test.BaseTestCase.get_client_manager()
-            self._client = manager.network_client
-        return self._client
-
-    @property
-    def NotFound(self):
-        return tlib_exceptions.NotFound
-
-    def _cleanup_network(self, id_):
-        try:
-            self.delete_network(id_)
-        except self.NotFound:
-            pass
-
-    def create_network(self, **kwargs):
-        network = self._create_network(**kwargs)
-        self.addCleanup(self._cleanup_network, network.id)
-        return network
-
-    def _create_network(self, **kwargs):
-        # Internal method - use create_network() instead
-        body = self.client.create_network(**kwargs)
-        return base.AttributeDict(body['network'])
-
-    def update_network(self, id_, **kwargs):
-        body = self.client.update_network(id_, **kwargs)
-        return base.AttributeDict(body['network'])
-
-    def get_network(self, id_, **kwargs):
-        body = self.client.show_network(id_, **kwargs)
-        return base.AttributeDict(body['network'])
-
-    def get_networks(self, **kwargs):
-        body = self.client.list_networks(**kwargs)
-        return [base.AttributeDict(x) for x in body['networks']]
-
-    def delete_network(self, id_):
-        self.client.delete_network(id_)
diff --git a/neutron/tests/retargetable/test_example.py b/neutron/tests/retargetable/test_example.py
deleted file mode 100644 (file)
index 02e7cb6..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License. You may
-# obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License.
-
-import testtools
-
-from neutron.tests import base as tests_base
-from neutron.tests.retargetable import base
-
-
-class TestExample(base.RetargetableApiTest):
-    """This class is an example of how to write a retargetable api test.
-
-    See the parent class for details about how the 'client' attribute
-    is configured via testscenarios.
-    """
-
-    def test_network_lifecycle(self):
-        net = self.client.create_network(name=tests_base.get_rand_name())
-        listed_networks = {x.id: x.name for x in self.client.get_networks()}
-        self.assertIn(net.id, listed_networks)
-        self.assertEqual(listed_networks[net.id], net.name,
-                         'Listed network name is not as expected.')
-        updated_name = 'new %s' % net.name
-        updated_net = self.client.update_network(net.id, name=updated_name)
-        self.assertEqual(updated_name, updated_net.name,
-                         'Updated network name is not as expected.')
-        self.client.delete_network(net.id)
-        with testtools.ExpectedException(self.client.NotFound,
-                                         msg='Network was not deleted'):
-            self.client.get_network(net.id)
diff --git a/neutron/tests/tempest/README.rst b/neutron/tests/tempest/README.rst
deleted file mode 100644 (file)
index 5b3600a..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-WARNING
-=======
-
-The files under this path were copied from tempest as part of the move
-of the api tests, and they will be removed as the required
-functionality is transitioned from tempest to tempest-lib.  While it
-exists, only neutron.tests.api and neutron.tests.retargetable should
-be importing files from this path.  neutron.tests.tempest.config uses
-the global cfg.CONF instance and importing it outside of the api tests
-has the potential to break Neutron's use of cfg.CONF.
diff --git a/neutron/tests/tempest/__init__.py b/neutron/tests/tempest/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/auth.py b/neutron/tests/tempest/auth.py
deleted file mode 100644 (file)
index a9fdb03..0000000
+++ /dev/null
@@ -1,655 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import copy
-import datetime
-from neutron.tests.tempest import exceptions
-import re
-import urlparse
-
-import six
-
-from tempest_lib.services.identity.v2 import token_client as json_v2id
-from tempest_lib.services.identity.v3 import token_client as json_v3id
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AuthProvider(object):
-    """
-    Provide authentication
-    """
-
-    def __init__(self, credentials):
-        """
-        :param credentials: credentials for authentication
-        """
-        if self.check_credentials(credentials):
-            self.credentials = credentials
-        else:
-            raise TypeError("Invalid credentials")
-        self.cache = None
-        self.alt_auth_data = None
-        self.alt_part = None
-
-    def __str__(self):
-        return "Creds :{creds}, cached auth data: {cache}".format(
-            creds=self.credentials, cache=self.cache)
-
-    @abc.abstractmethod
-    def _decorate_request(self, filters, method, url, headers=None, body=None,
-                          auth_data=None):
-        """
-        Decorate request with authentication data
-        """
-        return
-
-    @abc.abstractmethod
-    def _get_auth(self):
-        return
-
-    @abc.abstractmethod
-    def _fill_credentials(self, auth_data_body):
-        return
-
-    def fill_credentials(self):
-        """
-        Fill credentials object with data from auth
-        """
-        auth_data = self.get_auth()
-        self._fill_credentials(auth_data[1])
-        return self.credentials
-
-    @classmethod
-    def check_credentials(cls, credentials):
-        """
-        Verify credentials are valid.
-        """
-        return isinstance(credentials, Credentials) and credentials.is_valid()
-
-    @property
-    def auth_data(self):
-        return self.get_auth()
-
-    @auth_data.deleter
-    def auth_data(self):
-        self.clear_auth()
-
-    def get_auth(self):
-        """
-        Return auth data from the cache if available, else authenticate first
-        """
-        if self.cache is None or self.is_expired(self.cache):
-            self.set_auth()
-        return self.cache
-
-    def set_auth(self):
-        """
-        Force (re)authentication, ignoring the cache if it exists,
-        and refill the credentials from the result
-        """
-        self.cache = self._get_auth()
-        self._fill_credentials(self.cache[1])
-
-    def clear_auth(self):
-        """
-        Can be called to clear the access cache so that the next request
-        will fetch a new token and base_url.
-        """
-        self.cache = None
-        self.credentials.reset()
-
-    @abc.abstractmethod
-    def is_expired(self, auth_data):
-        return
-
-    def auth_request(self, method, url, headers=None, body=None, filters=None):
-        """
-        Obtains auth data and decorates a request with it.
-        :param method: HTTP method of the request
-        :param url: relative URL of the request (path)
-        :param headers: HTTP headers of the request
-        :param body: HTTP body in case of POST / PUT
-        :param filters: select a base URL out of the catalog
-        :returns: a tuple (url, headers, body)
-        """
-        orig_req = dict(url=url, headers=headers, body=body)
-
-        auth_url, auth_headers, auth_body = self._decorate_request(
-            filters, method, url, headers, body)
-        auth_req = dict(url=auth_url, headers=auth_headers, body=auth_body)
-
-        # Overwrite part of the request if it has been requested
-        if self.alt_part is not None:
-            if self.alt_auth_data is not None:
-                alt_url, alt_headers, alt_body = self._decorate_request(
-                    filters, method, url, headers, body,
-                    auth_data=self.alt_auth_data)
-                alt_auth_req = dict(url=alt_url, headers=alt_headers,
-                                    body=alt_body)
-                auth_req[self.alt_part] = alt_auth_req[self.alt_part]
-
-            else:
-                # If alt auth data is None, skip auth in the requested part
-                auth_req[self.alt_part] = orig_req[self.alt_part]
-
-            # Next auth request will be normal, unless otherwise requested
-            self.reset_alt_auth_data()
-
-        return auth_req['url'], auth_req['headers'], auth_req['body']
-
-    def reset_alt_auth_data(self):
-        """
-        Configure auth provider to provide valid authentication data
-        """
-        self.alt_part = None
-        self.alt_auth_data = None
-
-    def set_alt_auth_data(self, request_part, auth_data):
-        """
-        Configure auth provider to provide alt authentication data
-        on a part of the *next* auth_request. If credentials are None,
-        set invalid data.
-        :param request_part: request part to contain invalid auth: url,
-                             headers, body
-        :param auth_data: alternative auth_data from which to get the
-                          invalid data to be injected
-        """
-        self.alt_part = request_part
-        self.alt_auth_data = auth_data
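-
-    # Illustrative negative-test use: with auth_data=None, the next
-    # request keeps its original, undecorated 'url' part:
-    #
-    #     provider.set_alt_auth_data('url', None)
-    #     provider.auth_request('GET', 'ports')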
-
-    @abc.abstractmethod
-    def base_url(self, filters, auth_data=None):
-        """
-        Extracts the base_url based on provided filters
-        """
-        return
-
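-# A rough end-to-end sketch (URL and credential values are illustrative,
-# not taken from this tree):
-#
-#     creds = KeystoneV2Credentials(username='foo', password='bar',
-#                                   tenant_name='demo')
-#     provider = KeystoneV2AuthProvider(creds, 'http://keystone:5000/v2.0')
-#     url, headers, body = provider.auth_request(
-#         'GET', 'ports', filters={'service': 'network'})
-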
-
-class KeystoneAuthProvider(AuthProvider):
-
-    token_expiry_threshold = datetime.timedelta(seconds=60)
-
-    def __init__(self, credentials, auth_url,
-                 disable_ssl_certificate_validation=None,
-                 ca_certs=None, trace_requests=None):
-        super(KeystoneAuthProvider, self).__init__(credentials)
-        self.dsvm = disable_ssl_certificate_validation
-        self.ca_certs = ca_certs
-        self.trace_requests = trace_requests
-        self.auth_client = self._auth_client(auth_url)
-
-    def _decorate_request(self, filters, method, url, headers=None, body=None,
-                          auth_data=None):
-        if auth_data is None:
-            auth_data = self.auth_data
-        token, _ = auth_data
-        base_url = self.base_url(filters=filters, auth_data=auth_data)
-        # build authenticated request
-        # returns new request, it does not touch the original values
-        _headers = copy.deepcopy(headers) if headers is not None else {}
-        _headers['X-Auth-Token'] = str(token)
-        if url is None or url == "":
-            _url = base_url
-        else:
-            # Join base URL and url, and remove multiple contiguous slashes
-            _url = "/".join([base_url, url])
-            parts = [x for x in urlparse.urlparse(_url)]
-            parts[2] = re.sub("/{2,}", "/", parts[2])
-            _url = urlparse.urlunparse(parts)
-        # no change to method or body
-        return str(_url), _headers, body
-
-    @abc.abstractmethod
-    def _auth_client(self, auth_url):
-        return
-
-    @abc.abstractmethod
-    def _auth_params(self):
-        return
-
-    def _get_auth(self):
-        # Bypasses the cache
-        auth_func = self.auth_client.get_token
-        auth_params = self._auth_params()
-
-        # returns token, auth_data
-        token, auth_data = auth_func(**auth_params)
-        return token, auth_data
-
-    def get_token(self):
-        return self.auth_data[0]
-
-
-class KeystoneV2AuthProvider(KeystoneAuthProvider):
-
-    EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
-
-    def _auth_client(self, auth_url):
-        return json_v2id.TokenClientJSON(
-            auth_url, disable_ssl_certificate_validation=self.dsvm,
-            ca_certs=self.ca_certs, trace_requests=self.trace_requests)
-
-    def _auth_params(self):
-        return dict(
-            user=self.credentials.username,
-            password=self.credentials.password,
-            tenant=self.credentials.tenant_name,
-            auth_data=True)
-
-    def _fill_credentials(self, auth_data_body):
-        tenant = auth_data_body['token']['tenant']
-        user = auth_data_body['user']
-        if self.credentials.tenant_name is None:
-            self.credentials.tenant_name = tenant['name']
-        if self.credentials.tenant_id is None:
-            self.credentials.tenant_id = tenant['id']
-        if self.credentials.username is None:
-            self.credentials.username = user['name']
-        if self.credentials.user_id is None:
-            self.credentials.user_id = user['id']
-
-    def base_url(self, filters, auth_data=None):
-        """
-        Filters can be:
-        - service: compute, image, etc
-        - region: the service region
-        - endpoint_type: adminURL, publicURL, internalURL
-        - api_version: replace catalog version with this
-        - skip_path: take just the base URL
-        """
-        if auth_data is None:
-            auth_data = self.auth_data
-        token, _auth_data = auth_data
-        service = filters.get('service')
-        region = filters.get('region')
-        endpoint_type = filters.get('endpoint_type', 'publicURL')
-
-        if service is None:
-            raise exceptions.EndpointNotFound("No service provided")
-
-        _base_url = None
-        for ep in _auth_data['serviceCatalog']:
-            if ep["type"] == service:
-                for _ep in ep['endpoints']:
-                    if region is not None and _ep['region'] == region:
-                        _base_url = _ep.get(endpoint_type)
-                if not _base_url:
-                    # No region matching, use the first
-                    _base_url = ep['endpoints'][0].get(endpoint_type)
-                break
-        if _base_url is None:
-            raise exceptions.EndpointNotFound(service)
-
-        parts = urlparse.urlparse(_base_url)
-        if filters.get('api_version', None) is not None:
-            path = "/" + filters['api_version']
-            noversion_path = "/".join(parts.path.split("/")[2:])
-            if noversion_path != "":
-                path += "/" + noversion_path
-            _base_url = _base_url.replace(parts.path, path)
-        if filters.get('skip_path', None) is not None and parts.path != '':
-            _base_url = _base_url.replace(parts.path, "/")
-
-        return _base_url
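-
-    # Example filters (illustrative): {'service': 'network',
-    # 'endpoint_type': 'publicURL', 'skip_path': True}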
-
-    def is_expired(self, auth_data):
-        _, access = auth_data
-        expiry = datetime.datetime.strptime(access['token']['expires'],
-                                            self.EXPIRY_DATE_FORMAT)
-        return (expiry - self.token_expiry_threshold <=
-                datetime.datetime.utcnow())
-
-
-class KeystoneV3AuthProvider(KeystoneAuthProvider):
-
-    EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
-
-    def _auth_client(self, auth_url):
-        return json_v3id.V3TokenClientJSON(
-            auth_url, disable_ssl_certificate_validation=self.dsvm,
-            ca_certs=self.ca_certs, trace_requests=self.trace_requests)
-
-    def _auth_params(self):
-        return dict(
-            user_id=self.credentials.user_id,
-            username=self.credentials.username,
-            password=self.credentials.password,
-            project_id=self.credentials.project_id,
-            project_name=self.credentials.project_name,
-            user_domain_id=self.credentials.user_domain_id,
-            user_domain_name=self.credentials.user_domain_name,
-            project_domain_id=self.credentials.project_domain_id,
-            project_domain_name=self.credentials.project_domain_name,
-            domain_id=self.credentials.domain_id,
-            domain_name=self.credentials.domain_name,
-            auth_data=True)
-
-    def _fill_credentials(self, auth_data_body):
-        # project or domain, depending on the scope
-        project = auth_data_body.get('project', None)
-        domain = auth_data_body.get('domain', None)
-        # user is always there
-        user = auth_data_body['user']
-        # Set project fields
-        if project is not None:
-            if self.credentials.project_name is None:
-                self.credentials.project_name = project['name']
-            if self.credentials.project_id is None:
-                self.credentials.project_id = project['id']
-            if self.credentials.project_domain_id is None:
-                self.credentials.project_domain_id = project['domain']['id']
-            if self.credentials.project_domain_name is None:
-                self.credentials.project_domain_name = \
-                    project['domain']['name']
-        # Set domain fields
-        if domain is not None:
-            if self.credentials.domain_id is None:
-                self.credentials.domain_id = domain['id']
-            if self.credentials.domain_name is None:
-                self.credentials.domain_name = domain['name']
-        # Set user fields
-        if self.credentials.username is None:
-            self.credentials.username = user['name']
-        if self.credentials.user_id is None:
-            self.credentials.user_id = user['id']
-        if self.credentials.user_domain_id is None:
-            self.credentials.user_domain_id = user['domain']['id']
-        if self.credentials.user_domain_name is None:
-            self.credentials.user_domain_name = user['domain']['name']
-
-    def base_url(self, filters, auth_data=None):
-        """
-        Filters can be:
-        - service: compute, image, etc
-        - region: the service region
-        - endpoint_type: adminURL, publicURL, internalURL
-        - api_version: replace catalog version with this
-        - skip_path: take just the base URL
-        """
-        if auth_data is None:
-            auth_data = self.auth_data
-        token, _auth_data = auth_data
-        service = filters.get('service')
-        region = filters.get('region')
-        endpoint_type = filters.get('endpoint_type', 'public')
-
-        if service is None:
-            raise exceptions.EndpointNotFound("No service provided")
-
-        if 'URL' in endpoint_type:
-            endpoint_type = endpoint_type.replace('URL', '')
-        _base_url = None
-        catalog = _auth_data['catalog']
-        # Select entries with matching service type
-        service_catalog = [ep for ep in catalog if ep['type'] == service]
-        if len(service_catalog) > 0:
-            service_catalog = service_catalog[0]['endpoints']
-        else:
-            # No matching service
-            raise exceptions.EndpointNotFound(service)
-        # Filter by endpoint type (interface)
-        filtered_catalog = [ep for ep in service_catalog if
-                            ep['interface'] == endpoint_type]
-        if len(filtered_catalog) == 0:
-            # No matching type, keep all and try matching by region at least
-            filtered_catalog = service_catalog
-        # Filter by region
-        filtered_catalog = [ep for ep in filtered_catalog if
-                            ep['region'] == region]
-        if len(filtered_catalog) == 0:
-            # No matching region, take the first endpoint
-            filtered_catalog = [service_catalog[0]]
-        # There should be only one match; if not, take the first.
-        _base_url = filtered_catalog[0].get('url', None)
-        if _base_url is None:
-            raise exceptions.EndpointNotFound(service)
-
-        parts = urlparse.urlparse(_base_url)
-        if filters.get('api_version', None) is not None:
-            path = "/" + filters['api_version']
-            noversion_path = "/".join(parts.path.split("/")[2:])
-            if noversion_path != "":
-                path += "/" + noversion_path
-            _base_url = _base_url.replace(parts.path, path)
-        if filters.get('skip_path', None) is not None:
-            _base_url = _base_url.replace(parts.path, "/")
-
-        return _base_url
-
-    def is_expired(self, auth_data):
-        _, access = auth_data
-        expiry = datetime.datetime.strptime(access['expires_at'],
-                                            self.EXPIRY_DATE_FORMAT)
-        return (expiry - self.token_expiry_threshold <=
-                datetime.datetime.utcnow())
-
-
-def is_identity_version_supported(identity_version):
-    return identity_version in IDENTITY_VERSION
-
-
-def get_credentials(auth_url, fill_in=True, identity_version='v2',
-                    disable_ssl_certificate_validation=None, ca_certs=None,
-                    trace_requests=None, **kwargs):
-    """
-    Builds a credentials object based on the requested identity_version
-
-    :param auth_url (string): Full URI of the OpenStack Identity API
-           (Keystone), used to fetch the token from the Identity service.
-    :param fill_in (boolean): obtain a token and fill in all credential
-           details provided by the identity service. When fill_in is not
-           specified, credentials are not validated. Validation can be
-           performed later by calling ``is_valid()``
-    :param identity_version (string): identity API version, used to
-           select the matching auth provider and credentials class
-    :param disable_ssl_certificate_validation: whether to enforce SSL
-           certificate validation in SSL API requests to the auth system
-    :param ca_certs: CA certificate bundle for validation of certificates
-           in SSL API requests to the auth system
-    :param trace_requests: trace in log API requests to the auth system
-    :param kwargs (dict): Dict of credential key/value pairs
-
-    Examples:
-
-        Returns credentials from the provided parameters:
-        >>> get_credentials(username='foo', password='bar')
-
-        Returns credentials including IDs:
-        >>> get_credentials(username='foo', password='bar', fill_in=True)
-    """
-    if not is_identity_version_supported(identity_version):
-        raise exceptions.InvalidIdentityVersion(
-            identity_version=identity_version)
-
-    credential_class, auth_provider_class = IDENTITY_VERSION.get(
-        identity_version)
-
-    creds = credential_class(**kwargs)
-    # Fill in the credentials fields that were not specified
-    if fill_in:
-        dsvm = disable_ssl_certificate_validation
-        auth_provider = auth_provider_class(
-            creds, auth_url, disable_ssl_certificate_validation=dsvm,
-            ca_certs=ca_certs, trace_requests=trace_requests)
-        creds = auth_provider.fill_credentials()
-    return creds
-
-
-class Credentials(object):
-    """
-    Set of credentials for accessing OpenStack services
-
-    ATTRIBUTES: list of valid class attributes representing credentials.
-    """
-
-    ATTRIBUTES = []
-
-    def __init__(self, **kwargs):
-        """
-        Enforce the available attributes at init time (only).
-        Additional attributes can still be set afterwards if tests need
-        to do so.
-        """
-        self._initial = kwargs
-        self._apply_credentials(kwargs)
-
-    def _apply_credentials(self, attr):
-        for key in attr.keys():
-            if key in self.ATTRIBUTES:
-                setattr(self, key, attr[key])
-            else:
-                raise exceptions.InvalidCredentials
-
-    def __str__(self):
-        """
-        Represent only attributes included in self.ATTRIBUTES
-        """
-        _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
-        return str(_repr)
-
-    def __eq__(self, other):
-        """
-        Credentials are equal if attributes in self.ATTRIBUTES are equal
-        """
-        return str(self) == str(other)
-
-    def __getattr__(self, key):
-        # If an attribute is set, __getattr__ is not invoked
-        # If an attribute is not set, and it is a known one, return None
-        if key in self.ATTRIBUTES:
-            return None
-        else:
-            raise AttributeError
-
-    def __delitem__(self, key):
-        # For backwards compatibility, support dict behaviour
-        if key in self.ATTRIBUTES:
-            delattr(self, key)
-        else:
-            raise AttributeError
-
-    def get(self, item, default):
-        # Act as a dict for backward compatibility
-        try:
-            return getattr(self, item)
-        except AttributeError:
-            return default
-
-    def get_init_attributes(self):
-        return self._initial.keys()
-
-    def is_valid(self):
-        raise NotImplementedError
-
-    def reset(self):
-        # First delete all known attributes
-        for key in self.ATTRIBUTES:
-            if getattr(self, key) is not None:
-                delattr(self, key)
-        # Then re-apply initial setup
-        self._apply_credentials(self._initial)
-
-
-class KeystoneV2Credentials(Credentials):
-
-    ATTRIBUTES = ['username', 'password', 'tenant_name', 'user_id',
-                  'tenant_id']
-
-    def is_valid(self):
-        """
-        The minimum set of valid credentials is username and password.
-        Tenant is optional.
-        """
-        return None not in (self.username, self.password)
-
-
-class KeystoneV3Credentials(Credentials):
-    """
-    Credentials suitable for the Keystone Identity V3 API
-    """
-
-    ATTRIBUTES = ['domain_id', 'domain_name', 'password', 'username',
-                  'project_domain_id', 'project_domain_name', 'project_id',
-                  'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
-                  'user_domain_name', 'user_id']
-
-    def __setattr__(self, key, value):
-        parent = super(KeystoneV3Credentials, self)
-        # for tenant_* set both project and tenant
-        if key == 'tenant_id':
-            parent.__setattr__('project_id', value)
-        elif key == 'tenant_name':
-            parent.__setattr__('project_name', value)
-        # for project_* set both project and tenant
-        if key == 'project_id':
-            parent.__setattr__('tenant_id', value)
-        elif key == 'project_name':
-            parent.__setattr__('tenant_name', value)
-        # for *_domain_* set both user and project if not set yet
-        if key == 'user_domain_id':
-            if self.project_domain_id is None:
-                parent.__setattr__('project_domain_id', value)
-        if key == 'project_domain_id':
-            if self.user_domain_id is None:
-                parent.__setattr__('user_domain_id', value)
-        if key == 'user_domain_name':
-            if self.project_domain_name is None:
-                parent.__setattr__('project_domain_name', value)
-        if key == 'project_domain_name':
-            if self.user_domain_name is None:
-                parent.__setattr__('user_domain_name', value)
-        # support domain_name coming from config
-        if key == 'domain_name':
-            parent.__setattr__('user_domain_name', value)
-            parent.__setattr__('project_domain_name', value)
-        # finally trigger default behaviour for all attributes
-        parent.__setattr__(key, value)
-
-    def is_valid(self):
-        """
-        Valid combinations of v3 credentials (excluding token, scope)
-        - User id, password (optional domain)
-        - User name, password and its domain id/name
-        For the scope, valid combinations are:
-        - None
-        - Project id (optional domain)
-        - Project name and its domain id/name
-        - Domain id
-        - Domain name
-        """
-        valid_user_domain = any(
-            [self.user_domain_id is not None,
-             self.user_domain_name is not None])
-        valid_project_domain = any(
-            [self.project_domain_id is not None,
-             self.project_domain_name is not None])
-        valid_user = any(
-            [self.user_id is not None,
-             self.username is not None and valid_user_domain])
-        valid_project_scope = any(
-            [self.project_name is None and self.project_id is None,
-             self.project_id is not None,
-             self.project_name is not None and valid_project_domain])
-        valid_domain_scope = any(
-            [self.domain_id is None and self.domain_name is None,
-             self.domain_id or self.domain_name])
-        return all([self.password is not None,
-                    valid_user,
-                    valid_project_scope and valid_domain_scope])
-
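-# For example (illustrative values), a project-scoped v3 credential
-# passes is_valid():
-#
-#     creds = KeystoneV3Credentials(username='foo', password='bar',
-#                                   user_domain_name='Default',
-#                                   project_name='demo',
-#                                   project_domain_name='Default')
-#     assert creds.is_valid()
-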
-
-IDENTITY_VERSION = {'v2': (KeystoneV2Credentials, KeystoneV2AuthProvider),
-                    'v3': (KeystoneV3Credentials, KeystoneV3AuthProvider)}
diff --git a/neutron/tests/tempest/common/__init__.py b/neutron/tests/tempest/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/common/accounts.py b/neutron/tests/tempest/common/accounts.py
deleted file mode 100644 (file)
index 6440739..0000000
+++ /dev/null
@@ -1,357 +0,0 @@
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import hashlib
-import os
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-import yaml
-
-from neutron.tests.tempest.common import cred_provider
-from neutron.tests.tempest import config
-from neutron.tests.tempest import exceptions
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def read_accounts_yaml(path):
-    with open(path, 'r') as yaml_file:
-        # safe_load avoids constructing arbitrary Python objects from
-        # the accounts file
-        accounts = yaml.safe_load(yaml_file)
-    return accounts
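-
-# The accounts file is YAML; an illustrative entry (field names assumed
-# from the parsing in Accounts.get_hash_dict below):
-#
-#     - username: 'user-1'
-#       tenant_name: 'tenant-1'
-#       password: 'secret'
-#       roles: ['admin']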
-
-
-class Accounts(cred_provider.CredentialProvider):
-
-    def __init__(self, name):
-        super(Accounts, self).__init__(name)
-        self.name = name
-        if os.path.isfile(CONF.auth.test_accounts_file):
-            accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
-            self.use_default_creds = False
-        else:
-            accounts = {}
-            self.use_default_creds = True
-        self.hash_dict = self.get_hash_dict(accounts)
-        # FIXME(dhellmann): The configuration option is not part of
-        # the API of the library, because if we change the option name
-        # or group it will break this use. Tempest needs to set this
-        # value somewhere that it owns, and then use
-        # lockutils.set_defaults() to tell oslo.concurrency what value
-        # to use.
-        self.accounts_dir = os.path.join(CONF.oslo_concurrency.lock_path,
-                                         'test_accounts')
-        self.isolated_creds = {}
-
-    @classmethod
-    def _append_role(cls, role, account_hash, hash_dict):
-        if role in hash_dict['roles']:
-            hash_dict['roles'][role].append(account_hash)
-        else:
-            hash_dict['roles'][role] = [account_hash]
-        return hash_dict
-
-    @classmethod
-    def get_hash_dict(cls, accounts):
-        hash_dict = {'roles': {}, 'creds': {}}
-        # Loop over the accounts read from the yaml file
-        for account in accounts:
-            roles = []
-            types = []
-            if 'roles' in account:
-                roles = account.pop('roles')
-            if 'types' in account:
-                types = account.pop('types')
-            temp_hash = hashlib.md5()
-            temp_hash.update(str(account))
-            temp_hash_key = temp_hash.hexdigest()
-            hash_dict['creds'][temp_hash_key] = account
-            for role in roles:
-                hash_dict = cls._append_role(role, temp_hash_key,
-                                             hash_dict)
-            # If types are set for the account append the matching role
-            # subdict with the hash
-            for type in types:
-                if type == 'admin':
-                    hash_dict = cls._append_role(CONF.identity.admin_role,
-                                                 temp_hash_key, hash_dict)
-                elif type == 'operator':
-                    hash_dict = cls._append_role(
-                        CONF.object_storage.operator_role, temp_hash_key,
-                        hash_dict)
-                elif type == 'reseller_admin':
-                    hash_dict = cls._append_role(
-                        CONF.object_storage.reseller_admin_role,
-                        temp_hash_key,
-                        hash_dict)
-        return hash_dict
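-
-    # Illustrative shape of the returned hash_dict (keys under 'creds'
-    # are md5 hexdigests of the account dicts):
-    #     {'creds': {'<hash>': {account fields}},
-    #      'roles': {'admin': ['<hash>', ...]}}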
-
-    def is_multi_user(self):
-        # Default credentials are not a valid option with locking Accounts
-        if self.use_default_creds:
-            raise exceptions.InvalidConfiguration(
-                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
-        else:
-            return len(self.hash_dict['creds']) > 1
-
-    def is_multi_tenant(self):
-        return self.is_multi_user()
-
-    def _create_hash_file(self, hash_string):
-        path = os.path.join(self.accounts_dir, hash_string)
-        if not os.path.isfile(path):
-            with open(path, 'w') as fd:
-                fd.write(self.name)
-            return True
-        return False
-
-    @lockutils.synchronized('test_accounts_io', external=True)
-    def _get_free_hash(self, hashes):
-        # Cast as a list because in some edge cases a set will be passed in
-        hashes = list(hashes)
-        if not os.path.isdir(self.accounts_dir):
-            os.mkdir(self.accounts_dir)
-            # Create File from first hash (since none are in use)
-            self._create_hash_file(hashes[0])
-            return hashes[0]
-        names = []
-        for _hash in hashes:
-            res = self._create_hash_file(_hash)
-            if res:
-                return _hash
-            else:
-                path = os.path.join(self.accounts_dir, _hash)
-                with open(path, 'r') as fd:
-                    names.append(fd.read())
-        msg = ('Insufficient number of users provided. %s have allocated all '
-               'the credentials for this allocation request' % ','.join(names))
-        raise exceptions.InvalidConfiguration(msg)
-
-    def _get_match_hash_list(self, roles=None):
-        hashes = []
-        if roles:
-            # Loop over all the creds for each role in the subdict and generate
-            # a list of cred lists for each role
-            for role in roles:
-                temp_hashes = self.hash_dict['roles'].get(role, None)
-                if not temp_hashes:
-                    raise exceptions.InvalidConfiguration(
-                        "No credentials with role: %s specified in the "
-                        "accounts ""file" % role)
-                hashes.append(temp_hashes)
-            # Intersect the per-role lists to find the creds that fall
-            # under all the specified roles
-            temp_list = set(hashes[0])
-            for hash_list in hashes[1:]:
-                temp_list = temp_list & set(hash_list)
-            hashes = temp_list
-        else:
-            hashes = self.hash_dict['creds'].keys()
-        # NOTE(mtreinish): admin is a special case because of the increased
-        # privilege set, which could potentially cause issues on tests
-        # where that is not expected. So unless the admin role is
-        # explicitly requested, do not allocate admin.
-        admin_hashes = self.hash_dict['roles'].get(CONF.identity.admin_role,
-                                                   None)
-        if ((not roles or CONF.identity.admin_role not in roles) and
-                admin_hashes):
-            useable_hashes = [x for x in hashes if x not in admin_hashes]
-        else:
-            useable_hashes = hashes
-        return useable_hashes
-
-    def _get_creds(self, roles=None):
-        if self.use_default_creds:
-            raise exceptions.InvalidConfiguration(
-                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
-        useable_hashes = self._get_match_hash_list(roles)
-        free_hash = self._get_free_hash(useable_hashes)
-        return self.hash_dict['creds'][free_hash]
-
-    @lockutils.synchronized('test_accounts_io', external=True)
-    def remove_hash(self, hash_string):
-        hash_path = os.path.join(self.accounts_dir, hash_string)
-        if not os.path.isfile(hash_path):
-            LOG.warning('Expected an account lock file %s to remove, but '
-                        'one did not exist', hash_path)
-        else:
-            os.remove(hash_path)
-            if not os.listdir(self.accounts_dir):
-                os.rmdir(self.accounts_dir)
-
-    def get_hash(self, creds):
-        for _hash in self.hash_dict['creds']:
-            # Comparing on the attributes that are expected in the YAML
-            if all([getattr(creds, k) == self.hash_dict['creds'][_hash][k] for
-                   k in creds.get_init_attributes()]):
-                return _hash
-        raise AttributeError('Invalid credentials %s' % creds)
-
-    def remove_credentials(self, creds):
-        _hash = self.get_hash(creds)
-        self.remove_hash(_hash)
-
-    def get_primary_creds(self):
-        if self.isolated_creds.get('primary'):
-            return self.isolated_creds.get('primary')
-        creds = self._get_creds()
-        primary_credential = cred_provider.get_credentials(**creds)
-        self.isolated_creds['primary'] = primary_credential
-        return primary_credential
-
-    def get_alt_creds(self):
-        if self.isolated_creds.get('alt'):
-            return self.isolated_creds.get('alt')
-        creds = self._get_creds()
-        alt_credential = cred_provider.get_credentials(**creds)
-        self.isolated_creds['alt'] = alt_credential
-        return alt_credential
-
-    def get_creds_by_roles(self, roles, force_new=False):
-        roles = list(set(roles))
-        exist_creds = self.isolated_creds.get(str(roles), None)
-        # The force_new kwarg is used to allocate an additional set of creds
-        # with the same role list. The entry for the previous allocation in
-        # the isolated_creds dict is moved to a new index first.
-        if exist_creds and not force_new:
-            return exist_creds
-        elif exist_creds and force_new:
-            new_index = str(roles) + '-' + str(len(self.isolated_creds))
-            self.isolated_creds[new_index] = exist_creds
-        creds = self._get_creds(roles=roles)
-        role_credential = cred_provider.get_credentials(**creds)
-        self.isolated_creds[str(roles)] = role_credential
-        return role_credential
-
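-    # A sketch of the force_new bookkeeping (hypothetical role list): the
-    # earlier allocation is re-keyed so a second, distinct set can live
-    # under the plain str(roles) key:
-    #
-    #     a = provider.get_creds_by_roles(['member'])
-    #     b = provider.get_creds_by_roles(['member'], force_new=True)
-    #     # isolated_creds: {"['member']-1": a, "['member']": b}
-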
-    def clear_isolated_creds(self):
-        for creds in self.isolated_creds.values():
-            self.remove_credentials(creds)
-
-    def get_admin_creds(self):
-        return self.get_creds_by_roles([CONF.identity.admin_role])
-
-    def is_role_available(self, role):
-        if self.use_default_creds:
-            return False
-        else:
-            if self.hash_dict['roles'].get(role):
-                return True
-            return False
-
-    def admin_available(self):
-        return self.is_role_available(CONF.identity.admin_role)
-
-
-class NotLockingAccounts(Accounts):
-    """Credentials provider which always returns the first and second
-    configured accounts as primary and alt users.
-    This credential provider can be used in case of serial test execution
-    to preserve the current behaviour of the serial tempest run.
-    """
-
-    def _unique_creds(self, cred_arg=None):
-        """Verify that the configured credentials are valid and distinct """
-        if self.use_default_creds:
-            try:
-                user = self.get_primary_creds()
-                alt_user = self.get_alt_creds()
-                return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
-            except exceptions.InvalidCredentials as ic:
-                msg = "At least one of the configured credentials is " \
-                      "not valid: %s" % ic
-                raise exceptions.InvalidConfiguration(msg)
-        else:
-            # TODO(andreaf) Add a uniqueness check here
-            return len(self.hash_dict['creds']) > 1
-
-    def is_multi_user(self):
-        return self._unique_creds('username')
-
-    def is_multi_tenant(self):
-        return self._unique_creds('tenant_id')
-
-    def get_creds(self, id, roles=None):
-        try:
-            hashes = self._get_match_hash_list(roles)
-            # No need to sort: within the same python process the hash seed
-            # won't change, so iteration order is stable across calls. Cast
-            # to a list because a set may be returned when roles are given.
-            _hash = list(hashes)[id]
-        except IndexError:
-            msg = 'Insufficient number of users provided'
-            raise exceptions.InvalidConfiguration(msg)
-        return self.hash_dict['creds'][_hash]
-
-    def get_primary_creds(self):
-        if self.isolated_creds.get('primary'):
-            return self.isolated_creds.get('primary')
-        if not self.use_default_creds:
-            creds = self.get_creds(0)
-            primary_credential = cred_provider.get_credentials(**creds)
-        else:
-            primary_credential = cred_provider.get_configured_credentials(
-                'user')
-        self.isolated_creds['primary'] = primary_credential
-        return primary_credential
-
-    def get_alt_creds(self):
-        if self.isolated_creds.get('alt'):
-            return self.isolated_creds.get('alt')
-        if not self.use_default_creds:
-            creds = self.get_creds(1)
-            alt_credential = cred_provider.get_credentials(**creds)
-        else:
-            alt_credential = cred_provider.get_configured_credentials(
-                'alt_user')
-        self.isolated_creds['alt'] = alt_credential
-        return alt_credential
-
-    def clear_isolated_creds(self):
-        self.isolated_creds = {}
-
-    def get_admin_creds(self):
-        if not self.use_default_creds:
-            return self.get_creds_by_roles([CONF.identity.admin_role])
-        else:
-            creds = cred_provider.get_configured_credentials(
-                "identity_admin", fill_in=False)
-            self.isolated_creds['admin'] = creds
-            return creds
-
-    def get_creds_by_roles(self, roles, force_new=False):
-        roles = list(set(roles))
-        exist_creds = self.isolated_creds.get(str(roles), None)
-        index = 0
-        if exist_creds and not force_new:
-            return exist_creds
-        elif exist_creds and force_new:
-            new_index = str(roles) + '-' + str(len(self.isolated_creds))
-            self.isolated_creds[new_index] = exist_creds
-            # Figure out how many sets of creds for this role list already
-            # exist and use that count as the index into the returned hash
-            # list, so that separate creds are returned when force_new is True
-            for creds_names in self.isolated_creds:
-                if str(roles) in creds_names:
-                    index = index + 1
-        if not self.use_default_creds:
-            creds = self.get_creds(index, roles=roles)
-            role_credential = cred_provider.get_credentials(**creds)
-            self.isolated_creds[str(roles)] = role_credential
-        else:
-            msg = "Default credentials can not be used with specifying "\
-                  "credentials by roles"
-            raise exceptions.InvalidConfiguration(msg)
-        return role_credential
diff --git a/neutron/tests/tempest/common/commands.py b/neutron/tests/tempest/common/commands.py
deleted file mode 100644 (file)
index 392c9d0..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import shlex
-import subprocess
-
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def copy_file_to_host(file_from, dest, host, username, pkey):
-    dest = "%s@%s:%s" % (username, host, dest)
-    cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
-          "-o StrictHostKeyChecking=no " \
-          "-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
-                                              'file1': file_from,
-                                              'dest': dest}
-    args = shlex.split(cmd.encode('utf-8'))
-    subprocess_args = {'stdout': subprocess.PIPE,
-                       'stderr': subprocess.STDOUT}
-    proc = subprocess.Popen(args, **subprocess_args)
-    stdout, stderr = proc.communicate()
-    if proc.returncode != 0:
-        LOG.error(("Command {0} returned with exit status {1}, "
-                   "output {2}, error {3}").format(cmd, proc.returncode,
-                                                   stdout, stderr))
-    return stdout
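-
-# A usage sketch (all values hypothetical): the helper shells out to scp,
-# so the key file must exist locally and the host must accept the key:
-#
-#     copy_file_to_host('/tmp/report.log', '/var/log/report.log',
-#                       '192.0.2.10', 'ubuntu', '~/.ssh/id_rsa')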
diff --git a/neutron/tests/tempest/common/cred_provider.py b/neutron/tests/tempest/common/cred_provider.py
deleted file mode 100644 (file)
index b90d09d..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright (c) 2014 Deutsche Telekom AG
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-import abc
-
-import six
-
-from neutron.tests.tempest import auth
-from neutron.tests.tempest import config
-from neutron.tests.tempest import exceptions
-
-CONF = config.CONF
-
-# Type of credentials available from configuration
-CREDENTIAL_TYPES = {
-    'identity_admin': ('identity', 'admin'),
-    'user': ('identity', None),
-    'alt_user': ('identity', 'alt')
-}
-
-DEFAULT_PARAMS = {
-    'disable_ssl_certificate_validation':
-        CONF.identity.disable_ssl_certificate_validation,
-    'ca_certs': CONF.identity.ca_certificates_file,
-    'trace_requests': CONF.debug.trace_requests
-}
-
-
-# Reads credentials from configuration and builds a Credentials object
-# based on the specified or configured identity version
-def get_configured_credentials(credential_type, fill_in=True,
-                               identity_version=None):
-    identity_version = identity_version or CONF.identity.auth_version
-    if identity_version not in ('v2', 'v3'):
-        raise exceptions.InvalidConfiguration(
-            'Unsupported auth version: %s' % identity_version)
-    if credential_type not in CREDENTIAL_TYPES:
-        raise exceptions.InvalidCredentials()
-    conf_attributes = ['username', 'password', 'tenant_name']
-    if identity_version == 'v3':
-        conf_attributes.append('domain_name')
-    # Read the parts of credentials from config
-    params = DEFAULT_PARAMS.copy()
-    section, prefix = CREDENTIAL_TYPES[credential_type]
-    for attr in conf_attributes:
-        _section = getattr(CONF, section)
-        if prefix is None:
-            params[attr] = getattr(_section, attr)
-        else:
-            params[attr] = getattr(_section, prefix + "_" + attr)
-    # Build and validate credentials. We are reading configured credentials,
-    # so validate them even if fill_in is False
-    credentials = get_credentials(fill_in=fill_in, **params)
-    if not fill_in:
-        if not credentials.is_valid():
-            msg = ("The %s credentials are incorrectly set in the config file."
-                   " Double check that all required values are assigned" %
-                   credential_type)
-            raise exceptions.InvalidConfiguration(msg)
-    return credentials
-
-
-# Wrapper around auth.get_credentials that uses the configured identity
-# version if none is specified
-def get_credentials(fill_in=True, identity_version=None, **kwargs):
-    params = dict(DEFAULT_PARAMS, **kwargs)
-    identity_version = identity_version or CONF.identity.auth_version
-    # In case of "v3" add the domain from config if not specified
-    if identity_version == 'v3':
-        domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
-                            if 'domain' in x)
-        if not domain_fields.intersection(kwargs.keys()):
-            kwargs['user_domain_name'] = CONF.identity.admin_domain_name
-        auth_url = CONF.identity.uri_v3
-    else:
-        auth_url = CONF.identity.uri
-    return auth.get_credentials(auth_url,
-                                fill_in=fill_in,
-                                identity_version=identity_version,
-                                **params)
-
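-# A usage sketch (values hypothetical): with auth_version 'v3' and no
-# domain field passed in, the admin domain from the config is filled in:
-#
-#     creds = get_credentials(username='demo', password='secret',
-#                             tenant_name='demo')
-#     # creds.user_domain_name defaults to CONF.identity.admin_domain_name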
-
-@six.add_metaclass(abc.ABCMeta)
-class CredentialProvider(object):
-    def __init__(self, name, password='pass', network_resources=None):
-        self.name = name
-
-    @abc.abstractmethod
-    def get_primary_creds(self):
-        return
-
-    @abc.abstractmethod
-    def get_admin_creds(self):
-        return
-
-    @abc.abstractmethod
-    def get_alt_creds(self):
-        return
-
-    @abc.abstractmethod
-    def clear_isolated_creds(self):
-        return
-
-    @abc.abstractmethod
-    def is_multi_user(self):
-        return
-
-    @abc.abstractmethod
-    def is_multi_tenant(self):
-        return
-
-    @abc.abstractmethod
-    def get_creds_by_roles(self, roles, force_new=False):
-        return
-
-    @abc.abstractmethod
-    def is_role_available(self, role):
-        return
diff --git a/neutron/tests/tempest/common/credentials.py b/neutron/tests/tempest/common/credentials.py
deleted file mode 100644 (file)
index 9dfdff0..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-import os
-
-from neutron.tests.tempest.common import accounts
-from neutron.tests.tempest.common import cred_provider
-from neutron.tests.tempest.common import isolated_creds
-from neutron.tests.tempest import config
-from neutron.tests.tempest import exceptions
-
-CONF = config.CONF
-
-
-# Return the right implementation of CredentialProvider based on config
-# Dropping interface and password, as they are never used anyways
-# TODO(andreaf) Drop them from the CredentialsProvider interface completely
-def get_isolated_credentials(name, network_resources=None,
-                             force_tenant_isolation=False):
-    # If a test requires a new account to work, it can get one by forcing
-    # tenant isolation. A new account will be produced only for that test.
-    # In case admin credentials are not available for the account creation,
-    # the test should be skipped; otherwise it would fail.
-    if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
-        return isolated_creds.IsolatedCreds(
-            name=name,
-            network_resources=network_resources)
-    else:
-        if CONF.auth.locking_credentials_provider:
-            # Most params are not relevant for pre-created accounts
-            return accounts.Accounts(name=name)
-        else:
-            return accounts.NotLockingAccounts(name=name)
-
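-# A selection sketch (the outcome depends on local config): tenant
-# isolation wins, then the locking flag picks the pre-provisioned provider:
-#
-#     provider = get_isolated_credentials('smoke')
-#     # -> IsolatedCreds if CONF.auth.allow_tenant_isolation, else
-#     #    Accounts or NotLockingAccounts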
-
-# We want a helper function here to check and see if admin credentials
-# are available so we can do a single call from skip_checks if admin
-# creds are available.
-def is_admin_available():
-    is_admin = True
-    # If tenant isolation is enabled admin will be available
-    if CONF.auth.allow_tenant_isolation:
-        return is_admin
-    # Check whether test accounts file has the admin specified or not
-    elif os.path.isfile(CONF.auth.test_accounts_file):
-        check_accounts = accounts.Accounts(name='check_admin')
-        if not check_accounts.admin_available():
-            is_admin = False
-    else:
-        try:
-            cred_provider.get_configured_credentials('identity_admin',
-                                                     fill_in=False)
-        except exceptions.InvalidConfiguration:
-            is_admin = False
-    return is_admin
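-
-# A typical call-site sketch (the test class is hypothetical): called from
-# skip_checks so admin-only tests bail out early:
-#
-#     if not credentials.is_admin_available():
-#         raise cls.skipException('Admin credentials are not available')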
diff --git a/neutron/tests/tempest/common/custom_matchers.py b/neutron/tests/tempest/common/custom_matchers.py
deleted file mode 100644 (file)
index 839088c..0000000
+++ /dev/null
@@ -1,227 +0,0 @@
-# Copyright 2013 NTT Corporation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-
-import six
-from testtools import helpers
-
-
-class ExistsAllResponseHeaders(object):
-    """
-    Specific matcher to check the existence of Swift's response headers
-
-    This matcher checks the existence of common headers for each HTTP method
-    and the target, which means account, container or object.
-    When checking the existence of 'specific' headers such as
-    X-Account-Meta-* or X-Object-Manifest, those headers must be
-    checked in the individual test code.
-    """
-
-    def __init__(self, target, method):
-        """
-        :param target: Account/Container/Object
-        :param method: PUT/GET/HEAD/DELETE/COPY/POST
-        """
-        self.target = target
-        self.method = method
-
-    def match(self, actual):
-        """
-        :param actual: HTTP response headers
-        """
-        # Check common headers for all HTTP methods
-        if 'content-length' not in actual:
-            return NonExistentHeader('content-length')
-        if 'content-type' not in actual:
-            return NonExistentHeader('content-type')
-        if 'x-trans-id' not in actual:
-            return NonExistentHeader('x-trans-id')
-        if 'date' not in actual:
-            return NonExistentHeader('date')
-
-        # Check headers for a specific method or target
-        if self.method == 'GET' or self.method == 'HEAD':
-            if 'x-timestamp' not in actual:
-                return NonExistentHeader('x-timestamp')
-            if 'accept-ranges' not in actual:
-                return NonExistentHeader('accept-ranges')
-            if self.target == 'Account':
-                if 'x-account-bytes-used' not in actual:
-                    return NonExistentHeader('x-account-bytes-used')
-                if 'x-account-container-count' not in actual:
-                    return NonExistentHeader('x-account-container-count')
-                if 'x-account-object-count' not in actual:
-                    return NonExistentHeader('x-account-object-count')
-            elif self.target == 'Container':
-                if 'x-container-bytes-used' not in actual:
-                    return NonExistentHeader('x-container-bytes-used')
-                if 'x-container-object-count' not in actual:
-                    return NonExistentHeader('x-container-object-count')
-            elif self.target == 'Object':
-                if 'etag' not in actual:
-                    return NonExistentHeader('etag')
-                if 'last-modified' not in actual:
-                    return NonExistentHeader('last-modified')
-        elif self.method == 'PUT':
-            if self.target == 'Object':
-                if 'etag' not in actual:
-                    return NonExistentHeader('etag')
-                if 'last-modified' not in actual:
-                    return NonExistentHeader('last-modified')
-        elif self.method == 'COPY':
-            if self.target == 'Object':
-                if 'etag' not in actual:
-                    return NonExistentHeader('etag')
-                if 'last-modified' not in actual:
-                    return NonExistentHeader('last-modified')
-                if 'x-copied-from' not in actual:
-                    return NonExistentHeader('x-copied-from')
-                if 'x-copied-from-last-modified' not in actual:
-                    return NonExistentHeader('x-copied-from-last-modified')
-
-        return None
-
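-# A usage sketch with testtools (the response dict is hypothetical): a
-# missing header surfaces as a NonExistentHeader mismatch:
-#
-#     self.assertThat(resp_headers,
-#                     ExistsAllResponseHeaders('Object', 'GET'))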
-
-class NonExistentHeader(object):
-    """
-    Generates an error message for end users when a certain
-    header is missing from Swift's response
-    """
-
-    def __init__(self, header):
-        self.header = header
-
-    def describe(self):
-        return "%s header does not exist" % self.header
-
-    def get_details(self):
-        return {}
-
-
-class AreAllWellFormatted(object):
-    """
-    Specific matcher to check that the values of Swift's response headers
-    are correctly formatted
-
-    This matcher checks the format of values of response headers.
-    When checking the format of values of 'specific' headers such as
-    X-Account-Meta-* or X-Object-Manifest, those values must be
-    checked in the individual test code.
-    """
-
-    def match(self, actual):
-        for key, value in six.iteritems(actual):
-            if key in ('content-length', 'x-account-bytes-used',
-                       'x-account-container-count', 'x-account-object-count',
-                       'x-container-bytes-used', 'x-container-object-count')\
-                and not value.isdigit():
-                return InvalidFormat(key, value)
-            elif key in ('content-type', 'date', 'last-modified',
-                         'x-copied-from-last-modified') and not value:
-                return InvalidFormat(key, value)
-            elif key == 'x-timestamp' and not re.match(r"^\d+\.?\d*\Z", value):
-                return InvalidFormat(key, value)
-            elif key == 'x-copied-from' and not re.match(r"\S+/\S+", value):
-                return InvalidFormat(key, value)
-            elif key == 'x-trans-id' and \
-                not re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*", value):
-                return InvalidFormat(key, value)
-            elif key == 'accept-ranges' and not value == 'bytes':
-                return InvalidFormat(key, value)
-            elif key == 'etag' and not value.isalnum():
-                return InvalidFormat(key, value)
-            elif key == 'transfer-encoding' and not value == 'chunked':
-                return InvalidFormat(key, value)
-
-        return None
-
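-# A usage sketch (hypothetical header value): a non-numeric content-length
-# is reported as an InvalidFormat mismatch:
-#
-#     self.assertThat({'content-length': 'abc'}, AreAllWellFormatted())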
-
-class InvalidFormat(object):
-    """
-    Generates an error message for end users when the value of a certain
-    header is incorrectly formatted
-    """
-
-    def __init__(self, key, value):
-        self.key = key
-        self.value = value
-
-    def describe(self):
-        return "InvalidFormat (%s, %s)" % (self.key, self.value)
-
-    def get_details(self):
-        return {}
-
-
-class MatchesDictExceptForKeys(object):
-    """Matches two dictionaries. Verifies all items are equals except for those
-    identified by a list of keys.
-    """
-
-    def __init__(self, expected, excluded_keys=None):
-        self.expected = expected
-        self.excluded_keys = excluded_keys if excluded_keys is not None else []
-
-    def match(self, actual):
-        filtered_expected = helpers.dict_subtract(self.expected,
-                                                  self.excluded_keys)
-        filtered_actual = helpers.dict_subtract(actual,
-                                                self.excluded_keys)
-        if filtered_actual != filtered_expected:
-            return DictMismatch(filtered_expected, filtered_actual)
-
-
-class DictMismatch(object):
-    """Mismatch between two dicts describes deltas"""
-
-    def __init__(self, expected, actual):
-        self.expected = expected
-        self.actual = actual
-        self.intersect = set(self.expected) & set(self.actual)
-        self.symmetric_diff = set(self.expected) ^ set(self.actual)
-
-    def _format_dict(self, dict_to_format):
-        # Ensure the error string dict is printed in a set order
-        # NOTE(mtreinish): needed to ensure a deterministic error msg for
-        # testing. Otherwise the error message will be dependent on the
-        # dict ordering.
-        dict_string = "{"
-        for key in sorted(dict_to_format):
-            dict_string += "'%s': %s, " % (key, dict_to_format[key])
-        dict_string = dict_string[:-2] + '}'
-        return dict_string
-
-    def describe(self):
-        msg = ""
-        if self.symmetric_diff:
-            only_expected = helpers.dict_subtract(self.expected, self.actual)
-            only_actual = helpers.dict_subtract(self.actual, self.expected)
-            if only_expected:
-                msg += "Only in expected:\n  %s\n" % self._format_dict(
-                    only_expected)
-            if only_actual:
-                msg += "Only in actual:\n  %s\n" % self._format_dict(
-                    only_actual)
-        diff_set = set(o for o in self.intersect if
-                       self.expected[o] != self.actual[o])
-        if diff_set:
-            msg += "Differences:\n"
-            for o in diff_set:
-                msg += "  %s: expected %s, actual %s\n" % (
-                    o, self.expected[o], self.actual[o])
-        return msg
-
-    def get_details(self):
-        return {}
diff --git a/neutron/tests/tempest/common/generator/__init__.py b/neutron/tests/tempest/common/generator/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/common/generator/base_generator.py b/neutron/tests/tempest/common/generator/base_generator.py
deleted file mode 100644 (file)
index 41ed48c..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2014 Deutsche Telekom AG
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-import functools
-
-import jsonschema
-import six
-
-
-def _check_for_expected_result(name, schema):
-    expected_result = None
-    if "results" in schema:
-        if name in schema["results"]:
-            expected_result = schema["results"][name]
-    return expected_result
-
-
-def generator_type(*args, **kwargs):
-    def wrapper(func):
-        func.types = args
-        for key in kwargs:
-            setattr(func, key, kwargs[key])
-        return func
-    return wrapper
-
-
-def simple_generator(fn):
-    """
-    Decorator for simple generators that return one value
-    """
-    @functools.wraps(fn)
-    def wrapped(self, schema):
-        result = fn(self, schema)
-        if result is not None:
-            expected_result = _check_for_expected_result(fn.__name__, schema)
-            return (fn.__name__, result, expected_result)
-        return
-    return wrapped
-
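-# A sketch of a decorated generator (names hypothetical): the wrapper turns
-# a bare return value into a (name, value, expected_result) triple, or None
-# when the generator returned nothing:
-#
-#     @simple_generator
-#     def gen_string(self, schema):
-#         return "XXXXXX"
-#
-#     # gen_string(self, {}) -> ('gen_string', 'XXXXXX', None)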
-
-class BasicGeneratorSet(object):
-    _instance = None
-
-    schema = {
-        "type": "object",
-        "properties": {
-            "name": {"type": "string"},
-            "http-method": {
-                "enum": ["GET", "PUT", "HEAD",
-                         "POST", "PATCH", "DELETE", 'COPY']
-            },
-            "admin_client": {"type": "boolean"},
-            "url": {"type": "string"},
-            "default_result_code": {"type": "integer"},
-            "json-schema": {},
-            "resources": {
-                "type": "array",
-                "items": {
-                    "oneOf": [
-                        {"type": "string"},
-                        {
-                            "type": "object",
-                            "properties": {
-                                "name": {"type": "string"},
-                                "expected_result": {"type": "integer"}
-                            }
-                        }
-                    ]
-                }
-            },
-            "results": {
-                "type": "object",
-                "properties": {}
-            }
-        },
-        "required": ["name", "http-method", "url"],
-        "additionalProperties": False,
-    }
-
-    def __init__(self):
-        self.types_dict = {}
-        for m in dir(self):
-            if callable(getattr(self, m)) and '__' not in m:
-                method = getattr(self, m)
-                if hasattr(method, "types"):
-                    for type in method.types:
-                        if type not in self.types_dict:
-                            self.types_dict[type] = []
-                        self.types_dict[type].append(method)
-
-    def validate_schema(self, schema):
-        if "json-schema" in schema:
-            jsonschema.Draft4Validator.check_schema(schema['json-schema'])
-        jsonschema.validate(schema, self.schema)
-
-    def generate_scenarios(self, schema, path=None):
-        """
-        Generates the scenarios (all possible test cases) out of the given
-        schema.
-
-        :param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
-        :param path: the schema path if the given schema is a subschema
-        """
-        schema_type = schema['type']
-        scenarios = []
-
-        if schema_type == 'object':
-            properties = schema["properties"]
-            for attribute, definition in six.iteritems(properties):
-                current_path = copy.copy(path)
-                if path is not None:
-                    current_path.append(attribute)
-                else:
-                    current_path = [attribute]
-                scenarios.extend(
-                    self.generate_scenarios(definition, current_path))
-        elif isinstance(schema_type, list):
-            if "integer" in schema_type:
-                schema_type = "integer"
-            else:
-                raise Exception("non-integer list types not supported")
-        for generator in self.types_dict[schema_type]:
-            if hasattr(generator, "needed_property"):
-                prop = generator.needed_property
-                if (prop not in schema or
-                    schema[prop] is None or
-                    schema[prop] is False):
-                    continue
-
-            name = generator.__name__
-            if ("exclude_tests" in schema and
-               name in schema["exclude_tests"]):
-                continue
-            if path is not None:
-                name = "%s_%s" % ("_".join(path), name)
-            scenarios.append({
-                "_negtest_name": name,
-                "_negtest_generator": generator,
-                "_negtest_schema": schema,
-                "_negtest_path": path})
-        return scenarios
-
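-    # A sketch of the recursion (hypothetical schema, generator names taken
-    # from NegativeTestGenerator): each property is visited with its path,
-    # and every matching registered generator yields one scenario named
-    # after that path:
-    #
-    #     schema = {'type': 'object',
-    #               'properties': {'name': {'type': 'string'}}}
-    #     gen.generate_scenarios(schema)
-    #     # -> scenarios such as {'_negtest_name': 'name_gen_int', ...}
-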
-    def generate_payload(self, test, schema):
-        """
-        Generates one jsonschema out of the given test. generate_scenarios
-        must be called first to register all needed variables on the test.
-
-        :param test: A test object (scenario) with all _negtest variables on it
-        :param schema: schema for the test
-        """
-        generator = test._negtest_generator
-        ret = generator(test._negtest_schema)
-        path = copy.copy(test._negtest_path)
-        expected_result = None
-
-        if ret is not None:
-            # Reuse the result computed above instead of invoking the
-            # generator a second time
-            invalid_snippet = ret[1]
-            expected_result = ret[2]
-            element = path.pop()
-            if len(path) > 0:
-                schema_snip = six.moves.reduce(dict.get, path, schema)
-                schema_snip[element] = invalid_snippet
-            else:
-                schema[element] = invalid_snippet
-        return expected_result
diff --git a/neutron/tests/tempest/common/generator/negative_generator.py b/neutron/tests/tempest/common/generator/negative_generator.py
deleted file mode 100644 (file)
index 44cd305..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2014 Deutsche Telekom AG
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-import neutron.tests.tempest.common.generator.base_generator as base
-import neutron.tests.tempest.common.generator.valid_generator as valid
-
-
-class NegativeTestGenerator(base.BasicGeneratorSet):
-    @base.generator_type("string")
-    @base.simple_generator
-    def gen_int(self, _):
-        return 4
-
-    @base.generator_type("integer")
-    @base.simple_generator
-    def gen_string(self, _):
-        return "XXXXXX"
-
-    @base.generator_type("integer", "string")
-    def gen_none(self, schema):
-        # Note(mkoderer): this generator does not use the simple_generator
-        # decorator, since the decorator would filter out the None value
-        expected_result = base._check_for_expected_result('gen_none', schema)
-        return ('gen_none', None, expected_result)
-
-    @base.generator_type("string")
-    @base.simple_generator
-    def gen_str_min_length(self, schema):
-        min_length = schema.get("minLength", 0)
-        if min_length > 0:
-            return "x" * (min_length - 1)
-
-    @base.generator_type("string", needed_property="maxLength")
-    @base.simple_generator
-    def gen_str_max_length(self, schema):
-        max_length = schema.get("maxLength", -1)
-        return "x" * (max_length + 1)
-
-    @base.generator_type("integer", needed_property="minimum")
-    @base.simple_generator
-    def gen_int_min(self, schema):
-        minimum = schema["minimum"]
-        if "exclusiveMinimum" not in schema:
-            minimum -= 1
-        return minimum
-
-    @base.generator_type("integer", needed_property="maximum")
-    @base.simple_generator
-    def gen_int_max(self, schema):
-        maximum = schema["maximum"]
-        if "exclusiveMaximum" not in schema:
-            maximum += 1
-        return maximum
-
-    @base.generator_type("object", needed_property="additionalProperties")
-    @base.simple_generator
-    def gen_obj_add_attr(self, schema):
-        valid_schema = valid.ValidTestGenerator().generate_valid(schema)
-        new_valid = copy.deepcopy(valid_schema)
-        new_valid["$$$$$$$$$$"] = "xxx"
-        return new_valid
diff --git a/neutron/tests/tempest/common/generator/valid_generator.py b/neutron/tests/tempest/common/generator/valid_generator.py
deleted file mode 100644 (file)
index bc7014c..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2014 Deutsche Telekom AG
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import six
-
-import neutron.tests.tempest.common.generator.base_generator as base
-
-
-class ValidTestGenerator(base.BasicGeneratorSet):
-    @base.generator_type("string")
-    @base.simple_generator
-    def generate_valid_string(self, schema):
-        size = schema.get("minLength", 1)
-        # TODO(dkr mko): handle format and pattern
-        return "x" * size
-
-    @base.generator_type("integer")
-    @base.simple_generator
-    def generate_valid_integer(self, schema):
-        # TODO(dkr mko): handle multipleOf
-        if "minimum" in schema:
-            minimum = schema["minimum"]
-            if "exclusiveMinimum" not in schema:
-                return minimum
-            else:
-                return minimum + 1
-        if "maximum" in schema:
-            maximum = schema["maximum"]
-            if "exclusiveMaximum" not in schema:
-                return maximum
-            else:
-                return maximum - 1
-        return 0
-
-    @base.generator_type("object")
-    @base.simple_generator
-    def generate_valid_object(self, schema):
-        obj = {}
-        for k, v in six.iteritems(schema["properties"]):
-            obj[k] = self.generate_valid(v)
-        return obj
-
-    def generate(self, schema):
-        schema_type = schema["type"]
-        if isinstance(schema_type, list):
-            if "integer" in schema_type:
-                schema_type = "integer"
-            else:
-                raise Exception("non-integer list types not supported")
-        result = []
-        if schema_type not in self.types_dict:
-            raise TypeError("generator (%s) doesn't support type: %s"
-                            % (self.__class__.__name__, schema_type))
-        for generator in self.types_dict[schema_type]:
-            ret = generator(schema)
-            if ret is not None:
-                if isinstance(ret, list):
-                    result.extend(ret)
-                elif isinstance(ret, tuple):
-                    result.append(ret)
-                else:
-                    raise Exception("generator (%s) returns invalid result: %s"
-                                    % (generator, ret))
-        return result
-
-    def generate_valid(self, schema):
-        return self.generate(schema)[0][1]
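-
-    # A usage sketch (hypothetical schema): generate_valid returns only the
-    # value from the first (name, value, expected_result) triple produced:
-    #
-    #     ValidTestGenerator().generate_valid(
-    #         {'type': 'integer', 'minimum': 5})  # -> 5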
diff --git a/neutron/tests/tempest/common/glance_http.py b/neutron/tests/tempest/common/glance_http.py
deleted file mode 100644 (file)
index 3d8c8aa..0000000
+++ /dev/null
@@ -1,379 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# Originally copied from python-glanceclient
-
-import copy
-import hashlib
-import posixpath
-import re
-import socket
-import StringIO
-import struct
-import urlparse
-
-
-import OpenSSL
-from oslo_log import log as logging
-from oslo_serialization import jsonutils as json
-from six import moves
-from six.moves import http_client as httplib
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.tempest import exceptions as exc
-
-LOG = logging.getLogger(__name__)
-USER_AGENT = 'tempest'
-CHUNKSIZE = 1024 * 64  # 64 KiB
-TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
-
-
-class HTTPClient(object):
-
-    def __init__(self, auth_provider, filters, **kwargs):
-        self.auth_provider = auth_provider
-        self.filters = filters
-        self.endpoint = auth_provider.base_url(filters)
-        endpoint_parts = urlparse.urlparse(self.endpoint)
-        self.endpoint_scheme = endpoint_parts.scheme
-        self.endpoint_hostname = endpoint_parts.hostname
-        self.endpoint_port = endpoint_parts.port
-        self.endpoint_path = endpoint_parts.path
-
-        self.connection_class = self.get_connection_class(self.endpoint_scheme)
-        self.connection_kwargs = self.get_connection_kwargs(
-            self.endpoint_scheme, **kwargs)
-
-    @staticmethod
-    def get_connection_class(scheme):
-        if scheme == 'https':
-            return VerifiedHTTPSConnection
-        else:
-            return httplib.HTTPConnection
-
-    @staticmethod
-    def get_connection_kwargs(scheme, **kwargs):
-        _kwargs = {'timeout': float(kwargs.get('timeout', 600))}
-
-        if scheme == 'https':
-            _kwargs['ca_certs'] = kwargs.get('ca_certs', None)
-            _kwargs['cert_file'] = kwargs.get('cert_file', None)
-            _kwargs['key_file'] = kwargs.get('key_file', None)
-            _kwargs['insecure'] = kwargs.get('insecure', False)
-            _kwargs['ssl_compression'] = kwargs.get('ssl_compression', True)
-
-        return _kwargs
-
-    def get_connection(self):
-        _class = self.connection_class
-        try:
-            return _class(self.endpoint_hostname, self.endpoint_port,
-                          **self.connection_kwargs)
-        except httplib.InvalidURL:
-            raise exc.EndpointNotFound
-
-    def _http_request(self, url, method, **kwargs):
-        """Send an http request with the specified characteristics.
-
-        Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
-        as setting headers and error handling.
-        """
-        # Copy the kwargs so we can reuse the original in case of redirects
-        kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
-        kwargs['headers'].setdefault('User-Agent', USER_AGENT)
-
-        self._log_request(method, url, kwargs['headers'])
-
-        conn = self.get_connection()
-
-        try:
-            url_parts = urlparse.urlparse(url)
-            conn_url = posixpath.normpath(url_parts.path)
-            LOG.debug('Actual Path: {path}'.format(path=conn_url))
-            if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
-                conn.putrequest(method, conn_url)
-                for header, value in kwargs['headers'].items():
-                    conn.putheader(header, value)
-                conn.endheaders()
-                chunk = kwargs['body'].read(CHUNKSIZE)
-                # Chunk it, baby...
-                while chunk:
-                    conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
-                    chunk = kwargs['body'].read(CHUNKSIZE)
-                conn.send('0\r\n\r\n')
-            else:
-                conn.request(method, conn_url, **kwargs)
-            resp = conn.getresponse()
-        except socket.gaierror as e:
-            message = ("Error finding address for %(url)s: %(e)s" %
-                       {'url': url, 'e': e})
-            raise exc.EndpointNotFound(message)
-        except (socket.error, socket.timeout) as e:
-            message = ("Error communicating with %(endpoint)s %(e)s" %
-                       {'endpoint': self.endpoint, 'e': e})
-            raise exc.TimeoutException(message)
-
-        body_iter = ResponseBodyIterator(resp)
-        # Read body into string if it isn't obviously image data
-        if resp.getheader('content-type', None) != 'application/octet-stream':
-            body_str = ''.join([body_chunk for body_chunk in body_iter])
-            body_iter = StringIO.StringIO(body_str)
-            self._log_response(resp, None)
-        else:
-            self._log_response(resp, body_iter)
-
-        return resp, body_iter
-
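-    # A framing sketch for the chunked branch above: each chunk is sent as
-    # hex length, CRLF, data, CRLF, and a zero-length chunk ends the body,
-    # e.g. for a single chunk 'abc':
-    #
-    #     '3\r\nabc\r\n' followed by '0\r\n\r\n'
-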
-    def _log_request(self, method, url, headers):
-        LOG.info('Request: ' + method + ' ' + url)
-        if headers:
-            headers_out = headers
-            if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
-                token = headers['X-Auth-Token']
-                if len(token) > 64 and TOKEN_CHARS_RE.match(token):
-                    headers_out = headers.copy()
-                    headers_out['X-Auth-Token'] = "<Token omitted>"
-                LOG.info('Request Headers: ' + str(headers_out))
-
-    def _log_response(self, resp, body):
-        status = str(resp.status)
-        LOG.info("Response Status: " + status)
-        if resp.getheaders():
-            LOG.info('Response Headers: ' + str(resp.getheaders()))
-        if body:
-            str_body = str(body)
-            length = len(body)
-            LOG.info('Response Body: ' + str_body[:2048])
-            if length >= 2048:
-                LOG.debug("Large body (%d) md5 summary: %s", length,
-                          hashlib.md5(str_body).hexdigest())
-
-    def json_request(self, method, url, **kwargs):
-        kwargs.setdefault('headers', {})
-        kwargs['headers'].setdefault('Content-Type', 'application/json')
-        if kwargs['headers']['Content-Type'] != 'application/json':
-            msg = "Only application/json content-type is supported."
-            raise lib_exc.InvalidContentType(msg)
-
-        if 'body' in kwargs:
-            kwargs['body'] = json.dumps(kwargs['body'])
-
-        resp, body_iter = self._http_request(url, method, **kwargs)
-
-        if 'application/json' in resp.getheader('content-type', ''):
-            body = ''.join([chunk for chunk in body_iter])
-            try:
-                body = json.loads(body)
-            except ValueError:
-                LOG.error('Could not decode response body as JSON')
-        else:
-            msg = "Only json/application content-type is supported."
-            raise lib_exc.InvalidContentType(msg)
-
-        return resp, body
-
-    def raw_request(self, method, url, **kwargs):
-        kwargs.setdefault('headers', {})
-        kwargs['headers'].setdefault('Content-Type',
-                                     'application/octet-stream')
-        if 'body' in kwargs:
-            if (hasattr(kwargs['body'], 'read')
-                    and method.lower() in ('post', 'put')):
-                # We use 'Transfer-Encoding: chunked' because
-                # body size may not always be known in advance.
-                kwargs['headers']['Transfer-Encoding'] = 'chunked'
-
-        # Decorate the request with auth
-        req_url, kwargs['headers'], kwargs['body'] = \
-            self.auth_provider.auth_request(
-                method=method, url=url, headers=kwargs['headers'],
-                body=kwargs.get('body', None), filters=self.filters)
-        return self._http_request(req_url, method, **kwargs)
-
-
-class OpenSSLConnectionDelegator(object):
-    """
-    An OpenSSL.SSL.Connection delegator.
-
-    Supplies an additional 'makefile' method which httplib requires
-    and is not present in OpenSSL.SSL.Connection.
-
-    Note: Since it is not possible to inherit from OpenSSL.SSL.Connection,
-    a delegator must be used.
-    """
-    def __init__(self, *args, **kwargs):
-        self.connection = OpenSSL.SSL.Connection(*args, **kwargs)
-
-    def __getattr__(self, name):
-        return getattr(self.connection, name)
-
-    def makefile(self, *args, **kwargs):
-        # Ensure the socket is closed when this file is closed
-        kwargs['close'] = True
-        return socket._fileobject(self.connection, *args, **kwargs)
-
-
-class VerifiedHTTPSConnection(httplib.HTTPSConnection):
-    """
-    Extended HTTPSConnection which uses the OpenSSL library
-    for enhanced SSL support.
-    Note: Much of this functionality can eventually be replaced
-          with native Python 3.3 code.
-    """
-    def __init__(self, host, port=None, key_file=None, cert_file=None,
-                 ca_certs=None, timeout=None, insecure=False,
-                 ssl_compression=True):
-        httplib.HTTPSConnection.__init__(self, host, port,
-                                         key_file=key_file,
-                                         cert_file=cert_file)
-        self.key_file = key_file
-        self.cert_file = cert_file
-        self.timeout = timeout
-        self.insecure = insecure
-        self.ssl_compression = ssl_compression
-        self.ca_certs = ca_certs
-        self.setcontext()
-
-    @staticmethod
-    def host_matches_cert(host, x509):
-        """
-        Verify that the x509 certificate we have received
-        from 'host' correctly identifies the server we are
-        connecting to, i.e. that the certificate's Common Name
-        or a Subject Alternative Name matches 'host'.
-        """
-        # First see if we can match the CN
-        if x509.get_subject().commonName == host:
-            return True
-
-        # Also try Subject Alternative Names for a match
-        san_list = None
-        for i in moves.range(x509.get_extension_count()):
-            ext = x509.get_extension(i)
-            if ext.get_short_name() == 'subjectAltName':
-                san_list = str(ext)
-                for san in ''.join(san_list.split()).split(','):
-                    if san == "DNS:%s" % host:
-                        return True
-
-        # Server certificate does not match host
-        msg = ('Host "%s" does not match x509 certificate contents: '
-               'CommonName "%s"' % (host, x509.get_subject().commonName))
-        if san_list is not None:
-            msg = msg + ', subjectAltName "%s"' % san_list
-        raise exc.SSLCertificateError(msg)
-
-    def verify_callback(self, connection, x509, errnum,
-                        depth, preverify_ok):
-        if x509.has_expired():
-            msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
-            raise exc.SSLCertificateError(msg)
-
-        if depth == 0 and preverify_ok is True:
-            # We verify that the host matches against the last
-            # certificate in the chain
-            return self.host_matches_cert(self.host, x509)
-        else:
-            # Pass through OpenSSL's default result
-            return preverify_ok
-
-    def setcontext(self):
-        """
-        Set up the OpenSSL context.
-        """
-        self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
-
-        if self.ssl_compression is False:
-            self.context.set_options(0x20000)  # SSL_OP_NO_COMPRESSION
-
-        if self.insecure is not True:
-            self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
-                                    self.verify_callback)
-        else:
-            self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
-                                    self.verify_callback)
-
-        if self.cert_file:
-            try:
-                self.context.use_certificate_file(self.cert_file)
-            except Exception as e:
-                msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
-                raise exc.SSLConfigurationError(msg)
-            if self.key_file is None:
-                # We support having key and cert in same file
-                try:
-                    self.context.use_privatekey_file(self.cert_file)
-                except Exception as e:
-                    msg = ('No key file specified and unable to load key '
-                           'from "%s" %s' % (self.cert_file, e))
-                    raise exc.SSLConfigurationError(msg)
-
-        if self.key_file:
-            try:
-                self.context.use_privatekey_file(self.key_file)
-            except Exception as e:
-                msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
-                raise exc.SSLConfigurationError(msg)
-
-        if self.ca_certs:
-            try:
-                self.context.load_verify_locations(self.ca_certs)
-            except Exception as e:
-                msg = 'Unable to load CA from "%s" %s' % (self.ca_certs, e)
-                raise exc.SSLConfigurationError(msg)
-        else:
-            self.context.set_default_verify_paths()
-
-    def connect(self):
-        """
-        Connect to an SSL port using the OpenSSL library and apply
-        per-connection parameters.
-        """
-        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        if self.timeout is not None:
-            # SO_RCVTIMEO takes a (seconds, microseconds) pair; 0 microseconds
-            sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
-                            struct.pack('LL', self.timeout, 0))
-        self.sock = OpenSSLConnectionDelegator(self.context, sock)
-        self.sock.connect((self.host, self.port))
-
-    def close(self):
-        if self.sock:
-            # Remove the reference to the socket but don't close it yet.
-            # Response close will close both socket and associated
-            # file. Closing socket too soon will cause response
-            # reads to fail with socket IO error 'Bad file descriptor'.
-            self.sock = None
-        httplib.HTTPSConnection.close(self)
-
-
-class ResponseBodyIterator(object):
-    """A class that acts as an iterator over an HTTP response."""
-
-    def __init__(self, resp):
-        self.resp = resp
-
-    def __iter__(self):
-        while True:
-            yield next(self)
-
-    def next(self):
-        chunk = self.resp.read(CHUNKSIZE)
-        if chunk:
-            return chunk
-        else:
-            raise StopIteration()
-
-    __next__ = next
diff --git a/neutron/tests/tempest/common/isolated_creds.py b/neutron/tests/tempest/common/isolated_creds.py
deleted file mode 100644 (file)
index 163ce8a..0000000
+++ /dev/null
@@ -1,392 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from oslo_log import log as logging
-from tempest_lib.common.utils import data_utils
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.api import clients
-from neutron.tests.tempest.common import cred_provider
-from neutron.tests.tempest import config
-from neutron.tests.tempest import exceptions
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-class IsolatedCreds(cred_provider.CredentialProvider):
-
-    def __init__(self, name, password='pass', network_resources=None):
-        super(IsolatedCreds, self).__init__(name, password, network_resources)
-        self.network_resources = network_resources
-        self.isolated_creds = {}
-        self.isolated_net_resources = {}
-        self.ports = []
-        self.password = password
-        self.identity_admin_client, self.network_admin_client = (
-            self._get_admin_clients())
-
-    def _get_admin_clients(self):
-        """
-        Returns a tuple with instances of the following admin clients (in this
-        order):
-            identity
-            network
-        """
-        os = clients.AdminManager()
-        return os.identity_client, os.network_client
-
-    def _create_tenant(self, name, description):
-        tenant = self.identity_admin_client.create_tenant(
-            name=name, description=description)
-        return tenant
-
-    def _get_tenant_by_name(self, name):
-        tenant = self.identity_admin_client.get_tenant_by_name(name)
-        return tenant
-
-    def _create_user(self, username, password, tenant, email):
-        user = self.identity_admin_client.create_user(
-            username, password, tenant['id'], email)
-        return user
-
-    def _get_user(self, tenant, username):
-        user = self.identity_admin_client.get_user_by_username(
-            tenant['id'], username)
-        return user
-
-    def _list_roles(self):
-        roles = self.identity_admin_client.list_roles()
-        return roles
-
-    def _assign_user_role(self, tenant, user, role_name):
-        role = None
-        try:
-            roles = self._list_roles()
-            role = next(r for r in roles if r['name'] == role_name)
-        except StopIteration:
-            msg = 'No "%s" role found' % role_name
-            raise lib_exc.NotFound(msg)
-        try:
-            self.identity_admin_client.assign_user_role(tenant['id'],
-                                                        user['id'],
-                                                        role['id'])
-        except lib_exc.Conflict:
-            LOG.warning('Trying to add %s for user %s in tenant %s but they '
-                        'were already granted that role' % (role_name,
-                                                             user['name'],
-                                                             tenant['name']))
-
-    def _delete_user(self, user):
-        self.identity_admin_client.delete_user(user)
-
-    def _delete_tenant(self, tenant):
-        if CONF.service_available.neutron:
-            self._cleanup_default_secgroup(tenant)
-        self.identity_admin_client.delete_tenant(tenant)
-
-    def _create_creds(self, suffix="", admin=False, roles=None):
-        """Create random credentials under the following schema.
-
-        If the name contains a '.', it is the full class path of something
-        and we don't really care about it. If it doesn't, it is probably a
-        meaningful name, so use it as the naming root.
-
-        For logging purposes, '-user' and '-tenant' suffixes are long and
-        redundant, so don't append them; the random user number is enough
-        to identify the credentials.
-        """
-        if '.' in self.name:
-            root = ""
-        else:
-            root = self.name
-
-        tenant_name = data_utils.rand_name(root) + suffix
-        tenant_desc = tenant_name + "-desc"
-        tenant = self._create_tenant(name=tenant_name,
-                                     description=tenant_desc)
-
-        username = data_utils.rand_name(root) + suffix
-        email = data_utils.rand_name(root) + suffix + "@example.com"
-        user = self._create_user(username, self.password,
-                                 tenant, email)
-        if admin:
-            self._assign_user_role(tenant, user, CONF.identity.admin_role)
-        # Add roles specified in config file
-        for conf_role in CONF.auth.tempest_roles:
-            self._assign_user_role(tenant, user, conf_role)
-        # Add roles requested by caller
-        if roles:
-            for role in roles:
-                self._assign_user_role(tenant, user, role)
-        return self._get_credentials(user, tenant)
-
-    def _get_credentials(self, user, tenant):
-        return cred_provider.get_credentials(
-            username=user['name'], user_id=user['id'],
-            tenant_name=tenant['name'], tenant_id=tenant['id'],
-            password=self.password)
-
-    def _create_network_resources(self, tenant_id):
-        network = None
-        subnet = None
-        router = None
-        # Validate the requested network resource settings.
-        if self.network_resources:
-            if self.network_resources['router']:
-                if (not self.network_resources['subnet'] or
-                    not self.network_resources['network']):
-                    raise exceptions.InvalidConfiguration(
-                        'A router requires a subnet and network')
-            elif self.network_resources['subnet']:
-                if not self.network_resources['network']:
-                    raise exceptions.InvalidConfiguration(
-                        'A subnet requires a network')
-            elif self.network_resources['dhcp']:
-                raise exceptions.InvalidConfiguration('DHCP requires a subnet')
-
-        rand_name_root = data_utils.rand_name(self.name)
-        if not self.network_resources or self.network_resources['network']:
-            network_name = rand_name_root + "-network"
-            network = self._create_network(network_name, tenant_id)
-        try:
-            if not self.network_resources or self.network_resources['subnet']:
-                subnet_name = rand_name_root + "-subnet"
-                subnet = self._create_subnet(subnet_name, tenant_id,
-                                             network['id'])
-            if not self.network_resources or self.network_resources['router']:
-                router_name = rand_name_root + "-router"
-                router = self._create_router(router_name, tenant_id)
-                self._add_router_interface(router['id'], subnet['id'])
-        except Exception:
-            if router:
-                self._clear_isolated_router(router['id'], router['name'])
-            if subnet:
-                self._clear_isolated_subnet(subnet['id'], subnet['name'])
-            if network:
-                self._clear_isolated_network(network['id'], network['name'])
-            raise
-        return network, subnet, router
-
-    def _create_network(self, name, tenant_id):
-        resp_body = self.network_admin_client.create_network(
-            name=name, tenant_id=tenant_id)
-        return resp_body['network']
-
-    def _create_subnet(self, subnet_name, tenant_id, network_id):
-        base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
-        mask_bits = CONF.network.tenant_network_mask_bits
-        for subnet_cidr in base_cidr.subnet(mask_bits):
-            try:
-                if self.network_resources:
-                    resp_body = self.network_admin_client.\
-                        create_subnet(
-                            network_id=network_id, cidr=str(subnet_cidr),
-                            name=subnet_name,
-                            tenant_id=tenant_id,
-                            enable_dhcp=self.network_resources['dhcp'],
-                            ip_version=4)
-                else:
-                    resp_body = self.network_admin_client.\
-                        create_subnet(network_id=network_id,
-                                      cidr=str(subnet_cidr),
-                                      name=subnet_name,
-                                      tenant_id=tenant_id,
-                                      ip_version=4)
-                break
-            except lib_exc.BadRequest as e:
-                if 'overlaps with another subnet' not in str(e):
-                    raise
-        else:
-            message = 'Available CIDR for subnet creation could not be found'
-            raise Exception(message)
-        return resp_body['subnet']
-
-    def _create_router(self, router_name, tenant_id):
-        external_net_id = dict(
-            network_id=CONF.network.public_network_id)
-        resp_body = self.network_admin_client.create_router(
-            router_name,
-            external_gateway_info=external_net_id,
-            tenant_id=tenant_id)
-        return resp_body['router']
-
-    def _add_router_interface(self, router_id, subnet_id):
-        self.network_admin_client.add_router_interface_with_subnet_id(
-            router_id, subnet_id)
-
-    def get_primary_network(self):
-        return self.isolated_net_resources.get('primary')[0]
-
-    def get_primary_subnet(self):
-        return self.isolated_net_resources.get('primary')[1]
-
-    def get_primary_router(self):
-        return self.isolated_net_resources.get('primary')[2]
-
-    def get_admin_network(self):
-        return self.isolated_net_resources.get('admin')[0]
-
-    def get_admin_subnet(self):
-        return self.isolated_net_resources.get('admin')[1]
-
-    def get_admin_router(self):
-        return self.isolated_net_resources.get('admin')[2]
-
-    def get_alt_network(self):
-        return self.isolated_net_resources.get('alt')[0]
-
-    def get_alt_subnet(self):
-        return self.isolated_net_resources.get('alt')[1]
-
-    def get_alt_router(self):
-        return self.isolated_net_resources.get('alt')[2]
-
-    def get_credentials(self, credential_type):
-        if self.isolated_creds.get(str(credential_type)):
-            credentials = self.isolated_creds[str(credential_type)]
-        else:
-            if credential_type in ['primary', 'alt', 'admin']:
-                is_admin = (credential_type == 'admin')
-                credentials = self._create_creds(admin=is_admin)
-            else:
-                credentials = self._create_creds(roles=credential_type)
-            self.isolated_creds[str(credential_type)] = credentials
-            # Maintained until tests are ported
-            LOG.info("Acquired isolated creds:\n credentials: %s"
-                     % credentials)
-            if (CONF.service_available.neutron and
-                not CONF.baremetal.driver_enabled):
-                network, subnet, router = self._create_network_resources(
-                    credentials.tenant_id)
-                self.isolated_net_resources[str(credential_type)] = (
-                    network, subnet, router,)
-                LOG.info("Created isolated network resources for : \n"
-                         + " credentials: %s" % credentials)
-        return credentials
-
-    def get_primary_creds(self):
-        return self.get_credentials('primary')
-
-    def get_admin_creds(self):
-        return self.get_credentials('admin')
-
-    def get_alt_creds(self):
-        return self.get_credentials('alt')
-
-    def get_creds_by_roles(self, roles, force_new=False):
-        roles = list(set(roles))
-        # The str() of the roles list becomes the dict key under which the
-        # created credential set is stored in the isolated_creds dict.
-        exist_creds = self.isolated_creds.get(str(roles))
-        # If the force_new flag is True, two cred sets with the same roles
-        # are needed; handle this by moving the old set to a separate index
-        # so it is still tracked for cleanup.
-        if exist_creds and force_new:
-            new_index = str(roles) + '-' + str(len(self.isolated_creds))
-            self.isolated_creds[new_index] = exist_creds
-            del self.isolated_creds[str(roles)]
-            # Handle isolated neutron resources if they exist too
-            if CONF.service_available.neutron:
-                exist_net = self.isolated_net_resources.get(str(roles))
-                if exist_net:
-                    self.isolated_net_resources[new_index] = exist_net
-                    del self.isolated_net_resources[str(roles)]
-        return self.get_credentials(roles)
-
-    def _clear_isolated_router(self, router_id, router_name):
-        net_client = self.network_admin_client
-        try:
-            net_client.delete_router(router_id)
-        except lib_exc.NotFound:
-            LOG.warning('router with name: %s not found for delete' %
-                        router_name)
-
-    def _clear_isolated_subnet(self, subnet_id, subnet_name):
-        net_client = self.network_admin_client
-        try:
-            net_client.delete_subnet(subnet_id)
-        except lib_exc.NotFound:
-            LOG.warning('subnet with name: %s not found for delete' %
-                        subnet_name)
-
-    def _clear_isolated_network(self, network_id, network_name):
-        net_client = self.network_admin_client
-        try:
-            net_client.delete_network(network_id)
-        except lib_exc.NotFound:
-            LOG.warning('network with name: %s not found for delete' %
-                        network_name)
-
-    def _cleanup_default_secgroup(self, tenant):
-        net_client = self.network_admin_client
-        resp_body = net_client.list_security_groups(tenant_id=tenant,
-                                                    name="default")
-        secgroups_to_delete = resp_body['security_groups']
-        for secgroup in secgroups_to_delete:
-            try:
-                net_client.delete_security_group(secgroup['id'])
-            except lib_exc.NotFound:
-                LOG.warning('Security group %s, id %s not found for '
-                            'clean-up' % (secgroup['name'], secgroup['id']))
-
-    def _clear_isolated_net_resources(self):
-        net_client = self.network_admin_client
-        for cred in self.isolated_net_resources:
-            network, subnet, router = self.isolated_net_resources.get(cred)
-            LOG.debug("Clearing network: %(network)s, "
-                      "subnet: %(subnet)s, router: %(router)s",
-                      {'network': network, 'subnet': subnet, 'router': router})
-            if (not self.network_resources or
-                self.network_resources.get('router')):
-                try:
-                    net_client.remove_router_interface_with_subnet_id(
-                        router['id'], subnet['id'])
-                except lib_exc.NotFound:
-                    LOG.warning('router with name: %s not found for delete' %
-                                router['name'])
-                self._clear_isolated_router(router['id'], router['name'])
-            if (not self.network_resources or
-                self.network_resources.get('subnet')):
-                self._clear_isolated_subnet(subnet['id'], subnet['name'])
-            if (not self.network_resources or
-                self.network_resources.get('network')):
-                self._clear_isolated_network(network['id'], network['name'])
-        self.isolated_net_resources = {}
-
-    def clear_isolated_creds(self):
-        if not self.isolated_creds:
-            return
-        self._clear_isolated_net_resources()
-        for creds in self.isolated_creds.values():
-            try:
-                self._delete_user(creds.user_id)
-            except lib_exc.NotFound:
-                LOG.warn("user with name: %s not found for delete" %
-                         creds.username)
-            try:
-                self._delete_tenant(creds.tenant_id)
-            except lib_exc.NotFound:
-                LOG.warn("tenant with name: %s not found for delete" %
-                         creds.tenant_name)
-        self.isolated_creds = {}
-
-    def is_multi_user(self):
-        return True
-
-    def is_multi_tenant(self):
-        return True
-
-    def is_role_available(self, role):
-        return True
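
The subnet-allocation loop in _create_subnet above is worth isolating: it walks candidate CIDRs carved out of the tenant base network, retries on overlap, and uses Python's for/else to detect exhaustion. A runnable sketch, with assumed values standing in for tenant_network_cidr and tenant_network_mask_bits and a set standing in for the "overlaps with another subnet" error:

    import netaddr

    base_cidr = netaddr.IPNetwork('10.100.0.0/16')       # assumed base CIDR
    mask_bits = 28                                       # assumed mask bits
    already_used = {netaddr.IPNetwork('10.100.0.0/28')}  # simulated overlap

    for subnet_cidr in base_cidr.subnet(mask_bits):
        if subnet_cidr not in already_used:  # stands in for the retry on
            break                            # the BadRequest overlap error
    else:
        # The loop exhausted every candidate without a break.
        raise Exception('Available CIDR for subnet creation could not be found')

    print(subnet_cidr)  # 10.100.0.16/28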
diff --git a/neutron/tests/tempest/common/negative_rest_client.py b/neutron/tests/tempest/common/negative_rest_client.py
deleted file mode 100644 (file)
index 9058516..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-# (c) 2014 Deutsche Telekom AG
-# Copyright 2014 Red Hat, Inc.
-# Copyright 2014 NEC Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.tests.tempest.common import service_client
-from neutron.tests.tempest import config
-
-CONF = config.CONF
-
-
-class NegativeRestClient(service_client.ServiceClient):
-    """
-    Version of RestClient that does not raise exceptions.
-    """
-    def __init__(self, auth_provider, service):
-        region = self._get_region(service)
-        super(NegativeRestClient, self).__init__(auth_provider,
-                                                 service, region)
-
-    def _get_region(self, service):
-        """
-        Returns the region for a specific service
-        """
-        service_region = None
-        for cfgname in dir(CONF._config):
-            # Find all config.FOO.catalog_type and assume FOO is a service.
-            cfg = getattr(CONF, cfgname)
-            catalog_type = getattr(cfg, 'catalog_type', None)
-            if catalog_type == service:
-                service_region = getattr(cfg, 'region', None)
-        if not service_region:
-            service_region = CONF.identity.region
-        return service_region
-
-    def _error_checker(self, method, url,
-                       headers, body, resp, resp_body):
-        pass
-
-    def send_request(self, method, url_template, resources, body=None):
-        url = url_template % tuple(resources)
-        if method == "GET":
-            resp, body = self.get(url)
-        elif method == "POST":
-            resp, body = self.post(url, body)
-        elif method == "PUT":
-            resp, body = self.put(url, body)
-        elif method == "PATCH":
-            resp, body = self.patch(url, body)
-        elif method == "HEAD":
-            resp, body = self.head(url)
-        elif method == "DELETE":
-            resp, body = self.delete(url)
-        elif method == "COPY":
-            resp, body = self.copy(url)
-        else:
-            assert False, 'Unsupported method: %s' % method
-
-        return resp, body
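
The if/elif verb dispatch in send_request can equivalently be expressed as a lookup table; a runnable sketch with a stub client, since the real one needs a live endpoint (StubClient and its return values are illustrative):

    class StubClient(object):
        """Records calls; each verb method mirrors the rest_client surface."""
        def get(self, url): return ('GET', url)
        def head(self, url): return ('HEAD', url)
        def delete(self, url): return ('DELETE', url)
        def copy(self, url): return ('COPY', url)
        def post(self, url, body): return ('POST', url, body)
        def put(self, url, body): return ('PUT', url, body)
        def patch(self, url, body): return ('PATCH', url, body)

    def send_request(client, method, url_template, resources, body=None):
        url = url_template % tuple(resources)
        no_body = {'GET': client.get, 'HEAD': client.head,
                   'DELETE': client.delete, 'COPY': client.copy}
        with_body = {'POST': client.post, 'PUT': client.put,
                     'PATCH': client.patch}
        if method in no_body:
            return no_body[method](url)
        if method in with_body:
            return with_body[method](url, body)
        raise ValueError('Unsupported method: %s' % method)

    assert send_request(StubClient(), 'GET', '/subnets/%s', ('abc',)) == \
        ('GET', '/subnets/abc')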
diff --git a/neutron/tests/tempest/common/service_client.py b/neutron/tests/tempest/common/service_client.py
deleted file mode 100644 (file)
index ed19e89..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.common import rest_client
-
-from neutron.tests.tempest import config
-
-CONF = config.CONF
-
-
-class ServiceClient(rest_client.RestClient):
-
-    def __init__(self, auth_provider, service, region,
-                 endpoint_type=None, build_interval=None, build_timeout=None,
-                 disable_ssl_certificate_validation=None, ca_certs=None,
-                 trace_requests=None):
-
-        # TODO(oomichi): This params setting should be removed after all
-        # service clients pass these values, and we can make ServiceClient
-        # free from CONF values.
-        dscv = (disable_ssl_certificate_validation or
-                CONF.identity.disable_ssl_certificate_validation)
-        params = {
-            'disable_ssl_certificate_validation': dscv,
-            'ca_certs': ca_certs or CONF.identity.ca_certificates_file,
-            'trace_requests': trace_requests or CONF.debug.trace_requests
-        }
-
-        if endpoint_type is not None:
-            params.update({'endpoint_type': endpoint_type})
-        if build_interval is not None:
-            params.update({'build_interval': build_interval})
-        if build_timeout is not None:
-            params.update({'build_timeout': build_timeout})
-        super(ServiceClient, self).__init__(auth_provider, service, region,
-                                            **params)
-
-
-class ResponseBody(dict):
-    """Class that wraps an http response and dict body into a single value.
-
-    Callers that receive this object will normally use it as a dict but
-    can extract the response if needed.
-    """
-
-    def __init__(self, response, body=None):
-        body_data = body or {}
-        self.update(body_data)
-        self.response = response
-
-    def __str__(self):
-        body = super(ResponseBody, self).__str__()
-        return "response: %s\nBody: %s" % (self.response, body)
-
-
-class ResponseBodyData(object):
-    """Class that wraps an http response and string data into a single value.
-    """
-
-    def __init__(self, response, data):
-        self.response = response
-        self.data = data
-
-    def __str__(self):
-        return "response: %s\nBody: %s" % (self.response, self.data)
-
-
-class ResponseBodyList(list):
-    """Class that wraps an http response and list body into a single value.
-
-    Callers that receive this object will normally use it as a list but
-    can extract the response if needed.
-    """
-
-    def __init__(self, response, body=None):
-        body_data = body or []
-        self.extend(body_data)
-        self.response = response
-
-    def __str__(self):
-        body = super(ResponseBodyList, self).__str__()
-        return "response: %s\nBody: %s" % (self.response, body)
diff --git a/neutron/tests/tempest/common/ssh.py b/neutron/tests/tempest/common/ssh.py
deleted file mode 100644 (file)
index 00febc6..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import cStringIO
-import select
-import socket
-import time
-import warnings
-
-from oslo_log import log as logging
-import six
-
-from neutron.tests.tempest import exceptions
-
-
-with warnings.catch_warnings():
-    warnings.simplefilter("ignore")
-    import paramiko
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Client(object):
-
-    def __init__(self, host, username, password=None, timeout=300, pkey=None,
-                 channel_timeout=10, look_for_keys=False, key_filename=None):
-        self.host = host
-        self.username = username
-        self.password = password
-        if isinstance(pkey, six.string_types):
-            pkey = paramiko.RSAKey.from_private_key(
-                cStringIO.StringIO(str(pkey)))
-        self.pkey = pkey
-        self.look_for_keys = look_for_keys
-        self.key_filename = key_filename
-        self.timeout = int(timeout)
-        self.channel_timeout = float(channel_timeout)
-        self.buf_size = 1024
-
-    def _get_ssh_connection(self, sleep=1.5, backoff=1):
-        """Returns an ssh connection to the specified host."""
-        bsleep = sleep
-        ssh = paramiko.SSHClient()
-        ssh.set_missing_host_key_policy(
-            paramiko.AutoAddPolicy())
-        _start_time = time.time()
-        if self.pkey is not None:
-            LOG.info("Creating ssh connection to '%s' as '%s'"
-                     " with public key authentication",
-                     self.host, self.username)
-        else:
-            LOG.info("Creating ssh connection to '%s' as '%s'"
-                     " with password %s",
-                     self.host, self.username, str(self.password))
-        attempts = 0
-        while True:
-            try:
-                ssh.connect(self.host, username=self.username,
-                            password=self.password,
-                            look_for_keys=self.look_for_keys,
-                            key_filename=self.key_filename,
-                            timeout=self.channel_timeout, pkey=self.pkey)
-                LOG.info("ssh connection to %s@%s successfuly created",
-                         self.username, self.host)
-                return ssh
-            except (socket.error,
-                    paramiko.SSHException) as e:
-                if self._is_timed_out(_start_time):
-                    LOG.exception("Failed to establish authenticated ssh"
-                                  " connection to %s@%s after %d attempts",
-                                  self.username, self.host, attempts)
-                    raise exceptions.SSHTimeout(host=self.host,
-                                                user=self.username,
-                                                password=self.password)
-                bsleep += backoff
-                attempts += 1
-                LOG.warning("Failed to establish authenticated ssh"
-                            " connection to %s@%s (%s). Number attempts: %s."
-                            " Retry after %d seconds.",
-                            self.username, self.host, e, attempts, bsleep)
-                time.sleep(bsleep)
-
-    def _is_timed_out(self, start_time):
-        return (time.time() - self.timeout) > start_time
-
-    def exec_command(self, cmd):
-        """
-        Execute the specified command on the server.
-
-        Note that this method reads the whole command output into memory,
-        so it should not be used for large outputs.
-
-        :returns: data read from standard output of the command.
-        :raises: SSHExecCommandFailed if the command returns a nonzero
-                 exit status; the exception carries the status and stderr.
-        """
-        ssh = self._get_ssh_connection()
-        transport = ssh.get_transport()
-        channel = transport.open_session()
-        channel.fileno()  # Register event pipe
-        channel.exec_command(cmd)
-        channel.shutdown_write()
-        out_data = []
-        err_data = []
-        poll = select.poll()
-        poll.register(channel, select.POLLIN)
-        start_time = time.time()
-
-        while True:
-            ready = poll.poll(self.channel_timeout)
-            if not any(ready):
-                if not self._is_timed_out(start_time):
-                    continue
-                raise exceptions.TimeoutException(
-                    "Command: '{0}' executed on host '{1}'.".format(
-                        cmd, self.host))
-            if not ready[0]:  # If there is nothing to read.
-                continue
-            out_chunk = err_chunk = None
-            if channel.recv_ready():
-                out_chunk = channel.recv(self.buf_size)
-                out_data.append(out_chunk)
-            if channel.recv_stderr_ready():
-                err_chunk = channel.recv_stderr(self.buf_size)
-                err_data.append(err_chunk)
-            if channel.closed and not err_chunk and not out_chunk:
-                break
-        exit_status = channel.recv_exit_status()
-        if exit_status != 0:
-            raise exceptions.SSHExecCommandFailed(
-                command=cmd, exit_status=exit_status,
-                strerror=''.join(err_data))
-        return ''.join(out_data)
-
-    def test_connection_auth(self):
-        """Raises an exception when we can not connect to server via ssh."""
-        connection = self._get_ssh_connection()
-        connection.close()
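
The connection loop above implements linear backoff: the sleep grows by `backoff` after every failure until the overall timeout elapses. A generic, runnable sketch of the same shape (the function name and the flaky() demo are illustrative):

    import time

    def retry_with_backoff(connect, timeout=300, sleep=1.5, backoff=1):
        """Retry connect() with linearly growing sleeps until timeout."""
        start = time.time()
        while True:
            try:
                return connect()
            except (IOError, OSError):
                # Same check as _is_timed_out above: give up once the
                # elapsed time exceeds the overall timeout.
                if (time.time() - timeout) > start:
                    raise
                time.sleep(sleep)
                sleep += backoff  # linear, not exponential, growth

    state = {'calls': 0}

    def flaky():
        state['calls'] += 1
        if state['calls'] < 3:
            raise IOError('not yet')
        return 'connected'

    assert retry_with_backoff(flaky, sleep=0.01, backoff=0.01) == 'connected'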
diff --git a/neutron/tests/tempest/common/tempest_fixtures.py b/neutron/tests/tempest/common/tempest_fixtures.py
deleted file mode 100644 (file)
index d416857..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_concurrency.fixture import lockutils
-
-
-class LockFixture(lockutils.LockFixture):
-    def __init__(self, name):
-        super(LockFixture, self).__init__(name, 'tempest-')
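
A sketch of what the fixture does under the hood, assuming oslo.concurrency is installed; 'compute_quotas' is an illustrative lock name:

    from oslo_concurrency import lockutils

    # Roughly what useFixture(LockFixture('compute_quotas')) provides for
    # the duration of a test: hold the named lock with the 'tempest-'
    # prefix so other holders of the same name wait their turn.
    with lockutils.lock('compute_quotas', 'tempest-'):
        pass  # critical section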
diff --git a/neutron/tests/tempest/common/utils/__init__.py b/neutron/tests/tempest/common/utils/__init__.py
deleted file mode 100644 (file)
index 04d898d..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-PING_IPV4_COMMAND = 'ping -c 3 '
-PING_IPV6_COMMAND = 'ping6 -c 3 '
-PING_PACKET_LOSS_REGEX = r'(\d{1,3})\.?\d*% packet loss'
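
A quick, runnable check of how the packet-loss pattern is applied to typical ping output (the sample line is illustrative):

    import re

    PING_PACKET_LOSS_REGEX = r'(\d{1,3})\.?\d*% packet loss'

    sample = '3 packets transmitted, 3 received, 0% packet loss, time 2004ms'
    match = re.search(PING_PACKET_LOSS_REGEX, sample)
    assert match is not None
    assert int(match.group(1)) == 0  # integer part of the loss percentage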
diff --git a/neutron/tests/tempest/common/utils/data_utils.py b/neutron/tests/tempest/common/utils/data_utils.py
deleted file mode 100644 (file)
index d441778..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import itertools
-import netaddr
-import random
-import uuid
-
-
-def rand_uuid():
-    return str(uuid.uuid4())
-
-
-def rand_uuid_hex():
-    return uuid.uuid4().hex
-
-
-def rand_name(name=''):
-    randbits = str(random.randint(1, 0x7fffffff))
-    if name:
-        return name + '-' + randbits
-    else:
-        return randbits
-
-
-def rand_url():
-    randbits = str(random.randint(1, 0x7fffffff))
-    return 'https://url-' + randbits + '.com'
-
-
-def rand_int_id(start=0, end=0x7fffffff):
-    return random.randint(start, end)
-
-
-def rand_mac_address():
-    """Generate an Ethernet MAC address."""
-    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
-    #             bridge mac addresses don't change, but it appears to
-    #             conflict with libvirt, so we use the next highest octet
-    #             that has the unicast and locally administered bits set
-    #             properly: 0xfa.
-    #             Discussion: https://bugs.launchpad.net/nova/+bug/921838
-    mac = [0xfa, 0x16, 0x3e,
-           random.randint(0x00, 0xff),
-           random.randint(0x00, 0xff),
-           random.randint(0x00, 0xff)]
-    return ':'.join(["%02x" % x for x in mac])
-
-
-def parse_image_id(image_ref):
-    """Return the image id from a given image ref."""
-    return image_ref.rsplit('/')[-1]
-
-
-def arbitrary_string(size=4, base_text=None):
-    """
-    Return size characters from base_text, repeating the base_text infinitely
-    if needed.
-    """
-    if not base_text:
-        base_text = 'test'
-    return ''.join(itertools.islice(itertools.cycle(base_text), size))
-
-
-def random_bytes(size=1024):
-    """
-    Return size randomly selected bytes as a string.
-    """
-    return ''.join([chr(random.randint(0, 255))
-                    for i in range(size)])
-
-
-def get_ipv6_addr_by_EUI64(cidr, mac):
-    # Check if the prefix is an IPv4 address.
-    is_ipv4 = netaddr.valid_ipv4(cidr)
-    if is_ipv4:
-        msg = "Unable to generate IP address by EUI64 for IPv4 prefix"
-        raise TypeError(msg)
-    try:
-        eui64 = int(netaddr.EUI(mac).eui64())
-        prefix = netaddr.IPNetwork(cidr)
-        return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57))
-    except (ValueError, netaddr.AddrFormatError):
-        raise TypeError('Bad prefix or mac format for generating IPv6 '
-                        'address by EUI-64: %(prefix)s, %(mac)s'
-                        % {'prefix': cidr, 'mac': mac})
-    except TypeError:
-        raise TypeError('Bad prefix type for generating IPv6 address by '
-                        'EUI-64: %s' % cidr)
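
The EUI-64 computation above leans on operator precedence: + binds tighter than ^, so the expression folds the EUI-64 interface id into the prefix first and then flips the universal/local bit (bit 57) of the result. A runnable check with an illustrative prefix and an OpenStack-style MAC:

    import netaddr

    def ipv6_from_eui64(cidr, mac):
        # (prefix.first + eui64) ^ (1 << 57): fold the EUI-64 interface id
        # into the prefix, then flip the universal/local bit.
        eui64 = int(netaddr.EUI(mac).eui64())
        prefix = netaddr.IPNetwork(cidr)
        return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57))

    addr = ipv6_from_eui64('2001:db8::/64', 'fa:16:3e:00:11:22')
    assert str(addr) == '2001:db8::f816:3eff:fe00:1122'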
diff --git a/neutron/tests/tempest/common/utils/file_utils.py b/neutron/tests/tempest/common/utils/file_utils.py
deleted file mode 100644 (file)
index 43083f4..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-def have_effective_read_access(path):
-    try:
-        fh = open(path, "rb")
-    except IOError:
-        return False
-    fh.close()
-    return True
diff --git a/neutron/tests/tempest/common/utils/misc.py b/neutron/tests/tempest/common/utils/misc.py
deleted file mode 100644 (file)
index b97dd86..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import inspect
-import re
-
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def singleton(cls):
-    """Simple wrapper for classes that should only have a single instance."""
-    instances = {}
-
-    def getinstance():
-        if cls not in instances:
-            instances[cls] = cls()
-        return instances[cls]
-    return getinstance
-
-
-def find_test_caller():
-    """Find the caller class and test name.
-
-    Because we know that the interesting things that call us are
-    test_* methods, and various kinds of setUp / tearDown, we
-    can look through the call stack to find appropriate methods,
-    and the class we were in when those were called.
-    """
-    caller_name = None
-    names = []
-    frame = inspect.currentframe()
-    is_cleanup = False
-    # Start climbing the ladder until we hit a good method
-    while True:
-        try:
-            frame = frame.f_back
-            name = frame.f_code.co_name
-            names.append(name)
-            if re.search("^(test_|setUp|tearDown)", name):
-                cname = ""
-                if 'self' in frame.f_locals:
-                    cname = frame.f_locals['self'].__class__.__name__
-                if 'cls' in frame.f_locals:
-                    cname = frame.f_locals['cls'].__name__
-                caller_name = cname + ":" + name
-                break
-            elif re.search("^_run_cleanup", name):
-                is_cleanup = True
-            elif name == 'main':
-                caller_name = 'main'
-                break
-            else:
-                cname = ""
-                if 'self' in frame.f_locals:
-                    cname = frame.f_locals['self'].__class__.__name__
-                if 'cls' in frame.f_locals:
-                    cname = frame.f_locals['cls'].__name__
-
-                # The fact that we are running cleanups is indicated pretty
-                # deep in the stack, so once we see it, start looking for a
-                # real class name and declare victory as soon as we find one.
-                if is_cleanup and cname:
-                    if not re.search("^RunTest", cname):
-                        caller_name = cname + ":_run_cleanups"
-                        break
-        except Exception:
-            break
-    # prevents frame leaks
-    del frame
-    if caller_name is None:
-        LOG.debug("Sane call name not found in %s" % names)
-    return caller_name
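
A runnable demonstration of the singleton decorator above; the Config class is a hypothetical consumer, and the decorator body is copied from the deleted module so the demo is self-contained:

    def singleton(cls):
        """Simple wrapper for classes that should only have one instance."""
        instances = {}

        def getinstance():
            if cls not in instances:
                instances[cls] = cls()
            return instances[cls]
        return getinstance

    @singleton
    class Config(object):
        """Hypothetical consumer; every call yields the same instance."""
        def __init__(self):
            self.values = {}

    a = Config()
    b = Config()
    a.values['region'] = 'RegionOne'
    assert a is b and b.values['region'] == 'RegionOne'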
diff --git a/neutron/tests/tempest/common/waiters.py b/neutron/tests/tempest/common/waiters.py
deleted file mode 100644 (file)
index caa9b37..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import time
-
-from oslo_log import log as logging
-from tempest_lib.common.utils import misc as misc_utils
-
-from neutron.tests.tempest import config
-from neutron.tests.tempest import exceptions
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-# NOTE(afazekas): This function needs to know a token and a subject.
-def wait_for_server_status(client, server_id, status, ready_wait=True,
-                           extra_timeout=0, raise_on_error=True):
-    """Waits for a server to reach a given status."""
-
-    def _get_task_state(body):
-        return body.get('OS-EXT-STS:task_state', None)
-
-    # NOTE(afazekas): UNKNOWN status possible on ERROR
-    # or in a very early stage.
-    body = client.get_server(server_id)
-    old_status = server_status = body['status']
-    old_task_state = task_state = _get_task_state(body)
-    start_time = int(time.time())
-    timeout = client.build_timeout + extra_timeout
-    while True:
-        # NOTE(afazekas): Now the BUILD status is only reached
-        # during the UNKNOWN->ACTIVE transition.
-        # TODO(afazekas): enumerate and validate the stable status set
-        if status == 'BUILD' and server_status != 'UNKNOWN':
-            return
-        if server_status == status:
-            if ready_wait:
-                if status == 'BUILD':
-                    return
-                # NOTE(afazekas): The instance is in "ready for action state"
-                # when no task is in progress
-                # NOTE(afazekas): Converted to string because of the XML
-                # responses
-                if str(task_state) == "None":
-                    # Without the state API extension, 3 sec is usually enough
-                    time.sleep(CONF.compute.ready_wait)
-                    return
-            else:
-                return
-
-        time.sleep(client.build_interval)
-        body = client.get_server(server_id)
-        server_status = body['status']
-        task_state = _get_task_state(body)
-        if (server_status != old_status) or (task_state != old_task_state):
-            LOG.info('State transition "%s" ==> "%s" after %d second wait',
-                     '/'.join((old_status, str(old_task_state))),
-                     '/'.join((server_status, str(task_state))),
-                     time.time() - start_time)
-        if (server_status == 'ERROR') and raise_on_error:
-            if 'fault' in body:
-                raise exceptions.BuildErrorException(body['fault'],
-                                                     server_id=server_id)
-            else:
-                raise exceptions.BuildErrorException(server_id=server_id)
-
-        timed_out = int(time.time()) - start_time >= timeout
-
-        if timed_out:
-            expected_task_state = 'None' if ready_wait else 'n/a'
-            message = ('Server %(server_id)s failed to reach %(status)s '
-                       'status and task state "%(expected_task_state)s" '
-                       'within the required time (%(timeout)s s).' %
-                       {'server_id': server_id,
-                        'status': status,
-                        'expected_task_state': expected_task_state,
-                        'timeout': timeout})
-            message += ' Current status: %s.' % server_status
-            message += ' Current task state: %s.' % task_state
-            caller = misc_utils.find_test_caller()
-            if caller:
-                message = '(%s) %s' % (caller, message)
-            raise exceptions.TimeoutException(message)
-        old_status = server_status
-        old_task_state = task_state
-
-
-def wait_for_image_status(client, image_id, status):
-    """Waits for an image to reach a given status.
-
-    The client should have a get_image(image_id) method to get the image.
-    The client should also have build_interval and build_timeout attributes.
-    """
-    image = client.get_image(image_id)
-    start = int(time.time())
-
-    while image['status'] != status:
-        time.sleep(client.build_interval)
-        image = client.get_image(image_id)
-        status_curr = image['status']
-        if status_curr == 'ERROR':
-            raise exceptions.AddImageException(image_id=image_id)
-
-        # check the status again to avoid a false negative where we hit
-        # the timeout at the same time that the image reached the expected
-        # status
-        if status_curr == status:
-            return
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Image %(image_id)s failed to reach %(status)s state '
-                       '(current state %(status_curr)s) '
-                       'within the required time (%(timeout)s s).' %
-                       {'image_id': image_id,
-                        'status': status,
-                        'status_curr': status_curr,
-                        'timeout': client.build_timeout})
-            caller = misc_utils.find_test_caller()
-            if caller:
-                message = '(%s) %s' % (caller, message)
-            raise exceptions.TimeoutException(message)
-
-
-def wait_for_bm_node_status(client, node_id, attr, status):
-    """Waits for a baremetal node attribute to reach given status.
-
-    The client should have a show_node(node_uuid) method to get the node.
-    """
-    _, node = client.show_node(node_id)
-    start = int(time.time())
-
-    while node[attr] != status:
-        time.sleep(client.build_interval)
-        _, node = client.show_node(node_id)
-        status_curr = node[attr]
-        if status_curr == status:
-            return
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
-                       'within the required time (%(timeout)s s).' %
-                       {'node_id': node_id,
-                        'attr': attr,
-                        'status': status,
-                        'timeout': client.build_timeout})
-            message += ' Current state of %s: %s.' % (attr, status_curr)
-            caller = misc_utils.find_test_caller()
-            if caller:
-                message = '(%s) %s' % (caller, message)
-            raise exceptions.TimeoutException(message)
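
All three waiters share one shape: poll a getter every build_interval seconds, return as soon as the predicate holds, and raise a timeout error once build_timeout elapses. A distilled, runnable sketch (names are illustrative, and RuntimeError stands in for the suite's TimeoutException):

    import time

    def wait_for(predicate, build_interval=0.1, build_timeout=2.0):
        """Poll predicate() until it is true or build_timeout elapses."""
        start = int(time.time())
        while not predicate():
            time.sleep(build_interval)
            if int(time.time()) - start >= build_timeout:
                raise RuntimeError('timed out after %ss' % build_timeout)

    # Becomes true on the third poll.
    state = {'n': 0}

    def ready():
        state['n'] += 1
        return state['n'] >= 3

    wait_for(ready)  # returns without raising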
diff --git a/neutron/tests/tempest/config.py b/neutron/tests/tempest/config.py
deleted file mode 100644 (file)
index 06fd541..0000000
+++ /dev/null
@@ -1,1237 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from __future__ import print_function
-
-import os
-
-from oslo_config import cfg
-
-from oslo_log import log as logging
-
-
-def register_opt_group(conf, opt_group, options):
-    conf.register_group(opt_group)
-    for opt in options:
-        conf.register_opt(opt, group=opt_group.name)
-
-
-auth_group = cfg.OptGroup(name='auth',
-                          title="Options for authentication and credentials")
-
-
-AuthGroup = [
-    cfg.StrOpt('test_accounts_file',
-               default='etc/accounts.yaml',
-               help="Path to the yaml file that contains the list of "
-                    "credentials to use for running tests"),
-    cfg.BoolOpt('allow_tenant_isolation',
-                default=True,
-                help="Allows test cases to create/destroy tenants and "
-                     "users. This option requires that OpenStack Identity "
-                     "API admin credentials are known. If false, isolated "
-                     "test cases and parallel execution, can still be "
-                     "achieved configuring a list of test accounts",
-                deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation',
-                                                   group='compute'),
-                                 cfg.DeprecatedOpt('allow_tenant_isolation',
-                                                   group='orchestration')]),
-    cfg.BoolOpt('locking_credentials_provider',
-                default=False,
-                help="If set to True it enables the Accounts provider, "
-                     "which locks credentials to allow for parallel execution "
-                     "with pre-provisioned accounts. It can only be used to "
-                     "run tests that ensure credentials cleanup happens. "
-                     "It requires at least `2 * CONC` distinct accounts "
-                     "configured in `test_accounts_file`, with CONC == the "
-                     "number of concurrent test processes."),
-    cfg.ListOpt('tempest_roles',
-                help="Roles to assign to all users created by tempest",
-                default=[]),
-    cfg.StrOpt('admin_username',
-               help="Administrative Username to use for "
-                    "Keystone API requests."),
-    cfg.StrOpt('admin_tenant_name',
-               help="Administrative Tenant name to use for Keystone API "
-                    "requests."),
-    cfg.StrOpt('admin_password',
-               help="API key to use when authenticating as admin.",
-               secret=True),
-    cfg.StrOpt('admin_domain_name',
-               help="Admin domain name for authentication (Keystone V3)."
-                    "The same domain applies to user and project"),
-]
-
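
Assuming the register_opt_group helper and the auth group definitions above are in scope, registration and default lookup work as sketched here (a fresh ConfigOpts keeps the sketch self-contained; the real code registers into a global CONF):

    from oslo_config import cfg

    conf = cfg.ConfigOpts()
    register_opt_group(conf, auth_group, AuthGroup)

    assert conf.auth.tempest_roles == []             # ListOpt default
    assert conf.auth.allow_tenant_isolation is True  # BoolOpt default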
-identity_group = cfg.OptGroup(name='identity',
-                              title="Keystone Configuration Options")
-
-IdentityGroup = [
-    cfg.StrOpt('catalog_type',
-               default='identity',
-               help="Catalog type of the Identity service."),
-    cfg.BoolOpt('disable_ssl_certificate_validation',
-                default=False,
-                help="Set to True if using self-signed SSL certificates."),
-    cfg.StrOpt('ca_certificates_file',
-               help='Specify a CA bundle file to use in verifying a '
-                    'TLS (https) server certificate.'),
-    cfg.StrOpt('uri',
-               help="Full URI of the OpenStack Identity API (Keystone), v2"),
-    cfg.StrOpt('uri_v3',
-               help='Full URI of the OpenStack Identity API (Keystone), v3'),
-    cfg.StrOpt('auth_version',
-               default='v2',
-               help="Identity API version to be used for authentication "
-                    "for API tests."),
-    cfg.StrOpt('region',
-               default='RegionOne',
-               help="The identity region name to use. Also used as the other "
-                    "services' region name unless they are set explicitly. "
-                    "If no such region is found in the service catalog, the "
-                    "first found one is used."),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the identity service."),
-    cfg.StrOpt('username',
-               help="Username to use for Nova API requests."),
-    cfg.StrOpt('tenant_name',
-               help="Tenant name to use for Nova API requests."),
-    cfg.StrOpt('admin_role',
-               default='admin',
-               help="Role required to administrate keystone."),
-    cfg.StrOpt('password',
-               help="API key to use when authenticating.",
-               secret=True),
-    cfg.StrOpt('domain_name',
-               help="Domain name for authentication (Keystone V3)."
-                    "The same domain applies to user and project"),
-    cfg.StrOpt('alt_username',
-               help="Username of alternate user to use for Nova API "
-                    "requests."),
-    cfg.StrOpt('alt_tenant_name',
-               help="Alternate user's Tenant name to use for Nova API "
-                    "requests."),
-    cfg.StrOpt('alt_password',
-               help="API key to use when authenticating as alternate user.",
-               secret=True),
-    cfg.StrOpt('alt_domain_name',
-               help="Alternate domain name for authentication (Keystone V3)."
-                    "The same domain applies to user and project"),
-]
-
-identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
-                                      title='Enabled Identity Features')
-
-IdentityFeatureGroup = [
-    cfg.BoolOpt('trust',
-                default=True,
-                help='Does the identity service have delegation and '
-                     'impersonation enabled'),
-    cfg.BoolOpt('api_v2',
-                default=True,
-                help='Is the v2 identity API enabled'),
-    cfg.BoolOpt('api_v3',
-                default=True,
-                help='Is the v3 identity API enabled'),
-]
-
-compute_group = cfg.OptGroup(name='compute',
-                             title='Compute Service Options')
-
-ComputeGroup = [
-    cfg.StrOpt('image_ref',
-               help="Valid primary image reference to be used in tests. "
-                    "This is a required option"),
-    cfg.StrOpt('image_ref_alt',
-               help="Valid secondary image reference to be used in tests. "
-                    "This is a required option, but if only one image is "
-                    "available duplicate the value of image_ref above"),
-    cfg.StrOpt('flavor_ref',
-               default="1",
-               help="Valid primary flavor to use in tests."),
-    cfg.StrOpt('flavor_ref_alt',
-               default="2",
-               help='Valid secondary flavor to be used in tests.'),
-    cfg.StrOpt('image_ssh_user',
-               default="root",
-               help="User name used to authenticate to an instance."),
-    cfg.StrOpt('image_ssh_password',
-               default="password",
-               help="Password used to authenticate to an instance."),
-    cfg.StrOpt('image_alt_ssh_user',
-               default="root",
-               help="User name used to authenticate to an instance using "
-                    "the alternate image."),
-    cfg.StrOpt('image_alt_ssh_password',
-               default="password",
-               help="Password used to authenticate to an instance using "
-                    "the alternate image."),
-    cfg.IntOpt('build_interval',
-               default=1,
-               help="Time in seconds between build status checks."),
-    cfg.IntOpt('build_timeout',
-               default=300,
-               help="Timeout in seconds to wait for an instance to build. "
-                    "Other services that do not define build_timeout will "
-                    "inherit this value."),
-    cfg.BoolOpt('run_ssh',
-                default=False,
-                help="Should the tests ssh to instances?"),
-    cfg.StrOpt('ssh_auth_method',
-               default='keypair',
-               help="Auth method used for authenticate to the instance. "
-                    "Valid choices are: keypair, configured, adminpass. "
-                    "keypair: start the servers with an ssh keypair. "
-                    "configured: use the configured user and password. "
-                    "adminpass: use the injected adminPass. "
-                    "disabled: avoid using ssh when it is an option."),
-    cfg.StrOpt('ssh_connect_method',
-               default='fixed',
-               help="How to connect to the instance? "
-                    "fixed: using the first ip belongs the fixed network "
-                    "floating: creating and using a floating ip"),
-    cfg.StrOpt('ssh_user',
-               default='root',
-               help="User name used to authenticate to an instance."),
-    cfg.IntOpt('ping_timeout',
-               default=120,
-               help="Timeout in seconds to wait for ping to "
-                    "succeed."),
-    cfg.IntOpt('ssh_timeout',
-               default=300,
-               help="Timeout in seconds to wait for authentication to "
-                    "succeed."),
-    cfg.IntOpt('ready_wait',
-               default=0,
-               help="Additional wait time for clean state, when there is "
-                    "no OS-EXT-STS extension available"),
-    cfg.IntOpt('ssh_channel_timeout',
-               default=60,
-               help="Timeout in seconds to wait for output from ssh "
-                    "channel."),
-    cfg.StrOpt('fixed_network_name',
-               default='private',
-               help="Name of the fixed network that is visible to all test "
-                    "tenants."),
-    cfg.StrOpt('network_for_ssh',
-               default='public',
-               help="Network used for SSH connections. Ignored if "
-                    "use_floatingip_for_ssh=true or run_ssh=false."),
-    cfg.IntOpt('ip_version_for_ssh',
-               default=4,
-               help="IP version used for SSH connections."),
-    cfg.BoolOpt('use_floatingip_for_ssh',
-                default=True,
-                help="Does SSH use Floating IPs?"),
-    cfg.StrOpt('catalog_type',
-               default='compute',
-               help="Catalog type of the Compute service."),
-    cfg.StrOpt('region',
-               default='',
-               help="The compute region name to use. If empty, the value "
-                    "of identity.region is used instead. If no such region "
-                    "is found in the service catalog, the first found one is "
-                    "used."),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the compute service."),
-    cfg.StrOpt('path_to_private_key',
-               help="Path to a private key file for SSH access to remote "
-                    "hosts"),
-    cfg.StrOpt('volume_device_name',
-               default='vdb',
-               help="Expected device name when a volume is attached to "
-                    "an instance"),
-    cfg.IntOpt('shelved_offload_time',
-               default=0,
-               help='Time in seconds before a shelved instance is eligible '
-                    'for removal from a host: -1 to never offload, 0 to '
-                    'offload when shelved. This should match the value in '
-                    'nova.conf; some tests will run for as long as this '
-                    'time.'),
-    cfg.StrOpt('floating_ip_range',
-               default='10.0.0.0/29',
-               help='Unallocated floating IP range, which will be used to '
-                    'test the floating IP bulk feature for CRUD operation. '
-                    'This block must not overlap an existing floating IP '
-                    'pool.')
-]
-
-compute_features_group = cfg.OptGroup(name='compute-feature-enabled',
-                                      title="Enabled Compute Service Features")
-
-ComputeFeaturesGroup = [
-    cfg.BoolOpt('disk_config',
-                default=True,
-                help="If false, skip disk config tests"),
-    cfg.ListOpt('api_extensions',
-                default=['all'],
-                help='A list of enabled compute extensions, with the special '
-                     'entry "all" indicating every extension is enabled. '
-                     'Each extension should be specified by its alias name. '
-                     'An empty list indicates all extensions are disabled.'),
-    cfg.BoolOpt('change_password',
-                default=False,
-                help="Does the test environment support changing the admin "
-                     "password?"),
-    cfg.BoolOpt('console_output',
-                default=True,
-                help="Does the test environment support obtaining instance "
-                     "serial console output?"),
-    cfg.BoolOpt('resize',
-                default=False,
-                help="Does the test environment support resizing?"),
-    cfg.BoolOpt('pause',
-                default=True,
-                help="Does the test environment support pausing?"),
-    cfg.BoolOpt('shelve',
-                default=True,
-                help="Does the test environment support shelving/unshelving?"),
-    cfg.BoolOpt('suspend',
-                default=True,
-                help="Does the test environment support suspend/resume?"),
-    cfg.BoolOpt('live_migration',
-                default=True,
-                help="Does the test environment support live migration "
-                     "available?"),
-    cfg.BoolOpt('block_migration_for_live_migration',
-                default=False,
-                help="Does the test environment use block devices for live "
-                     "migration"),
-    cfg.BoolOpt('block_migrate_cinder_iscsi',
-                default=False,
-                help="Does the test environment block migration support "
-                     "cinder iSCSI volumes"),
-    cfg.BoolOpt('vnc_console',
-                default=False,
-                help='Enable VNC console. This configuration value should '
-                     'be the same as [nova.vnc]->vnc_enabled in nova.conf'),
-    cfg.BoolOpt('spice_console',
-                default=False,
-                help='Enable Spice console. This configuration value should '
-                     'be the same as [nova.spice]->enabled in nova.conf'),
-    cfg.BoolOpt('rdp_console',
-                default=False,
-                help='Enable RDP console. This configuration value should '
-                     'be the same as [nova.rdp]->enabled in nova.conf'),
-    cfg.BoolOpt('rescue',
-                default=True,
-                help='Does the test environment support instance rescue '
-                     'mode?'),
-    cfg.BoolOpt('enable_instance_password',
-                default=True,
-                help='Enables returning of the instance password by the '
-                     'relevant server API calls such as create, rebuild '
-                     'or rescue.'),
-    cfg.BoolOpt('interface_attach',
-                default=True,
-                help='Does the test environment support dynamic network '
-                     'interface attachment?'),
-    cfg.BoolOpt('snapshot',
-                default=True,
-                help='Does the test environment support creating snapshot '
-                     'images of running instances?'),
-    cfg.BoolOpt('ec2_api',
-                default=True,
-                help='Does the test environment have the EC2 API running?')
-]
-
-
-image_group = cfg.OptGroup(name='image',
-                           title="Image Service Options")
-
-ImageGroup = [
-    cfg.StrOpt('catalog_type',
-               default='image',
-               help='Catalog type of the Image service.'),
-    cfg.StrOpt('region',
-               default='',
-               help="The image region name to use. If empty, the value "
-                    "of identity.region is used instead. If no such region "
-                    "is found in the service catalog, the first found one is "
-                    "used."),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the image service."),
-    cfg.StrOpt('http_image',
-               default='http://download.cirros-cloud.net/0.3.1/'
-               'cirros-0.3.1-x86_64-uec.tar.gz',
-               help='HTTP-accessible image'),
-    cfg.IntOpt('build_timeout',
-               default=300,
-               help="Timeout in seconds to wait for an image to "
-                    "become available."),
-    cfg.IntOpt('build_interval',
-               default=1,
-               help="Time in seconds between image operation status "
-                    "checks.")
-]
-
-image_feature_group = cfg.OptGroup(name='image-feature-enabled',
-                                   title='Enabled image service features')
-
-ImageFeaturesGroup = [
-    cfg.BoolOpt('api_v2',
-                default=True,
-                help="Is the v2 image API enabled"),
-    cfg.BoolOpt('api_v1',
-                default=True,
-                help="Is the v1 image API enabled"),
-]
-
-network_group = cfg.OptGroup(name='network',
-                             title='Network Service Options')
-
-NetworkGroup = [
-    cfg.StrOpt('catalog_type',
-               default='network',
-               help='Catalog type of the Neutron service.'),
-    cfg.StrOpt('region',
-               default='',
-               help="The network region name to use. If empty, the value "
-                    "of identity.region is used instead. If no such region "
-                    "is found in the service catalog, the first found one is "
-                    "used."),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the network service."),
-    cfg.StrOpt('tenant_network_cidr',
-               default="10.100.0.0/16",
-               help="The CIDR block from which to allocate tenant IPv4 "
-                    "subnets"),
-    cfg.IntOpt('tenant_network_mask_bits',
-               default=28,
-               help="The mask bits for tenant IPv4 subnets"),
-    cfg.StrOpt('tenant_network_v6_cidr',
-               default="2003::/48",
-               help="The CIDR block from which to allocate tenant IPv6 "
-                    "subnets"),
-    cfg.IntOpt('tenant_network_v6_mask_bits',
-               default=64,
-               help="The mask bits for tenant IPv6 subnets"),
-    cfg.BoolOpt('tenant_networks_reachable',
-                default=False,
-                help="Whether tenant network connectivity should be "
-                     "evaluated directly"),
-    cfg.StrOpt('public_network_id',
-               default="",
-               help="ID of the public network that provides external "
-                    "connectivity"),
-    cfg.StrOpt('public_router_id',
-               default="",
-               help="ID of the public router that provides external "
-                    "connectivity. This should only be used when Neutron's "
-                    "'allow_overlapping_ips' is set to 'False' in "
-                    "neutron.conf. Usually not needed past the 'Grizzly' "
-                    "release."),
-    cfg.IntOpt('build_timeout',
-               default=300,
-               help="Timeout in seconds to wait for network operation to "
-                    "complete."),
-    cfg.IntOpt('build_interval',
-               default=1,
-               help="Time in seconds between network operation status "
-                    "checks."),
-    cfg.ListOpt('dns_servers',
-                default=["8.8.8.8", "8.8.4.4"],
-                help="List of dns servers which should be used"
-                     " for subnet creation"),
-    cfg.StrOpt('port_vnic_type',
-               choices=[None, 'normal', 'direct', 'macvtap'],
-               help="vnic_type to use when Launching instances"
-                    " with pre-configured ports."
-                    " Supported ports are:"
-                    " ['normal','direct','macvtap']"),
-]
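
As a sanity check on the two IPv4 defaults above, here is a short sketch
(not from this module) of how a 10.100.0.0/16 block is carved into /28
tenant subnets with the standard ipaddress module:

    import ipaddress

    block = ipaddress.ip_network(u'10.100.0.0/16')
    subnets = block.subnets(new_prefix=28)  # iterator over /28 subnets
    print(next(subnets))                    # 10.100.0.0/28
    print(block.num_addresses // 16)        # 4096 possible /28 allocations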
-
-network_feature_group = cfg.OptGroup(name='network-feature-enabled',
-                                     title='Enabled network service features')
-
-NetworkFeaturesGroup = [
-    cfg.BoolOpt('ipv6',
-                default=True,
-                help="Allow the execution of IPv6 tests"),
-    cfg.ListOpt('api_extensions',
-                default=['all'],
-                help="A list of enabled network extensions with a special "
-                     "entry 'all' which indicates every extension is "
-                     "enabled. An empty list indicates all extensions are "
-                     "disabled."),
-    cfg.BoolOpt('ipv6_subnet_attributes',
-                default=False,
-                help="Allow the execution of IPv6 subnet tests that use "
-                     "the extended IPv6 attributes ipv6_ra_mode "
-                     "and ipv6_address_mode"
-                ),
-    cfg.BoolOpt('specify_floating_ip_address_available',
-                default=True,
-                help='Allow passing an IP address when creating a '
-                     'floating IP'),
-]
-
-messaging_group = cfg.OptGroup(name='messaging',
-                               title='Messaging Service')
-
-MessagingGroup = [
-    cfg.StrOpt('catalog_type',
-               default='messaging',
-               help='Catalog type of the Messaging service.'),
-    cfg.IntOpt('max_queues_per_page',
-               default=20,
-               help='The maximum number of queue records per page when '
-                    'listing queues'),
-    cfg.IntOpt('max_queue_metadata',
-               default=65536,
-               help='The maximum metadata size for a queue'),
-    cfg.IntOpt('max_messages_per_page',
-               default=20,
-               help='The maximum number of queue messages per page when '
-                    'listing or posting messages'),
-    cfg.IntOpt('max_message_size',
-               default=262144,
-               help='The maximum size of a message body'),
-    cfg.IntOpt('max_messages_per_claim',
-               default=20,
-               help='The maximum number of messages per claim'),
-    cfg.IntOpt('max_message_ttl',
-               default=1209600,
-               help='The maximum TTL for a message'),
-    cfg.IntOpt('max_claim_ttl',
-               default=43200,
-               help='The maximum TTL for a claim'),
-    cfg.IntOpt('max_claim_grace',
-               default=43200,
-               help='The maximum grace period for a claim'),
-]
-
-volume_group = cfg.OptGroup(name='volume',
-                            title='Block Storage Options')
-
-VolumeGroup = [
-    cfg.IntOpt('build_interval',
-               default=1,
-               help='Time in seconds between volume availability checks.'),
-    cfg.IntOpt('build_timeout',
-               default=300,
-               help='Timeout in seconds to wait for a volume to become '
-                    'available.'),
-    cfg.StrOpt('catalog_type',
-               default='volume',
-               help="Catalog type of the Volume Service"),
-    cfg.StrOpt('region',
-               default='',
-               help="The volume region name to use. If empty, the value "
-                    "of identity.region is used instead. If no such region "
-                    "is found in the service catalog, the first found one is "
-                    "used."),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the volume service."),
-    cfg.StrOpt('backend1_name',
-               default='BACKEND_1',
-               help="Name of the backend1 (must be declared in cinder.conf)"),
-    cfg.StrOpt('backend2_name',
-               default='BACKEND_2',
-               help="Name of the backend2 (must be declared in cinder.conf)"),
-    cfg.StrOpt('storage_protocol',
-               default='iSCSI',
-               help='Backend protocol to target when creating volume types'),
-    cfg.StrOpt('vendor_name',
-               default='Open Source',
-               help='Backend vendor to target when creating volume types'),
-    cfg.StrOpt('disk_format',
-               default='raw',
-               help='Disk format to use when copying a volume to image'),
-    cfg.IntOpt('volume_size',
-               default=1,
-               help='Default size in GB for volumes created by volumes tests'),
-]
-
-volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
-                                    title='Enabled Cinder Features')
-
-VolumeFeaturesGroup = [
-    cfg.BoolOpt('multi_backend',
-                default=False,
-                help="Runs Cinder multi-backend test (requires 2 backends)"),
-    cfg.BoolOpt('backup',
-                default=True,
-                help='Runs Cinder volumes backup test'),
-    cfg.BoolOpt('snapshot',
-                default=True,
-                help='Runs Cinder volume snapshot test'),
-    cfg.ListOpt('api_extensions',
-                default=['all'],
-                help="A list of enabled volume extensions with a special "
-                     "entry 'all' which indicates every extension is "
-                     "enabled. An empty list indicates all extensions are "
-                     "disabled."),
-    cfg.BoolOpt('api_v1',
-                default=True,
-                help="Is the v1 volume API enabled"),
-    cfg.BoolOpt('api_v2',
-                default=True,
-                help="Is the v2 volume API enabled"),
-]
-
-
-object_storage_group = cfg.OptGroup(name='object-storage',
-                                    title='Object Storage Service Options')
-
-ObjectStoreGroup = [
-    cfg.StrOpt('catalog_type',
-               default='object-store',
-               help="Catalog type of the Object-Storage service."),
-    cfg.StrOpt('region',
-               default='',
-               help="The object-storage region name to use. If empty, the "
-                    "value of identity.region is used instead. If no such "
-                    "region is found in the service catalog, the first found "
-                    "one is used."),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the object-store service."),
-    cfg.IntOpt('container_sync_timeout',
-               default=600,
-               help="Number of seconds to time on waiting for a container "
-                    "to container synchronization complete."),
-    cfg.IntOpt('container_sync_interval',
-               default=5,
-               help="Number of seconds to wait while looping to check the "
-                    "status of a container to container synchronization"),
-    cfg.StrOpt('operator_role',
-               default='Member',
-               help="Role to add to users created for swift tests to "
-                    "enable creating containers"),
-    cfg.StrOpt('reseller_admin_role',
-               default='ResellerAdmin',
-               help="User role that has reseller admin"),
-    cfg.StrOpt('realm_name',
-               default='realm1',
-               help="Name of sync realm. A sync realm is a set of clusters "
-                    "that have agreed to allow container syncing with each "
-                    "other. Set the same realm name as Swift's "
-                    "container-sync-realms.conf"),
-    cfg.StrOpt('cluster_name',
-               default='name1',
-               help="One name of cluster which is set in the realm whose name "
-                    "is set in 'realm_name' item in this file. Set the "
-                    "same cluster name as Swift's container-sync-realms.conf"),
-]
-
-object_storage_feature_group = cfg.OptGroup(
-    name='object-storage-feature-enabled',
-    title='Enabled object-storage features')
-
-ObjectStoreFeaturesGroup = [
-    cfg.ListOpt('discoverable_apis',
-                default=['all'],
-                help="A list of the enabled optional discoverable apis. "
-                     "A single entry, all, indicates that all of these "
-                     "features are expected to be enabled"),
-    cfg.BoolOpt('container_sync',
-                default=True,
-                help="Execute (old style) container-sync tests"),
-    cfg.BoolOpt('object_versioning',
-                default=True,
-                help="Execute object-versioning tests"),
-    cfg.BoolOpt('discoverability',
-                default=True,
-                help="Execute discoverability tests"),
-]
-
-database_group = cfg.OptGroup(name='database',
-                              title='Database Service Options')
-
-DatabaseGroup = [
-    cfg.StrOpt('catalog_type',
-               default='database',
-               help="Catalog type of the Database service."),
-    cfg.StrOpt('db_flavor_ref',
-               default="1",
-               help="Valid primary flavor to use in database tests."),
-    cfg.StrOpt('db_current_version',
-               default="v1.0",
-               help="Current database version to use in database tests."),
-]
-
-orchestration_group = cfg.OptGroup(name='orchestration',
-                                   title='Orchestration Service Options')
-
-OrchestrationGroup = [
-    cfg.StrOpt('catalog_type',
-               default='orchestration',
-               help="Catalog type of the Orchestration service."),
-    cfg.StrOpt('region',
-               default='',
-               help="The orchestration region name to use. If empty, the "
-                    "value of identity.region is used instead. If no such "
-                    "region is found in the service catalog, the first found "
-                    "one is used."),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the orchestration service."),
-    cfg.IntOpt('build_interval',
-               default=1,
-               help="Time in seconds between build status checks."),
-    cfg.IntOpt('build_timeout',
-               default=1200,
-               help="Timeout in seconds to wait for a stack to build."),
-    cfg.StrOpt('instance_type',
-               default='m1.micro',
-               help="Instance type for tests. Needs to be big enough for a "
-                    "full OS plus the test workload"),
-    cfg.StrOpt('keypair_name',
-               help="Name of existing keypair to launch servers with."),
-    cfg.IntOpt('max_template_size',
-               default=524288,
-               help="Value must match heat configuration of the same name."),
-    cfg.IntOpt('max_resources_per_stack',
-               default=1000,
-               help="Value must match heat configuration of the same name."),
-]
-
-
-telemetry_group = cfg.OptGroup(name='telemetry',
-                               title='Telemetry Service Options')
-
-TelemetryGroup = [
-    cfg.StrOpt('catalog_type',
-               default='metering',
-               help="Catalog type of the Telemetry service."),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the telemetry service."),
-    cfg.BoolOpt('too_slow_to_test',
-                default=True,
-                help="This variable is used as flag to enable "
-                     "notification tests")
-]
-
-
-dashboard_group = cfg.OptGroup(name="dashboard",
-                               title="Dashboard options")
-
-DashboardGroup = [
-    cfg.StrOpt('dashboard_url',
-               default='http://localhost/',
-               help="Where the dashboard can be found"),
-    cfg.StrOpt('login_url',
-               default='http://localhost/auth/login/',
-               help="Login page for the dashboard"),
-]
-
-
-data_processing_group = cfg.OptGroup(name="data_processing",
-                                     title="Data Processing options")
-
-DataProcessingGroup = [
-    cfg.StrOpt('catalog_type',
-               default='data_processing',
-               help="Catalog type of the data processing service."),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the data processing "
-                    "service."),
-]
-
-
-data_processing_feature_group = cfg.OptGroup(
-    name="data_processing-feature-enabled",
-    title="Enabled Data Processing features")
-
-DataProcessingFeaturesGroup = [
-    cfg.ListOpt('plugins',
-                default=["vanilla", "hdp"],
-                help="List of enabled data processing plugins")
-]
-
-
-boto_group = cfg.OptGroup(name='boto',
-                          title='EC2/S3 options')
-BotoGroup = [
-    cfg.StrOpt('ec2_url',
-               default="http://localhost:8773/services/Cloud",
-               help="EC2 URL"),
-    cfg.StrOpt('s3_url',
-               default="http://localhost:8080",
-               help="S3 URL"),
-    cfg.StrOpt('aws_secret',
-               help="AWS Secret Key",
-               secret=True),
-    cfg.StrOpt('aws_access',
-               help="AWS Access Key"),
-    cfg.StrOpt('aws_zone',
-               default="nova",
-               help="AWS Zone for EC2 tests"),
-    cfg.StrOpt('s3_materials_path',
-               default="/opt/stack/devstack/files/images/"
-                       "s3-materials/cirros-0.3.0",
-               help="S3 Materials Path"),
-    cfg.StrOpt('ari_manifest',
-               default="cirros-0.3.0-x86_64-initrd.manifest.xml",
-               help="ARI Ramdisk Image manifest"),
-    cfg.StrOpt('ami_manifest',
-               default="cirros-0.3.0-x86_64-blank.img.manifest.xml",
-               help="AMI Machine Image manifest"),
-    cfg.StrOpt('aki_manifest',
-               default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml",
-               help="AKI Kernel Image manifest"),
-    cfg.StrOpt('instance_type',
-               default="m1.tiny",
-               help="Instance type"),
-    cfg.IntOpt('http_socket_timeout',
-               default=3,
-               help="boto Http socket timeout"),
-    cfg.IntOpt('num_retries',
-               default=1,
-               help="boto num_retries on error"),
-    cfg.IntOpt('build_timeout',
-               default=60,
-               help="Status Change Timeout"),
-    cfg.IntOpt('build_interval',
-               default=1,
-               help="Status Change Test Interval"),
-]
-
-stress_group = cfg.OptGroup(name='stress', title='Stress Test Options')
-
-StressGroup = [
-    cfg.StrOpt('nova_logdir',
-               help='Directory containing log files on the compute nodes'),
-    cfg.IntOpt('max_instances',
-               default=16,
-               help='Maximum number of instances to create during test.'),
-    cfg.StrOpt('controller',
-               help='Controller host.'),
-    # new stress options
-    cfg.StrOpt('target_controller',
-               help='Controller host.'),
-    cfg.StrOpt('target_ssh_user',
-               help='SSH user.'),
-    cfg.StrOpt('target_private_key_path',
-               help='Path to private key.'),
-    cfg.StrOpt('target_logfiles',
-               help='Regexp for the list of log files.'),
-    cfg.IntOpt('log_check_interval',
-               default=60,
-               help='Time (in seconds) between log file error checks.'),
-    cfg.IntOpt('default_thread_number_per_action',
-               default=4,
-               help='The number of threads created while running the '
-                    'stress test.'),
-    cfg.BoolOpt('leave_dirty_stack',
-                default=False,
-                help='Prevent the cleaning (tearDownClass()) between'
-                     ' each stress test run if an exception occurs'
-                     ' during this run.'),
-    cfg.BoolOpt('full_clean_stack',
-                default=False,
-                help='Allows a full cleaning process after a stress test.'
-                     ' Caution: this cleanup will remove every object of'
-                     ' every tenant.')
-]
-
-
-scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')
-
-ScenarioGroup = [
-    cfg.StrOpt('img_dir',
-               default='/opt/stack/new/devstack/files/images/'
-               'cirros-0.3.1-x86_64-uec',
-               help='Directory containing image files'),
-    cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
-               default='cirros-0.3.1-x86_64-disk.img',
-               help='Image file name'),
-    cfg.StrOpt('img_disk_format',
-               default='qcow2',
-               help='Image disk format'),
-    cfg.StrOpt('img_container_format',
-               default='bare',
-               help='Image container format'),
-    cfg.StrOpt('ami_img_file',
-               default='cirros-0.3.1-x86_64-blank.img',
-               help='AMI image file name'),
-    cfg.StrOpt('ari_img_file',
-               default='cirros-0.3.1-x86_64-initrd',
-               help='ARI image file name'),
-    cfg.StrOpt('aki_img_file',
-               default='cirros-0.3.1-x86_64-vmlinuz',
-               help='AKI image file name'),
-    cfg.StrOpt('ssh_user',
-               default='cirros',
-               help='ssh username for the image file'),
-    cfg.IntOpt(
-        'large_ops_number',
-        default=0,
-        help="specifies how many resources to request at once. Used "
-        "for large operations testing."),
-    # TODO(yfried): add support for dhcpcd
-    cfg.StrOpt('dhcp_client',
-               default='udhcpc',
-               choices=["udhcpc", "dhclient"],
-               help='DHCP client used by images to renew the DHCP lease. '
-                    'If left empty, the update operation will be skipped. '
-                    'Supported clients: "udhcpc", "dhclient"')
-]
-
-
-service_available_group = cfg.OptGroup(name="service_available",
-                                       title="Available OpenStack Services")
-
-ServiceAvailableGroup = [
-    cfg.BoolOpt('cinder',
-                default=True,
-                help="Whether or not cinder is expected to be available"),
-    cfg.BoolOpt('neutron',
-                default=False,
-                help="Whether or not neutron is expected to be available"),
-    cfg.BoolOpt('glance',
-                default=True,
-                help="Whether or not glance is expected to be available"),
-    cfg.BoolOpt('swift',
-                default=True,
-                help="Whether or not swift is expected to be available"),
-    cfg.BoolOpt('nova',
-                default=True,
-                help="Whether or not nova is expected to be available"),
-    cfg.BoolOpt('heat',
-                default=False,
-                help="Whether or not Heat is expected to be available"),
-    cfg.BoolOpt('ceilometer',
-                default=True,
-                help="Whether or not Ceilometer is expected to be available"),
-    cfg.BoolOpt('horizon',
-                default=True,
-                help="Whether or not Horizon is expected to be available"),
-    cfg.BoolOpt('sahara',
-                default=False,
-                help="Whether or not Sahara is expected to be available"),
-    cfg.BoolOpt('ironic',
-                default=False,
-                help="Whether or not Ironic is expected to be available"),
-    cfg.BoolOpt('trove',
-                default=False,
-                help="Whether or not Trove is expected to be available"),
-    cfg.BoolOpt('zaqar',
-                default=False,
-                help="Whether or not Zaqar is expected to be available"),
-]
-
-debug_group = cfg.OptGroup(name="debug",
-                           title="Debug System")
-
-DebugGroup = [
-    cfg.StrOpt('trace_requests',
-               default='',
-               help="""A regex to determine which requests should be traced.
-
-This is a regex to match the caller for rest client requests to be able to
-selectively trace calls out of specific classes and methods. It largely
-exists for test development, and is not expected to be used in a real deploy
-of tempest. This will be matched against the discovered ClassName:method
-in the test environment.
-
-Expected values for this field are:
-
- * ClassName:test_method_name - traces one test_method
- * ClassName:setUp(Class) - traces specific setup functions
- * ClassName:tearDown(Class) - traces specific teardown functions
- * ClassName:_run_cleanups - traces the cleanup functions
-
-If nothing is specified, this feature is not enabled. To trace everything
-specify .* as the regex.
-""")
-]
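
A minimal sketch of the matching behaviour the help text describes; the
pattern and caller name below are hypothetical:

    import re

    trace_requests = r'ServersTest.*:test_.*'     # hypothetical config value
    caller = 'ServersTestJSON:test_list_servers'  # discovered ClassName:method
    if re.match(trace_requests, caller):
        print('tracing %s' % caller)              # this caller would be traced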
-
-input_scenario_group = cfg.OptGroup(name="input-scenario",
-                                    title="Filters and values for"
-                                          " input scenarios")
-
-InputScenarioGroup = [
-    cfg.StrOpt('image_regex',
-               default='^cirros-0.3.1-x86_64-uec$',
-               help="Matching images become parameters for scenario tests"),
-    cfg.StrOpt('flavor_regex',
-               default='^m1.nano$',
-               help="Matching flavors become parameters for scenario tests"),
-    cfg.StrOpt('non_ssh_image_regex',
-               default='^.*[Ww]in.*$',
-               help="SSH verification in tests is skipped"
-                    "for matching images"),
-    cfg.StrOpt('ssh_user_regex',
-               default="[[\"^.*[Cc]irros.*$\", \"root\"]]",
-               help="List of user mapped to regex "
-                    "to matching image names."),
-]
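
The ssh_user_regex default above is a JSON-encoded list of [regex, user]
pairs; a hedged sketch (not from this module) of how such a value can be
interpreted:

    import json
    import re

    raw = '[["^.*[Cc]irros.*$", "root"]]'  # the default shown above
    mappings = json.loads(raw)
    image_name = 'cirros-0.3.1-x86_64-uec'
    user = next((u for rx, u in mappings if re.match(rx, image_name)), None)
    print(user)  # root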
-
-
-baremetal_group = cfg.OptGroup(name='baremetal',
-                               title='Baremetal provisioning service options',
-                               help='When enabling baremetal tests, Nova '
-                                    'must be configured to use the Ironic '
-                                    'driver. The following parameters in the '
-                                    '[compute-feature-enabled] section must '
-                                    'be disabled: console_output, '
-                                    'interface_attach, live_migration, '
-                                    'pause, rescue, resize, shelve, '
-                                    'snapshot, and suspend')
-
-BaremetalGroup = [
-    cfg.StrOpt('catalog_type',
-               default='baremetal',
-               help="Catalog type of the baremetal provisioning service"),
-    cfg.BoolOpt('driver_enabled',
-                default=False,
-                help="Whether the Ironic nova-compute driver is enabled"),
-    cfg.StrOpt('driver',
-               default='fake',
-               help="Driver name which Ironic uses"),
-    cfg.StrOpt('endpoint_type',
-               default='publicURL',
-               choices=['public', 'admin', 'internal',
-                        'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the baremetal provisioning "
-                    "service"),
-    cfg.IntOpt('active_timeout',
-               default=300,
-               help="Timeout for Ironic node to completely provision"),
-    cfg.IntOpt('association_timeout',
-               default=30,
-               help="Timeout for association of Nova instance and Ironic "
-                    "node"),
-    cfg.IntOpt('power_timeout',
-               default=60,
-               help="Timeout for Ironic power transitions."),
-    cfg.IntOpt('unprovision_timeout',
-               default=60,
-               help="Timeout for unprovisioning an Ironic node.")
-]
-
-cli_group = cfg.OptGroup(name='cli', title="cli Configuration Options")
-
-CLIGroup = [
-    cfg.BoolOpt('enabled',
-                default=True,
-                help="enable cli tests"),
-    cfg.StrOpt('cli_dir',
-               default='/usr/local/bin',
-               help="directory where python client binaries are located"),
-    cfg.BoolOpt('has_manage',
-                default=True,
-                help=("Whether the tempest run location has access to the "
-                      "*-manage commands. In a pure blackbox environment "
-                      "it will not.")),
-    cfg.IntOpt('timeout',
-               default=15,
-               help="Number of seconds to wait on a CLI timeout"),
-]
-
-negative_group = cfg.OptGroup(name='negative', title="Negative Test Options")
-
-NegativeGroup = [
-    cfg.StrOpt('test_generator',
-               default='tempest.common.generator.'
-                       'negative_generator.NegativeTestGenerator',
-               help="Test generator class for all negative tests"),
-]
-
-_opts = [
-    (auth_group, AuthGroup),
-    (compute_group, ComputeGroup),
-    (compute_features_group, ComputeFeaturesGroup),
-    (identity_group, IdentityGroup),
-    (identity_feature_group, IdentityFeatureGroup),
-    (image_group, ImageGroup),
-    (image_feature_group, ImageFeaturesGroup),
-    (network_group, NetworkGroup),
-    (network_feature_group, NetworkFeaturesGroup),
-    (messaging_group, MessagingGroup),
-    (volume_group, VolumeGroup),
-    (volume_feature_group, VolumeFeaturesGroup),
-    (object_storage_group, ObjectStoreGroup),
-    (object_storage_feature_group, ObjectStoreFeaturesGroup),
-    (database_group, DatabaseGroup),
-    (orchestration_group, OrchestrationGroup),
-    (telemetry_group, TelemetryGroup),
-    (dashboard_group, DashboardGroup),
-    (data_processing_group, DataProcessingGroup),
-    (data_processing_feature_group, DataProcessingFeaturesGroup),
-    (boto_group, BotoGroup),
-    (stress_group, StressGroup),
-    (scenario_group, ScenarioGroup),
-    (service_available_group, ServiceAvailableGroup),
-    (debug_group, DebugGroup),
-    (baremetal_group, BaremetalGroup),
-    (input_scenario_group, InputScenarioGroup),
-    (cli_group, CLIGroup),
-    (negative_group, NegativeGroup)
-]
-
-
-def register_opts():
-    for g, o in _opts:
-        register_opt_group(cfg.CONF, g, o)
-
-
-def list_opts():
-    """Return a list of oslo.config options available.
-
-    The purpose of this is to allow tools like the Oslo sample config file
-    generator to discover the options exposed to users.
-    """
-    return [(g.name, o) for g, o in _opts]
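
An illustrative loop (not part of the module) over what list_opts() returns,
which is what a sample-config generator iterates:

    for group_name, opts in list_opts():
        print('%s: %s' % (group_name, ', '.join(opt.name for opt in opts)))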
-
-
-# This class should never be instantiated outside of this module.
-class TempestConfigPrivate(object):
-    """Provides OpenStack configuration information."""
-
-    DEFAULT_CONFIG_DIR = os.path.join(
-        os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
-        "etc")
-
-    DEFAULT_CONFIG_FILE = "tempest.conf"
-
-    def __getattr__(self, attr):
-        # Handles config options from the default group
-        return getattr(cfg.CONF, attr)
-
-    def _set_attrs(self):
-        self.auth = cfg.CONF.auth
-        self.compute = cfg.CONF.compute
-        self.compute_feature_enabled = cfg.CONF['compute-feature-enabled']
-        self.identity = cfg.CONF.identity
-        self.identity_feature_enabled = cfg.CONF['identity-feature-enabled']
-        self.image = cfg.CONF.image
-        self.image_feature_enabled = cfg.CONF['image-feature-enabled']
-        self.network = cfg.CONF.network
-        self.network_feature_enabled = cfg.CONF['network-feature-enabled']
-        self.volume = cfg.CONF.volume
-        self.volume_feature_enabled = cfg.CONF['volume-feature-enabled']
-        self.object_storage = cfg.CONF['object-storage']
-        self.object_storage_feature_enabled = cfg.CONF[
-            'object-storage-feature-enabled']
-        self.database = cfg.CONF.database
-        self.orchestration = cfg.CONF.orchestration
-        self.messaging = cfg.CONF.messaging
-        self.telemetry = cfg.CONF.telemetry
-        self.dashboard = cfg.CONF.dashboard
-        self.data_processing = cfg.CONF.data_processing
-        self.data_processing_feature_enabled = cfg.CONF[
-            'data_processing-feature-enabled']
-        self.boto = cfg.CONF.boto
-        self.stress = cfg.CONF.stress
-        self.scenario = cfg.CONF.scenario
-        self.service_available = cfg.CONF.service_available
-        self.debug = cfg.CONF.debug
-        self.baremetal = cfg.CONF.baremetal
-        self.input_scenario = cfg.CONF['input-scenario']
-        self.cli = cfg.CONF.cli
-        self.negative = cfg.CONF.negative
-
-        self.identity.admin_username = self.auth.admin_username
-        self.identity.admin_password = self.auth.admin_password
-        self.identity.admin_tenant_name = self.auth.admin_tenant_name
-        self.identity.admin_domain_name = self.auth.admin_domain_name
-        self.identity.password = self.auth.admin_password
-        self.identity.tenant_name = 'demo'
-        self.identity.username = 'demo'
-        self.identity.alt_username = 'alt_demo'
-        self.identity.alt_tenant_name = 'alt_demo'
-        self.identity.alt_password = self.auth.admin_password
-
-        cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
-                             group='identity')
-        cfg.CONF.set_default('alt_domain_name',
-                             self.identity.admin_domain_name,
-                             group='identity')
-
-    def __init__(self, parse_conf=True, config_path=None):
-        """Initialize a configuration from a conf directory and conf file."""
-        super(TempestConfigPrivate, self).__init__()
-        config_files = []
-        failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
-
-        if config_path:
-            path = config_path
-        else:
-            # Environment variables override defaults...
-            conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
-                                      self.DEFAULT_CONFIG_DIR)
-            conf_file = os.environ.get('TEMPEST_CONFIG',
-                                       self.DEFAULT_CONFIG_FILE)
-
-            path = os.path.join(conf_dir, conf_file)
-
-        if not os.path.isfile(path):
-            path = failsafe_path
-
-        # only parse the config file if we expect one to exist. This is
-        # needed to avoid an issue with the config file up-to-date checker.
-        if parse_conf:
-            config_files.append(path)
-        logging.register_options(cfg.CONF)
-        if os.path.isfile(path):
-            cfg.CONF([], project='tempest', default_config_files=config_files)
-        else:
-            cfg.CONF([], project='tempest')
-        logging.setup(cfg.CONF, 'tempest')
-        LOG = logging.getLogger('tempest')
-        LOG.info("Using tempest config file %s" % path)
-        register_opts()
-        self._set_attrs()
-        if parse_conf:
-            cfg.CONF.log_opt_values(LOG, logging.DEBUG)
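
The lookup order implemented above can be exercised from the environment; a
small sketch with hypothetical paths:

    import os

    os.environ['TEMPEST_CONFIG_DIR'] = '/opt/stack/tempest/etc'  # hypothetical
    os.environ['TEMPEST_CONFIG'] = 'tempest.conf'
    # TempestConfigPrivate() now looks for /opt/stack/tempest/etc/tempest.conf
    # and falls back to /etc/tempest/tempest.conf if that file is absent.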
-
-
-class TempestConfigProxy(object):
-    _config = None
-    _path = None
-
-    _extra_log_defaults = [
-        ('keystoneclient.session', logging.INFO),
-        ('paramiko.transport', logging.INFO),
-        ('requests.packages.urllib3.connectionpool', logging.WARN),
-    ]
-
-    def _fix_log_levels(self):
-        """Tweak the oslo log defaults."""
-        for name, level in self._extra_log_defaults:
-            logging.getLogger(name).logger.setLevel(level)
-
-    def __getattr__(self, attr):
-        if not self._config:
-            self._fix_log_levels()
-            self._config = TempestConfigPrivate(config_path=self._path)
-
-        return getattr(self._config, attr)
-
-    def set_config_path(self, path):
-        self._path = path
-
-
-CONF = TempestConfigProxy()
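
Typical consumption of the proxy (a sketch; the attribute names follow the
groups registered above, and the config file is parsed lazily on first
attribute access):

    from neutron.tests.tempest import config

    CONF = config.CONF
    timeout = CONF.compute.ssh_channel_timeout  # first access parses the file
    if CONF.service_available.neutron:
        cidr = CONF.network.tenant_network_cidr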
diff --git a/neutron/tests/tempest/exceptions.py b/neutron/tests/tempest/exceptions.py
deleted file mode 100644 (file)
index db66bba..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import testtools
-
-
-class TempestException(Exception):
-    """
-    Base Tempest Exception
-
-    To correctly use this class, inherit from it and define
-    a 'message' property. That message will get printf'd
-    with the keyword arguments provided to the constructor.
-    """
-    message = "An unknown exception occurred"
-
-    def __init__(self, *args, **kwargs):
-        super(TempestException, self).__init__()
-        try:
-            self._error_string = self.message % kwargs
-        except Exception:
-            # at least get the core message out if something happened
-            self._error_string = self.message
-        if len(args) > 0:
-            # If there is a non-kwarg parameter, assume it's the error
-            # message or reason description and tack it on to the end
-            # of the exception message
-            # Convert all arguments into their string representations...
-            args = ["%s" % arg for arg in args]
-            self._error_string = (self._error_string +
-                                  "\nDetails: %s" % '\n'.join(args))
-
-    def __str__(self):
-        return self._error_string
-
-
-class RestClientException(TempestException,
-                          testtools.TestCase.failureException):
-    pass
-
-
-class InvalidConfiguration(TempestException):
-    message = "Invalid Configuration"
-
-
-class InvalidCredentials(TempestException):
-    message = "Invalid Credentials"
-
-
-class InvalidServiceTag(TempestException):
-    message = "Invalid service tag"
-
-
-class InvalidIdentityVersion(TempestException):
-    message = "Invalid version %(identity_version)s of the identity service"
-
-
-class TimeoutException(TempestException):
-    message = "Request timed out"
-
-
-class BuildErrorException(TempestException):
-    message = "Server %(server_id)s failed to build and is in ERROR status"
-
-
-class ImageKilledException(TempestException):
-    message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
-
-
-class AddImageException(TempestException):
-    message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
-
-
-class EC2RegisterImageException(TempestException):
-    message = ("Image %(image_id)s failed to become 'available' "
-               "in the allotted time")
-
-
-class VolumeBuildErrorException(TempestException):
-    message = "Volume %(volume_id)s failed to build and is in ERROR status"
-
-
-class SnapshotBuildErrorException(TempestException):
-    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
-
-
-class VolumeBackupException(TempestException):
-    message = "Volume backup %(backup_id)s failed and is in ERROR status"
-
-
-class StackBuildErrorException(TempestException):
-    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
-               "due to '%(stack_status_reason)s'")
-
-
-class StackResourceBuildErrorException(TempestException):
-    message = ("Resource %(resource_name)s in stack %(stack_identifier)s is "
-               "in %(resource_status)s status due to "
-               "'%(resource_status_reason)s'")
-
-
-class AuthenticationFailure(TempestException):
-    message = ("Authentication with user %(user)s and password "
-               "%(password)s failed auth using tenant %(tenant)s.")
-
-
-class EndpointNotFound(TempestException):
-    message = "Endpoint not found"
-
-
-class ImageFault(TempestException):
-    message = "Got image fault"
-
-
-class IdentityError(TempestException):
-    message = "Got identity error"
-
-
-class SSHTimeout(TempestException):
-    message = ("Connection to the %(host)s via SSH timed out.\n"
-               "User: %(user)s, Password: %(password)s")
-
-
-class SSHExecCommandFailed(TempestException):
-    """Raised when remotely executed command returns nonzero status."""
-    message = ("Command '%(command)s', exit status: %(exit_status)d, "
-               "Error:\n%(strerror)s")
-
-
-class ServerUnreachable(TempestException):
-    message = "The server is not reachable via the configured network"
-
-
-class TearDownException(TempestException):
-    message = "%(num)d cleanUp operation failed"
-
-
-class RFCViolation(RestClientException):
-    message = "RFC Violation"
-
-
-class InvalidHttpSuccessCode(RestClientException):
-    message = "The success code is different than the expected one"
-
-
-class BadRequest(RestClientException):
-    message = "Bad request"
-
-
-class ResponseWithNonEmptyBody(RFCViolation):
-    message = ("RFC Violation! Response with %(status)d HTTP Status Code "
-               "MUST NOT have a body")
-
-
-class ResponseWithEntity(RFCViolation):
-    message = ("RFC Violation! Response with 205 HTTP Status Code "
-               "MUST NOT have an entity")
-
-
-class InvalidHTTPResponseHeader(RestClientException):
-    message = "HTTP response header is invalid"
-
-
-class InvalidStructure(TempestException):
-    message = "Invalid structure of table with details"
-
-
-class CommandFailed(Exception):
-    def __init__(self, returncode, cmd, output, stderr):
-        super(CommandFailed, self).__init__()
-        self.returncode = returncode
-        self.cmd = cmd
-        self.stdout = output
-        self.stderr = stderr
-
-    def __str__(self):
-        return ("Command '%s' returned non-zero exit status %d.\n"
-                "stdout:\n%s\n"
-                "stderr:\n%s" % (self.cmd,
-                                 self.returncode,
-                                 self.stdout,
-                                 self.stderr))
diff --git a/neutron/tests/tempest/manager.py b/neutron/tests/tempest/manager.py
deleted file mode 100644 (file)
index 969bf98..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.tests.tempest import auth
-from neutron.tests.tempest.common import cred_provider
-from neutron.tests.tempest import config
-from neutron.tests.tempest import exceptions
-
-CONF = config.CONF
-
-
-class Manager(object):
-
-    """
-    Base manager class
-
-    Manager objects are responsible for providing a configuration object
-    and a client object for a test case to use in performing actions.
-    """
-
-    def __init__(self, credentials=None):
-        """
-        We allow overriding of the credentials used within the various
-        client classes managed by the Manager object. Left as None, the
-        standard username/password/tenant_name[/domain_name] is used.
-
-        :param credentials: Override of the credentials
-        """
-        self.auth_version = CONF.identity.auth_version
-        if credentials is None:
-            self.credentials = cred_provider.get_configured_credentials('user')
-        else:
-            self.credentials = credentials
-        # Check if passed or default credentials are valid
-        if not self.credentials.is_valid():
-            raise exceptions.InvalidCredentials()
-        # Creates an auth provider for the credentials
-        self.auth_provider = get_auth_provider(self.credentials)
-        # FIXME(andreaf) unused
-        self.client_attr_names = []
-
-
-def get_auth_provider_class(credentials):
-    if isinstance(credentials, auth.KeystoneV3Credentials):
-        return auth.KeystoneV3AuthProvider, CONF.identity.uri_v3
-    else:
-        return auth.KeystoneV2AuthProvider, CONF.identity.uri
-
-
-def get_auth_provider(credentials):
-    default_params = {
-        'disable_ssl_certificate_validation':
-            CONF.identity.disable_ssl_certificate_validation,
-        'ca_certs': CONF.identity.ca_certificates_file,
-        'trace_requests': CONF.debug.trace_requests
-    }
-    if credentials is None:
-        raise exceptions.InvalidCredentials(
-            'Credentials must be specified')
-    auth_provider_class, auth_url = get_auth_provider_class(
-        credentials)
-    return auth_provider_class(credentials, auth_url, **default_params)
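
A usage sketch for the pieces above, assuming a valid tempest configuration
(not part of the original module):

    creds = cred_provider.get_configured_credentials('user')
    mgr = Manager(credentials=creds)
    # mgr.auth_provider is a KeystoneV2AuthProvider or KeystoneV3AuthProvider,
    # selected by get_auth_provider_class() from the credential type.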
diff --git a/neutron/tests/tempest/services/__init__.py b/neutron/tests/tempest/services/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/services/botoclients.py b/neutron/tests/tempest/services/botoclients.py
deleted file mode 100644 (file)
index 87d5266..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import types
-
-from six.moves import configparser as ConfigParser
-from six.moves.urllib import parse as urlparse
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.tempest import config
-
-import boto
-import boto.ec2
-import boto.s3.connection
-
-CONF = config.CONF
-
-
-class BotoClientBase(object):
-
-    ALLOWED_METHODS = set()
-
-    def __init__(self, identity_client):
-        self.identity_client = identity_client
-
-        self.ca_cert = CONF.identity.ca_certificates_file
-        self.connection_timeout = str(CONF.boto.http_socket_timeout)
-        self.num_retries = str(CONF.boto.num_retries)
-        self.build_timeout = CONF.boto.build_timeout
-
-        self.connection_data = {}
-
-    def _config_boto_timeout(self, timeout, retries):
-        try:
-            boto.config.add_section("Boto")
-        except ConfigParser.DuplicateSectionError:
-            pass
-        boto.config.set("Boto", "http_socket_timeout", timeout)
-        boto.config.set("Boto", "num_retries", retries)
-
-    def _config_boto_ca_certificates_file(self, ca_cert):
-        if ca_cert is None:
-            return
-
-        try:
-            boto.config.add_section("Boto")
-        except ConfigParser.DuplicateSectionError:
-            pass
-        boto.config.set("Boto", "ca_certificates_file", ca_cert)
-
-    def __getattr__(self, name):
-        """Automatically creates methods for the allowed methods set."""
-        if name in self.ALLOWED_METHODS:
-            def func(self, *args, **kwargs):
-                with contextlib.closing(self.get_connection()) as conn:
-                    return getattr(conn, name)(*args, **kwargs)
-
-            func.__name__ = name
-            setattr(self, name, types.MethodType(func, self, self.__class__))
-            setattr(self.__class__, name,
-                    types.MethodType(func, None, self.__class__))
-            return getattr(self, name)
-        else:
-            raise AttributeError(name)
-
-    def get_connection(self):
-        self._config_boto_timeout(self.connection_timeout, self.num_retries)
-        self._config_boto_ca_certificates_file(self.ca_cert)
-
-        ec2_client_args = {'aws_access_key_id': CONF.boto.aws_access,
-                           'aws_secret_access_key': CONF.boto.aws_secret}
-        if not all(ec2_client_args.values()):
-            ec2_client_args = self.get_aws_credentials(self.identity_client)
-
-        self.connection_data.update(ec2_client_args)
-        return self.connect_method(**self.connection_data)
-
-    def get_aws_credentials(self, identity_client):
-        """
-        Obtain existing, or create new AWS credentials
-        :param identity_client: identity client with embedded credentials
-        :return: EC2 credentials
-        """
-        ec2_cred_list = identity_client.list_user_ec2_credentials(
-            identity_client.user_id)
-        for cred in ec2_cred_list:
-            if cred['tenant_id'] == identity_client.tenant_id:
-                ec2_cred = cred
-                break
-        else:
-            ec2_cred = identity_client.create_user_ec2_credentials(
-                identity_client.user_id, identity_client.tenant_id)
-        if not all((ec2_cred, ec2_cred['access'], ec2_cred['secret'])):
-            raise lib_exc.NotFound("Unable to get access and secret keys")
-        else:
-            ec2_cred_aws = {}
-            ec2_cred_aws['aws_access_key_id'] = ec2_cred['access']
-            ec2_cred_aws['aws_secret_access_key'] = ec2_cred['secret']
-        return ec2_cred_aws
-
-
-class APIClientEC2(BotoClientBase):
-
-    def connect_method(self, *args, **kwargs):
-        return boto.connect_ec2(*args, **kwargs)
-
-    def __init__(self, identity_client):
-        super(APIClientEC2, self).__init__(identity_client)
-        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
-        purl = urlparse.urlparse(CONF.boto.ec2_url)
-
-        region_name = CONF.compute.region
-        if not region_name:
-            region_name = CONF.identity.region
-        region = boto.ec2.regioninfo.RegionInfo(name=region_name,
-                                                endpoint=purl.hostname)
-        port = purl.port
-        if port is None:
-            if purl.scheme is not "https":
-                port = 80
-            else:
-                port = 443
-        else:
-            port = int(port)
-        self.connection_data.update({"is_secure": purl.scheme == "https",
-                                     "validate_certs": not insecure_ssl,
-                                     "region": region,
-                                     "host": purl.hostname,
-                                     "port": port,
-                                     "path": purl.path})
-
-    ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
-                           'delete_key_pair', 'import_key_pair',
-                           'get_all_key_pairs',
-                           'get_all_tags',
-                           'create_image', 'get_image',
-                           'register_image', 'deregister_image',
-                           'get_all_images', 'get_image_attribute',
-                           'modify_image_attribute', 'reset_image_attribute',
-                           'get_all_kernels',
-                           'create_volume', 'delete_volume',
-                           'get_all_volume_status', 'get_all_volumes',
-                           'get_volume_attribute', 'modify_volume_attribute',
-                           'bundle_instance', 'cancel_spot_instance_requests',
-                           'confirm_product_instance',
-                           'get_all_instance_status', 'get_all_instances',
-                           'get_all_reserved_instances',
-                           'get_all_spot_instance_requests',
-                           'get_instance_attribute', 'monitor_instance',
-                           'monitor_instances', 'unmonitor_instance',
-                           'unmonitor_instances',
-                           'purchase_reserved_instance_offering',
-                           'reboot_instances', 'request_spot_instances',
-                           'reset_instance_attribute', 'run_instances',
-                           'start_instances', 'stop_instances',
-                           'terminate_instances',
-                           'attach_network_interface', 'attach_volume',
-                           'detach_network_interface', 'detach_volume',
-                           'get_console_output',
-                           'delete_network_interface', 'create_subnet',
-                           'create_network_interface', 'delete_subnet',
-                           'get_all_network_interfaces',
-                           'allocate_address', 'associate_address',
-                           'disassociate_address', 'get_all_addresses',
-                           'release_address',
-                           'create_snapshot', 'delete_snapshot',
-                           'get_all_snapshots', 'get_snapshot_attribute',
-                           'modify_snapshot_attribute',
-                           'reset_snapshot_attribute', 'trim_snapshots',
-                           'get_all_regions', 'get_all_zones',
-                           'get_all_security_groups', 'create_security_group',
-                           'delete_security_group', 'authorize_security_group',
-                           'authorize_security_group_egress',
-                           'revoke_security_group',
-                           'revoke_security_group_egress'))
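-
-    # Illustrative sketch (an assumption: BotoClientBase, which is not shown
-    # in this hunk, presumably proxies only the names in ALLOWED_METHODS to
-    # the underlying boto connection):
-    #
-    #     client = APIClientEC2(identity_client)
-    #     client.get_all_regions()   # listed above, forwarded to boto
-    #     client.some_other_call()   # not listed, rejected by the proxy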
-
-
-class ObjectClientS3(BotoClientBase):
-
-    def connect_method(self, *args, **kwargs):
-        return boto.connect_s3(*args, **kwargs)
-
-    def __init__(self, identity_client):
-        super(ObjectClientS3, self).__init__(identity_client)
-        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
-        purl = urlparse.urlparse(CONF.boto.s3_url)
-        port = purl.port
-        if port is None:
-            if purl.scheme != "https":
-                port = 80
-            else:
-                port = 443
-        else:
-            port = int(port)
-        self.connection_data.update({"is_secure": purl.scheme == "https",
-                                     "validate_certs": not insecure_ssl,
-                                     "host": purl.hostname,
-                                     "port": port,
-                                     "calling_format": boto.s3.connection.
-                                     OrdinaryCallingFormat()})
-
-    ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
-                           'get_all_buckets', 'get_bucket', 'delete_key',
-                           'lookup'))
diff --git a/neutron/tests/tempest/services/identity/__init__.py b/neutron/tests/tempest/services/identity/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/services/identity/v2/__init__.py b/neutron/tests/tempest/services/identity/v2/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/services/identity/v2/json/__init__.py b/neutron/tests/tempest/services/identity/v2/json/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/services/identity/v2/json/identity_client.py b/neutron/tests/tempest/services/identity/v2/json/identity_client.py
deleted file mode 100644 (file)
index 46e8f87..0000000
+++ /dev/null
@@ -1,283 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.tempest.common import service_client
-
-
-class IdentityClientJSON(service_client.ServiceClient):
-
-    def has_admin_extensions(self):
-        """
-        Returns True if the KSADM Admin Extensions are supported
-        False otherwise
-        """
-        if hasattr(self, '_has_admin_extensions'):
-            return self._has_admin_extensions
-        # Try something that requires admin
-        try:
-            self.list_roles()
-            self._has_admin_extensions = True
-        except Exception:
-            self._has_admin_extensions = False
-        return self._has_admin_extensions
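-
-    # has_admin_extensions() caches its probe result on the instance, so
-    # only the first call issues the admin-only list_roles() request.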
-
-    def create_role(self, name):
-        """Create a role."""
-        post_body = {
-            'name': name,
-        }
-        post_body = json.dumps({'role': post_body})
-        resp, body = self.post('OS-KSADM/roles', post_body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def get_role(self, role_id):
-        """Get a role by its id."""
-        resp, body = self.get('OS-KSADM/roles/%s' % role_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
-
-    def create_tenant(self, name, **kwargs):
-        """
-        Create a tenant
-        name (required): New tenant name
-        description: Description of new tenant (default is none)
-        enabled <true|false>: Initial tenant status (default is true)
-        """
-        post_body = {
-            'name': name,
-            'description': kwargs.get('description', ''),
-            'enabled': kwargs.get('enabled', True),
-        }
-        post_body = json.dumps({'tenant': post_body})
-        resp, body = self.post('tenants', post_body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
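-
-    # Illustrative use (names are placeholders; assumes the parsed response
-    # exposes the tenant fields as a dict):
-    #
-    #     body = client.create_tenant('demo', description='demo tenant')
-    #     tenant_id = body['id']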
-
-    def delete_role(self, role_id):
-        """Delete a role."""
-        resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id))
-        self.expected_success(204, resp.status)
-        return resp, body
-
-    def list_user_roles(self, tenant_id, user_id):
-        """Returns a list of roles assigned to a user for a tenant."""
-        url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, self._parse_resp(body))
-
-    def assign_user_role(self, tenant_id, user_id, role_id):
-        """Add roles to a user on a tenant."""
-        resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
-                              (tenant_id, user_id, role_id), "")
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def remove_user_role(self, tenant_id, user_id, role_id):
-        """Removes a role assignment for a user on a tenant."""
-        resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
-                                 (tenant_id, user_id, role_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_tenant(self, tenant_id):
-        """Delete a tenant."""
-        resp, body = self.delete('tenants/%s' % str(tenant_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def get_tenant(self, tenant_id):
-        """Get tenant details."""
-        resp, body = self.get('tenants/%s' % str(tenant_id))
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def list_roles(self):
-        """Returns roles."""
-        resp, body = self.get('OS-KSADM/roles')
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, self._parse_resp(body))
-
-    def list_tenants(self):
-        """Returns tenants."""
-        resp, body = self.get('tenants')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['tenants'])
-
-    def get_tenant_by_name(self, tenant_name):
-        tenants = self.list_tenants()
-        for tenant in tenants:
-            if tenant['name'] == tenant_name:
-                return tenant
-        raise lib_exc.NotFound('No such tenant')
-
-    def update_tenant(self, tenant_id, **kwargs):
-        """Updates a tenant."""
-        body = self.get_tenant(tenant_id)
-        name = kwargs.get('name', body['name'])
-        desc = kwargs.get('description', body['description'])
-        en = kwargs.get('enabled', body['enabled'])
-        post_body = {
-            'id': tenant_id,
-            'name': name,
-            'description': desc,
-            'enabled': en,
-        }
-        post_body = json.dumps({'tenant': post_body})
-        resp, body = self.post('tenants/%s' % tenant_id, post_body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def create_user(self, name, password, tenant_id, email, **kwargs):
-        """Create a user."""
-        post_body = {
-            'name': name,
-            'password': password,
-            'email': email
-        }
-        if tenant_id is not None:
-            post_body['tenantId'] = tenant_id
-        if kwargs.get('enabled') is not None:
-            post_body['enabled'] = kwargs.get('enabled')
-        post_body = json.dumps({'user': post_body})
-        resp, body = self.post('users', post_body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def update_user(self, user_id, **kwargs):
-        """Updates a user."""
-        put_body = json.dumps({'user': kwargs})
-        resp, body = self.put('users/%s' % user_id, put_body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def get_user(self, user_id):
-        """GET a user."""
-        resp, body = self.get("users/%s" % user_id)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def delete_user(self, user_id):
-        """Delete a user."""
-        resp, body = self.delete("users/%s" % user_id)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def get_users(self):
-        """Get the list of users."""
-        resp, body = self.get("users")
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, self._parse_resp(body))
-
-    def enable_disable_user(self, user_id, enabled):
-        """Enables or disables a user."""
-        put_body = {
-            'enabled': enabled
-        }
-        put_body = json.dumps({'user': put_body})
-        resp, body = self.put('users/%s/enabled' % user_id, put_body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def get_token(self, token_id):
-        """Get token details."""
-        resp, body = self.get("tokens/%s" % token_id)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def delete_token(self, token_id):
-        """Delete a token."""
-        resp, body = self.delete("tokens/%s" % token_id)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_users_for_tenant(self, tenant_id):
-        """List users for a Tenant."""
-        resp, body = self.get('/tenants/%s/users' % tenant_id)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, self._parse_resp(body))
-
-    def get_user_by_username(self, tenant_id, username):
-        users = self.list_users_for_tenant(tenant_id)
-        for user in users:
-            if user['name'] == username:
-                return user
-        raise lib_exc.NotFound('No such user')
-
-    def create_service(self, name, type, **kwargs):
-        """Create a service."""
-        post_body = {
-            'name': name,
-            'type': type,
-            'description': kwargs.get('description')
-        }
-        post_body = json.dumps({'OS-KSADM:service': post_body})
-        resp, body = self.post('/OS-KSADM/services', post_body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def get_service(self, service_id):
-        """Get Service."""
-        url = '/OS-KSADM/services/%s' % service_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def list_services(self):
-        """List Service - Returns Services."""
-        resp, body = self.get('/OS-KSADM/services')
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, self._parse_resp(body))
-
-    def delete_service(self, service_id):
-        """Delete Service."""
-        url = '/OS-KSADM/services/%s' % service_id
-        resp, body = self.delete(url)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def update_user_password(self, user_id, new_pass):
-        """Update User Password."""
-        put_body = {
-            'password': new_pass,
-            'id': user_id
-        }
-        put_body = json.dumps({'user': put_body})
-        resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def list_extensions(self):
-        """List all the extensions."""
-        resp, body = self.get('/extensions')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp,
-                                               body['extensions']['values'])
-
-    def create_user_ec2_credentials(self, user_id, tenant_id):
-        post_body = json.dumps({'tenant_id': tenant_id})
-        resp, body = self.post('/users/%s/credentials/OS-EC2' % user_id,
-                               post_body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
-
-    def list_user_ec2_credentials(self, user_id):
-        resp, body = self.get('/users/%s/credentials/OS-EC2' % user_id)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, self._parse_resp(body))
diff --git a/neutron/tests/tempest/services/identity/v3/__init__.py b/neutron/tests/tempest/services/identity/v3/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/services/identity/v3/json/__init__.py b/neutron/tests/tempest/services/identity/v3/json/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/services/identity/v3/json/credentials_client.py b/neutron/tests/tempest/services/identity/v3/json/credentials_client.py
deleted file mode 100644 (file)
index 07e230a..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from neutron.tests.tempest.common import service_client
-
-
-class CredentialsClientJSON(service_client.ServiceClient):
-    api_version = "v3"
-
-    def create_credential(self, access_key, secret_key, user_id, project_id):
-        """Creates a credential."""
-        blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
-            access_key, secret_key)
-        post_body = {
-            "blob": blob,
-            "project_id": project_id,
-            "type": "ec2",
-            "user_id": user_id
-        }
-        post_body = json.dumps({'credential': post_body})
-        resp, body = self.post('credentials', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        body['credential']['blob'] = json.loads(body['credential']['blob'])
-        return service_client.ResponseBody(resp, body['credential'])
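-
-    # The credential "blob" travels as a JSON-encoded string; this client
-    # decodes it again (here and in update/get below) so callers can read
-    # body['blob']['access'] and body['blob']['secret'] directly.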
-
-    def update_credential(self, credential_id, **kwargs):
-        """Updates a credential."""
-        body = self.get_credential(credential_id)
-        cred_type = kwargs.get('type', body['type'])
-        access_key = kwargs.get('access_key', body['blob']['access'])
-        secret_key = kwargs.get('secret_key', body['blob']['secret'])
-        project_id = kwargs.get('project_id', body['project_id'])
-        user_id = kwargs.get('user_id', body['user_id'])
-        blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
-            access_key, secret_key)
-        post_body = {
-            "blob": blob,
-            "project_id": project_id,
-            "type": cred_type,
-            "user_id": user_id
-        }
-        post_body = json.dumps({'credential': post_body})
-        resp, body = self.patch('credentials/%s' % credential_id, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        body['credential']['blob'] = json.loads(body['credential']['blob'])
-        return service_client.ResponseBody(resp, body['credential'])
-
-    def get_credential(self, credential_id):
-        """To GET Details of a credential."""
-        resp, body = self.get('credentials/%s' % credential_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        body['credential']['blob'] = json.loads(body['credential']['blob'])
-        return service_client.ResponseBody(resp, body['credential'])
-
-    def list_credentials(self):
-        """Lists out all the available credentials."""
-        resp, body = self.get('credentials')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['credentials'])
-
-    def delete_credential(self, credential_id):
-        """Deletes a credential."""
-        resp, body = self.delete('credentials/%s' % credential_id)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
diff --git a/neutron/tests/tempest/services/identity/v3/json/endpoints_client.py b/neutron/tests/tempest/services/identity/v3/json/endpoints_client.py
deleted file mode 100644 (file)
index 27ac3e5..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from neutron.tests.tempest.common import service_client
-
-
-class EndPointClientJSON(service_client.ServiceClient):
-    api_version = "v3"
-
-    def list_endpoints(self):
-        """GET endpoints."""
-        resp, body = self.get('endpoints')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['endpoints'])
-
-    def create_endpoint(self, service_id, interface, url, **kwargs):
-        """Create endpoint.
-
-        Normally this function wouldn't allow setting values that are not
-        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.
-
-        """
-        region = kwargs.get('region', None)
-        if 'force_enabled' in kwargs:
-            enabled = kwargs.get('force_enabled', None)
-        else:
-            enabled = kwargs.get('enabled', None)
-        post_body = {
-            'service_id': service_id,
-            'interface': interface,
-            'url': url,
-            'region': region,
-            'enabled': enabled
-        }
-        post_body = json.dumps({'endpoint': post_body})
-        resp, body = self.post('endpoints', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['endpoint'])
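-
-    # Illustrative negative-test use (values are placeholders):
-    #
-    #     client.create_endpoint(service_id, 'public', url,
-    #                            force_enabled='not-a-boolean')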
-
-    def update_endpoint(self, endpoint_id, service_id=None, interface=None,
-                        url=None, region=None, enabled=None, **kwargs):
-        """Updates an endpoint with given parameters.
-
-        Normally this function wouldn't allow setting values that are not
-        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.
-
-        """
-        post_body = {}
-        if service_id is not None:
-            post_body['service_id'] = service_id
-        if interface is not None:
-            post_body['interface'] = interface
-        if url is not None:
-            post_body['url'] = url
-        if region is not None:
-            post_body['region'] = region
-        if 'force_enabled' in kwargs:
-            post_body['enabled'] = kwargs['force_enabled']
-        elif enabled is not None:
-            post_body['enabled'] = enabled
-        post_body = json.dumps({'endpoint': post_body})
-        resp, body = self.patch('endpoints/%s' % endpoint_id, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['endpoint'])
-
-    def delete_endpoint(self, endpoint_id):
-        """Delete endpoint."""
-        resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
-        self.expected_success(204, resp_header.status)
-        return service_client.ResponseBody(resp_header, resp_body)
diff --git a/neutron/tests/tempest/services/identity/v3/json/identity_client.py b/neutron/tests/tempest/services/identity/v3/json/identity_client.py
deleted file mode 100644 (file)
index a090acf..0000000
+++ /dev/null
@@ -1,523 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse
-
-from neutron.tests.tempest.common import service_client
-
-
-class IdentityV3ClientJSON(service_client.ServiceClient):
-    api_version = "v3"
-
-    def create_user(self, user_name, password=None, project_id=None,
-                    email=None, domain_id='default', **kwargs):
-        """Creates a user."""
-        en = kwargs.get('enabled', True)
-        description = kwargs.get('description', None)
-        default_project_id = kwargs.get('default_project_id')
-        post_body = {
-            'project_id': project_id,
-            'default_project_id': default_project_id,
-            'description': description,
-            'domain_id': domain_id,
-            'email': email,
-            'enabled': en,
-            'name': user_name,
-            'password': password
-        }
-        post_body = json.dumps({'user': post_body})
-        resp, body = self.post('users', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['user'])
-
-    def update_user(self, user_id, name, **kwargs):
-        """Updates a user."""
-        body = self.get_user(user_id)
-        email = kwargs.get('email', body['email'])
-        en = kwargs.get('enabled', body['enabled'])
-        project_id = kwargs.get('project_id', body['project_id'])
-        if 'default_project_id' in body:
-            default_project_id = kwargs.get('default_project_id',
-                                            body['default_project_id'])
-        else:
-            default_project_id = kwargs.get('default_project_id')
-        description = kwargs.get('description', body['description'])
-        domain_id = kwargs.get('domain_id', body['domain_id'])
-        post_body = {
-            'name': name,
-            'email': email,
-            'enabled': en,
-            'project_id': project_id,
-            'default_project_id': default_project_id,
-            'id': user_id,
-            'domain_id': domain_id,
-            'description': description
-        }
-        post_body = json.dumps({'user': post_body})
-        resp, body = self.patch('users/%s' % user_id, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['user'])
-
-    def update_user_password(self, user_id, password, original_password):
-        """Updates a user password."""
-        update_user = {
-            'password': password,
-            'original_password': original_password
-        }
-        update_user = json.dumps({'user': update_user})
-        resp, _ = self.post('users/%s/password' % user_id, update_user)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp)
-
-    def list_user_projects(self, user_id):
-        """Lists the projects on which a user has roles assigned."""
-        resp, body = self.get('users/%s/projects' % user_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['projects'])
-
-    def get_users(self, params=None):
-        """Get the list of users."""
-        url = 'users'
-        if params:
-            url += '?%s' % parse.urlencode(params)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['users'])
-
-    def get_user(self, user_id):
-        """GET a user."""
-        resp, body = self.get("users/%s" % user_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['user'])
-
-    def delete_user(self, user_id):
-        """Deletes a User."""
-        resp, body = self.delete("users/%s" % user_id)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_project(self, name, **kwargs):
-        """Creates a project."""
-        description = kwargs.get('description', None)
-        en = kwargs.get('enabled', True)
-        domain_id = kwargs.get('domain_id', 'default')
-        post_body = {
-            'description': description,
-            'domain_id': domain_id,
-            'enabled': en,
-            'name': name
-        }
-        post_body = json.dumps({'project': post_body})
-        resp, body = self.post('projects', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['project'])
-
-    def list_projects(self, params=None):
-        url = "projects"
-        if params:
-            url += '?%s' % parse.urlencode(params)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['projects'])
-
-    def update_project(self, project_id, **kwargs):
-        body = self.get_project(project_id)
-        name = kwargs.get('name', body['name'])
-        desc = kwargs.get('description', body['description'])
-        en = kwargs.get('enabled', body['enabled'])
-        domain_id = kwargs.get('domain_id', body['domain_id'])
-        post_body = {
-            'id': project_id,
-            'name': name,
-            'description': desc,
-            'enabled': en,
-            'domain_id': domain_id,
-        }
-        post_body = json.dumps({'project': post_body})
-        resp, body = self.patch('projects/%s' % project_id, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['project'])
-
-    def get_project(self, project_id):
-        """GET a Project."""
-        resp, body = self.get("projects/%s" % project_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['project'])
-
-    def delete_project(self, project_id):
-        """Delete a project."""
-        resp, body = self.delete('projects/%s' % str(project_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_role(self, name):
-        """Create a Role."""
-        post_body = {
-            'name': name
-        }
-        post_body = json.dumps({'role': post_body})
-        resp, body = self.post('roles', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
-
-    def get_role(self, role_id):
-        """GET a Role."""
-        resp, body = self.get('roles/%s' % str(role_id))
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
-
-    def list_roles(self):
-        """Get the list of Roles."""
-        resp, body = self.get("roles")
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
-
-    def update_role(self, name, role_id):
-        """Create a Role."""
-        post_body = {
-            'name': name
-        }
-        post_body = json.dumps({'role': post_body})
-        resp, body = self.patch('roles/%s' % str(role_id), post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
-
-    def delete_role(self, role_id):
-        """Delete a role."""
-        resp, body = self.delete('roles/%s' % str(role_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def assign_user_role(self, project_id, user_id, role_id):
-        """Add roles to a user on a project."""
-        resp, body = self.put('projects/%s/users/%s/roles/%s' %
-                              (project_id, user_id, role_id), None)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_domain(self, name, **kwargs):
-        """Creates a domain."""
-        description = kwargs.get('description', None)
-        en = kwargs.get('enabled', True)
-        post_body = {
-            'description': description,
-            'enabled': en,
-            'name': name
-        }
-        post_body = json.dumps({'domain': post_body})
-        resp, body = self.post('domains', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['domain'])
-
-    def delete_domain(self, domain_id):
-        """Delete a domain."""
-        resp, body = self.delete('domains/%s' % str(domain_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_domains(self):
-        """List Domains."""
-        resp, body = self.get('domains')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['domains'])
-
-    def update_domain(self, domain_id, **kwargs):
-        """Updates a domain."""
-        body = self.get_domain(domain_id)
-        description = kwargs.get('description', body['description'])
-        en = kwargs.get('enabled', body['enabled'])
-        name = kwargs.get('name', body['name'])
-        post_body = {
-            'description': description,
-            'enabled': en,
-            'name': name
-        }
-        post_body = json.dumps({'domain': post_body})
-        resp, body = self.patch('domains/%s' % domain_id, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['domain'])
-
-    def get_domain(self, domain_id):
-        """Get Domain details."""
-        resp, body = self.get('domains/%s' % domain_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['domain'])
-
-    def get_token(self, resp_token):
-        """Get token details."""
-        headers = {'X-Subject-Token': resp_token}
-        resp, body = self.get("auth/tokens", headers=headers)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['token'])
-
-    def delete_token(self, resp_token):
-        """Deletes token."""
-        headers = {'X-Subject-Token': resp_token}
-        resp, body = self.delete("auth/tokens", headers=headers)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_group(self, name, **kwargs):
-        """Creates a group."""
-        description = kwargs.get('description', None)
-        domain_id = kwargs.get('domain_id', 'default')
-        project_id = kwargs.get('project_id', None)
-        post_body = {
-            'description': description,
-            'domain_id': domain_id,
-            'project_id': project_id,
-            'name': name
-        }
-        post_body = json.dumps({'group': post_body})
-        resp, body = self.post('groups', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['group'])
-
-    def get_group(self, group_id):
-        """Get group details."""
-        resp, body = self.get('groups/%s' % group_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['group'])
-
-    def list_groups(self):
-        """Lists the groups."""
-        resp, body = self.get('groups')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['groups'])
-
-    def update_group(self, group_id, **kwargs):
-        """Updates a group."""
-        body = self.get_group(group_id)
-        name = kwargs.get('name', body['name'])
-        description = kwargs.get('description', body['description'])
-        post_body = {
-            'name': name,
-            'description': description
-        }
-        post_body = json.dumps({'group': post_body})
-        resp, body = self.patch('groups/%s' % group_id, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['group'])
-
-    def delete_group(self, group_id):
-        """Delete a group."""
-        resp, body = self.delete('groups/%s' % str(group_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def add_group_user(self, group_id, user_id):
-        """Add user into group."""
-        resp, body = self.put('groups/%s/users/%s' % (group_id, user_id),
-                              None)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_group_users(self, group_id):
-        """List users in group."""
-        resp, body = self.get('groups/%s/users' % group_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['users'])
-
-    def list_user_groups(self, user_id):
-        """Lists groups which a user belongs to."""
-        resp, body = self.get('users/%s/groups' % user_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['groups'])
-
-    def delete_group_user(self, group_id, user_id):
-        """Delete user in group."""
-        resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def assign_user_role_on_project(self, project_id, user_id, role_id):
-        """Add roles to a user on a project."""
-        resp, body = self.put('projects/%s/users/%s/roles/%s' %
-                              (project_id, user_id, role_id), None)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def assign_user_role_on_domain(self, domain_id, user_id, role_id):
-        """Add roles to a user on a domain."""
-        resp, body = self.put('domains/%s/users/%s/roles/%s' %
-                              (domain_id, user_id, role_id), None)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_user_roles_on_project(self, project_id, user_id):
-        """list roles of a user on a project."""
-        resp, body = self.get('projects/%s/users/%s/roles' %
-                              (project_id, user_id))
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
-
-    def list_user_roles_on_domain(self, domain_id, user_id):
-        """list roles of a user on a domain."""
-        resp, body = self.get('domains/%s/users/%s/roles' %
-                              (domain_id, user_id))
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
-
-    def revoke_role_from_user_on_project(self, project_id, user_id, role_id):
-        """Delete role of a user on a project."""
-        resp, body = self.delete('projects/%s/users/%s/roles/%s' %
-                                 (project_id, user_id, role_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def revoke_role_from_user_on_domain(self, domain_id, user_id, role_id):
-        """Delete role of a user on a domain."""
-        resp, body = self.delete('domains/%s/users/%s/roles/%s' %
-                                 (domain_id, user_id, role_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def assign_group_role_on_project(self, project_id, group_id, role_id):
-        """Add roles to a user on a project."""
-        resp, body = self.put('projects/%s/groups/%s/roles/%s' %
-                              (project_id, group_id, role_id), None)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def assign_group_role_on_domain(self, domain_id, group_id, role_id):
-        """Add roles to a user on a domain."""
-        resp, body = self.put('domains/%s/groups/%s/roles/%s' %
-                              (domain_id, group_id, role_id), None)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_group_roles_on_project(self, project_id, group_id):
-        """list roles of a user on a project."""
-        resp, body = self.get('projects/%s/groups/%s/roles' %
-                              (project_id, group_id))
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
-
-    def list_group_roles_on_domain(self, domain_id, group_id):
-        """list roles of a user on a domain."""
-        resp, body = self.get('domains/%s/groups/%s/roles' %
-                              (domain_id, group_id))
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
-
-    def revoke_role_from_group_on_project(self, project_id, group_id, role_id):
-        """Delete role of a user on a project."""
-        resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
-                                 (project_id, group_id, role_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def revoke_role_from_group_on_domain(self, domain_id, group_id, role_id):
-        """Delete role of a user on a domain."""
-        resp, body = self.delete('domains/%s/groups/%s/roles/%s' %
-                                 (domain_id, group_id, role_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_trust(self, trustor_user_id, trustee_user_id, project_id,
-                     role_names, impersonation, expires_at):
-        """Creates a trust."""
-        roles = [{'name': n} for n in role_names]
-        post_body = {
-            'trustor_user_id': trustor_user_id,
-            'trustee_user_id': trustee_user_id,
-            'project_id': project_id,
-            'impersonation': impersonation,
-            'roles': roles,
-            'expires_at': expires_at
-        }
-        post_body = json.dumps({'trust': post_body})
-        resp, body = self.post('OS-TRUST/trusts', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['trust'])
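-
-    # Illustrative use (IDs and the role name are placeholders):
-    #
-    #     trust = client.create_trust(trustor_id, trustee_id, project_id,
-    #                                 ['Member'], impersonation=True,
-    #                                 expires_at=None)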
-
-    def delete_trust(self, trust_id):
-        """Deletes a trust."""
-        resp, body = self.delete("OS-TRUST/trusts/%s" % trust_id)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def get_trusts(self, trustor_user_id=None, trustee_user_id=None):
-        """GET trusts."""
-        if trustor_user_id:
-            resp, body = self.get("OS-TRUST/trusts?trustor_user_id=%s"
-                                  % trustor_user_id)
-        elif trustee_user_id:
-            resp, body = self.get("OS-TRUST/trusts?trustee_user_id=%s"
-                                  % trustee_user_id)
-        else:
-            resp, body = self.get("OS-TRUST/trusts")
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['trusts'])
-
-    def get_trust(self, trust_id):
-        """GET trust."""
-        resp, body = self.get("OS-TRUST/trusts/%s" % trust_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['trust'])
-
-    def get_trust_roles(self, trust_id):
-        """GET roles delegated by a trust."""
-        resp, body = self.get("OS-TRUST/trusts/%s/roles" % trust_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
-
-    def get_trust_role(self, trust_id, role_id):
-        """GET role delegated by a trust."""
-        resp, body = self.get("OS-TRUST/trusts/%s/roles/%s"
-                              % (trust_id, role_id))
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
-
-    def check_trust_role(self, trust_id, role_id):
-        """HEAD Check if role is delegated by a trust."""
-        resp, body = self.head("OS-TRUST/trusts/%s/roles/%s"
-                               % (trust_id, role_id))
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body)
diff --git a/neutron/tests/tempest/services/identity/v3/json/policy_client.py b/neutron/tests/tempest/services/identity/v3/json/policy_client.py
deleted file mode 100644 (file)
index 2d247af..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from neutron.tests.tempest.common import service_client
-
-
-class PolicyClientJSON(service_client.ServiceClient):
-    api_version = "v3"
-
-    def create_policy(self, blob, type):
-        """Creates a Policy."""
-        post_body = {
-            "blob": blob,
-            "type": type
-        }
-        post_body = json.dumps({'policy': post_body})
-        resp, body = self.post('policies', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['policy'])
-
-    def list_policies(self):
-        """Lists the policies."""
-        resp, body = self.get('policies')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['policies'])
-
-    def get_policy(self, policy_id):
-        """Lists out the given policy."""
-        url = 'policies/%s' % policy_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['policy'])
-
-    def update_policy(self, policy_id, **kwargs):
-        """Updates a policy."""
-        type = kwargs.get('type')
-        post_body = {
-            'type': type
-        }
-        post_body = json.dumps({'policy': post_body})
-        url = 'policies/%s' % policy_id
-        resp, body = self.patch(url, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['policy'])
-
-    def delete_policy(self, policy_id):
-        """Deletes the policy."""
-        url = "policies/%s" % policy_id
-        resp, body = self.delete(url)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
diff --git a/neutron/tests/tempest/services/identity/v3/json/region_client.py b/neutron/tests/tempest/services/identity/v3/json/region_client.py
deleted file mode 100644 (file)
index 0effae8..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse
-
-from neutron.tests.tempest.common import service_client
-
-
-class RegionClientJSON(service_client.ServiceClient):
-    api_version = "v3"
-
-    def create_region(self, description, **kwargs):
-        """Create region."""
-        req_body = {
-            'description': description,
-        }
-        if kwargs.get('parent_region_id'):
-            req_body['parent_region_id'] = kwargs.get('parent_region_id')
-        req_body = json.dumps({'region': req_body})
-        if kwargs.get('unique_region_id'):
-            resp, body = self.put(
-                'regions/%s' % kwargs.get('unique_region_id'), req_body)
-        else:
-            resp, body = self.post('regions', req_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['region'])
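-
-    # Passing unique_region_id switches the request to PUT so that the
-    # caller chooses the new region's ID, e.g. (placeholder values):
-    #
-    #     client.create_region('US West', unique_region_id='us-west-1')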
-
-    def update_region(self, region_id, **kwargs):
-        """Updates a region."""
-        post_body = {}
-        if 'description' in kwargs:
-            post_body['description'] = kwargs.get('description')
-        if 'parent_region_id' in kwargs:
-            post_body['parent_region_id'] = kwargs.get('parent_region_id')
-        post_body = json.dumps({'region': post_body})
-        resp, body = self.patch('regions/%s' % region_id, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['region'])
-
-    def get_region(self, region_id):
-        """Get region."""
-        url = 'regions/%s' % region_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['region'])
-
-    def list_regions(self, params=None):
-        """List regions."""
-        url = 'regions'
-        if params:
-            url += '?%s' % parse.urlencode(params)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['regions'])
-
-    def delete_region(self, region_id):
-        """Delete region."""
-        resp, body = self.delete('regions/%s' % region_id)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
diff --git a/neutron/tests/tempest/services/identity/v3/json/service_client.py b/neutron/tests/tempest/services/identity/v3/json/service_client.py
deleted file mode 100644 (file)
index 75a5cf8..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from neutron.tests.tempest.common import service_client
-
-
-class ServiceClientJSON(service_client.ServiceClient):
-    api_version = "v3"
-
-    def update_service(self, service_id, **kwargs):
-        """Updates a service."""
-        body = self.get_service(service_id)
-        name = kwargs.get('name', body['name'])
-        type = kwargs.get('type', body['type'])
-        desc = kwargs.get('description', body['description'])
-        patch_body = {
-            'description': desc,
-            'type': type,
-            'name': name
-        }
-        patch_body = json.dumps({'service': patch_body})
-        resp, body = self.patch('services/%s' % service_id, patch_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['service'])
-
-    def get_service(self, service_id):
-        """Get Service."""
-        url = 'services/%s' % service_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['service'])
-
-    def create_service(self, serv_type, name=None, description=None,
-                       enabled=True):
-        body_dict = {
-            'name': name,
-            'type': serv_type,
-            'enabled': enabled,
-            'description': description,
-        }
-        body = json.dumps({'service': body_dict})
-        resp, body = self.post("services", body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body["service"])
-
-    def delete_service(self, serv_id):
-        url = "services/" + serv_id
-        resp, body = self.delete(url)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_services(self):
-        resp, body = self.get('services')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['services'])
diff --git a/neutron/tests/tempest/services/network/__init__.py b/neutron/tests/tempest/services/network/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/services/network/json/__init__.py b/neutron/tests/tempest/services/network/json/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/tempest/services/network/json/network_client.py b/neutron/tests/tempest/services/network/json/network_client.py
deleted file mode 100644 (file)
index caefc6d..0000000
+++ /dev/null
@@ -1,624 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urlparse
-from tempest_lib.common.utils import misc
-from tempest_lib import exceptions as lib_exc
-
-from neutron.tests.tempest.common import service_client
-from neutron.tests.tempest import exceptions
-
-
-class NetworkClientJSON(service_client.ServiceClient):
-
-    """
-    Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
-    V1 API has been removed from the code base.
-
-    Implements create, delete, update, list and show for the basic Neutron
-    abstractions (networks, sub-networks, routers, ports and floating IP):
-
-    Implements add/remove interface to router using subnet ID / port ID
-
-    It also implements list, show, update and reset for OpenStack Networking
-    quotas
-    """
-
-    version = '2.0'
-    uri_prefix = "v2.0"
-
-    def get_uri(self, plural_name):
-        # get service prefix from resource name
-
-        # The following list represents resource names that do not require
-        # changing underscore to a hyphen
-        hyphen_exceptions = [
-            "firewall_rules", "firewall_policies", "service_profiles"]
-        # the following map is used to construct proper URI
-        # for the given neutron resource
-        service_resource_prefix_map = {
-            'networks': '',
-            'subnets': '',
-            'subnetpools': '',
-            'ports': '',
-            'ipsecpolicies': 'vpn',
-            'vpnservices': 'vpn',
-            'ikepolicies': 'vpn',
-            'ipsec-site-connections': 'vpn',
-            'metering_labels': 'metering',
-            'metering_label_rules': 'metering',
-            'firewall_rules': 'fw',
-            'firewall_policies': 'fw',
-            'firewalls': 'fw',
-            'policies': 'qos',
-            'bandwidth_limit_rules': 'qos',
-            'rule_types': 'qos',
-            'rbac-policies': '',
-        }
-        service_prefix = service_resource_prefix_map.get(
-            plural_name)
-        if plural_name not in hyphen_exceptions:
-            plural_name = plural_name.replace("_", "-")
-        if service_prefix:
-            uri = '%s/%s/%s' % (self.uri_prefix, service_prefix,
-                                plural_name)
-        else:
-            uri = '%s/%s' % (self.uri_prefix, plural_name)
-        return uri
-
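-    # Illustrative behaviour of get_uri(), given the maps above (the client
-    # instance here is hypothetical):
-    #
-    #     client.get_uri('networks')         # -> 'v2.0/networks'
-    #     client.get_uri('vpnservices')      # -> 'v2.0/vpn/vpnservices'
-    #     client.get_uri('metering_labels')  # -> 'v2.0/metering/metering-labels'
-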
-    def pluralize(self, resource_name):
-        # get plural from map or just add 's'
-
-        # map from resource name to a plural name
-        # needed only for those which can't be constructed as name + 's'
-        resource_plural_map = {
-            'security_groups': 'security_groups',
-            'security_group_rules': 'security_group_rules',
-            'ipsecpolicy': 'ipsecpolicies',
-            'ikepolicy': 'ikepolicies',
-            'ipsec_site_connection': 'ipsec-site-connections',
-            'quotas': 'quotas',
-            'firewall_policy': 'firewall_policies',
-            'qos_policy': 'policies',
-            'rbac_policy': 'rbac_policies',
-        }
-        return resource_plural_map.get(resource_name, resource_name + 's')
-
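-    # Illustrative behaviour of pluralize(), given the map above:
-    #
-    #     client.pluralize('port')        # -> 'ports' (default: name + 's')
-    #     client.pluralize('qos_policy')  # -> 'policies' (irregular plural)
-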
-    def _lister(self, plural_name):
-        def _list(**filters):
-            uri = self.get_uri(plural_name)
-            if filters:
-                uri += '?' + urlparse.urlencode(filters, doseq=1)
-            resp, body = self.get(uri)
-            result = {plural_name: self.deserialize_list(body)}
-            self.expected_success(200, resp.status)
-            return service_client.ResponseBody(resp, result)
-
-        return _list
-
-    def _deleter(self, resource_name):
-        def _delete(resource_id):
-            plural = self.pluralize(resource_name)
-            uri = '%s/%s' % (self.get_uri(plural), resource_id)
-            resp, body = self.delete(uri)
-            self.expected_success(204, resp.status)
-            return service_client.ResponseBody(resp, body)
-
-        return _delete
-
-    def _shower(self, resource_name):
-        def _show(resource_id, **fields):
-            # fields is a dict whose key is 'fields' and whose value is a
-            # list of field names, for example:
-            # {'fields': ['id', 'name']}
-            plural = self.pluralize(resource_name)
-            uri = '%s/%s' % (self.get_uri(plural), resource_id)
-            if fields:
-                uri += '?' + urlparse.urlencode(fields, doseq=1)
-            resp, body = self.get(uri)
-            body = self.deserialize_single(body)
-            self.expected_success(200, resp.status)
-            return service_client.ResponseBody(resp, body)
-
-        return _show
-
-    def _creater(self, resource_name):
-        def _create(**kwargs):
-            plural = self.pluralize(resource_name)
-            uri = self.get_uri(plural)
-            post_data = self.serialize({resource_name: kwargs})
-            resp, body = self.post(uri, post_data)
-            body = self.deserialize_single(body)
-            self.expected_success(201, resp.status)
-            return service_client.ResponseBody(resp, body)
-
-        return _create
-
-    def _updater(self, resource_name):
-        def _update(res_id, **kwargs):
-            plural = self.pluralize(resource_name)
-            uri = '%s/%s' % (self.get_uri(plural), res_id)
-            post_data = self.serialize({resource_name: kwargs})
-            resp, body = self.put(uri, post_data)
-            body = self.deserialize_single(body)
-            self.expected_success(200, resp.status)
-            return service_client.ResponseBody(resp, body)
-
-        return _update
-
-    def __getattr__(self, name):
-        method_prefixes = ["list_", "delete_", "show_", "create_", "update_"]
-        method_functors = [self._lister,
-                           self._deleter,
-                           self._shower,
-                           self._creater,
-                           self._updater]
-        for index, prefix in enumerate(method_prefixes):
-            prefix_len = len(prefix)
-            if name[:prefix_len] == prefix:
-                return method_functors[index](name[prefix_len:])
-        raise AttributeError(name)
-
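-    # Because of __getattr__ above, the CRUD methods are synthesized on
-    # demand from the helpers; e.g. (illustrative, ids are placeholders):
-    #
-    #     client.list_networks(name='net1')  # GET v2.0/networks?name=net1
-    #     client.show_port(port_id)          # GET v2.0/ports/<port_id>
-    #     client.delete_subnet(subnet_id)    # DELETE v2.0/subnets/<subnet_id>
-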
-    # Common methods that are hard to automate
-    def create_bulk_network(self, names, shared=False):
-        network_list = [{'name': name, 'shared': shared} for name in names]
-        post_data = {'networks': network_list}
-        body = self.serialize_list(post_data, "networks", "network")
-        uri = self.get_uri("networks")
-        resp, body = self.post(uri, body)
-        body = {'networks': self.deserialize_list(body)}
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_bulk_subnet(self, subnet_list):
-        post_data = {'subnets': subnet_list}
-        body = self.serialize_list(post_data, 'subnets', 'subnet')
-        uri = self.get_uri('subnets')
-        resp, body = self.post(uri, body)
-        body = {'subnets': self.deserialize_list(body)}
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_bulk_port(self, port_list):
-        post_data = {'ports': port_list}
-        body = self.serialize_list(post_data, 'ports', 'port')
-        uri = self.get_uri('ports')
-        resp, body = self.post(uri, body)
-        body = {'ports': self.deserialize_list(body)}
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def wait_for_resource_deletion(self, resource_type, id):
-        """Waits for a resource to be deleted."""
-        start_time = int(time.time())
-        while True:
-            if self.is_resource_deleted(resource_type, id):
-                return
-            if int(time.time()) - start_time >= self.build_timeout:
-                raise exceptions.TimeoutException
-            time.sleep(self.build_interval)
-
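-    # Typical usage (illustrative, port_id is a placeholder): block until
-    # the port is gone, or raise TimeoutException after build_timeout:
-    #
-    #     client.delete_port(port_id)
-    #     client.wait_for_resource_deletion('port', port_id)
-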
-    def is_resource_deleted(self, resource_type, id):
-        method = 'show_' + resource_type
-        try:
-            getattr(self, method)(id)
-        except AttributeError:
-            raise Exception("Unknown resource type %s " % resource_type)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    def wait_for_resource_status(self, fetch, status, interval=None,
-                                 timeout=None):
-        """
-        @summary: Waits for a network resource to reach a status
-        @param fetch: the callable to be used to query the resource status
-        @type fetch: callable that takes no parameters and returns the resource
-        @param status: the status that the resource has to reach
-        @type status: String
-        @param interval: the number of seconds to wait between each status
-          query
-        @type interval: Integer
-        @param timeout: the maximum number of seconds to wait for the resource
-          to reach the desired status
-        @type timeout: Integer
-        """
-        if not interval:
-            interval = self.build_interval
-        if not timeout:
-            timeout = self.build_timeout
-        start_time = time.time()
-
-        while time.time() - start_time <= timeout:
-            resource = fetch()
-            if resource['status'] == status:
-                return
-            time.sleep(interval)
-
-        # At this point, the wait has timed out
-        message = 'Resource %s' % (str(resource))
-        message += ' failed to reach status %s' % status
-        message += ' (current: %s)' % resource['status']
-        message += ' within the required time %s' % timeout
-        caller = misc.find_test_caller()
-        if caller:
-            message = '(%s) %s' % (caller, message)
-        raise exceptions.TimeoutException(message)
-
-    def deserialize_single(self, body):
-        return json.loads(body)
-
-    def deserialize_list(self, body):
-        res = json.loads(body)
-        # expecting response in form
-        # {'resources': [ res1, res2] } => when pagination disabled
-        # {'resources': [..], 'resources_links': {}} => if pagination enabled
-        for k in res.keys():
-            if k.endswith("_links"):
-                continue
-            return res[k]
-
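-    # Illustrative input/output for deserialize_list() with pagination
-    # enabled; the '*_links' key is skipped and the first remaining key is
-    # assumed to hold the resource list:
-    #
-    #     '{"ports": [{"id": "a"}], "ports_links": {}}'  ->  [{'id': 'a'}]
-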
-    def serialize(self, data):
-        return json.dumps(data)
-
-    def serialize_list(self, data, root=None, item=None):
-        return self.serialize(data)
-
-    def update_quotas(self, tenant_id, **kwargs):
-        put_body = {'quota': kwargs}
-        body = json.dumps(put_body)
-        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['quota'])
-
-    def reset_quotas(self, tenant_id):
-        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_router(self, name, admin_state_up=True, **kwargs):
-        post_body = {'router': kwargs}
-        post_body['router']['name'] = name
-        post_body['router']['admin_state_up'] = admin_state_up
-        body = json.dumps(post_body)
-        uri = '%s/routers' % (self.uri_prefix)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def _update_router(self, router_id, set_enable_snat, **kwargs):
-        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        update_body = {}
-        update_body['name'] = kwargs.get('name', body['router']['name'])
-        update_body['admin_state_up'] = kwargs.get(
-            'admin_state_up', body['router']['admin_state_up'])
-        cur_gw_info = body['router']['external_gateway_info']
-        if cur_gw_info:
-            # TODO(kevinbenton): setting the external gateway info is not
-            # allowed for a regular tenant. If the ability to update is also
-            # merged, a test case for this will need to be added similar to
-            # the SNAT case.
-            cur_gw_info.pop('external_fixed_ips', None)
-            if not set_enable_snat:
-                cur_gw_info.pop('enable_snat', None)
-        update_body['external_gateway_info'] = kwargs.get(
-            'external_gateway_info', body['router']['external_gateway_info'])
-        if 'distributed' in kwargs:
-            update_body['distributed'] = kwargs['distributed']
-        update_body = dict(router=update_body)
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_router(self, router_id, **kwargs):
-        """Update a router leaving enable_snat to its default value."""
-        # If external_gateway_info contains enable_snat the request will fail
-        # with 404 unless executed with admin client, and therefore we instruct
-        # _update_router to not set this attribute
-        # NOTE(salv-orlando): The above applies as long as Neutron's default
-        # policy is to restrict enable_snat usage to admins only.
-        return self._update_router(router_id, set_enable_snat=False, **kwargs)
-
-    def update_router_with_snat_gw_info(self, router_id, **kwargs):
-        """Update a router passing also the enable_snat attribute.
-
-        This method must be executed with admin credentials, otherwise the
-        API call will return a 404 error.
-        """
-        return self._update_router(router_id, set_enable_snat=True, **kwargs)
-
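-    # Illustrative contrast of the two helpers above (admin_client and the
-    # ids are placeholders):
-    #
-    #     client.update_router(router_id, name='r2')
-    #     admin_client.update_router_with_snat_gw_info(
-    #         router_id,
-    #         external_gateway_info={'network_id': ext_net_id,
-    #                                'enable_snat': False})
-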
-    def add_router_interface_with_subnet_id(self, router_id, subnet_id):
-        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
-                                                      router_id)
-        update_body = {"subnet_id": subnet_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def add_router_interface_with_port_id(self, router_id, port_id):
-        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
-                                                      router_id)
-        update_body = {"port_id": port_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
-        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
-                                                         router_id)
-        update_body = {"subnet_id": subnet_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def remove_router_interface_with_port_id(self, router_id, port_id):
-        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
-                                                         router_id)
-        update_body = {"port_id": port_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_router_interfaces(self, uuid):
-        uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_agent(self, agent_id, agent_info):
-        """
-        :param agent_info: Agent update information.
-        E.g {"admin_state_up": True}
-        """
-        uri = '%s/agents/%s' % (self.uri_prefix, agent_id)
-        agent = {"agent": agent_info}
-        body = json.dumps(agent)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_routers_on_l3_agent(self, agent_id):
-        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_l3_agents_hosting_router(self, router_id):
-        uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def add_router_to_l3_agent(self, agent_id, router_id):
-        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
-        post_body = {"router_id": router_id}
-        body = json.dumps(post_body)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def remove_router_from_l3_agent(self, agent_id, router_id):
-        uri = '%s/agents/%s/l3-routers/%s' % (
-            self.uri_prefix, agent_id, router_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_dhcp_agent_hosting_network(self, network_id):
-        uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
-        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def remove_network_from_dhcp_agent(self, agent_id, network_id):
-        uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id,
-                                                 network_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_ikepolicy(self, name, **kwargs):
-        post_body = {
-            "ikepolicy": {
-                "name": name,
-            }
-        }
-        for key, val in kwargs.items():
-            post_body['ikepolicy'][key] = val
-        body = json.dumps(post_body)
-        uri = '%s/vpn/ikepolicies' % (self.uri_prefix)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_extra_routes(self, router_id, nexthop, destination):
-        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
-        put_body = {
-            'router': {
-                'routes': [{'nexthop': nexthop,
-                            "destination": destination}]
-            }
-        }
-        body = json.dumps(put_body)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_extra_routes(self, router_id):
-        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
-        put_body = {
-            'router': {
-                'routes': None
-            }
-        }
-        body = json.dumps(put_body)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def add_dhcp_agent_to_network(self, agent_id, network_id):
-        post_body = {'network_id': network_id}
-        body = json.dumps(post_body)
-        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def insert_firewall_rule_in_policy(self, firewall_policy_id,
-                                       firewall_rule_id, insert_after="",
-                                       insert_before=""):
-        uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
-                                                          firewall_policy_id)
-        body = {
-            "firewall_rule_id": firewall_rule_id,
-            "insert_after": insert_after,
-            "insert_before": insert_before
-        }
-        body = json.dumps(body)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def remove_firewall_rule_from_policy(self, firewall_policy_id,
-                                         firewall_rule_id):
-        uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
-                                                          firewall_policy_id)
-        update_body = {"firewall_rule_id": firewall_rule_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_qos_policies(self, **filters):
-        if filters:
-            uri = '%s/qos/policies?%s' % (self.uri_prefix,
-                                          urlparse.urlencode(filters))
-        else:
-            uri = '%s/qos/policies' % self.uri_prefix
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_qos_policy(self, name, description, shared, tenant_id=None):
-        uri = '%s/qos/policies' % self.uri_prefix
-        post_data = {'policy': {
-                'name': name,
-                'description': description,
-                'shared': shared
-            }}
-        if tenant_id is not None:
-            post_data['policy']['tenant_id'] = tenant_id
-        resp, body = self.post(uri, self.serialize(post_data))
-        body = self.deserialize_single(body)
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def update_qos_policy(self, policy_id, **kwargs):
-        uri = '%s/qos/policies/%s' % (self.uri_prefix, policy_id)
-        post_data = self.serialize({'policy': kwargs})
-        resp, body = self.put(uri, post_data)
-        body = self.deserialize_single(body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_bandwidth_limit_rule(self, policy_id, max_kbps, max_burst_kbps):
-        uri = '%s/qos/policies/%s/bandwidth_limit_rules' % (
-            self.uri_prefix, policy_id)
-        post_data = self.serialize(
-            {'bandwidth_limit_rule': {
-                'max_kbps': max_kbps,
-                'max_burst_kbps': max_burst_kbps}
-            })
-        resp, body = self.post(uri, post_data)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_bandwidth_limit_rules(self, policy_id):
-        uri = '%s/qos/policies/%s/bandwidth_limit_rules' % (
-            self.uri_prefix, policy_id)
-        resp, body = self.get(uri)
-        body = self.deserialize_single(body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def show_bandwidth_limit_rule(self, policy_id, rule_id):
-        uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % (
-            self.uri_prefix, policy_id, rule_id)
-        resp, body = self.get(uri)
-        body = self.deserialize_single(body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def update_bandwidth_limit_rule(self, policy_id, rule_id, **kwargs):
-        uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % (
-            self.uri_prefix, policy_id, rule_id)
-        post_data = {'bandwidth_limit_rule': kwargs}
-        resp, body = self.put(uri, json.dumps(post_data))
-        body = self.deserialize_single(body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_bandwidth_limit_rule(self, policy_id, rule_id):
-        uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % (
-            self.uri_prefix, policy_id, rule_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_qos_rule_types(self):
-        uri = '%s/qos/rule-types' % self.uri_prefix
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
diff --git a/neutron/tests/tempest/services/network/resources.py b/neutron/tests/tempest/services/network/resources.py
deleted file mode 100644 (file)
index 962dfc5..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import six
-
-
-class AttributeDict(dict):
-
-    """
-    Provide attribute access (dict.key) to dictionary values.
-    """
-
-    def __getattr__(self, name):
-        """Allow attribute access for all keys in the dict."""
-        if name in self:
-            return self[name]
-        return super(AttributeDict, self).__getattribute__(name)
-
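-    # Minimal sketch of the behaviour (illustrative):
-    #
-    #     d = AttributeDict({'id': '42', 'name': 'net'})
-    #     d.id    # -> '42', same as d['id']
-    #     d.keys  # not in the dict, falls back to the normal dict attribute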
-
-@six.add_metaclass(abc.ABCMeta)
-class DeletableResource(AttributeDict):
-
-    """
-    Support deletion of neutron resources (networks, subnets) via a
-    delete() method, as is supported by keystone and nova resources.
-    """
-
-    def __init__(self, *args, **kwargs):
-        self.client = kwargs.pop('client', None)
-        super(DeletableResource, self).__init__(*args, **kwargs)
-
-    def __str__(self):
-        return '<%s id="%s" name="%s">' % (self.__class__.__name__,
-                                           self.id, self.name)
-
-    @abc.abstractmethod
-    def delete(self):
-        return
-
-    @abc.abstractmethod
-    def refresh(self):
-        return
-
-    def __hash__(self):
-        return hash(self.id)
-
-    def wait_for_status(self, status):
-        if not hasattr(self, 'status'):
-            return
-
-        def helper_get():
-            self.refresh()
-            return self
-
-        return self.client.wait_for_resource_status(helper_get, status)
-
-
-class DeletableNetwork(DeletableResource):
-
-    def delete(self):
-        self.client.delete_network(self.id)
-
-
-class DeletableSubnet(DeletableResource):
-
-    def __init__(self, *args, **kwargs):
-        super(DeletableSubnet, self).__init__(*args, **kwargs)
-        self._router_ids = set()
-
-    def update(self, *args, **kwargs):
-        result = self.client.update_subnet(self.id,
-                                           *args,
-                                           **kwargs)
-        return super(DeletableSubnet, self).update(**result['subnet'])
-
-    def add_to_router(self, router_id):
-        self._router_ids.add(router_id)
-        self.client.add_router_interface_with_subnet_id(router_id,
-                                                        subnet_id=self.id)
-
-    def delete(self):
-        for router_id in self._router_ids.copy():
-            self.client.remove_router_interface_with_subnet_id(
-                router_id,
-                subnet_id=self.id)
-            self._router_ids.remove(router_id)
-        self.client.delete_subnet(self.id)
-
-
-class DeletableRouter(DeletableResource):
-
-    def set_gateway(self, network_id):
-        return self.update(external_gateway_info=dict(network_id=network_id))
-
-    def unset_gateway(self):
-        return self.update(external_gateway_info=dict())
-
-    def update(self, *args, **kwargs):
-        result = self.client.update_router(self.id,
-                                           *args,
-                                           **kwargs)
-        return super(DeletableRouter, self).update(**result['router'])
-
-    def delete(self):
-        self.unset_gateway()
-        self.client.delete_router(self.id)
-
-
-class DeletableFloatingIp(DeletableResource):
-
-    def refresh(self, *args, **kwargs):
-        result = self.client.show_floatingip(self.id,
-                                             *args,
-                                             **kwargs)
-        super(DeletableFloatingIp, self).update(**result['floatingip'])
-
-    def update(self, *args, **kwargs):
-        result = self.client.update_floatingip(self.id,
-                                               *args,
-                                               **kwargs)
-        super(DeletableFloatingIp, self).update(**result['floatingip'])
-
-    def __repr__(self):
-        return '<%s addr="%s">' % (self.__class__.__name__,
-                                   self.floating_ip_address)
-
-    def __str__(self):
-        return '<"FloatingIP" addr="%s" id="%s">' % (self.floating_ip_address,
-                                                     self.id)
-
-    def delete(self):
-        self.client.delete_floatingip(self.id)
-
-
-class DeletablePort(DeletableResource):
-
-    def delete(self):
-        self.client.delete_port(self.id)
-
-
-class DeletableSecurityGroup(DeletableResource):
-
-    def delete(self):
-        self.client.delete_security_group(self.id)
-
-
-class DeletableSecurityGroupRule(DeletableResource):
-
-    def __repr__(self):
-        return '<%s id="%s">' % (self.__class__.__name__, self.id)
-
-    def delete(self):
-        self.client.delete_security_group_rule(self.id)
diff --git a/neutron/tests/tempest/test.py b/neutron/tests/tempest/test.py
deleted file mode 100644 (file)
index 3abf826..0000000
+++ /dev/null
@@ -1,675 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import atexit
-import functools
-import os
-import re
-import sys
-import time
-import uuid
-
-import fixtures
-from oslo_log import log as logging
-from oslo_serialization import jsonutils as json
-from oslo_utils import importutils
-import six
-from six.moves.urllib import parse
-import testscenarios
-import testtools
-
-from neutron.tests.api import clients
-from neutron.tests.tempest.common import credentials
-import neutron.tests.tempest.common.generator.valid_generator as valid
-from neutron.tests.tempest import config
-from neutron.tests.tempest import exceptions
-
-LOG = logging.getLogger(__name__)
-
-CONF = config.CONF
-
-
-def attr(*args, **kwargs):
-    """A decorator which applies the  testtools attr decorator
-
-    This decorator applies the testtools.testcase.attr if it is in the list of
-    attributes to testtools we want to apply.
-    """
-
-    def decorator(f):
-        if 'type' in kwargs and isinstance(kwargs['type'], str):
-            f = testtools.testcase.attr(kwargs['type'])(f)
-            if kwargs['type'] == 'smoke':
-                f = testtools.testcase.attr('gate')(f)
-        elif 'type' in kwargs and isinstance(kwargs['type'], list):
-            for attr in kwargs['type']:
-                f = testtools.testcase.attr(attr)(f)
-                if attr == 'smoke':
-                    f = testtools.testcase.attr('gate')(f)
-        return f
-
-    return decorator
-
-
-def idempotent_id(id):
-    """Stub for metadata decorator"""
-    if not isinstance(id, six.string_types):
-        raise TypeError('Test idempotent_id must be a string, not %s'
-                        % type(id).__name__)
-    uuid.UUID(id)
-
-    def decorator(f):
-        f = testtools.testcase.attr('id-%s' % id)(f)
-        if f.__doc__:
-            f.__doc__ = 'Test idempotent id: %s\n%s' % (id, f.__doc__)
-        else:
-            f.__doc__ = 'Test idempotent id: %s' % id
-        return f
-    return decorator
-
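-# Illustrative usage of idempotent_id (the UUID below is hypothetical):
-#
-#     @idempotent_id('12345678-1234-5678-1234-567812345678')
-#     def test_create_network(self):
-#         ...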
-
-def get_service_list():
-    service_list = {
-        'compute': CONF.service_available.nova,
-        'image': CONF.service_available.glance,
-        'baremetal': CONF.service_available.ironic,
-        'volume': CONF.service_available.cinder,
-        'orchestration': CONF.service_available.heat,
-        # NOTE(mtreinish) nova-network will provide networking functionality
-        # if neutron isn't available, so always set to True.
-        'network': True,
-        'identity': True,
-        'object_storage': CONF.service_available.swift,
-        'dashboard': CONF.service_available.horizon,
-        'telemetry': CONF.service_available.ceilometer,
-        'data_processing': CONF.service_available.sahara,
-        'database': CONF.service_available.trove
-    }
-    return service_list
-
-
-def services(*args, **kwargs):
-    """A decorator used to set an attr for each service used in a test case
-
-    This decorator applies a testtools attr for each service that gets
-    exercised by a test case.
-    """
-    def decorator(f):
-        services = ['compute', 'image', 'baremetal', 'volume', 'orchestration',
-                    'network', 'identity', 'object_storage', 'dashboard',
-                    'telemetry', 'data_processing', 'database']
-        for service in args:
-            if service not in services:
-                raise exceptions.InvalidServiceTag('%s is not a valid '
-                                                   'service' % service)
-        attr(type=list(args))(f)
-
-        @functools.wraps(f)
-        def wrapper(self, *func_args, **func_kwargs):
-            service_list = get_service_list()
-
-            for service in args:
-                if not service_list[service]:
-                    msg = 'Skipped because the %s service is not available' % (
-                        service)
-                    raise testtools.TestCase.skipException(msg)
-            return f(self, *func_args, **func_kwargs)
-        return wrapper
-    return decorator
-
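-# Illustrative usage of services(): skip unless nova and glance are
-# available, and tag the test with 'compute' and 'image' attrs:
-#
-#     @services('compute', 'image')
-#     def test_boot_from_image(self):
-#         ...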
-
-def stresstest(*args, **kwargs):
-    """Add stress test decorator
-
-    For all functions with this decorator, a 'stress' attr will be
-    set automatically.
-
-    @param class_setup_per: allowed values are application, process, action
-           ``application``: once in the stress job lifetime
-           ``process``: once in the worker process lifetime
-           ``action``: on each action
-    @param allow_inheritance: allows inheritance of this attribute
-    """
-    def decorator(f):
-        if 'class_setup_per' in kwargs:
-            setattr(f, "st_class_setup_per", kwargs['class_setup_per'])
-        else:
-            setattr(f, "st_class_setup_per", 'process')
-        if 'allow_inheritance' in kwargs:
-            setattr(f, "st_allow_inheritance", kwargs['allow_inheritance'])
-        else:
-            setattr(f, "st_allow_inheritance", False)
-        attr(type='stress')(f)
-        return f
-    return decorator
-
-
-def requires_ext(*args, **kwargs):
-    """A decorator to skip tests if an extension is not enabled
-
-    @param extension: name of the extension that must be enabled
-    @param service: the service the extension belongs to
-    """
-    def decorator(func):
-        @functools.wraps(func)
-        def wrapper(*func_args, **func_kwargs):
-            if not is_extension_enabled(kwargs['extension'],
-                                        kwargs['service']):
-                msg = "Skipped because %s extension: %s is not enabled" % (
-                    kwargs['service'], kwargs['extension'])
-                raise testtools.TestCase.skipException(msg)
-            return func(*func_args, **func_kwargs)
-        return wrapper
-    return decorator
-
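-# Illustrative usage of requires_ext(): skip unless the 'quotas' extension
-# is enabled for the network service:
-#
-#     @requires_ext(extension='quotas', service='network')
-#     def test_update_quotas(self):
-#         ...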
-
-def is_extension_enabled(extension_name, service):
-    """A function that will check the list of enabled extensions from config
-
-    """
-    config_dict = {
-        'compute': CONF.compute_feature_enabled.api_extensions,
-        'volume': CONF.volume_feature_enabled.api_extensions,
-        'network': CONF.network_feature_enabled.api_extensions,
-        'object': CONF.object_storage_feature_enabled.discoverable_apis,
-    }
-    if len(config_dict[service]) == 0:
-        return False
-    if config_dict[service][0] == 'all':
-        return True
-    if extension_name in config_dict[service]:
-        return True
-    return False
-
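-# Illustrative results, depending on what the config option holds:
-#
-#     []          -> is_extension_enabled('quotas', 'network') is False
-#     ['all']     -> True for any extension name
-#     ['quotas']  -> True only for the 'quotas' extension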
-
-at_exit_set = set()
-
-
-def validate_tearDownClass():
-    if at_exit_set:
-        LOG.error(
-            "tearDownClass does not call the super's "
-            "tearDownClass in these classes: \n"
-            + str(at_exit_set))
-
-
-atexit.register(validate_tearDownClass)
-
-
-class BaseTestCase(testtools.testcase.WithAttributes,
-                   testtools.TestCase):
-    """The test base class defines Tempest framework for class level fixtures.
-    `setUpClass` and `tearDownClass` are defined here and cannot be overwritten
-    by subclasses (enforced via hacking rule T105).
-
-    Set-up is split into a series of steps (setup stages), which can be
-    overwritten by test classes. Set-up stages are:
-    - skip_checks
-    - setup_credentials
-    - setup_clients
-    - resource_setup
-
-    Tear-down is also split into a series of steps (teardown stages), which are
-    stacked for execution only if the corresponding setup stage had been
-    reached during the setup phase. Tear-down stages are:
-    - clear_isolated_creds (defined in the base test class)
-    - resource_cleanup
-    """
-
-    setUpClassCalled = False
-    _service = None
-
-    network_resources = {}
-
-    # NOTE(sdague): log_format is defined inline here instead of using the oslo
-    # default because going through the config path recouples config to the
-    # stress tests too early, and depending on testr order will fail unit tests
-    log_format = ('%(asctime)s %(process)d %(levelname)-8s '
-                  '[%(name)s] %(message)s')
-
-    @classmethod
-    def setUpClass(cls):
-        # It should never be overridden by descendants
-        if hasattr(super(BaseTestCase, cls), 'setUpClass'):
-            super(BaseTestCase, cls).setUpClass()
-        cls.setUpClassCalled = True
-        # Stack of (name, callable) to be invoked in reverse order at teardown
-        cls.teardowns = []
-        # All the configuration checks that may generate a skip
-        cls.skip_checks()
-        try:
-            # Allocation of all required credentials and client managers
-            cls.teardowns.append(('credentials', cls.clear_isolated_creds))
-            cls.setup_credentials()
-            # Shortcuts to clients
-            cls.setup_clients()
-            # Additional class-wide test resources
-            cls.teardowns.append(('resources', cls.resource_cleanup))
-            cls.resource_setup()
-        except Exception:
-            etype, value, trace = sys.exc_info()
-            LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
-                     etype, cls.__name__))
-            cls.tearDownClass()
-            try:
-                six.reraise(etype, value, trace)
-            finally:
-                del trace  # to avoid circular refs
-
-    @classmethod
-    def tearDownClass(cls):
-        at_exit_set.discard(cls)
-        # It should never be overridden by descendants
-        if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
-            super(BaseTestCase, cls).tearDownClass()
-        # Save any existing exception, we always want to re-raise the original
-        # exception only
-        etype, value, trace = sys.exc_info()
-        # If there was no exception during setup we shall re-raise the first
-        # exception in teardown
-        re_raise = (etype is None)
-        while cls.teardowns:
-            name, teardown = cls.teardowns.pop()
-            # Catch any exception in tearDown so we can re-raise the original
-            # exception at the end
-            try:
-                teardown()
-            except Exception as te:
-                sys_exec_info = sys.exc_info()
-                tetype = sys_exec_info[0]
-                # TODO(andreaf): Till we have the ability to cleanup only
-                # resources that were successfully setup in resource_cleanup,
-                # log AttributeError as info instead of exception.
-                if tetype is AttributeError and name == 'resources':
-                    LOG.info("tearDownClass of %s failed: %s" % (name, te))
-                else:
-                    LOG.exception("teardown of %s failed: %s" % (name, te))
-                if not etype:
-                    etype, value, trace = sys_exec_info
-        # If exceptions were raised during teardown, and not before, re-raise
-        # the first one
-        if re_raise and etype is not None:
-            try:
-                six.reraise(etype, value, trace)
-            finally:
-                del trace  # to avoid circular refs
-
-    @classmethod
-    def skip_checks(cls):
-        """Class level skip checks. Subclasses verify in here all
-        conditions that might prevent the execution of the entire test class.
-        Checks implemented here may not make API calls, and should rely on
-        configuration alone.
-        In general skip checks that require an API call are discouraged.
-        If one is really needed it may be implemented either in the
-        resource_setup or at test level.
-        """
-        pass
-
-    @classmethod
-    def setup_credentials(cls):
-        """Allocate credentials and the client managers from them."""
-        # TODO(andreaf) There is a fair amount of code that could be moved from
-        # base / test classes in here. Ideally tests should be able to only
-        # specify a list of (additional) credentials they need to use.
-        pass
-
-    @classmethod
-    def setup_clients(cls):
-        """Create links to the clients into the test object."""
-        # TODO(andreaf) There is a fair amount of code that could be moved from
-        # base / test classes in here. Ideally tests should be able to only
-        # specify which client is `client` and nothing else.
-        pass
-
-    @classmethod
-    def resource_setup(cls):
-        """Class level resource setup for test cases.
-        """
-        pass
-
-    @classmethod
-    def resource_cleanup(cls):
-        """Class level resource cleanup for test cases.
-        Resource cleanup must be able to handle the case of partially setup
-        resources, in case a failure during `resource_setup` should happen.
-        """
-        pass
-
-    def setUp(self):
-        super(BaseTestCase, self).setUp()
-        if not self.setUpClassCalled:
-            raise RuntimeError("setUpClass does not calls the super's"
-                               "setUpClass in the "
-                               + self.__class__.__name__)
-        at_exit_set.add(self.__class__)
-        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
-        try:
-            test_timeout = int(test_timeout)
-        except ValueError:
-            test_timeout = 0
-        if test_timeout > 0:
-            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
-
-        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
-                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
-            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
-            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
-        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
-                os.environ.get('OS_STDERR_CAPTURE') == '1'):
-            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
-            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
-        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
-            os.environ.get('OS_LOG_CAPTURE') != '0'):
-            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
-                                                   format=self.log_format,
-                                                   level=None))
-
-    @classmethod
-    def get_client_manager(cls):
-        """
-        Returns an OpenStack client manager
-        """
-        force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
-
-        if (not hasattr(cls, 'isolated_creds') or
-            not cls.isolated_creds.name == cls.__name__):
-            cls.isolated_creds = credentials.get_isolated_credentials(
-                name=cls.__name__, network_resources=cls.network_resources,
-                force_tenant_isolation=force_tenant_isolation,
-            )
-
-        creds = cls.isolated_creds.get_primary_creds()
-        os = clients.Manager(credentials=creds, service=cls._service)
-        return os
-
-    @classmethod
-    def clear_isolated_creds(cls):
-        """
-        Clears isolated creds if set
-        """
-        if hasattr(cls, 'isolated_creds'):
-            cls.isolated_creds.clear_isolated_creds()
-
-    @classmethod
-    def _get_identity_admin_client(cls):
-        """
-        Returns an instance of the Identity Admin API client
-        """
-        os = clients.AdminManager(service=cls._service)
-        admin_client = os.identity_client
-        return admin_client
-
-    @classmethod
-    def set_network_resources(cls, network=False, router=False, subnet=False,
-                              dhcp=False):
-        """Specify which network resources should be created
-
-        @param network
-        @param router
-        @param subnet
-        @param dhcp
-        """
-        # network resources should be set only once from callers
-        # in order to ensure that even if it's called multiple times in
-        # a chain of overloaded methods, the attribute is set only
-        # in the leaf class
-        if not cls.network_resources:
-            cls.network_resources = {
-                'network': network,
-                'router': router,
-                'subnet': subnet,
-                'dhcp': dhcp}
-
-    def assertEmpty(self, items, msg=None):
-        self.assertTrue(len(items) == 0, msg)
-
-    def assertNotEmpty(self, items, msg=None):
-        self.assertTrue(len(items) > 0, msg)
-
-
-class NegativeAutoTest(BaseTestCase):
-
-    _resources = {}
-
-    @classmethod
-    def setUpClass(cls):
-        super(NegativeAutoTest, cls).setUpClass()
-        os = cls.get_client_manager()
-        cls.client = os.negative_client
-        os_admin = clients.AdminManager(service=cls._service)
-        cls.admin_client = os_admin.negative_client
-
-    @staticmethod
-    def load_tests(*args):
-        """
-        Wrapper for testscenarios to set the mandatory scenarios variable
-        only in case a real test loader is in place. Will be automatically
-        called in case the variable "load_tests" is set.
-        """
-        if getattr(args[0], 'suiteClass', None) is not None:
-            loader, standard_tests, pattern = args
-        else:
-            standard_tests, module, loader = args
-        for test in testtools.iterate_tests(standard_tests):
-            schema = getattr(test, '_schema', None)
-            if schema is not None:
-                setattr(test, 'scenarios',
-                        NegativeAutoTest.generate_scenario(schema))
-        return testscenarios.load_tests_apply_scenarios(*args)
-
-    @staticmethod
-    def generate_scenario(description):
-        """
-        Generates the test scenario list for a given description.
-
-        :param description: A file or dictionary with the following entries:
-            name (required) name for the api
-            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
-            url (required) the url to be appended to the catalog url with '%s'
-                for each resource mentioned
-            resources: (optional) A list of resource names such as "server",
-                "flavor", etc. with an element for each '%s' in the url. This
-                method will call self.get_resource for each element when
-                constructing the positive test case template so negative
-                subclasses are expected to return valid resource ids when
-                appropriate.
-            json-schema (optional) A valid json schema that will be used to
-                create invalid data for the api calls. For "GET" and "HEAD",
-                the data is used to generate query strings appended to the url,
-                otherwise for the body of the http call.
-        """
-        LOG.debug(description)
-        generator = importutils.import_class(
-            CONF.negative.test_generator)()
-        generator.validate_schema(description)
-        schema = description.get("json-schema", None)
-        resources = description.get("resources", [])
-        scenario_list = []
-        expected_result = None
-        for resource in resources:
-            if isinstance(resource, dict):
-                expected_result = resource['expected_result']
-                resource = resource['name']
-            LOG.debug("Add resource to test %s" % resource)
-            scn_name = "inv_res_%s" % (resource)
-            scenario_list.append((scn_name, {"resource": (resource,
-                                                          str(uuid.uuid4())),
-                                             "expected_result": expected_result
-                                             }))
-        if schema is not None:
-            for scenario in generator.generate_scenarios(schema):
-                scenario_list.append((scenario['_negtest_name'],
-                                      scenario))
-        LOG.debug(scenario_list)
-        return scenario_list
-
-    def execute(self, description):
-        """
-        Execute an HTTP call against an API that is expected to
-        result in client errors. First it uses invalid resources that are part
-        of the url, and then invalid data for queries and http request bodies.
-
-        :param description: A json file or dictionary with the following
-        entries:
-            name (required) name for the api
-            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
-            url (required) the url to be appended to the catalog url with '%s'
-                for each resource mentioned
-            resources: (optional) A list of resource names such as "server",
-                "flavor", etc. with an element for each '%s' in the url. This
-                method will call self.get_resource for each element when
-                constructing the positive test case template so negative
-                subclasses are expected to return valid resource ids when
-                appropriate.
-            json-schema (optional) A valid json schema that will be used to
-                create invalid data for the api calls. For "GET" and "HEAD",
-                the data is used to generate query strings appended to the url,
-                otherwise for the body of the http call.
-
-        """
-        LOG.info("Executing %s" % description["name"])
-        LOG.debug(description)
-        generator = importutils.import_class(
-            CONF.negative.test_generator)()
-        schema = description.get("json-schema", None)
-        method = description["http-method"]
-        url = description["url"]
-        expected_result = None
-        if "default_result_code" in description:
-            expected_result = description["default_result_code"]
-
-        resources = [self.get_resource(r) for
-                     r in description.get("resources", [])]
-
-        if hasattr(self, "resource"):
-            # Note(mkoderer): The resources list already contains an invalid
-            # entry (see get_resource).
-            # We just send a valid json-schema with it
-            valid_schema = None
-            if schema:
-                valid_schema = \
-                    valid.ValidTestGenerator().generate_valid(schema)
-            new_url, body = self._http_arguments(valid_schema, url, method)
-        elif hasattr(self, "_negtest_name"):
-            schema_under_test = \
-                valid.ValidTestGenerator().generate_valid(schema)
-            local_expected_result = \
-                generator.generate_payload(self, schema_under_test)
-            if local_expected_result is not None:
-                expected_result = local_expected_result
-            new_url, body = \
-                self._http_arguments(schema_under_test, url, method)
-        else:
-            raise Exception("testscenarios are not active. Please make sure "
-                            "that your test runner supports the load_tests "
-                            "mechanism")
-
-        if "admin_client" in description and description["admin_client"]:
-            client = self.admin_client
-        else:
-            client = self.client
-        resp, resp_body = client.send_request(method, new_url,
-                                              resources, body=body)
-        self._check_negative_response(expected_result, resp.status, resp_body)
-
-    def _http_arguments(self, json_dict, url, method):
-        LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
-        if not json_dict:
-            return url, None
-        elif method in ["GET", "HEAD", "PUT", "DELETE"]:
-            return "%s?%s" % (url, parse.urlencode(json_dict)), None
-        else:
-            return url, json.dumps(json_dict)
-
-    def _check_negative_response(self, expected_result, result, body):
-        self.assertTrue(result >= 400 and result < 500 and result != 413,
-                        "Expected client error, got %s:%s" %
-                        (result, body))
-        self.assertTrue(expected_result is None or expected_result == result,
-                        "Expected %s, got %s:%s" %
-                        (expected_result, result, body))
-
-    @classmethod
-    def set_resource(cls, name, resource):
-        """
-        This function can be used in setUpClass context to register a resource
-        for a test.
-
-        :param name: The name of the kind of resource such as "flavor", "role",
-            etc.
-        :param resource: The id of the resource
-        """
-        cls._resources[name] = resource
-
-    def get_resource(self, name):
-        """
-        Return a valid uuid for a type of resource. If a real resource is
-        needed as part of a url then this method should return one. Otherwise
-        it can return None.
-
-        :param name: The name of the kind of resource such as "flavor", "role",
-            etc.
-        """
-        if isinstance(name, dict):
-            name = name['name']
-        if hasattr(self, "resource") and self.resource[0] == name:
-            LOG.debug("Return invalid resource (%s) value: %s" %
-                      (self.resource[0], self.resource[1]))
-            return self.resource[1]
-        if name in self._resources:
-            return self._resources[name]
-        return None
-
-
-def SimpleNegativeAutoTest(klass):
-    """
-    This decorator registers a test function on the basis of the class name.
-    """
-    @attr(type=['negative', 'gate'])
-    def generic_test(self):
-        if hasattr(self, '_schema'):
-            self.execute(self._schema)
-
-    cn = klass.__name__
-    cn = cn.replace('JSON', '')
-    cn = cn.replace('Test', '')
-    # NOTE(mkoderer): replaces uppercase chars inside the class name with '_'
-    lower_cn = re.sub('(?<!^)(?=[A-Z])', '_', cn).lower()
-    func_name = 'test_%s' % lower_cn
-    setattr(klass, func_name, generic_test)
-    return klass
-
-
-def call_until_true(func, duration, sleep_for):
-    """
-    Call the given function until it returns True (and return True) or
-    until the specified duration (in seconds) elapses (and return
-    False).
-
-    :param func: A zero argument callable that returns True on success.
-    :param duration: The number of seconds for which to attempt a
-        successful call of the function.
-    :param sleep_for: The number of seconds to sleep after an unsuccessful
-                      invocation of the function.
-    """
-    now = time.time()
-    timeout = now + duration
-    while now < timeout:
-        if func():
-            return True
-        time.sleep(sleep_for)
-        now = time.time()
-    return False
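A brief usage sketch, assuming call_until_true above is in scope (the
predicate and timings are hypothetical):

    import time

    start = time.time()
    # poll every 0.5s, giving up after a 10s budget
    became_true = call_until_true(lambda: time.time() - start > 2,
                                  duration=10, sleep_for=0.5)
    assert became_true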
diff --git a/neutron/tests/tools.py b/neutron/tests/tools.py
deleted file mode 100644 (file)
index c469ab7..0000000
+++ /dev/null
@@ -1,190 +0,0 @@
-# Copyright (c) 2013 NEC Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import platform
-import random
-import string
-import warnings
-
-import fixtures
-import six
-
-from neutron.api.v2 import attributes
-
-
-class AttributeMapMemento(fixtures.Fixture):
-    """Create a copy of the resource attribute map so it can be restored during
-    test cleanup.
-
-    There are a few reasons why this is not included in a class derived
-    from BaseTestCase:
-
-        - Test cases may need more control over when the backup is
-        made, especially if they are not direct descendants of
-        BaseTestCase.
-
-        - Inheritance is a bit of overkill for this facility and it's a
-        stretch to rationalize the "is a" criterion.
-    """
-
-    def _setUp(self):
-        # Shallow copy is not a proper choice for keeping a backup copy as
-        # the RESOURCE_ATTRIBUTE_MAP map is modified in place through the
-        # 0th level keys. Ideally deepcopy() would be used but this seems
-        # to result in test failures. A compromise is to copy one level
-        # deeper than a shallow copy.
-        self.contents_backup = {}
-        for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
-            self.contents_backup[res] = attrs.copy()
-        self.addCleanup(self.restore)
-
-    def restore(self):
-        attributes.RESOURCE_ATTRIBUTE_MAP = self.contents_backup
-
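A hedged usage sketch, driving the fixture by hand rather than through
useFixture (the mutated attribute is hypothetical):

    memento = AttributeMapMemento()
    memento.setUp()
    try:
        # mutate a 0th-level entry in place, as tests commonly do
        attributes.RESOURCE_ATTRIBUTE_MAP['networks']['extra'] = {}
    finally:
        memento.cleanUp()  # runs restore(), reverting the map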
-
-class WarningsFixture(fixtures.Fixture):
-    """Ensure warnings from neutron modules are always surfaced in tests."""
-
-    warning_types = (
-        DeprecationWarning, PendingDeprecationWarning, ImportWarning
-    )
-
-    def _setUp(self):
-        self.addCleanup(warnings.resetwarnings)
-        for wtype in self.warning_types:
-            warnings.filterwarnings(
-                "always", category=wtype, module='^neutron\\.')
-
-
-class OpenFixture(fixtures.Fixture):
-    """Mock access to a specific file while preserving open for others."""
-
-    def __init__(self, filepath, contents=''):
-        self.path = filepath
-        self.contents = contents
-
-    def _setUp(self):
-        self.mock_open = mock.mock_open(read_data=self.contents)
-        self._orig_open = open
-
-        def replacement_open(name, *args, **kwargs):
-            if name == self.path:
-                return self.mock_open(name, *args, **kwargs)
-            return self._orig_open(name, *args, **kwargs)
-
-        self._patch = mock.patch('six.moves.builtins.open',
-                                 new=replacement_open)
-        self._patch.start()
-        self.addCleanup(self._patch.stop)
-
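A usage sketch, assuming a testtools-style test case where self.useFixture is
available (the path and contents are hypothetical):

    fix = self.useFixture(OpenFixture('/etc/neutron/dummy.conf', 'data'))
    with open('/etc/neutron/dummy.conf') as f:
        assert f.read() == 'data'  # served by the mock
    fix.mock_open.assert_called_once_with('/etc/neutron/dummy.conf')
    # opening any other path still goes through the real builtin open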
-
-class SafeCleanupFixture(fixtures.Fixture):
-    """Catch errors raised during the wrapped fixture's cleanup."""
-
-    def __init__(self, fixture):
-        self.fixture = fixture
-
-    def _setUp(self):
-
-        def cleanUp():
-            try:
-                self.fixture.cleanUp()
-            except Exception:
-                pass
-
-        self.fixture.setUp()
-        self.addCleanup(cleanUp)
-
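A hedged sketch of the intent, using a hypothetical fixture whose cleanup
raises:

    import fixtures

    class FlakyFixture(fixtures.Fixture):
        def _setUp(self):
            self.addCleanup(self._boom)

        def _boom(self):
            raise RuntimeError('cleanup failed')

    wrapper = SafeCleanupFixture(FlakyFixture())
    wrapper.setUp()
    wrapper.cleanUp()  # the RuntimeError is swallowed, not propagated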
-
-"""setup_mock_calls and verify_mock_calls are convenience functions
-to set up a sequence of mock calls.
-
-expected_calls_and_values is a list of (expected_call, return_value):
-
-        expected_calls_and_values = [
-            (mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
-                        self.BR_NAME, pname]),
-             None),
-            (mock.call(["ovs-vsctl", self.TO, "set", "Interface",
-                        pname, "type=gre"]),
-             None),
-            ....
-        ]
-
-* expected_call should be mock.call(expected_arg, ....)
-* return_value is passed to side_effect of a mocked call.
-  A return value or an exception can be specified.
-"""
-
-import unittest
-
-from neutron.common import utils
-
-
-def setup_mock_calls(mocked_call, expected_calls_and_values):
-    return_values = [call[1] for call in expected_calls_and_values]
-    mocked_call.side_effect = return_values
-
-
-def verify_mock_calls(mocked_call, expected_calls_and_values,
-                      any_order=False):
-    expected_calls = [call[0] for call in expected_calls_and_values]
-    mocked_call.assert_has_calls(expected_calls, any_order=any_order)
-
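A compact usage sketch of the two helpers (the commands and return values are
hypothetical):

    import mock

    executed = mock.Mock()
    expected_calls_and_values = [
        (mock.call(['ovs-vsctl', 'list-ports', 'br-int']), 'tap0\n'),
        (mock.call(['ovs-vsctl', 'iface-to-br', 'tap0']), 'br-int'),
    ]
    setup_mock_calls(executed, expected_calls_and_values)

    # calls return the configured values in order
    assert executed(['ovs-vsctl', 'list-ports', 'br-int']) == 'tap0\n'
    assert executed(['ovs-vsctl', 'iface-to-br', 'tap0']) == 'br-int'

    verify_mock_calls(executed, expected_calls_and_values)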
-
-def fail(msg=None):
-    """Fail immediately, with the given message.
-
-    This function is equivalent to TestCase.fail without requiring a
-    testcase instance (useful for reducing coupling).
-    """
-    raise unittest.TestCase.failureException(msg)
-
-
-class UnorderedList(list):
-    """A list that is equal to any permutation of itself."""
-
-    def __eq__(self, other):
-        if not isinstance(other, list):
-            return False
-        return (sorted(self, key=utils.safe_sort_key) ==
-                sorted(other, key=utils.safe_sort_key))
-
-    def __ne__(self, other):
-        return not self == other
-
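For example, assuming utils.safe_sort_key can order the elements:

    assert UnorderedList([3, 1, 2]) == [1, 2, 3]
    assert UnorderedList([3, 1, 2]) == [2, 3, 1]
    assert UnorderedList([3, 1, 2]) != [1, 2]  # different lengths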
-
-def get_random_string(n=10):
-    return ''.join(random.choice(string.ascii_lowercase) for _ in range(n))
-
-
-def get_random_boolean():
-    return bool(random.getrandbits(1))
-
-
-def get_random_integer(range_begin=0, range_end=1000):
-    return random.randint(range_begin, range_end)
-
-
-def is_bsd():
-    """Return True on BSD-based systems."""
-
-    system = platform.system()
-    if system == 'Darwin':
-        return True
-    if 'bsd' in system.lower():
-        return True
-    return False
diff --git a/neutron/tests/unit/__init__.py b/neutron/tests/unit/__init__.py
deleted file mode 100644 (file)
index faed26a..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-
-cfg.CONF.use_stderr = False
diff --git a/neutron/tests/unit/_test_extension_portbindings.py b/neutron/tests/unit/_test_extension_portbindings.py
deleted file mode 100644 (file)
index 42840d4..0000000
+++ /dev/null
@@ -1,360 +0,0 @@
-# Copyright 2013 NEC Corporation
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from six.moves import http_client as httplib
-
-from oslo_config import cfg
-from webob import exc
-
-from neutron import context
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-
-class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    # VIF_TYPE must be overridden according to plugin vif_type
-    VIF_TYPE = portbindings.VIF_TYPE_OTHER
-    # VIF_DETAILS must be overridden according to plugin vif_details
-    VIF_DETAILS = None
-
-    def _check_response_portbindings(self, port):
-        self.assertEqual(port[portbindings.VIF_TYPE], self.VIF_TYPE)
-        # REVISIT(rkukura): Consider reworking tests to enable ML2 to bind
-
-        if self.VIF_TYPE not in [portbindings.VIF_TYPE_UNBOUND,
-                                 portbindings.VIF_TYPE_BINDING_FAILED]:
-            # NOTE(r-mibu): The following six lines are just for backward
-            # compatibility.  In this class, HAS_PORT_FILTER has been replaced
-            # by VIF_DETAILS, which can be set to the expected vif_details to
-            # check, but the replacement of HAS_PORT_FILTER has not yet been
-            # completed in all subclasses.
-            if self.VIF_DETAILS is None:
-                expected = getattr(self, 'HAS_PORT_FILTER', False)
-                vif_details = port[portbindings.VIF_DETAILS]
-                port_filter = vif_details[portbindings.CAP_PORT_FILTER]
-                self.assertEqual(expected, port_filter)
-                return
-            self.assertEqual(self.VIF_DETAILS, port[portbindings.VIF_DETAILS])
-
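A hedged sketch of a plugin test subclass pinning the expected binding (the
class is hypothetical; the constants come from the portbindings extension):

    class MyPluginPortBindingsTest(PortBindingsTestCase):
        VIF_TYPE = portbindings.VIF_TYPE_OVS
        VIF_DETAILS = {portbindings.CAP_PORT_FILTER: True}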
-    def _check_response_no_portbindings(self, port):
-        self.assertIn('status', port)
-        self.assertNotIn(portbindings.VIF_TYPE, port)
-        self.assertNotIn(portbindings.VIF_DETAILS, port)
-
-    def _get_non_admin_context(self):
-        return context.Context(user_id=None,
-                               tenant_id=self._tenant_id,
-                               is_admin=False)
-
-    def test_port_vif_details(self):
-        with self.port(name='name') as port:
-            port_id = port['port']['id']
-            # Check a response of create_port
-            self._check_response_portbindings(port['port'])
-            # Check a response of get_port
-            ctx = context.get_admin_context()
-            port = self._show('ports', port_id, neutron_context=ctx)['port']
-            self._check_response_portbindings(port)
-            # By default user is admin - now test non admin user
-            ctx = self._get_non_admin_context()
-            non_admin_port = self._show(
-                'ports', port_id, neutron_context=ctx)['port']
-            self._check_response_no_portbindings(non_admin_port)
-
-    def test_ports_vif_details(self):
-        plugin = manager.NeutronManager.get_plugin()
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port(), self.port():
-            ctx = context.get_admin_context()
-            ports = plugin.get_ports(ctx)
-            self.assertEqual(len(ports), 2)
-            for port in ports:
-                self._check_response_portbindings(port)
-            # By default user is admin - now test non admin user
-            ctx = self._get_non_admin_context()
-            ports = self._list('ports', neutron_context=ctx)['ports']
-            self.assertEqual(len(ports), 2)
-            for non_admin_port in ports:
-                self._check_response_no_portbindings(non_admin_port)
-
-    def _check_port_binding_profile(self, port, profile=None):
-        # For plugins which do not use the binding:profile attribute,
-        # we just check that an operation on the port succeeded.
-        self.assertIn('id', port)
-
-    def _test_create_port_binding_profile(self, profile):
-        profile_arg = {portbindings.PROFILE: profile}
-        with self.port(arg_list=(portbindings.PROFILE,),
-                       **profile_arg) as port:
-            port_id = port['port']['id']
-            self._check_port_binding_profile(port['port'], profile)
-            port = self._show('ports', port_id)
-            self._check_port_binding_profile(port['port'], profile)
-
-    def test_create_port_binding_profile_none(self):
-        self._test_create_port_binding_profile(None)
-
-    def test_create_port_binding_profile_with_empty_dict(self):
-        self._test_create_port_binding_profile({})
-
-    def _test_update_port_binding_profile(self, profile):
-        profile_arg = {portbindings.PROFILE: profile}
-        with self.port() as port:
-            self._check_port_binding_profile(port['port'])
-            port_id = port['port']['id']
-            ctx = context.get_admin_context()
-            port = self._update('ports', port_id, {'port': profile_arg},
-                                neutron_context=ctx)['port']
-            self._check_port_binding_profile(port, profile)
-            port = self._show('ports', port_id)['port']
-            self._check_port_binding_profile(port, profile)
-
-    def test_update_port_binding_profile_none(self):
-        self._test_update_port_binding_profile(None)
-
-    def test_update_port_binding_profile_with_empty_dict(self):
-        self._test_update_port_binding_profile({})
-
-    def test_port_create_portinfo_non_admin(self):
-        profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
-        with self.network(set_context=True, tenant_id='test') as net1:
-            with self.subnet(network=net1) as subnet1:
-                # succeed without binding:profile
-                with self.port(subnet=subnet1,
-                               set_context=True, tenant_id='test'):
-                    pass
-                # fail with binding:profile
-                try:
-                    with self.port(subnet=subnet1,
-                                   expected_res_status=403,
-                                   arg_list=(portbindings.PROFILE,),
-                                   set_context=True, tenant_id='test',
-                                   **profile_arg):
-                        pass
-                except exc.HTTPClientError:
-                    pass
-
-    def test_port_update_portinfo_non_admin(self):
-        profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
-        with self.network() as net1:
-            with self.subnet(network=net1) as subnet1:
-                with self.port(subnet=subnet1) as port:
-                    # By default user is admin - now test non admin user
-                    port_id = port['port']['id']
-                    ctx = self._get_non_admin_context()
-                    port = self._update('ports', port_id,
-                                        {'port': profile_arg},
-                                        expected_code=exc.HTTPForbidden.code,
-                                        neutron_context=ctx)
-
-
-class PortBindingsHostTestCaseMixin(object):
-    fmt = 'json'
-    hostname = 'testhost'
-
-    def _check_response_portbindings_host(self, port):
-        self.assertEqual(port[portbindings.HOST_ID], self.hostname)
-
-    def _check_response_no_portbindings_host(self, port):
-        self.assertIn('status', port)
-        self.assertNotIn(portbindings.HOST_ID, port)
-
-    def test_port_vif_non_admin(self):
-        with self.network(set_context=True,
-                          tenant_id='test') as net1:
-            with self.subnet(network=net1) as subnet1:
-                host_arg = {portbindings.HOST_ID: self.hostname}
-                try:
-                    with self.port(subnet=subnet1,
-                                   expected_res_status=403,
-                                   arg_list=(portbindings.HOST_ID,),
-                                   set_context=True,
-                                   tenant_id='test',
-                                   **host_arg):
-                        pass
-                except exc.HTTPClientError:
-                    pass
-
-    def test_port_vif_host(self):
-        host_arg = {portbindings.HOST_ID: self.hostname}
-        with self.port(name='name', arg_list=(portbindings.HOST_ID,),
-                       **host_arg) as port:
-            port_id = port['port']['id']
-            # Check a response of create_port
-            self._check_response_portbindings_host(port['port'])
-            # Check a response of get_port
-            ctx = context.get_admin_context()
-            port = self._show('ports', port_id, neutron_context=ctx)['port']
-            self._check_response_portbindings_host(port)
-            # By default user is admin - now test non admin user
-            ctx = context.Context(user_id=None,
-                                  tenant_id=self._tenant_id,
-                                  is_admin=False)
-            non_admin_port = self._show(
-                'ports', port_id, neutron_context=ctx)['port']
-            self._check_response_no_portbindings_host(non_admin_port)
-
-    def test_ports_vif_host(self):
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        host_arg = {portbindings.HOST_ID: self.hostname}
-        with self.port(name='name1',
-                       arg_list=(portbindings.HOST_ID,),
-                       **host_arg), self.port(name='name2'):
-            ctx = context.get_admin_context()
-            ports = self._list('ports', neutron_context=ctx)['ports']
-            self.assertEqual(2, len(ports))
-            for port in ports:
-                if port['name'] == 'name1':
-                    self._check_response_portbindings_host(port)
-                else:
-                    self.assertFalse(port[portbindings.HOST_ID])
-            # By default user is admin - now test non admin user
-            ctx = context.Context(user_id=None,
-                                  tenant_id=self._tenant_id,
-                                  is_admin=False)
-            ports = self._list('ports', neutron_context=ctx)['ports']
-            self.assertEqual(2, len(ports))
-            for non_admin_port in ports:
-                self._check_response_no_portbindings_host(non_admin_port)
-
-    def test_ports_vif_host_update(self):
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        host_arg = {portbindings.HOST_ID: self.hostname}
-        with self.port(name='name1', arg_list=(portbindings.HOST_ID,),
-                       **host_arg) as port1, self.port(name='name2') as port2:
-            data = {'port': {portbindings.HOST_ID: 'testhosttemp'}}
-            req = self.new_update_request('ports', data, port1['port']['id'])
-            req.get_response(self.api)
-            req = self.new_update_request('ports', data, port2['port']['id'])
-            ctx = context.get_admin_context()
-            req.get_response(self.api)
-            ports = self._list('ports', neutron_context=ctx)['ports']
-        self.assertEqual(2, len(ports))
-        for port in ports:
-            self.assertEqual('testhosttemp', port[portbindings.HOST_ID])
-
-    def test_ports_vif_non_host_update(self):
-        host_arg = {portbindings.HOST_ID: self.hostname}
-        with self.port(name='name', arg_list=(portbindings.HOST_ID,),
-                       **host_arg) as port:
-            data = {'port': {'admin_state_up': False}}
-            req = self.new_update_request('ports', data, port['port']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(port['port'][portbindings.HOST_ID],
-                             res['port'][portbindings.HOST_ID])
-
-    def test_ports_vif_non_host_update_when_host_null(self):
-        with self.port() as port:
-            data = {'port': {'admin_state_up': False}}
-            req = self.new_update_request('ports', data, port['port']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(port['port'][portbindings.HOST_ID],
-                             res['port'][portbindings.HOST_ID])
-
-    def test_ports_vif_host_list(self):
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        host_arg = {portbindings.HOST_ID: self.hostname}
-        with self.port(name='name1',
-                       arg_list=(portbindings.HOST_ID,),
-                       **host_arg) as port1,\
-                self.port(name='name2'),\
-                self.port(name='name3',
-                          arg_list=(portbindings.HOST_ID,),
-                          **host_arg) as port3:
-            self._test_list_resources(
-                'port', (port1, port3),
-                query_params='%s=%s' % (portbindings.HOST_ID, self.hostname))
-
-
-class PortBindingsVnicTestCaseMixin(object):
-    fmt = 'json'
-    vnic_type = portbindings.VNIC_NORMAL
-
-    def _check_response_portbindings_vnic_type(self, port):
-        self.assertIn('status', port)
-        self.assertEqual(port[portbindings.VNIC_TYPE], self.vnic_type)
-
-    def test_port_vnic_type_non_admin(self):
-        with self.network(set_context=True,
-                          tenant_id='test') as net1:
-            with self.subnet(network=net1) as subnet1:
-                vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
-                with self.port(subnet=subnet1,
-                               expected_res_status=httplib.CREATED,
-                               arg_list=(portbindings.VNIC_TYPE,),
-                               set_context=True,
-                               tenant_id='test',
-                               **vnic_arg) as port:
-                    # Check a response of create_port
-                    self._check_response_portbindings_vnic_type(port['port'])
-
-    def test_port_vnic_type(self):
-        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
-        with self.port(name='name', arg_list=(portbindings.VNIC_TYPE,),
-                       **vnic_arg) as port:
-            port_id = port['port']['id']
-            # Check a response of create_port
-            self._check_response_portbindings_vnic_type(port['port'])
-            # Check a response of get_port
-            ctx = context.get_admin_context()
-            port = self._show('ports', port_id, neutron_context=ctx)['port']
-            self._check_response_portbindings_vnic_type(port)
-            # By default user is admin - now test non admin user
-            ctx = context.Context(user_id=None,
-                                  tenant_id=self._tenant_id,
-                                  is_admin=False)
-            non_admin_port = self._show(
-                'ports', port_id, neutron_context=ctx)['port']
-            self._check_response_portbindings_vnic_type(non_admin_port)
-
-    def test_ports_vnic_type(self):
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
-        with self.port(name='name1', arg_list=(portbindings.VNIC_TYPE,),
-                       **vnic_arg), self.port(name='name2'):
-            ctx = context.get_admin_context()
-            ports = self._list('ports', neutron_context=ctx)['ports']
-            self.assertEqual(2, len(ports))
-            for port in ports:
-                if port['name'] == 'name1':
-                    self._check_response_portbindings_vnic_type(port)
-                else:
-                    self.assertEqual(portbindings.VNIC_NORMAL,
-                                     port[portbindings.VNIC_TYPE])
-            # By default user is admin - now test non admin user
-            ctx = context.Context(user_id=None,
-                                  tenant_id=self._tenant_id,
-                                  is_admin=False)
-            ports = self._list('ports', neutron_context=ctx)['ports']
-            self.assertEqual(2, len(ports))
-            for non_admin_port in ports:
-                self._check_response_portbindings_vnic_type(non_admin_port)
-
-    def test_ports_vnic_type_list(self):
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
-        with self.port(name='name1',
-                       arg_list=(portbindings.VNIC_TYPE,),
-                       **vnic_arg) as port1,\
-                self.port(name='name2') as port2,\
-                self.port(name='name3',
-                          arg_list=(portbindings.VNIC_TYPE,),
-                          **vnic_arg) as port3:
-            self._test_list_resources(
-                'port', (port1, port2, port3),
-                query_params='%s=%s' % (portbindings.VNIC_TYPE,
-                                        self.vnic_type))
diff --git a/neutron/tests/unit/agent/__init__.py b/neutron/tests/unit/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/common/__init__.py b/neutron/tests/unit/agent/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/common/test_config.py b/neutron/tests/unit/agent/common/test_config.py
deleted file mode 100644 (file)
index b5a580e..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.common import config
-from neutron.tests import base
-
-
-def test_setup_conf():
-    conf = config.setup_conf()
-    assert conf.state_path.endswith('/var/lib/neutron')
-
-
-class TestRootHelper(base.BaseTestCase):
-
-    def test_agent_root_helper(self):
-        conf = config.setup_conf()
-        config.register_root_helper(conf)
-        conf.set_override('root_helper', 'my_root_helper', 'AGENT')
-        self.assertEqual(config.get_root_helper(conf), 'my_root_helper')
-
-    def test_root_default(self):
-        conf = config.setup_conf()
-        config.register_root_helper(conf)
-        self.assertEqual(config.get_root_helper(conf), 'sudo')
-
-    def test_agent_root_helper_daemon(self):
-        conf = config.setup_conf()
-        config.register_root_helper(conf)
-        rhd = 'my_root_helper_daemon'
-        conf.set_override('root_helper_daemon', rhd, 'AGENT')
-        self.assertEqual(rhd, conf.AGENT.root_helper_daemon)
diff --git a/neutron/tests/unit/agent/common/test_ovs_lib.py b/neutron/tests/unit/agent/common/test_ovs_lib.py
deleted file mode 100644 (file)
index 88a04e7..0000000
+++ /dev/null
@@ -1,907 +0,0 @@
-# Copyright 2012, VMware, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import mock
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-import testtools
-
-from neutron.agent.common import ovs_lib
-from neutron.agent.common import utils
-from neutron.common import exceptions
-from neutron.plugins.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.common \
-    import constants as p_const
-from neutron.tests import base
-from neutron.tests import tools
-
-
-OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
-
-# some test data for get_vif_port_to_ofport_map that exhibited bug 1444269
-OVSLIST_WITH_UNSET_PORT = (
-    '{"data":[["patch-tun",["map",[]],1],["tap2ab72a72-44",["map",[["attached-'
-    'mac","fa:16:3e:b0:f8:38"],["iface-id","2ab72a72-4407-4ef3-806a-b2172f3e4d'
-    'c7"],["iface-status","active"]]],2],["tap6b108774-15",["map",[["attached-'
-    'mac","fa:16:3e:02:f5:91"],["iface-id","6b108774-1559-45e9-a7c3-b714f11722'
-    'cf"],["iface-status","active"]]],["set",[]]]],"headings":["name","externa'
-    'l_ids","ofport"]}')
-
-
-class OFCTLParamListMatcher(object):
-
-    def _parse(self, params):
-        actions_pos = params.find('actions')
-        return set(params[:actions_pos].split(',')), params[actions_pos:]
-
-    def __init__(self, params):
-        self.expected = self._parse(params)
-
-    def __eq__(self, other):
-        return self.expected == self._parse(other)
-
-    def __str__(self):
-        return 'ovs-ofctl parameters: %s, "%s"' % self.expected
-
-    __repr__ = __str__
-
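For example, the matcher ignores parameter order before the actions part:

    m = OFCTLParamListMatcher('priority=2,in_port=1,actions=drop')
    assert m == 'in_port=1,priority=2,actions=drop'
    assert not (m == 'in_port=1,priority=2,actions=normal')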
-
-class OVS_Lib_Test(base.BaseTestCase):
-    """A test suite to exercise the OVS libraries shared by Neutron agents.
-
-    Note: these tests do not actually execute ovs-* utilities, and thus
-    can run on any system.  That does, however, limit their scope.
-    """
-
-    def setUp(self):
-        super(OVS_Lib_Test, self).setUp()
-        self.BR_NAME = "br-int"
-
-        self.br = ovs_lib.OVSBridge(self.BR_NAME)
-        self.execute = mock.patch.object(
-            utils, "execute", spec=utils.execute).start()
-
-    @property
-    def TO(self):
-        return "--timeout=%s" % self.br.vsctl_timeout
-
-    def _vsctl_args(self, *args):
-        cmd = ['ovs-vsctl', self.TO, '--oneline', '--format=json', '--']
-        cmd += args
-        return cmd
-
-    def _vsctl_mock(self, *args):
-        cmd = self._vsctl_args(*args)
-        return mock.call(cmd, run_as_root=True, log_fail_as_error=False)
-
-    def _verify_vsctl_mock(self, *args):
-        cmd = self._vsctl_args(*args)
-        self.execute.assert_called_once_with(cmd, run_as_root=True,
-                                             log_fail_as_error=False)
-
-    def test_vifport(self):
-        """Create and stringify vif port, confirm no exceptions."""
-
-        pname = "vif1.0"
-        ofport = 5
-        vif_id = uuidutils.generate_uuid()
-        mac = "ca:fe:de:ad:be:ef"
-
-        # test __init__
-        port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
-        self.assertEqual(port.port_name, pname)
-        self.assertEqual(port.ofport, ofport)
-        self.assertEqual(port.vif_id, vif_id)
-        self.assertEqual(port.vif_mac, mac)
-        self.assertEqual(port.switch.br_name, self.BR_NAME)
-
-        # test __str__
-        str(port)
-
-    def _build_timeout_opt(self, exp_timeout):
-        return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
-
-    def test_add_flow(self):
-        ofport = "99"
-        vid = 4000
-        lsw_id = 18
-        cidr = '192.168.1.0/24'
-
-        flow_dict_1 = collections.OrderedDict([
-            ('cookie', 1234),
-            ('priority', 2),
-            ('dl_src', 'ca:fe:de:ad:be:ef'),
-            ('actions', 'strip_vlan,output:0')])
-        flow_dict_2 = collections.OrderedDict([
-            ('cookie', 1254),
-            ('priority', 1),
-            ('actions', 'normal')])
-        flow_dict_3 = collections.OrderedDict([
-            ('cookie', 1257),
-            ('priority', 2),
-            ('actions', 'drop')])
-        flow_dict_4 = collections.OrderedDict([
-            ('cookie', 1274),
-            ('priority', 2),
-            ('in_port', ofport),
-            ('actions', 'drop')])
-        flow_dict_5 = collections.OrderedDict([
-            ('cookie', 1284),
-            ('priority', 4),
-            ('in_port', ofport),
-            ('dl_vlan', vid),
-            ('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
-        flow_dict_6 = collections.OrderedDict([
-            ('cookie', 1754),
-            ('priority', 3),
-            ('tun_id', lsw_id),
-            ('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
-        flow_dict_7 = collections.OrderedDict([
-            ('cookie', 1256),
-            ('priority', 4),
-            ('nw_src', cidr),
-            ('proto', 'arp'),
-            ('actions', 'drop')])
-
-        self.br.add_flow(**flow_dict_1)
-        self.br.add_flow(**flow_dict_2)
-        self.br.add_flow(**flow_dict_3)
-        self.br.add_flow(**flow_dict_4)
-        self.br.add_flow(**flow_dict_5)
-        self.br.add_flow(**flow_dict_6)
-        self.br.add_flow(**flow_dict_7)
-        expected_calls = [
-            self._ofctl_mock("add-flows", self.BR_NAME, '-',
-                             process_input=OFCTLParamListMatcher(
-                                 "hard_timeout=0,idle_timeout=0,cookie=1234,"
-                                 "priority=2,dl_src=ca:fe:de:ad:be:ef,"
-                                 "actions=strip_vlan,output:0")),
-            self._ofctl_mock("add-flows", self.BR_NAME, '-',
-                             process_input=OFCTLParamListMatcher(
-                                 "hard_timeout=0,idle_timeout=0,cookie=1254,"
-                                 "priority=1,actions=normal")),
-            self._ofctl_mock("add-flows", self.BR_NAME, '-',
-                             process_input=OFCTLParamListMatcher(
-                                 "hard_timeout=0,idle_timeout=0,cookie=1257,"
-                                 "priority=2,actions=drop")),
-            self._ofctl_mock("add-flows", self.BR_NAME, '-',
-                             process_input=OFCTLParamListMatcher(
-                                 "hard_timeout=0,idle_timeout=0,cookie=1274,"
-                                 "priority=2,in_port=%s,actions=drop" % ofport
-                             )),
-            self._ofctl_mock("add-flows", self.BR_NAME, '-',
-                             process_input=OFCTLParamListMatcher(
-                                 "hard_timeout=0,idle_timeout=0,cookie=1284,"
-                                 "priority=4,dl_vlan=%s,in_port=%s,"
-                                 "actions=strip_vlan,set_tunnel:%s,normal" %
-                                 (vid, ofport, lsw_id))),
-            self._ofctl_mock("add-flows", self.BR_NAME, '-',
-                             process_input=OFCTLParamListMatcher(
-                                 "hard_timeout=0,idle_timeout=0,cookie=1754,"
-                                 "priority=3,"
-                                 "tun_id=%s,actions=mod_vlan_vid:%s,output:%s"
-                                 % (lsw_id, vid, ofport))),
-            self._ofctl_mock("add-flows", self.BR_NAME, '-',
-                             process_input=OFCTLParamListMatcher(
-                                 "hard_timeout=0,idle_timeout=0,cookie=1256,"
-                                 "priority=4,nw_src=%s,arp,actions=drop"
-                                 % cidr)),
-        ]
-        self.execute.assert_has_calls(expected_calls)
-
-    def _ofctl_args(self, cmd, *args):
-        cmd = ['ovs-ofctl', cmd]
-        cmd += args
-        return cmd
-
-    def _ofctl_mock(self, cmd, *args, **kwargs):
-        cmd = self._ofctl_args(cmd, *args)
-        return mock.call(cmd, run_as_root=True, **kwargs)
-
-    def _verify_ofctl_mock(self, cmd, *args, **kwargs):
-        cmd = self._ofctl_args(cmd, *args)
-        return self.execute.assert_called_once_with(cmd, run_as_root=True,
-                                                    **kwargs)
-
-    def test_add_flow_timeout_set(self):
-        flow_dict = collections.OrderedDict([
-            ('cookie', 1234),
-            ('priority', 1),
-            ('hard_timeout', 1000),
-            ('idle_timeout', 2000),
-            ('actions', 'normal')])
-
-        self.br.add_flow(**flow_dict)
-        self._verify_ofctl_mock(
-            "add-flows", self.BR_NAME, '-',
-            process_input="hard_timeout=1000,idle_timeout=2000,"
-                          "priority=1,cookie=1234,actions=normal")
-
-    def test_add_flow_default_priority(self):
-        flow_dict = collections.OrderedDict([('actions', 'normal'),
-                                             ('cookie', 1234)])
-
-        self.br.add_flow(**flow_dict)
-        self._verify_ofctl_mock(
-            "add-flows", self.BR_NAME, '-',
-            process_input="hard_timeout=0,idle_timeout=0,priority=1,"
-                          "cookie=1234,actions=normal")
-
-    def _test_get_port_ofport(self, ofport, expected_result):
-        pname = "tap99"
-        self.br.vsctl_timeout = 0  # Don't waste precious time retrying
-        self.execute.return_value = self._encode_ovs_json(
-            ['ofport'], [[ofport]])
-        self.assertEqual(self.br.get_port_ofport(pname), expected_result)
-        self._verify_vsctl_mock("--columns=ofport", "list", "Interface", pname)
-
-    def test_get_port_ofport_succeeds_for_valid_ofport(self):
-        self._test_get_port_ofport(6, 6)
-
-    def test_get_port_ofport_returns_invalid_ofport_for_non_int(self):
-        self._test_get_port_ofport([], ovs_lib.INVALID_OFPORT)
-
-    def test_get_port_ofport_returns_invalid_for_invalid(self):
-        self._test_get_port_ofport(ovs_lib.INVALID_OFPORT,
-                                   ovs_lib.INVALID_OFPORT)
-
-    def test_default_datapath(self):
-        # verify kernel datapath is default
-        expected = p_const.OVS_DATAPATH_SYSTEM
-        self.assertEqual(expected, self.br.datapath_type)
-
-    def test_non_default_datapath(self):
-        expected = p_const.OVS_DATAPATH_NETDEV
-        self.br = ovs_lib.OVSBridge(self.BR_NAME, datapath_type=expected)
-        self.assertEqual(expected, self.br.datapath_type)
-
-    def test_count_flows(self):
-        self.execute.return_value = 'ignore\nflow-1\n'
-        # counts the number of flows as total lines of output - 2
-        self.assertEqual(self.br.count_flows(), 1)
-        self._verify_ofctl_mock("dump-flows", self.BR_NAME, process_input=None)
-
-    def test_delete_flow(self):
-        ofport = "5"
-        lsw_id = 40
-        vid = 39
-        self.br.delete_flows(in_port=ofport)
-        self.br.delete_flows(tun_id=lsw_id)
-        self.br.delete_flows(dl_vlan=vid)
-        expected_calls = [
-            self._ofctl_mock("del-flows", self.BR_NAME, '-',
-                             process_input="in_port=" + ofport),
-            self._ofctl_mock("del-flows", self.BR_NAME, '-',
-                             process_input="tun_id=%s" % lsw_id),
-            self._ofctl_mock("del-flows", self.BR_NAME, '-',
-                             process_input="dl_vlan=%s" % vid),
-        ]
-        self.execute.assert_has_calls(expected_calls)
-
-    def test_delete_flow_with_priority_set(self):
-        params = {'in_port': '1',
-                  'priority': '1'}
-
-        self.assertRaises(exceptions.InvalidInput,
-                          self.br.delete_flows,
-                          **params)
-
-    def test_dump_flows(self):
-        table = 23
-        nxst_flow = "NXST_FLOW reply (xid=0x4):"
-        flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, "
-                           "n_packets=6, n_bytes=468, "
-                           "priority=2,in_port=1 actions=drop",
-                           " cookie=0x0, duration=18027.562s, table=0, "
-                           "n_packets=0, n_bytes=0, "
-                           "priority=3,in_port=1,dl_vlan=100 "
-                           "actions=mod_vlan_vid:1,NORMAL",
-                           " cookie=0x0, duration=18044.351s, table=0, "
-                           "n_packets=9, n_bytes=594, priority=1 "
-                           "actions=NORMAL", " cookie=0x0, "
-                           "duration=18044.211s, table=23, n_packets=0, "
-                           "n_bytes=0, priority=0 actions=drop"])
-        flow_args = '\n'.join([nxst_flow, flows])
-        run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
-        run_ofctl.side_effect = [flow_args]
-        retflows = self.br.dump_flows_for_table(table)
-        self.assertEqual(flows, retflows)
-
-    def test_dump_flows_ovs_dead(self):
-        table = 23
-        run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
-        run_ofctl.side_effect = ['']
-        retflows = self.br.dump_flows_for_table(table)
-        self.assertIsNone(retflows)
-
-    def test_mod_flow_with_priority_set(self):
-        params = {'in_port': '1',
-                  'priority': '1'}
-
-        self.assertRaises(exceptions.InvalidInput,
-                          self.br.mod_flow,
-                          **params)
-
-    def test_mod_flow_no_actions_set(self):
-        params = {'in_port': '1'}
-
-        self.assertRaises(exceptions.InvalidInput,
-                          self.br.mod_flow,
-                          **params)
-
-    def test_add_tunnel_port(self):
-        pname = "tap99"
-        local_ip = "1.1.1.1"
-        remote_ip = "9.9.9.9"
-        ofport = 6
-        command = ["--may-exist", "add-port",
-                   self.BR_NAME, pname]
-        command.extend(["--", "set", "Interface", pname])
-        command.extend(["type=gre", "options:df_default=true",
-                        "options:remote_ip=" + remote_ip,
-                        "options:local_ip=" + local_ip,
-                        "options:in_key=flow",
-                        "options:out_key=flow"])
-        # Each element is a tuple of (expected mock call, return_value)
-        expected_calls_and_values = [
-            (self._vsctl_mock(*command), None),
-            (self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
-             self._encode_ovs_json(['ofport'], [[ofport]])),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.assertEqual(
-            self.br.add_tunnel_port(pname, remote_ip, local_ip),
-            ofport)
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_add_vxlan_fragmented_tunnel_port(self):
-        pname = "tap99"
-        local_ip = "1.1.1.1"
-        remote_ip = "9.9.9.9"
-        ofport = 6
-        vxlan_udp_port = "9999"
-        dont_fragment = False
-        command = ["--may-exist", "add-port", self.BR_NAME, pname]
-        command.extend(["--", "set", "Interface", pname])
-        command.extend(["type=" + constants.TYPE_VXLAN,
-                        "options:dst_port=" + vxlan_udp_port,
-                        "options:df_default=false",
-                        "options:remote_ip=" + remote_ip,
-                        "options:local_ip=" + local_ip,
-                        "options:in_key=flow",
-                        "options:out_key=flow"])
-        # Each element is a tuple of (expected mock call, return_value)
-        expected_calls_and_values = [
-            (self._vsctl_mock(*command), None),
-            (self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
-             self._encode_ovs_json(['ofport'], [[ofport]])),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.assertEqual(
-            self.br.add_tunnel_port(pname, remote_ip, local_ip,
-                                    constants.TYPE_VXLAN, vxlan_udp_port,
-                                    dont_fragment),
-            ofport)
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_add_vxlan_csum_tunnel_port(self):
-        pname = "tap99"
-        local_ip = "1.1.1.1"
-        remote_ip = "9.9.9.9"
-        ofport = 6
-        vxlan_udp_port = "9999"
-        dont_fragment = True
-        tunnel_csum = True
-        command = ["--may-exist", "add-port", self.BR_NAME, pname]
-        command.extend(["--", "set", "Interface", pname])
-        command.extend(["type=" + constants.TYPE_VXLAN,
-                        "options:dst_port=" + vxlan_udp_port,
-                        "options:df_default=true",
-                        "options:remote_ip=" + remote_ip,
-                        "options:local_ip=" + local_ip,
-                        "options:in_key=flow",
-                        "options:out_key=flow",
-                        "options:csum=true"])
-        # Each element is a tuple of (expected mock call, return_value)
-        expected_calls_and_values = [
-            (self._vsctl_mock(*command), None),
-            (self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
-             self._encode_ovs_json(['ofport'], [[ofport]])),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.assertEqual(
-            self.br.add_tunnel_port(pname, remote_ip, local_ip,
-                                    constants.TYPE_VXLAN, vxlan_udp_port,
-                                    dont_fragment, tunnel_csum),
-            ofport)
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def _test_get_vif_ports(self, is_xen=False):
-        pname = "tap99"
-        ofport = 6
-        vif_id = uuidutils.generate_uuid()
-        mac = "ca:fe:de:ad:be:ef"
-        id_field = 'xs-vif-uuid' if is_xen else 'iface-id'
-        external_ids = {"attached-mac": mac, id_field: vif_id}
-        self.br.get_ports_attributes = mock.Mock(return_value=[{
-            'name': pname, 'ofport': ofport, 'external_ids': external_ids}])
-        self.br.get_xapi_iface_id = mock.Mock(return_value=vif_id)
-
-        ports = self.br.get_vif_ports()
-        self.assertEqual(1, len(ports))
-        self.assertEqual(ports[0].port_name, pname)
-        self.assertEqual(ports[0].ofport, ofport)
-        self.assertEqual(ports[0].vif_id, vif_id)
-        self.assertEqual(ports[0].vif_mac, mac)
-        self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
-        self.br.get_ports_attributes.assert_called_once_with(
-            'Interface',
-            columns=['name', 'external_ids', 'ofport'],
-            if_exists=True)
-
-    def _encode_ovs_json(self, headings, data):
-        # See man ovs-vsctl(8) for the encoding details.
-        r = {"data": [],
-             "headings": headings}
-        for row in data:
-            ovs_row = []
-            r["data"].append(ovs_row)
-            for cell in row:
-                if isinstance(cell, (str, int, list)):
-                    ovs_row.append(cell)
-                elif isinstance(cell, dict):
-                    ovs_row.append(["map", cell.items()])
-                elif isinstance(cell, set):
-                    ovs_row.append(["set", cell])
-                else:
-                    raise TypeError('%r not int, str, list, set or dict' %
-                                    type(cell))
-        return jsonutils.dumps(r)
-
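A hedged illustration of the resulting encoding (run inside the test class so
self is in scope; the row data is hypothetical):

    import json

    row = self._encode_ovs_json(['name', 'ofport'], [['tap99', 6]])
    assert json.loads(row) == {'data': [['tap99', 6]],
                               'headings': ['name', 'ofport']}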
-    def _test_get_vif_port_set(self, is_xen):
-        if is_xen:
-            id_key = 'xs-vif-uuid'
-        else:
-            id_key = 'iface-id'
-
-        headings = ['name', 'external_ids', 'ofport']
-        data = [
-            # A vif port on this bridge:
-            ['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
-            # A vif port on this bridge not yet configured
-            ['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
-            # Another vif port on this bridge not yet configured
-            ['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
-             ['set', []]],
-
-            # Non-vif port on this bridge:
-            ['bogus', {}, 2],
-        ]
-
-        # Each element is a tuple of (expected mock call, return_value)
-        expected_calls_and_values = [
-            (self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\\ntun22'),
-            (self._vsctl_mock("--if-exists",
-                              "--columns=name,external_ids,ofport",
-                              "list", "Interface", 'tap99', 'tun22'),
-             self._encode_ovs_json(headings, data)),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        if is_xen:
-            get_xapi_iface_id = mock.patch.object(self.br,
-                                                  'get_xapi_iface_id').start()
-            get_xapi_iface_id.return_value = 'tap99id'
-
-        port_set = self.br.get_vif_port_set()
-        self.assertEqual(set(['tap99id']), port_set)
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-        if is_xen:
-            get_xapi_iface_id.assert_called_once_with('tap99id')
-
-    def test_get_vif_port_to_ofport_map(self):
-        self.execute.return_value = OVSLIST_WITH_UNSET_PORT
-        results = self.br.get_vif_port_to_ofport_map()
-        expected = {'2ab72a72-4407-4ef3-806a-b2172f3e4dc7': 2, 'patch-tun': 1}
-        self.assertEqual(expected, results)
-
-    def test_get_vif_ports_nonxen(self):
-        self._test_get_vif_ports(is_xen=False)
-
-    def test_get_vif_ports_xen(self):
-        self._test_get_vif_ports(is_xen=True)
-
-    def test_get_vif_port_set_nonxen(self):
-        self._test_get_vif_port_set(False)
-
-    def test_get_vif_port_set_xen(self):
-        self._test_get_vif_port_set(True)
-
-    def test_get_vif_ports_list_ports_error(self):
-        expected_calls_and_values = [
-            (self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-        self.assertRaises(RuntimeError, self.br.get_vif_ports)
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_get_vif_port_set_list_ports_error(self):
-        expected_calls_and_values = [
-            (self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-        self.assertRaises(RuntimeError, self.br.get_vif_port_set)
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_get_vif_port_set_list_interface_error(self):
-        expected_calls_and_values = [
-            (self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\n'),
-            (self._vsctl_mock("--if-exists",
-                              "--columns=name,external_ids,ofport",
-                              "list", "Interface", "tap99"), RuntimeError()),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-        self.assertRaises(RuntimeError, self.br.get_vif_port_set)
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_get_port_tag_dict(self):
-        headings = ['name', 'tag']
-        data = [
-            ['int-br-eth2', set()],
-            ['patch-tun', set()],
-            ['qr-76d9e6b6-21', 1],
-            ['tapce5318ff-78', 1],
-            ['tape1400310-e6', 1],
-        ]
-
-        # Each element is a tuple of (expected mock call, return_value)
-        expected_calls_and_values = [
-            (self._vsctl_mock("list-ports", self.BR_NAME),
-             '\\n'.join((iface for iface, tag in data))),
-            (self._vsctl_mock("--columns=name,tag", "list", "Port"),
-             self._encode_ovs_json(headings, data)),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        port_tags = self.br.get_port_tag_dict()
-        self.assertEqual(
-            port_tags,
-            {u'int-br-eth2': [],
-             u'patch-tun': [],
-             u'qr-76d9e6b6-21': 1,
-             u'tapce5318ff-78': 1,
-             u'tape1400310-e6': 1}
-        )
-
-    def test_clear_db_attribute(self):
-        pname = "tap77"
-        self.br.clear_db_attribute("Port", pname, "tag")
-        self._verify_vsctl_mock("clear", "Port", pname, "tag")
-
-    def _test_iface_to_br(self, exp_timeout=None):
-        iface = 'tap0'
-        br = 'br-int'
-        if exp_timeout:
-            self.br.vsctl_timeout = exp_timeout
-        self.execute.return_value = 'br-int'
-        self.assertEqual(self.br.get_bridge_for_iface(iface), br)
-        self._verify_vsctl_mock("iface-to-br", iface)
-
-    def test_iface_to_br(self):
-        self._test_iface_to_br()
-
-    def test_iface_to_br_non_default_timeout(self):
-        new_timeout = 5
-        self._test_iface_to_br(new_timeout)
-
-    def test_iface_to_br_handles_ovs_vsctl_exception(self):
-        iface = 'tap0'
-        self.execute.side_effect = Exception
-
-        self.assertIsNone(self.br.get_bridge_for_iface(iface))
-        self._verify_vsctl_mock("iface-to-br", iface)
-
-    def test_delete_all_ports(self):
-        with mock.patch.object(self.br, 'get_port_name_list',
-                               return_value=['port1']) as get_port:
-            with mock.patch.object(self.br, 'delete_port') as delete_port:
-                self.br.delete_ports(all_ports=True)
-        get_port.assert_called_once_with()
-        delete_port.assert_called_once_with('port1')
-
-    def test_delete_neutron_ports(self):
-        port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
-                                'ca:fe:de:ad:be:ef', 'br')
-        port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
-                                'ca:ee:de:ad:be:ef', 'br')
-        with mock.patch.object(self.br, 'get_vif_ports',
-                               return_value=[port1, port2]) as get_ports:
-            with mock.patch.object(self.br, 'delete_port') as delete_port:
-                self.br.delete_ports(all_ports=False)
-        get_ports.assert_called_once_with()
-        delete_port.assert_has_calls([
-            mock.call('tap1234'),
-            mock.call('tap5678')
-        ])
-
-    def test_delete_neutron_ports_list_error(self):
-        expected_calls_and_values = [
-            (self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-        self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_get_bridges_not_default_timeout(self):
-        bridges = ['br-int', 'br-ex']
-        self.br.vsctl_timeout = 5
-        self.execute.return_value = 'br-int\\nbr-ex\n'
-        self.assertEqual(self.br.get_bridges(), bridges)
-        self._verify_vsctl_mock("list-br")
-
-    def test_get_local_port_mac_succeeds(self):
-        with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
-                        return_value=mock.Mock(address='foo')):
-            self.assertEqual('foo', self.br.get_local_port_mac())
-
-    def test_get_local_port_mac_raises_exception_for_missing_mac(self):
-        with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
-                        return_value=mock.Mock(address=None)):
-            with testtools.ExpectedException(Exception):
-                self.br.get_local_port_mac()
-
-    def test_get_vifs_by_ids(self):
-        db_list_res = [
-            {'name': 'qvo1', 'ofport': 1,
-             'external_ids': {'iface-id': 'pid1', 'attached-mac': '11'}},
-            {'name': 'qvo2', 'ofport': 2,
-             'external_ids': {'iface-id': 'pid2', 'attached-mac': '22'}},
-            {'name': 'qvo4', 'ofport': -1,
-             'external_ids': {'iface-id': 'pid4', 'attached-mac': '44'}},
-        ]
-        self.br.get_ports_attributes = mock.Mock(return_value=db_list_res)
-        self.br.ovsdb = mock.Mock()
-        self.br.ovsdb.list_ports.return_value.execute.return_value = [
-            'qvo1', 'qvo2', 'qvo4']
-        by_id = self.br.get_vifs_by_ids(['pid1', 'pid2', 'pid3', 'pid4'])
-        # pid3 isn't on bridge and pid4 doesn't have a valid ofport
-        self.assertIsNone(by_id['pid3'])
-        self.assertIsNone(by_id['pid4'])
-        self.assertEqual('pid1', by_id['pid1'].vif_id)
-        self.assertEqual('qvo1', by_id['pid1'].port_name)
-        self.assertEqual(1, by_id['pid1'].ofport)
-        self.assertEqual('pid2', by_id['pid2'].vif_id)
-        self.assertEqual('qvo2', by_id['pid2'].port_name)
-        self.assertEqual(2, by_id['pid2'].ofport)
-        self.br.get_ports_attributes.assert_has_calls(
-            [mock.call('Interface', columns=['name', 'external_ids', 'ofport'],
-                       if_exists=True)])
-
-    def _test_get_vif_port_by_id(self, iface_id, data, br_name=None,
-                                 extra_calls_and_values=None):
-        headings = ['external_ids', 'name', 'ofport']
-
-        # Each element is a tuple of (expected mock call, return_value)
-        expected_calls_and_values = [
-            (self._vsctl_mock("--columns=external_ids,name,ofport", "find",
-                              "Interface",
-                              'external_ids:iface-id=%s' % iface_id,
-                              'external_ids:attached-mac!=""'),
-             self._encode_ovs_json(headings, data))]
-        if data:
-            if not br_name:
-                br_name = self.BR_NAME
-
-            # Only the last information list in 'data' is used, so if more
-            # than one vif is described in data, the rest must be declared
-            # in the argument 'extra_calls_and_values'.
-            if extra_calls_and_values:
-                expected_calls_and_values.extend(extra_calls_and_values)
-
-            expected_calls_and_values.append(
-                (self._vsctl_mock("iface-to-br",
-                                  data[-1][headings.index('name')]), br_name))
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-        vif_port = self.br.get_vif_port_by_id(iface_id)
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-        return vif_port
-
-    def _assert_vif_port(self, vif_port, ofport=None, mac=None):
-        if not ofport or ofport == -1 or not mac:
-            self.assertIsNone(vif_port, "Got %s" % vif_port)
-            return
-        self.assertEqual('tap99id', vif_port.vif_id)
-        self.assertEqual(mac, vif_port.vif_mac)
-        self.assertEqual('tap99', vif_port.port_name)
-        self.assertEqual(ofport, vif_port.ofport)
-
-    def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
-        external_ids = [["iface-id", "tap99id"],
-                        ["iface-status", "active"],
-                        ["attached-mac", mac]]
-        data = [[["map", external_ids], "tap99",
-                 ofport if ofport else ["set", []]]]
-        vif_port = self._test_get_vif_port_by_id('tap99id', data)
-        self._assert_vif_port(vif_port, ofport, mac)
-
-    def test_get_vif_by_port_id_with_ofport(self):
-        self._test_get_vif_port_by_id_with_data(
-            ofport=1, mac="aa:bb:cc:dd:ee:ff")
-
-    def test_get_vif_by_port_id_without_ofport(self):
-        self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
-
-    def test_get_vif_by_port_id_with_invalid_ofport(self):
-        self._test_get_vif_port_by_id_with_data(
-            ofport=-1, mac="aa:bb:cc:dd:ee:ff")
-
-    def test_get_vif_by_port_id_with_no_data(self):
-        self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
-
-    def test_get_vif_by_port_id_different_bridge(self):
-        external_ids = [["iface-id", "tap99id"],
-                        ["iface-status", "active"]]
-        data = [[["map", external_ids], "tap99", 1]]
-        self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
-                                                        "br-ext"))
-
-    def test_get_vif_by_port_id_multiple_vifs(self):
-        external_ids = [["iface-id", "tap99id"],
-                        ["iface-status", "active"],
-                        ["attached-mac", "de:ad:be:ef:13:37"]]
-        data = [[["map", external_ids], "dummytap", 1],
-                [["map", external_ids], "tap99", 1337]]
-        extra_calls_and_values = [
-            (self._vsctl_mock("iface-to-br", "dummytap"), "br-ext")]
-
-        vif_port = self._test_get_vif_port_by_id(
-            'tap99id', data, extra_calls_and_values=extra_calls_and_values)
-        self._assert_vif_port(vif_port, ofport=1337, mac="de:ad:be:ef:13:37")
-
-
-class TestDeferredOVSBridge(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestDeferredOVSBridge, self).setUp()
-
-        self.br = mock.Mock()
-        self.mocked_do_action_flows = mock.patch.object(
-            self.br, 'do_action_flows').start()
-
-        self.add_flow_dict1 = dict(in_port=11, actions='drop')
-        self.add_flow_dict2 = dict(in_port=12, actions='drop')
-        self.mod_flow_dict1 = dict(in_port=21, actions='drop')
-        self.mod_flow_dict2 = dict(in_port=22, actions='drop')
-        self.del_flow_dict1 = dict(in_port=31)
-        self.del_flow_dict2 = dict(in_port=32)
-
-    def test_right_allowed_passthroughs(self):
-        expected_passthroughs = ('add_port', 'add_tunnel_port', 'delete_port')
-        self.assertEqual(expected_passthroughs,
-                         ovs_lib.DeferredOVSBridge.ALLOWED_PASSTHROUGHS)
-
-    def _verify_mock_call(self, expected_calls):
-        self.mocked_do_action_flows.assert_has_calls(expected_calls)
-        self.assertEqual(len(expected_calls),
-                         len(self.mocked_do_action_flows.mock_calls))
-
-    def test_apply_on_exit(self):
-        expected_calls = [
-            mock.call('add', [self.add_flow_dict1]),
-            mock.call('mod', [self.mod_flow_dict1]),
-            mock.call('del', [self.del_flow_dict1]),
-        ]
-
-        with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
-            deferred_br.add_flow(**self.add_flow_dict1)
-            deferred_br.mod_flow(**self.mod_flow_dict1)
-            deferred_br.delete_flows(**self.del_flow_dict1)
-            self._verify_mock_call([])
-        self._verify_mock_call(expected_calls)
-
-    def test_apply_on_exit_with_errors(self):
-        try:
-            with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
-                deferred_br.add_flow(**self.add_flow_dict1)
-                deferred_br.mod_flow(**self.mod_flow_dict1)
-                deferred_br.delete_flows(**self.del_flow_dict1)
-                raise Exception()
-        except Exception:
-            self._verify_mock_call([])
-        else:
-            self.fail('Exception should have been reraised')
-
-    def test_apply(self):
-        expected_calls = [
-            mock.call('add', [self.add_flow_dict1]),
-            mock.call('mod', [self.mod_flow_dict1]),
-            mock.call('del', [self.del_flow_dict1]),
-        ]
-
-        with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
-            deferred_br.add_flow(**self.add_flow_dict1)
-            deferred_br.mod_flow(**self.mod_flow_dict1)
-            deferred_br.delete_flows(**self.del_flow_dict1)
-            self._verify_mock_call([])
-            deferred_br.apply_flows()
-            self._verify_mock_call(expected_calls)
-        self._verify_mock_call(expected_calls)
-
-    def test_apply_order(self):
-        expected_calls = [
-            mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
-            mock.call('mod', [self.mod_flow_dict1, self.mod_flow_dict2]),
-            mock.call('add', [self.add_flow_dict1, self.add_flow_dict2]),
-        ]
-
-        order = 'del', 'mod', 'add'
-        with ovs_lib.DeferredOVSBridge(self.br, order=order) as deferred_br:
-            deferred_br.add_flow(**self.add_flow_dict1)
-            deferred_br.mod_flow(**self.mod_flow_dict1)
-            deferred_br.delete_flows(**self.del_flow_dict1)
-            deferred_br.delete_flows(**self.del_flow_dict2)
-            deferred_br.add_flow(**self.add_flow_dict2)
-            deferred_br.mod_flow(**self.mod_flow_dict2)
-        self._verify_mock_call(expected_calls)
-
-    def test_apply_full_ordered(self):
-        expected_calls = [
-            mock.call('add', [self.add_flow_dict1]),
-            mock.call('mod', [self.mod_flow_dict1]),
-            mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
-            mock.call('add', [self.add_flow_dict2]),
-            mock.call('mod', [self.mod_flow_dict2]),
-        ]
-
-        with ovs_lib.DeferredOVSBridge(self.br,
-                                       full_ordered=True) as deferred_br:
-            deferred_br.add_flow(**self.add_flow_dict1)
-            deferred_br.mod_flow(**self.mod_flow_dict1)
-            deferred_br.delete_flows(**self.del_flow_dict1)
-            deferred_br.delete_flows(**self.del_flow_dict2)
-            deferred_br.add_flow(**self.add_flow_dict2)
-            deferred_br.mod_flow(**self.mod_flow_dict2)
-        self._verify_mock_call(expected_calls)
-
-    def test_getattr_allowed_attr(self):
-        with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
-            self.assertEqual(self.br.add_port, deferred_br.add_port)
-
-    def test_getattr_unallowed_attr_failure(self):
-        with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
-            self.assertRaises(AttributeError, getattr, deferred_br, 'failure')
-
-    def test_cookie_passed_to_addmod(self):
-        self.br = ovs_lib.OVSBridge("br-tun")
-        self.br.set_agent_uuid_stamp(1234)
-        expected_calls = [
-            mock.call('add-flows', ['-'],
-                      'hard_timeout=0,idle_timeout=0,priority=1,'
-                      'cookie=1234,actions=drop'),
-            mock.call('mod-flows', ['-'],
-                      'cookie=1234,actions=drop')
-        ]
-        with mock.patch.object(self.br, 'run_ofctl') as f:
-            with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
-                deferred_br.add_flow(actions='drop')
-                deferred_br.mod_flow(actions='drop')
-            f.assert_has_calls(expected_calls)
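For orientation, here is a minimal standalone sketch of the deferred-apply pattern the tests above exercise. The bridge is a plain mock (exactly as in the tests themselves), so the snippet assumes only the ovs_lib import used by this test module and never touches a real Open vSwitch.

import mock

from neutron.agent.common import ovs_lib

br = mock.Mock()
with ovs_lib.DeferredOVSBridge(br, order=('del', 'mod', 'add')) as deferred:
    # Calls are buffered, not executed, while the context is open.
    deferred.delete_flows(in_port=2)
    deferred.add_flow(in_port=1, actions='drop')
# On a clean exit the buffered flows are flushed grouped by action, in
# the requested order: first 'del', then 'mod', then 'add'.
print(br.do_action_flows.mock_calls)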
diff --git a/neutron/tests/unit/agent/common/test_polling.py b/neutron/tests/unit/agent/common/test_polling.py
deleted file mode 100644 (file)
index 738dc87..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.agent.common import base_polling as polling
-from neutron.tests import base
-
-
-class TestBasePollingManager(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestBasePollingManager, self).setUp()
-        self.pm = polling.BasePollingManager()
-
-    def test__is_polling_required_should_not_be_implemented(self):
-        self.assertRaises(NotImplementedError, self.pm._is_polling_required)
-
-    def test_force_polling_sets_interval_attribute(self):
-        self.assertFalse(self.pm._force_polling)
-        self.pm.force_polling()
-        self.assertTrue(self.pm._force_polling)
-
-    def test_polling_completed_sets_interval_attribute(self):
-        self.pm._polling_completed = False
-        self.pm.polling_completed()
-        self.assertTrue(self.pm._polling_completed)
-
-    def mock_is_polling_required(self, return_value):
-        return mock.patch.object(self.pm, '_is_polling_required',
-                                 return_value=return_value)
-
-    def test_is_polling_required_returns_true_when_forced(self):
-        with self.mock_is_polling_required(False):
-            self.pm.force_polling()
-            self.assertTrue(self.pm.is_polling_required)
-            self.assertFalse(self.pm._force_polling)
-
-    def test_is_polling_required_returns_true_when_polling_not_completed(self):
-        with self.mock_is_polling_required(False):
-            self.pm._polling_completed = False
-            self.assertTrue(self.pm.is_polling_required)
-
-    def test_is_polling_required_returns_true_when_updates_are_present(self):
-        with self.mock_is_polling_required(True):
-            self.assertTrue(self.pm.is_polling_required)
-            self.assertFalse(self.pm._polling_completed)
-
-    def test_is_polling_required_returns_false_for_no_updates(self):
-        with self.mock_is_polling_required(False):
-            self.assertFalse(self.pm.is_polling_required)
-
-
-class TestAlwaysPoll(base.BaseTestCase):
-
-    def test_is_polling_required_always_returns_true(self):
-        pm = polling.AlwaysPoll()
-        self.assertTrue(pm.is_polling_required)
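A sketch of a minimal concrete polling manager, written against the contract these tests pin down; the QueuePollingManager class and its pending_updates flag are illustrative inventions, not part of base_polling.

from neutron.agent.common import base_polling as polling

class QueuePollingManager(polling.BasePollingManager):
    """Polls whenever the (illustrative) pending_updates flag is set."""

    def __init__(self):
        super(QueuePollingManager, self).__init__()
        self.pending_updates = False

    def _is_polling_required(self):
        return self.pending_updates

pm = QueuePollingManager()
pm.pending_updates = True
assert pm.is_polling_required        # updates pending -> poll
pm.polling_completed()
pm.pending_updates = False
assert not pm.is_polling_required    # nothing pending, nothing forced
pm.force_polling()
assert pm.is_polling_required        # forced polling wins regardless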
diff --git a/neutron/tests/unit/agent/common/test_utils.py b/neutron/tests/unit/agent/common/test_utils.py
deleted file mode 100644 (file)
index 12eda7a..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.agent.common import config
-from neutron.agent.common import utils
-from neutron.agent.linux import interface
-from neutron.tests import base
-from neutron.tests.unit import testlib_api
-
-
-class TestLoadInterfaceDriver(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestLoadInterfaceDriver, self).setUp()
-        self.conf = config.setup_conf()
-        self.conf.register_opts(interface.OPTS)
-        config.register_interface_driver_opts_helper(self.conf)
-
-    def test_load_interface_driver_not_set(self):
-        with testlib_api.ExpectedException(SystemExit):
-            utils.load_interface_driver(self.conf)
-
-    def test_load_interface_driver_wrong_driver(self):
-        self.conf.set_override('interface_driver',
-                               'neutron.NonExistentDriver')
-        with testlib_api.ExpectedException(SystemExit):
-            utils.load_interface_driver(self.conf)
-
-    def test_load_interface_driver_does_not_consume_irrelevant_errors(self):
-        self.conf.set_override('interface_driver',
-                               'neutron.agent.linux.interface.NullDriver')
-        with mock.patch('oslo_utils.importutils.import_class',
-                        side_effect=RuntimeError()):
-            with testlib_api.ExpectedException(RuntimeError):
-                utils.load_interface_driver(self.conf)
-
-    def test_load_interface_driver_success(self):
-        self.conf.set_override('interface_driver',
-                               'neutron.agent.linux.interface.NullDriver')
-        self.assertIsInstance(utils.load_interface_driver(self.conf),
-                              interface.NullDriver)
-
-    def test_load_null_interface_driver_success(self):
-        self.conf.set_override('interface_driver',
-                               'null')
-        self.assertIsInstance(utils.load_interface_driver(self.conf),
-                              interface.NullDriver)
-
-    def test_load_ivs_interface_driver_success(self):
-        self.conf.set_override('interface_driver',
-                               'ivs')
-        self.assertIsInstance(utils.load_interface_driver(self.conf),
-                              interface.IVSInterfaceDriver)
-
-    def test_load_linuxbridge_interface_driver_success(self):
-        self.conf.set_override('interface_driver',
-                               'linuxbridge')
-        self.assertIsInstance(utils.load_interface_driver(self.conf),
-                              interface.BridgeInterfaceDriver)
-
-    def test_load_ovs_interface_driver_success(self):
-        self.conf.set_override('interface_driver',
-                               'openvswitch')
-        self.assertIsInstance(utils.load_interface_driver(self.conf),
-                              interface.OVSInterfaceDriver)
-
-    def test_load_interface_driver_as_alias_wrong_driver(self):
-        self.conf.set_override('interface_driver', 'openvswitchXX')
-        with testlib_api.ExpectedException(SystemExit):
-            utils.load_interface_driver(self.conf)
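Condensed from the assertions above, a sketch of the loader contract: interface_driver accepts either a full class path or a short alias, and an unrecognized value (here the made-up 'no.such.Driver') exits the process.

from neutron.agent.common import config
from neutron.agent.common import utils
from neutron.agent.linux import interface

conf = config.setup_conf()
conf.register_opts(interface.OPTS)
config.register_interface_driver_opts_helper(conf)

conf.set_override('interface_driver', 'openvswitch')   # alias form
driver = utils.load_interface_driver(conf)
assert isinstance(driver, interface.OVSInterfaceDriver)

conf.set_override('interface_driver', 'no.such.Driver')
try:
    utils.load_interface_driver(conf)
except SystemExit:
    pass                                               # bad value exits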
diff --git a/neutron/tests/unit/agent/dhcp/__init__.py b/neutron/tests/unit/agent/dhcp/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py
deleted file mode 100644 (file)
index 16d8d65..0000000
+++ /dev/null
@@ -1,1637 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import copy
-import sys
-import uuid
-
-import eventlet
-import mock
-from oslo_config import cfg
-import oslo_messaging
-import testtools
-
-from neutron.agent.common import config
-from neutron.agent.dhcp import agent as dhcp_agent
-from neutron.agent.dhcp import config as dhcp_config
-from neutron.agent import dhcp_agent as entry
-from neutron.agent.linux import dhcp
-from neutron.agent.linux import interface
-from neutron.common import config as common_config
-from neutron.common import constants as const
-from neutron.common import exceptions
-from neutron.common import utils
-from neutron import context
-from neutron.tests import base
-
-
-HOSTNAME = 'hostname'
-dev_man = dhcp.DeviceManager
-rpc_api = dhcp_agent.DhcpPluginApi
-DEVICE_MANAGER = '%s.%s' % (dev_man.__module__, dev_man.__name__)
-DHCP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__)
-
-
-fake_tenant_id = 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'
-fake_subnet1_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.9.2',
-                                               end='172.9.9.254'))
-fake_subnet1 = dhcp.DictModel(dict(id='bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb',
-                              network_id='12345678-1234-5678-1234567890ab',
-                              cidr='172.9.9.0/24', enable_dhcp=True, name='',
-                              tenant_id=fake_tenant_id,
-                              gateway_ip='172.9.9.1', host_routes=[],
-                              dns_nameservers=[], ip_version=4,
-                              ipv6_ra_mode=None, ipv6_address_mode=None,
-                              allocation_pools=fake_subnet1_allocation_pools))
-
-fake_subnet2_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.8.2',
-                                               end='172.9.8.254'))
-fake_subnet2 = dhcp.DictModel(dict(id='dddddddd-dddd-dddd-dddddddddddd',
-                              network_id='12345678-1234-5678-1234567890ab',
-                              cidr='172.9.8.0/24', enable_dhcp=False, name='',
-                              tenant_id=fake_tenant_id, gateway_ip='172.9.8.1',
-                              host_routes=[], dns_nameservers=[], ip_version=4,
-                              allocation_pools=fake_subnet2_allocation_pools))
-
-fake_subnet3 = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
-                              network_id='12345678-1234-5678-1234567890ab',
-                              cidr='192.168.1.1/24', enable_dhcp=True))
-
-fake_ipv6_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
-                              network_id='12345678-1234-5678-1234567890ab',
-                              cidr='2001:0db8::0/64', enable_dhcp=True,
-                              tenant_id=fake_tenant_id,
-                              gateway_ip='2001:0db8::1', ip_version=6,
-                              ipv6_ra_mode='slaac', ipv6_address_mode=None))
-
-fake_meta_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
-                                  network_id='12345678-1234-5678-1234567890ab',
-                                  cidr='169.254.169.252/30',
-                                  gateway_ip='169.254.169.253',
-                                  enable_dhcp=True))
-
-fake_fixed_ip1 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id,
-                                ip_address='172.9.9.9'))
-fake_fixed_ip2 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id,
-                                ip_address='172.9.9.10'))
-fake_fixed_ipv6 = dhcp.DictModel(dict(id='', subnet_id=fake_ipv6_subnet.id,
-                                 ip_address='2001:db8::a8bb:ccff:fedd:ee99'))
-fake_meta_fixed_ip = dhcp.DictModel(dict(id='', subnet=fake_meta_subnet,
-                                    ip_address='169.254.169.254'))
-fake_allocation_pool_subnet1 = dhcp.DictModel(dict(id='', start='172.9.9.2',
-                                              end='172.9.9.254'))
-
-fake_port1 = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
-                            device_id='dhcp-12345678-1234-aaaa-1234567890ab',
-                            device_owner='',
-                            allocation_pools=fake_subnet1_allocation_pools,
-                            mac_address='aa:bb:cc:dd:ee:ff',
-                            network_id='12345678-1234-5678-1234567890ab',
-                            fixed_ips=[fake_fixed_ip1]))
-
-fake_dhcp_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789022',
-                            device_id='dhcp-12345678-1234-aaaa-123456789022',
-                            device_owner=const.DEVICE_OWNER_DHCP,
-                            allocation_pools=fake_subnet1_allocation_pools,
-                            mac_address='aa:bb:cc:dd:ee:22',
-                            network_id='12345678-1234-5678-1234567890ab',
-                            fixed_ips=[fake_fixed_ip2]))
-
-fake_port2 = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000',
-                            device_id='dhcp-12345678-1234-aaaa-123456789000',
-                            device_owner='',
-                            mac_address='aa:bb:cc:dd:ee:99',
-                            network_id='12345678-1234-5678-1234567890ab',
-                            fixed_ips=[fake_fixed_ip2]))
-
-fake_ipv6_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000',
-                                device_owner='',
-                                mac_address='aa:bb:cc:dd:ee:99',
-                                network_id='12345678-1234-5678-1234567890ab',
-                                fixed_ips=[fake_fixed_ipv6]))
-
-fake_meta_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
-                                mac_address='aa:bb:cc:dd:ee:ff',
-                                network_id='12345678-1234-5678-1234567890ab',
-                                device_owner=const.DEVICE_OWNER_ROUTER_INTF,
-                                device_id='forzanapoli',
-                                fixed_ips=[fake_meta_fixed_ip]))
-
-fake_meta_dvr_port = dhcp.DictModel(fake_meta_port.copy())
-fake_meta_dvr_port.device_owner = const.DEVICE_OWNER_DVR_INTERFACE
-
-fake_dist_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
-                                mac_address='aa:bb:cc:dd:ee:ff',
-                                network_id='12345678-1234-5678-1234567890ab',
-                                device_owner=const.DEVICE_OWNER_DVR_INTERFACE,
-                                device_id='forzanapoli',
-                                fixed_ips=[fake_meta_fixed_ip]))
-
-FAKE_NETWORK_UUID = '12345678-1234-5678-1234567890ab'
-FAKE_NETWORK_DHCP_NS = "qdhcp-%s" % FAKE_NETWORK_UUID
-
-fake_network = dhcp.NetModel(dict(id=FAKE_NETWORK_UUID,
-                             tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-                             admin_state_up=True,
-                             subnets=[fake_subnet1, fake_subnet2],
-                             ports=[fake_port1]))
-
-fake_network_ipv6 = dhcp.NetModel(dict(
-                             id='12345678-1234-5678-1234567890ab',
-                             tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-                             admin_state_up=True,
-                             subnets=[fake_ipv6_subnet],
-                             ports=[fake_ipv6_port]))
-
-fake_network_ipv6_ipv4 = dhcp.NetModel(dict(
-                             id='12345678-1234-5678-1234567890ab',
-                             tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-                             admin_state_up=True,
-                             subnets=[fake_ipv6_subnet, fake_subnet1],
-                             ports=[fake_port1]))
-
-isolated_network = dhcp.NetModel(
-    dict(
-        id='12345678-1234-5678-1234567890ab',
-        tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-        admin_state_up=True,
-        subnets=[fake_subnet1],
-        ports=[fake_port1]))
-
-nonisolated_dist_network = dhcp.NetModel(
-    dict(
-        id='12345678-1234-5678-1234567890ab',
-        tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-        admin_state_up=True,
-        subnets=[fake_subnet1],
-        ports=[fake_port1, fake_port2]))
-
-empty_network = dhcp.NetModel(
-    dict(
-        id='12345678-1234-5678-1234567890ab',
-        tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-        admin_state_up=True,
-        subnets=[fake_subnet1],
-        ports=[]))
-
-fake_meta_network = dhcp.NetModel(
-    dict(id='12345678-1234-5678-1234567890ab',
-         tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-         admin_state_up=True,
-         subnets=[fake_meta_subnet],
-         ports=[fake_meta_port]))
-
-fake_meta_dvr_network = dhcp.NetModel(fake_meta_network.copy())
-fake_meta_dvr_network.ports = [fake_meta_dvr_port]
-
-fake_dist_network = dhcp.NetModel(
-    dict(id='12345678-1234-5678-1234567890ab',
-         tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-         admin_state_up=True,
-         subnets=[fake_meta_subnet],
-         ports=[fake_meta_port, fake_dist_port]))
-
-fake_down_network = dhcp.NetModel(
-    dict(id='12345678-dddd-dddd-1234567890ab',
-         tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-         admin_state_up=False,
-         subnets=[],
-         ports=[]))
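The fixtures above lean on dhcp.DictModel exposing dictionary keys as attributes (fake_fixed_ip1 reads fake_subnet1.id; fake_meta_dvr_port is patched in place); a tiny sketch of that access pattern, with illustrative values:

from neutron.agent.linux import dhcp

subnet = dhcp.DictModel(dict(id='subnet-1', cidr='10.0.0.0/24'))
assert subnet.id == 'subnet-1'     # keys read back as attributes
subnet.gateway_ip = '10.0.0.1'     # and can be (re)assigned in place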
-
-
-class TestDhcpAgent(base.BaseTestCase):
-    def setUp(self):
-        super(TestDhcpAgent, self).setUp()
-        entry.register_options(cfg.CONF)
-        cfg.CONF.set_override('interface_driver',
-                              'neutron.agent.linux.interface.NullDriver')
-        # disable setting up periodic state reporting
-        cfg.CONF.set_override('report_interval', 0, 'AGENT')
-
-        self.driver_cls_p = mock.patch(
-            'neutron.agent.dhcp.agent.importutils.import_class')
-        self.driver = mock.Mock(name='driver')
-        self.driver.existing_dhcp_networks.return_value = []
-        self.driver_cls = self.driver_cls_p.start()
-        self.driver_cls.return_value = self.driver
-        self.mock_makedirs_p = mock.patch("os.makedirs")
-        self.mock_makedirs = self.mock_makedirs_p.start()
-
-        self.mock_ip_wrapper_p = mock.patch("neutron.agent.linux.ip_lib."
-                                            "IPWrapper")
-        self.mock_ip_wrapper = self.mock_ip_wrapper_p.start()
-
-    def test_init_host(self):
-        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-        with mock.patch.object(dhcp, 'sync_state') as sync_state:
-            dhcp.init_host()
-            sync_state.assert_called_once_with()
-
-    def test_dhcp_agent_manager(self):
-        state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI'
-        # sync_state is needed for this test
-        cfg.CONF.set_override('report_interval', 1, 'AGENT')
-        with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport,
-                               'sync_state',
-                               autospec=True) as mock_sync_state:
-            with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport,
-                                   'periodic_resync',
-                                   autospec=True) as mock_periodic_resync:
-                with mock.patch(state_rpc_str) as state_rpc:
-                    with mock.patch.object(sys, 'argv') as sys_argv:
-                        sys_argv.return_value = [
-                            'dhcp', '--config-file',
-                            base.etcdir('neutron.conf')]
-                        cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
-                        config.register_interface_driver_opts_helper(cfg.CONF)
-                        config.register_agent_state_opts_helper(cfg.CONF)
-                        cfg.CONF.register_opts(interface.OPTS)
-                        common_config.init(sys.argv[1:])
-                        agent_mgr = dhcp_agent.DhcpAgentWithStateReport(
-                            'testhost')
-                        eventlet.greenthread.sleep(1)
-                        agent_mgr.after_start()
-                        mock_sync_state.assert_called_once_with(agent_mgr)
-                        mock_periodic_resync.assert_called_once_with(agent_mgr)
-                        state_rpc.assert_has_calls(
-                            [mock.call(mock.ANY),
-                             mock.call().report_state(mock.ANY, mock.ANY,
-                                                      mock.ANY)])
-
-    def test_dhcp_agent_main_agent_manager(self):
-        logging_str = 'neutron.agent.common.config.setup_logging'
-        launcher_str = 'oslo_service.service.ServiceLauncher'
-        with mock.patch(logging_str):
-            with mock.patch.object(sys, 'argv') as sys_argv:
-                with mock.patch(launcher_str) as launcher:
-                    sys_argv.return_value = ['dhcp', '--config-file',
-                                             base.etcdir('neutron.conf')]
-                    entry.main()
-                    launcher.assert_has_calls(
-                        [mock.call(cfg.CONF),
-                         mock.call().launch_service(mock.ANY),
-                         mock.call().wait()])
-
-    def test_run_completes_single_pass(self):
-        with mock.patch(DEVICE_MANAGER):
-            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-            attrs_to_mock = dict(
-                [(a, mock.DEFAULT) for a in
-                 ['sync_state', 'periodic_resync']])
-            with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
-                dhcp.run()
-                mocks['sync_state'].assert_called_once_with()
-                mocks['periodic_resync'].assert_called_once_with()
-
-    def test_call_driver(self):
-        network = mock.Mock()
-        network.id = '1'
-        dhcp = dhcp_agent.DhcpAgent(cfg.CONF)
-        self.assertTrue(dhcp.call_driver('foo', network))
-        self.driver.assert_called_once_with(cfg.CONF,
-                                            mock.ANY,
-                                            mock.ANY,
-                                            mock.ANY,
-                                            mock.ANY)
-
-    def _test_call_driver_failure(self, exc=None,
-                                  trace_level='exception', expected_sync=True):
-        network = mock.Mock()
-        network.id = '1'
-        self.driver.return_value.foo.side_effect = exc or Exception
-        with mock.patch.object(dhcp_agent.LOG, trace_level) as log:
-            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-            with mock.patch.object(dhcp,
-                                   'schedule_resync') as schedule_resync:
-                self.assertIsNone(dhcp.call_driver('foo', network))
-                self.driver.assert_called_once_with(cfg.CONF,
-                                                    mock.ANY,
-                                                    mock.ANY,
-                                                    mock.ANY,
-                                                    mock.ANY)
-                self.assertEqual(log.call_count, 1)
-                self.assertEqual(expected_sync, schedule_resync.called)
-
-    def test_call_driver_ip_address_generation_failure(self):
-        error = oslo_messaging.RemoteError(
-            exc_type='IpAddressGenerationFailure')
-        self._test_call_driver_failure(exc=error, expected_sync=False)
-
-    def test_call_driver_failure(self):
-        self._test_call_driver_failure()
-
-    def test_call_driver_remote_error_net_not_found(self):
-        self._test_call_driver_failure(
-            exc=oslo_messaging.RemoteError(exc_type='NetworkNotFound'),
-            trace_level='warning')
-
-    def test_call_driver_network_not_found(self):
-        self._test_call_driver_failure(
-            exc=exceptions.NetworkNotFound(net_id='1'),
-            trace_level='warning')
-
-    def test_call_driver_conflict(self):
-        self._test_call_driver_failure(
-            exc=exceptions.Conflict(),
-            trace_level='warning',
-            expected_sync=False)
-
-    def _test_sync_state_helper(self, known_net_ids, active_net_ids):
-        active_networks = set(mock.Mock(id=netid) for netid in active_net_ids)
-
-        with mock.patch(DHCP_PLUGIN) as plug:
-            mock_plugin = mock.Mock()
-            mock_plugin.get_active_networks_info.return_value = active_networks
-            plug.return_value = mock_plugin
-
-            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-
-            attrs_to_mock = dict([(a, mock.DEFAULT)
-                                 for a in ['disable_dhcp_helper', 'cache',
-                                           'safe_configure_dhcp_for_network']])
-
-            with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
-                mocks['cache'].get_network_ids.return_value = known_net_ids
-                dhcp.sync_state()
-
-                diff = set(known_net_ids) - set(active_net_ids)
-                exp_disable = [mock.call(net_id) for net_id in diff]
-                mocks['cache'].assert_has_calls([mock.call.get_network_ids()])
-                mocks['disable_dhcp_helper'].assert_has_calls(exp_disable)
-
-    def test_sync_state_initial(self):
-        self._test_sync_state_helper([], ['a'])
-
-    def test_sync_state_same(self):
-        self._test_sync_state_helper(['a'], ['a'])
-
-    def test_sync_state_disabled_net(self):
-        self._test_sync_state_helper(['b'], ['a'])
-
-    def test_sync_state_waitall(self):
-        with mock.patch.object(dhcp_agent.eventlet.GreenPool, 'waitall') as w:
-            active_net_ids = ['1', '2', '3', '4', '5']
-            known_net_ids = ['1', '2', '3', '4', '5']
-            self._test_sync_state_helper(known_net_ids, active_net_ids)
-            w.assert_called_once_with()
-
-    def test_sync_state_for_all_networks_plugin_error(self):
-        with mock.patch(DHCP_PLUGIN) as plug:
-            mock_plugin = mock.Mock()
-            mock_plugin.get_active_networks_info.side_effect = Exception
-            plug.return_value = mock_plugin
-
-            with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
-                dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-                with mock.patch.object(dhcp,
-                                       'schedule_resync') as schedule_resync:
-                    dhcp.sync_state()
-
-                    self.assertTrue(log.called)
-                    self.assertTrue(schedule_resync.called)
-
-    def test_sync_state_for_one_network_plugin_error(self):
-        with mock.patch(DHCP_PLUGIN) as plug:
-            mock_plugin = mock.Mock()
-            exc = Exception()
-            mock_plugin.get_active_networks_info.side_effect = exc
-            plug.return_value = mock_plugin
-
-            with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
-                dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-                with mock.patch.object(dhcp,
-                                       'schedule_resync') as schedule_resync:
-                    dhcp.sync_state(['foo_network'])
-
-                    self.assertTrue(log.called)
-                    schedule_resync.assert_called_with(exc, 'foo_network')
-
-    def test_periodic_resync(self):
-        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-        with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn:
-            dhcp.periodic_resync()
-            spawn.assert_called_once_with(dhcp._periodic_resync_helper)
-
-    def test_report_state_revival_logic(self):
-        dhcp = dhcp_agent.DhcpAgentWithStateReport(HOSTNAME)
-        with mock.patch.object(dhcp.state_rpc,
-                               'report_state') as report_state,\
-            mock.patch.object(dhcp, "run"):
-            report_state.return_value = const.AGENT_ALIVE
-            dhcp._report_state()
-            self.assertEqual({}, dhcp.needs_resync_reasons)
-
-            report_state.return_value = const.AGENT_REVIVED
-            dhcp._report_state()
-            self.assertEqual(dhcp.needs_resync_reasons[None],
-                             ['Agent has just been revived'])
-
-    def test_periodic_resync_helper(self):
-        with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep:
-            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-            resync_reasons = collections.OrderedDict(
-                (('a', 'reason1'), ('b', 'reason2')))
-            dhcp.needs_resync_reasons = resync_reasons
-            with mock.patch.object(dhcp, 'sync_state') as sync_state:
-                sync_state.side_effect = RuntimeError
-                with testtools.ExpectedException(RuntimeError):
-                    dhcp._periodic_resync_helper()
-                sync_state.assert_called_once_with(resync_reasons.keys())
-                sleep.assert_called_once_with(dhcp.conf.resync_interval)
-                self.assertEqual(len(dhcp.needs_resync_reasons), 0)
-
-    def test_populate_cache_on_start_without_active_networks_support(self):
-        # emulate a DHCP driver that doesn't support retrieval of
-        # active networks
-        self.driver.existing_dhcp_networks.side_effect = NotImplementedError
-
-        with mock.patch.object(dhcp_agent.LOG, 'debug') as log:
-            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-
-            self.driver.existing_dhcp_networks.assert_called_once_with(
-                dhcp.conf,
-            )
-
-            self.assertFalse(dhcp.cache.get_network_ids())
-            self.assertTrue(log.called)
-
-    def test_populate_cache_on_start(self):
-        networks = ['aaa', 'bbb']
-        self.driver.existing_dhcp_networks.return_value = networks
-
-        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-
-        self.driver.existing_dhcp_networks.assert_called_once_with(
-            dhcp.conf,
-        )
-
-        self.assertEqual(set(networks), set(dhcp.cache.get_network_ids()))
-
-    def test_none_interface_driver(self):
-        cfg.CONF.set_override('interface_driver', None)
-        self.assertRaises(SystemExit, dhcp.DeviceManager,
-                          cfg.CONF, None)
-
-    def test_nonexistent_interface_driver(self):
-        # Temporarily stop the mock so the real import_class is used
-        # to import the interface driver.
-        self.driver_cls_p.stop()
-        self.addCleanup(self.driver_cls_p.start)
-        cfg.CONF.set_override('interface_driver', 'foo.bar')
-        self.assertRaises(SystemExit, dhcp.DeviceManager,
-                          cfg.CONF, None)
-
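Distilled from the call_driver tests above into a self-contained sketch of the dispatch-by-action-name, degrade-to-None pattern they assert; FakeDriver and this call_driver helper are purely illustrative, not the agent's actual implementation.

import logging

LOG = logging.getLogger(__name__)

class FakeDriver(object):
    def enable(self):
        return True

def call_driver(driver, action):
    try:
        return getattr(driver, action)()   # dispatch the action by name
    except Exception:
        LOG.exception('Unable to %s dhcp.', action)
        return None                        # callers treat None as failure

assert call_driver(FakeDriver(), 'enable') is True
assert call_driver(FakeDriver(), 'missing') is None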
-
-class TestLogArgs(base.BaseTestCase):
-
-    def test_log_args_without_log_dir_and_file(self):
-        conf_dict = {'debug': True,
-                     'verbose': False,
-                     'log_dir': None,
-                     'log_file': None,
-                     'use_syslog': True,
-                     'syslog_log_facility': 'LOG_USER'}
-        conf = dhcp.DictModel(conf_dict)
-        expected_args = ['--debug',
-                         '--use-syslog',
-                         '--syslog-log-facility=LOG_USER']
-        args = config.get_log_args(conf, 'log_file_name')
-        self.assertEqual(expected_args, args)
-
-    def test_log_args_without_log_file(self):
-        conf_dict = {'debug': True,
-                     'verbose': True,
-                     'log_dir': '/etc/tests',
-                     'log_file': None,
-                     'use_syslog': False,
-                     'syslog_log_facility': 'LOG_USER'}
-        conf = dhcp.DictModel(conf_dict)
-        expected_args = ['--debug',
-                         '--verbose',
-                         '--log-file=log_file_name',
-                         '--log-dir=/etc/tests']
-        args = config.get_log_args(conf, 'log_file_name')
-        self.assertEqual(expected_args, args)
-
-    def test_log_args_with_log_dir_and_file(self):
-        conf_dict = {'debug': True,
-                     'verbose': False,
-                     'log_dir': '/etc/tests',
-                     'log_file': 'tests/filelog',
-                     'use_syslog': False,
-                     'syslog_log_facility': 'LOG_USER'}
-        conf = dhcp.DictModel(conf_dict)
-        expected_args = ['--debug',
-                         '--log-file=log_file_name',
-                         '--log-dir=/etc/tests/tests']
-        args = config.get_log_args(conf, 'log_file_name')
-        self.assertEqual(expected_args, args)
-
-    def test_log_args_without_log_dir(self):
-        conf_dict = {'debug': True,
-                     'verbose': False,
-                     'log_file': 'tests/filelog',
-                     'log_dir': None,
-                     'use_syslog': False,
-                     'syslog_log_facility': 'LOG_USER'}
-        conf = dhcp.DictModel(conf_dict)
-        expected_args = ['--debug',
-                         '--log-file=log_file_name',
-                         '--log-dir=tests']
-        args = config.get_log_args(conf, 'log_file_name')
-        self.assertEqual(expected_args, args)
-
-    def test_log_args_with_filelog_and_syslog(self):
-        conf_dict = {'debug': True,
-                     'verbose': True,
-                     'log_file': 'tests/filelog',
-                     'log_dir': '/etc/tests',
-                     'use_syslog': True,
-                     'syslog_log_facility': 'LOG_USER'}
-        conf = dhcp.DictModel(conf_dict)
-        expected_args = ['--debug',
-                         '--verbose',
-                         '--log-file=log_file_name',
-                         '--log-dir=/etc/tests/tests']
-        args = config.get_log_args(conf, 'log_file_name')
-        self.assertEqual(expected_args, args)
-
-
-class TestDhcpAgentEventHandler(base.BaseTestCase):
-    def setUp(self):
-        super(TestDhcpAgentEventHandler, self).setUp()
-        config.register_interface_driver_opts_helper(cfg.CONF)
-        cfg.CONF.set_override('interface_driver',
-                              'neutron.agent.linux.interface.NullDriver')
-        entry.register_options(cfg.CONF)  # register all dhcp cfg options
-
-        self.plugin_p = mock.patch(DHCP_PLUGIN)
-        plugin_cls = self.plugin_p.start()
-        self.plugin = mock.Mock()
-        plugin_cls.return_value = self.plugin
-
-        self.cache_p = mock.patch('neutron.agent.dhcp.agent.NetworkCache')
-        cache_cls = self.cache_p.start()
-        self.cache = mock.Mock()
-        cache_cls.return_value = self.cache
-        self.mock_makedirs_p = mock.patch("os.makedirs")
-        self.mock_makedirs = self.mock_makedirs_p.start()
-        self.mock_init_p = mock.patch('neutron.agent.dhcp.agent.'
-                                      'DhcpAgent._populate_networks_cache')
-        self.mock_init = self.mock_init_p.start()
-        self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
-        self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver')
-        self.call_driver = self.call_driver_p.start()
-        self.schedule_resync_p = mock.patch.object(self.dhcp,
-                                                   'schedule_resync')
-        self.schedule_resync = self.schedule_resync_p.start()
-        self.external_process_p = mock.patch(
-            'neutron.agent.linux.external_process.ProcessManager'
-        )
-        self.external_process = self.external_process_p.start()
-
-    def _process_manager_constructor_call(self, ns=FAKE_NETWORK_DHCP_NS):
-        return mock.call(conf=cfg.CONF,
-                         uuid=FAKE_NETWORK_UUID,
-                         namespace=ns,
-                         default_cmd_callback=mock.ANY)
-
-    def _enable_dhcp_helper(self, network, enable_isolated_metadata=False,
-                            is_isolated_network=False):
-        self.dhcp._process_monitor = mock.Mock()
-        if enable_isolated_metadata:
-            cfg.CONF.set_override('enable_isolated_metadata', True)
-        self.plugin.get_network_info.return_value = network
-        self.dhcp.enable_dhcp_helper(network.id)
-        self.plugin.assert_has_calls([
-            mock.call.get_network_info(network.id)])
-        self.call_driver.assert_called_once_with('enable', network)
-        self.cache.assert_has_calls([mock.call.put(network)])
-        if is_isolated_network:
-            self.external_process.assert_has_calls([
-                self._process_manager_constructor_call(),
-                mock.call().enable()
-            ])
-        else:
-            self.assertFalse(self.external_process.call_count)
-
-    def test_enable_dhcp_helper_enable_metadata_isolated_network(self):
-        self._enable_dhcp_helper(isolated_network,
-                                 enable_isolated_metadata=True,
-                                 is_isolated_network=True)
-
-    def test_enable_dhcp_helper_enable_metadata_no_gateway(self):
-        isolated_network_no_gateway = copy.deepcopy(isolated_network)
-        isolated_network_no_gateway.subnets[0].gateway_ip = None
-
-        self._enable_dhcp_helper(isolated_network_no_gateway,
-                                 enable_isolated_metadata=True,
-                                 is_isolated_network=True)
-
-    def test_enable_dhcp_helper_enable_metadata_nonisolated_network(self):
-        nonisolated_network = copy.deepcopy(isolated_network)
-        nonisolated_network.ports[0].device_owner = (
-            const.DEVICE_OWNER_ROUTER_INTF)
-        nonisolated_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1'
-
-        self._enable_dhcp_helper(nonisolated_network,
-                                 enable_isolated_metadata=True,
-                                 is_isolated_network=False)
-
-    def test_enable_dhcp_helper_enable_metadata_nonisolated_dist_network(self):
-        nonisolated_dist_network.ports[0].device_owner = (
-            const.DEVICE_OWNER_ROUTER_INTF)
-        nonisolated_dist_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1'
-        nonisolated_dist_network.ports[1].device_owner = (
-            const.DEVICE_OWNER_DVR_INTERFACE)
-        nonisolated_dist_network.ports[1].fixed_ips[0].ip_address = '172.9.9.1'
-
-        self._enable_dhcp_helper(nonisolated_dist_network,
-                                 enable_isolated_metadata=True,
-                                 is_isolated_network=False)
-
-    def test_enable_dhcp_helper_enable_metadata_empty_network(self):
-        self._enable_dhcp_helper(empty_network,
-                                 enable_isolated_metadata=True,
-                                 is_isolated_network=True)
-
-    def test_enable_dhcp_helper_enable_metadata_ipv6_ipv4_network(self):
-        self._enable_dhcp_helper(fake_network_ipv6_ipv4,
-                                 enable_isolated_metadata=True,
-                                 is_isolated_network=True)
-
-    def test_enable_dhcp_helper_driver_failure_ipv6_ipv4_network(self):
-        self.plugin.get_network_info.return_value = fake_network_ipv6_ipv4
-        self.call_driver.return_value = False
-        cfg.CONF.set_override('enable_isolated_metadata', True)
-        with mock.patch.object(
-            self.dhcp, 'enable_isolated_metadata_proxy') as enable_metadata:
-            self.dhcp.enable_dhcp_helper(fake_network_ipv6_ipv4.id)
-            self.plugin.assert_has_calls(
-                [mock.call.get_network_info(fake_network_ipv6_ipv4.id)])
-            self.call_driver.assert_called_once_with('enable',
-                                                     fake_network_ipv6_ipv4)
-            self.assertFalse(self.cache.called)
-            self.assertFalse(enable_metadata.called)
-            self.assertFalse(self.external_process.called)
-
-    def test_enable_dhcp_helper(self):
-        self._enable_dhcp_helper(fake_network)
-
-    def test_enable_dhcp_helper_ipv6_network(self):
-        self._enable_dhcp_helper(fake_network_ipv6)
-
-    def test_enable_dhcp_helper_down_network(self):
-        self.plugin.get_network_info.return_value = fake_down_network
-        self.dhcp.enable_dhcp_helper(fake_down_network.id)
-        self.plugin.assert_has_calls(
-            [mock.call.get_network_info(fake_down_network.id)])
-        self.assertFalse(self.call_driver.called)
-        self.assertFalse(self.cache.called)
-        self.assertFalse(self.external_process.called)
-
-    def test_enable_dhcp_helper_network_none(self):
-        self.plugin.get_network_info.return_value = None
-        with mock.patch.object(dhcp_agent.LOG, 'warn') as log:
-            self.dhcp.enable_dhcp_helper('fake_id')
-            self.plugin.assert_has_calls(
-                [mock.call.get_network_info('fake_id')])
-            self.assertFalse(self.call_driver.called)
-            self.assertTrue(log.called)
-            self.assertFalse(self.dhcp.schedule_resync.called)
-
-    def test_enable_dhcp_helper_exception_during_rpc(self):
-        self.plugin.get_network_info.side_effect = Exception
-        with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
-            self.dhcp.enable_dhcp_helper(fake_network.id)
-            self.plugin.assert_has_calls(
-                [mock.call.get_network_info(fake_network.id)])
-            self.assertFalse(self.call_driver.called)
-            self.assertTrue(log.called)
-            self.assertTrue(self.schedule_resync.called)
-            self.assertFalse(self.cache.called)
-            self.assertFalse(self.external_process.called)
-
-    def test_enable_dhcp_helper_driver_failure(self):
-        self.plugin.get_network_info.return_value = fake_network
-        self.call_driver.return_value = False
-        self.dhcp.enable_dhcp_helper(fake_network.id)
-        self.plugin.assert_has_calls(
-            [mock.call.get_network_info(fake_network.id)])
-        self.call_driver.assert_called_once_with('enable', fake_network)
-        self.assertFalse(self.cache.called)
-        self.assertFalse(self.external_process.called)
-
-    def _disable_dhcp_helper_known_network(self, isolated_metadata=False):
-        if isolated_metadata:
-            cfg.CONF.set_override('enable_isolated_metadata', True)
-        self.cache.get_network_by_id.return_value = fake_network
-        self.dhcp.disable_dhcp_helper(fake_network.id)
-        self.cache.assert_has_calls(
-            [mock.call.get_network_by_id(fake_network.id)])
-        self.call_driver.assert_called_once_with('disable', fake_network)
-        if isolated_metadata:
-            self.external_process.assert_has_calls([
-                self._process_manager_constructor_call(ns=None),
-                mock.call().disable()])
-        else:
-            self.assertFalse(self.external_process.call_count)
-
-    def test_disable_dhcp_helper_known_network_isolated_metadata(self):
-        self._disable_dhcp_helper_known_network(isolated_metadata=True)
-
-    def test_disable_dhcp_helper_known_network(self):
-        self._disable_dhcp_helper_known_network()
-
-    def test_disable_dhcp_helper_unknown_network(self):
-        self.cache.get_network_by_id.return_value = None
-        self.dhcp.disable_dhcp_helper('abcdef')
-        self.cache.assert_has_calls(
-            [mock.call.get_network_by_id('abcdef')])
-        self.assertEqual(0, self.call_driver.call_count)
-        self.assertFalse(self.external_process.called)
-
-    def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False):
-        if isolated_metadata:
-            cfg.CONF.set_override('enable_isolated_metadata', True)
-        self.cache.get_network_by_id.return_value = fake_network
-        self.call_driver.return_value = False
-        self.dhcp.disable_dhcp_helper(fake_network.id)
-        self.cache.assert_has_calls(
-            [mock.call.get_network_by_id(fake_network.id)])
-        self.call_driver.assert_called_once_with('disable', fake_network)
-        self.cache.assert_has_calls(
-            [mock.call.get_network_by_id(fake_network.id)])
-        if isolated_metadata:
-            self.external_process.assert_has_calls([
-                self._process_manager_constructor_call(ns=None),
-                mock.call().disable()
-            ])
-        else:
-            self.assertFalse(self.external_process.call_count)
-
-    def test_disable_dhcp_helper_driver_failure_isolated_metadata(self):
-        self._disable_dhcp_helper_driver_failure(isolated_metadata=True)
-
-    def test_disable_dhcp_helper_driver_failure(self):
-        self._disable_dhcp_helper_driver_failure()
-
-    def test_enable_isolated_metadata_proxy(self):
-        self.dhcp._process_monitor = mock.Mock()
-        self.dhcp.enable_isolated_metadata_proxy(fake_network)
-        self.external_process.assert_has_calls([
-            self._process_manager_constructor_call(),
-            mock.call().enable()
-        ])
-
-    def test_disable_isolated_metadata_proxy(self):
-        method_path = ('neutron.agent.metadata.driver.MetadataDriver'
-                       '.destroy_monitored_metadata_proxy')
-        with mock.patch(method_path) as destroy:
-            self.dhcp.disable_isolated_metadata_proxy(fake_network)
-            destroy.assert_called_once_with(self.dhcp._process_monitor,
-                                            fake_network.id,
-                                            cfg.CONF)
-
-    def _test_enable_isolated_metadata_proxy(self, network):
-        cfg.CONF.set_override('enable_metadata_network', True)
-        cfg.CONF.set_override('debug', True)
-        cfg.CONF.set_override('verbose', False)
-        cfg.CONF.set_override('log_file', 'test.log')
-        method_path = ('neutron.agent.metadata.driver.MetadataDriver'
-                       '.spawn_monitored_metadata_proxy')
-        with mock.patch(method_path) as spawn:
-            self.dhcp.enable_isolated_metadata_proxy(network)
-            spawn.assert_called_once_with(self.dhcp._process_monitor,
-                                          network.namespace,
-                                          dhcp.METADATA_PORT,
-                                          cfg.CONF,
-                                          router_id='forzanapoli')
-
-    def test_enable_isolated_metadata_proxy_with_metadata_network(self):
-        self._test_enable_isolated_metadata_proxy(fake_meta_network)
-
-    def test_enable_isolated_metadata_proxy_with_metadata_network_dvr(self):
-        self._test_enable_isolated_metadata_proxy(fake_meta_dvr_network)
-
-    def test_enable_isolated_metadata_proxy_with_dist_network(self):
-        self._test_enable_isolated_metadata_proxy(fake_dist_network)
-
-    def _test_disable_isolated_metadata_proxy(self, network):
-        cfg.CONF.set_override('enable_metadata_network', True)
-        method_path = ('neutron.agent.metadata.driver.MetadataDriver'
-                       '.destroy_monitored_metadata_proxy')
-        with mock.patch(method_path) as destroy:
-            self.dhcp.enable_isolated_metadata_proxy(network)
-            self.dhcp.disable_isolated_metadata_proxy(network)
-            destroy.assert_called_once_with(self.dhcp._process_monitor,
-                                            'forzanapoli',
-                                            cfg.CONF)
-
-    def test_disable_isolated_metadata_proxy_with_metadata_network(self):
-        self._test_disable_isolated_metadata_proxy(fake_meta_network)
-
-    def test_disable_isolated_metadata_proxy_with_metadata_network_dvr(self):
-        self._test_disable_isolated_metadata_proxy(fake_meta_dvr_network)
-
-    def test_disable_isolated_metadata_proxy_with_dist_network(self):
-        self._test_disable_isolated_metadata_proxy(fake_dist_network)
-
-    def test_network_create_end(self):
-        payload = dict(network=dict(id=fake_network.id))
-
-        with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
-            self.dhcp.network_create_end(None, payload)
-            enable.assert_called_once_with(fake_network.id)
-
-    def test_network_update_end_admin_state_up(self):
-        payload = dict(network=dict(id=fake_network.id, admin_state_up=True))
-        with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
-            self.dhcp.network_update_end(None, payload)
-            enable.assert_called_once_with(fake_network.id)
-
-    def test_network_update_end_admin_state_down(self):
-        payload = dict(network=dict(id=fake_network.id, admin_state_up=False))
-        with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
-            self.dhcp.network_update_end(None, payload)
-            disable.assert_called_once_with(fake_network.id)
-
-    def test_network_delete_end(self):
-        payload = dict(network_id=fake_network.id)
-
-        with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
-            self.dhcp.network_delete_end(None, payload)
-            disable.assert_called_once_with(fake_network.id)
-
-    def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self):
-        network = dhcp.NetModel(dict(id='net-id',
-                                tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-                                admin_state_up=True,
-                                subnets=[],
-                                ports=[]))
-
-        self.cache.get_network_by_id.return_value = network
-        self.plugin.get_network_info.return_value = network
-        with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
-            self.dhcp.refresh_dhcp_helper(network.id)
-            disable.assert_called_once_with(network.id)
-            self.assertFalse(self.cache.called)
-            self.assertFalse(self.call_driver.called)
-            self.cache.assert_has_calls(
-                [mock.call.get_network_by_id('net-id')])
-
-    def test_refresh_dhcp_helper_exception_during_rpc(self):
-        network = dhcp.NetModel(dict(id='net-id',
-                                tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-                                admin_state_up=True,
-                                subnets=[],
-                                ports=[]))
-
-        self.cache.get_network_by_id.return_value = network
-        self.plugin.get_network_info.side_effect = Exception
-        with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
-            self.dhcp.refresh_dhcp_helper(network.id)
-            self.assertFalse(self.call_driver.called)
-            self.cache.assert_has_calls(
-                [mock.call.get_network_by_id('net-id')])
-            self.assertTrue(log.called)
-            self.assertTrue(self.dhcp.schedule_resync.called)
-
-    def test_subnet_update_end(self):
-        payload = dict(subnet=dict(network_id=fake_network.id))
-        self.cache.get_network_by_id.return_value = fake_network
-        self.plugin.get_network_info.return_value = fake_network
-
-        self.dhcp.subnet_update_end(None, payload)
-
-        self.cache.assert_has_calls([mock.call.put(fake_network)])
-        self.call_driver.assert_called_once_with('reload_allocations',
-                                                 fake_network)
-
-    def test_subnet_update_end_restart(self):
-        new_state = dhcp.NetModel(dict(id=fake_network.id,
-                                  tenant_id=fake_network.tenant_id,
-                                  admin_state_up=True,
-                                  subnets=[fake_subnet1, fake_subnet3],
-                                  ports=[fake_port1]))
-
-        payload = dict(subnet=dict(network_id=fake_network.id))
-        self.cache.get_network_by_id.return_value = fake_network
-        self.plugin.get_network_info.return_value = new_state
-
-        self.dhcp.subnet_update_end(None, payload)
-
-        self.cache.assert_has_calls([mock.call.put(new_state)])
-        self.call_driver.assert_called_once_with('restart',
-                                                 new_state)
-
-    def test_subnet_update_end_delete_payload(self):
-        prev_state = dhcp.NetModel(dict(id=fake_network.id,
-                                   tenant_id=fake_network.tenant_id,
-                                   admin_state_up=True,
-                                   subnets=[fake_subnet1, fake_subnet3],
-                                   ports=[fake_port1]))
-
-        payload = dict(subnet_id=fake_subnet1.id)
-        self.cache.get_network_by_subnet_id.return_value = prev_state
-        self.cache.get_network_by_id.return_value = prev_state
-        self.plugin.get_network_info.return_value = fake_network
-
-        self.dhcp.subnet_delete_end(None, payload)
-
-        self.cache.assert_has_calls([
-            mock.call.get_network_by_subnet_id(
-                'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'),
-            mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'),
-            mock.call.put(fake_network)])
-        self.call_driver.assert_called_once_with('restart',
-                                                 fake_network)
-
-    def test_port_update_end(self):
-        payload = dict(port=fake_port2)
-        self.cache.get_network_by_id.return_value = fake_network
-        self.cache.get_port_by_id.return_value = fake_port2
-        self.dhcp.port_update_end(None, payload)
-        self.cache.assert_has_calls(
-            [mock.call.get_network_by_id(fake_port2.network_id),
-             mock.call.put_port(mock.ANY)])
-        self.call_driver.assert_called_once_with('reload_allocations',
-                                                 fake_network)
-
-    def test_port_update_change_ip_on_port(self):
-        payload = dict(port=fake_port1)
-        self.cache.get_network_by_id.return_value = fake_network
-        updated_fake_port1 = copy.deepcopy(fake_port1)
-        updated_fake_port1.fixed_ips[0].ip_address = '172.9.9.99'
-        self.cache.get_port_by_id.return_value = updated_fake_port1
-        self.dhcp.port_update_end(None, payload)
-        self.cache.assert_has_calls(
-            [mock.call.get_network_by_id(fake_port1.network_id),
-             mock.call.put_port(mock.ANY)])
-        self.call_driver.assert_has_calls(
-            [mock.call('reload_allocations', fake_network)])
-
-    def test_port_update_change_ip_on_dhcp_agents_port(self):
-        self.cache.get_network_by_id.return_value = fake_network
-        self.cache.get_port_by_id.return_value = fake_port1
-        payload = dict(port=copy.deepcopy(fake_port1))
-        device_id = utils.get_dhcp_agent_device_id(
-            payload['port']['network_id'], self.dhcp.conf.host)
-        payload['port']['fixed_ips'][0]['ip_address'] = '172.9.9.99'
-        payload['port']['device_id'] = device_id
-        self.dhcp.port_update_end(None, payload)
-        self.call_driver.assert_has_calls(
-            [mock.call('restart', fake_network)])
-
-    def test_port_update_on_dhcp_agents_port_no_ip_change(self):
-        self.cache.get_network_by_id.return_value = fake_network
-        self.cache.get_port_by_id.return_value = fake_port1
-        payload = dict(port=fake_port1)
-        device_id = utils.get_dhcp_agent_device_id(
-            payload['port']['network_id'], self.dhcp.conf.host)
-        payload['port']['device_id'] = device_id
-        self.dhcp.port_update_end(None, payload)
-        self.call_driver.assert_has_calls(
-            [mock.call('reload_allocations', fake_network)])
-
-    def test_port_delete_end(self):
-        payload = dict(port_id=fake_port2.id)
-        self.cache.get_network_by_id.return_value = fake_network
-        self.cache.get_port_by_id.return_value = fake_port2
-
-        self.dhcp.port_delete_end(None, payload)
-        self.cache.assert_has_calls(
-            [mock.call.get_port_by_id(fake_port2.id),
-             mock.call.get_network_by_id(fake_network.id),
-             mock.call.remove_port(fake_port2)])
-        self.call_driver.assert_has_calls(
-            [mock.call('reload_allocations', fake_network)])
-
-    def test_port_delete_end_unknown_port(self):
-        payload = dict(port_id='unknown')
-        self.cache.get_port_by_id.return_value = None
-
-        self.dhcp.port_delete_end(None, payload)
-
-        self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')])
-        self.assertEqual(self.call_driver.call_count, 0)
-
-
-class TestDhcpPluginApiProxy(base.BaseTestCase):
-    def _test_dhcp_api(self, method, **kwargs):
-        ctxt = context.get_admin_context()
-        proxy = dhcp_agent.DhcpPluginApi('foo', ctxt, host='foo')
-
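-        # Stub the RPC client's prepare()/call() so each proxy method can
-        # be checked for the arguments it sends, without a real transport.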
-        with mock.patch.object(proxy.client, 'call') as rpc_mock,\
-                mock.patch.object(proxy.client, 'prepare') as prepare_mock:
-            prepare_mock.return_value = proxy.client
-            rpc_mock.return_value = kwargs.pop('return_value', [])
-
-            prepare_args = {}
-            if 'version' in kwargs:
-                prepare_args['version'] = kwargs.pop('version')
-
-            retval = getattr(proxy, method)(**kwargs)
-            self.assertEqual(retval, rpc_mock.return_value)
-
-            prepare_mock.assert_called_once_with(**prepare_args)
-            kwargs['host'] = proxy.host
-            rpc_mock.assert_called_once_with(ctxt, method, **kwargs)
-
-    def test_get_active_networks_info(self):
-        self._test_dhcp_api('get_active_networks_info', version='1.1')
-
-    def test_get_network_info(self):
-        self._test_dhcp_api('get_network_info', network_id='fake_id',
-                            return_value=None)
-
-    def test_create_dhcp_port(self):
-        self._test_dhcp_api('create_dhcp_port', port='fake_port',
-                            return_value=None, version='1.1')
-
-    def test_update_dhcp_port(self):
-        self._test_dhcp_api('update_dhcp_port', port_id='fake_id',
-                            port='fake_port', return_value=None, version='1.1')
-
-    def test_release_dhcp_port(self):
-        self._test_dhcp_api('release_dhcp_port', network_id='fake_id',
-                            device_id='fake_id_2')
-
-
-class TestNetworkCache(base.BaseTestCase):
-    def test_put_network(self):
-        nc = dhcp_agent.NetworkCache()
-        nc.put(fake_network)
-        self.assertEqual(nc.cache,
-                         {fake_network.id: fake_network})
-        self.assertEqual(nc.subnet_lookup,
-                         {fake_subnet1.id: fake_network.id,
-                          fake_subnet2.id: fake_network.id})
-        self.assertEqual(nc.port_lookup,
-                         {fake_port1.id: fake_network.id})
-
-    def test_put_network_existing(self):
-        prev_network_info = mock.Mock()
-        nc = dhcp_agent.NetworkCache()
-        with mock.patch.object(nc, 'remove') as remove:
-            nc.cache[fake_network.id] = prev_network_info
-
-            nc.put(fake_network)
-            remove.assert_called_once_with(prev_network_info)
-        self.assertEqual(nc.cache,
-                         {fake_network.id: fake_network})
-        self.assertEqual(nc.subnet_lookup,
-                         {fake_subnet1.id: fake_network.id,
-                          fake_subnet2.id: fake_network.id})
-        self.assertEqual(nc.port_lookup,
-                         {fake_port1.id: fake_network.id})
-
-    def test_remove_network(self):
-        nc = dhcp_agent.NetworkCache()
-        nc.cache = {fake_network.id: fake_network}
-        nc.subnet_lookup = {fake_subnet1.id: fake_network.id,
-                            fake_subnet2.id: fake_network.id}
-        nc.port_lookup = {fake_port1.id: fake_network.id}
-        nc.remove(fake_network)
-
-        self.assertEqual(len(nc.cache), 0)
-        self.assertEqual(len(nc.subnet_lookup), 0)
-        self.assertEqual(len(nc.port_lookup), 0)
-
-    def test_get_network_by_id(self):
-        nc = dhcp_agent.NetworkCache()
-        nc.put(fake_network)
-
-        self.assertEqual(nc.get_network_by_id(fake_network.id), fake_network)
-
-    def test_get_network_ids(self):
-        nc = dhcp_agent.NetworkCache()
-        nc.put(fake_network)
-
-        self.assertEqual(list(nc.get_network_ids()), [fake_network.id])
-
-    def test_get_network_by_subnet_id(self):
-        nc = dhcp_agent.NetworkCache()
-        nc.put(fake_network)
-
-        self.assertEqual(nc.get_network_by_subnet_id(fake_subnet1.id),
-                         fake_network)
-
-    def test_get_network_by_port_id(self):
-        nc = dhcp_agent.NetworkCache()
-        nc.put(fake_network)
-
-        self.assertEqual(nc.get_network_by_port_id(fake_port1.id),
-                         fake_network)
-
-    def test_put_port(self):
-        fake_net = dhcp.NetModel(
-            dict(id='12345678-1234-5678-1234567890ab',
-                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-                 subnets=[fake_subnet1],
-                 ports=[fake_port1]))
-        nc = dhcp_agent.NetworkCache()
-        nc.put(fake_net)
-        nc.put_port(fake_port2)
-        self.assertEqual(len(nc.port_lookup), 2)
-        self.assertIn(fake_port2, fake_net.ports)
-
-    def test_put_port_existing(self):
-        fake_net = dhcp.NetModel(
-            dict(id='12345678-1234-5678-1234567890ab',
-                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-                 subnets=[fake_subnet1],
-                 ports=[fake_port1, fake_port2]))
-        nc = dhcp_agent.NetworkCache()
-        nc.put(fake_net)
-        nc.put_port(fake_port2)
-
-        self.assertEqual(len(nc.port_lookup), 2)
-        self.assertIn(fake_port2, fake_net.ports)
-
-    def test_remove_port_existing(self):
-        fake_net = dhcp.NetModel(
-            dict(id='12345678-1234-5678-1234567890ab',
-                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
-                 subnets=[fake_subnet1],
-                 ports=[fake_port1, fake_port2]))
-        nc = dhcp_agent.NetworkCache()
-        nc.put(fake_net)
-        nc.remove_port(fake_port2)
-
-        self.assertEqual(len(nc.port_lookup), 1)
-        self.assertNotIn(fake_port2, fake_net.ports)
-
-    def test_get_port_by_id(self):
-        nc = dhcp_agent.NetworkCache()
-        nc.put(fake_network)
-        self.assertEqual(nc.get_port_by_id(fake_port1.id), fake_port1)
-
-
-class FakePort1(object):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-
-
-class FakeV4Subnet(object):
-    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
-    ip_version = 4
-    cidr = '192.168.0.0/24'
-    gateway_ip = '192.168.0.1'
-    enable_dhcp = True
-
-
-class FakeV4SubnetNoGateway(object):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-    ip_version = 4
-    cidr = '192.168.1.0/24'
-    gateway_ip = None
-    enable_dhcp = True
-
-
-class FakeV4Network(object):
-    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-    subnets = [FakeV4Subnet()]
-    ports = [FakePort1()]
-    namespace = 'qdhcp-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-
-
-class FakeV4NetworkNoSubnet(object):
-    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-    subnets = []
-    ports = []
-
-
-class FakeV4NetworkNoGateway(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4SubnetNoGateway()]
-    ports = [FakePort1()]
-
-
-class TestDeviceManager(base.BaseTestCase):
-    def setUp(self):
-        super(TestDeviceManager, self).setUp()
-        config.register_interface_driver_opts_helper(cfg.CONF)
-        cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
-        cfg.CONF.set_override('interface_driver',
-                              'neutron.agent.linux.interface.NullDriver')
-        cfg.CONF.set_override('enable_isolated_metadata', True)
-
-        self.ensure_device_is_ready_p = mock.patch(
-            'neutron.agent.linux.ip_lib.ensure_device_is_ready')
-        self.ensure_device_is_ready = (self.ensure_device_is_ready_p.start())
-
-        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
-        self.iproute_cls_p = mock.patch('neutron.agent.linux.'
-                                        'ip_lib.IpRouteCommand')
-        driver_cls = self.dvr_cls_p.start()
-        iproute_cls = self.iproute_cls_p.start()
-        self.mock_driver = mock.MagicMock()
-        self.mock_driver.DEV_NAME_LEN = (
-            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
-        self.mock_driver.use_gateway_ips = False
-        self.mock_iproute = mock.MagicMock()
-        driver_cls.return_value = self.mock_driver
-        iproute_cls.return_value = self.mock_iproute
-
-        iptables_cls_p = mock.patch(
-            'neutron.agent.linux.iptables_manager.IptablesManager')
-        iptables_cls = iptables_cls_p.start()
-        self.iptables_inst = mock.Mock()
-        iptables_cls.return_value = self.iptables_inst
-        self.mangle_inst = mock.Mock()
-        self.iptables_inst.ipv4 = {'mangle': self.mangle_inst}
-
-        self.mock_ip_wrapper_p = mock.patch("neutron.agent.linux.ip_lib."
-                                            "IPWrapper")
-        self.mock_ip_wrapper = self.mock_ip_wrapper_p.start()
-
-    def _test_setup_helper(self, device_is_ready, net=None, port=None):
-        net = net or fake_network
-        port = port or fake_port1
-        plugin = mock.Mock()
-        plugin.create_dhcp_port.return_value = port
-        self.ensure_device_is_ready.return_value = device_is_ready
-        self.mock_driver.get_device_name.return_value = 'tap12345678-12'
-
-        dh = dhcp.DeviceManager(cfg.CONF, plugin)
-        dh._set_default_route = mock.Mock()
-        dh._cleanup_stale_devices = mock.Mock()
-        interface_name = dh.setup(net)
-
-        self.assertEqual(interface_name, 'tap12345678-12')
-
-        plugin.assert_has_calls([
-            mock.call.create_dhcp_port(
-                {'port': {'name': '', 'admin_state_up': True,
-                          'network_id': net.id, 'tenant_id': net.tenant_id,
-                          'fixed_ips':
-                          [{'subnet_id': port.fixed_ips[0].subnet_id}],
-                          'device_id': mock.ANY}})])
-
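-        # The DHCP port is plumbed with the metadata address alongside its
-        # fixed IP; an IPv6-only port gets just the metadata address here.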
-        if port == fake_ipv6_port:
-            expected_ips = ['169.254.169.254/16']
-        else:
-            expected_ips = ['172.9.9.9/24', '169.254.169.254/16']
-        expected = [
-            mock.call.get_device_name(port),
-            mock.call.init_l3(
-                'tap12345678-12',
-                expected_ips,
-                namespace=net.namespace)]
-
-        if not device_is_ready:
-            expected.insert(1,
-                            mock.call.plug(net.id,
-                                           port.id,
-                                           'tap12345678-12',
-                                           'aa:bb:cc:dd:ee:ff',
-                                           namespace=net.namespace))
-        self.mock_driver.assert_has_calls(expected)
-
-        dh._set_default_route.assert_called_once_with(net, 'tap12345678-12')
-
-    def test_setup(self):
-        cfg.CONF.set_override('enable_metadata_network', False)
-        self._test_setup_helper(False)
-        cfg.CONF.set_override('enable_metadata_network', True)
-        self._test_setup_helper(False)
-
-    def test_setup_calls_fill_dhcp_udp_checksums(self):
-        self._test_setup_helper(False)
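-        # DHCP replies leaving the tap device may carry unfilled UDP
-        # checksums due to offloading, so a mangle rule fills them in.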
-        rule = ('-p udp --dport %d -j CHECKSUM --checksum-fill'
-                % const.DHCP_RESPONSE_PORT)
-        expected = [mock.call.add_rule('POSTROUTING', rule)]
-        self.mangle_inst.assert_has_calls(expected)
-
-    def test_setup_create_dhcp_port(self):
-        plugin = mock.Mock()
-        net = copy.deepcopy(fake_network)
-        plugin.create_dhcp_port.return_value = fake_dhcp_port
-        dh = dhcp.DeviceManager(cfg.CONF, plugin)
-        dh.setup(net)
-
-        plugin.assert_has_calls([
-            mock.call.create_dhcp_port(
-                {'port': {'name': '', 'admin_state_up': True,
-                          'network_id': net.id,
-                          'tenant_id': net.tenant_id,
-                          'fixed_ips': [{'subnet_id':
-                          fake_dhcp_port.fixed_ips[0].subnet_id}],
-                          'device_id': mock.ANY}})])
-        self.assertIn(fake_dhcp_port, net.ports)
-
-    def test_setup_ipv6(self):
-        self._test_setup_helper(True, net=fake_network_ipv6,
-                                port=fake_ipv6_port)
-
-    def test_setup_device_is_ready(self):
-        self._test_setup_helper(True)
-
-    def test_create_dhcp_port_raise_conflict(self):
-        plugin = mock.Mock()
-        dh = dhcp.DeviceManager(cfg.CONF, plugin)
-        plugin.create_dhcp_port.return_value = None
-        self.assertRaises(exceptions.Conflict,
-                          dh.setup_dhcp_port,
-                          fake_network)
-
-    def test_create_dhcp_port_create_new(self):
-        plugin = mock.Mock()
-        dh = dhcp.DeviceManager(cfg.CONF, plugin)
-        plugin.create_dhcp_port.return_value = fake_network.ports[0]
-        dh.setup_dhcp_port(fake_network)
-        plugin.assert_has_calls([
-            mock.call.create_dhcp_port(
-                {'port': {'name': '', 'admin_state_up': True,
-                          'network_id':
-                          fake_network.id, 'tenant_id': fake_network.tenant_id,
-                          'fixed_ips':
-                          [{'subnet_id': fake_fixed_ip1.subnet_id}],
-                          'device_id': mock.ANY}})])
-
-    def test_create_dhcp_port_update_add_subnet(self):
-        plugin = mock.Mock()
-        dh = dhcp.DeviceManager(cfg.CONF, plugin)
-        fake_network_copy = copy.deepcopy(fake_network)
-        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
-        fake_network_copy.subnets[1].enable_dhcp = True
-        plugin.update_dhcp_port.return_value = fake_network.ports[0]
-        dh.setup_dhcp_port(fake_network_copy)
-        port_body = {'port': {
-                     'network_id': fake_network.id,
-                     'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id,
-                                    'ip_address': fake_fixed_ip1.ip_address},
-                                   {'subnet_id': fake_subnet2.id}]}}
-
-        plugin.assert_has_calls([
-            mock.call.update_dhcp_port(fake_network_copy.ports[0].id,
-                                       port_body)])
-
-    def test_update_dhcp_port_raises_conflict(self):
-        plugin = mock.Mock()
-        dh = dhcp.DeviceManager(cfg.CONF, plugin)
-        fake_network_copy = copy.deepcopy(fake_network)
-        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
-        fake_network_copy.subnets[1].enable_dhcp = True
-        plugin.update_dhcp_port.return_value = None
-        self.assertRaises(exceptions.Conflict,
-                          dh.setup_dhcp_port,
-                          fake_network_copy)
-
-    def test_create_dhcp_port_no_update_or_create(self):
-        plugin = mock.Mock()
-        dh = dhcp.DeviceManager(cfg.CONF, plugin)
-        fake_network_copy = copy.deepcopy(fake_network)
-        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
-        dh.setup_dhcp_port(fake_network_copy)
-        self.assertFalse(plugin.setup_dhcp_port.called)
-        self.assertFalse(plugin.update_dhcp_port.called)
-
-    def test_setup_dhcp_port_with_non_enable_dhcp_subnet(self):
-        plugin = mock.Mock()
-        dh = dhcp.DeviceManager(cfg.CONF, plugin)
-        fake_network_copy = copy.deepcopy(fake_network)
-        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
-        plugin.update_dhcp_port.return_value = fake_port1
-        self.assertEqual(
-            fake_subnet1.id,
-            dh.setup_dhcp_port(fake_network_copy).fixed_ips[0].subnet_id)
-
-    def test_destroy(self):
-        fake_net = dhcp.NetModel(
-            dict(id=FAKE_NETWORK_UUID,
-                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
-
-        with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
-            mock_driver = mock.MagicMock()
-            mock_driver.get_device_name.return_value = 'tap12345678-12'
-            dvr_cls.return_value = mock_driver
-
-            plugin = mock.Mock()
-
-            dh = dhcp.DeviceManager(cfg.CONF, plugin)
-            dh.destroy(fake_net, 'tap12345678-12')
-
-            dvr_cls.assert_called_once_with(cfg.CONF)
-            mock_driver.assert_has_calls(
-                [mock.call.unplug('tap12345678-12',
-                                  namespace='qdhcp-' + fake_net.id)])
-            plugin.assert_has_calls(
-                [mock.call.release_dhcp_port(fake_net.id, mock.ANY)])
-
-    def test_destroy_with_none(self):
-        fake_net = dhcp.NetModel(
-            dict(id=FAKE_NETWORK_UUID,
-                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
-
-        with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
-            mock_driver = mock.MagicMock()
-            mock_driver.get_device_name.return_value = 'tap12345678-12'
-            dvr_cls.return_value = mock_driver
-
-            plugin = mock.Mock()
-
-            dh = dhcp.DeviceManager(cfg.CONF, plugin)
-            dh.destroy(fake_net, None)
-
-            dvr_cls.assert_called_once_with(cfg.CONF)
-            plugin.assert_has_calls(
-                [mock.call.release_dhcp_port(fake_net.id, mock.ANY)])
-            self.assertFalse(mock_driver.called)
-
-    def test_get_interface_name(self):
-        fake_net = dhcp.NetModel(
-            dict(id='12345678-1234-5678-1234567890ab',
-                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
-
-        fake_port = dhcp.DictModel(
-            dict(id='12345678-1234-aaaa-1234567890ab',
-                 mac_address='aa:bb:cc:dd:ee:ff'))
-
-        with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
-            mock_driver = mock.MagicMock()
-            mock_driver.get_device_name.return_value = 'tap12345678-12'
-            dvr_cls.return_value = mock_driver
-
-            plugin = mock.Mock()
-
-            dh = dhcp.DeviceManager(cfg.CONF, plugin)
-            dh.get_interface_name(fake_net, fake_port)
-
-            dvr_cls.assert_called_once_with(cfg.CONF)
-            mock_driver.assert_has_calls(
-                [mock.call.get_device_name(fake_port)])
-
-            self.assertEqual(len(plugin.mock_calls), 0)
-
-    def test_get_device_id(self):
-        fake_net = dhcp.NetModel(
-            dict(id='12345678-1234-5678-1234567890ab',
-                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
-        expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-'
-                    '5678-1234567890ab')
-
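-        # The device ID embeds uuid5(NAMESPACE_DNS, host) so that agents on
-        # different hosts derive distinct IDs for the same network.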
-        with mock.patch('uuid.uuid5') as uuid5:
-            uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457'
-
-            dh = dhcp.DeviceManager(cfg.CONF, None)
-            self.assertEqual(dh.get_device_id(fake_net), expected)
-            uuid5.assert_called_once_with(uuid.NAMESPACE_DNS, cfg.CONF.host)
-
-    def test_update(self):
-        # Try with namespaces and no metadata network
-        cfg.CONF.set_override('enable_metadata_network', False)
-        dh = dhcp.DeviceManager(cfg.CONF, None)
-        dh._set_default_route = mock.Mock()
-        network = mock.Mock()
-
-        dh.update(network, 'ns-12345678-12')
-
-        dh._set_default_route.assert_called_once_with(network,
-                                                      'ns-12345678-12')
-
-        # Metadata network enabled; don't interfere with its gateway.
-        cfg.CONF.set_override('enable_metadata_network', True)
-        dh = dhcp.DeviceManager(cfg.CONF, None)
-        dh._set_default_route = mock.Mock()
-
-        dh.update(FakeV4Network(), 'ns-12345678-12')
-
-        self.assertTrue(dh._set_default_route.called)
-
-    def test_set_default_route(self):
-        dh = dhcp.DeviceManager(cfg.CONF, None)
-        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
-            device = mock.Mock()
-            mock_IPDevice.return_value = device
-            device.route.get_gateway.return_value = None
-            # Basic case: one subnet with a gateway.
-            network = FakeV4Network()
-            dh._set_default_route(network, 'tap-name')
-
-        self.assertEqual(device.route.get_gateway.call_count, 1)
-        self.assertFalse(device.route.delete_gateway.called)
-        device.route.add_gateway.assert_called_once_with('192.168.0.1')
-
-    def test_set_default_route_no_subnet(self):
-        dh = dhcp.DeviceManager(cfg.CONF, None)
-        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
-            device = mock.Mock()
-            mock_IPDevice.return_value = device
-            device.route.get_gateway.return_value = None
-            network = FakeV4NetworkNoSubnet()
-            network.namespace = 'qdhcp-1234'
-            dh._set_default_route(network, 'tap-name')
-
-        self.assertEqual(device.route.get_gateway.call_count, 1)
-        self.assertFalse(device.route.delete_gateway.called)
-        self.assertFalse(device.route.add_gateway.called)
-
-    def test_set_default_route_no_subnet_delete_gateway(self):
-        dh = dhcp.DeviceManager(cfg.CONF, None)
-        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
-            device = mock.Mock()
-            mock_IPDevice.return_value = device
-            device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
-            network = FakeV4NetworkNoSubnet()
-            network.namespace = 'qdhcp-1234'
-            dh._set_default_route(network, 'tap-name')
-
-        self.assertEqual(device.route.get_gateway.call_count, 1)
-        device.route.delete_gateway.assert_called_once_with('192.168.0.1')
-        self.assertFalse(device.route.add_gateway.called)
-
-    def test_set_default_route_no_gateway(self):
-        dh = dhcp.DeviceManager(cfg.CONF, None)
-        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
-            device = mock.Mock()
-            mock_IPDevice.return_value = device
-            device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
-            network = FakeV4NetworkNoGateway()
-            network.namespace = 'qdhcp-1234'
-            dh._set_default_route(network, 'tap-name')
-
-        self.assertEqual(device.route.get_gateway.call_count, 1)
-        device.route.delete_gateway.assert_called_once_with('192.168.0.1')
-        self.assertFalse(device.route.add_gateway.called)
-
-    def test_set_default_route_do_nothing(self):
-        dh = dhcp.DeviceManager(cfg.CONF, None)
-        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
-            device = mock.Mock()
-            mock_IPDevice.return_value = device
-            device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
-            network = FakeV4Network()
-            dh._set_default_route(network, 'tap-name')
-
-        self.assertEqual(device.route.get_gateway.call_count, 1)
-        self.assertFalse(device.route.delete_gateway.called)
-        self.assertFalse(device.route.add_gateway.called)
-
-    def test_set_default_route_change_gateway(self):
-        dh = dhcp.DeviceManager(cfg.CONF, None)
-        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
-            device = mock.Mock()
-            mock_IPDevice.return_value = device
-            device.route.get_gateway.return_value = dict(gateway='192.168.0.2')
-            network = FakeV4Network()
-            dh._set_default_route(network, 'tap-name')
-
-        self.assertEqual(device.route.get_gateway.call_count, 1)
-        self.assertFalse(device.route.delete_gateway.called)
-        device.route.add_gateway.assert_called_once_with('192.168.0.1')
-
-    def test_set_default_route_two_subnets(self):
-        # Try two subnets. Should set gateway from the first.
-        dh = dhcp.DeviceManager(cfg.CONF, None)
-        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
-            device = mock.Mock()
-            mock_IPDevice.return_value = device
-            device.route.get_gateway.return_value = None
-            network = FakeV4Network()
-            subnet2 = FakeV4Subnet()
-            subnet2.gateway_ip = '192.168.1.1'
-            network.subnets = [subnet2, FakeV4Subnet()]
-            dh._set_default_route(network, 'tap-name')
-
-        self.assertEqual(device.route.get_gateway.call_count, 1)
-        self.assertFalse(device.route.delete_gateway.called)
-        device.route.add_gateway.assert_called_once_with('192.168.1.1')
-
-
-class TestDictModel(base.BaseTestCase):
-    def test_basic_dict(self):
-        d = dict(a=1, b=2)
-
-        m = dhcp.DictModel(d)
-        self.assertEqual(m.a, 1)
-        self.assertEqual(m.b, 2)
-
-    def test_dict_has_sub_dict(self):
-        d = dict(a=dict(b=2))
-        m = dhcp.DictModel(d)
-        self.assertEqual(m.a.b, 2)
-
-    def test_dict_contains_list(self):
-        d = dict(a=[1, 2])
-
-        m = dhcp.DictModel(d)
-        self.assertEqual(m.a, [1, 2])
-
-    def test_dict_contains_list_of_dicts(self):
-        d = dict(a=[dict(b=2), dict(c=3)])
-
-        m = dhcp.DictModel(d)
-        self.assertEqual(m.a[0].b, 2)
-        self.assertEqual(m.a[1].c, 3)
diff --git a/neutron/tests/unit/agent/l2/__init__.py b/neutron/tests/unit/agent/l2/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/l2/extensions/__init__.py b/neutron/tests/unit/agent/l2/extensions/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/l2/extensions/test_manager.py b/neutron/tests/unit/agent/l2/extensions/test_manager.py
deleted file mode 100644 (file)
index 0f0e429..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-
-from neutron.agent.l2.extensions import manager as ext_manager
-from neutron.tests import base
-
-
-class TestAgentExtensionsManager(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestAgentExtensionsManager, self).setUp()
-        mock.patch('neutron.agent.l2.extensions.qos.QosAgentExtension',
-                   autospec=True).start()
-        conf = cfg.CONF
-        ext_manager.register_opts(conf)
-        cfg.CONF.set_override('extensions', ['qos'], 'agent')
-        self.manager = ext_manager.AgentExtensionsManager(conf)
-
-    def _get_extension(self):
-        return self.manager.extensions[0].obj
-
-    def test_initialize(self):
-        connection = object()
-        self.manager.initialize(connection, 'fake_driver_type')
-        ext = self._get_extension()
-        ext.initialize.assert_called_once_with(connection, 'fake_driver_type')
-
-    def test_handle_port(self):
-        context = object()
-        data = object()
-        self.manager.handle_port(context, data)
-        ext = self._get_extension()
-        ext.handle_port.assert_called_once_with(context, data)
-
-    def test_delete_port(self):
-        context = object()
-        data = object()
-        self.manager.delete_port(context, data)
-        ext = self._get_extension()
-        ext.delete_port.assert_called_once_with(context, data)
diff --git a/neutron/tests/unit/agent/l2/extensions/test_qos.py b/neutron/tests/unit/agent/l2/extensions/test_qos.py
deleted file mode 100755 (executable)
index 25cf371..0000000
+++ /dev/null
@@ -1,403 +0,0 @@
-# Copyright (c) 2015 Mellanox Technologies, Ltd
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.agent.l2.extensions import qos
-from neutron.api.rpc.callbacks.consumer import registry
-from neutron.api.rpc.callbacks import events
-from neutron.api.rpc.callbacks import resources
-from neutron.api.rpc.handlers import resources_rpc
-from neutron.common import exceptions
-from neutron import context
-from neutron.objects.qos import policy
-from neutron.objects.qos import rule
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.services.qos import qos_consts
-from neutron.tests import base
-
-
-BASE_TEST_POLICY = {'context': None,
-                    'name': 'test1',
-                    'id': 'fake_policy_id'}
-
-TEST_POLICY = policy.QosPolicy(**BASE_TEST_POLICY)
-
-TEST_POLICY_DESCR = policy.QosPolicy(description='fake_descr',
-                                     **BASE_TEST_POLICY)
-
-TEST_POLICY2 = policy.QosPolicy(context=None,
-                                name='test2', id='fake_policy_id_2')
-
-TEST_PORT = {'port_id': 'test_port_id',
-             'qos_policy_id': TEST_POLICY.id}
-
-TEST_PORT2 = {'port_id': 'test_port_id_2',
-              'qos_policy_id': TEST_POLICY2.id}
-
-
-class FakeDriver(qos.QosAgentDriver):
-
-    SUPPORTED_RULES = {qos_consts.RULE_TYPE_BANDWIDTH_LIMIT}
-
-    def __init__(self):
-        super(FakeDriver, self).__init__()
-        self.create_bandwidth_limit = mock.Mock()
-        self.update_bandwidth_limit = mock.Mock()
-        self.delete_bandwidth_limit = mock.Mock()
-
-    def initialize(self):
-        pass
-
-
-class QosFakeRule(rule.QosRule):
-
-    rule_type = 'fake_type'
-
-
-class QosAgentDriverTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(QosAgentDriverTestCase, self).setUp()
-        self.driver = FakeDriver()
-        self.policy = TEST_POLICY
-        self.rule = (
-            rule.QosBandwidthLimitRule(context=None, id='fake_rule_id',
-                                       qos_policy_id=self.policy.id,
-                                       max_kbps=100, max_burst_kbps=200))
-        self.policy.rules = [self.rule]
-        self.port = {'qos_policy_id': None, 'network_qos_policy_id': None,
-                     'device_owner': 'random-device-owner'}
-
-        self.fake_rule = QosFakeRule(context=None, id='really_fake_rule_id',
-                                     qos_policy_id=self.policy.id)
-
-    def test_create(self):
-        self.driver.create(self.port, self.policy)
-        self.driver.create_bandwidth_limit.assert_called_with(
-            self.port, self.rule)
-
-    def test_update(self):
-        self.driver.update(self.port, self.policy)
-        self.driver.update_bandwidth_limit.assert_called_with(
-            self.port, self.rule)
-
-    def test_delete(self):
-        self.driver.delete(self.port, self.policy)
-        self.driver.delete_bandwidth_limit.assert_called_with(self.port)
-
-    def test_delete_no_policy(self):
-        self.driver.delete(self.port, qos_policy=None)
-        self.driver.delete_bandwidth_limit.assert_called_with(self.port)
-
-    def test__iterate_rules_with_unknown_rule_type(self):
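-        # Rule types outside SUPPORTED_RULES are filtered out rather than
-        # raising, so only the bandwidth-limit rule survives iteration.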
-        self.policy.rules.append(self.fake_rule)
-        rules = list(self.driver._iterate_rules(self.policy.rules))
-        self.assertEqual(1, len(rules))
-        self.assertIsInstance(rules[0], rule.QosBandwidthLimitRule)
-
-    def test__handle_update_create_rules_checks_should_apply_to_port(self):
-        self.rule.should_apply_to_port = mock.Mock(return_value=False)
-        self.driver.create(self.port, self.policy)
-        self.assertFalse(self.driver.create_bandwidth_limit.called)
-
-        self.rule.should_apply_to_port = mock.Mock(return_value=True)
-        self.driver.create(self.port, self.policy)
-        self.assertTrue(self.driver.create_bandwidth_limit.called)
-
-
-class QosExtensionBaseTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(QosExtensionBaseTestCase, self).setUp()
-        self.qos_ext = qos.QosAgentExtension()
-        self.context = context.get_admin_context()
-        self.connection = mock.Mock()
-
-        # Don't rely on the actual configured driver; stub out the loader.
-        mock.patch(
-            'neutron.manager.NeutronManager.load_class_for_provider',
-            return_value=lambda: mock.Mock(spec=qos.QosAgentDriver)
-        ).start()
-
-
-class QosExtensionRpcTestCase(QosExtensionBaseTestCase):
-
-    def setUp(self):
-        super(QosExtensionRpcTestCase, self).setUp()
-        self.qos_ext.initialize(
-            self.connection, constants.EXTENSION_DRIVER_TYPE)
-
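-        # Make every resource_rpc.pull() return the canned TEST_POLICY.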
-        self.pull_mock = mock.patch.object(
-            self.qos_ext.resource_rpc, 'pull',
-            return_value=TEST_POLICY).start()
-
-    def _create_test_port_dict(self, qos_policy_id=None):
-        return {'port_id': uuidutils.generate_uuid(),
-                'qos_policy_id': qos_policy_id or TEST_POLICY.id}
-
-    def test_handle_port_with_no_policy(self):
-        port = self._create_test_port_dict()
-        del port['qos_policy_id']
-        self.qos_ext._process_reset_port = mock.Mock()
-        self.qos_ext.handle_port(self.context, port)
-        self.qos_ext._process_reset_port.assert_called_with(port)
-
-    def test_handle_unknown_port(self):
-        port = self._create_test_port_dict()
-        qos_policy_id = port['qos_policy_id']
-        port_id = port['port_id']
-        self.qos_ext.handle_port(self.context, port)
-        # Make sure the underlying QoS driver is called with the
-        # right parameters.
-        self.qos_ext.qos_driver.create.assert_called_once_with(
-            port, TEST_POLICY)
-        self.assertEqual(port,
-            self.qos_ext.policy_map.qos_policy_ports[qos_policy_id][port_id])
-        self.assertIn(port_id, self.qos_ext.policy_map.port_policies)
-        self.assertEqual(TEST_POLICY,
-            self.qos_ext.policy_map.known_policies[qos_policy_id])
-
-    def test_handle_known_port(self):
-        port_obj1 = self._create_test_port_dict()
-        port_obj2 = dict(port_obj1)
-        self.qos_ext.handle_port(self.context, port_obj1)
-        self.qos_ext.qos_driver.reset_mock()
-        self.qos_ext.handle_port(self.context, port_obj2)
-        self.assertFalse(self.qos_ext.qos_driver.create.called)
-
-    def test_handle_known_port_change_policy_id(self):
-        port = self._create_test_port_dict()
-        self.qos_ext.handle_port(self.context, port)
-        self.qos_ext.resource_rpc.pull.reset_mock()
-        port['qos_policy_id'] = uuidutils.generate_uuid()
-        self.qos_ext.handle_port(self.context, port)
-        self.pull_mock.assert_called_once_with(
-             self.context, resources.QOS_POLICY,
-             port['qos_policy_id'])
-
-    def test_delete_known_port(self):
-        port = self._create_test_port_dict()
-        self.qos_ext.handle_port(self.context, port)
-        self.qos_ext.qos_driver.reset_mock()
-        self.qos_ext.delete_port(self.context, port)
-        self.qos_ext.qos_driver.delete.assert_called_with(port)
-        self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port))
-
-    def test_delete_unknown_port(self):
-        port = self._create_test_port_dict()
-        self.qos_ext.delete_port(self.context, port)
-        self.assertFalse(self.qos_ext.qos_driver.delete.called)
-        self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port))
-
-    def test__handle_notification_ignores_all_event_types_except_updated(self):
-        with mock.patch.object(
-            self.qos_ext, '_process_update_policy') as update_mock:
-
-            for event_type in set(events.VALID) - {events.UPDATED}:
-                self.qos_ext._handle_notification(object(), event_type)
-                self.assertFalse(update_mock.called)
-
-    def test__handle_notification_passes_update_events(self):
-        with mock.patch.object(
-            self.qos_ext, '_process_update_policy') as update_mock:
-
-            policy_obj = mock.Mock()
-            self.qos_ext._handle_notification(policy_obj, events.UPDATED)
-            update_mock.assert_called_with(policy_obj)
-
-    def test__process_update_policy(self):
-        port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id)
-        port2 = self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id)
-        self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY)
-        self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2)
-        self.qos_ext._policy_rules_modified = mock.Mock(return_value=True)
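-        # With rules reported as modified, each update must reach the
-        # driver for the port currently bound to that policy.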
-
-        policy_obj = mock.Mock()
-        policy_obj.id = port1['qos_policy_id']
-        self.qos_ext._process_update_policy(policy_obj)
-        self.qos_ext.qos_driver.update.assert_called_with(port1, policy_obj)
-
-        self.qos_ext.qos_driver.update.reset_mock()
-        policy_obj.id = port2['qos_policy_id']
-        self.qos_ext._process_update_policy(policy_obj)
-        self.qos_ext.qos_driver.update.assert_called_with(port2, policy_obj)
-
-    def test__process_update_policy_descr_not_propagated_into_driver(self):
-        port = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id)
-        self.qos_ext.policy_map.set_port_policy(port, TEST_POLICY)
-        self.qos_ext._policy_rules_modified = mock.Mock(return_value=False)
-        self.qos_ext._process_update_policy(TEST_POLICY_DESCR)
-        self.qos_ext._policy_rules_modified.assert_called_with(TEST_POLICY,
-            TEST_POLICY_DESCR)
-        self.assertFalse(self.qos_ext.qos_driver.delete.called)
-        self.assertFalse(self.qos_ext.qos_driver.update.called)
-        self.assertEqual(TEST_POLICY_DESCR,
-                         self.qos_ext.policy_map.get_policy(TEST_POLICY.id))
-
-    def test__process_update_policy_not_known(self):
-        self.qos_ext._policy_rules_modified = mock.Mock()
-        self.qos_ext._process_update_policy(TEST_POLICY_DESCR)
-        self.assertFalse(self.qos_ext._policy_rules_modified.called)
-        self.assertFalse(self.qos_ext.qos_driver.delete.called)
-        self.assertFalse(self.qos_ext.qos_driver.update.called)
-        self.assertIsNone(self.qos_ext.policy_map.get_policy(
-            TEST_POLICY_DESCR.id))
-
-    def test__process_reset_port(self):
-        port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id)
-        port2 = self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id)
-        self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY)
-        self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2)
-
-        self.qos_ext._process_reset_port(port1)
-        self.qos_ext.qos_driver.delete.assert_called_with(port1)
-        self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port1))
-        self.assertIsNotNone(self.qos_ext.policy_map.get_port_policy(port2))
-
-        self.qos_ext.qos_driver.delete.reset_mock()
-        self.qos_ext._process_reset_port(port2)
-        self.qos_ext.qos_driver.delete.assert_called_with(port2)
-        self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port2))
-
-
-class QosExtensionInitializeTestCase(QosExtensionBaseTestCase):
-
-    @mock.patch.object(registry, 'subscribe')
-    @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback')
-    def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock):
-        self.qos_ext.initialize(
-            self.connection, constants.EXTENSION_DRIVER_TYPE)
-        self.connection.create_consumer.assert_has_calls(
-            [mock.call(
-                 resources_rpc.resource_type_versioned_topic(resource_type),
-                 [rpc_mock()],
-                 fanout=True)
-             for resource_type in self.qos_ext.SUPPORTED_RESOURCES]
-        )
-        subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY)
-
-
-class QosExtensionReflushRulesTestCase(QosExtensionBaseTestCase):
-
-    def setUp(self):
-        super(QosExtensionReflushRulesTestCase, self).setUp()
-        self.qos_ext.initialize(
-            self.connection, constants.EXTENSION_DRIVER_TYPE)
-
-        self.pull_mock = mock.patch.object(
-            self.qos_ext.resource_rpc, 'pull',
-            return_value=TEST_POLICY).start()
-
-        self.policy = policy.QosPolicy(**BASE_TEST_POLICY)
-        self.rule = (
-            rule.QosBandwidthLimitRule(context=None, id='fake_rule_id',
-                                       qos_policy_id=self.policy.id,
-                                       max_kbps=100, max_burst_kbps=10))
-        self.policy.rules = [self.rule]
-        self.port = {'port_id': uuidutils.generate_uuid(),
-                     'qos_policy_id': TEST_POLICY.id}
-        self.new_policy = policy.QosPolicy(description='descr',
-                                           **BASE_TEST_POLICY)
-
-    def test_is_reflush_required_change_policy_descr(self):
-        self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
-        self.new_policy.rules = [self.rule]
-        self.assertFalse(self.qos_ext._policy_rules_modified(self.policy,
-                                                             self.new_policy))
-
-    def test_is_reflush_required_change_policy_rule(self):
-        self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
-        updated_rule = (rule.QosBandwidthLimitRule(context=None,
-                                                id='fake_rule_id',
-                                                qos_policy_id=self.policy.id,
-                                                max_kbps=200,
-                                                max_burst_kbps=20))
-        self.new_policy.rules = [updated_rule]
-        self.assertTrue(self.qos_ext._policy_rules_modified(self.policy,
-                                                            self.new_policy))
-
-    def test_is_reflush_required_remove_rules(self):
-        self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
-        self.new_policy.rules = []
-        self.assertTrue(self.qos_ext._policy_rules_modified(self.policy,
-                                                            self.new_policy))
-
-    def test_is_reflush_required_add_rules(self):
-        self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
-        self.new_policy.rules = [self.rule]
-        fake_rule = QosFakeRule(context=None, id='really_fake_rule_id',
-                                qos_policy_id=self.policy.id)
-        self.new_policy.rules.append(fake_rule)
-        self.assertTrue(self.qos_ext._policy_rules_modified(self.policy,
-                                                            self.new_policy))
-
-
-class PortPolicyMapTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(PortPolicyMapTestCase, self).setUp()
-        self.policy_map = qos.PortPolicyMap()
-
-    def test_update_policy(self):
-        self.policy_map.update_policy(TEST_POLICY)
-        self.assertEqual(TEST_POLICY,
-                         self.policy_map.known_policies[TEST_POLICY.id])
-
-    def _set_ports(self):
-        self.policy_map.set_port_policy(TEST_PORT, TEST_POLICY)
-        self.policy_map.set_port_policy(TEST_PORT2, TEST_POLICY2)
-
-    def test_set_port_policy(self):
-        self._set_ports()
-        self.assertEqual(TEST_POLICY,
-                         self.policy_map.known_policies[TEST_POLICY.id])
-        self.assertIn(TEST_PORT['port_id'],
-                      self.policy_map.qos_policy_ports[TEST_POLICY.id])
-
-    def test_get_port_policy(self):
-        self._set_ports()
-        self.assertEqual(TEST_POLICY,
-                         self.policy_map.get_port_policy(TEST_PORT))
-        self.assertEqual(TEST_POLICY2,
-                         self.policy_map.get_port_policy(TEST_PORT2))
-
-    def test_get_ports(self):
-        self._set_ports()
-        self.assertEqual([TEST_PORT],
-                         list(self.policy_map.get_ports(TEST_POLICY)))
-
-        self.assertEqual([TEST_PORT2],
-                         list(self.policy_map.get_ports(TEST_POLICY2)))
-
-    def test_clean_by_port(self):
-        self._set_ports()
-        self.policy_map.clean_by_port(TEST_PORT)
-        self.assertNotIn(TEST_POLICY.id, self.policy_map.known_policies)
-        self.assertNotIn(TEST_PORT['port_id'], self.policy_map.port_policies)
-        self.assertIn(TEST_POLICY2.id, self.policy_map.known_policies)
-
-    def test_clean_by_port_raises_exception_for_unknown_port(self):
-        self.assertRaises(exceptions.PortNotFound,
-                          self.policy_map.clean_by_port, TEST_PORT)
-
-    def test_has_policy_changed(self):
-        self._set_ports()
-        self.assertTrue(
-            self.policy_map.has_policy_changed(TEST_PORT, 'a_new_policy_id'))
-
-        self.assertFalse(
-            self.policy_map.has_policy_changed(TEST_PORT, TEST_POLICY.id))
diff --git a/neutron/tests/unit/agent/l3/__init__.py b/neutron/tests/unit/agent/l3/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py
deleted file mode 100644 (file)
index 229114a..0000000
+++ /dev/null
@@ -1,2624 +0,0 @@
-# Copyright 2012 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-import eventlet
-from itertools import chain as iter_chain
-from itertools import combinations as iter_combinations
-import mock
-import netaddr
-from oslo_log import log
-import oslo_messaging
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
-import six
-from testtools import matchers
-
-from neutron.agent.common import config as agent_config
-from neutron.agent.l3 import agent as l3_agent
-from neutron.agent.l3 import config as l3_config
-from neutron.agent.l3 import dvr_edge_router as dvr_router
-from neutron.agent.l3 import dvr_snat_ns
-from neutron.agent.l3 import ha
-from neutron.agent.l3 import legacy_router
-from neutron.agent.l3 import link_local_allocator as lla
-from neutron.agent.l3 import namespaces
-from neutron.agent.l3 import router_info as l3router
-from neutron.agent.l3 import router_processing_queue
-from neutron.agent.linux import dibbler
-from neutron.agent.linux import external_process
-from neutron.agent.linux import interface
-from neutron.agent.linux import pd
-from neutron.agent.linux import ra
-from neutron.agent.metadata import driver as metadata_driver
-from neutron.agent import rpc as agent_rpc
-from neutron.common import config as base_config
-from neutron.common import constants as l3_constants
-from neutron.common import exceptions as n_exc
-from neutron.extensions import portbindings
-from neutron.plugins.common import constants as p_const
-from neutron.tests import base
-from neutron.tests.common import l3_test_common
-
-_uuid = uuidutils.generate_uuid
-HOSTNAME = 'myhost'
-FAKE_ID = _uuid()
-FAKE_ID_2 = _uuid()
-FIP_PRI = 32768
-
-
-class BasicRouterOperationsFramework(base.BaseTestCase):
-    def setUp(self):
-        super(BasicRouterOperationsFramework, self).setUp()
-        mock.patch('eventlet.spawn').start()
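-        # Keep the agent from spawning real greenthreads during tests.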
-        self.conf = agent_config.setup_conf()
-        self.conf.register_opts(base_config.core_opts)
-        log.register_options(self.conf)
-        self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
-        self.conf.register_opts(l3_config.OPTS)
-        self.conf.register_opts(ha.OPTS)
-        agent_config.register_interface_driver_opts_helper(self.conf)
-        agent_config.register_process_monitor_opts(self.conf)
-        agent_config.register_availability_zone_opts_helper(self.conf)
-        self.conf.register_opts(interface.OPTS)
-        self.conf.register_opts(external_process.OPTS)
-        self.conf.register_opts(pd.OPTS)
-        self.conf.register_opts(ra.OPTS)
-        self.conf.set_override('interface_driver',
-                               'neutron.agent.linux.interface.NullDriver')
-        self.conf.set_override('send_arp_for_ha', 1)
-        self.conf.set_override('state_path', '')
-        self.conf.set_override('ra_confs', '/tmp')
-        self.conf.set_override('pd_dhcp_driver', '')
-
-        self.device_exists_p = mock.patch(
-            'neutron.agent.linux.ip_lib.device_exists')
-        self.device_exists = self.device_exists_p.start()
-
-        self.ensure_dir = mock.patch('neutron.common.utils.ensure_dir').start()
-
-        mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
-                   '.get_full_config_file_path').start()
-
-        self.utils_exec_p = mock.patch(
-            'neutron.agent.linux.utils.execute')
-        self.utils_exec = self.utils_exec_p.start()
-
-        self.utils_replace_file_p = mock.patch(
-            'neutron.common.utils.replace_file')
-        self.utils_replace_file = self.utils_replace_file_p.start()
-
-        self.external_process_p = mock.patch(
-            'neutron.agent.linux.external_process.ProcessManager')
-        self.external_process = self.external_process_p.start()
-        self.process_monitor = mock.patch(
-            'neutron.agent.linux.external_process.ProcessMonitor').start()
-
-        self.send_adv_notif_p = mock.patch(
-            'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
-        self.send_adv_notif = self.send_adv_notif_p.start()
-
-        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
-        driver_cls = self.dvr_cls_p.start()
-        self.mock_driver = mock.MagicMock()
-        self.mock_driver.DEV_NAME_LEN = (
-            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
-        driver_cls.return_value = self.mock_driver
-
-        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
-        ip_cls = self.ip_cls_p.start()
-        self.mock_ip = mock.MagicMock()
-        ip_cls.return_value = self.mock_ip
-
-        ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start()
-        self.mock_rule = mock.MagicMock()
-        ip_rule.return_value = self.mock_rule
-
-        ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
-        self.mock_ip_dev = mock.MagicMock()
-        ip_dev.return_value = self.mock_ip_dev
-
-        self.l3pluginApi_cls_p = mock.patch(
-            'neutron.agent.l3.agent.L3PluginApi')
-        l3pluginApi_cls = self.l3pluginApi_cls_p.start()
-        self.plugin_api = mock.MagicMock()
-        l3pluginApi_cls.return_value = self.plugin_api
-
-        self.looping_call_p = mock.patch(
-            'oslo_service.loopingcall.FixedIntervalLoopingCall')
-        self.looping_call_p.start()
-
-        subnet_id_1 = _uuid()
-        subnet_id_2 = _uuid()
-        self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
-                                         'gateway_ip': '152.2.0.1',
-                                         'id': subnet_id_1}],
-                            'network_id': _uuid(),
-                            'device_owner':
-                                l3_constants.DEVICE_OWNER_ROUTER_SNAT,
-                            'mac_address': 'fa:16:3e:80:8d:80',
-                            'fixed_ips': [{'subnet_id': subnet_id_1,
-                                           'ip_address': '152.2.0.13',
-                                           'prefixlen': 16}],
-                            'id': _uuid(), 'device_id': _uuid()},
-                           {'subnets': [{'cidr': '152.10.0.0/16',
-                                         'gateway_ip': '152.10.0.1',
-                                         'id': subnet_id_2}],
-                            'network_id': _uuid(),
-                            'device_owner':
-                                l3_constants.DEVICE_OWNER_ROUTER_SNAT,
-                            'mac_address': 'fa:16:3e:80:8d:80',
-                            'fixed_ips': [{'subnet_id': subnet_id_2,
-                                           'ip_address': '152.10.0.13',
-                                           'prefixlen': 16}],
-                            'id': _uuid(), 'device_id': _uuid()}]
-
-        self.ri_kwargs = {'agent_conf': self.conf,
-                          'interface_driver': self.mock_driver}
-
-    def _process_router_instance_for_agent(self, agent, ri, router):
-        ri.router = router
-        if not ri.radvd:
-            ri.radvd = ra.DaemonMonitor(router['id'],
-                                        ri.ns_name,
-                                        agent.process_monitor,
-                                        ri.get_internal_device_name,
-                                        self.conf)
-        ri.process(agent)
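-
-    # A minimal usage sketch (hypothetical convenience wrapper, not used
-    # by the tests below): build a fresh router via l3_test_common and
-    # push it through the processing helper above.
-    def _example_process_fresh_router(self, agent):
-        router = l3_test_common.prepare_router_data()
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        self._process_router_instance_for_agent(agent, ri, router)
-        return ri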
-
-
-class TestBasicRouterOperations(BasicRouterOperationsFramework):
-    def test_init_ha_conf(self):
-        with mock.patch('os.path.dirname', return_value='/etc/ha/'):
-            l3_agent.L3NATAgent(HOSTNAME, self.conf)
-            self.ensure_dir.assert_called_once_with('/etc/ha/')
-
-    def test_enqueue_state_change_router_not_found(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        non_existent_router = 42
-
-        # Make sure the exceptional code path has coverage
-        agent.enqueue_state_change(non_existent_router, 'master')
-
-    def test_enqueue_state_change_metadata_disable(self):
-        self.conf.set_override('enable_metadata_proxy', False)
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = mock.Mock()
-        router_info = mock.MagicMock()
-        agent.router_info[router.id] = router_info
-        agent._update_metadata_proxy = mock.Mock()
-        agent.enqueue_state_change(router.id, 'master')
-        self.assertFalse(agent._update_metadata_proxy.call_count)
-
-    def test_periodic_sync_routers_task_raise_exception(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.plugin_api.get_router_ids.return_value = ['fake_id']
-        self.plugin_api.get_routers.side_effect = ValueError
-        self.assertRaises(ValueError,
-                          agent.periodic_sync_routers_task,
-                          agent.context)
-        self.assertTrue(agent.fullsync)
-
-    def test_l3_initial_report_state_done(self):
-        with mock.patch.object(l3_agent.L3NATAgentWithStateReport,
-                               'periodic_sync_routers_task'),\
-                mock.patch.object(agent_rpc.PluginReportStateAPI,
-                                  'report_state') as report_state,\
-                mock.patch.object(eventlet, 'spawn_n'):
-
-            agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
-                                                       conf=self.conf)
-
-            self.assertTrue(agent.agent_state['start_flag'])
-            agent.after_start()
-            report_state.assert_called_once_with(agent.context,
-                                                 agent.agent_state,
-                                                 True)
-            self.assertIsNone(agent.agent_state.get('start_flag'))
-
-    def test_report_state_revival_logic(self):
-        with mock.patch.object(agent_rpc.PluginReportStateAPI,
-                               'report_state') as report_state:
-            agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
-                                                       conf=self.conf)
-            report_state.return_value = l3_constants.AGENT_REVIVED
-            agent._report_state()
-            self.assertTrue(agent.fullsync)
-
-            agent.fullsync = False
-            report_state.return_value = l3_constants.AGENT_ALIVE
-            agent._report_state()
-            self.assertFalse(agent.fullsync)
-
-    def test_periodic_sync_routers_task_call_clean_stale_namespaces(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.plugin_api.get_routers.return_value = []
-        agent.periodic_sync_routers_task(agent.context)
-        self.assertFalse(agent.namespaces_manager._clean_stale)
-
-    def test_periodic_sync_routers_task_call_clean_stale_meta_proxies(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        stale_router_ids = [_uuid(), _uuid()]
-        active_routers = [{'id': _uuid()}, {'id': _uuid()}]
-        self.plugin_api.get_router_ids.return_value = [r['id'] for r
-                                                       in active_routers]
-        self.plugin_api.get_routers.return_value = active_routers
-        namespace_list = [namespaces.NS_PREFIX + r_id
-                          for r_id in stale_router_ids]
-        namespace_list += [namespaces.NS_PREFIX + r['id']
-                           for r in active_routers]
-        self.mock_ip.get_namespaces.return_value = namespace_list
-        driver = metadata_driver.MetadataDriver
-        with mock.patch.object(
-                driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
-            agent.periodic_sync_routers_task(agent.context)
-
-            expected_calls = [mock.call(mock.ANY, r_id, agent.conf)
-                              for r_id in stale_router_ids]
-            self.assertEqual(len(stale_router_ids), destroy_proxy.call_count)
-            destroy_proxy.assert_has_calls(expected_calls, any_order=True)
-
-    def test_router_info_create(self):
-        router_id = _uuid()
-        ri = l3router.RouterInfo(router_id, {}, **self.ri_kwargs)
-
-        self.assertTrue(ri.ns_name.endswith(router_id))
-
-    def test_router_info_create_with_router(self):
-        ns_id = _uuid()
-        subnet_id = _uuid()
-        ex_gw_port = {'id': _uuid(),
-                      'network_id': _uuid(),
-                      'fixed_ips': [{'ip_address': '19.4.4.4',
-                                     'prefixlen': 24,
-                                     'subnet_id': subnet_id}],
-                      'subnets': [{'id': subnet_id,
-                                   'cidr': '19.4.4.0/24',
-                                   'gateway_ip': '19.4.4.1'}]}
-        router = {
-            'id': _uuid(),
-            'enable_snat': True,
-            'routes': [],
-            'gw_port': ex_gw_port}
-        ri = l3router.RouterInfo(ns_id, router, **self.ri_kwargs)
-        self.assertTrue(ri.ns_name.endswith(ns_id))
-        self.assertEqual(router, ri.router)
-
-    def test_agent_create(self):
-        l3_agent.L3NATAgent(HOSTNAME, self.conf)
-
-    def _test_internal_network_action(self, action):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router_id = router['id']
-        ri = l3router.RouterInfo(router_id, router, **self.ri_kwargs)
-        port = {'network_id': _uuid(),
-                'id': _uuid(),
-                'mac_address': 'ca:fe:de:ad:be:ef',
-                'fixed_ips': [{'subnet_id': _uuid(),
-                               'ip_address': '99.0.1.9',
-                               'prefixlen': 24}]}
-
-        interface_name = ri.get_internal_device_name(port['id'])
-
-        if action == 'add':
-            self.device_exists.return_value = False
-            ri.internal_network_added(port)
-            self.assertEqual(1, self.mock_driver.plug.call_count)
-            self.assertEqual(1, self.mock_driver.init_router_port.call_count)
-            self.send_adv_notif.assert_called_once_with(ri.ns_name,
-                                                        interface_name,
-                                                        '99.0.1.9', mock.ANY)
-        elif action == 'remove':
-            self.device_exists.return_value = True
-            ri.internal_network_removed(port)
-            self.assertEqual(1, self.mock_driver.unplug.call_count)
-        else:
-            raise Exception("Invalid action %s" % action)
-
-    @staticmethod
-    def _fixed_ip_cidr(fixed_ip):
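-        # e.g. {'ip_address': '99.0.1.9', 'prefixlen': 24} -> '99.0.1.9/24'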
-        return '%s/%s' % (fixed_ip['ip_address'], fixed_ip['prefixlen'])
-
-    def _test_internal_network_action_dist(self, action):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router_id = router['id']
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrEdgeRouter(
-            agent, HOSTNAME, router_id, router, **self.ri_kwargs)
-        subnet_id = _uuid()
-        port = {'network_id': _uuid(),
-                'id': _uuid(),
-                'mac_address': 'ca:fe:de:ad:be:ef',
-                'fixed_ips': [{'subnet_id': subnet_id,
-                               'ip_address': '99.0.1.9',
-                               'prefixlen': 24}],
-                'subnets': [{'id': subnet_id}]}
-
-        ri.router['gw_port_host'] = HOSTNAME
-        agent.host = HOSTNAME
-        agent.conf.agent_mode = 'dvr_snat'
-        sn_port = {'fixed_ips': [{'ip_address': '20.0.0.31',
-                                  'subnet_id': _uuid()}],
-                   'subnets': [{'gateway_ip': '20.0.0.1'}],
-                   'extra_subnets': [{'cidr': '172.16.0.0/24'}],
-                   'id': _uuid(),
-                   'network_id': _uuid(),
-                   'mac_address': 'ca:fe:de:ad:be:ef'}
-        ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
-                                     'prefixlen': 24,
-                                     'subnet_id': _uuid()}],
-                      'subnets': [{'gateway_ip': '20.0.0.1'}],
-                      'extra_subnets': [{'cidr': '172.16.0.0/24'}],
-                      'id': _uuid(),
-                      portbindings.HOST_ID: HOSTNAME,
-                      'network_id': _uuid(),
-                      'mac_address': 'ca:fe:de:ad:be:ef'}
-        ri.snat_ports = sn_port
-        ri.ex_gw_port = ex_gw_port
-        ri.snat_namespace = mock.Mock()
-
-        if action == 'add':
-            self.device_exists.return_value = False
-
-            ri.get_snat_port_for_internal_port = mock.Mock(
-                return_value=sn_port)
-            ri._snat_redirect_add = mock.Mock()
-            ri._set_subnet_arp_info = mock.Mock()
-            ri._internal_network_added = mock.Mock()
-            ri.internal_network_added(port)
-            self.assertEqual(1, ri._snat_redirect_add.call_count)
-            self.assertEqual(2, ri._internal_network_added.call_count)
-            ri._set_subnet_arp_info.assert_called_once_with(subnet_id)
-            ri._internal_network_added.assert_called_with(
-                dvr_snat_ns.SnatNamespace.get_snat_ns_name(ri.router['id']),
-                sn_port['network_id'],
-                sn_port['id'],
-                sn_port['fixed_ips'],
-                sn_port['mac_address'],
-                ri._get_snat_int_device_name(sn_port['id']),
-                dvr_snat_ns.SNAT_INT_DEV_PREFIX)
-        elif action == 'remove':
-            self.device_exists.return_value = False
-            ri.get_snat_port_for_internal_port = mock.Mock(
-                return_value=sn_port)
-            ri._delete_arp_cache_for_internal_port = mock.Mock()
-            ri._snat_redirect_modify = mock.Mock()
-            ri.internal_network_removed(port)
-            self.assertEqual(
-                1, ri._delete_arp_cache_for_internal_port.call_count)
-            ri._snat_redirect_modify.assert_called_with(
-                sn_port, port,
-                ri.get_internal_device_name(port['id']),
-                is_add=False)
-
-    def test_agent_add_internal_network(self):
-        self._test_internal_network_action('add')
-
-    def test_agent_add_internal_network_dist(self):
-        self._test_internal_network_action_dist('add')
-
-    def test_agent_remove_internal_network(self):
-        self._test_internal_network_action('remove')
-
-    def test_agent_remove_internal_network_dist(self):
-        self._test_internal_network_action_dist('remove')
-
-    def _add_external_gateway(self, ri, router, ex_gw_port, interface_name,
-                              use_fake_fip=False,
-                              no_subnet=False, no_sub_gw=None,
-                              dual_stack=False):
-        self.device_exists.return_value = False
-        if no_sub_gw is None:
-            no_sub_gw = []
-        if use_fake_fip:
-            fake_fip = {'floatingips': [{'id': _uuid(),
-                                         'floating_ip_address': '192.168.1.34',
-                                         'fixed_ip_address': '192.168.0.1',
-                                         'port_id': _uuid()}]}
-            router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
-        ri.external_gateway_added(ex_gw_port, interface_name)
-        if not router.get('distributed'):
-            self.assertEqual(1, self.mock_driver.plug.call_count)
-            self.assertEqual(1, self.mock_driver.init_router_port.call_count)
-            if no_subnet and not dual_stack:
-                self.assertEqual(0, self.send_adv_notif.call_count)
-                ip_cidrs = []
-                kwargs = {'preserve_ips': [],
-                          'namespace': 'qrouter-' + router['id'],
-                          'extra_subnets': [],
-                          'clean_connections': True}
-            else:
-                exp_arp_calls = [mock.call(ri.ns_name, interface_name,
-                                           '20.0.0.30', mock.ANY)]
-                if dual_stack and not no_sub_gw:
-                    exp_arp_calls += [mock.call(ri.ns_name, interface_name,
-                                                '2001:192:168:100::2',
-                                                mock.ANY)]
-                self.send_adv_notif.assert_has_calls(exp_arp_calls)
-                ip_cidrs = ['20.0.0.30/24']
-                if dual_stack:
-                    if not no_sub_gw:
-                        ip_cidrs.append('2001:192:168:100::2/64')
-                kwargs = {'preserve_ips': ['192.168.1.34/32'],
-                          'namespace': 'qrouter-' + router['id'],
-                          'extra_subnets': [{'cidr': '172.16.0.0/24'}],
-                          'clean_connections': True}
-            self.mock_driver.init_router_port.assert_called_with(
-                interface_name, ip_cidrs, **kwargs)
-        else:
-            ri._create_dvr_gateway.assert_called_once_with(
-                ex_gw_port, interface_name)
-
-    def _test_external_gateway_action(self, action, router, dual_stack=False):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ex_net_id = _uuid()
-        sn_port = self.snat_ports[1]
-        # Special setup for dvr routers
-        if router.get('distributed'):
-            agent.conf.agent_mode = 'dvr_snat'
-            agent.host = HOSTNAME
-            ri = dvr_router.DvrEdgeRouter(agent,
-                                          HOSTNAME,
-                                          router['id'],
-                                          router,
-                                          **self.ri_kwargs)
-            ri._create_dvr_gateway = mock.Mock()
-            ri.get_snat_interfaces = mock.Mock(return_value=self.snat_ports)
-            ri.snat_ports = self.snat_ports
-            ri._create_snat_namespace()
-            ri.fip_ns = agent.get_fip_ns(ex_net_id)
-            ri.internal_ports = self.snat_ports
-        else:
-            ri = l3router.RouterInfo(
-                router['id'], router,
-                **self.ri_kwargs)
-
-        ri.use_ipv6 = False
-        subnet_id = _uuid()
-        fixed_ips = [{'subnet_id': subnet_id,
-                      'ip_address': '20.0.0.30',
-                      'prefixlen': 24}]
-        subnets = [{'id': subnet_id,
-                    'cidr': '20.0.0.0/24',
-                    'gateway_ip': '20.0.0.1'}]
-        if dual_stack:
-            ri.use_ipv6 = True
-            subnet_id_v6 = _uuid()
-            fixed_ips.append({'subnet_id': subnet_id_v6,
-                              'ip_address': '2001:192:168:100::2',
-                              'prefixlen': 64})
-            subnets.append({'id': subnet_id_v6,
-                            'cidr': '2001:192:168:100::/64',
-                            'gateway_ip': '2001:192:168:100::1'})
-        ex_gw_port = {'fixed_ips': fixed_ips,
-                      'subnets': subnets,
-                      'extra_subnets': [{'cidr': '172.16.0.0/24'}],
-                      'id': _uuid(),
-                      'network_id': ex_net_id,
-                      'mac_address': 'ca:fe:de:ad:be:ef'}
-        ex_gw_port_no_sub = {'fixed_ips': [],
-                             'id': _uuid(),
-                             'network_id': ex_net_id,
-                             'mac_address': 'ca:fe:de:ad:be:ef'}
-        interface_name = ri.get_external_device_name(ex_gw_port['id'])
-
-        if action == 'add':
-            self._add_external_gateway(ri, router, ex_gw_port, interface_name,
-                                       use_fake_fip=True,
-                                       dual_stack=dual_stack)
-
-        elif action == 'add_no_sub':
-            ri.use_ipv6 = True
-            self._add_external_gateway(ri, router, ex_gw_port_no_sub,
-                                       interface_name,
-                                       no_subnet=True)
-
-        elif action == 'add_no_sub_v6_gw':
-            ri.use_ipv6 = True
-            self.conf.set_override('ipv6_gateway',
-                                   'fe80::f816:3eff:fe2e:1')
-            if dual_stack:
-                use_fake_fip = True
-                # Remove v6 entries
-                del ex_gw_port['fixed_ips'][-1]
-                del ex_gw_port['subnets'][-1]
-            else:
-                use_fake_fip = False
-                ex_gw_port = ex_gw_port_no_sub
-            self._add_external_gateway(ri, router, ex_gw_port,
-                                       interface_name, no_subnet=True,
-                                       no_sub_gw='fe80::f816:3eff:fe2e:1',
-                                       use_fake_fip=use_fake_fip,
-                                       dual_stack=dual_stack)
-
-        elif action == 'remove':
-            self.device_exists.return_value = True
-            ri.get_snat_port_for_internal_port = mock.Mock(
-                return_value=sn_port)
-            ri._snat_redirect_remove = mock.Mock()
-            ri.external_gateway_removed(ex_gw_port, interface_name)
-            if not router.get('distributed'):
-                self.mock_driver.unplug.assert_called_once_with(
-                    interface_name,
-                    bridge=agent.conf.external_network_bridge,
-                    namespace=mock.ANY,
-                    prefix=mock.ANY)
-            else:
-                ri._snat_redirect_remove.assert_called_with(
-                    sn_port, sn_port,
-                    ri.get_internal_device_name(sn_port['id']))
-                ri.get_snat_port_for_internal_port.assert_called_with(
-                    mock.ANY, ri.snat_ports)
-        else:
-            raise Exception("Invalid action %s" % action)
-
-    def _test_external_gateway_updated(self, dual_stack=False):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.use_ipv6 = False
-        interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
-            self, ri, dual_stack=dual_stack)
-
-        fake_fip = {'floatingips': [{'id': _uuid(),
-                                     'floating_ip_address': '192.168.1.34',
-                                     'fixed_ip_address': '192.168.0.1',
-                                     'port_id': _uuid()}]}
-        router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
-        ri.external_gateway_updated(ex_gw_port, interface_name)
-        self.assertEqual(1, self.mock_driver.plug.call_count)
-        self.assertEqual(1, self.mock_driver.init_router_port.call_count)
-        exp_arp_calls = [mock.call(ri.ns_name, interface_name,
-                                   '20.0.0.30', mock.ANY)]
-        if dual_stack:
-            ri.use_ipv6 = True
-            exp_arp_calls += [mock.call(ri.ns_name, interface_name,
-                                        '2001:192:168:100::2', mock.ANY)]
-        self.send_adv_notif.assert_has_calls(exp_arp_calls)
-        ip_cidrs = ['20.0.0.30/24']
-        gateway_ips = ['20.0.0.1']
-        if dual_stack:
-            ip_cidrs.append('2001:192:168:100::2/64')
-            gateway_ips.append('2001:192:168:100::1')
-        kwargs = {'preserve_ips': ['192.168.1.34/32'],
-                  'namespace': 'qrouter-' + router['id'],
-                  'extra_subnets': [{'cidr': '172.16.0.0/24'}],
-                  'clean_connections': True}
-        self.mock_driver.init_router_port.assert_called_with(interface_name,
-                                                             ip_cidrs,
-                                                             **kwargs)
-
-    def test_external_gateway_updated(self):
-        self._test_external_gateway_updated()
-
-    def test_external_gateway_updated_dual_stack(self):
-        self._test_external_gateway_updated(dual_stack=True)
-
-    def _test_ext_gw_updated_dvr_edge_router(self, host_match,
-                                             snat_hosted_before=True):
-        """
-        Helper to test external gw update for edge router on dvr_snat agent
-
-        :param host_match: True if new gw host should be the same as agent host
-        :param snat_hosted_before: True if agent has already been hosting
-        snat for the router
-        """
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        ri = dvr_router.DvrEdgeRouter(mock.Mock(),
-                                      HOSTNAME,
-                                      router['id'],
-                                      router,
-                                      **self.ri_kwargs)
-        if snat_hosted_before:
-            ri._create_snat_namespace()
-            snat_ns_name = ri.snat_namespace.name
-        else:
-            self.assertIsNone(ri.snat_namespace)
-
-        interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
-                                                                        ri)
-        ri._external_gateway_added = mock.Mock()
-
-        router['gw_port_host'] = ri.host if host_match else (ri.host + 'foo')
-
-        ri.external_gateway_updated(ex_gw_port, interface_name)
-        if not host_match:
-            self.assertFalse(ri._external_gateway_added.called)
-            if snat_hosted_before:
-                # A host mismatch means that SNAT was rescheduled to
-                # another agent, so verify that the gw port was unplugged
-                # and the SNAT namespace was deleted.
-                self.mock_driver.unplug.assert_called_with(
-                    interface_name,
-                    bridge=self.conf.external_network_bridge,
-                    namespace=snat_ns_name,
-                    prefix=l3_agent.EXTERNAL_DEV_PREFIX)
-                self.assertIsNone(ri.snat_namespace)
-        else:
-            if not snat_hosted_before:
-                self.assertIsNotNone(ri.snat_namespace)
-            self.assertTrue(ri._external_gateway_added.called)
-
-    def test_ext_gw_updated_dvr_edge_router(self):
-        self._test_ext_gw_updated_dvr_edge_router(host_match=True)
-
-    def test_ext_gw_updated_dvr_edge_router_host_mismatch(self):
-        self._test_ext_gw_updated_dvr_edge_router(host_match=False)
-
-    def test_ext_gw_updated_dvr_edge_router_snat_rescheduled(self):
-        self._test_ext_gw_updated_dvr_edge_router(host_match=True,
-                                                  snat_hosted_before=False)
-
-    def test_agent_add_external_gateway(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        self._test_external_gateway_action('add', router)
-
-    def test_agent_add_external_gateway_dual_stack(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        self._test_external_gateway_action('add', router, dual_stack=True)
-
-    def test_agent_add_external_gateway_dist(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['distributed'] = True
-        router['gw_port_host'] = HOSTNAME
-        self._test_external_gateway_action('add', router)
-
-    def test_agent_add_external_gateway_dist_dual_stack(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['distributed'] = True
-        router['gw_port_host'] = HOSTNAME
-        self._test_external_gateway_action('add', router, dual_stack=True)
-
-    def test_agent_add_external_gateway_no_subnet(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2,
-                                                    v6_ext_gw_with_sub=False)
-        self._test_external_gateway_action('add_no_sub', router)
-
-    def test_agent_add_external_gateway_no_subnet_with_ipv6_gw(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2,
-                                                    v6_ext_gw_with_sub=False)
-        self._test_external_gateway_action('add_no_sub_v6_gw', router)
-
-    def test_agent_add_external_gateway_dual_stack_no_subnet_w_ipv6_gw(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2,
-                                                    v6_ext_gw_with_sub=False)
-        self._test_external_gateway_action('add_no_sub_v6_gw',
-                                           router, dual_stack=True)
-
-    def test_agent_remove_external_gateway(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        self._test_external_gateway_action('remove', router)
-
-    def test_agent_remove_external_gateway_dual_stack(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        self._test_external_gateway_action('remove', router, dual_stack=True)
-
-    def test_agent_remove_external_gateway_dist(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['distributed'] = True
-        router['gw_port_host'] = HOSTNAME
-        self._test_external_gateway_action('remove', router)
-
-    def test_agent_remove_external_gateway_dist_dual_stack(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['distributed'] = True
-        router['gw_port_host'] = HOSTNAME
-        self._test_external_gateway_action('remove', router, dual_stack=True)
-
-    def _verify_snat_mangle_rules(self, nat_rules, mangle_rules, router,
-                                  negate=False):
-        interfaces = router[l3_constants.INTERFACE_KEY]
-        source_cidrs = []
-        for iface in interfaces:
-            for subnet in iface['subnets']:
-                prefix = subnet['cidr'].split('/')[1]
-                source_cidr = "%s/%s" % (iface['fixed_ips'][0]['ip_address'],
-                                         prefix)
-                source_cidrs.append(source_cidr)
-        source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address']
-        interface_name = ('qg-%s' % router['gw_port']['id'])[:14]
-        expected_rules = [
-            '! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' %
-            (interface_name, interface_name),
-            '-o %s -j SNAT --to-source %s' % (interface_name, source_nat_ip),
-            '-m mark ! --mark 0x2/%s -m conntrack --ctstate DNAT '
-            '-j SNAT --to-source %s' %
-            (l3_constants.ROUTER_MARK_MASK, source_nat_ip)]
-        for r in nat_rules:
-            if negate:
-                self.assertNotIn(r.rule, expected_rules)
-            else:
-                self.assertIn(r.rule, expected_rules)
-        expected_rules = [
-            '-i %s -j MARK --set-xmark 0x2/%s' %
-            (interface_name, l3_constants.ROUTER_MARK_MASK)]
-        for r in mangle_rules:
-            if negate:
-                self.assertNotIn(r.rule, expected_rules)
-            else:
-                self.assertIn(r.rule, expected_rules)
-
-    def test_get_snat_port_for_internal_port(self):
-        router = l3_test_common.prepare_router_data(num_internal_ports=4)
-        ri = dvr_router.DvrEdgeRouter(mock.sentinel.agent,
-                                      HOSTNAME,
-                                      router['id'],
-                                      router,
-                                      **self.ri_kwargs)
-        test_port = {
-            'mac_address': '00:12:23:34:45:56',
-            'fixed_ips': [{'subnet_id': l3_test_common.get_subnet_id(
-                router[l3_constants.INTERFACE_KEY][0]),
-                'ip_address': '101.12.13.14'}]}
-        internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
-        # test valid case
-        with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
-            get_interfaces.return_value = [test_port]
-            res_port = ri.get_snat_port_for_internal_port(internal_ports[0])
-            self.assertEqual(test_port, res_port)
-            # test invalid case
-            test_port['fixed_ips'][0]['subnet_id'] = 1234
-            res_ip = ri.get_snat_port_for_internal_port(internal_ports[0])
-            self.assertNotEqual(test_port, res_ip)
-            self.assertIsNone(res_ip)
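-
-    # Sketch of the matching rule exercised above (assumed semantics,
-    # simplified): return the first SNAT port whose fixed IP lives on
-    # the internal port's subnet, else None.
-    @staticmethod
-    def _example_match_snat_port(snat_ports, internal_subnet_id):
-        for p in snat_ports:
-            if p['fixed_ips'][0]['subnet_id'] == internal_subnet_id:
-                return p
-        return None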
-
-    def test_process_cent_router(self):
-        router = l3_test_common.prepare_router_data()
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        self._test_process_router(ri, agent)
-
-    def test_process_dist_router(self):
-        router = l3_test_common.prepare_router_data()
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrEdgeRouter(agent,
-                                      HOSTNAME,
-                                      router['id'],
-                                      router,
-                                      **self.ri_kwargs)
-        subnet_id = l3_test_common.get_subnet_id(
-            router[l3_constants.INTERFACE_KEY][0])
-        ri.router['distributed'] = True
-        ri.router['_snat_router_interfaces'] = [{
-            'fixed_ips': [{'subnet_id': subnet_id,
-                           'ip_address': '1.2.3.4'}]}]
-        ri.router['gw_port_host'] = None
-        self._test_process_router(ri, agent)
-
-    def _test_process_router(self, ri, agent):
-        router = ri.router
-        agent.host = HOSTNAME
-        fake_fip_id = 'fake_fip_id'
-        ri.create_dvr_fip_interfaces = mock.Mock()
-        ri.process_floating_ip_addresses = mock.Mock()
-        ri.process_floating_ip_nat_rules = mock.Mock()
-        ri.process_floating_ip_addresses.return_value = {
-            fake_fip_id: 'ACTIVE'}
-        ri.external_gateway_added = mock.Mock()
-        ri.external_gateway_updated = mock.Mock()
-        fake_floatingips1 = {'floatingips': [
-            {'id': fake_fip_id,
-             'floating_ip_address': '8.8.8.8',
-             'fixed_ip_address': '7.7.7.7',
-             'port_id': _uuid(),
-             'host': HOSTNAME}]}
-        ri.process(agent)
-        ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
-        ri.process_floating_ip_addresses.reset_mock()
-        ri.process_floating_ip_nat_rules.assert_called_with()
-        ri.process_floating_ip_nat_rules.reset_mock()
-        ri.external_gateway_added.reset_mock()
-
-        # remap floating IP to a new fixed ip
-        fake_floatingips2 = copy.deepcopy(fake_floatingips1)
-        fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
-
-        router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
-        ri.process(agent)
-        ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
-        ri.process_floating_ip_addresses.reset_mock()
-        ri.process_floating_ip_nat_rules.assert_called_with()
-        ri.process_floating_ip_nat_rules.reset_mock()
-        self.assertEqual(0, ri.external_gateway_added.call_count)
-        self.assertEqual(0, ri.external_gateway_updated.call_count)
-        ri.external_gateway_added.reset_mock()
-        ri.external_gateway_updated.reset_mock()
-
-        # change the ex_gw_port a bit to test gateway update
-        new_gw_port = copy.deepcopy(ri.router['gw_port'])
-        ri.router['gw_port'] = new_gw_port
-        old_ip = (netaddr.IPAddress(ri.router['gw_port']
-                                    ['fixed_ips'][0]['ip_address']))
-        ri.router['gw_port']['fixed_ips'][0]['ip_address'] = str(old_ip + 1)
-
-        ri.process(agent)
-        ri.process_floating_ip_addresses.reset_mock()
-        ri.process_floating_ip_nat_rules.reset_mock()
-        self.assertEqual(0, ri.external_gateway_added.call_count)
-        self.assertEqual(1, ri.external_gateway_updated.call_count)
-
-        # remove just the floating ips
-        del router[l3_constants.FLOATINGIP_KEY]
-        ri.process(agent)
-        ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
-        ri.process_floating_ip_addresses.reset_mock()
-        ri.process_floating_ip_nat_rules.assert_called_with()
-        ri.process_floating_ip_nat_rules.reset_mock()
-
-        # now no ports so state is torn down
-        del router[l3_constants.INTERFACE_KEY]
-        del router['gw_port']
-        ri.process(agent)
-        self.assertEqual(1, self.send_adv_notif.call_count)
-        distributed = ri.router.get('distributed', False)
-        self.assertEqual(distributed, ri.process_floating_ip_addresses.called)
-        self.assertEqual(distributed, ri.process_floating_ip_nat_rules.called)
-
-    @mock.patch('neutron.agent.linux.ip_lib.IPDevice')
-    def _test_process_floating_ip_addresses_add(self, ri, agent, IPDevice):
-        floating_ips = ri.get_floating_ips()
-        fip_id = floating_ips[0]['id']
-        IPDevice.return_value = device = mock.Mock()
-        device.addr.list.return_value = []
-        ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
-        ex_gw_port = {'id': _uuid(), 'network_id': mock.sentinel.ext_net_id}
-
-        ri.add_floating_ip = mock.Mock(
-            return_value=l3_constants.FLOATINGIP_STATUS_ACTIVE)
-        with mock.patch.object(lla.LinkLocalAllocator, '_write'):
-            if ri.router['distributed']:
-                ri.fip_ns = agent.get_fip_ns(ex_gw_port['network_id'])
-                ri.create_dvr_fip_interfaces(ex_gw_port)
-            fip_statuses = ri.process_floating_ip_addresses(
-                mock.sentinel.interface_name)
-        self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
-                         fip_statuses)
-        ri.add_floating_ip.assert_called_once_with(
-            floating_ips[0], mock.sentinel.interface_name, device)
-
-    @mock.patch.object(lla.LinkLocalAllocator, '_write')
-    def test_create_dvr_fip_interfaces_for_late_binding(self, lla_write):
-        fake_network_id = _uuid()
-        fake_subnet_id = _uuid()
-        fake_floatingips = {'floatingips': [
-            {'id': _uuid(),
-             'floating_ip_address': '20.0.0.3',
-             'fixed_ip_address': '192.168.0.1',
-             'floating_network_id': _uuid(),
-             'port_id': _uuid(),
-             'host': HOSTNAME}]}
-        agent_gateway_port = (
-            {'fixed_ips': [
-                {'ip_address': '20.0.0.30',
-                 'prefixlen': 24,
-                 'subnet_id': fake_subnet_id}],
-             'subnets': [
-                 {'id': fake_subnet_id,
-                  'gateway_ip': '20.0.0.1'}],
-             'id': _uuid(),
-             'network_id': fake_network_id,
-             'mac_address': 'ca:fe:de:ad:be:ef'}
-        )
-
-        router = l3_test_common.prepare_router_data(enable_snat=True)
-        router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
-        router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = []
-        router['distributed'] = True
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrEdgeRouter(
-            agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
-
-        ext_gw_port = ri.router.get('gw_port')
-        ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
-        ri.dist_fip_count = 0
-        ri.fip_ns.subscribe = mock.Mock()
-        with mock.patch.object(agent.plugin_rpc,
-                               'get_agent_gateway_port') as fip_gw_port:
-            fip_gw_port.return_value = agent_gateway_port
-            ri.create_dvr_fip_interfaces(ext_gw_port)
-            self.assertTrue(fip_gw_port.called)
-            self.assertEqual(agent_gateway_port,
-                             ri.fip_ns.agent_gateway_port)
-
-    @mock.patch.object(lla.LinkLocalAllocator, '_write')
-    def test_create_dvr_fip_interfaces(self, lla_write):
-        fake_network_id = _uuid()
-        subnet_id = _uuid()
-        fake_floatingips = {'floatingips': [
-            {'id': _uuid(),
-             'floating_ip_address': '20.0.0.3',
-             'fixed_ip_address': '192.168.0.1',
-             'floating_network_id': _uuid(),
-             'port_id': _uuid(),
-             'host': HOSTNAME}]}
-        agent_gateway_port = (
-            [{'fixed_ips': [
-                {'ip_address': '20.0.0.30',
-                 'prefixlen': 24,
-                 'subnet_id': subnet_id}],
-              'subnets': [
-                  {'id': subnet_id,
-                   'gateway_ip': '20.0.0.1'}],
-              'id': _uuid(),
-              'network_id': fake_network_id,
-              'mac_address': 'ca:fe:de:ad:be:ef'}]
-        )
-
-        router = l3_test_common.prepare_router_data(enable_snat=True)
-        router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
-        router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
-        router['distributed'] = True
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrEdgeRouter(
-            agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
-
-        ext_gw_port = ri.router.get('gw_port')
-        ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
-        ri.dist_fip_count = 0
-        ri.fip_ns.subscribe = mock.Mock()
-        ri.fip_ns.agent_router_gateway = mock.Mock()
-        agent.process_router_add = mock.Mock()
-
-        with mock.patch.object(ri, 'get_floating_ips') as fips, \
-                mock.patch.object(ri, 'get_floating_agent_gw_interface'
-                                  ) as fip_gw_port:
-            fips.return_value = fake_floatingips
-            fip_gw_port.return_value = agent_gateway_port[0]
-            ri.create_dvr_fip_interfaces(ext_gw_port)
-            self.assertTrue(fip_gw_port.called)
-            self.assertTrue(fips.called)
-            self.assertEqual(agent_gateway_port[0],
-                             ri.fip_ns.agent_gateway_port)
-            self.assertTrue(ri.rtr_fip_subnet)
-            self.assertEqual(1, agent.process_router_add.call_count)
-
-    @mock.patch.object(lla.LinkLocalAllocator, '_write')
-    def test_create_dvr_fip_interfaces_for_restart_l3agent_case(self,
-                                                                lla_write):
-        fake_floatingips = {'floatingips': [
-            {'id': _uuid(),
-             'floating_ip_address': '20.0.0.3',
-             'fixed_ip_address': '192.168.0.1',
-             'floating_network_id': _uuid(),
-             'port_id': _uuid(),
-             'host': HOSTNAME}]}
-        agent_gateway_port = (
-            [{'fixed_ips': [
-                {'ip_address': '20.0.0.30',
-                 'prefixlen': 24,
-                 'subnet_id': 'subnet_id'}],
-              'subnets': [
-                  {'id': 'subnet_id',
-                   'gateway_ip': '20.0.0.1'}],
-              'id': _uuid(),
-              'network_id': 'fake_network_id',
-              'mac_address': 'ca:fe:de:ad:be:ef'}]
-        )
-
-        router = l3_test_common.prepare_router_data(enable_snat=True)
-        router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
-        router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
-        router['distributed'] = True
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrEdgeRouter(
-            agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
-        ext_gw_port = ri.router.get('gw_port')
-        ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
-        ri.fip_ns.subscribe = mock.Mock(return_value=True)
-        ri.fip_ns.agent_router_gateway = mock.Mock()
-        ri.rtr_fip_subnet = None
-        ri.dist_fip_count = 1
-
-        with mock.patch.object(ri, 'get_floating_ips') as fips,\
-                mock.patch.object(ri, 'get_floating_agent_gw_interface'
-                                  ) as fip_gw_port:
-            fips.return_value = fake_floatingips
-            fip_gw_port.return_value = agent_gateway_port[0]
-            ri.create_dvr_fip_interfaces(ext_gw_port)
-            self.assertTrue(fip_gw_port.called)
-            self.assertTrue(fips.called)
-            self.assertEqual(agent_gateway_port[0],
-                             ri.fip_ns.agent_gateway_port)
-            self.assertTrue(ri.rtr_fip_subnet)
-
-    def test_process_router_cent_floating_ip_add(self):
-        fake_floatingips = {'floatingips': [
-            {'id': _uuid(),
-             'floating_ip_address': '15.1.2.3',
-             'fixed_ip_address': '192.168.0.1',
-             'status': 'DOWN',
-             'floating_network_id': _uuid(),
-             'port_id': _uuid(),
-             'host': HOSTNAME}]}
-
-        router = l3_test_common.prepare_router_data(enable_snat=True)
-        router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
-        ri.get_external_device_name = mock.Mock(return_value='exgw')
-        self._test_process_floating_ip_addresses_add(ri, agent)
-
-    def test_process_router_snat_disabled(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(enable_snat=True)
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        # Process with NAT
-        ri.process(agent)
-        orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
-        orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
-        # Reprocess without NAT
-        router['enable_snat'] = False
-        # Reassign the router object to RouterInfo
-        ri.router = router
-        ri.process(agent)
-        # Set difference is unreliable with IptablesRule instances (they
-        # define __eq__ but not __hash__), so compute the delta by value.
-        nat_rules_delta = [r for r in orig_nat_rules
-                           if r not in ri.iptables_manager.ipv4['nat'].rules]
-        self.assertEqual(2, len(nat_rules_delta))
-        mangle_rules_delta = [
-            r for r in orig_mangle_rules
-            if r not in ri.iptables_manager.ipv4['mangle'].rules]
-        self.assertEqual(1, len(mangle_rules_delta))
-        self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
-                                       router)
-        self.assertEqual(1, self.send_adv_notif.call_count)
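-
-    # Why the deltas above use list comprehensions rather than sets
-    # (illustrative helper, assuming IptablesRule semantics): the rule
-    # objects define __eq__ without a matching __hash__, so equal rules
-    # hash differently and set(old) - set(new) reports rules as removed
-    # even when equal rules are still present.
-    @staticmethod
-    def _rules_removed(before, after):
-        # Compare by value (__eq__) instead of by hash.
-        return [r for r in before if r not in after]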
-
-    def test_process_router_snat_enabled(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(enable_snat=False)
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        # Process without NAT
-        ri.process(agent)
-        orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
-        orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
-        # Reprocess with NAT
-        router['enable_snat'] = True
-        # Reassign the router object to RouterInfo
-        ri.router = router
-        ri.process(agent)
-        # Set difference is unreliable with IptablesRule instances (they
-        # define __eq__ but not __hash__), so compute the delta by value.
-        nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
-                           if r not in orig_nat_rules]
-        self.assertEqual(2, len(nat_rules_delta))
-        mangle_rules_delta = [
-            r for r in ri.iptables_manager.ipv4['mangle'].rules
-            if r not in orig_mangle_rules]
-        self.assertEqual(1, len(mangle_rules_delta))
-        self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
-                                       router)
-        self.assertEqual(1, self.send_adv_notif.call_count)
-
-    def _test_update_routing_table(self, is_snat_host=True):
-        router = l3_test_common.prepare_router_data()
-        uuid = router['id']
-        s_netns = 'snat-' + uuid
-        q_netns = 'qrouter-' + uuid
-        fake_route1 = {'destination': '135.207.0.0/16',
-                       'nexthop': '19.4.4.200'}
-        calls = [mock.call('replace', fake_route1, q_netns)]
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrEdgeRouter(
-            agent,
-            HOSTNAME,
-            uuid,
-            router,
-            **self.ri_kwargs)
-        ri._update_routing_table = mock.Mock()
-
-        with mock.patch.object(ri, '_is_this_snat_host') as snat_host:
-            snat_host.return_value = is_snat_host
-            ri.update_routing_table('replace', fake_route1)
-            if is_snat_host:
-                ri._update_routing_table('replace', fake_route1, s_netns)
-                calls += [mock.call('replace', fake_route1, s_netns)]
-            ri._update_routing_table.assert_has_calls(calls, any_order=True)
-
-    def test_process_update_snat_routing_table(self):
-        self._test_update_routing_table()
-
-    def test_process_not_update_snat_routing_table(self):
-        self._test_update_routing_table(is_snat_host=False)
-
-    def test_process_router_interface_added(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data()
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        # Process with NAT
-        ri.process(agent)
-        # Add an interface and reprocess
-        l3_test_common.router_append_interface(router)
-        # Reassign the router object to RouterInfo
-        ri.router = router
-        ri.process(agent)
-        # send_ip_addr_adv_notif is called both times process is called
-        self.assertEqual(2, self.send_adv_notif.call_count)
-
-    def _test_process_ipv6_only_or_dual_stack_gw(self, dual_stack=False):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(ip_version=6,
-                                                    dual_stack=dual_stack)
-        # Get NAT rules without the gw_port
-        gw_port = router['gw_port']
-        router['gw_port'] = None
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        self._process_router_instance_for_agent(agent, ri, router)
-        orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
-
-        # Get NAT rules with the gw_port
-        router['gw_port'] = gw_port
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        p = ri.external_gateway_nat_postroute_rules
-        s = ri.external_gateway_nat_snat_rules
-        attrs_to_mock = dict(
-            [(a, mock.DEFAULT) for a in
-                ['external_gateway_nat_postroute_rules',
-                 'external_gateway_nat_snat_rules']]
-        )
-        with mock.patch.multiple(ri, **attrs_to_mock) as mocks:
-            mocks['external_gateway_nat_postroute_rules'].side_effect = p
-            mocks['external_gateway_nat_snat_rules'].side_effect = s
-            self._process_router_instance_for_agent(agent, ri, router)
-            new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
-
-            # NAT rules should only change for dual_stack operation
-            if dual_stack:
-                self.assertTrue(
-                    mocks['external_gateway_nat_postroute_rules'].called)
-                self.assertTrue(
-                    mocks['external_gateway_nat_snat_rules'].called)
-                self.assertNotEqual(orig_nat_rules, new_nat_rules)
-            else:
-                self.assertFalse(
-                    mocks['external_gateway_nat_postroute_rules'].called)
-                self.assertFalse(
-                    mocks['external_gateway_nat_snat_rules'].called)
-                self.assertEqual(orig_nat_rules, new_nat_rules)
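-
-    # Rationale for the branch above: an IPv6-only external gateway adds
-    # no IPv4 NAT rules, so the v4 nat table only changes when the
-    # gateway also carries an IPv4 subnet (the dual-stack case).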
-
-    def test_process_ipv6_only_gw(self):
-        self._test_process_ipv6_only_or_dual_stack_gw()
-
-    def test_process_dual_stack_gw(self):
-        self._test_process_ipv6_only_or_dual_stack_gw(dual_stack=True)
-
-    def _process_router_ipv6_interface_added(
-            self, router, ra_mode=None, addr_mode=None):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        # Process with NAT
-        ri.process(agent)
-        orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
-        # Add an IPv6 interface and reprocess
-        l3_test_common.router_append_interface(router, count=1,
-                                               ip_version=6, ra_mode=ra_mode,
-                                               addr_mode=addr_mode)
-        # Reassign the router object to RouterInfo
-        self._process_router_instance_for_agent(agent, ri, router)
-        # IPv4 NAT rules should not be changed by adding an IPv6 interface
-        nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
-                           if r not in orig_nat_rules]
-        self.assertFalse(nat_rules_delta)
-        return ri
-
-    def _radvd_expected_call_external_process(self, ri, enable=True):
-        expected_calls = [mock.call(uuid=ri.router['id'],
-                                    service='radvd',
-                                    default_cmd_callback=mock.ANY,
-                                    namespace=ri.ns_name,
-                                    conf=mock.ANY,
-                                    run_as_root=True)]
-        if enable:
-            expected_calls.append(mock.call().enable(reload_cfg=True))
-        else:
-            expected_calls.append(mock.call().disable())
-        return expected_calls
-
-    def _process_router_ipv6_subnet_added(
-            self, router, ipv6_subnet_modes=None, dns_nameservers=None):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        agent.external_gateway_added = mock.Mock()
-        self._process_router_instance_for_agent(agent, ri, router)
-        # Add an IPv6 interface with len(ipv6_subnet_modes) subnets
-        # and reprocess
-        l3_test_common.router_append_subnet(
-            router,
-            count=len(ipv6_subnet_modes),
-            ip_version=6,
-            ipv6_subnet_modes=ipv6_subnet_modes,
-            dns_nameservers=dns_nameservers)
-        # Reassign the router object to RouterInfo
-        self._process_router_instance_for_agent(agent, ri, router)
-        return ri
-
-    def _assert_ri_process_enabled(self, ri):
-        """Verify that process was enabled for a router instance."""
-        expected_calls = self._radvd_expected_call_external_process(ri)
-        self.assertEqual(expected_calls, self.external_process.mock_calls)
-
-    def _assert_ri_process_disabled(self, ri):
-        """Verify that process was disabled for a router instance."""
-        expected_calls = self._radvd_expected_call_external_process(ri, False)
-        self.assertEqual(expected_calls, self.external_process.mock_calls)
-
-    def test_process_router_ipv6_interface_added(self):
-        router = l3_test_common.prepare_router_data()
-        ri = self._process_router_ipv6_interface_added(router)
-        self._assert_ri_process_enabled(ri)
-        # Expect radvd configured without prefix
-        self.assertNotIn('prefix',
-                         self.utils_replace_file.call_args[0][1].split())
-
-    def test_process_router_ipv6_slaac_interface_added(self):
-        router = l3_test_common.prepare_router_data()
-        ri = self._process_router_ipv6_interface_added(
-            router, ra_mode=l3_constants.IPV6_SLAAC)
-        self._assert_ri_process_enabled(ri)
-        # Expect radvd configured with prefix
-        self.assertIn('prefix',
-                      self.utils_replace_file.call_args[0][1].split())
-
-    def test_process_router_ipv6_subnets_added(self):
-        router = l3_test_common.prepare_router_data()
-        ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes=[
-            {'ra_mode': l3_constants.IPV6_SLAAC,
-             'address_mode': l3_constants.IPV6_SLAAC},
-            {'ra_mode': l3_constants.DHCPV6_STATELESS,
-             'address_mode': l3_constants.DHCPV6_STATELESS},
-            {'ra_mode': l3_constants.DHCPV6_STATEFUL,
-             'address_mode': l3_constants.DHCPV6_STATEFUL}])
-        self._assert_ri_process_enabled(ri)
-        radvd_config = self.utils_replace_file.call_args[0][1].split()
-        # Assert we have a prefix from IPV6_SLAAC and a prefix from
-        # DHCPV6_STATELESS on one interface
-        self.assertEqual(2, radvd_config.count("prefix"))
-        self.assertEqual(1, radvd_config.count("interface"))
-
-    def test_process_router_ipv6_subnets_added_to_existing_port(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data()
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        agent.external_gateway_added = mock.Mock()
-        self._process_router_instance_for_agent(agent, ri, router)
-        # Add the first subnet on a new interface
-        l3_test_common.router_append_subnet(
-            router, count=1,
-            ip_version=6, ipv6_subnet_modes=[
-                {'ra_mode': l3_constants.IPV6_SLAAC,
-                 'address_mode': l3_constants.IPV6_SLAAC}])
-        self._process_router_instance_for_agent(agent, ri, router)
-        self._assert_ri_process_enabled(ri)
-        radvd_config = self.utils_replace_file.call_args[0][1].split()
-        self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
-        self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
-        self.assertEqual(1, radvd_config.count("prefix"))
-        self.assertEqual(1, radvd_config.count("interface"))
-        # Reset mocks to verify that radvd is enabled and configured
-        # correctly after the second subnet is added to the interface
-        self.external_process.reset_mock()
-        self.utils_replace_file.reset_mock()
-        # Add the second subnet on the same interface
-        interface_id = router[l3_constants.INTERFACE_KEY][1]['id']
-        l3_test_common.router_append_subnet(
-            router, count=1,
-            ip_version=6,
-            ipv6_subnet_modes=[
-                {'ra_mode': l3_constants.IPV6_SLAAC,
-                 'address_mode': l3_constants.IPV6_SLAAC}],
-            interface_id=interface_id)
-        self._process_router_instance_for_agent(agent, ri, router)
-        # radvd should have been enabled again and the interface
-        # should have two prefixes
-        self._assert_ri_process_enabled(ri)
-        radvd_config = self.utils_replace_file.call_args[0][1].split()
-        self.assertEqual(2, len(ri.internal_ports[1]['subnets']))
-        self.assertEqual(2, len(ri.internal_ports[1]['fixed_ips']))
-        self.assertEqual(2, radvd_config.count("prefix"))
-        self.assertEqual(1, radvd_config.count("interface"))
-
-    def test_process_router_ipv6v4_interface_added(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data()
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        # Process with NAT
-        ri.process(agent)
-        # Add an IPv4 and IPv6 interface and reprocess
-        l3_test_common.router_append_interface(router, count=1, ip_version=4)
-        l3_test_common.router_append_interface(router, count=1, ip_version=6)
-        # Reassign the router object to RouterInfo
-        self._process_router_instance_for_agent(agent, ri, router)
-        self._assert_ri_process_enabled(ri)
-
-    def test_process_router_interface_removed(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        # Process with NAT
-        ri.process(agent)
-        # Remove an interface and reprocess
-        del router[l3_constants.INTERFACE_KEY][1]
-        # Reassign the router object to RouterInfo
-        ri.router = router
-        ri.process(agent)
-        # send_ip_addr_adv_notif is called both times process is called
-        self.assertEqual(2, self.send_adv_notif.call_count)
-
-    def test_process_router_ipv6_interface_removed(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data()
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        self._process_router_instance_for_agent(agent, ri, router)
-        # Add an IPv6 interface and reprocess
-        l3_test_common.router_append_interface(router, count=1, ip_version=6)
-        self._process_router_instance_for_agent(agent, ri, router)
-        self._assert_ri_process_enabled(ri)
-        # Reset the calls so we can check that radvd is disabled
-        self.external_process.reset_mock()
-        self.process_monitor.reset_mock()
-        # Remove the IPv6 interface and reprocess
-        del router[l3_constants.INTERFACE_KEY][1]
-        self._process_router_instance_for_agent(agent, ri, router)
-        self._assert_ri_process_disabled(ri)
-
-    def test_process_router_ipv6_subnet_removed(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data()
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        agent.external_gateway_added = mock.Mock()
-        self._process_router_instance_for_agent(agent, ri, router)
-        # Add an IPv6 interface with two subnets and reprocess
-        l3_test_common.router_append_subnet(
-            router, count=2, ip_version=6,
-            ipv6_subnet_modes=([{'ra_mode': l3_constants.IPV6_SLAAC,
-                                 'address_mode': l3_constants.IPV6_SLAAC}]
-                               * 2))
-        self._process_router_instance_for_agent(agent, ri, router)
-        self._assert_ri_process_enabled(ri)
-        # Reset mocks to check for modified radvd config
-        self.utils_replace_file.reset_mock()
-        self.external_process.reset_mock()
-        # Remove one subnet from the interface and reprocess
-        interfaces = copy.deepcopy(router[l3_constants.INTERFACE_KEY])
-        del interfaces[1]['subnets'][0]
-        del interfaces[1]['fixed_ips'][0]
-        router[l3_constants.INTERFACE_KEY] = interfaces
-        self._process_router_instance_for_agent(agent, ri, router)
-        # Assert radvd was enabled again and that we only have one
-        # prefix on the interface
-        self._assert_ri_process_enabled(ri)
-        radvd_config = self.utils_replace_file.call_args[0][1].split()
-        self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
-        self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
-        self.assertEqual(1, radvd_config.count("interface"))
-        self.assertEqual(1, radvd_config.count("prefix"))
-
-    def test_process_router_internal_network_added_unexpected_error(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data()
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        with mock.patch.object(
-                ri,
-                'internal_network_added') as internal_network_added:
-            # Raise RuntimeError to simulate an unexpected exception
-            internal_network_added.side_effect = RuntimeError
-            self.assertRaises(RuntimeError, ri.process, agent)
-            self.assertNotIn(
-                router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
-
-            # Simulate the unexpected exception having been fixed manually
-            internal_network_added.side_effect = None
-
-            # periodic_sync_routers_task finds out that _rpc_loop failed to
-            # process the router last time, so it will retry in the next run.
-            ri.process(agent)
-            # We were able to add the port to ri.internal_ports
-            self.assertIn(
-                router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
-
-    def test_process_router_internal_network_removed_unexpected_error(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data()
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        # add an internal port
-        ri.process(agent)
-
-        with mock.patch.object(
-                ri,
-                'internal_network_removed') as internal_net_removed:
-            # Raise RuntimeError to simulate an unexpected exception
-            internal_net_removed.side_effect = RuntimeError
-            ri.internal_ports[0]['admin_state_up'] = False
-            # The above port is now down, so processing should remove it.
-            self.assertRaises(RuntimeError, ri.process, agent)
-            self.assertIn(
-                router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
-
-            # Simulate the unexpected exception having been fixed manually
-            internal_net_removed.side_effect = None
-
-            # periodic_sync_routers_task finds out that _rpc_loop failed to
-            # process the router last time, so it will retry in the next run.
-            ri.process(agent)
-            # We were able to remove the port from ri.internal_ports
-            self.assertNotIn(
-                router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
-
-    def test_process_router_floatingip_nochange(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(num_internal_ports=1)
-        fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
-                'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
-                'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}
-        fip2 = copy.copy(fip1)
-        fip2.update({'id': _uuid(), 'status': 'DOWN',
-                     'floating_ip_address': '9.9.9.9'})
-        router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2]
-
-        ri = legacy_router.LegacyRouter(router['id'], router,
-                                        **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        with mock.patch.object(
-            agent.plugin_rpc, 'update_floatingip_statuses'
-        ) as mock_update_fip_status,\
-                mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
-            mock_get_cidrs.return_value = set(
-                [fip1['floating_ip_address'] + '/32'])
-            ri.process(agent)
-            # make sure only the one that wasn't in existing cidrs was sent
-            mock_update_fip_status.assert_called_once_with(
-                mock.ANY, ri.router_id, {fip2['id']: 'ACTIVE'})
-
-    def test_process_router_floatingip_status_update_if_processed(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(num_internal_ports=1)
-        fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
-                'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
-                'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}
-        fip2 = copy.copy(fip1)
-        fip2.update({'id': _uuid(), 'status': 'DOWN', })
-        router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2]
-
-        ri = legacy_router.LegacyRouter(router['id'], router,
-                                        **self.ri_kwargs)
-        ri.external_gateway_added = mock.Mock()
-        with mock.patch.object(
-            agent.plugin_rpc, 'update_floatingip_statuses'
-        ) as mock_update_fip_status,\
-                mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
-            mock_get_cidrs.return_value = set()
-            ri.process(agent)
-            # make sure both were sent since neither existed in the cidrs
-            mock_update_fip_status.assert_called_once_with(
-                mock.ANY, ri.router_id, {fip1['id']: 'ACTIVE',
-                                         fip2['id']: 'ACTIVE'})
-
-    def test_process_router_floatingip_disabled(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        with mock.patch.object(
-            agent.plugin_rpc,
-            'update_floatingip_statuses') as mock_update_fip_status:
-            fip_id = _uuid()
-            router = l3_test_common.prepare_router_data(num_internal_ports=1)
-            router[l3_constants.FLOATINGIP_KEY] = [
-                {'id': fip_id,
-                 'floating_ip_address': '8.8.8.8',
-                 'fixed_ip_address': '7.7.7.7',
-                 'status': 'DOWN',
-                 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
-
-            ri = legacy_router.LegacyRouter(router['id'],
-                                            router,
-                                            **self.ri_kwargs)
-            ri.external_gateway_added = mock.Mock()
-            ri.process(agent)
-            # Assert the call for putting the floating IP up was performed
-            mock_update_fip_status.assert_called_once_with(
-                mock.ANY, ri.router_id,
-                {fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE})
-            mock_update_fip_status.reset_mock()
-            # Process the router again, this time without floating IPs
-            router[l3_constants.FLOATINGIP_KEY] = []
-            ri.router = router
-            ri.process(agent)
-            # Assert the call for putting the floating IP down was performed
-            mock_update_fip_status.assert_called_once_with(
-                mock.ANY, ri.router_id,
-                {fip_id: l3_constants.FLOATINGIP_STATUS_DOWN})
-
-    def test_process_router_floatingip_exception(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        with mock.patch.object(
-            agent.plugin_rpc,
-            'update_floatingip_statuses') as mock_update_fip_status:
-            fip_id = _uuid()
-            router = l3_test_common.prepare_router_data(num_internal_ports=1)
-            router[l3_constants.FLOATINGIP_KEY] = [
-                {'id': fip_id,
-                 'floating_ip_address': '8.8.8.8',
-                 'fixed_ip_address': '7.7.7.7',
-                 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
-
-            ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-            ri.process_floating_ip_addresses = mock.Mock(
-                side_effect=RuntimeError)
-            ri.external_gateway_added = mock.Mock()
-            ri.process(agent)
-            # Assert that the call putting the floating IP into the error
-            # state was performed
-            mock_update_fip_status.assert_called_once_with(
-                mock.ANY, ri.router_id,
-                {fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
-
-    def test_process_external_iptables_exception(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        with mock.patch.object(
-            agent.plugin_rpc,
-            'update_floatingip_statuses') as mock_update_fip_status:
-            fip_id = _uuid()
-            router = l3_test_common.prepare_router_data(num_internal_ports=1)
-            router[l3_constants.FLOATINGIP_KEY] = [
-                {'id': fip_id,
-                 'floating_ip_address': '8.8.8.8',
-                 'fixed_ip_address': '7.7.7.7',
-                 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
-
-            ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-            ri.iptables_manager._apply = mock.Mock(side_effect=Exception)
-            ri.process_external(agent)
-            # Assert that the call putting the floating IP into the error
-            # state was performed
-            mock_update_fip_status.assert_called_once_with(
-                mock.ANY, ri.router_id,
-                {fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
-
-            self.assertEqual(1, ri.iptables_manager._apply.call_count)
-
-    def test_handle_router_snat_rules_distributed_without_snat_manager(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrEdgeRouter(
-            agent,
-            HOSTNAME,
-            'foo_router_id',
-            {},
-            **self.ri_kwargs)
-        ri.iptables_manager = mock.Mock()
-        ri._is_this_snat_host = mock.Mock(return_value=True)
-        ri.get_ex_gw_port = mock.Mock(return_value=mock.ANY)
-
-        with mock.patch.object(dvr_router.LOG, 'debug') as log_debug:
-            ri._handle_router_snat_rules(mock.ANY, mock.ANY)
-        self.assertIsNone(ri.snat_iptables_manager)
-        self.assertFalse(ri.iptables_manager.called)
-        self.assertTrue(log_debug.called)
-
-    def test_handle_router_snat_rules_add_back_jump(self):
-        ri = l3router.RouterInfo(_uuid(), {}, **self.ri_kwargs)
-        ri.iptables_manager = mock.MagicMock()
-        port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
-
-        ri._handle_router_snat_rules(port, "iface")
-
-        nat = ri.iptables_manager.ipv4['nat']
-        nat.empty_chain.assert_any_call('snat')
-        nat.add_rule.assert_any_call('snat', '-j $float-snat')
-        for call in nat.mock_calls:
-            name, args, kwargs = call
-            if name == 'add_rule':
-                self.assertEqual(('snat', '-j $float-snat'), args)
-                self.assertEqual({}, kwargs)
-                break
-
-    def test_handle_router_snat_rules_add_rules(self):
-        ri = l3router.RouterInfo(_uuid(), {}, **self.ri_kwargs)
-        ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
-        ri.router = {'distributed': False}
-        ri._handle_router_snat_rules(ex_gw_port, "iface")
-
-        nat_rules = list(map(str, ri.iptables_manager.ipv4['nat'].rules))
-        wrap_name = ri.iptables_manager.wrap_name
-
-        jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name,
-                                                           wrap_name)
-        snat_rule1 = ("-A %s-snat -o iface -j SNAT --to-source %s") % (
-            wrap_name, ex_gw_port['fixed_ips'][0]['ip_address'])
-        snat_rule2 = ("-A %s-snat -m mark ! --mark 0x2/%s "
-                      "-m conntrack --ctstate DNAT "
-                      "-j SNAT --to-source %s") % (
-            wrap_name, l3_constants.ROUTER_MARK_MASK,
-            ex_gw_port['fixed_ips'][0]['ip_address'])
-
-        self.assertIn(jump_float_rule, nat_rules)
-
-        self.assertIn(snat_rule1, nat_rules)
-        self.assertIn(snat_rule2, nat_rules)
-        self.assertThat(nat_rules.index(jump_float_rule),
-                        matchers.LessThan(nat_rules.index(snat_rule1)))
-
-        mangle_rules = list(map(str, ri.iptables_manager.ipv4['mangle'].rules))
-        mangle_rule = ("-A %s-mark -i iface "
-                       "-j MARK --set-xmark 0x2/%s" %
-                       (wrap_name, l3_constants.ROUTER_MARK_MASK))
-        self.assertIn(mangle_rule, mangle_rules)
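-        # Rendered with wrap_name 'neutron-l3-agent' and a mark mask of
-        # '0xffff' (illustrative assumptions, not values checked here),
-        # the asserted rules would read:
-        #   -A neutron-l3-agent-snat -j neutron-l3-agent-float-snat
-        #   -A neutron-l3-agent-snat -o iface -j SNAT \
-        #       --to-source 192.168.1.4
-        #   -A neutron-l3-agent-snat -m mark ! --mark 0x2/0xffff \
-        #       -m conntrack --ctstate DNAT -j SNAT --to-source 192.168.1.4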
-
-    def test_process_router_delete_stale_internal_devices(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        stale_devlist = [l3_test_common.FakeDev('qr-a1b2c3d4-e5'),
-                         l3_test_common.FakeDev('qr-b2c3d4e5-f6')]
-        stale_devnames = [dev.name for dev in stale_devlist]
-
-        get_devices_return = []
-        get_devices_return.extend(stale_devlist)
-        self.mock_ip.get_devices.return_value = get_devices_return
-
-        router = l3_test_common.prepare_router_data(enable_snat=True,
-                                                    num_internal_ports=1)
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-
-        internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
-        self.assertEqual(1, len(internal_ports))
-        internal_port = internal_ports[0]
-
-        with mock.patch.object(ri, 'internal_network_removed'
-                               ) as internal_network_removed,\
-                mock.patch.object(ri, 'internal_network_added'
-                                  ) as internal_network_added,\
-                mock.patch.object(ri, 'external_gateway_removed'
-                                  ) as external_gateway_removed,\
-                mock.patch.object(ri, 'external_gateway_added'
-                                  ) as external_gateway_added:
-
-            ri.process(agent)
-
-            self.assertEqual(1, external_gateway_added.call_count)
-            self.assertFalse(external_gateway_removed.called)
-            self.assertFalse(internal_network_removed.called)
-            internal_network_added.assert_called_once_with(internal_port)
-            self.assertEqual(len(stale_devnames),
-                             self.mock_driver.unplug.call_count)
-            calls = [mock.call(stale_devname,
-                               namespace=ri.ns_name,
-                               prefix=l3_agent.INTERNAL_DEV_PREFIX)
-                     for stale_devname in stale_devnames]
-            self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
-
-    def test_process_router_delete_stale_external_devices(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        stale_devlist = [l3_test_common.FakeDev('qg-a1b2c3d4-e5')]
-        stale_devnames = [dev.name for dev in stale_devlist]
-
-        router = l3_test_common.prepare_router_data(enable_snat=True,
-                                                    num_internal_ports=1)
-        del router['gw_port']
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-
-        self.mock_ip.get_devices.return_value = stale_devlist
-
-        ri.process(agent)
-
-        self.mock_driver.unplug.assert_called_with(
-            stale_devnames[0],
-            bridge="br-ex",
-            namespace=ri.ns_name,
-            prefix=l3_agent.EXTERNAL_DEV_PREFIX)
-
-    def test_router_deleted(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        agent._queue = mock.Mock()
-        agent.router_deleted(None, FAKE_ID)
-        self.assertEqual(1, agent._queue.add.call_count)
-
-    def test_routers_updated(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        agent._queue = mock.Mock()
-        agent.routers_updated(None, [FAKE_ID])
-        self.assertEqual(1, agent._queue.add.call_count)
-
-    def test_removed_from_agent(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        agent._queue = mock.Mock()
-        agent.router_removed_from_agent(None, {'router_id': FAKE_ID})
-        self.assertEqual(1, agent._queue.add.call_count)
-
-    def test_added_to_agent(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        agent._queue = mock.Mock()
-        agent.router_added_to_agent(None, [FAKE_ID])
-        self.assertEqual(1, agent._queue.add.call_count)
-
-    def test_destroy_namespace(self):
-        namespace = 'qrouter-bar'
-
-        self.mock_ip.get_namespaces.return_value = [namespace]
-        self.mock_ip.get_devices.return_value = [
-            l3_test_common.FakeDev('qr-aaaa'),
-            l3_test_common.FakeDev('rfp-aaaa')]
-
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-
-        ns = namespaces.RouterNamespace(
-            'bar', self.conf, agent.driver, agent.use_ipv6)
-        ns.create()
-
-        ns.delete()
-        self.mock_driver.unplug.assert_called_once_with('qr-aaaa',
-                                                        prefix='qr-',
-                                                        namespace='qrouter'
-                                                        '-bar')
-        self.mock_ip.del_veth.assert_called_once_with('rfp-aaaa')
-
-    def test_destroy_router_namespace(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ns = namespaces.Namespace(
-            'qrouter-bar', self.conf, agent.driver, agent.use_ipv6)
-        ns.create()
-        ns.delete()
-        self.mock_ip.netns.delete.assert_called_once_with("qrouter-bar")
-
-    def _configure_metadata_proxy(self, enableflag=True):
-        if not enableflag:
-            self.conf.set_override('enable_metadata_proxy', False)
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router_id = _uuid()
-        router = {'id': router_id,
-                  'external_gateway_info': {},
-                  'routes': [],
-                  'distributed': False}
-        driver = metadata_driver.MetadataDriver
-        with mock.patch.object(
-            driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
-            with mock.patch.object(
-                driver, 'spawn_monitored_metadata_proxy') as spawn_proxy:
-                agent._process_added_router(router)
-                if enableflag:
-                    spawn_proxy.assert_called_with(
-                        mock.ANY,
-                        mock.ANY,
-                        self.conf.metadata_port,
-                        mock.ANY,
-                        router_id=router_id
-                    )
-                else:
-                    self.assertFalse(spawn_proxy.call_count)
-                agent._router_removed(router_id)
-                if enableflag:
-                    destroy_proxy.assert_called_with(mock.ANY,
-                                                     router_id,
-                                                     mock.ANY)
-                else:
-                    self.assertFalse(destroy_proxy.call_count)
-
-    def test_enable_metadata_proxy(self):
-        self._configure_metadata_proxy()
-
-    def test_disable_metadata_proxy_spawn(self):
-        self._configure_metadata_proxy(enableflag=False)
-
-    def test_router_id_specified_in_conf(self):
-        self.conf.set_override('router_id', '1234')
-        self._configure_metadata_proxy()
-
-    def _test_process_routers_update_rpc_timeout(self, ext_net_call=False,
-                                                 ext_net_call_failed=False):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        agent.fullsync = False
-        agent._process_router_if_compatible = mock.Mock()
-        if ext_net_call_failed:
-            agent._process_router_if_compatible.side_effect = (
-                oslo_messaging.MessagingTimeout)
-        agent._queue = mock.Mock()
-        agent._resync_router = mock.Mock()
-        update = mock.Mock()
-        update.router = None
-        agent._queue.each_update_to_next_router.side_effect = [
-            [(None, update)]]
-        agent._process_router_update()
-        self.assertFalse(agent.fullsync)
-        self.assertEqual(ext_net_call,
-                         agent._process_router_if_compatible.called)
-        agent._resync_router.assert_called_with(update)
-
-    def test_process_routers_update_rpc_timeout_on_get_routers(self):
-        self.plugin_api.get_routers.side_effect = (
-            oslo_messaging.MessagingTimeout)
-        self._test_process_routers_update_rpc_timeout()
-
-    def test_process_routers_update_resyncs_failed_router(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-
-        # Attempting to configure the router will fail
-        agent._process_router_if_compatible = mock.MagicMock()
-        agent._process_router_if_compatible.side_effect = RuntimeError()
-
-        # Queue an update from a full sync
-        update = router_processing_queue.RouterUpdate(
-            42,
-            router_processing_queue.PRIORITY_SYNC_ROUTERS_TASK,
-            router=mock.Mock(),
-            timestamp=timeutils.utcnow())
-        agent._queue.add(update)
-        agent._process_router_update()
-
-        # The update contained the router object, so get_routers won't
-        # be called
-        self.assertFalse(agent.plugin_rpc.get_routers.called)
-
-        # The update failed; assert that get_routers was called
-        agent._process_router_update()
-        self.assertTrue(agent.plugin_rpc.get_routers.called)
-
-    def test_process_routers_update_rpc_timeout_on_get_ext_net(self):
-        self._test_process_routers_update_rpc_timeout(ext_net_call=True,
-                                                      ext_net_call_failed=True)
-
-    def _test_process_routers_update_router_deleted(self, error=False):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        agent._queue = mock.Mock()
-        update = mock.Mock()
-        update.router = None
-        update.action = 1  # ROUTER_DELETED
-        router_info = mock.MagicMock()
-        agent.router_info[update.id] = router_info
-        router_processor = mock.Mock()
-        agent._queue.each_update_to_next_router.side_effect = [
-            [(router_processor, update)]]
-        agent._resync_router = mock.Mock()
-        if error:
-            agent._safe_router_removed = mock.Mock()
-            agent._safe_router_removed.return_value = False
-        agent._process_router_update()
-        if error:
-            self.assertFalse(router_processor.fetched_and_processed.called)
-            agent._resync_router.assert_called_with(update)
-        else:
-            router_info.delete.assert_called_once_with(agent)
-            self.assertFalse(agent.router_info)
-            self.assertFalse(agent._resync_router.called)
-            router_processor.fetched_and_processed.assert_called_once_with(
-                update.timestamp)
-
-    def test_process_routers_update_router_deleted_success(self):
-        self._test_process_routers_update_router_deleted()
-
-    def test_process_routers_update_router_deleted_error(self):
-        self._test_process_routers_update_router_deleted(True)
-
-    def test_process_router_if_compatible_with_no_ext_net_in_conf(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.plugin_api.get_external_network_id.return_value = 'aaa'
-
-        router = {'id': _uuid(),
-                  'routes': [],
-                  'admin_state_up': True,
-                  'external_gateway_info': {'network_id': 'aaa'}}
-
-        agent._process_router_if_compatible(router)
-        self.assertIn(router['id'], agent.router_info)
-        self.plugin_api.get_external_network_id.assert_called_with(
-            agent.context)
-
-    def test_process_router_if_compatible_with_cached_ext_net(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.plugin_api.get_external_network_id.return_value = 'aaa'
-        agent.target_ex_net_id = 'aaa'
-
-        router = {'id': _uuid(),
-                  'routes': [],
-                  'admin_state_up': True,
-                  'external_gateway_info': {'network_id': 'aaa'}}
-
-        agent._process_router_if_compatible(router)
-        self.assertIn(router['id'], agent.router_info)
-        self.assertFalse(self.plugin_api.get_external_network_id.called)
-
-    def test_process_router_if_compatible_with_stale_cached_ext_net(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.plugin_api.get_external_network_id.return_value = 'aaa'
-        agent.target_ex_net_id = 'bbb'
-
-        router = {'id': _uuid(),
-                  'routes': [],
-                  'admin_state_up': True,
-                  'external_gateway_info': {'network_id': 'aaa'}}
-
-        agent._process_router_if_compatible(router)
-        self.assertIn(router['id'], agent.router_info)
-        self.plugin_api.get_external_network_id.assert_called_with(
-            agent.context)
-
-    def test_process_router_if_compatible_w_no_ext_net_and_2_net_plugin(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-
-        router = {'id': _uuid(),
-                  'routes': [],
-                  'admin_state_up': True,
-                  'external_gateway_info': {'network_id': 'aaa'}}
-
-        agent.router_info = {}
-        self.plugin_api.get_external_network_id.side_effect = (
-            n_exc.TooManyExternalNetworks())
-        self.assertRaises(n_exc.TooManyExternalNetworks,
-                          agent._process_router_if_compatible,
-                          router)
-        self.assertNotIn(router['id'], agent.router_info)
-
-    def test_process_router_if_compatible_with_ext_net_in_conf(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.plugin_api.get_external_network_id.return_value = 'aaa'
-
-        router = {'id': _uuid(),
-                  'routes': [],
-                  'admin_state_up': True,
-                  'external_gateway_info': {'network_id': 'bbb'}}
-
-        agent.router_info = {}
-        self.conf.set_override('gateway_external_network_id', 'aaa')
-        self.assertRaises(n_exc.RouterNotCompatibleWithAgent,
-                          agent._process_router_if_compatible,
-                          router)
-        self.assertNotIn(router['id'], agent.router_info)
-
-    def test_process_router_if_compatible_with_no_bridge_no_ext_net(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.plugin_api.get_external_network_id.return_value = 'aaa'
-
-        router = {'id': _uuid(),
-                  'routes': [],
-                  'admin_state_up': True,
-                  'external_gateway_info': {'network_id': 'aaa'}}
-
-        agent.router_info = {}
-        self.conf.set_override('external_network_bridge', '')
-        agent._process_router_if_compatible(router)
-        self.assertIn(router['id'], agent.router_info)
-
-    def test_nonexistent_interface_driver(self):
-        self.conf.set_override('interface_driver', None)
-        self.assertRaises(SystemExit, l3_agent.L3NATAgent,
-                          HOSTNAME, self.conf)
-
-        self.conf.set_override('interface_driver', 'wrong.driver')
-        self.assertRaises(SystemExit, l3_agent.L3NATAgent,
-                          HOSTNAME, self.conf)
-
-    @mock.patch.object(namespaces.RouterNamespace, 'delete')
-    @mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete')
-    def _cleanup_namespace_test(self,
-                                stale_namespace_list,
-                                router_list,
-                                other_namespaces,
-                                mock_snat_ns,
-                                mock_router_ns):
-
-        good_namespace_list = [namespaces.NS_PREFIX + r['id']
-                               for r in router_list]
-        good_namespace_list += [dvr_snat_ns.SNAT_NS_PREFIX + r['id']
-                                for r in router_list]
-        self.mock_ip.get_namespaces.return_value = (stale_namespace_list +
-                                                    good_namespace_list +
-                                                    other_namespaces)
-
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-
-        self.assertTrue(agent.namespaces_manager._clean_stale)
-
-        pm = self.external_process.return_value
-        pm.reset_mock()
-
-        with agent.namespaces_manager as ns_manager:
-            for r in router_list:
-                ns_manager.keep_router(r['id'])
-        qrouters = [n for n in stale_namespace_list
-                    if n.startswith(namespaces.NS_PREFIX)]
-        self.assertEqual(len(qrouters), mock_router_ns.call_count)
-        self.assertEqual(
-            len(stale_namespace_list) - len(qrouters),
-            mock_snat_ns.call_count)
-
-        self.assertFalse(agent.namespaces_manager._clean_stale)
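-        # For example, with stale_namespace_list ['qrouter-foo',
-        # 'qrouter-bar', 'snat-foo'] and an empty router_list (a sketch
-        # mirroring test_cleanup_namespace below), RouterNamespace.delete
-        # is expected twice and SnatNamespace.delete once.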
-
-    def test_cleanup_namespace(self):
-        self.conf.set_override('router_id', None)
-        stale_namespaces = [namespaces.NS_PREFIX + 'foo',
-                            namespaces.NS_PREFIX + 'bar',
-                            dvr_snat_ns.SNAT_NS_PREFIX + 'foo']
-        other_namespaces = ['unknown']
-
-        self._cleanup_namespace_test(stale_namespaces,
-                                     [],
-                                     other_namespaces)
-
-    def test_cleanup_namespace_with_registered_router_ids(self):
-        self.conf.set_override('router_id', None)
-        stale_namespaces = [namespaces.NS_PREFIX + 'cccc',
-                            namespaces.NS_PREFIX + 'eeeee',
-                            dvr_snat_ns.SNAT_NS_PREFIX + 'fffff']
-        router_list = [{'id': 'foo', 'distributed': False},
-                       {'id': 'aaaa', 'distributed': False}]
-        other_namespaces = ['qdhcp-aabbcc', 'unknown']
-
-        self._cleanup_namespace_test(stale_namespaces,
-                                     router_list,
-                                     other_namespaces)
-
-    def test_cleanup_namespace_with_conf_router_id(self):
-        self.conf.set_override('router_id', 'bbbbb')
-        stale_namespaces = [namespaces.NS_PREFIX + 'cccc',
-                            namespaces.NS_PREFIX + 'eeeee',
-                            namespaces.NS_PREFIX + self.conf.router_id]
-        router_list = [{'id': 'foo', 'distributed': False},
-                       {'id': 'aaaa', 'distributed': False}]
-        other_namespaces = ['qdhcp-aabbcc', 'unknown']
-
-        self._cleanup_namespace_test(stale_namespaces,
-                                     router_list,
-                                     other_namespaces)
-
-    def test_create_dvr_gateway(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data()
-        ri = dvr_router.DvrEdgeRouter(agent,
-                                      HOSTNAME,
-                                      router['id'],
-                                      router,
-                                      **self.ri_kwargs)
-
-        port_id = _uuid()
-        subnet_id = _uuid()
-        dvr_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
-                                      'prefixlen': 24,
-                                      'subnet_id': subnet_id}],
-                       'subnets': [{'id': subnet_id,
-                                    'cidr': '20.0.0.0/24',
-                                    'gateway_ip': '20.0.0.1'}],
-                       'id': port_id,
-                       'network_id': _uuid(),
-                       'mac_address': 'ca:fe:de:ad:be:ef'}
-
-        interface_name = ri._get_snat_int_device_name(port_id)
-        self.device_exists.return_value = False
-
-        with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
-            get_interfaces.return_value = self.snat_ports
-            ri._create_dvr_gateway(dvr_gw_port, interface_name)
-
-        # Check that 2 internal ports and 1 ext-gw-port are plugged,
-        # i.e. 3 plug calls in total
-        self.assertEqual(3, self.mock_driver.plug.call_count)
-        self.assertEqual(3, self.mock_driver.init_router_port.call_count)
-
-    def test_get_service_plugin_list(self):
-        service_plugins = [p_const.L3_ROUTER_NAT]
-        self.plugin_api.get_service_plugin_list.return_value = service_plugins
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.assertEqual(service_plugins, agent.neutron_service_plugins)
-        self.assertTrue(self.plugin_api.get_service_plugin_list.called)
-
-    def test_get_service_plugin_list_failed(self):
-        raise_rpc = oslo_messaging.RemoteError()
-        self.plugin_api.get_service_plugin_list.side_effect = raise_rpc
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.assertIsNone(agent.neutron_service_plugins)
-        self.assertTrue(self.plugin_api.get_service_plugin_list.called)
-
-    def test_get_service_plugin_list_retried(self):
-        raise_timeout = oslo_messaging.MessagingTimeout()
-        # Raise a timeout the first 2 times get_service_plugin_list is
-        # called, then return an empty tuple
-        self.plugin_api.get_service_plugin_list.side_effect = (
-            raise_timeout, raise_timeout, tuple()
-        )
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-
-        self.assertEqual(tuple(), agent.neutron_service_plugins)
-
-    def test_get_service_plugin_list_retried_max(self):
-        raise_timeout = oslo_messaging.MessagingTimeout()
-        # Raise a timeout 5 times
-        self.plugin_api.get_service_plugin_list.side_effect = (
-            (raise_timeout, ) * 5
-        )
-        self.assertRaises(oslo_messaging.MessagingTimeout, l3_agent.L3NATAgent,
-                          HOSTNAME, self.conf)
-
-    def test_external_gateway_removed_ext_gw_port_no_fip_ns(self):
-        self.conf.set_override('state_path', '/tmp')
-
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        agent.conf.agent_mode = 'dvr_snat'
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['gw_port_host'] = HOSTNAME
-        self.mock_driver.unplug.reset_mock()
-
-        external_net_id = router['gw_port']['network_id']
-        ri = dvr_router.DvrEdgeRouter(
-            agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
-        ri.remove_floating_ip = mock.Mock()
-        agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
-        ri.ex_gw_port = ri.router['gw_port']
-        del ri.router['gw_port']
-        ri.fip_ns = None
-        nat = ri.iptables_manager.ipv4['nat']
-        nat.clear_rules_by_tag = mock.Mock()
-        nat.add_rule = mock.Mock()
-
-        ri.snat_namespace = mock.Mock()
-        ri.external_gateway_removed(
-            ri.ex_gw_port,
-            ri.get_external_device_name(ri.ex_gw_port['id']))
-
-        self.assertFalse(ri.remove_floating_ip.called)
-
-    def test_spawn_radvd(self):
-        router = l3_test_common.prepare_router_data(ip_version=6)
-
-        conffile = '/fake/radvd.conf'
-        pidfile = '/fake/radvd.pid'
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-
-        # Stop mocking the process manager and the IP class so that the
-        # execute() calls can be caught
-        self.external_process_p.stop()
-        self.ip_cls_p.stop()
-
-        get_conf_file_name = 'neutron.agent.linux.utils.get_conf_file_name'
-        get_pid_file_name = ('neutron.agent.linux.external_process.'
-                             'ProcessManager.get_pid_file_name')
-        utils_execute = 'neutron.agent.common.utils.execute'
-
-        mock.patch(get_conf_file_name).start().return_value = conffile
-        mock.patch(get_pid_file_name).start().return_value = pidfile
-        execute = mock.patch(utils_execute).start()
-
-        radvd = ra.DaemonMonitor(
-            router['id'],
-            namespaces.RouterNamespace._get_ns_name(router['id']),
-            agent.process_monitor,
-            l3_test_common.FakeDev,
-            self.conf)
-        radvd.enable(router['_interfaces'])
-
-        cmd = execute.call_args[0][0]
-
-        self.assertIn('radvd', cmd)
-
-        def _join(*args):
-            return ' '.join(args)
-
-        cmd = _join(*cmd)
-        self.assertIn(_join('-C', conffile), cmd)
-        self.assertIn(_join('-p', pidfile), cmd)
-        self.assertIn(_join('-m', 'syslog'), cmd)
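-        # Put together, the spawned command line is expected to look
-        # roughly like (an illustrative sketch):
-        #   radvd -C /fake/radvd.conf -p /fake/radvd.pid -m syslog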
-
-    def test_generate_radvd_conf_other_and_managed_flag(self):
-        # expected = {ra_mode: (AdvOtherConfigFlag, AdvManagedFlag), ...}
-        expected = {l3_constants.IPV6_SLAAC: (False, False),
-                    l3_constants.DHCPV6_STATELESS: (True, False),
-                    l3_constants.DHCPV6_STATEFUL: (False, True)}
-
-        modes = [l3_constants.IPV6_SLAAC, l3_constants.DHCPV6_STATELESS,
-                 l3_constants.DHCPV6_STATEFUL]
-        mode_combos = list(iter_chain(*[[list(combo) for combo in
-            iter_combinations(modes, i)] for i in range(1, len(modes) + 1)]))
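-        # mode_combos enumerates every non-empty combination of the three
-        # modes: three singletons, three pairs and one triple -- 7 in all.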
-
-        for mode_list in mode_combos:
-            ipv6_subnet_modes = [{'ra_mode': mode, 'address_mode': mode}
-                                 for mode in mode_list]
-            router = l3_test_common.prepare_router_data()
-            ri = self._process_router_ipv6_subnet_added(router,
-                                                        ipv6_subnet_modes)
-
-            ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
-
-            def assertFlag(flag):
-                return (self.assertIn if flag else self.assertNotIn)
-
-            other_flag, managed_flag = (
-                    any(expected[mode][0] for mode in mode_list),
-                    any(expected[mode][1] for mode in mode_list))
-
-            assertFlag(other_flag)('AdvOtherConfigFlag on;',
-                self.utils_replace_file.call_args[0][1])
-            assertFlag(managed_flag)('AdvManagedFlag on;',
-                self.utils_replace_file.call_args[0][1])
-
-    def test_generate_radvd_rdnss_conf(self):
-        router = l3_test_common.prepare_router_data()
-        ipv6_subnet_modes = [{'ra_mode': l3_constants.IPV6_SLAAC,
-                             'address_mode': l3_constants.IPV6_SLAAC}]
-        dns_list = ['fd01:1::100', 'fd01:1::200', 'fd01::300', 'fd01::400']
-        ri = self._process_router_ipv6_subnet_added(router,
-                                                    ipv6_subnet_modes,
-                                                    dns_nameservers=dns_list)
-        ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
-        # Verify that radvd configuration file includes RDNSS entries
-        expected = "RDNSS  "
-        for dns in dns_list[0:ra.MAX_RDNSS_ENTRIES]:
-            expected += "%s  " % dns
-        self.assertIn(expected, self.utils_replace_file.call_args[0][1])
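-        # Assuming ra.MAX_RDNSS_ENTRIES == 3 (the exact value is an
-        # assumption here), the expected entry would look like:
-        #   RDNSS  fd01:1::100  fd01:1::200  fd01::300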
-
-    def _pd_expected_call_external_process(self, requestor, ri, enable=True):
-        expected_calls = []
-        if enable:
-            expected_calls.append(mock.call(uuid=requestor,
-                                            service='dibbler',
-                                            default_cmd_callback=mock.ANY,
-                                            namespace=ri.ns_name,
-                                            conf=mock.ANY,
-                                            pid_file=mock.ANY))
-            expected_calls.append(mock.call().enable(reload_cfg=False))
-        else:
-            expected_calls.append(mock.call(uuid=requestor,
-                                            service='dibbler',
-                                            namespace=ri.ns_name,
-                                            conf=mock.ANY,
-                                            pid_file=mock.ANY))
-            expected_calls.append(mock.call().disable(
-                get_stop_command=mock.ANY))
-        return expected_calls
-
-    def _pd_setup_agent_router(self):
-        router = l3_test_common.prepare_router_data()
-        ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        agent.external_gateway_added = mock.Mock()
-        ri.process(agent)
-        agent._router_added(router['id'], router)
-        # Make sure radvd monitor is created
-        if not ri.radvd:
-            ri.radvd = ra.DaemonMonitor(router['id'],
-                                        ri.ns_name,
-                                        agent.process_monitor,
-                                        ri.get_internal_device_name,
-                                        self.conf)
-        return agent, router, ri
-
-    def _pd_remove_gw_interface(self, intfs, agent, router, ri):
-        expected_pd_update = {}
-        expected_calls = []
-        for intf in intfs:
-            requestor_id = self._pd_get_requestor_id(intf, router, ri)
-            expected_calls += (self._pd_expected_call_external_process(
-                requestor_id, ri, False))
-            for subnet in intf['subnets']:
-                expected_pd_update[subnet['id']] = (
-                    l3_constants.PROVISIONAL_IPV6_PD_PREFIX)
-
-        # Implement the prefix update notifier
-        # Keep track of the updated prefix
-        self.pd_update = {}
-
-        def pd_notifier(context, prefix_update):
-            self.pd_update = prefix_update
-            for subnet_id, prefix in six.iteritems(prefix_update):
-                for intf in intfs:
-                    for subnet in intf['subnets']:
-                        if subnet['id'] == subnet_id:
-                            # Update the prefix
-                            subnet['cidr'] = prefix
-                            break
-
-        # Remove the gateway interface
-        agent.pd.notifier = pd_notifier
-        agent.pd.remove_gw_interface(router['id'])
-
-        self._pd_assert_dibbler_calls(expected_calls,
-            self.external_process.mock_calls[-len(expected_calls):])
-        self.assertEqual(expected_pd_update, self.pd_update)
-
-    def _pd_remove_interfaces(self, intfs, agent, router, ri):
-        expected_pd_update = []
-        expected_calls = []
-        for intf in intfs:
-            # Remove the router interface
-            router[l3_constants.INTERFACE_KEY].remove(intf)
-            requestor_id = self._pd_get_requestor_id(intf, router, ri)
-            expected_calls += (self._pd_expected_call_external_process(
-                requestor_id, ri, False))
-            for subnet in intf['subnets']:
-                expected_pd_update += [{subnet['id']:
-                    l3_constants.PROVISIONAL_IPV6_PD_PREFIX}]
-
-        # Implement the prefix update notifier
-        # Keep track of the updated prefix
-        self.pd_update = []
-
-        def pd_notifier(context, prefix_update):
-            self.pd_update.append(prefix_update)
-            for intf in intfs:
-                for subnet in intf['subnets']:
-                    if subnet['id'] in prefix_update:
-                        # Update the prefix
-                        subnet['cidr'] = prefix_update[subnet['id']]
-
-        # Process the router for removed interfaces
-        agent.pd.notifier = pd_notifier
-        ri.process(agent)
-
-        # The number of external process calls takes radvd into account:
-        # after the interfaces are removed there is no IPv6 interface
-        # left, so radvd will be killed as well
-        self._pd_assert_dibbler_calls(expected_calls,
-            self.external_process.mock_calls[-len(expected_calls) - 2:])
-        self._pd_assert_radvd_calls(ri, False)
-        self.assertEqual(expected_pd_update, self.pd_update)
-
-    def _pd_get_requestor_id(self, intf, router, ri):
-        ifname = ri.get_internal_device_name(intf['id'])
-        for subnet in intf['subnets']:
-            return dibbler.PDDibbler(router['id'],
-                       subnet['id'], ifname).requestor_id
-
-    def _pd_assert_dibbler_calls(self, expected, actual):
-        '''Check that the external process calls for dibbler are expected
-
-        In the case of multiple pd-enabled router ports, the exact
-        sequence of these calls is not deterministic. It is known, though,
-        that each external_process call is followed by either an enable()
-        or a disable()
-        '''
-
-        num_ext_calls = len(expected) // 2
-        expected_ext_calls = []
-        actual_ext_calls = []
-        expected_action_calls = []
-        actual_action_calls = []
-        for c in range(num_ext_calls):
-            expected_ext_calls.append(expected[c * 2])
-            actual_ext_calls.append(actual[c * 2])
-            expected_action_calls.append(expected[c * 2 + 1])
-            actual_action_calls.append(actual[c * 2 + 1])
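-        # The expected/actual lists alternate process-manager calls with
-        # their enable()/disable() actions: the actions must match in
-        # order, while the process-manager calls themselves may arrive in
-        # any order, hence the membership check below.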
-
-        self.assertEqual(expected_action_calls, actual_action_calls)
-        for exp in expected_ext_calls:
-            for act in actual_ext_calls:
-                if exp == act:
-                    break
-            else:
-                msg = "Unexpected dibbler external process call."
-                self.fail(msg)
-
-    def _pd_assert_radvd_calls(self, ri, enable=True):
-        exp_calls = self._radvd_expected_call_external_process(ri, enable)
-        self.assertEqual(exp_calls,
-                         self.external_process.mock_calls[-len(exp_calls):])
-
-    def _pd_get_prefixes(self, agent, router, ri,
-                         existing_intfs, new_intfs, mock_get_prefix):
-        # First generate the prefixes that will be used for each interface
-        prefixes = {}
-        expected_pd_update = {}
-        expected_calls = []
-        for ifno, intf in enumerate(existing_intfs + new_intfs):
-            requestor_id = self._pd_get_requestor_id(intf, router, ri)
-            prefixes[requestor_id] = "2001:cafe:cafe:%d::/64" % ifno
-            if intf in new_intfs:
-                subnet_id = (intf['subnets'][0]['id'] if intf['subnets']
-                             else None)
-                expected_pd_update[subnet_id] = prefixes[requestor_id]
-                expected_calls += (
-                    self._pd_expected_call_external_process(requestor_id, ri))
-
-        # Implement the prefix update notifier
-        # Keep track of the updated prefix
-        self.pd_update = {}
-
-        def pd_notifier(context, prefix_update):
-            self.pd_update = prefix_update
-            for subnet_id, prefix in six.iteritems(prefix_update):
-                for intf in new_intfs:
-                    for subnet in intf['subnets']:
-                        if subnet['id'] == subnet_id:
-                            # Update the prefix
-                            subnet['cidr'] = prefix
-                            break
-
-        # Start the dibbler client
-        agent.pd.notifier = pd_notifier
-        agent.pd.process_prefix_update()
-
-        # Get the prefix and check that the neutron server is notified
-        def get_prefix(pdo):
-            key = '%s:%s:%s' % (pdo.router_id, pdo.subnet_id, pdo.ri_ifname)
-            return prefixes[key]
-        mock_get_prefix.side_effect = get_prefix
-        agent.pd.process_prefix_update()
-
-        # Make sure that the updated prefixes are expected
-        self._pd_assert_dibbler_calls(expected_calls,
-             self.external_process.mock_calls[-len(expected_calls):])
-        self.assertEqual(expected_pd_update, self.pd_update)
-
-    def _pd_add_gw_interface(self, agent, router, ri):
-        gw_ifname = ri.get_external_device_name(router['gw_port']['id'])
-        agent.pd.add_gw_interface(router['id'], gw_ifname)
-
-    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
-    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
-    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
-                       return_value=True)
-    @mock.patch.object(dibbler.os, 'chmod')
-    @mock.patch.object(dibbler.shutil, 'rmtree')
-    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
-    def test_pd_add_remove_subnet(self, mock1, mock2, mock3, mock4,
-                                  mock_getpid, mock_get_prefix):
-        '''Add and remove one pd-enabled subnet
-        Remove the interface by deleting it from the router
-        '''
-        # Initial setup
-        agent, router, ri = self._pd_setup_agent_router()
-
-        # Create one pd-enabled subnet and add router interface
-        intfs = l3_test_common.router_append_pd_enabled_subnet(router)
-        ri.process(agent)
-
-        # No client should be started since there is no gateway port
-        self.assertFalse(self.external_process.call_count)
-        self.assertFalse(mock_get_prefix.call_count)
-
-        # Add the gateway interface
-        self._pd_add_gw_interface(agent, router, ri)
-
-        # Get one prefix
-        self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
-
-        # Update the router with the new prefix
-        ri.process(agent)
-
-        # Check that radvd is started and the router port is configured
-        # with the new prefix
-        self._pd_assert_radvd_calls(ri)
-
-        # Now remove the interface
-        self._pd_remove_interfaces(intfs, agent, router, ri)
-
-    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
-    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
-    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
-                       return_value=True)
-    @mock.patch.object(dibbler.os, 'chmod')
-    @mock.patch.object(dibbler.shutil, 'rmtree')
-    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
-    def test_pd_remove_gateway(self, mock1, mock2, mock3, mock4,
-                               mock_getpid, mock_get_prefix):
-        '''Add one pd-enabled subnet, then remove the gateway port.
-
-        Check that the prefix is removed along with the gateway port.
-        '''
-        # Initial setup
-        agent, router, ri = self._pd_setup_agent_router()
-
-        # Create one pd-enabled subnet and add router interface
-        intfs = l3_test_common.router_append_pd_enabled_subnet(router)
-        ri.process(agent)
-
-        # Add the gateway interface
-        self._pd_add_gw_interface(agent, router, ri)
-
-        # Get one prefix
-        self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
-
-        # Update the router with the new prefix
-        ri.process(agent)
-
-        # Check that radvd is started
-        self._pd_assert_radvd_calls(ri)
-
-        # Now remove the gw interface
-        self._pd_remove_gw_interface(intfs, agent, router, ri)
-
-        # The gateway removal triggers one more router update
-        ri.process(agent)
-
-    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
-    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
-    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
-                       return_value=True)
-    @mock.patch.object(dibbler.os, 'chmod')
-    @mock.patch.object(dibbler.shutil, 'rmtree')
-    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
-    def test_pd_add_remove_2_subnets(self, mock1, mock2, mock3, mock4,
-                                     mock_getpid, mock_get_prefix):
-        '''Add and remove two pd-enabled subnets.
-
-        Remove the interfaces by deleting them from the router.
-        '''
-        # Initial setup
-        agent, router, ri = self._pd_setup_agent_router()
-
-        # Create 2 pd-enabled subnets and add router interfaces
-        intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=2)
-        ri.process(agent)
-
-        # No client should be started
-        self.assertFalse(self.external_process.call_count)
-        self.assertFalse(mock_get_prefix.call_count)
-
-        # Add the gateway interface
-        self._pd_add_gw_interface(agent, router, ri)
-
-        # Get prefixes
-        self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
-
-        # Update the router with the new prefix
-        ri.process(agent)
-
-        # Check that radvd is started and the router port is configured
-        # with the new prefix
-        self._pd_assert_radvd_calls(ri)
-
-        # Now remove the interface
-        self._pd_remove_interfaces(intfs, agent, router, ri)
-
-    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
-    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
-    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
-                       return_value=True)
-    @mock.patch.object(dibbler.os, 'chmod')
-    @mock.patch.object(dibbler.shutil, 'rmtree')
-    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
-    def test_pd_remove_gateway_2_subnets(self, mock1, mock2, mock3, mock4,
-                                         mock_getpid, mock_get_prefix):
-        '''Add one pd-enabled subnet, then add a second one.
-
-        Remove the gateway port and check that the prefixes are removed.
-        '''
-        # Initial setup
-        agent, router, ri = self._pd_setup_agent_router()
-
-        # Add the gateway interface
-        self._pd_add_gw_interface(agent, router, ri)
-
-        # Create 1 pd-enabled subnet and add router interface
-        intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=1)
-        ri.process(agent)
-
-        # Get prefixes
-        self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
-
-        # Update the router with the new prefix
-        ri.process(agent)
-
-        # Check that radvd is started
-        self._pd_assert_radvd_calls(ri)
-
-        # Now add a second pd-enabled subnet and its router interface
-        intfs1 = l3_test_common.router_append_pd_enabled_subnet(router,
-                                                                count=1)
-        ri.process(agent)
-
-        # Get prefixes
-        self._pd_get_prefixes(agent, router, ri, intfs,
-                              intfs1, mock_get_prefix)
-
-        # Update the router with the new prefix
-        ri.process(agent)
-
-        # Check that radvd is notified for the new prefix
-        self._pd_assert_radvd_calls(ri)
-
-        # Now remove the gw interface
-        self._pd_remove_gw_interface(intfs + intfs1, agent, router, ri)
-
-        ri.process(agent)
diff --git a/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py b/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py
deleted file mode 100644 (file)
index e5b2f9a..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-# Copyright (c) 2015 Openstack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.agent.common import utils
-from neutron.agent.l3 import dvr_fip_ns
-from neutron.agent.l3 import link_local_allocator as lla
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import iptables_manager
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-
-
-class TestDvrFipNs(base.BaseTestCase):
-    def setUp(self):
-        super(TestDvrFipNs, self).setUp()
-        self.conf = mock.Mock()
-        self.conf.state_path = '/tmp'
-        self.driver = mock.Mock()
-        self.driver.DEV_NAME_LEN = 14
-        self.net_id = _uuid()
-        self.fip_ns = dvr_fip_ns.FipNamespace(self.net_id,
-                                              self.conf,
-                                              self.driver,
-                                              use_ipv6=True)
-
-    def test_subscribe(self):
-        is_first = self.fip_ns.subscribe(mock.sentinel.router_id)
-        self.assertTrue(is_first)
-
-    def test_subscribe_not_first(self):
-        self.fip_ns.subscribe(mock.sentinel.router_id)
-        is_first = self.fip_ns.subscribe(mock.sentinel.router_id2)
-        self.assertFalse(is_first)
-
-    def test_unsubscribe(self):
-        self.fip_ns.subscribe(mock.sentinel.router_id)
-        is_last = self.fip_ns.unsubscribe(mock.sentinel.router_id)
-        self.assertTrue(is_last)
-
-    def test_unsubscribe_not_last(self):
-        self.fip_ns.subscribe(mock.sentinel.router_id)
-        self.fip_ns.subscribe(mock.sentinel.router_id2)
-        is_last = self.fip_ns.unsubscribe(mock.sentinel.router_id2)
-        self.assertFalse(is_last)
-
-    def test_allocate_rule_priority(self):
-        pr = self.fip_ns.allocate_rule_priority('20.0.0.30')
-        self.assertIn('20.0.0.30', self.fip_ns._rule_priorities.allocations)
-        self.assertNotIn(pr, self.fip_ns._rule_priorities.pool)
-
-    def test_deallocate_rule_priority(self):
-        pr = self.fip_ns.allocate_rule_priority('20.0.0.30')
-        self.fip_ns.deallocate_rule_priority('20.0.0.30')
-        self.assertNotIn('20.0.0.30', self.fip_ns._rule_priorities.allocations)
-        self.assertIn(pr, self.fip_ns._rule_priorities.pool)
-
-    @mock.patch.object(ip_lib, 'IPWrapper')
-    @mock.patch.object(ip_lib, 'IPDevice')
-    @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
-    @mock.patch.object(ip_lib, 'device_exists')
-    def test_gateway_added(self, device_exists, send_adv_notif,
-                           IPDevice, IPWrapper):
-        subnet_id = _uuid()
-        agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
-                                        'prefixlen': 24,
-                                        'subnet_id': subnet_id}],
-                         'subnets': [{'id': subnet_id,
-                                      'cidr': '20.0.0.0/24',
-                                      'gateway_ip': '20.0.0.1'}],
-                         'id': _uuid(),
-                         'network_id': self.net_id,
-                         'mac_address': 'ca:fe:de:ad:be:ef'}
-
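-        # The gateway device does not exist yet: it must be plugged,
-        # configured via init_l3, and a gratuitous ARP sent for its IP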
-        device_exists.return_value = False
-        self.fip_ns._gateway_added(agent_gw_port,
-                                   mock.sentinel.interface_name)
-        self.assertEqual(1, self.driver.plug.call_count)
-        self.assertEqual(1, self.driver.init_l3.call_count)
-        send_adv_notif.assert_called_once_with(self.fip_ns.get_name(),
-                                               mock.sentinel.interface_name,
-                                               '20.0.0.30',
-                                               mock.ANY)
-
-    @mock.patch.object(iptables_manager, 'IptablesManager')
-    @mock.patch.object(utils, 'execute')
-    @mock.patch.object(ip_lib.IpNetnsCommand, 'exists')
-    def _test_create(self, old_kernel, exists, execute, IPTables):
-        exists.return_value = True
-        # There are up to four sysctl calls - two for ip_nonlocal_bind,
-        # and two to enable forwarding
-        execute.side_effect = [RuntimeError if old_kernel else None,
-                               None, None, None]
-
-        self.fip_ns._iptables_manager = IPTables()
-        self.fip_ns.create()
-
-        ns_name = self.fip_ns.get_name()
-
-        netns_cmd = ['ip', 'netns', 'exec', ns_name]
-        bind_cmd = ['sysctl', '-w', 'net.ipv4.ip_nonlocal_bind=1']
-        expected = [mock.call(netns_cmd + bind_cmd, check_exit_code=True,
-                              extra_ok_codes=None, log_fail_as_error=False,
-                              run_as_root=True)]
-
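-        # On older kernels the namespace-scoped ip_nonlocal_bind sysctl
-        # fails, so a second attempt is made in the root namespace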
-        if old_kernel:
-            expected.append(mock.call(bind_cmd, check_exit_code=True,
-                                      extra_ok_codes=None,
-                                      log_fail_as_error=True,
-                                      run_as_root=True))
-
-        execute.assert_has_calls(expected)
-
-    def test_create_old_kernel(self):
-        self._test_create(True)
-
-    def test_create_new_kernel(self):
-        self._test_create(False)
-
-    @mock.patch.object(ip_lib, 'IPWrapper')
-    def test_destroy(self, IPWrapper):
-        ip_wrapper = IPWrapper()
-        dev1 = mock.Mock()
-        dev1.name = 'fpr-aaaa'
-        dev2 = mock.Mock()
-        dev2.name = 'fg-aaaa'
-        ip_wrapper.get_devices.return_value = [dev1, dev2]
-
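-        # Deleting the namespace should unplug the gateway device (fg-)
-        # and delete the veth pair to the router namespace (fpr-)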
-        with mock.patch.object(self.fip_ns.ip_wrapper_root.netns,
-                               'delete') as delete:
-            self.fip_ns.delete()
-            delete.assert_called_once_with(mock.ANY)
-
-        ext_net_bridge = self.conf.external_network_bridge
-        ns_name = self.fip_ns.get_name()
-        self.driver.unplug.assert_called_once_with('fg-aaaa',
-                                                   bridge=ext_net_bridge,
-                                                   prefix='fg-',
-                                                   namespace=ns_name)
-        ip_wrapper.del_veth.assert_called_once_with('fpr-aaaa')
-
-    @mock.patch.object(ip_lib, 'IPWrapper')
-    @mock.patch.object(ip_lib, 'IPDevice')
-    @mock.patch.object(ip_lib, 'device_exists')
-    def test_create_rtr_2_fip_link(self, device_exists, IPDevice, IPWrapper):
-        ri = mock.Mock()
-        ri.router_id = _uuid()
-        ri.rtr_fip_subnet = None
-        ri.ns_name = mock.sentinel.router_ns
-
-        rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(ri.router_id)
-        fip_2_rtr_name = self.fip_ns.get_int_device_name(ri.router_id)
-        fip_ns_name = self.fip_ns.get_name()
-
-        self.fip_ns.local_subnets = allocator = mock.Mock()
-        pair = lla.LinkLocalAddressPair('169.254.31.28/31')
-        allocator.allocate.return_value = pair
-        device_exists.return_value = False
-        ip_wrapper = IPWrapper()
-        self.conf.network_device_mtu = 2000
-        ip_wrapper.add_veth.return_value = (IPDevice(), IPDevice())
-
-        self.fip_ns.create_rtr_2_fip_link(ri)
-
-        ip_wrapper.add_veth.assert_called_with(rtr_2_fip_name,
-                                               fip_2_rtr_name,
-                                               fip_ns_name)
-
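-        # Both veth endpoints get the configured MTU, and the router side
-        # gets a gateway route in routing table 16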
-        device = IPDevice()
-        device.link.set_mtu.assert_called_with(2000)
-        self.assertEqual(2, device.link.set_mtu.call_count)
-        device.route.add_gateway.assert_called_once_with(
-            '169.254.31.29', table=16)
-
-    @mock.patch.object(ip_lib, 'IPWrapper')
-    @mock.patch.object(ip_lib, 'IPDevice')
-    @mock.patch.object(ip_lib, 'device_exists')
-    def test_create_rtr_2_fip_link_already_exists(self,
-                                                  device_exists,
-                                                  IPDevice,
-                                                  IPWrapper):
-        ri = mock.Mock()
-        ri.router_id = _uuid()
-        ri.rtr_fip_subnet = None
-        device_exists.return_value = True
-
-        self.fip_ns.local_subnets = allocator = mock.Mock()
-        pair = lla.LinkLocalAddressPair('169.254.31.28/31')
-        allocator.allocate.return_value = pair
-        self.fip_ns.create_rtr_2_fip_link(ri)
-
-        ip_wrapper = IPWrapper()
-        self.assertFalse(ip_wrapper.add_veth.called)
-
-    @mock.patch.object(ip_lib, 'IPDevice')
-    def _test_scan_fip_ports(self, ri, ip_list, IPDevice):
-        IPDevice.return_value = device = mock.Mock()
-        device.addr.list.return_value = ip_list
-        self.fip_ns.get_rtr_ext_device_name = mock.Mock(
-            return_value=mock.sentinel.rtr_ext_device_name)
-        self.fip_ns.scan_fip_ports(ri)
-
-    @mock.patch.object(ip_lib, 'device_exists')
-    def test_scan_fip_ports_restart_fips(self, device_exists):
-        device_exists.return_value = True
-        ri = mock.Mock()
-        ri.dist_fip_count = None
-        ri.floating_ips_dict = {}
-        ip_list = [{'cidr': '111.2.3.4/32'}, {'cidr': '111.2.3.5/32'}]
-        self._test_scan_fip_ports(ri, ip_list)
-        self.assertEqual(2, ri.dist_fip_count)
-
-    @mock.patch.object(ip_lib, 'device_exists')
-    def test_scan_fip_ports_restart_none(self, device_exists):
-        device_exists.return_value = True
-        ri = mock.Mock()
-        ri.dist_fip_count = None
-        ri.floating_ips_dict = {}
-        self._test_scan_fip_ports(ri, [])
-        self.assertEqual(0, ri.dist_fip_count)
-
-    def test_scan_fip_ports_restart_zero(self):
-        ri = mock.Mock()
-        ri.dist_fip_count = 0
-        self._test_scan_fip_ports(ri, None)
-        self.assertEqual(0, ri.dist_fip_count)
diff --git a/neutron/tests/unit/agent/l3/test_dvr_local_router.py b/neutron/tests/unit/agent/l3/test_dvr_local_router.py
deleted file mode 100644 (file)
index b200cf5..0000000
+++ /dev/null
@@ -1,631 +0,0 @@
-# Copyright (c) 2015 Openstack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import netaddr
-
-from oslo_log import log
-from oslo_utils import uuidutils
-
-from neutron.agent.common import config as agent_config
-from neutron.agent.l3 import agent as l3_agent
-from neutron.agent.l3 import config as l3_config
-from neutron.agent.l3 import dvr_local_router as dvr_router
-from neutron.agent.l3 import ha
-from neutron.agent.l3 import link_local_allocator as lla
-from neutron.agent.l3 import router_info
-from neutron.agent.linux import external_process
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.common import config as base_config
-from neutron.common import constants as l3_constants
-from neutron.common import utils as common_utils
-from neutron.extensions import portbindings
-from neutron.tests import base
-from neutron.tests.common import l3_test_common
-
-_uuid = uuidutils.generate_uuid
-FIP_PRI = 32768
-HOSTNAME = 'myhost'
-
-
-class TestDvrRouterOperations(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestDvrRouterOperations, self).setUp()
-        mock.patch('eventlet.spawn').start()
-        self.conf = agent_config.setup_conf()
-        self.conf.register_opts(base_config.core_opts)
-        log.register_options(self.conf)
-        self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
-        self.conf.register_opts(l3_config.OPTS)
-        self.conf.register_opts(ha.OPTS)
-        agent_config.register_interface_driver_opts_helper(self.conf)
-        agent_config.register_process_monitor_opts(self.conf)
-        self.conf.register_opts(interface.OPTS)
-        self.conf.register_opts(external_process.OPTS)
-        self.conf.set_override('router_id', 'fake_id')
-        self.conf.set_override('interface_driver',
-                               'neutron.agent.linux.interface.NullDriver')
-        self.conf.set_override('send_arp_for_ha', 1)
-        self.conf.set_override('state_path', '')
-
-        self.device_exists_p = mock.patch(
-            'neutron.agent.linux.ip_lib.device_exists')
-        self.device_exists = self.device_exists_p.start()
-
-        self.ensure_dir = mock.patch('neutron.common.utils.ensure_dir').start()
-
-        mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
-                   '.get_full_config_file_path').start()
-
-        self.utils_exec_p = mock.patch(
-            'neutron.agent.linux.utils.execute')
-        self.utils_exec = self.utils_exec_p.start()
-
-        self.utils_replace_file_p = mock.patch(
-            'neutron.common.utils.replace_file')
-        self.utils_replace_file = self.utils_replace_file_p.start()
-
-        self.external_process_p = mock.patch(
-            'neutron.agent.linux.external_process.ProcessManager')
-        self.external_process = self.external_process_p.start()
-        self.process_monitor = mock.patch(
-            'neutron.agent.linux.external_process.ProcessMonitor').start()
-
-        self.send_adv_notif_p = mock.patch(
-            'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
-        self.send_adv_notif = self.send_adv_notif_p.start()
-
-        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
-        driver_cls = self.dvr_cls_p.start()
-        self.mock_driver = mock.MagicMock()
-        self.mock_driver.DEV_NAME_LEN = (
-            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
-        driver_cls.return_value = self.mock_driver
-
-        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
-        ip_cls = self.ip_cls_p.start()
-        self.mock_ip = mock.MagicMock()
-        ip_cls.return_value = self.mock_ip
-
-        ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start()
-        self.mock_rule = mock.MagicMock()
-        ip_rule.return_value = self.mock_rule
-
-        ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
-        self.mock_ip_dev = mock.MagicMock()
-        ip_dev.return_value = self.mock_ip_dev
-
-        self.l3pluginApi_cls_p = mock.patch(
-            'neutron.agent.l3.agent.L3PluginApi')
-        l3pluginApi_cls = self.l3pluginApi_cls_p.start()
-        self.plugin_api = mock.MagicMock()
-        l3pluginApi_cls.return_value = self.plugin_api
-
-        self.looping_call_p = mock.patch(
-            'oslo_service.loopingcall.FixedIntervalLoopingCall')
-        self.looping_call_p.start()
-
-        subnet_id_1 = _uuid()
-        subnet_id_2 = _uuid()
-        self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
-                                         'gateway_ip': '152.2.0.1',
-                                         'id': subnet_id_1}],
-                           'network_id': _uuid(),
-                           'device_owner':
-                           l3_constants.DEVICE_OWNER_ROUTER_SNAT,
-                           'mac_address': 'fa:16:3e:80:8d:80',
-                           'fixed_ips': [{'subnet_id': subnet_id_1,
-                                          'ip_address': '152.2.0.13',
-                                          'prefixlen': 16}],
-                           'id': _uuid(), 'device_id': _uuid()},
-                          {'subnets': [{'cidr': '152.10.0.0/16',
-                                        'gateway_ip': '152.10.0.1',
-                                        'id': subnet_id_2}],
-                           'network_id': _uuid(),
-                           'device_owner':
-                           l3_constants.DEVICE_OWNER_ROUTER_SNAT,
-                           'mac_address': 'fa:16:3e:80:8d:80',
-                           'fixed_ips': [{'subnet_id': subnet_id_2,
-                                          'ip_address': '152.10.0.13',
-                                          'prefixlen': 16}],
-                           'id': _uuid(), 'device_id': _uuid()}]
-
-        self.ri_kwargs = {'agent_conf': self.conf,
-                          'interface_driver': self.mock_driver}
-
-    def _create_router(self, router=None, **kwargs):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        self.router_id = _uuid()
-        if not router:
-            router = mock.MagicMock()
-        return dvr_router.DvrLocalRouter(agent,
-                                         HOSTNAME,
-                                         self.router_id,
-                                         router,
-                                         self.conf,
-                                         mock.Mock(),
-                                         **kwargs)
-
-    def test_get_floating_ips_dvr(self):
-        router = mock.MagicMock()
-        router.get.return_value = [{'host': HOSTNAME},
-                                   {'host': mock.sentinel.otherhost}]
-        ri = self._create_router(router)
-
-        fips = ri.get_floating_ips()
-
-        self.assertEqual([{'host': HOSTNAME}], fips)
-
-    @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
-    @mock.patch.object(ip_lib, 'IPDevice')
-    @mock.patch.object(ip_lib, 'IPRule')
-    def test_floating_ip_added_dist(self, mIPRule, mIPDevice, mock_adv_notif):
-        router = mock.MagicMock()
-        ri = self._create_router(router)
-        ext_net_id = _uuid()
-        subnet_id = _uuid()
-        agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
-                                        'prefixlen': 24,
-                                        'subnet_id': subnet_id}],
-                         'subnets': [{'id': subnet_id,
-                                      'cidr': '20.0.0.0/24',
-                                      'gateway_ip': '20.0.0.1'}],
-                         'id': _uuid(),
-                         'network_id': ext_net_id,
-                         'mac_address': 'ca:fe:de:ad:be:ef'}
-
-        fip = {'id': _uuid(),
-               'host': HOSTNAME,
-               'floating_ip_address': '15.1.2.3',
-               'fixed_ip_address': '192.168.0.1',
-               'floating_network_id': ext_net_id,
-               'port_id': _uuid()}
-        ri.fip_ns = mock.Mock()
-        ri.fip_ns.agent_gateway_port = agent_gw_port
-        ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI
-        subnet = lla.LinkLocalAddressPair('169.254.30.42/31')
-        ri.rtr_fip_subnet = subnet
-        ri.fip_ns.local_subnets = mock.Mock()
-        ri.fip_ns.local_subnets.allocate.return_value = subnet
-        ri.dist_fip_count = 0
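-        # With rtr_fip_subnet already set, adding the first FIP must not
-        # allocate a new local subnet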
-        ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
-        ri.floating_ip_added_dist(fip, ip_cidr)
-        mIPRule().rule.add.assert_called_with(ip='192.168.0.1',
-                                              table=16,
-                                              priority=FIP_PRI)
-        ri.fip_ns.local_subnets.allocate.assert_not_called()
-        self.assertEqual(1, ri.dist_fip_count)
-
-        # Validate that fip_ns.local_subnets.allocate() is called when
-        # rtr_fip_subnet is None
-        ri.rtr_fip_subnet = None
-        ri.floating_ip_added_dist(fip, ip_cidr)
-        mIPRule().rule.add.assert_called_with(ip='192.168.0.1',
-                                              table=16,
-                                              priority=FIP_PRI)
-        ri.fip_ns.local_subnets.allocate.assert_called_once_with(ri.router_id)
-        # TODO(mrsmith): add more asserts
-
-    @mock.patch.object(ip_lib, 'IPWrapper')
-    @mock.patch.object(ip_lib, 'IPDevice')
-    @mock.patch.object(ip_lib, 'IPRule')
-    def test_floating_ip_removed_dist(self, mIPRule, mIPDevice, mIPWrapper):
-        router = mock.MagicMock()
-        ri = self._create_router(router)
-
-        subnet_id = _uuid()
-        agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
-                                        'prefixlen': 24,
-                                        'subnet_id': subnet_id}],
-                         'subnets': [{'id': subnet_id,
-                                      'cidr': '20.0.0.0/24',
-                                      'gateway_ip': '20.0.0.1'}],
-                         'id': _uuid(),
-                         'network_id': _uuid(),
-                         'mac_address': 'ca:fe:de:ad:be:ef'}
-        fip_cidr = '11.22.33.44/24'
-
-        ri.dist_fip_count = 2
-        ri.fip_ns = mock.Mock()
-        ri.fip_ns.get_name.return_value = 'fip_ns_name'
-        ri.floating_ips_dict['11.22.33.44'] = FIP_PRI
-        ri.fip_2_rtr = '11.22.33.42'
-        ri.rtr_2_fip = '11.22.33.40'
-        ri.fip_ns.agent_gateway_port = agent_gw_port
-        s = lla.LinkLocalAddressPair('169.254.30.42/31')
-        ri.rtr_fip_subnet = s
-        ri.fip_ns.local_subnets = mock.Mock()
-        ri.floating_ip_removed_dist(fip_cidr)
-        mIPRule().rule.delete.assert_called_with(
-            ip=str(netaddr.IPNetwork(fip_cidr).ip), table=16, priority=FIP_PRI)
-        mIPDevice().route.delete_route.assert_called_with(fip_cidr, str(s.ip))
-        self.assertFalse(ri.fip_ns.unsubscribe.called)
-        ri.fip_ns.local_subnets.allocate.assert_not_called()
-
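-        # When the last FIP goes away, the rtr/fip link is torn down: the
-        # veth is deleted, the gateway route removed and the namespace
-        # unsubscribed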
-        ri.dist_fip_count = 1
-        s1 = lla.LinkLocalAddressPair('15.1.2.3/32')
-        ri.rtr_fip_subnet = None
-        ri.fip_ns.local_subnets.allocate.return_value = s1
-        _, fip_to_rtr = s1.get_pair()
-        fip_ns = ri.fip_ns
-        ri.floating_ip_removed_dist(fip_cidr)
-        self.assertTrue(fip_ns.destroyed)
-        mIPWrapper().del_veth.assert_called_once_with(
-            fip_ns.get_int_device_name(router['id']))
-        mIPDevice().route.delete_gateway.assert_called_once_with(
-            str(fip_to_rtr.ip), table=16)
-        fip_ns.unsubscribe.assert_called_once_with(ri.router_id)
-        ri.fip_ns.local_subnets.allocate.assert_called_once_with(ri.router_id)
-
-    def _test_add_floating_ip(self, ri, fip, is_success):
-        ri._add_fip_addr_to_device = mock.Mock(return_value=is_success)
-        ri.floating_ip_added_dist = mock.Mock()
-
-        result = ri.add_floating_ip(fip,
-                                    mock.sentinel.interface_name,
-                                    mock.sentinel.device)
-        ri._add_fip_addr_to_device.assert_called_once_with(
-            fip, mock.sentinel.device)
-        return result
-
-    def test_add_floating_ip(self):
-        ri = self._create_router(mock.MagicMock())
-        ip = '15.1.2.3'
-        fip = {'floating_ip_address': ip}
-        result = self._test_add_floating_ip(ri, fip, True)
-        ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32')
-        self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result)
-
-    def test_add_floating_ip_error(self):
-        ri = self._create_router(mock.MagicMock())
-        result = self._test_add_floating_ip(
-            ri, {'floating_ip_address': '15.1.2.3'}, False)
-        self.assertFalse(ri.floating_ip_added_dist.called)
-        self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, result)
-
-    @mock.patch.object(router_info.RouterInfo, 'remove_floating_ip')
-    def test_remove_floating_ip(self, super_remove_floating_ip):
-        ri = self._create_router(mock.MagicMock())
-        ri.floating_ip_removed_dist = mock.Mock()
-
-        ri.remove_floating_ip(mock.sentinel.device, mock.sentinel.ip_cidr)
-
-        super_remove_floating_ip.assert_called_once_with(
-            mock.sentinel.device, mock.sentinel.ip_cidr)
-        ri.floating_ip_removed_dist.assert_called_once_with(
-            mock.sentinel.ip_cidr)
-
-    def test__get_internal_port(self):
-        ri = self._create_router()
-        port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
-        router_ports = [port]
-        ri.router.get.return_value = router_ports
-        self.assertEqual(port, ri._get_internal_port(mock.sentinel.subnet_id))
-
-    def test__get_internal_port_not_found(self):
-        ri = self._create_router()
-        port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
-        router_ports = [port]
-        ri.router.get.return_value = router_ports
-        self.assertIsNone(ri._get_internal_port(mock.sentinel.subnet_id2))
-
-    def test__get_snat_idx_ipv4(self):
-        ip_cidr = '101.12.13.00/24'
-        ri = self._create_router(mock.MagicMock())
-        snat_idx = ri._get_snat_idx(ip_cidr)
-        # 0x650C0D00 is numerical value of 101.12.13.00
-        self.assertEqual(0x650C0D00, snat_idx)
-
-    def test__get_snat_idx_ipv6(self):
-        ip_cidr = '2620:0:a03:e100::/64'
-        ri = self._create_router(mock.MagicMock())
-        snat_idx = ri._get_snat_idx(ip_cidr)
-        # 0x3D345705 is 30 bit xor folded crc32 of the ip_cidr
-        self.assertEqual(0x3D345705, snat_idx)
-
-    def test__get_snat_idx_ipv6_below_32768(self):
-        ip_cidr = 'd488::/30'
-        # crc32 of this ip_cidr is 0x1BD7
-        ri = self._create_router(mock.MagicMock())
-        snat_idx = ri._get_snat_idx(ip_cidr)
-        # 0x1BD7 + 0x3FFFFFFF = 0x40001BD6
-        self.assertEqual(0x40001BD6, snat_idx)
-
-    def test__set_subnet_arp_info(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['distributed'] = True
-        ri = dvr_router.DvrLocalRouter(
-            agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
-        ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
-        subnet_id = l3_test_common.get_subnet_id(ports[0])
-        test_ports = [{'mac_address': '00:11:22:33:44:55',
-                      'device_owner': l3_constants.DEVICE_OWNER_DHCP,
-                      'fixed_ips': [{'ip_address': '1.2.3.4',
-                                     'prefixlen': 24,
-                                     'subnet_id': subnet_id}]}]
-
-        self.plugin_api.get_ports_by_subnet.return_value = test_ports
-
-        # Test basic case
-        ports[0]['subnets'] = [{'id': subnet_id,
-                                'cidr': '1.2.3.0/24'}]
-        with mock.patch.object(ri,
-                               '_process_arp_cache_for_internal_port') as parp:
-            ri._set_subnet_arp_info(subnet_id)
-        self.assertEqual(1, parp.call_count)
-        self.mock_ip_dev.neigh.add.assert_called_once_with(
-            '1.2.3.4', '00:11:22:33:44:55')
-
-        # Test negative case: with no ports on the subnet, no additional
-        # ARP entries should be added
-        router['distributed'] = False
-        self.plugin_api.get_ports_by_subnet.return_value = []
-        ri._set_subnet_arp_info(subnet_id)
-        self.assertEqual(1, self.mock_ip_dev.neigh.add.call_count)
-
-    def test_add_arp_entry(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['distributed'] = True
-        subnet_id = l3_test_common.get_subnet_id(
-            router[l3_constants.INTERFACE_KEY][0])
-        arp_table = {'ip_address': '1.7.23.11',
-                     'mac_address': '00:11:22:33:44:55',
-                     'subnet_id': subnet_id}
-
-        payload = {'arp_table': arp_table, 'router_id': router['id']}
-        agent._router_added(router['id'], router)
-        agent.add_arp_entry(None, payload)
-        agent.router_deleted(None, router['id'])
-        self.mock_ip_dev.neigh.add.assert_called_once_with(
-            '1.7.23.11', '00:11:22:33:44:55')
-
-    def test_add_arp_entry_no_routerinfo(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        subnet_id = l3_test_common.get_subnet_id(
-            router[l3_constants.INTERFACE_KEY][0])
-        arp_table = {'ip_address': '1.7.23.11',
-                     'mac_address': '00:11:22:33:44:55',
-                     'subnet_id': subnet_id}
-
-        payload = {'arp_table': arp_table, 'router_id': router['id']}
-        agent.add_arp_entry(None, payload)
-
-    def test__update_arp_entry_with_no_subnet(self):
-        ri = dvr_router.DvrLocalRouter(
-            mock.sentinel.agent,
-            HOSTNAME,
-            'foo_router_id',
-            {'distributed': True, 'gw_port_host': HOSTNAME},
-            **self.ri_kwargs)
-        with mock.patch.object(l3_agent.ip_lib, 'IPDevice') as f:
-            ri._update_arp_entry(mock.ANY, mock.ANY, 'foo_subnet_id', 'add')
-        self.assertFalse(f.call_count)
-
-    def _setup_test_for_arp_entry_cache(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['distributed'] = True
-        ri = dvr_router.DvrLocalRouter(
-            agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
-        subnet_id = l3_test_common.get_subnet_id(
-            ri.router[l3_constants.INTERFACE_KEY][0])
-        return ri, subnet_id
-
-    def test__update_arp_entry_calls_arp_cache_with_no_device(self):
-        ri, subnet_id = self._setup_test_for_arp_entry_cache()
-        state = True
-        with mock.patch.object(l3_agent.ip_lib, 'IPDevice') as rtrdev,\
-                mock.patch.object(ri, '_cache_arp_entry') as arp_cache:
-            rtrdev.return_value.exists.return_value = False
-            state = ri._update_arp_entry(
-                mock.ANY, mock.ANY, subnet_id, 'add')
-        self.assertFalse(state)
-        self.assertTrue(arp_cache.called)
-        arp_cache.assert_called_once_with(mock.ANY, mock.ANY,
-                                          subnet_id, 'add')
-        self.assertFalse(rtrdev.return_value.neigh.add.called)
-
-    def test__process_arp_cache_for_internal_port(self):
-        ri, subnet_id = self._setup_test_for_arp_entry_cache()
-        ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55',
-                            subnet_id, 'add')
-        self.assertEqual(1, len(ri._pending_arp_set))
-        with mock.patch.object(ri, '_update_arp_entry') as update_arp:
-            update_arp.return_value = True
-            # Process inside the patch so the stubbed _update_arp_entry is
-            # the one consuming the pending entries
-            ri._process_arp_cache_for_internal_port(subnet_id)
-        self.assertEqual(0, len(ri._pending_arp_set))
-
-    def test__delete_arp_cache_for_internal_port(self):
-        ri, subnet_id = self._setup_test_for_arp_entry_cache()
-        ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55',
-                            subnet_id, 'add')
-        self.assertEqual(1, len(ri._pending_arp_set))
-        ri._delete_arp_cache_for_internal_port(subnet_id)
-        self.assertEqual(0, len(ri._pending_arp_set))
-
-    def test_del_arp_entry(self):
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['distributed'] = True
-        subnet_id = l3_test_common.get_subnet_id(
-            router[l3_constants.INTERFACE_KEY][0])
-        arp_table = {'ip_address': '1.5.25.15',
-                     'mac_address': '00:44:33:22:11:55',
-                     'subnet_id': subnet_id}
-
-        payload = {'arp_table': arp_table, 'router_id': router['id']}
-        agent._router_added(router['id'], router)
-        # first add the entry
-        agent.add_arp_entry(None, payload)
-        # now delete it
-        agent.del_arp_entry(None, payload)
-        self.mock_ip_dev.neigh.delete.assert_called_once_with(
-            '1.5.25.15', '00:44:33:22:11:55')
-        agent.router_deleted(None, router['id'])
-
-    def test_get_floating_agent_gw_interfaces(self):
-        fake_network_id = _uuid()
-        subnet_id = _uuid()
-        agent_gateway_port = (
-            [{'fixed_ips': [{'ip_address': '20.0.0.30',
-                             'prefixlen': 24,
-                             'subnet_id': subnet_id}],
-              'subnets': [{'id': subnet_id,
-                           'cidr': '20.0.0.0/24',
-                           'gateway_ip': '20.0.0.1'}],
-              'id': _uuid(),
-              portbindings.HOST_ID: 'myhost',
-              'device_owner': l3_constants.DEVICE_OWNER_AGENT_GW,
-              'network_id': fake_network_id,
-              'mac_address': 'ca:fe:de:ad:be:ef'}]
-        )
-
-        router = l3_test_common.prepare_router_data(enable_snat=True)
-        router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
-        router['distributed'] = True
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrLocalRouter(
-            agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
-        self.assertEqual(
-            agent_gateway_port[0],
-            ri.get_floating_agent_gw_interface(fake_network_id))
-
-    def test_process_router_dist_floating_ip_add(self):
-        fake_floatingips = {'floatingips': [
-            {'id': _uuid(),
-             'host': HOSTNAME,
-             'floating_ip_address': '15.1.2.3',
-             'fixed_ip_address': '192.168.0.1',
-             'floating_network_id': mock.sentinel.ext_net_id,
-             'port_id': _uuid()},
-            {'id': _uuid(),
-             'host': 'some-other-host',
-             'floating_ip_address': '15.1.2.4',
-             'fixed_ip_address': '192.168.0.10',
-             'floating_network_id': mock.sentinel.ext_net_id,
-             'port_id': _uuid()}]}
-
-        router = l3_test_common.prepare_router_data(enable_snat=True)
-        router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
-        router['distributed'] = True
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrLocalRouter(agent,
-                                       HOSTNAME,
-                                       router['id'],
-                                       router,
-                                       **self.ri_kwargs)
-        ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
-        ri.dist_fip_count = 0
-        fip_ns = agent.get_fip_ns(mock.sentinel.ext_net_id)
-        subnet_id = _uuid()
-        fip_ns.agent_gateway_port = (
-            {'fixed_ips': [{'ip_address': '20.0.0.30',
-                            'subnet_id': subnet_id}],
-             'subnets': [{'id': subnet_id,
-                          'cidr': '20.0.0.0/24',
-                          'gateway_ip': '20.0.0.1'}],
-             'id': _uuid(),
-             'network_id': _uuid(),
-             'mac_address': 'ca:fe:de:ad:be:ef'}
-        )
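-
-        # Only the FIP hosted on this agent should be returned for
-        # processing (a minimal check, cf. test_get_floating_ips_dvr)
-        self.assertEqual([fake_floatingips['floatingips'][0]],
-                         ri.get_floating_ips())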
-
-    def _test_ext_gw_updated_dvr_agent_mode(self, host,
-                                            agent_mode, expected_call_count):
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        ri = dvr_router.DvrLocalRouter(agent,
-                                       HOSTNAME,
-                                       router['id'],
-                                       router,
-                                       **self.ri_kwargs)
-
-        interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
-                                                                        ri)
-        ri._external_gateway_added = mock.Mock()
-
-        # test agent mode = dvr (compute node)
-        router['gw_port_host'] = host
-        agent.conf.agent_mode = agent_mode
-
-        ri.external_gateway_updated(ex_gw_port, interface_name)
-        # no gateway should be added on dvr node
-        self.assertEqual(expected_call_count,
-                         ri._external_gateway_added.call_count)
-
-    def test_ext_gw_updated_dvr_agent_mode(self):
-        # no gateway should be added on dvr node
-        self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0)
-
-    def test_ext_gw_updated_dvr_agent_mode_host(self):
-        # no gateway should be added on dvr node
-        self._test_ext_gw_updated_dvr_agent_mode(HOSTNAME,
-                                                 'dvr', 0)
-
-    def test_external_gateway_removed_ext_gw_port_and_fip(self):
-        self.conf.set_override('state_path', '/tmp')
-
-        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-        agent.conf.agent_mode = 'dvr'
-        router = l3_test_common.prepare_router_data(num_internal_ports=2)
-        router['gw_port_host'] = HOSTNAME
-        self.mock_driver.unplug.reset_mock()
-
-        external_net_id = router['gw_port']['network_id']
-        ri = dvr_router.DvrLocalRouter(
-            agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
-        ri.remove_floating_ip = mock.Mock()
-        agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
-        ri.ex_gw_port = ri.router['gw_port']
-        del ri.router['gw_port']
-        ri.fip_ns = None
-        nat = ri.iptables_manager.ipv4['nat']
-        nat.clear_rules_by_tag = mock.Mock()
-        nat.add_rule = mock.Mock()
-
-        ri.fip_ns = agent.get_fip_ns(external_net_id)
-        subnet_id = _uuid()
-        ri.fip_ns.agent_gateway_port = {
-            'fixed_ips': [{'ip_address': '20.0.0.30',
-                           'prefixlen': 24,
-                           'subnet_id': subnet_id}],
-            'subnets': [{'id': subnet_id,
-                         'cidr': '20.0.0.0/24',
-                         'gateway_ip': '20.0.0.1'}],
-            'id': _uuid(),
-            'network_id': external_net_id,
-            'mac_address': 'ca:fe:de:ad:be:ef'}
-
-        vm_floating_ip = '19.4.4.2'
-        ri.floating_ips_dict[vm_floating_ip] = FIP_PRI
-        ri.dist_fip_count = 1
-        ri.rtr_fip_subnet = ri.fip_ns.local_subnets.allocate(ri.router_id)
-        _, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
-        self.mock_ip.get_devices.return_value = [
-            l3_test_common.FakeDev(ri.fip_ns.get_ext_device_name(_uuid()))]
-        self.mock_ip_dev.addr.list.return_value = [
-            {'cidr': vm_floating_ip + '/32'},
-            {'cidr': '19.4.4.1/24'}]
-        self.device_exists.return_value = True
-
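-        # Removing the external gateway should also remove the VM floating
-        # IP that lives in the FIP namespace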
-        ri.external_gateway_removed(
-            ri.ex_gw_port,
-            ri.get_external_device_name(ri.ex_gw_port['id']))
-
-        ri.remove_floating_ip.assert_called_once_with(self.mock_ip_dev,
-                                                      '19.4.4.2/32')
diff --git a/neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py b/neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py
deleted file mode 100644 (file)
index b7d606d..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.l3 import fip_rule_priority_allocator as frpa
-from neutron.tests import base
-
-
-class TestFipPriority(base.BaseTestCase):
-    def setUp(self):
-        super(TestFipPriority, self).setUp()
-
-    def test__init__(self):
-        test_pr = frpa.FipPriority(10)
-        self.assertEqual(10, test_pr.index)
-
-    def test__repr__(self):
-        test_pr = frpa.FipPriority(20)
-        self.assertEqual("20", str(test_pr))
-
-    def test__eq__(self):
-        left_pr = frpa.FipPriority(10)
-        right_pr = frpa.FipPriority(10)
-        other_pr = frpa.FipPriority(20)
-        self.assertEqual(left_pr, right_pr)
-        self.assertNotEqual(left_pr, other_pr)
-        self.assertNotEqual(right_pr, other_pr)
-
-    def test__hash__(self):
-        left_pr = frpa.FipPriority(10)
-        right_pr = frpa.FipPriority(10)
-        other_pr = frpa.FipPriority(20)
-        self.assertEqual(hash(left_pr), hash(right_pr))
-        self.assertNotEqual(hash(left_pr), hash(other_pr))
-        self.assertNotEqual(hash(other_pr), hash(right_pr))
-
-
-class TestFipRulePriorityAllocator(base.BaseTestCase):
-    def setUp(self):
-        super(TestFipRulePriorityAllocator, self).setUp()
-        self.priority_rule_start = 100
-        self.priority_rule_end = 200
-        self.data_store_path = '/data_store_path_test'
-
-    def test__init__(self):
-        _frpa = frpa.FipRulePriorityAllocator(self.data_store_path,
-                                              self.priority_rule_start,
-                                              self.priority_rule_end)
-        self.assertEqual(self.data_store_path, _frpa.state_file)
-        self.assertEqual(frpa.FipPriority, _frpa.ItemClass)
-        self.assertEqual(100, len(_frpa.pool))
diff --git a/neutron/tests/unit/agent/l3/test_ha_router.py b/neutron/tests/unit/agent/l3/test_ha_router.py
deleted file mode 100644 (file)
index 7b05f0a..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (c) 2015 Openstack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.agent.l3 import ha_router
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-
-
-class TestBasicRouterOperations(base.BaseTestCase):
-    def setUp(self):
-        super(TestBasicRouterOperations, self).setUp()
-
-    def _create_router(self, router=None, **kwargs):
-        if not router:
-            router = mock.MagicMock()
-        self.agent_conf = mock.Mock()
-        self.router_id = _uuid()
-        return ha_router.HaRouter(mock.sentinel.enqueue_state,
-                                  self.router_id,
-                                  router,
-                                  self.agent_conf,
-                                  mock.sentinel.driver,
-                                  **kwargs)
-
-    def test_get_router_cidrs_returns_ha_cidrs(self):
-        ri = self._create_router()
-        device = mock.MagicMock()
-        device.name.return_value = 'eth2'
-        addresses = ['15.1.2.2/24', '15.1.2.3/32']
-        ri._get_cidrs_from_keepalived = mock.MagicMock(return_value=addresses)
-        self.assertEqual(set(addresses), ri.get_router_cidrs(device))
-
-    def test__add_default_gw_virtual_route(self):
-        ri = self._create_router()
-        mock_instance = mock.Mock()
-        mock_instance.virtual_routes.gateway_routes = []
-        ri._get_keepalived_instance = mock.Mock(return_value=mock_instance)
-        subnets = [{'id': _uuid(),
-                    'cidr': '20.0.0.0/24',
-                    'gateway_ip': None}]
-        ex_gw_port = {'fixed_ips': [],
-                      'subnets': subnets,
-                      'extra_subnets': [],
-                      'id': _uuid(),
-                      'network_id': _uuid(),
-                      'mac_address': 'ca:fe:de:ad:be:ef'}
-        # Make sure no exception is raised when there is no gateway IP
-        ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc')
-        self.assertEqual(0, len(mock_instance.virtual_routes.gateway_routes))
-
-        subnets.append({'id': _uuid(),
-                        'cidr': '30.0.0.0/24',
-                        'gateway_ip': '30.0.0.1'})
-        ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc')
-        self.assertEqual(1, len(mock_instance.virtual_routes.gateway_routes))
diff --git a/neutron/tests/unit/agent/l3/test_item_allocator.py b/neutron/tests/unit/agent/l3/test_item_allocator.py
deleted file mode 100644 (file)
index 7f4c365..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.agent.l3 import item_allocator as ia
-from neutron.tests import base
-
-
-class TestObject(object):
-    def __init__(self, value):
-        super(TestObject, self).__init__()
-        self._value = value
-
-    def __str__(self):
-        return str(self._value)
-
-
-class TestItemAllocator(base.BaseTestCase):
-    def setUp(self):
-        super(TestItemAllocator, self).setUp()
-
-    def test__init__(self):
-        test_pool = set(TestObject(s) for s in range(32768, 40000))
-        with mock.patch.object(ia.ItemAllocator, '_write') as write:
-            a = ia.ItemAllocator('/file', TestObject, test_pool)
-            test_object = a.allocate('test')
-
-        self.assertIn('test', a.allocations)
-        self.assertIn(test_object, a.allocations.values())
-        self.assertNotIn(test_object, a.pool)
-        self.assertTrue(write.called)
-
-    def test__init__readfile(self):
-        test_pool = set(TestObject(s) for s in range(32768, 40000))
-        with mock.patch.object(ia.ItemAllocator, '_read') as read:
-            read.return_value = ["da873ca2,10\n"]
-            a = ia.ItemAllocator('/file', TestObject, test_pool)
-
-        self.assertIn('da873ca2', a.remembered)
-        self.assertEqual({}, a.allocations)
-
-    def test_allocate(self):
-        test_pool = set([TestObject(33000), TestObject(33001)])
-        a = ia.ItemAllocator('/file', TestObject, test_pool)
-        with mock.patch.object(ia.ItemAllocator, '_write') as write:
-            test_object = a.allocate('test')
-
-        self.assertIn('test', a.allocations)
-        self.assertIn(test_object, a.allocations.values())
-        self.assertNotIn(test_object, a.pool)
-        self.assertTrue(write.called)
-
-    def test_allocate_from_file(self):
-        test_pool = set([TestObject(33000), TestObject(33001)])
-        with mock.patch.object(ia.ItemAllocator, '_read') as read:
-            read.return_value = ["deadbeef,33000\n"]
-            a = ia.ItemAllocator('/file', TestObject, test_pool)
-
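-        # A key restored from the state file reuses its remembered item,
-        # so nothing new is written back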
-        with mock.patch.object(ia.ItemAllocator, '_write') as write:
-            t_obj = a.allocate('deadbeef')
-
-        self.assertEqual('33000', t_obj._value)
-        self.assertIn('deadbeef', a.allocations)
-        self.assertIn(t_obj, a.allocations.values())
-        self.assertNotIn(t_obj, a.pool)
-        self.assertFalse(write.called)
-
-    def test_allocate_exhausted_pool(self):
-        test_pool = set([TestObject(33000)])
-        with mock.patch.object(ia.ItemAllocator, '_read') as read:
-            read.return_value = ["deadbeef,33000\n"]
-            a = ia.ItemAllocator('/file', TestObject, test_pool)
-
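-        # With the pool exhausted, the unclaimed remembered entry is
-        # reclaimed and handed to the new requester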
-        with mock.patch.object(ia.ItemAllocator, '_write') as write:
-            allocation = a.allocate('abcdef12')
-
-        self.assertNotIn('deadbeef', a.allocations)
-        self.assertNotIn(allocation, a.pool)
-        self.assertTrue(write.called)
-
-    def test_release(self):
-        test_pool = set([TestObject(33000), TestObject(33001)])
-        with mock.patch.object(ia.ItemAllocator, '_write') as write:
-            a = ia.ItemAllocator('/file', TestObject, test_pool)
-            allocation = a.allocate('deadbeef')
-            write.reset_mock()
-            a.release('deadbeef')
-
-        self.assertNotIn('deadbeef', a.allocations)
-        self.assertIn(allocation, a.pool)
-        self.assertEqual({}, a.allocations)
-        write.assert_called_once_with([])
diff --git a/neutron/tests/unit/agent/l3/test_legacy_router.py b/neutron/tests/unit/agent/l3/test_legacy_router.py
deleted file mode 100644 (file)
index 95f6bcc..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (c) 2015 Openstack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.agent.l3 import legacy_router
-from neutron.agent.linux import ip_lib
-from neutron.common import constants as l3_constants
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-
-
-class BasicRouterTestCaseFramework(base.BaseTestCase):
-    def _create_router(self, router=None, **kwargs):
-        if not router:
-            router = mock.MagicMock()
-        self.agent_conf = mock.Mock()
-        self.driver = mock.Mock()
-        self.router_id = _uuid()
-        return legacy_router.LegacyRouter(self.router_id,
-                                          router,
-                                          self.agent_conf,
-                                          self.driver,
-                                          **kwargs)
-
-
-class TestBasicRouterOperations(BasicRouterTestCaseFramework):
-
-    def test_remove_floating_ip(self):
-        ri = self._create_router(mock.MagicMock())
-        device = mock.Mock()
-        cidr = '15.1.2.3/32'
-
-        ri.remove_floating_ip(device, cidr)
-
-        device.delete_addr_and_conntrack_state.assert_called_once_with(cidr)
-
-    def test_remove_external_gateway_ip(self):
-        ri = self._create_router(mock.MagicMock())
-        device = mock.Mock()
-        cidr = '172.16.0.0/24'
-
-        ri.remove_external_gateway_ip(device, cidr)
-
-        device.delete_addr_and_conntrack_state.assert_called_once_with(cidr)
-
-
-@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
-class TestAddFloatingIpWithMockGarp(BasicRouterTestCaseFramework):
-    def test_add_floating_ip(self, send_ip_addr_adv_notif):
-        ri = self._create_router()
-        ri._add_fip_addr_to_device = mock.Mock(return_value=True)
-        ip = '15.1.2.3'
-        result = ri.add_floating_ip({'floating_ip_address': ip},
-                                    mock.sentinel.interface_name,
-                                    mock.sentinel.device)
-        ip_lib.send_ip_addr_adv_notif.assert_called_once_with(
-            ri.ns_name,
-            mock.sentinel.interface_name,
-            ip,
-            self.agent_conf)
-        self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result)
-
-    def test_add_floating_ip_error(self, send_ip_addr_adv_notif):
-        ri = self._create_router()
-        ri._add_fip_addr_to_device = mock.Mock(return_value=False)
-        result = ri.add_floating_ip({'floating_ip_address': '15.1.2.3'},
-                                    mock.sentinel.interface_name,
-                                    mock.sentinel.device)
-        self.assertFalse(ip_lib.send_ip_addr_adv_notif.called)
-        self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, result)
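
The two GARP-oriented tests encode a small status contract for add_floating_ip: configure the /32 on the device first, advertise the address only on success, and report the outcome as a floating IP status. A hedged sketch of that control flow, reusing the module imports above (the function name is hypothetical, not the method body itself):

    def add_floating_ip_sketch(ri, fip, interface_name, device):
        # Configure the /32 on the device; report ERROR when that fails.
        if not ri._add_fip_addr_to_device(fip, device):
            return l3_constants.FLOATINGIP_STATUS_ERROR
        # Advertise the new address (gratuitous ARP) only after success.
        ip_lib.send_ip_addr_adv_notif(ri.ns_name, interface_name,
                                      fip['floating_ip_address'],
                                      ri.agent_conf)
        return l3_constants.FLOATINGIP_STATUS_ACTIVE
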
diff --git a/neutron/tests/unit/agent/l3/test_link_local_allocator.py b/neutron/tests/unit/agent/l3/test_link_local_allocator.py
deleted file mode 100644 (file)
index e33b676..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-
-from neutron.agent.l3 import link_local_allocator as lla
-from neutron.tests import base
-
-
-class TestLinkLocalAddrAllocator(base.BaseTestCase):
-    def setUp(self):
-        super(TestLinkLocalAddrAllocator, self).setUp()
-        self.subnet = netaddr.IPNetwork('169.254.31.0/24')
-
-    def test__init__(self):
-        a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
-        self.assertEqual('/file', a.state_file)
-        self.assertEqual({}, a.allocations)
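
Only construction is exercised here, but the allocator's job, assumed from its name and the 169.254.31.0/24 fixture rather than verified by this test, is to carve a link-local range into two-address pairs. netaddr does that carving directly:

    import netaddr

    subnet = netaddr.IPNetwork('169.254.31.0/24')
    pairs = list(subnet.subnet(31))      # 128 two-address /31 networks
    print(pairs[0])                      # 169.254.31.0/31
    print(pairs[-1])                     # 169.254.31.254/31
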
diff --git a/neutron/tests/unit/agent/l3/test_namespace_manager.py b/neutron/tests/unit/agent/l3/test_namespace_manager.py
deleted file mode 100644 (file)
index 6223119..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (c) 2015 Rackspace
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.agent.l3 import dvr_fip_ns
-from neutron.agent.l3 import dvr_snat_ns
-from neutron.agent.l3 import namespace_manager
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-
-
-class NamespaceManagerTestCaseFramework(base.BaseTestCase):
-
-    def _create_namespace_manager(self):
-        self.agent_conf = mock.Mock()
-        self.driver = mock.Mock()
-        return namespace_manager.NamespaceManager(self.agent_conf, self.driver)
-
-
-class TestNamespaceManager(NamespaceManagerTestCaseFramework):
-
-    def setUp(self):
-        super(TestNamespaceManager, self).setUp()
-        self.ns_manager = self._create_namespace_manager()
-
-    def test_get_prefix_and_id(self):
-        router_id = _uuid()
-
-        ns_prefix, ns_id = self.ns_manager.get_prefix_and_id(
-            namespaces.NS_PREFIX + router_id)
-        self.assertEqual(namespaces.NS_PREFIX, ns_prefix)
-        self.assertEqual(router_id, ns_id)
-
-        ns_prefix, ns_id = self.ns_manager.get_prefix_and_id(
-            dvr_snat_ns.SNAT_NS_PREFIX + router_id)
-        self.assertEqual(dvr_snat_ns.SNAT_NS_PREFIX, ns_prefix)
-        self.assertEqual(router_id, ns_id)
-
-        ns_name = 'dhcp-' + router_id
-        self.assertIsNone(self.ns_manager.get_prefix_and_id(ns_name))
-
-    def test_is_managed(self):
-        router_id = _uuid()
-
-        router_ns_name = namespaces.NS_PREFIX + router_id
-        self.assertTrue(self.ns_manager.is_managed(router_ns_name))
-        router_ns_name = dvr_snat_ns.SNAT_NS_PREFIX + router_id
-        self.assertTrue(self.ns_manager.is_managed(router_ns_name))
-
-        ext_net_id = _uuid()
-        router_ns_name = dvr_fip_ns.FIP_NS_PREFIX + ext_net_id
-        self.assertTrue(self.ns_manager.is_managed(router_ns_name))
-
-        self.assertFalse(self.ns_manager.is_managed('dhcp-' + router_id))
-
-    def test_list_all(self):
-        ns_names = [namespaces.NS_PREFIX + _uuid(),
-                    dvr_snat_ns.SNAT_NS_PREFIX + _uuid(),
-                    dvr_fip_ns.FIP_NS_PREFIX + _uuid(),
-                    'dhcp-' + _uuid(), ]
-
-        # Test the normal path
-        with mock.patch.object(ip_lib.IPWrapper, 'get_namespaces',
-                               return_value=ns_names):
-            retrieved_ns_names = self.ns_manager.list_all()
-        self.assertEqual(len(ns_names) - 1, len(retrieved_ns_names))
-        for ns_name in ns_names[:-1]:
-            self.assertIn(ns_name, retrieved_ns_names)
-        self.assertNotIn(ns_names[-1], retrieved_ns_names)
-
-        # Test path where IPWrapper raises exception
-        with mock.patch.object(ip_lib.IPWrapper, 'get_namespaces',
-                               side_effect=RuntimeError):
-            retrieved_ns_names = self.ns_manager.list_all()
-        self.assertFalse(retrieved_ns_names)
-
-    def test_ensure_router_cleanup(self):
-        router_id = _uuid()
-        ns_names = [namespaces.NS_PREFIX + _uuid() for _ in range(5)]
-        ns_names += [dvr_snat_ns.SNAT_NS_PREFIX + _uuid() for _ in range(5)]
-        ns_names += [namespaces.NS_PREFIX + router_id,
-                     dvr_snat_ns.SNAT_NS_PREFIX + router_id]
-        with mock.patch.object(ip_lib.IPWrapper, 'get_namespaces',
-                               return_value=ns_names), \
-                mock.patch.object(self.ns_manager, '_cleanup') as mock_cleanup:
-            self.ns_manager.ensure_router_cleanup(router_id)
-            expected = [mock.call(namespaces.NS_PREFIX, router_id),
-                        mock.call(dvr_snat_ns.SNAT_NS_PREFIX, router_id)]
-            mock_cleanup.assert_has_calls(expected, any_order=True)
-            self.assertEqual(2, mock_cleanup.call_count)
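
The bookkeeping above boils down to prefix matching: a managed namespace is a known prefix followed by a resource id, and anything else, such as the DHCP agent's 'dhcp-' namespaces, is ignored. A standalone sketch, with the prefix strings assumed to match the Neutron constants referenced above:

    # Prefix values assumed to match the Neutron constants used above.
    MANAGED_PREFIXES = ('qrouter-', 'snat-', 'fip-')

    def get_prefix_and_id(ns_name):
        """Return (prefix, resource id) for a managed name, else None."""
        for prefix in MANAGED_PREFIXES:
            if ns_name.startswith(prefix):
                return prefix, ns_name[len(prefix):]
        return None

    def is_managed(ns_name):
        return get_prefix_and_id(ns_name) is not None

    assert is_managed('qrouter-0b0b7b19')
    assert not is_managed('dhcp-0b0b7b19')
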
diff --git a/neutron/tests/unit/agent/l3/test_router_info.py b/neutron/tests/unit/agent/l3/test_router_info.py
deleted file mode 100644 (file)
index 4921d15..0000000
+++ /dev/null
@@ -1,325 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.agent.common import config as agent_config
-from neutron.agent.l3 import router_info
-from neutron.agent.linux import ip_lib
-from neutron.common import constants as l3_constants
-from neutron.common import exceptions as n_exc
-from neutron.tests import base
-
-
-_uuid = uuidutils.generate_uuid
-
-
-class TestRouterInfo(base.BaseTestCase):
-    def setUp(self):
-        super(TestRouterInfo, self).setUp()
-
-        conf = agent_config.setup_conf()
-
-        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
-        ip_cls = self.ip_cls_p.start()
-        self.mock_ip = mock.MagicMock()
-        ip_cls.return_value = self.mock_ip
-        self.ri_kwargs = {'agent_conf': conf,
-                          'interface_driver': mock.sentinel.interface_driver}
-
-    def _check_agent_method_called(self, calls):
-        self.mock_ip.netns.execute.assert_has_calls(
-            [mock.call(call, check_exit_code=False) for call in calls],
-            any_order=True)
-
-    def test_routing_table_update(self):
-        ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs)
-        ri.router = {}
-
-        fake_route1 = {'destination': '135.207.0.0/16',
-                       'nexthop': '1.2.3.4'}
-        fake_route2 = {'destination': '135.207.111.111/32',
-                       'nexthop': '1.2.3.4'}
-
-        ri.update_routing_table('replace', fake_route1)
-        expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16',
-                     'via', '1.2.3.4']]
-        self._check_agent_method_called(expected)
-
-        ri.update_routing_table('delete', fake_route1)
-        expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16',
-                     'via', '1.2.3.4']]
-        self._check_agent_method_called(expected)
-
-        ri.update_routing_table('replace', fake_route2)
-        expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32',
-                     'via', '1.2.3.4']]
-        self._check_agent_method_called(expected)
-
-        ri.update_routing_table('delete', fake_route2)
-        expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32',
-                     'via', '1.2.3.4']]
-        self._check_agent_method_called(expected)
-
-    def test_update_routing_table(self):
-        # Just verify the correct namespace was used in the call
-        uuid = _uuid()
-        netns = 'qrouter-' + uuid
-        fake_route1 = {'destination': '135.207.0.0/16',
-                       'nexthop': '1.2.3.4'}
-
-        ri = router_info.RouterInfo(uuid, {'id': uuid}, **self.ri_kwargs)
-        ri._update_routing_table = mock.Mock()
-
-        ri.update_routing_table('replace', fake_route1)
-        ri._update_routing_table.assert_called_once_with('replace',
-                                                         fake_route1,
-                                                         netns)
-
-    def test_routes_updated(self):
-        ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs)
-        ri.router = {}
-
-        fake_old_routes = []
-        fake_new_routes = [{'destination': "110.100.31.0/24",
-                            'nexthop': "10.100.10.30"},
-                           {'destination': "110.100.30.0/24",
-                            'nexthop': "10.100.10.30"}]
-        ri.routes = fake_old_routes
-        ri.router['routes'] = fake_new_routes
-        ri.routes_updated(fake_old_routes, fake_new_routes)
-
-        expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24',
-                    'via', '10.100.10.30'],
-                    ['ip', 'route', 'replace', 'to', '110.100.31.0/24',
-                     'via', '10.100.10.30']]
-
-        self._check_agent_method_called(expected)
-        ri.routes = fake_new_routes
-        fake_new_routes = [{'destination': "110.100.30.0/24",
-                            'nexthop': "10.100.10.30"}]
-        ri.router['routes'] = fake_new_routes
-        ri.routes_updated(ri.routes, fake_new_routes)
-        expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24',
-                    'via', '10.100.10.30']]
-
-        self._check_agent_method_called(expected)
-        fake_new_routes = []
-        ri.router['routes'] = fake_new_routes
-        ri.routes_updated(ri.routes, fake_new_routes)
-
-        expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24',
-                    'via', '10.100.10.30']]
-        self._check_agent_method_called(expected)
-
-
-class BasicRouterTestCaseFramework(base.BaseTestCase):
-    def _create_router(self, router=None, **kwargs):
-        if not router:
-            router = mock.MagicMock()
-        self.agent_conf = mock.Mock()
-        self.router_id = _uuid()
-        return router_info.RouterInfo(self.router_id,
-                                      router,
-                                      self.agent_conf,
-                                      mock.sentinel.interface_driver,
-                                      **kwargs)
-
-
-class TestBasicRouterOperations(BasicRouterTestCaseFramework):
-
-    def test_get_floating_ips(self):
-        router = mock.MagicMock()
-        router.get.return_value = [mock.sentinel.floating_ip]
-        ri = self._create_router(router)
-
-        fips = ri.get_floating_ips()
-
-        self.assertEqual([mock.sentinel.floating_ip], fips)
-
-    def test_process_floating_ip_nat_rules(self):
-        ri = self._create_router()
-        fips = [{'fixed_ip_address': mock.sentinel.ip,
-                 'floating_ip_address': mock.sentinel.fip}]
-        ri.get_floating_ips = mock.Mock(return_value=fips)
-        ri.iptables_manager = mock.MagicMock()
-        ipv4_nat = ri.iptables_manager.ipv4['nat']
-        ri.floating_forward_rules = mock.Mock(
-            return_value=[(mock.sentinel.chain, mock.sentinel.rule)])
-
-        ri.process_floating_ip_nat_rules()
-
-        # Be sure that the rules are cleared first and apply is called last
-        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
-                         ipv4_nat.mock_calls[0])
-        self.assertEqual(mock.call.apply(), ri.iptables_manager.mock_calls[-1])
-
-        # Be sure that add_rule is called somewhere in the middle
-        ipv4_nat.add_rule.assert_called_once_with(mock.sentinel.chain,
-                                                  mock.sentinel.rule,
-                                                  tag='floating_ip')
-
-    def test_process_floating_ip_nat_rules_removed(self):
-        ri = self._create_router()
-        ri.get_floating_ips = mock.Mock(return_value=[])
-        ri.iptables_manager = mock.MagicMock()
-        ipv4_nat = ri.iptables_manager.ipv4['nat']
-
-        ri.process_floating_ip_nat_rules()
-
-        # Be sure that the rules are cleared first and apply is called last
-        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
-                         ipv4_nat.mock_calls[0])
-        self.assertEqual(mock.call.apply(), ri.iptables_manager.mock_calls[-1])
-
-        # With no floating IPs, add_rule must not be called at all
-        self.assertFalse(ipv4_nat.add_rule.called)
-
-    def _test_add_fip_addr_to_device(self, device):
-        ri = self._create_router()
-        ip = '15.1.2.3'
-
-        result = ri._add_fip_addr_to_device(
-            {'id': mock.sentinel.id, 'floating_ip_address': ip}, device)
-
-        device.addr.add.assert_called_with(ip + '/32')
-        return result
-
-    def test__add_fip_addr_to_device(self):
-        result = self._test_add_fip_addr_to_device(mock.Mock())
-        self.assertTrue(result)
-
-    def test__add_fip_addr_to_device_error(self):
-        device = mock.Mock()
-        device.addr.add.side_effect = RuntimeError
-        result = self._test_add_fip_addr_to_device(device)
-        self.assertFalse(result)
-
-    def test_process_snat_dnat_for_fip(self):
-        ri = self._create_router()
-        ri.process_floating_ip_nat_rules = mock.Mock(side_effect=Exception)
-
-        self.assertRaises(n_exc.FloatingIpSetupException,
-                          ri.process_snat_dnat_for_fip)
-
-        ri.process_floating_ip_nat_rules.assert_called_once_with()
-
-    def test_put_fips_in_error_state(self):
-        ri = self._create_router()
-        ri.router = mock.Mock()
-        ri.router.get.return_value = [{'id': mock.sentinel.id1},
-                                      {'id': mock.sentinel.id2}]
-
-        statuses = ri.put_fips_in_error_state()
-
-        expected = {mock.sentinel.id1: l3_constants.FLOATINGIP_STATUS_ERROR,
-                    mock.sentinel.id2: l3_constants.FLOATINGIP_STATUS_ERROR}
-        self.assertEqual(expected, statuses)
-
-    def test_configure_fip_addresses(self):
-        ri = self._create_router()
-        ri.process_floating_ip_addresses = mock.Mock(
-            side_effect=Exception)
-
-        self.assertRaises(n_exc.FloatingIpSetupException,
-                          ri.configure_fip_addresses,
-                          mock.sentinel.interface_name)
-
-        ri.process_floating_ip_addresses.assert_called_once_with(
-            mock.sentinel.interface_name)
-
-    def test_get_router_cidrs_returns_cidrs(self):
-        ri = self._create_router()
-        addresses = ['15.1.2.2/24', '15.1.2.3/32']
-        device = mock.MagicMock()
-        device.addr.list.return_value = [{'cidr': addresses[0]},
-                                         {'cidr': addresses[1]}]
-        self.assertEqual(set(addresses), ri.get_router_cidrs(device))
-
-
-@mock.patch.object(ip_lib, 'IPDevice')
-class TestFloatingIpWithMockDevice(BasicRouterTestCaseFramework):
-
-    def test_process_floating_ip_addresses_remap(self, IPDevice):
-        fip_id = _uuid()
-        fip = {
-            'id': fip_id, 'port_id': _uuid(),
-            'floating_ip_address': '15.1.2.3',
-            'fixed_ip_address': '192.168.0.2',
-            'status': l3_constants.FLOATINGIP_STATUS_DOWN
-        }
-
-        IPDevice.return_value = device = mock.Mock()
-        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
-        ri = self._create_router()
-        ri.get_floating_ips = mock.Mock(return_value=[fip])
-
-        fip_statuses = ri.process_floating_ip_addresses(
-            mock.sentinel.interface_name)
-        self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
-                         fip_statuses)
-
-        self.assertFalse(device.addr.add.called)
-        self.assertFalse(device.addr.delete.called)
-
-    def test_process_router_with_disabled_floating_ip(self, IPDevice):
-        fip_id = _uuid()
-        fip = {
-            'id': fip_id, 'port_id': _uuid(),
-            'floating_ip_address': '15.1.2.3',
-            'fixed_ip_address': '192.168.0.2'
-        }
-
-        ri = self._create_router()
-        ri.floating_ips = [fip]
-        ri.get_floating_ips = mock.Mock(return_value=[])
-
-        fip_statuses = ri.process_floating_ip_addresses(
-            mock.sentinel.interface_name)
-
-        self.assertIsNone(fip_statuses.get(fip_id))
-
-    def test_process_router_floating_ip_with_device_add_error(self, IPDevice):
-        IPDevice.return_value = device = mock.Mock(side_effect=RuntimeError)
-        device.addr.list.return_value = []
-        fip_id = _uuid()
-        fip = {
-            'id': fip_id, 'port_id': _uuid(),
-            'floating_ip_address': '15.1.2.3',
-            'fixed_ip_address': '192.168.0.2',
-            'status': l3_constants.FLOATINGIP_STATUS_DOWN
-        }
-        ri = self._create_router()
-        ri.add_floating_ip = mock.Mock(
-            return_value=l3_constants.FLOATINGIP_STATUS_ERROR)
-        ri.get_floating_ips = mock.Mock(return_value=[fip])
-
-        fip_statuses = ri.process_floating_ip_addresses(
-            mock.sentinel.interface_name)
-
-        self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR},
-                         fip_statuses)
-
-    # TODO(mrsmith): refactor for DVR cases
-    def test_process_floating_ip_addresses_remove(self, IPDevice):
-        IPDevice.return_value = device = mock.Mock()
-        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
-
-        ri = self._create_router()
-        ri.remove_floating_ip = mock.Mock()
-        ri.router.get = mock.Mock(return_value=[])
-
-        fip_statuses = ri.process_floating_ip_addresses(
-            mock.sentinel.interface_name)
-        self.assertEqual({}, fip_statuses)
-        ri.remove_floating_ip.assert_called_once_with(device, '15.1.2.3/32')
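
The expected command lists in TestRouterInfo spell out exactly how a route update is rendered: 'ip route replace|delete to <destination> via <nexthop>', executed inside the router's qrouter-<id> namespace. A tiny sketch of that rendering (the helper name is illustrative only):

    def routing_table_cmd(operation, route):
        """Render the command asserted above; operation is 'replace'
        or 'delete'."""
        return ['ip', 'route', operation, 'to', route['destination'],
                'via', route['nexthop']]

    cmd = routing_table_cmd('replace', {'destination': '135.207.0.0/16',
                                        'nexthop': '1.2.3.4'})
    # ['ip', 'route', 'replace', 'to', '135.207.0.0/16', 'via', '1.2.3.4']
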
diff --git a/neutron/tests/unit/agent/l3/test_router_processing_queue.py b/neutron/tests/unit/agent/l3/test_router_processing_queue.py
deleted file mode 100644 (file)
index 8f8db19..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import datetime
-
-from oslo_utils import uuidutils
-
-from neutron.agent.l3 import router_processing_queue as l3_queue
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-FAKE_ID = _uuid()
-FAKE_ID_2 = _uuid()
-
-
-class TestExclusiveRouterProcessor(base.BaseTestCase):
-    def setUp(self):
-        super(TestExclusiveRouterProcessor, self).setUp()
-
-    def test_i_am_master(self):
-        master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-        not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-        master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2)
-        not_master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2)
-
-        self.assertTrue(master._i_am_master())
-        self.assertFalse(not_master._i_am_master())
-        self.assertTrue(master_2._i_am_master())
-        self.assertFalse(not_master_2._i_am_master())
-
-        master.__exit__(None, None, None)
-        master_2.__exit__(None, None, None)
-
-    def test_master(self):
-        master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-        not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-        master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2)
-        not_master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2)
-
-        self.assertEqual(master, master._master)
-        self.assertEqual(master, not_master._master)
-        self.assertEqual(master_2, master_2._master)
-        self.assertEqual(master_2, not_master_2._master)
-
-        master.__exit__(None, None, None)
-        master_2.__exit__(None, None, None)
-
-    def test__enter__(self):
-        self.assertNotIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters)
-        master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-        master.__enter__()
-        self.assertIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters)
-        master.__exit__(None, None, None)
-
-    def test__exit__(self):
-        master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-        not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-        master.__enter__()
-        self.assertIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters)
-        not_master.__enter__()
-        not_master.__exit__(None, None, None)
-        self.assertIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters)
-        master.__exit__(None, None, None)
-        self.assertNotIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters)
-
-    def test_data_fetched_since(self):
-        master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-        self.assertEqual(datetime.datetime.min,
-                         master._get_router_data_timestamp())
-
-        ts1 = datetime.datetime.utcnow() - datetime.timedelta(seconds=10)
-        ts2 = datetime.datetime.utcnow()
-
-        master.fetched_and_processed(ts2)
-        self.assertEqual(ts2, master._get_router_data_timestamp())
-        master.fetched_and_processed(ts1)
-        self.assertEqual(ts2, master._get_router_data_timestamp())
-
-        master.__exit__(None, None, None)
-
-    def test_updates(self):
-        master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-        not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID)
-
-        master.queue_update(l3_queue.RouterUpdate(FAKE_ID, 0))
-        not_master.queue_update(l3_queue.RouterUpdate(FAKE_ID, 0))
-
-        for update in not_master.updates():
-            self.fail("Only the master should process a router")
-
-        self.assertEqual(2, len(list(master.updates())))
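
The pattern these tests document is "first instance wins": the first processor created for a router id becomes master, later instances funnel their updates to it, and only the master drains the queue. A simplified, self-contained sketch of that contract; the real class also tracks fetch timestamps (see test_data_fetched_since), which is omitted here:

    class SketchExclusiveProcessor(object):
        """Sketch of the master semantics asserted above."""
        _masters = {}

        def __init__(self, router_id):
            self._router_id = router_id
            if router_id not in self._masters:
                self._masters[router_id] = self   # first instance is master
                self._queue = []
            self._master = self._masters[router_id]

        def _i_am_master(self):
            return self._master is self

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            if self._i_am_master():
                del self._masters[self._router_id]

        def queue_update(self, update):
            self._master._queue.append(update)    # updates funnel to master

        def updates(self):
            while self._i_am_master() and self._master._queue:
                yield self._master._queue.pop(0)

    with SketchExclusiveProcessor('r1') as master:
        SketchExclusiveProcessor('r1').queue_update('update-1')
        assert list(master.updates()) == ['update-1']
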
diff --git a/neutron/tests/unit/agent/l3/test_rt_tables.py b/neutron/tests/unit/agent/l3/test_rt_tables.py
deleted file mode 100644 (file)
index 70471c8..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Enterprise Development Company, L.P.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import fixtures
-import mock
-
-from neutron.agent.common import utils as common_utils
-from neutron.agent.l3 import rt_tables
-from neutron.tests import base
-
-
-def mock_netnamespace_directory(function):
-    """Decorator to test RoutingTablesManager with temp dir
-
-    Allows direct testing of RoutingTablesManager by changing the directory
-    where it finds the rt_tables to one in /tmp where root privileges are not
-    required and it won't mess with any real routing tables.
-    """
-    orig_execute = common_utils.execute
-
-    def execute_no_root(*args, **kwargs):
-        kwargs['run_as_root'] = False
-        orig_execute(*args, **kwargs)
-
-    def inner(*args, **kwargs):
-        with fixtures.TempDir() as tmpdir:
-            cls = rt_tables.NamespaceEtcDir
-            with mock.patch.object(common_utils, 'execute') as execute,\
-                    mock.patch.object(cls, 'BASE_DIR', tmpdir.path):
-                execute.side_effect = execute_no_root
-                function(*args, **kwargs)
-    return inner
-
-
-class TestRoutingTablesManager(base.BaseTestCase):
-    def setUp(self):
-        super(TestRoutingTablesManager, self).setUp()
-        self.ns_name = "fakens"
-
-    @mock_netnamespace_directory
-    def test_default_tables(self):
-        rtm = rt_tables.RoutingTablesManager(self.ns_name)
-        self.assertEqual(253, rtm.get("default").table_id)
-        self.assertEqual(254, rtm.get("main").table_id)
-        self.assertEqual(255, rtm.get("local").table_id)
-        self.assertEqual(0, rtm.get("unspec").table_id)
-
-    @mock_netnamespace_directory
-    def test_get_all(self):
-        rtm = rt_tables.RoutingTablesManager(self.ns_name)
-        table_names = set(rt.name for rt in rtm.get_all())
-        self.assertEqual({"main", "default", "local", "unspec"}, table_names)
-
-        new_table = rtm.add("faketable")
-        self.assertIn(new_table, rtm.get_all())
-
-    @mock_netnamespace_directory
-    def test_add(self):
-        rtm = rt_tables.RoutingTablesManager(self.ns_name)
-        added_table = rtm.add("faketable")
-        self.assertGreaterEqual(added_table.table_id, 1024)
-
-        table = rtm.get("faketable")
-        self.assertEqual(added_table, table)
-
-        # Be sure that adding it twice gets the same result
-        added_again = rtm.add("faketable")
-        self.assertEqual(added_table, added_again)
-
-    @mock_netnamespace_directory
-    def test_delete(self):
-        rtm = rt_tables.RoutingTablesManager(self.ns_name)
-        rtm.add("faketable")
-        rtm.delete("faketable")
-
-        table = rtm.get("faketable")
-        self.assertIsNone(table)
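
In summary, the manager reserves the four well-known kernel tables (unspec 0, default 253, main 254, local 255), hands out ids starting at 1024 for new tables, treats a second add() of the same name as a no-op, and returns None for deleted or unknown names. An in-memory sketch of exactly that behavior; unlike the real manager it returns bare table ids and persists nothing:

    class SketchRoutingTablesManager(object):
        """In-memory sketch; returns bare table ids, persists nothing."""

        RESERVED = {'unspec': 0, 'default': 253, 'main': 254, 'local': 255}

        def __init__(self):
            self._tables = dict(self.RESERVED)

        def get(self, name):
            return self._tables.get(name)         # table id, or None

        def add(self, name):
            if name not in self._tables:          # adding twice is a no-op
                used = set(self._tables.values())
                table_id = 1024                   # user tables start here
                while table_id in used:
                    table_id += 1
                self._tables[name] = table_id
            return self._tables[name]

        def delete(self, name):
            self._tables.pop(name, None)

    rtm = SketchRoutingTablesManager()
    assert rtm.add('faketable') >= 1024
    assert rtm.add('faketable') == rtm.get('faketable')
    rtm.delete('faketable')
    assert rtm.get('faketable') is None
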
diff --git a/neutron/tests/unit/agent/linux/__init__.py b/neutron/tests/unit/agent/linux/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/linux/failing_process.py b/neutron/tests/unit/agent/linux/failing_process.py
deleted file mode 100644 (file)
index 29547ca..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import sys
-
-
-def main():
-    filename = sys.argv[1]
-    if not os.path.exists(filename):
-        sys.exit(1)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/neutron/tests/unit/agent/linux/test_async_process.py b/neutron/tests/unit/agent/linux/test_async_process.py
deleted file mode 100644 (file)
index db0321d..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import signal
-
-import eventlet.event
-import eventlet.queue
-import eventlet.timeout
-import mock
-import testtools
-
-from neutron.agent.linux import async_process
-from neutron.agent.linux import utils
-from neutron.tests import base
-from neutron.tests.unit.agent.linux import failing_process
-
-
-class TestAsyncProcess(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestAsyncProcess, self).setUp()
-        self.proc = async_process.AsyncProcess(['fake'])
-
-    def test_constructor_raises_exception_for_negative_respawn_interval(self):
-        with testtools.ExpectedException(ValueError):
-            async_process.AsyncProcess(['fake'], respawn_interval=-1)
-
-    def test__spawn(self):
-        expected_process = 'Foo'
-        proc = self.proc
-        with mock.patch.object(utils, 'create_process') as mock_create_process:
-            mock_create_process.return_value = [expected_process, None]
-            with mock.patch('eventlet.spawn') as mock_spawn:
-                proc._spawn()
-
-        self.assertTrue(self.proc._is_running)
-        self.assertIsInstance(proc._kill_event, eventlet.event.Event)
-        self.assertEqual(proc._process, expected_process)
-        mock_spawn.assert_has_calls([
-            mock.call(proc._watch_process,
-                      proc._read_stdout,
-                      proc._kill_event),
-            mock.call(proc._watch_process,
-                      proc._read_stderr,
-                      proc._kill_event),
-        ])
-        self.assertEqual(len(proc._watchers), 2)
-
-    def test__handle_process_error_kills_without_respawn(self):
-        with mock.patch.object(self.proc, '_kill') as kill:
-            self.proc._handle_process_error()
-
-        kill.assert_has_calls([mock.call(signal.SIGKILL)])
-
-    def test__handle_process_error_kills_with_respawn(self):
-        self.proc.respawn_interval = 1
-        with mock.patch.object(self.proc, '_kill') as kill:
-            with mock.patch.object(self.proc, '_spawn') as spawn:
-                with mock.patch('eventlet.sleep') as sleep:
-                    self.proc._handle_process_error()
-
-        kill.assert_has_calls([mock.call(signal.SIGKILL)])
-        sleep.assert_has_calls([mock.call(self.proc.respawn_interval)])
-        spawn.assert_called_once_with()
-
-    def test__handle_process_error_no_crash_if_started(self):
-        self.proc._is_running = True
-        with mock.patch.object(self.proc, '_kill'):
-            with mock.patch.object(self.proc, '_spawn') as mock_spawn:
-                self.proc._handle_process_error()
-                mock_spawn.assert_not_called()
-
-    def _watch_process_exception(self):
-        raise Exception('Error!')
-
-    def _test__watch_process(self, callback, kill_event):
-        self.proc._is_running = True
-        self.proc._kill_event = kill_event
-        # Ensure the test times out eventually if the watcher loops endlessly
-        with eventlet.timeout.Timeout(5):
-            with mock.patch.object(self.proc,
-                                   '_handle_process_error') as func:
-                self.proc._watch_process(callback, kill_event)
-
-        if not kill_event.ready():
-            func.assert_called_once_with()
-
-    def test__watch_process_exits_on_callback_failure(self):
-        self._test__watch_process(lambda: None, eventlet.event.Event())
-
-    def test__watch_process_exits_on_exception(self):
-        self._test__watch_process(self._watch_process_exception,
-                                  eventlet.event.Event())
-        with mock.patch.object(self.proc,
-                               '_handle_process_error') as func:
-            self.proc._watch_process(self._watch_process_exception,
-                                     self.proc._kill_event)
-            func.assert_not_called()
-
-    def test__watch_process_exits_on_sent_kill_event(self):
-        kill_event = eventlet.event.Event()
-        kill_event.send()
-        self._test__watch_process(None, kill_event)
-
-    def _test_read_output_queues_and_returns_result(self, output):
-        queue = eventlet.queue.LightQueue()
-        mock_stream = mock.Mock()
-        with mock.patch.object(mock_stream, 'readline') as mock_readline:
-            mock_readline.return_value = output
-            result = self.proc._read(mock_stream, queue)
-
-        if output:
-            self.assertEqual(output, result)
-            self.assertEqual(output, queue.get_nowait())
-        else:
-            self.assertFalse(result)
-            self.assertTrue(queue.empty())
-
-    def test__read_queues_and_returns_output(self):
-        self._test_read_output_queues_and_returns_result('foo')
-
-    def test__read_returns_none_for_missing_output(self):
-        self._test_read_output_queues_and_returns_result('')
-
-    def test_start_raises_exception_if_process_already_started(self):
-        self.proc._is_running = True
-        with testtools.ExpectedException(async_process.AsyncProcessException):
-            self.proc.start()
-
-    def test_start_invokes__spawn(self):
-        with mock.patch.object(self.proc, '_spawn') as mock_start:
-            self.proc.start()
-
-        mock_start.assert_called_once_with()
-
-    def test__iter_queue_returns_empty_list_for_empty_queue(self):
-        result = list(self.proc._iter_queue(eventlet.queue.LightQueue(),
-                                            False))
-        self.assertEqual([], result)
-
-    def test__iter_queue_returns_queued_data(self):
-        queue = eventlet.queue.LightQueue()
-        queue.put('foo')
-        result = list(self.proc._iter_queue(queue, False))
-        self.assertEqual(result, ['foo'])
-
-    def _test_iter_output_calls_iter_queue_on_output_queue(self, output_type):
-        expected_value = 'foo'
-        with mock.patch.object(self.proc, '_iter_queue') as mock_iter_queue:
-            mock_iter_queue.return_value = expected_value
-            target_func = getattr(self.proc, 'iter_%s' % output_type, None)
-            value = target_func()
-
-        self.assertEqual(value, expected_value)
-        queue = getattr(self.proc, '_%s_lines' % output_type, None)
-        mock_iter_queue.assert_called_with(queue, False)
-
-    def test_iter_stdout(self):
-        self._test_iter_output_calls_iter_queue_on_output_queue('stdout')
-
-    def test_iter_stderr(self):
-        self._test_iter_output_calls_iter_queue_on_output_queue('stderr')
-
-    def test__kill_targets_process_for_pid(self):
-        pid = 1
-
-        with mock.patch.object(self.proc, '_kill_event'
-                               ) as mock_kill_event,\
-                mock.patch.object(utils, 'get_root_helper_child_pid',
-                                  return_value=pid),\
-                mock.patch.object(self.proc, '_kill_process'
-                                  ) as mock_kill_process,\
-                mock.patch.object(self.proc, '_process'):
-            self.proc._kill(signal.SIGKILL)
-
-            self.assertIsNone(self.proc._kill_event)
-            self.assertFalse(self.proc._is_running)
-
-        mock_kill_event.send.assert_called_once_with()
-        if pid:
-            mock_kill_process.assert_called_once_with(pid, signal.SIGKILL)
-
-    def _test__kill_process(self, pid, expected, exception_message=None,
-                            kill_signal=signal.SIGKILL):
-        self.proc.run_as_root = True
-        if exception_message:
-            exc = RuntimeError(exception_message)
-        else:
-            exc = None
-        with mock.patch.object(utils, 'execute',
-                               side_effect=exc) as mock_execute:
-            actual = self.proc._kill_process(pid, kill_signal)
-
-        self.assertEqual(expected, actual)
-        mock_execute.assert_called_with(['kill', '-%d' % kill_signal, pid],
-                                        run_as_root=self.proc.run_as_root)
-
-    def test__kill_process_returns_true_for_valid_pid(self):
-        self._test__kill_process('1', True)
-
-    def test__kill_process_returns_true_for_stale_pid(self):
-        self._test__kill_process('1', True, 'No such process')
-
-    def test__kill_process_returns_false_for_execute_exception(self):
-        self._test__kill_process('1', False, 'Invalid')
-
-    def test_kill_process_with_different_signal(self):
-        self._test__kill_process('1', True, kill_signal=signal.SIGTERM)
-
-    def test_stop_calls_kill_with_provided_signal_number(self):
-        self.proc._is_running = True
-        with mock.patch.object(self.proc, '_kill') as mock_kill:
-            self.proc.stop(kill_signal=signal.SIGTERM)
-        mock_kill.assert_called_once_with(signal.SIGTERM)
-
-    def test_stop_raises_exception_if_already_started(self):
-        with testtools.ExpectedException(async_process.AsyncProcessException):
-            self.proc.stop()
-
-    def test_cmd(self):
-        for expected, cmd in (('ls -l file', ['ls', '-l', 'file']),
-                              ('fake', ['fake'])):
-            proc = async_process.AsyncProcess(cmd)
-            self.assertEqual(expected, proc.cmd)
-
-
-class TestAsyncProcessLogging(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestAsyncProcessLogging, self).setUp()
-        self.log_mock = mock.patch.object(async_process, 'LOG').start()
-
-    def _test__read_stdout_logging(self, enable):
-        proc = async_process.AsyncProcess(['fakecmd'], log_output=enable)
-        with mock.patch.object(proc, '_read', return_value='fakedata'),\
-                mock.patch.object(proc, '_process'):
-            proc._read_stdout()
-        self.assertEqual(enable, self.log_mock.debug.called)
-
-    def _test__read_stderr_logging(self, enable):
-        proc = async_process.AsyncProcess(['fake'], log_output=enable)
-        with mock.patch.object(proc, '_read', return_value='fakedata'),\
-                mock.patch.object(proc, '_process'):
-            proc._read_stderr()
-        self.assertEqual(enable, self.log_mock.error.called)
-
-    def test__read_stdout_logging_enabled(self):
-        self._test__read_stdout_logging(enable=True)
-
-    def test__read_stdout_logging_disabled(self):
-        self._test__read_stdout_logging(enable=False)
-
-    def test__read_stderr_logging_enabled(self):
-        self._test__read_stderr_logging(enable=True)
-
-    def test__read_stderr_logging_disabled(self):
-        self._test__read_stderr_logging(enable=False)
-
-
-class TestAsyncProcessDieOnError(base.BaseTestCase):
-
-    def test__read_stderr_returns_none_on_error(self):
-        proc = async_process.AsyncProcess(['fakecmd'], die_on_error=True)
-        with mock.patch.object(proc, '_read', return_value='fakedata'),\
-                mock.patch.object(proc, '_process'):
-            self.assertIsNone(proc._read_stderr())
-
-
-class TestFailingAsyncProcess(base.BaseTestCase):
-    def setUp(self):
-        super(TestFailingAsyncProcess, self).setUp()
-        path = self.get_temp_file_path('async.tmp', self.get_new_temp_dir())
-        self.process = async_process.AsyncProcess(['python',
-                                                   failing_process.__file__,
-                                                   path],
-                                                  respawn_interval=0)
-
-    def test_failing_async_process_handle_error_once(self):
-        with mock.patch.object(self.process, '_handle_process_error')\
-                as handle_error_mock:
-            self.process.start()
-            self.process._process.wait()
-            # Wait for the monitor process to complete
-            for thread in self.process._watchers:
-                thread.wait()
-            self.assertEqual(1, handle_error_mock.call_count)
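
Read as a whole, the suite documents the AsyncProcess life cycle: start() spawns the command plus two green-thread watchers, output lines are queued and drained via iter_stdout()/iter_stderr(), stop() accepts a kill signal, and a non-negative respawn_interval makes the wrapper self-healing. A usage sketch under those assumptions; the tail command and log path are arbitrary examples:

    import signal

    from neutron.agent.linux import async_process

    proc = async_process.AsyncProcess(['tail', '-f', '/var/log/syslog'],
                                      respawn_interval=0,  # respawn at once
                                      log_output=True)
    proc.start()
    for line in proc.iter_stdout():     # drain lines queued by the watcher
        print(line)
    proc.stop(kill_signal=signal.SIGTERM)
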
diff --git a/neutron/tests/unit/agent/linux/test_bridge_lib.py b/neutron/tests/unit/agent/linux/test_bridge_lib.py
deleted file mode 100644 (file)
index 0c28ac8..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2015 Intel Corporation.
-# Copyright 2015 Isaku Yamahata <isaku.yamahata at intel com>
-#                               <isaku.yamahata at gmail com>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.agent.linux import bridge_lib
-from neutron.tests import base
-
-
-class BridgeLibTest(base.BaseTestCase):
-    """A test suite to exercise the bridge libraries """
-    _NAMESPACE = 'test-namespace'
-    _BR_NAME = 'test-br'
-    _IF_NAME = 'test-if'
-
-    def setUp(self):
-        super(BridgeLibTest, self).setUp()
-        ip_wrapper = mock.patch('neutron.agent.linux.ip_lib.IPWrapper').start()
-        self.execute = ip_wrapper.return_value.netns.execute
-
-    def _verify_bridge_mock(self, cmd):
-        self.execute.assert_called_once_with(cmd, run_as_root=True)
-        self.execute.reset_mock()
-
-    def _verify_bridge_mock_check_exit_code(self, cmd):
-        self.execute.assert_called_once_with(cmd, run_as_root=True,
-                                             check_exit_code=True)
-        self.execute.reset_mock()
-
-    def test_is_bridged_interface(self):
-        exists = lambda path: path == "/sys/class/net/tapOK/brport"
-        with mock.patch('os.path.exists', side_effect=exists):
-            self.assertTrue(bridge_lib.is_bridged_interface("tapOK"))
-            self.assertFalse(bridge_lib.is_bridged_interface("tapKO"))
-
-    def test_get_interface_bridge(self):
-        with mock.patch('os.readlink', side_effect=["prefix/br0", OSError()]):
-            br = bridge_lib.BridgeDevice.get_interface_bridge('tap0')
-            self.assertIsInstance(br, bridge_lib.BridgeDevice)
-            self.assertEqual("br0", br.name)
-
-            br = bridge_lib.BridgeDevice.get_interface_bridge('tap0')
-            self.assertIsNone(br)
-
-    def _test_br(self, namespace=None):
-        br = bridge_lib.BridgeDevice.addbr(self._BR_NAME, namespace)
-        self.assertEqual(namespace, br.namespace)
-        self._verify_bridge_mock(['brctl', 'addbr', self._BR_NAME])
-
-        br.setfd(0)
-        self._verify_bridge_mock(['brctl', 'setfd', self._BR_NAME, '0'])
-
-        br.disable_stp()
-        self._verify_bridge_mock(['brctl', 'stp', self._BR_NAME, 'off'])
-
-        br.disable_ipv6()
-        cmd = 'net.ipv6.conf.%s.disable_ipv6=1' % self._BR_NAME
-        self._verify_bridge_mock_check_exit_code(['sysctl', '-w', cmd])
-
-        br.addif(self._IF_NAME)
-        self._verify_bridge_mock(
-            ['brctl', 'addif', self._BR_NAME, self._IF_NAME])
-
-        br.delif(self._IF_NAME)
-        self._verify_bridge_mock(
-            ['brctl', 'delif', self._BR_NAME, self._IF_NAME])
-
-        br.delbr()
-        self._verify_bridge_mock(['brctl', 'delbr', self._BR_NAME])
-
-    def test_addbr_with_namespace(self):
-        self._test_br(self._NAMESPACE)
-
-    def test_addbr_without_namespace(self):
-        self._test_br()
-
-    def test_owns_interface(self):
-        br = bridge_lib.BridgeDevice('br-int')
-        exists = lambda path: path == "/sys/class/net/br-int/brif/abc"
-        with mock.patch('os.path.exists', side_effect=exists):
-            self.assertTrue(br.owns_interface("abc"))
-            self.assertFalse(br.owns_interface("def"))
-
-    def test_get_interfaces(self):
-        br = bridge_lib.BridgeDevice('br-int')
-        interfaces = ["tap1", "tap2"]
-        with mock.patch('os.listdir', side_effect=[interfaces, OSError()]):
-            self.assertEqual(interfaces, br.get_interfaces())
-            self.assertEqual([], br.get_interfaces())
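
Each BridgeDevice helper wraps a single brctl (or, for disable_ipv6, sysctl) invocation, as the expected command lists above show. In sequence, the operations this suite covers (the bridge and interface names are the test fixtures):

    from neutron.agent.linux import bridge_lib

    br = bridge_lib.BridgeDevice.addbr('test-br')  # brctl addbr test-br
    br.setfd(0)                                    # brctl setfd test-br 0
    br.disable_stp()                               # brctl stp test-br off
    br.disable_ipv6()     # sysctl -w net.ipv6.conf.test-br.disable_ipv6=1
    br.addif('test-if')                            # brctl addif test-br test-if
    br.delif('test-if')                            # brctl delif test-br test-if
    br.delbr()                                     # brctl delbr test-br
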
diff --git a/neutron/tests/unit/agent/linux/test_daemon.py b/neutron/tests/unit/agent/linux/test_daemon.py
deleted file mode 100644 (file)
index c093909..0000000
+++ /dev/null
@@ -1,316 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import logging
-from logging import handlers
-import os
-import sys
-
-import mock
-import testtools
-
-from neutron.agent.linux import daemon
-from neutron.common import exceptions
-from neutron.tests import base
-from neutron.tests import tools
-
-FAKE_FD = 8
-
-
-class FakeEntry(object):
-    def __init__(self, name, value):
-        setattr(self, name, value)
-
-
-class TestUnwatchLog(base.BaseTestCase):
-
-    def test_unwatch_log(self):
-        stream_handler = logging.StreamHandler()
-        logger = logging.Logger('fake')
-        logger.addHandler(stream_handler)
-        logger.addHandler(handlers.WatchedFileHandler('/tmp/filename1'))
-
-        with mock.patch('logging.getLogger', return_value=logger):
-            daemon.unwatch_log()
-            self.assertEqual(2, len(logger.handlers))
-            logger.handlers.remove(stream_handler)
-            observed = logger.handlers[0]
-            self.assertEqual(logging.FileHandler, type(observed))
-            self.assertEqual('/tmp/filename1', observed.baseFilename)
-
-
-class TestPrivileges(base.BaseTestCase):
-    def test_setuid_with_name(self):
-        with mock.patch('pwd.getpwnam', return_value=FakeEntry('pw_uid', 123)):
-            with mock.patch('os.setuid') as setuid_mock:
-                daemon.setuid('user')
-                setuid_mock.assert_called_once_with(123)
-
-    def test_setuid_with_id(self):
-        with mock.patch('os.setuid') as setuid_mock:
-            daemon.setuid('321')
-            setuid_mock.assert_called_once_with(321)
-
-    def test_setuid_fails(self):
-        with mock.patch('os.setuid', side_effect=OSError()):
-            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
-                self.assertRaises(exceptions.FailToDropPrivilegesExit,
-                                  daemon.setuid, '321')
-                log_critical.assert_called_once_with(mock.ANY)
-
-    def test_setgid_with_name(self):
-        with mock.patch('grp.getgrnam', return_value=FakeEntry('gr_gid', 123)):
-            with mock.patch('os.setgid') as setgid_mock:
-                daemon.setgid('group')
-                setgid_mock.assert_called_once_with(123)
-
-    def test_setgid_with_id(self):
-        with mock.patch('os.setgid') as setgid_mock:
-            daemon.setgid('321')
-            setgid_mock.assert_called_once_with(321)
-
-    def test_setgid_fails(self):
-        with mock.patch('os.setgid', side_effect=OSError()):
-            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
-                self.assertRaises(exceptions.FailToDropPrivilegesExit,
-                                  daemon.setgid, '321')
-                log_critical.assert_called_once_with(mock.ANY)
-
-    @mock.patch.object(os, 'setgroups')
-    @mock.patch.object(daemon, 'setgid')
-    @mock.patch.object(daemon, 'setuid')
-    def test_drop_no_privileges(self, mock_setuid, mock_setgid,
-                                mock_setgroups):
-        daemon.drop_privileges()
-        for cursor in (mock_setuid, mock_setgid, mock_setgroups):
-            self.assertFalse(cursor.called)
-
-    @mock.patch.object(os, 'geteuid', return_value=0)
-    @mock.patch.object(os, 'setgroups')
-    @mock.patch.object(daemon, 'setgid')
-    @mock.patch.object(daemon, 'setuid')
-    def _test_drop_privileges(self, setuid, setgid, setgroups,
-                              geteuid, user=None, group=None):
-        daemon.drop_privileges(user=user, group=group)
-        if user:
-            setuid.assert_called_once_with(user)
-        else:
-            self.assertFalse(setuid.called)
-        if group:
-            setgroups.assert_called_once_with([])
-            setgid.assert_called_once_with(group)
-        else:
-            self.assertFalse(setgroups.called)
-            self.assertFalse(setgid.called)
-
-    def test_drop_user_privileges(self):
-        self._test_drop_privileges(user='user')
-
-    def test_drop_uid_privileges(self):
-        self._test_drop_privileges(user='321')
-
-    def test_drop_group_privileges(self):
-        self._test_drop_privileges(group='group')
-
-    def test_drop_gid_privileges(self):
-        self._test_drop_privileges(group='654')
-
-    def test_drop_privileges_without_root_permissions(self):
-        with mock.patch('os.geteuid', return_value=1):
-            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
-                self.assertRaises(exceptions.FailToDropPrivilegesExit,
-                                  daemon.drop_privileges, 'user')
-                log_critical.assert_called_once_with(mock.ANY)
-
-
-class TestPidfile(base.BaseTestCase):
-    def setUp(self):
-        super(TestPidfile, self).setUp()
-        self.os_p = mock.patch.object(daemon, 'os')
-        self.os = self.os_p.start()
-        self.os.open.return_value = FAKE_FD
-
-        self.fcntl_p = mock.patch.object(daemon, 'fcntl')
-        self.fcntl = self.fcntl_p.start()
-        self.fcntl.flock.return_value = 0
-
-    def test_init(self):
-        self.os.O_CREAT = os.O_CREAT
-        self.os.O_RDWR = os.O_RDWR
-
-        daemon.Pidfile('thefile', 'python')
-        self.os.open.assert_called_once_with('thefile', os.O_CREAT | os.O_RDWR)
-        self.fcntl.flock.assert_called_once_with(FAKE_FD, self.fcntl.LOCK_EX |
-                                                 self.fcntl.LOCK_NB)
-
-    def test_init_open_fail(self):
-        self.os.open.side_effect = IOError
-
-        with mock.patch.object(daemon.sys, 'stderr'):
-            with testtools.ExpectedException(SystemExit):
-                daemon.Pidfile('thefile', 'python')
-
-    def test_unlock(self):
-        p = daemon.Pidfile('thefile', 'python')
-        p.unlock()
-        self.fcntl.flock.assert_has_calls([
-            mock.call(FAKE_FD, self.fcntl.LOCK_EX | self.fcntl.LOCK_NB),
-            mock.call(FAKE_FD, self.fcntl.LOCK_UN)]
-        )
-
-    def test_write(self):
-        p = daemon.Pidfile('thefile', 'python')
-        p.write(34)
-
-        self.os.assert_has_calls([
-            mock.call.ftruncate(FAKE_FD, 0),
-            mock.call.write(FAKE_FD, '34'),
-            mock.call.fsync(FAKE_FD)]
-        )
-
-    def test_read(self):
-        self.os.read.return_value = '34'
-        p = daemon.Pidfile('thefile', 'python')
-        self.assertEqual(34, p.read())
-
-    def test_is_running(self):
-        mock_open = self.useFixture(
-            tools.OpenFixture('/proc/34/cmdline', 'python')).mock_open
-        p = daemon.Pidfile('thefile', 'python')
-
-        with mock.patch.object(p, 'read') as read:
-            read.return_value = 34
-            self.assertTrue(p.is_running())
-
-        mock_open.assert_called_once_with('/proc/34/cmdline', 'r')
-
-    def test_is_running_uuid_true(self):
-        mock_open = self.useFixture(
-            tools.OpenFixture('/proc/34/cmdline', 'python 1234')).mock_open
-        p = daemon.Pidfile('thefile', 'python', uuid='1234')
-
-        with mock.patch.object(p, 'read') as read:
-            read.return_value = 34
-            self.assertTrue(p.is_running())
-
-        mock_open.assert_called_once_with('/proc/34/cmdline', 'r')
-
-    def test_is_running_uuid_false(self):
-        mock_open = self.useFixture(
-            tools.OpenFixture('/proc/34/cmdline', 'python 1234')).mock_open
-        p = daemon.Pidfile('thefile', 'python', uuid='6789')
-
-        with mock.patch.object(p, 'read') as read:
-            read.return_value = 34
-            self.assertFalse(p.is_running())
-
-        mock_open.assert_called_once_with('/proc/34/cmdline', 'r')
-
-
-class TestDaemon(base.BaseTestCase):
-    def setUp(self):
-        super(TestDaemon, self).setUp()
-        self.os_p = mock.patch.object(daemon, 'os')
-        self.os = self.os_p.start()
-
-        self.pidfile_p = mock.patch.object(daemon, 'Pidfile')
-        self.pidfile = self.pidfile_p.start()
-
-    def test_init(self):
-        d = daemon.Daemon('pidfile')
-        self.assertEqual(d.procname, 'python')
-
-    def test_init_nopidfile(self):
-        d = daemon.Daemon(pidfile=None)
-        self.assertEqual(d.procname, 'python')
-        self.assertFalse(self.pidfile.called)
-
-    def test_fork_parent(self):
-        self.os.fork.return_value = 1
-        d = daemon.Daemon('pidfile')
-        d._fork()
-        self.os._exit.assert_called_once_with(mock.ANY)
-
-    def test_fork_child(self):
-        self.os.fork.return_value = 0
-        d = daemon.Daemon('pidfile')
-        self.assertIsNone(d._fork())
-
-    def test_fork_error(self):
-        self.os.fork.side_effect = OSError(1)
-        with mock.patch.object(daemon.sys, 'stderr'):
-            with testtools.ExpectedException(SystemExit):
-                d = daemon.Daemon('pidfile', 'stdin')
-                d._fork()
-
-    def test_daemonize(self):
-        self.os.devnull = '/dev/null'
-
-        d = daemon.Daemon('pidfile')
-        with mock.patch.object(d, '_fork') as fork:
-            with mock.patch.object(daemon, 'atexit') as atexit:
-                with mock.patch.object(daemon, 'signal') as signal:
-                    signal.SIGTERM = 15
-                    with mock.patch.object(daemon, 'sys') as sys:
-                        sys.stdin.fileno.return_value = 0
-                        sys.stdout.fileno.return_value = 1
-                        sys.stderr.fileno.return_value = 2
-                        d.daemonize()
-
-                    signal.signal.assert_called_once_with(15, d.handle_sigterm)
-                atexit.register.assert_called_once_with(d.delete_pid)
-            fork.assert_has_calls([mock.call(), mock.call()])
-
-        self.os.assert_has_calls([
-            mock.call.chdir('/'),
-            mock.call.setsid(),
-            mock.call.umask(0),
-            mock.call.dup2(mock.ANY, 0),
-            mock.call.dup2(mock.ANY, 1),
-            mock.call.dup2(mock.ANY, 2),
-            mock.call.getpid()]
-        )
-
-    def test_delete_pid(self):
-        self.pidfile.return_value.__str__.return_value = 'pidfile'
-        d = daemon.Daemon('pidfile')
-        d.delete_pid()
-        self.os.remove.assert_called_once_with('pidfile')
-
-    def test_handle_sigterm(self):
-        d = daemon.Daemon('pidfile')
-        with mock.patch.object(daemon, 'sys') as sys:
-            d.handle_sigterm(15, 1234)
-            sys.exit.assert_called_once_with(0)
-
-    def test_start(self):
-        self.pidfile.return_value.is_running.return_value = False
-        d = daemon.Daemon('pidfile')
-
-        with mock.patch.object(d, 'daemonize') as daemonize:
-            with mock.patch.object(d, 'run') as run:
-                d.start()
-                run.assert_called_once_with()
-                daemonize.assert_called_once_with()
-
-    def test_start_running(self):
-        self.pidfile.return_value.is_running.return_value = True
-        d = daemon.Daemon('pidfile')
-
-        with mock.patch.object(daemon.sys, 'stderr'):
-            with mock.patch.object(d, 'daemonize') as daemonize:
-                with testtools.ExpectedException(SystemExit):
-                    d.start()
-                self.assertFalse(daemonize.called)
diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py
deleted file mode 100644 (file)
index 53ecd60..0000000
+++ /dev/null
@@ -1,2117 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-import mock
-import netaddr
-from oslo_config import cfg
-
-from neutron.agent.common import config
-from neutron.agent.dhcp import config as dhcp_config
-from neutron.agent.linux import dhcp
-from neutron.agent.linux import external_process
-from neutron.common import config as base_config
-from neutron.common import constants
-from neutron.common import utils
-from neutron.extensions import extra_dhcp_opt as edo_ext
-from neutron.tests import base
-from neutron.tests import tools
-
-
-class FakeIPAllocation(object):
-    def __init__(self, address, subnet_id=None):
-        self.ip_address = address
-        self.subnet_id = subnet_id
-
-
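-# Mimics a DNS assignment: without an explicit dns_name the hostname is
-# derived from the address (e.g. '192.168.0.2' -> 'host-192-168-0-2',
-# fqdn 'host-192-168-0-2.openstacklocal.').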
-class FakeDNSAssignment(object):
-    def __init__(self, ip_address, dns_name='', domain='openstacklocal'):
-        if dns_name:
-            self.hostname = dns_name
-        else:
-            self.hostname = 'host-%s' % ip_address.replace(
-                '.', '-').replace(':', '-')
-        self.ip_address = ip_address
-        self.fqdn = self.hostname
-        if domain:
-            self.fqdn = '%s.%s.' % (self.hostname, domain)
-
-
-class DhcpOpt(object):
-    def __init__(self, **kwargs):
-        self.__dict__.update(ip_version=4)
-        self.__dict__.update(kwargs)
-
-    def __str__(self):
-        return str(self.__dict__)
-
-
-# A base class where class attributes can also be accessed by treating
-# an instance as a dict.
-class Dictable(object):
-    def __getitem__(self, k):
-        return self.__class__.__dict__.get(k)
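-# e.g. FakeV4Subnet()['dns_nameservers'] returns the class attribute
-# ['8.8.8.8']; a missing key silently yields None instead of KeyError.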
-
-
-class FakeDhcpPort(object):
-    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa'
-    admin_state_up = True
-    device_owner = constants.DEVICE_OWNER_DHCP
-    fixed_ips = [FakeIPAllocation('192.168.0.1',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-    mac_address = '00:00:80:aa:bb:ee'
-    device_id = 'fake_dhcp_port'
-
-    def __init__(self):
-        self.extra_dhcp_opts = []
-
-
-class FakeReservedPort(object):
-    admin_state_up = True
-    device_owner = constants.DEVICE_OWNER_DHCP
-    fixed_ips = [FakeIPAllocation('192.168.0.6',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-    mac_address = '00:00:80:aa:bb:ee'
-    device_id = constants.DEVICE_ID_RESERVED_DHCP_PORT
-
-    def __init__(self, id='reserved-aaaa-aaaa-aaaa-aaaaaaaaaaa'):
-        self.extra_dhcp_opts = []
-        self.id = id
-
-
-class FakePort1(object):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-    admin_state_up = True
-    device_owner = 'foo1'
-    fixed_ips = [FakeIPAllocation('192.168.0.2',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-    mac_address = '00:00:80:aa:bb:cc'
-    device_id = 'fake_port1'
-
-    def __init__(self, domain='openstacklocal'):
-        self.extra_dhcp_opts = []
-        self.dns_assignment = [FakeDNSAssignment('192.168.0.2', domain=domain)]
-
-
-class FakePort2(object):
-    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
-    admin_state_up = False
-    device_owner = 'foo2'
-    fixed_ips = [FakeIPAllocation('192.168.0.3',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-    mac_address = '00:00:f3:aa:bb:cc'
-    device_id = 'fake_port2'
-    dns_assignment = [FakeDNSAssignment('192.168.0.3')]
-
-    def __init__(self):
-        self.extra_dhcp_opts = []
-
-
-class FakePort3(object):
-    id = '44444444-4444-4444-4444-444444444444'
-    admin_state_up = True
-    device_owner = 'foo3'
-    fixed_ips = [FakeIPAllocation('192.168.0.4',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd'),
-                 FakeIPAllocation('192.168.1.2',
-                                  'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')]
-    dns_assignment = [FakeDNSAssignment('192.168.0.4'),
-                      FakeDNSAssignment('192.168.1.2')]
-    mac_address = '00:00:0f:aa:bb:cc'
-    device_id = 'fake_port3'
-
-    def __init__(self):
-        self.extra_dhcp_opts = []
-
-
-class FakePort4(object):
-
-    id = 'gggggggg-gggg-gggg-gggg-gggggggggggg'
-    admin_state_up = False
-    device_owner = 'foo3'
-    fixed_ips = [FakeIPAllocation('192.168.0.4',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd'),
-                 FakeIPAllocation('ffda:3ba5:a17a:4ba3:0216:3eff:fec2:771d',
-                                  'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')]
-    dns_assignment = [
-        FakeDNSAssignment('192.168.0.4'),
-        FakeDNSAssignment('ffda:3ba5:a17a:4ba3:0216:3eff:fec2:771d')]
-    mac_address = '00:16:3E:C2:77:1D'
-    device_id = 'fake_port4'
-
-    def __init__(self):
-        self.extra_dhcp_opts = []
-
-
-class FakePort5(object):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeee'
-    admin_state_up = True
-    device_owner = 'foo5'
-    fixed_ips = [FakeIPAllocation('192.168.0.5',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-    dns_assignment = [FakeDNSAssignment('192.168.0.5')]
-    mac_address = '00:00:0f:aa:bb:55'
-    device_id = 'fake_port5'
-
-    def __init__(self):
-        self.extra_dhcp_opts = [
-            DhcpOpt(opt_name=edo_ext.CLIENT_ID,
-                    opt_value='test5')]
-
-
-class FakePort6(object):
-    id = 'ccccccccc-cccc-cccc-cccc-ccccccccc'
-    admin_state_up = True
-    device_owner = 'foo6'
-    fixed_ips = [FakeIPAllocation('192.168.0.6',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-    dns_assignment = [FakeDNSAssignment('192.168.0.6')]
-    mac_address = '00:00:0f:aa:bb:66'
-    device_id = 'fake_port6'
-
-    def __init__(self):
-        self.extra_dhcp_opts = [
-            DhcpOpt(opt_name=edo_ext.CLIENT_ID,
-                    opt_value='test6',
-                    ip_version=4),
-            DhcpOpt(opt_name='dns-server',
-                    opt_value='123.123.123.45',
-                    ip_version=4)]
-
-
-class FakeV6Port(object):
-    id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh'
-    admin_state_up = True
-    device_owner = 'foo3'
-    fixed_ips = [FakeIPAllocation('fdca:3ba5:a17a:4ba3::2',
-                                  'ffffffff-ffff-ffff-ffff-ffffffffffff')]
-    mac_address = '00:00:f3:aa:bb:cc'
-    device_id = 'fake_port6'
-
-    def __init__(self, domain='openstacklocal'):
-        self.extra_dhcp_opts = []
-        self.dns_assignment = [FakeDNSAssignment('fdca:3ba5:a17a:4ba3::2',
-                               domain=domain)]
-
-
-class FakeV6PortExtraOpt(object):
-    id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh'
-    admin_state_up = True
-    device_owner = 'foo3'
-    fixed_ips = [FakeIPAllocation('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d',
-                                  'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')]
-    dns_assignment = [
-        FakeDNSAssignment('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d')]
-    mac_address = '00:16:3e:c2:77:1d'
-    device_id = 'fake_port6'
-
-    def __init__(self):
-        self.extra_dhcp_opts = [
-            DhcpOpt(opt_name='dns-server',
-                    opt_value='ffea:3ba5:a17a:4ba3::100',
-                    ip_version=6)]
-
-
-class FakeDualPortWithV6ExtraOpt(object):
-    id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh'
-    admin_state_up = True
-    device_owner = 'foo3'
-    fixed_ips = [FakeIPAllocation('192.168.0.3',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd'),
-                 FakeIPAllocation('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d',
-                                  'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')]
-    dns_assignment = [
-        FakeDNSAssignment('192.168.0.3'),
-        FakeDNSAssignment('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d')]
-    mac_address = '00:16:3e:c2:77:1d'
-    device_id = 'fake_port6'
-
-    def __init__(self):
-        self.extra_dhcp_opts = [
-            DhcpOpt(opt_name='dns-server',
-                    opt_value='ffea:3ba5:a17a:4ba3::100',
-                    ip_version=6)]
-
-
-class FakeDualPort(object):
-    id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh'
-    admin_state_up = True
-    device_owner = 'foo3'
-    fixed_ips = [FakeIPAllocation('192.168.0.3',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd'),
-                 FakeIPAllocation('fdca:3ba5:a17a:4ba3::3',
-                                  'ffffffff-ffff-ffff-ffff-ffffffffffff')]
-    mac_address = '00:00:0f:aa:bb:cc'
-    device_id = 'fake_dual_port'
-
-    def __init__(self, domain='openstacklocal'):
-        self.extra_dhcp_opts = []
-        self.dns_assignment = [FakeDNSAssignment('192.168.0.3', domain=domain),
-                               FakeDNSAssignment('fdca:3ba5:a17a:4ba3::3',
-                                                 domain=domain)]
-
-
-class FakeRouterPort(object):
-    id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr'
-    admin_state_up = True
-    device_owner = constants.DEVICE_OWNER_ROUTER_INTF
-    mac_address = '00:00:0f:rr:rr:rr'
-    device_id = 'fake_router_port'
-    dns_assignment = []
-
-    def __init__(self, dev_owner=constants.DEVICE_OWNER_ROUTER_INTF,
-                 ip_address='192.168.0.1', domain='openstacklocal'):
-        self.extra_dhcp_opts = []
-        self.device_owner = dev_owner
-        self.fixed_ips = [FakeIPAllocation(
-            ip_address, 'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-        self.dns_assignment = [FakeDNSAssignment(ip.ip_address, domain=domain)
-                               for ip in self.fixed_ips]
-
-
-class FakeRouterPort2(object):
-    id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr'
-    admin_state_up = True
-    device_owner = constants.DEVICE_OWNER_ROUTER_INTF
-    fixed_ips = [FakeIPAllocation('192.168.1.1',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-    dns_assignment = [FakeDNSAssignment('192.168.1.1')]
-    mac_address = '00:00:0f:rr:rr:r2'
-    device_id = 'fake_router_port2'
-
-    def __init__(self):
-        self.extra_dhcp_opts = []
-
-
-class FakePortMultipleAgents1(object):
-    id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr'
-    admin_state_up = True
-    device_owner = constants.DEVICE_OWNER_DHCP
-    fixed_ips = [FakeIPAllocation('192.168.0.5',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-    dns_assignment = [FakeDNSAssignment('192.168.0.5')]
-    mac_address = '00:00:0f:dd:dd:dd'
-    device_id = 'fake_multiple_agents_port'
-
-    def __init__(self):
-        self.extra_dhcp_opts = []
-
-
-class FakePortMultipleAgents2(object):
-    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-    admin_state_up = True
-    device_owner = constants.DEVICE_OWNER_DHCP
-    fixed_ips = [FakeIPAllocation('192.168.0.6',
-                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
-    dns_assignment = [FakeDNSAssignment('192.168.0.6')]
-    mac_address = '00:00:0f:ee:ee:ee'
-    device_id = 'fake_multiple_agents_port2'
-
-    def __init__(self):
-        self.extra_dhcp_opts = []
-
-
-class FakeV4HostRoute(object):
-    destination = '20.0.0.1/24'
-    nexthop = '20.0.0.1'
-
-
-class FakeV4HostRouteGateway(object):
-    destination = constants.IPv4_ANY
-    nexthop = '10.0.0.1'
-
-
-class FakeV6HostRoute(object):
-    destination = '2001:0200:feed:7ac0::/64'
-    nexthop = '2001:0200:feed:7ac0::1'
-
-
-class FakeV4Subnet(Dictable):
-    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
-    ip_version = 4
-    cidr = '192.168.0.0/24'
-    gateway_ip = '192.168.0.1'
-    enable_dhcp = True
-    host_routes = [FakeV4HostRoute]
-    dns_nameservers = ['8.8.8.8']
-
-
-class FakeV4Subnet2(FakeV4Subnet):
-    cidr = '192.168.1.0/24'
-    gateway_ip = '192.168.1.1'
-    host_routes = []
-
-
-class FakeV4MetadataSubnet(FakeV4Subnet):
-    cidr = '169.254.169.254/30'
-    gateway_ip = '169.254.169.253'
-    host_routes = []
-    dns_nameservers = []
-
-
-class FakeV4SubnetGatewayRoute(FakeV4Subnet):
-    host_routes = [FakeV4HostRouteGateway]
-
-
-class FakeV4SubnetMultipleAgentsWithoutDnsProvided(FakeV4Subnet):
-    dns_nameservers = []
-    host_routes = []
-
-
-class FakeV4SubnetAgentWithManyDnsProvided(FakeV4Subnet):
-    dns_nameservers = ['2.2.2.2', '9.9.9.9', '1.1.1.1',
-                       '3.3.3.3']
-    host_routes = []
-
-
-class FakeV4MultipleAgentsWithoutDnsProvided(object):
-    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
-    subnets = [FakeV4SubnetMultipleAgentsWithoutDnsProvided()]
-    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(),
-             FakePortMultipleAgents1(), FakePortMultipleAgents2()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeV4AgentWithManyDnsProvided(object):
-    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
-    subnets = [FakeV4SubnetAgentWithManyDnsProvided()]
-    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(),
-             FakePortMultipleAgents1()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeV4SubnetMultipleAgentsWithDnsProvided(FakeV4Subnet):
-    host_routes = []
-
-
-class FakeV4MultipleAgentsWithDnsProvided(object):
-    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
-    subnets = [FakeV4SubnetMultipleAgentsWithDnsProvided()]
-    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(),
-             FakePortMultipleAgents1(), FakePortMultipleAgents2()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeV6Subnet(object):
-    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
-    ip_version = 6
-    cidr = 'fdca:3ba5:a17a:4ba3::/64'
-    gateway_ip = 'fdca:3ba5:a17a:4ba3::1'
-    enable_dhcp = True
-    host_routes = [FakeV6HostRoute]
-    dns_nameservers = ['2001:0200:feed:7ac0::1']
-    ipv6_ra_mode = None
-    ipv6_address_mode = None
-
-
-class FakeV4SubnetNoDHCP(object):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-    ip_version = 4
-    cidr = '192.168.1.0/24'
-    gateway_ip = '192.168.1.1'
-    enable_dhcp = False
-    host_routes = []
-    dns_nameservers = []
-
-
-class FakeV6SubnetDHCPStateful(Dictable):
-    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
-    ip_version = 6
-    cidr = 'fdca:3ba5:a17a:4ba3::/64'
-    gateway_ip = 'fdca:3ba5:a17a:4ba3::1'
-    enable_dhcp = True
-    host_routes = [FakeV6HostRoute]
-    dns_nameservers = ['2001:0200:feed:7ac0::1']
-    ipv6_ra_mode = None
-    ipv6_address_mode = constants.DHCPV6_STATEFUL
-
-
-class FakeV6SubnetSlaac(object):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-    ip_version = 6
-    cidr = 'ffda:3ba5:a17a:4ba3::/64'
-    gateway_ip = 'ffda:3ba5:a17a:4ba3::1'
-    enable_dhcp = True
-    host_routes = [FakeV6HostRoute]
-    ipv6_address_mode = constants.IPV6_SLAAC
-    ipv6_ra_mode = None
-
-
-class FakeV6SubnetStateless(object):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-    ip_version = 6
-    cidr = 'ffea:3ba5:a17a:4ba3::/64'
-    gateway_ip = 'ffea:3ba5:a17a:4ba3::1'
-    enable_dhcp = True
-    dns_nameservers = []
-    host_routes = []
-    ipv6_address_mode = constants.DHCPV6_STATELESS
-    ipv6_ra_mode = None
-
-
-class FakeV4SubnetNoGateway(FakeV4Subnet):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-    cidr = '192.168.1.0/24'
-    gateway_ip = None
-    enable_dhcp = True
-    host_routes = []
-    dns_nameservers = []
-
-
-class FakeV4SubnetNoRouter(FakeV4Subnet):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-    cidr = '192.168.1.0/24'
-    gateway_ip = '192.168.1.1'
-    host_routes = []
-    dns_nameservers = []
-
-
-class FakeV4Network(object):
-    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-    subnets = [FakeV4Subnet()]
-    ports = [FakePort1()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeV4NetworkClientId(object):
-    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-    subnets = [FakeV4Subnet()]
-    ports = [FakePort1(), FakePort5(), FakePort6()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeV6Network(object):
-    id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
-    subnets = [FakeV6Subnet()]
-    ports = [FakePort2()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeDualNetwork(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()]
-    namespace = 'qdhcp-ns'
-
-    def __init__(self, domain='openstacklocal'):
-        self.ports = [FakePort1(domain=domain), FakeV6Port(domain=domain),
-                      FakeDualPort(domain=domain),
-                      FakeRouterPort(domain=domain)]
-
-
-class FakeDeviceManagerNetwork(object):
-    # Use instance rather than class attributes here, so that we get
-    # an independent set of ports each time FakeDeviceManagerNetwork()
-    # is used.
-    def __init__(self):
-        self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-        self.subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()]
-        self.ports = [FakePort1(),
-                      FakeV6Port(),
-                      FakeDualPort(),
-                      FakeRouterPort()]
-        self.namespace = 'qdhcp-ns'
-
-
-class FakeDualNetworkReserved(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()]
-    ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort(),
-             FakeReservedPort()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeDualNetworkReserved2(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()]
-    ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort(),
-             FakeReservedPort(), FakeReservedPort(id='reserved-2')]
-    namespace = 'qdhcp-ns'
-
-
-class FakeNetworkDhcpPort(object):
-    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-    subnets = [FakeV4Subnet()]
-    ports = [FakePort1(), FakeDhcpPort()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeDualNetworkGatewayRoute(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4SubnetGatewayRoute(), FakeV6SubnetDHCPStateful()]
-    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeDualNetworkSingleDHCP(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()]
-    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeDualNetworkDualDHCP(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4Subnet(), FakeV4Subnet2()]
-    ports = [FakePort1(), FakeRouterPort(), FakeRouterPort2()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeV4NoGatewayNetwork(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4SubnetNoGateway()]
-    ports = [FakePort1()]
-
-
-class FakeV4NetworkNoRouter(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4SubnetNoRouter()]
-    ports = [FakePort1()]
-
-
-class FakeV4MetadataNetwork(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4MetadataSubnet()]
-    ports = [FakeRouterPort(ip_address='169.254.169.253')]
-
-
-class FakeV4NetworkDistRouter(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4Subnet()]
-    ports = [FakePort1(),
-             FakeRouterPort(dev_owner=constants.DEVICE_OWNER_DVR_INTERFACE)]
-
-
-class FakeDualV4Pxe3Ports(object):
-    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()]
-    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
-    namespace = 'qdhcp-ns'
-
-    def __init__(self, port_detail="portsSame"):
-        if port_detail == "portsSame":
-            self.ports[0].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-            self.ports[1].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
-            self.ports[2].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
-        else:
-            self.ports[0].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.2'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-            self.ports[1].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
-            self.ports[2].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
-
-
-class FakeV4NetworkPxe2Ports(object):
-    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
-    subnets = [FakeV4Subnet()]
-    ports = [FakePort1(), FakePort2(), FakeRouterPort()]
-    namespace = 'qdhcp-ns'
-
-    def __init__(self, port_detail="portsSame"):
-        if port_detail == "portsSame":
-            self.ports[0].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-            self.ports[1].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-        else:
-            self.ports[0].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-            self.ports[1].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-
-
-class FakeV4NetworkPxe3Ports(object):
-    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
-    subnets = [FakeV4Subnet()]
-    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
-    namespace = 'qdhcp-ns'
-
-    def __init__(self, port_detail="portsSame"):
-        if port_detail == "portsSame":
-            self.ports[0].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-            self.ports[1].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-            self.ports[2].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-        else:
-            self.ports[0].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
-            self.ports[1].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
-            self.ports[2].extra_dhcp_opts = [
-                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'),
-                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'),
-                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
-
-
-class FakeV6NetworkPxePort(object):
-    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
-    subnets = [FakeV6SubnetDHCPStateful()]
-    ports = [FakeV6Port()]
-    namespace = 'qdhcp-ns'
-
-    def __init__(self):
-        self.ports[0].extra_dhcp_opts = [
-            DhcpOpt(opt_name='tftp-server', opt_value='2001:192:168::1',
-                    ip_version=6),
-            DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0',
-                    ip_version=6)]
-
-
-class FakeV6NetworkPxePortWrongOptVersion(object):
-    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
-    subnets = [FakeV6SubnetDHCPStateful()]
-    ports = [FakeV6Port()]
-    namespace = 'qdhcp-ns'
-
-    def __init__(self):
-        self.ports[0].extra_dhcp_opts = [
-            DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7',
-                    ip_version=4),
-            DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0',
-                    ip_version=6)]
-
-
-class FakeDualStackNetworkSingleDHCP(object):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-
-    subnets = [FakeV4Subnet(), FakeV6SubnetSlaac()]
-    ports = [FakePort1(), FakePort4(), FakeRouterPort()]
-
-
-class FakeDualStackNetworkingSingleDHCPTags(object):
-    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
-
-    subnets = [FakeV4Subnet(), FakeV6SubnetSlaac()]
-    ports = [FakePort1(), FakePort4(), FakeRouterPort()]
-
-    def __init__(self):
-        for port in self.ports:
-            port.extra_dhcp_opts = [
-                DhcpOpt(opt_name='tag:ipxe,bootfile-name',
-                        opt_value='pxelinux.0')]
-
-
-class FakeV4NetworkMultipleTags(object):
-    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
-    subnets = [FakeV4Subnet()]
-    ports = [FakePort1(), FakeRouterPort()]
-    namespace = 'qdhcp-ns'
-
-    def __init__(self):
-        self.ports[0].extra_dhcp_opts = [
-            DhcpOpt(opt_name='tag:ipxe,bootfile-name', opt_value='pxelinux.0')]
-
-
-class FakeV6NetworkStatelessDHCP(object):
-    id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
-
-    subnets = [FakeV6SubnetStateless()]
-    ports = [FakeV6PortExtraOpt()]
-    namespace = 'qdhcp-ns'
-
-
-class FakeNetworkWithV6SatelessAndV4DHCPSubnets(object):
-    id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
-
-    subnets = [FakeV6SubnetStateless(), FakeV4Subnet()]
-    ports = [FakeDualPortWithV6ExtraOpt(), FakeRouterPort()]
-    namespace = 'qdhcp-ns'
-
-
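-# A recording test double: each lifecycle hook appends its name to
-# self.called, letting tests assert the exact sequence of operations
-# (e.g. enable() on an already-active process yields ['restart']).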
-class LocalChild(dhcp.DhcpLocalProcess):
-    PORTS = {4: [4], 6: [6]}
-
-    def __init__(self, *args, **kwargs):
-        self.process_monitor = mock.Mock()
-        kwargs['process_monitor'] = self.process_monitor
-        super(LocalChild, self).__init__(*args, **kwargs)
-        self.called = []
-
-    def reload_allocations(self):
-        self.called.append('reload')
-
-    def restart(self):
-        self.called.append('restart')
-
-    def spawn_process(self):
-        self.called.append('spawn')
-
-
-class TestConfBase(base.BaseTestCase):
-    def setUp(self):
-        super(TestConfBase, self).setUp()
-        self.conf = config.setup_conf()
-        self.conf.register_opts(base_config.core_opts)
-        self.conf.register_opts(dhcp_config.DHCP_OPTS)
-        self.conf.register_opts(dhcp_config.DNSMASQ_OPTS)
-        self.conf.register_opts(external_process.OPTS)
-        config.register_interface_driver_opts_helper(self.conf)
-
-
-class TestBase(TestConfBase):
-    def setUp(self):
-        super(TestBase, self).setUp()
-        instance = mock.patch("neutron.agent.linux.dhcp.DeviceManager")
-        self.mock_mgr = instance.start()
-        self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata',
-                                           default=True))
-        self.conf.register_opt(cfg.BoolOpt("force_metadata",
-                                           default=False))
-        self.conf.register_opt(cfg.BoolOpt('enable_metadata_network',
-                                           default=False))
-        self.config_parse(self.conf)
-        self.conf.set_override('state_path', '')
-
-        self.replace_p = mock.patch('neutron.common.utils.replace_file')
-        self.execute_p = mock.patch('neutron.agent.common.utils.execute')
-        self.safe = self.replace_p.start()
-        self.execute = self.execute_p.start()
-
-        self.makedirs = mock.patch('os.makedirs').start()
-        self.rmtree = mock.patch('shutil.rmtree').start()
-
-        self.external_process = mock.patch(
-            'neutron.agent.linux.external_process.ProcessManager').start()
-
-        self.mock_mgr.return_value.driver.bridged = True
-
-
-class TestDhcpBase(TestBase):
-
-    def test_existing_dhcp_networks_abstract_error(self):
-        self.assertRaises(NotImplementedError,
-                          dhcp.DhcpBase.existing_dhcp_networks,
-                          None)
-
-    def test_check_version_abstract_error(self):
-        self.assertRaises(NotImplementedError,
-                          dhcp.DhcpBase.check_version)
-
-    def test_base_abc_error(self):
-        self.assertRaises(TypeError, dhcp.DhcpBase, None)
-
-    def test_restart(self):
-        class SubClass(dhcp.DhcpBase):
-            def __init__(self):
-                dhcp.DhcpBase.__init__(self, cfg.CONF, FakeV4Network(),
-                                       mock.Mock(), None)
-                self.called = []
-
-            def enable(self):
-                self.called.append('enable')
-
-            def disable(self, retain_port=False):
-                self.called.append('disable %s' % retain_port)
-
-            def reload_allocations(self):
-                pass
-
-            @property
-            def active(self):
-                return True
-
-        c = SubClass()
-        c.restart()
-        self.assertEqual(c.called, ['disable True', 'enable'])
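-        # i.e. DhcpBase.restart() is disable(retain_port=True)
-        # followed by enable(), in that order.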
-
-
-class TestDhcpLocalProcess(TestBase):
-
-    def test_get_conf_file_name(self):
-        tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
-        lp = LocalChild(self.conf, FakeV4Network())
-        self.assertEqual(lp.get_conf_file_name('dev'), tpl)
-
-    @mock.patch.object(utils, 'ensure_dir')
-    def test_ensure_dir_called(self, ensure_dir):
-        LocalChild(self.conf, FakeV4Network())
-        ensure_dir.assert_called_once_with(
-            '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
-
-    def test_enable_already_active(self):
-        with mock.patch.object(LocalChild, 'active') as patched:
-            patched.__get__ = mock.Mock(return_value=True)
-            lp = LocalChild(self.conf, FakeV4Network())
-            lp.enable()
-
-            self.assertEqual(lp.called, ['restart'])
-            self.assertFalse(self.mock_mgr.return_value.setup.called)
-
-    @mock.patch.object(utils, 'ensure_dir')
-    def test_enable(self, ensure_dir):
-        attrs_to_mock = dict(
-            [(a, mock.DEFAULT) for a in
-                ['active', 'interface_name']]
-        )
-
-        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
-            mocks['active'].__get__ = mock.Mock(return_value=False)
-            mocks['interface_name'].__set__ = mock.Mock()
-            lp = LocalChild(self.conf,
-                            FakeDualNetwork())
-            lp.enable()
-
-            self.mock_mgr.assert_has_calls(
-                [mock.call(self.conf, None),
-                 mock.call().setup(mock.ANY)])
-            self.assertEqual(lp.called, ['spawn'])
-            self.assertTrue(mocks['interface_name'].__set__.called)
-            ensure_dir.assert_called_with(
-                '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc')
-
-    def _assert_disabled(self, lp):
-        self.assertTrue(lp.process_monitor.unregister.called)
-        self.assertTrue(self.external_process().disable.called)
-
-    def test_disable_not_active(self):
-        attrs_to_mock = dict([(a, mock.DEFAULT) for a in
-                              ['active', 'interface_name']])
-        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
-            mocks['active'].__get__ = mock.Mock(return_value=False)
-            mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
-            network = FakeDualNetwork()
-            lp = LocalChild(self.conf, network)
-            lp.device_manager = mock.Mock()
-            lp.disable()
-            lp.device_manager.destroy.assert_called_once_with(
-                network, 'tap0')
-            self._assert_disabled(lp)
-
-    def test_disable_retain_port(self):
-        attrs_to_mock = dict([(a, mock.DEFAULT) for a in
-                              ['active', 'interface_name']])
-        network = FakeDualNetwork()
-        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
-            mocks['active'].__get__ = mock.Mock(return_value=True)
-            mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
-            lp = LocalChild(self.conf, network)
-            lp.disable(retain_port=True)
-            self._assert_disabled(lp)
-
-    def test_disable(self):
-        attrs_to_mock = {'active': mock.DEFAULT}
-
-        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
-            mocks['active'].__get__ = mock.Mock(return_value=False)
-            lp = LocalChild(self.conf, FakeDualNetwork())
-            with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip:
-                lp.disable()
-
-            self._assert_disabled(lp)
-
-        ip.return_value.netns.delete.assert_called_with('qdhcp-ns')
-
-    def test_disable_config_dir_removed_after_destroy(self):
-        parent = mock.MagicMock()
-        parent.attach_mock(self.rmtree, 'rmtree')
-        parent.attach_mock(self.mock_mgr, 'DeviceManager')
-
-        lp = LocalChild(self.conf, FakeDualNetwork())
-        lp.disable(retain_port=False)
-
-        expected = [mock.call.DeviceManager().destroy(mock.ANY, mock.ANY),
-                    mock.call.rmtree(mock.ANY, ignore_errors=True)]
-        parent.assert_has_calls(expected)
-
-    def test_get_interface_name(self):
-        net = FakeDualNetwork()
-        path = '/dhcp/%s/interface' % net.id
-        self.useFixture(tools.OpenFixture(path, 'tap0'))
-        lp = LocalChild(self.conf, net)
-        self.assertEqual(lp.interface_name, 'tap0')
-
-    def test_set_interface_name(self):
-        with mock.patch('neutron.common.utils.replace_file') as replace:
-            lp = LocalChild(self.conf, FakeDualNetwork())
-            with mock.patch.object(lp, 'get_conf_file_name') as conf_file:
-                conf_file.return_value = '/interface'
-                lp.interface_name = 'tap0'
-                conf_file.assert_called_once_with('interface')
-                replace.assert_called_once_with(mock.ANY, 'tap0')
-
-
-class TestDnsmasq(TestBase):
-
-    def _get_dnsmasq(self, network, process_monitor=None):
-        process_monitor = process_monitor or mock.Mock()
-        return dhcp.Dnsmasq(self.conf, network,
-                            process_monitor=process_monitor)
-
-    def _test_spawn(self, extra_options, network=None,
-                    max_leases=16777216, lease_duration=86400,
-                    has_static=True):
-        # Avoid a mutable default argument: build the default
-        # FakeDualNetwork per call, not once at definition time.
-        network = network or FakeDualNetwork()
-
-        def mock_get_conf_file_name(kind):
-            return '/dhcp/%s/%s' % (network.id, kind)
-
-        # If you need to change this path, think twice: pid files will
-        # move around, breaking upgrades and backwards compatibility.
-        expected_pid_file = '/dhcp/%s/pid' % network.id
-
-        expected = [
-            'dnsmasq',
-            '--no-hosts',
-            '--strict-order',
-            '--except-interface=lo',
-            '--pid-file=%s' % expected_pid_file,
-            '--dhcp-hostsfile=/dhcp/%s/host' % network.id,
-            '--addn-hosts=/dhcp/%s/addn_hosts' % network.id,
-            '--dhcp-optsfile=/dhcp/%s/opts' % network.id,
-            '--dhcp-leasefile=/dhcp/%s/leases' % network.id,
-            '--dhcp-match=set:ipxe,175',
-            '--bind-interfaces',
-            '--interface=tap0',
-        ]
-
-        seconds = ''
-        if lease_duration == -1:
-            lease_duration = 'infinite'
-        else:
-            seconds = 's'
-        if has_static:
-            prefix = '--dhcp-range=set:tag%d,%s,static,%s%s'
-            prefix6 = '--dhcp-range=set:tag%d,%s,static,%s,%s%s'
-        else:
-            prefix = '--dhcp-range=set:tag%d,%s,%s%s'
-            prefix6 = '--dhcp-range=set:tag%d,%s,%s,%s%s'
-        possible_leases = 0
-        for i, s in enumerate(network.subnets):
-            if (s.ip_version != 6
-                or s.ipv6_address_mode == constants.DHCPV6_STATEFUL):
-                if s.ip_version == 4:
-                    expected.extend([prefix % (
-                        i, s.cidr.split('/')[0], lease_duration, seconds)])
-                else:
-                    expected.extend([prefix6 % (
-                        i, s.cidr.split('/')[0], s.cidr.split('/')[1],
-                        lease_duration, seconds)])
-                possible_leases += netaddr.IPNetwork(s.cidr).size
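-                # e.g. a /24 contributes 256 possible leases; the
-                # --dhcp-lease-max emitted below is capped at
-                # min(possible_leases, max_leases).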
-
-        if cfg.CONF.advertise_mtu and hasattr(network, 'mtu'):
-            expected.append(
-                '--dhcp-option-force=option:mtu,%s' % network.mtu)
-
-        expected.append('--dhcp-lease-max=%d' % min(
-            possible_leases, max_leases))
-        expected.extend(extra_options)
-
-        self.execute.return_value = ('', '')
-
-        attrs_to_mock = dict(
-            [(a, mock.DEFAULT) for a in
-                ['_output_opts_file', 'get_conf_file_name', 'interface_name']]
-        )
-
-        test_pm = mock.Mock()
-
-        with mock.patch.multiple(dhcp.Dnsmasq, **attrs_to_mock) as mocks:
-            mocks['get_conf_file_name'].side_effect = mock_get_conf_file_name
-            mocks['_output_opts_file'].return_value = (
-                '/dhcp/%s/opts' % network.id
-            )
-            mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
-
-            dm = self._get_dnsmasq(network, test_pm)
-            dm.spawn_process()
-            self.assertTrue(mocks['_output_opts_file'].called)
-
-            self.assertTrue(test_pm.register.called)
-            self.external_process().enable.assert_called_once_with(
-                reload_cfg=False)
-            call_kwargs = self.external_process.mock_calls[0][2]
-            cmd_callback = call_kwargs['default_cmd_callback']
-
-            result_cmd = cmd_callback(expected_pid_file)
-
-            self.assertEqual(expected, result_cmd)
-
-    def test_spawn(self):
-        self._test_spawn(['--conf-file=', '--domain=openstacklocal'])
-
-    def test_spawn_infinite_lease_duration(self):
-        self.conf.set_override('dhcp_lease_duration', -1)
-        self._test_spawn(['--conf-file=', '--domain=openstacklocal'],
-                         FakeDualNetwork(), 16777216, -1)
-
-    def test_spawn_cfg_config_file(self):
-        self.conf.set_override('dnsmasq_config_file', '/foo')
-        self._test_spawn(['--conf-file=/foo', '--domain=openstacklocal'])
-
-    def test_spawn_no_dhcp_domain(self):
-        (exp_host_name, exp_host_data,
-         exp_addn_name, exp_addn_data) = self._test_no_dhcp_domain_alloc_data
-        self.conf.set_override('dhcp_domain', '')
-        network = FakeDualNetwork(domain=self.conf.dhcp_domain)
-        self._test_spawn(['--conf-file='], network=network)
-        self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data),
-                                    mock.call(exp_addn_name, exp_addn_data)])
-
-    def test_spawn_no_dhcp_range(self):
-        network = FakeV6Network()
-        subnet = FakeV6SubnetSlaac()
-        network.subnets = [subnet]
-        self._test_spawn(['--conf-file=', '--domain=openstacklocal'],
-                         network, has_static=False)
-
-    def test_spawn_cfg_dns_server(self):
-        self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8'])
-        self._test_spawn(['--conf-file=',
-                          '--server=8.8.8.8',
-                          '--domain=openstacklocal'])
-
-    def test_spawn_cfg_multiple_dns_server(self):
-        self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8',
-                                                       '9.9.9.9'])
-        self._test_spawn(['--conf-file=',
-                          '--server=8.8.8.8',
-                          '--server=9.9.9.9',
-                          '--domain=openstacklocal'])
-
-    def test_spawn_cfg_enable_dnsmasq_log(self):
-        self.conf.set_override('dnsmasq_base_log_dir', '/tmp')
-        network = FakeV4Network()
-        dhcp_dns_log = (
-            '/tmp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dhcp_dns_log')
-
-        self._test_spawn(['--conf-file=',
-                          '--domain=openstacklocal',
-                          '--log-queries',
-                          '--log-dhcp',
-                          ('--log-facility=%s' % dhcp_dns_log)],
-                         network)
-
-    def test_spawn_cfg_no_local_resolv(self):
-        self.conf.set_override('dnsmasq_local_resolv', False)
-
-        self._test_spawn(['--conf-file=', '--no-resolv',
-                          '--domain=openstacklocal'])
-
-    def test_spawn_max_leases_is_smaller_than_cap(self):
-        self._test_spawn(
-            ['--conf-file=', '--domain=openstacklocal'],
-            network=FakeV4Network(),
-            max_leases=256)
-
-    def test_spawn_cfg_broadcast(self):
-        self.conf.set_override('dhcp_broadcast_reply', True)
-        self._test_spawn(['--conf-file=', '--domain=openstacklocal',
-                          '--dhcp-broadcast'])
-
-    def test_spawn_cfg_advertise_mtu(self):
-        cfg.CONF.set_override('advertise_mtu', True)
-        network = FakeV4Network()
-        network.mtu = 1500
-        self._test_spawn(['--conf-file=', '--domain=openstacklocal'],
-                         network)
-
-    def test_spawn_cfg_advertise_mtu_plugin_doesnt_pass_mtu_value(self):
-        cfg.CONF.set_override('advertise_mtu', True)
-        network = FakeV4Network()
-        self._test_spawn(['--conf-file=', '--domain=openstacklocal'],
-                         network)
-
-    def _test_output_init_lease_file(self, timestamp):
-        expected = [
-            '00:00:80:aa:bb:cc 192.168.0.2 * *',
-            '00:00:f3:aa:bb:cc [fdca:3ba5:a17a:4ba3::2] * *',
-            '00:00:0f:aa:bb:cc 192.168.0.3 * *',
-            '00:00:0f:aa:bb:cc [fdca:3ba5:a17a:4ba3::3] * *',
-            '00:00:0f:rr:rr:rr 192.168.0.1 * *\n']
-        expected = "\n".join('%s %s' % (timestamp, line)
-                             for line in expected)
-        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
-            conf_fn.return_value = '/foo/leases'
-            dm = self._get_dnsmasq(FakeDualNetwork())
-            dm._output_init_lease_file()
-        self.safe.assert_called_once_with('/foo/leases', expected)
-
-    @mock.patch('time.time')
-    def test_output_init_lease_file(self, tmock):
-        self.conf.set_override('dhcp_lease_duration', 500)
-        tmock.return_value = 1000000
-        # lease duration should be added to current time
-        timestamp = 1000000 + 500
-        self._test_output_init_lease_file(timestamp)
-
-    def test_output_init_lease_file_infinite_duration(self):
-        self.conf.set_override('dhcp_lease_duration', -1)
-        # when duration is infinite, lease db timestamp should be 0
-        timestamp = 0
-        self._test_output_init_lease_file(timestamp)
-
-    def _test_output_opts_file(self, expected, network, ipm_retval=None):
-        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
-            conf_fn.return_value = '/foo/opts'
-            dm = self._get_dnsmasq(network)
-            if ipm_retval:
-                with mock.patch.object(
-                        dm, '_make_subnet_interface_ip_map') as ipm:
-                    ipm.return_value = ipm_retval
-                    dm._output_opts_file()
-                    self.assertTrue(ipm.called)
-            else:
-                dm._output_opts_file()
-        self.safe.assert_called_once_with('/foo/opts', expected)
-
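-    # In the expected opts files below, tag:tagN scopes an option to
-    # subnet N. Static routes are deliberately emitted twice: once as
-    # option:classless-static-route (RFC 3442, option 121) and once as
-    # raw option 249, its pre-standard Microsoft counterpart.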
-    def test_output_opts_file(self):
-        fake_v6 = '2001:0200:feed:7ac0::1'
-        expected = (
-            'tag:tag0,option:dns-server,8.8.8.8\n'
-            'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,249,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,option:router,192.168.0.1\n'
-            'tag:tag1,option6:dns-server,%s\n'
-            'tag:tag1,option6:domain-search,openstacklocal').lstrip() % (
-                '[' + fake_v6 + ']')
-
-        self._test_output_opts_file(expected, FakeDualNetwork())
-
-    def test_output_opts_file_gateway_route(self):
-        fake_v6 = '2001:0200:feed:7ac0::1'
-        expected = ('tag:tag0,option:dns-server,8.8.8.8\n'
-                    'tag:tag0,option:classless-static-route,'
-                    '169.254.169.254/32,192.168.0.1,0.0.0.0/0,'
-                    '192.168.0.1\ntag:tag0,249,169.254.169.254/32,'
-                    '192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-                    'tag:tag0,option:router,192.168.0.1\n'
-                    'tag:tag1,option6:dns-server,%s\n'
-                    'tag:tag1,option6:domain-search,'
-                    'openstacklocal').lstrip() % ('[' + fake_v6 + ']')
-
-        self._test_output_opts_file(expected, FakeDualNetworkGatewayRoute())
-
-    def test_output_opts_file_multiple_agents_without_dns_provided(self):
-        expected = ('tag:tag0,option:classless-static-route,'
-                    '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-                    'tag:tag0,249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,'
-                    '192.168.0.1\ntag:tag0,option:router,192.168.0.1\n'
-                    'tag:tag0,option:dns-server,192.168.0.5,'
-                    '192.168.0.6').lstrip()
-
-        self._test_output_opts_file(expected,
-                                    FakeV4MultipleAgentsWithoutDnsProvided())
-
-    def test_output_opts_file_agent_with_many_dns_provided(self):
-        expected = ('tag:tag0,'
-                    'option:dns-server,2.2.2.2,9.9.9.9,1.1.1.1,3.3.3.3\n'
-                    'tag:tag0,option:classless-static-route,'
-                    '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-                    'tag:tag0,249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,'
-                    '192.168.0.1\n'
-                    'tag:tag0,option:router,192.168.0.1').lstrip()
-
-        self._test_output_opts_file(expected,
-                                    FakeV4AgentWithManyDnsProvided())
-
-    def test_output_opts_file_multiple_agents_with_dns_provided(self):
-        expected = ('tag:tag0,option:dns-server,8.8.8.8\n'
-                    'tag:tag0,option:classless-static-route,'
-                    '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-                    'tag:tag0,249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,'
-                    '192.168.0.1\n'
-                    'tag:tag0,option:router,192.168.0.1').lstrip()
-
-        self._test_output_opts_file(expected,
-                                    FakeV4MultipleAgentsWithDnsProvided())
-
-    def test_output_opts_file_single_dhcp(self):
-        expected = (
-            'tag:tag0,option:dns-server,8.8.8.8\n'
-            'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,'
-            '192.168.1.0/24,0.0.0.0,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,249,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,192.168.1.0/24,0.0.0.0,'
-            '0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,option:router,192.168.0.1').lstrip()
-
-        self._test_output_opts_file(expected, FakeDualNetworkSingleDHCP())
-
-    def test_output_opts_file_dual_dhcp_rfc3442(self):
-        expected = (
-            'tag:tag0,option:dns-server,8.8.8.8\n'
-            'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,'
-            '192.168.1.0/24,0.0.0.0,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,249,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,192.168.1.0/24,0.0.0.0,'
-            '0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,option:router,192.168.0.1\n'
-            'tag:tag1,option:dns-server,8.8.8.8\n'
-            'tag:tag1,option:classless-static-route,'
-            '169.254.169.254/32,192.168.1.1,'
-            '192.168.0.0/24,0.0.0.0,0.0.0.0/0,192.168.1.1\n'
-            'tag:tag1,249,169.254.169.254/32,192.168.1.1,'
-            '192.168.0.0/24,0.0.0.0,0.0.0.0/0,192.168.1.1\n'
-            'tag:tag1,option:router,192.168.1.1').lstrip()
-
-        self._test_output_opts_file(expected, FakeDualNetworkDualDHCP())
-
-    def test_output_opts_file_no_gateway(self):
-        expected = (
-            'tag:tag0,option:classless-static-route,'
-            '169.254.169.254/32,192.168.1.1\n'
-            'tag:tag0,249,169.254.169.254/32,192.168.1.1\n'
-            'tag:tag0,option:router').lstrip()
-
-        ipm_retval = {FakeV4SubnetNoGateway.id: '192.168.1.1'}
-        self._test_output_opts_file(expected, FakeV4NoGatewayNetwork(),
-                                    ipm_retval=ipm_retval)
-
-    def test_output_opts_file_no_neutron_router_on_subnet(self):
-        expected = (
-            'tag:tag0,option:classless-static-route,'
-            '169.254.169.254/32,192.168.1.2,0.0.0.0/0,192.168.1.1\n'
-            'tag:tag0,249,169.254.169.254/32,192.168.1.2,'
-            '0.0.0.0/0,192.168.1.1\n'
-            'tag:tag0,option:router,192.168.1.1').lstrip()
-
-        ipm_retval = {FakeV4SubnetNoRouter.id: '192.168.1.2'}
-        self._test_output_opts_file(expected, FakeV4NetworkNoRouter(),
-                                    ipm_retval=ipm_retval)
-
-    def test_output_opts_file_dist_neutron_router_on_subnet(self):
-        expected = (
-            'tag:tag0,option:dns-server,8.8.8.8\n'
-            'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,249,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,option:router,192.168.0.1').lstrip()
-
-        ipm_retval = {FakeV4Subnet.id: '192.168.0.1'}
-        self._test_output_opts_file(expected, FakeV4NetworkDistRouter(),
-                                    ipm_retval=ipm_retval)
-
-    def test_output_opts_file_pxe_2port_1net(self):
-        expected = (
-            'tag:tag0,option:dns-server,8.8.8.8\n'
-            'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,249,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,option:router,192.168.0.1\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'option:tftp-server,192.168.0.3\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'option:server-ip-address,192.168.0.2\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'option:bootfile-name,pxelinux.0\n'
-            'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,'
-            'option:tftp-server,192.168.0.3\n'
-            'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,'
-            'option:server-ip-address,192.168.0.2\n'
-            'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,'
-            'option:bootfile-name,pxelinux.0').lstrip()
-
-        self._test_output_opts_file(expected, FakeV4NetworkPxe2Ports())
-
-    def test_output_opts_file_pxe_2port_1net_diff_details(self):
-        expected = (
-            'tag:tag0,option:dns-server,8.8.8.8\n'
-            'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,249,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,option:router,192.168.0.1\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'option:tftp-server,192.168.0.3\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'option:server-ip-address,192.168.0.2\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'option:bootfile-name,pxelinux.0\n'
-            'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,'
-            'option:tftp-server,192.168.0.5\n'
-            'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,'
-            'option:server-ip-address,192.168.0.5\n'
-            'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,'
-            'option:bootfile-name,pxelinux.0').lstrip()
-
-        self._test_output_opts_file(expected,
-                                    FakeV4NetworkPxe2Ports("portsDiff"))
-
-    def test_output_opts_file_pxe_3port_2net(self):
-        expected = (
-            'tag:tag0,option:dns-server,8.8.8.8\n'
-            'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,'
-            '192.168.1.0/24,0.0.0.0,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,249,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,192.168.1.0/24,0.0.0.0,'
-            '0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,option:router,192.168.0.1\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'option:tftp-server,192.168.0.3\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'option:server-ip-address,192.168.0.2\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'option:bootfile-name,pxelinux.0\n'
-            'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,'
-            'option:tftp-server,192.168.1.3\n'
-            'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,'
-            'option:server-ip-address,192.168.1.2\n'
-            'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,'
-            'option:bootfile-name,pxelinux2.0\n'
-            'tag:44444444-4444-4444-4444-444444444444,'
-            'option:tftp-server,192.168.1.3\n'
-            'tag:44444444-4444-4444-4444-444444444444,'
-            'option:server-ip-address,192.168.1.2\n'
-            'tag:44444444-4444-4444-4444-444444444444,'
-            'option:bootfile-name,pxelinux3.0').lstrip()
-
-        self._test_output_opts_file(expected, FakeDualV4Pxe3Ports())
-
-    def test_output_opts_file_multiple_tags(self):
-        expected = (
-            'tag:tag0,option:dns-server,8.8.8.8\n'
-            'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,249,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,option:router,192.168.0.1\n'
-            'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,'
-            'tag:ipxe,option:bootfile-name,pxelinux.0')
-        expected = expected.lstrip()
-
-        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
-            conf_fn.return_value = '/foo/opts'
-            dm = self._get_dnsmasq(FakeV4NetworkMultipleTags())
-            dm._output_opts_file()
-
-        self.safe.assert_called_once_with('/foo/opts', expected)
-
-    @mock.patch('neutron.agent.linux.dhcp.Dnsmasq.get_conf_file_name',
-                return_value='/foo/opts')
-    def test_output_opts_file_pxe_ipv6_port_with_ipv6_opt(self,
-                                                          mock_get_conf_fn):
-        expected = (
-            'tag:tag0,option6:dns-server,[2001:0200:feed:7ac0::1]\n'
-            'tag:tag0,option6:domain-search,openstacklocal\n'
-            'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,'
-            'option6:tftp-server,2001:192:168::1\n'
-            'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,'
-            'option6:bootfile-name,pxelinux.0')
-        expected = expected.lstrip()
-
-        dm = self._get_dnsmasq(FakeV6NetworkPxePort())
-        dm._output_opts_file()
-
-        self.safe.assert_called_once_with('/foo/opts', expected)
-
-    @mock.patch('neutron.agent.linux.dhcp.Dnsmasq.get_conf_file_name',
-                return_value='/foo/opts')
-    def test_output_opts_file_pxe_ipv6_port_with_ipv4_opt(self,
-                                                          mock_get_conf_fn):
-        expected = (
-            'tag:tag0,option6:dns-server,[2001:0200:feed:7ac0::1]\n'
-            'tag:tag0,option6:domain-search,openstacklocal\n'
-            'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,'
-            'option6:bootfile-name,pxelinux.0')
-        expected = expected.lstrip()
-
-        dm = self._get_dnsmasq(FakeV6NetworkPxePortWrongOptVersion())
-        dm._output_opts_file()
-
-        self.safe.assert_called_once_with('/foo/opts', expected)
-
-    def test_output_opts_file_ipv6_address_mode_unset(self):
-        fake_v6 = '2001:0200:feed:7ac0::1'
-        expected = (
-            'tag:tag0,option6:dns-server,%s\n'
-            'tag:tag0,option6:domain-search,openstacklocal').lstrip() % (
-                '[' + fake_v6 + ']')
-
-        self._test_output_opts_file(expected, FakeV6Network())
-
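-    # The two properties below build the expected hosts/addn_hosts file
-    # contents: the first assumes no dhcp_domain (bare host names), the
-    # second the default 'openstacklocal' domain suffix.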
-    @property
-    def _test_no_dhcp_domain_alloc_data(self):
-        exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host'
-        exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2,'
-                         '192.168.0.2\n'
-                         '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2,'
-                         '[fdca:3ba5:a17a:4ba3::2]\n'
-                         '00:00:0f:aa:bb:cc,host-192-168-0-3,'
-                         '192.168.0.3\n'
-                         '00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3,'
-                         '[fdca:3ba5:a17a:4ba3::3]\n'
-                         '00:00:0f:rr:rr:rr,host-192-168-0-1,'
-                         '192.168.0.1\n').lstrip()
-        exp_addn_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/addn_hosts'
-        exp_addn_data = (
-            '192.168.0.2\t'
-            'host-192-168-0-2 host-192-168-0-2\n'
-            'fdca:3ba5:a17a:4ba3::2\t'
-            'host-fdca-3ba5-a17a-4ba3--2 '
-            'host-fdca-3ba5-a17a-4ba3--2\n'
-            '192.168.0.3\thost-192-168-0-3 '
-            'host-192-168-0-3\n'
-            'fdca:3ba5:a17a:4ba3::3\t'
-            'host-fdca-3ba5-a17a-4ba3--3 '
-            'host-fdca-3ba5-a17a-4ba3--3\n'
-            '192.168.0.1\t'
-            'host-192-168-0-1 '
-            'host-192-168-0-1\n'
-        ).lstrip()
-        return (exp_host_name, exp_host_data,
-                exp_addn_name, exp_addn_data)
-
-    @property
-    def _test_reload_allocation_data(self):
-        exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host'
-        exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,'
-                         '192.168.0.2\n'
-                         '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2.'
-                         'openstacklocal.,[fdca:3ba5:a17a:4ba3::2]\n'
-                         '00:00:0f:aa:bb:cc,host-192-168-0-3.openstacklocal.,'
-                         '192.168.0.3\n'
-                         '00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3.'
-                         'openstacklocal.,[fdca:3ba5:a17a:4ba3::3]\n'
-                         '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,'
-                         '192.168.0.1\n').lstrip()
-        exp_addn_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/addn_hosts'
-        exp_addn_data = (
-            '192.168.0.2\t'
-            'host-192-168-0-2.openstacklocal. host-192-168-0-2\n'
-            'fdca:3ba5:a17a:4ba3::2\t'
-            'host-fdca-3ba5-a17a-4ba3--2.openstacklocal. '
-            'host-fdca-3ba5-a17a-4ba3--2\n'
-            '192.168.0.3\thost-192-168-0-3.openstacklocal. '
-            'host-192-168-0-3\n'
-            'fdca:3ba5:a17a:4ba3::3\t'
-            'host-fdca-3ba5-a17a-4ba3--3.openstacklocal. '
-            'host-fdca-3ba5-a17a-4ba3--3\n'
-            '192.168.0.1\t'
-            'host-192-168-0-1.openstacklocal. '
-            'host-192-168-0-1\n'
-        ).lstrip()
-        exp_opt_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/opts'
-        fake_v6 = '2001:0200:feed:7ac0::1'
-        exp_opt_data = (
-            'tag:tag0,option:dns-server,8.8.8.8\n'
-            'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,249,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag0,option:router,192.168.0.1\n'
-            'tag:tag1,option6:dns-server,%s\n'
-            'tag:tag1,option6:domain-search,openstacklocal').lstrip() % (
-            '[' + fake_v6 + ']')
-        return (exp_host_name, exp_host_data,
-                exp_addn_name, exp_addn_data,
-                exp_opt_name, exp_opt_data,)
-
-    def test_reload_allocations(self):
-        (exp_host_name, exp_host_data,
-         exp_addn_name, exp_addn_data,
-         exp_opt_name, exp_opt_data,) = self._test_reload_allocation_data
-
-        net = FakeDualNetwork()
-        hpath = '/dhcp/%s/host' % net.id
-        ipath = '/dhcp/%s/interface' % net.id
-        self.useFixture(tools.OpenFixture(hpath))
-        self.useFixture(tools.OpenFixture(ipath))
-        test_pm = mock.Mock()
-        dm = self._get_dnsmasq(net, test_pm)
-        dm.reload_allocations()
-        self.assertTrue(test_pm.register.called)
-        self.external_process().enable.assert_called_once_with(
-            reload_cfg=True)
-
-        self.safe.assert_has_calls([
-            mock.call(exp_host_name, exp_host_data),
-            mock.call(exp_addn_name, exp_addn_data),
-            mock.call(exp_opt_name, exp_opt_data),
-        ])
-
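-    # The lease-release tests feed _read_hosts_file_leases() fake
-    # (ip, mac, client_id) tuples and verify that _release_lease() is
-    # invoked only for leases that no longer match an existing port.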
-    def test_release_unused_leases(self):
-        dnsmasq = self._get_dnsmasq(FakeDualNetwork())
-
-        ip1 = '192.168.1.2'
-        mac1 = '00:00:80:aa:bb:cc'
-        ip2 = '192.168.1.3'
-        mac2 = '00:00:80:cc:bb:aa'
-
-        old_leases = set([(ip1, mac1, None), (ip2, mac2, None)])
-        dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
-        dnsmasq._output_hosts_file = mock.Mock()
-        dnsmasq._release_lease = mock.Mock()
-        dnsmasq.network.ports = []
-        dnsmasq.device_manager.driver.unplug = mock.Mock()
-
-        dnsmasq._release_unused_leases()
-
-        dnsmasq._release_lease.assert_has_calls([mock.call(mac1, ip1, None),
-                                                 mock.call(mac2, ip2, None)],
-                                                any_order=True)
-        dnsmasq.device_manager.driver.unplug.assert_has_calls(
-            [mock.call(dnsmasq.interface_name,
-                       namespace=dnsmasq.network.namespace)])
-
-    def test_release_for_ipv6_lease(self):
-        dnsmasq = self._get_dnsmasq(FakeDualNetwork())
-
-        ip1 = 'fdca:3ba5:a17a::1'
-        mac1 = '00:00:80:aa:bb:cc'
-        ip2 = '192.168.1.3'
-        mac2 = '00:00:80:cc:bb:aa'
-
-        old_leases = set([(ip1, mac1, None), (ip2, mac2, None)])
-        dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
-        ipw = mock.patch(
-            'neutron.agent.linux.ip_lib.IpNetnsCommand.execute').start()
-        dnsmasq._release_unused_leases()
-        # Verify that dhcp_release is called only for ipv4 addresses.
-        self.assertEqual(1, ipw.call_count)
-        ipw.assert_has_calls([mock.call(['dhcp_release', None, ip2, mac2],
-                             run_as_root=True)])
-
-    def test_release_unused_leases_with_dhcp_port(self):
-        dnsmasq = self._get_dnsmasq(FakeNetworkDhcpPort())
-        ip1 = '192.168.1.2'
-        mac1 = '00:00:80:aa:bb:cc'
-        ip2 = '192.168.1.3'
-        mac2 = '00:00:80:cc:bb:aa'
-
-        old_leases = set([(ip1, mac1, None), (ip2, mac2, None)])
-        dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
-        dnsmasq._output_hosts_file = mock.Mock()
-        dnsmasq._release_lease = mock.Mock()
-        dnsmasq.device_manager.get_device_id = mock.Mock(
-            return_value='fake_dhcp_port')
-        dnsmasq._release_unused_leases()
-        self.assertFalse(
-            dnsmasq.device_manager.driver.unplug.called)
-
-    def test_release_unused_leases_with_client_id(self):
-        dnsmasq = self._get_dnsmasq(FakeDualNetwork())
-
-        ip1 = '192.168.1.2'
-        mac1 = '00:00:80:aa:bb:cc'
-        client_id1 = 'client1'
-        ip2 = '192.168.1.3'
-        mac2 = '00:00:80:cc:bb:aa'
-        client_id2 = 'client2'
-
-        old_leases = set([(ip1, mac1, client_id1), (ip2, mac2, client_id2)])
-        dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
-        dnsmasq._output_hosts_file = mock.Mock()
-        dnsmasq._release_lease = mock.Mock()
-        dnsmasq.network.ports = []
-
-        dnsmasq._release_unused_leases()
-
-        dnsmasq._release_lease.assert_has_calls(
-            [mock.call(mac1, ip1, client_id1),
-             mock.call(mac2, ip2, client_id2)],
-            any_order=True)
-
-    def test_release_unused_leases_one_lease(self):
-        dnsmasq = self._get_dnsmasq(FakeDualNetwork())
-
-        ip1 = '192.168.0.2'
-        mac1 = '00:00:80:aa:bb:cc'
-        ip2 = '192.168.0.3'
-        mac2 = '00:00:80:cc:bb:aa'
-
-        old_leases = set([(ip1, mac1, None), (ip2, mac2, None)])
-        dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
-        dnsmasq._output_hosts_file = mock.Mock()
-        dnsmasq._release_lease = mock.Mock()
-        dnsmasq.network.ports = [FakePort1()]
-
-        dnsmasq._release_unused_leases()
-
-        dnsmasq._release_lease.assert_called_once_with(
-            mac2, ip2, None)
-
-    def test_release_unused_leases_one_lease_with_client_id(self):
-        dnsmasq = self._get_dnsmasq(FakeDualNetwork())
-
-        ip1 = '192.168.0.2'
-        mac1 = '00:00:80:aa:bb:cc'
-        client_id1 = 'client1'
-        ip2 = '192.168.0.5'
-        mac2 = '00:00:0f:aa:bb:55'
-        client_id2 = 'test5'
-
-        old_leases = set([(ip1, mac1, client_id1), (ip2, mac2, client_id2)])
-        dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
-        dnsmasq._output_hosts_file = mock.Mock()
-        dnsmasq._release_lease = mock.Mock()
-        dnsmasq.network.ports = [FakePort5()]
-
-        dnsmasq._release_unused_leases()
-
-        dnsmasq._release_lease.assert_called_once_with(
-            mac1, ip1, client_id1)
-
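-    # Host-file entries have the form
-    # "<mac>[,id:<client id>],<hostname>,<ip>"; the parser should yield
-    # (ip, mac, client_id) tuples, with client_id None when absent.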
-    def test_read_hosts_file_leases(self):
-        filename = '/path/to/file'
-        lines = ["00:00:80:aa:bb:cc,inst-name,192.168.0.1",
-                 "00:00:80:aa:bb:cc,inst-name,[fdca:3ba5:a17a::1]"]
-        mock_open = self.useFixture(
-            tools.OpenFixture(filename, '\n'.join(lines))).mock_open
-        dnsmasq = self._get_dnsmasq(FakeDualNetwork())
-        leases = dnsmasq._read_hosts_file_leases(filename)
-
-        self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc", None),
-                              ("fdca:3ba5:a17a::1", "00:00:80:aa:bb:cc",
-                               None)]), leases)
-        mock_open.assert_called_once_with(filename)
-
-    def test_read_hosts_file_leases_with_client_id(self):
-        filename = '/path/to/file'
-        lines = ["00:00:80:aa:bb:cc,id:client1,inst-name,192.168.0.1",
-                 "00:00:80:aa:bb:cc,id:client2,inst-name,"
-                 "[fdca:3ba5:a17a::1]"]
-        mock_open = self.useFixture(
-            tools.OpenFixture(filename, '\n'.join(lines))).mock_open
-        dnsmasq = self._get_dnsmasq(FakeDualNetwork())
-        leases = dnsmasq._read_hosts_file_leases(filename)
-
-        self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc", 'client1'),
-                              ("fdca:3ba5:a17a::1", "00:00:80:aa:bb:cc",
-                               'client2')]), leases)
-        mock_open.assert_called_once_with(filename)
-
-    def test_read_hosts_file_leases_with_stateless_IPv6_tag(self):
-        filename = self.get_temp_file_path('leases')
-        with open(filename, "w") as leasesfile:
-            lines = [
-                "00:00:80:aa:bb:cc,id:client1,inst-name,192.168.0.1\n",
-                "00:00:80:aa:bb:cc,set:ccccccccc-cccc-cccc-cccc-cccccccc\n",
-                "00:00:80:aa:bb:cc,id:client2,inst-name,[fdca:3ba5:a17a::1]\n"]
-            for line in lines:
-                leasesfile.write(line)
-
-        dnsmasq = self._get_dnsmasq(FakeDualNetwork())
-        leases = dnsmasq._read_hosts_file_leases(filename)
-
-        self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc", 'client1'),
-                              ("fdca:3ba5:a17a::1", "00:00:80:aa:bb:cc",
-                              'client2')]), leases)
-
-    def test_make_subnet_interface_ip_map(self):
-        with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as ip_dev:
-            ip_dev.return_value.addr.list.return_value = [
-                {'cidr': '192.168.0.1/24'}
-            ]
-
-            dm = self._get_dnsmasq(FakeDualNetwork())
-
-            self.assertEqual(
-                dm._make_subnet_interface_ip_map(),
-                {FakeV4Subnet.id: '192.168.0.1'}
-            )
-
-    def test_remove_config_files(self):
-        net = FakeV4Network()
-        path = '/opt/data/neutron/dhcp'
-        self.conf.dhcp_confs = path
-        lp = LocalChild(self.conf, net)
-        lp._remove_config_files()
-        self.rmtree.assert_called_once_with(os.path.join(path, net.id),
-                                            ignore_errors=True)
-
-    def test_existing_dhcp_networks(self):
-        path = '/opt/data/neutron/dhcp'
-        self.conf.dhcp_confs = path
-
-        cases = {
-            # network_uuid --> is_dhcp_alive?
-            'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa': True,
-            'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb': False,
-            'not_uuid_like_name': True
-        }
-
-        def active_fake(self, instance, cls):
-            return cases[instance.network.id]
-
-        with mock.patch('os.listdir') as mock_listdir:
-            with mock.patch.object(dhcp.Dnsmasq, 'active') as mock_active:
-                mock_active.__get__ = active_fake
-                mock_listdir.return_value = cases.keys()
-
-                result = dhcp.Dnsmasq.existing_dhcp_networks(self.conf)
-
-                mock_listdir.assert_called_once_with(path)
-                self.assertEqual(['aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
-                                  'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'],
-                                 sorted(result))
-
-    def test__output_hosts_file_log_only_twice(self):
-        dm = self._get_dnsmasq(FakeDualStackNetworkSingleDHCP())
-        with mock.patch.object(dhcp, 'LOG') as logger:
-            logger.process.return_value = ('fake_message', {})
-            dm._output_hosts_file()
-        # The method logs twice, at the start and at the end. There should
-        # be no other logs, no matter how many hosts there are to dump in
-        # the file.
-        self.assertEqual(2, len(logger.method_calls))
-
-    def test_only_populates_dhcp_enabled_subnets(self):
-        exp_host_name = '/dhcp/eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee/host'
-        exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,'
-                         '192.168.0.2\n'
-                         '00:16:3E:C2:77:1D,host-192-168-0-4.openstacklocal.,'
-                         '192.168.0.4\n'
-                         '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,'
-                         '192.168.0.1\n').lstrip()
-        dm = self._get_dnsmasq(FakeDualStackNetworkSingleDHCP())
-        dm._output_hosts_file()
-        self.safe.assert_has_calls([mock.call(exp_host_name,
-                                              exp_host_data)])
-
-    def test_only_populates_dhcp_client_id(self):
-        exp_host_name = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/host'
-        exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,'
-                         '192.168.0.2\n'
-                         '00:00:0f:aa:bb:55,id:test5,'
-                         'host-192-168-0-5.openstacklocal.,'
-                         '192.168.0.5\n'
-                         '00:00:0f:aa:bb:66,id:test6,'
-                         'host-192-168-0-6.openstacklocal.,192.168.0.6,'
-                         'set:ccccccccc-cccc-cccc-cccc-ccccccccc\n').lstrip()
-
-        dm = self._get_dnsmasq(FakeV4NetworkClientId)
-        dm._output_hosts_file()
-        self.safe.assert_has_calls([mock.call(exp_host_name,
-                                              exp_host_data)])
-
-    def test_only_populates_dhcp_enabled_subnet_on_a_network(self):
-        exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host'
-        exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,'
-                         '192.168.0.2\n'
-                         '00:00:f3:aa:bb:cc,host-192-168-0-3.openstacklocal.,'
-                         '192.168.0.3\n'
-                         '00:00:0f:aa:bb:cc,host-192-168-0-4.openstacklocal.,'
-                         '192.168.0.4\n'
-                         '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,'
-                         '192.168.0.1\n').lstrip()
-        dm = self._get_dnsmasq(FakeDualNetworkSingleDHCP())
-        dm._output_hosts_file()
-        self.safe.assert_has_calls([mock.call(exp_host_name,
-                                              exp_host_data)])
-
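-    # On a stateless DHCPv6 subnet the host file entry carries only the
-    # MAC and a set:<port id> tag (no address); the matching per-port
-    # options are selected through that tag in the opts file.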
-    def test_host_and_opts_file_on_stateless_dhcpv6_network(self):
-        exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host'
-        exp_host_data = ('00:16:3e:c2:77:1d,'
-                         'set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n').lstrip()
-        exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts'
-        exp_opt_data = ('tag:tag0,option6:domain-search,openstacklocal\n'
-                        'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,'
-                        'option6:dns-server,ffea:3ba5:a17a:4ba3::100').lstrip()
-        dm = self._get_dnsmasq(FakeV6NetworkStatelessDHCP())
-        dm._output_hosts_file()
-        dm._output_opts_file()
-        self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data),
-                                    mock.call(exp_opt_name, exp_opt_data)])
-
-    def test_host_file_on_net_with_v6_slaac_and_v4(self):
-        exp_host_name = '/dhcp/eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee/host'
-        exp_host_data = (
-            '00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,192.168.0.2,'
-            'set:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee\n'
-            '00:16:3E:C2:77:1D,host-192-168-0-4.openstacklocal.,192.168.0.4,'
-            'set:gggggggg-gggg-gggg-gggg-gggggggggggg\n00:00:0f:rr:rr:rr,'
-            'host-192-168-0-1.openstacklocal.,192.168.0.1,'
-            'set:rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr\n').lstrip()
-        dm = self._get_dnsmasq(FakeDualStackNetworkingSingleDHCPTags())
-        dm._output_hosts_file()
-        self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data)])
-
-    def test_host_and_opts_file_on_net_with_V6_stateless_and_V4_subnets(
-            self):
-        exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host'
-        exp_host_data = (
-            '00:16:3e:c2:77:1d,set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n'
-            '00:16:3e:c2:77:1d,host-192-168-0-3.openstacklocal.,'
-            '192.168.0.3,set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n'
-            '00:00:0f:rr:rr:rr,'
-            'host-192-168-0-1.openstacklocal.,192.168.0.1\n').lstrip()
-        exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts'
-        exp_opt_data = (
-            'tag:tag0,option6:domain-search,openstacklocal\n'
-            'tag:tag1,option:dns-server,8.8.8.8\n'
-            'tag:tag1,option:classless-static-route,20.0.0.1/24,20.0.0.1,'
-            '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag1,249,20.0.0.1/24,20.0.0.1,169.254.169.254/32,'
-            '192.168.0.1,0.0.0.0/0,192.168.0.1\n'
-            'tag:tag1,option:router,192.168.0.1\n'
-            'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,'
-            'option6:dns-server,ffea:3ba5:a17a:4ba3::100').lstrip()
-
-        dm = self._get_dnsmasq(FakeNetworkWithV6SatelessAndV4DHCPSubnets())
-        dm._output_hosts_file()
-        dm._output_opts_file()
-        self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data),
-                                    mock.call(exp_opt_name, exp_opt_data)])
-
-    def test_should_enable_metadata_isolated_network_returns_true(self):
-        self.assertTrue(dhcp.Dnsmasq.should_enable_metadata(
-            self.conf, FakeV4NetworkNoRouter()))
-
-    def test_should_enable_metadata_non_isolated_network_returns_false(self):
-        self.assertFalse(dhcp.Dnsmasq.should_enable_metadata(
-            self.conf, FakeV4NetworkDistRouter()))
-
-    def test_should_enable_metadata_isolated_meta_disabled_returns_false(self):
-        self.conf.set_override('enable_isolated_metadata', False)
-        self.assertFalse(dhcp.Dnsmasq.should_enable_metadata(self.conf,
-                                                             mock.ANY))
-
-    def test_should_enable_metadata_with_metadata_network_returns_true(self):
-        self.conf.set_override('enable_metadata_network', True)
-        self.assertTrue(dhcp.Dnsmasq.should_enable_metadata(
-            self.conf, FakeV4MetadataNetwork()))
-
-    def test_should_force_metadata_returns_true(self):
-        self.conf.set_override("force_metadata", True)
-        self.assertTrue(dhcp.Dnsmasq.should_enable_metadata(self.conf,
-                                                            mock.ANY))
-
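-    # The helper below renders the per-subnet dnsmasq options under the
-    # given config overrides and reports whether a /32 route to
-    # dhcp.METADATA_DEFAULT_IP was generated.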
-    def _test__generate_opts_per_subnet_helper(self, config_opts,
-                                               expected_mdt_ip):
-        for key, value in config_opts.items():
-            self.conf.set_override(key, value)
-        dm = self._get_dnsmasq(FakeNetworkDhcpPort)
-        with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as ipdev_mock:
-            list_addr = ipdev_mock.return_value.addr.list
-            list_addr.return_value = [{'cidr': alloc.ip_address + '/24'}
-                                      for alloc in FakeDhcpPort.fixed_ips]
-            options, idx_map = dm._generate_opts_per_subnet()
-
-        contains_metadata_ip = any(['%s/32' % dhcp.METADATA_DEFAULT_IP in line
-                                    for line in options])
-        self.assertEqual(expected_mdt_ip, contains_metadata_ip)
-
-    def test__generate_opts_per_subnet_no_metadata(self):
-        config = {'enable_isolated_metadata': False,
-                  'force_metadata': False}
-        self._test__generate_opts_per_subnet_helper(config, False)
-
-    def test__generate_opts_per_subnet_isolated_metadata_with_router(self):
-        config = {'enable_isolated_metadata': True,
-                  'force_metadata': False}
-        self._test__generate_opts_per_subnet_helper(config, True)
-
-    def test__generate_opts_per_subnet_forced_metadata(self):
-        config = {'enable_isolated_metadata': False,
-                  'force_metadata': True}
-        self._test__generate_opts_per_subnet_helper(config, True)
-
-
-class TestDeviceManager(TestConfBase):
-    def setUp(self):
-        super(TestDeviceManager, self).setUp()
-        ip_lib_patcher = mock.patch('neutron.agent.linux.dhcp.ip_lib')
-        load_interface_driver_patcher = mock.patch(
-            'neutron.agent.linux.dhcp.agent_common_utils.'
-            'load_interface_driver')
-        self.mock_ip_lib = ip_lib_patcher.start()
-        self.mock_load_interface_driver = load_interface_driver_patcher.start()
-
-    def _test_setup(self, load_interface_driver, ip_lib, use_gateway_ips):
-        # Create DeviceManager.
-        self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata',
-                                           default=False))
-        plugin = mock.Mock()
-        mgr = dhcp.DeviceManager(self.conf, plugin)
-        load_interface_driver.assert_called_with(self.conf)
-
-        # Setup with no existing DHCP port - expect a new DHCP port to
-        # be created.
-        network = FakeDeviceManagerNetwork()
-        network.tenant_id = 'Tenant A'
-
-        def mock_create(port_dict):
-            port = dhcp.DictModel(port_dict['port'])
-            port.id = 'abcd-123456789'
-            port.mac_address = '00-12-34-56-78-90'
-            port.fixed_ips = [
-                dhcp.DictModel({'subnet_id': ip['subnet_id'],
-                                'ip_address': 'unique-IP-address'})
-                for ip in port.fixed_ips
-            ]
-            return port
-
-        plugin.create_dhcp_port.side_effect = mock_create
-        mgr.driver.get_device_name.return_value = 'ns-XXX'
-        mgr.driver.use_gateway_ips = use_gateway_ips
-        ip_lib.ensure_device_is_ready.return_value = True
-        mgr.setup(network)
-        plugin.create_dhcp_port.assert_called_with(mock.ANY)
-
-        mgr.driver.init_l3.assert_called_with('ns-XXX',
-                                              mock.ANY,
-                                              namespace='qdhcp-ns')
-        cidrs = set(mgr.driver.init_l3.call_args[0][1])
-        if use_gateway_ips:
-            self.assertEqual(cidrs, set(['%s/%s' % (s.gateway_ip,
-                                                    s.cidr.split('/')[1])
-                                         for s in network.subnets]))
-        else:
-            self.assertEqual(cidrs, set(['unique-IP-address/24',
-                                         'unique-IP-address/64']))
-
-        # Now call setup again.  This time we go through the existing
-        # port code path, and the driver's init_l3 method is called
-        # again.
-        plugin.create_dhcp_port.reset_mock()
-        mgr.driver.init_l3.reset_mock()
-        mgr.setup(network)
-        mgr.driver.init_l3.assert_called_with('ns-XXX',
-                                              mock.ANY,
-                                              namespace='qdhcp-ns')
-        cidrs = set(mgr.driver.init_l3.call_args[0][1])
-        if use_gateway_ips:
-            self.assertEqual(cidrs, set(['%s/%s' % (s.gateway_ip,
-                                                    s.cidr.split('/')[1])
-                                         for s in network.subnets]))
-        else:
-            self.assertEqual(cidrs, set(['unique-IP-address/24',
-                                         'unique-IP-address/64']))
-        self.assertFalse(plugin.create_dhcp_port.called)
-
-    def test_setup_device_manager_dhcp_port_without_gateway_ips(self):
-        self._test_setup(self.mock_load_interface_driver,
-                         self.mock_ip_lib, use_gateway_ips=False)
-
-    def test_setup_device_manager_dhcp_port_with_gateway_ips(self):
-        self._test_setup(self.mock_load_interface_driver,
-                         self.mock_ip_lib, use_gateway_ips=True)
-
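-    # The reserved-port tests: when the network already contains a port
-    # reserved for DHCP, setup() should claim it via update_dhcp_port()
-    # instead of creating a new one.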
-    def test_setup_reserved(self):
-        """Test reserved port case of DeviceManager's DHCP port setup
-        logic.
-        """
-
-        # Create DeviceManager.
-        self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata',
-                                           default=False))
-        plugin = mock.Mock()
-        mgr = dhcp.DeviceManager(self.conf, plugin)
-        self.mock_load_interface_driver.assert_called_with(self.conf)
-
-        # Setup with a reserved DHCP port.
-        network = FakeDualNetworkReserved()
-        network.tenant_id = 'Tenant A'
-        reserved_port = network.ports[-1]
-
-        def mock_update(port_id, port_dict):
-            port = reserved_port
-            port.network_id = port_dict['port']['network_id']
-            port.device_id = port_dict['port']['device_id']
-            return port
-
-        plugin.update_dhcp_port.side_effect = mock_update
-        mgr.driver.get_device_name.return_value = 'ns-XXX'
-        mgr.driver.use_gateway_ips = False
-        self.mock_ip_lib.ensure_device_is_ready.return_value = True
-        mgr.setup(network)
-        plugin.update_dhcp_port.assert_called_with(reserved_port.id, mock.ANY)
-
-        mgr.driver.init_l3.assert_called_with('ns-XXX',
-                                              ['192.168.0.6/24'],
-                                              namespace='qdhcp-ns')
-
-    def test_setup_reserved_2(self):
-        """Test scenario where a network has two reserved ports, and
-        update_dhcp_port fails for the first of those.
-        """
-
-        # Create DeviceManager.
-        self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata',
-                                           default=False))
-        plugin = mock.Mock()
-        mgr = dhcp.DeviceManager(self.conf, plugin)
-        self.mock_load_interface_driver.assert_called_with(self.conf)
-
-        # Setup with a reserved DHCP port.
-        network = FakeDualNetworkReserved2()
-        network.tenant_id = 'Tenant A'
-        reserved_port_1 = network.ports[-2]
-        reserved_port_2 = network.ports[-1]
-
-        def mock_update(port_id, port_dict):
-            if port_id == reserved_port_1.id:
-                return None
-
-            port = reserved_port_2
-            port.network_id = port_dict['port']['network_id']
-            port.device_id = port_dict['port']['device_id']
-            return port
-
-        plugin.update_dhcp_port.side_effect = mock_update
-        mgr.driver.get_device_name.return_value = 'ns-XXX'
-        mgr.driver.use_gateway_ips = False
-        self.mock_ip_lib.ensure_device_is_ready.return_value = True
-        mgr.setup(network)
-        plugin.update_dhcp_port.assert_called_with(reserved_port_2.id,
-                                                   mock.ANY)
-
-        mgr.driver.init_l3.assert_called_with('ns-XXX',
-                                              ['192.168.0.6/24'],
-                                              namespace='qdhcp-ns')
-
-
-class TestDictModel(base.BaseTestCase):
-
-    def test_string_representation_port(self):
-        port = dhcp.DictModel({'id': 'id', 'network_id': 'net_id'})
-        self.assertEqual('id=id, network_id=net_id', str(port))
-
-    def test_string_representation_network(self):
-        net = dhcp.DictModel({'id': 'id', 'name': 'myname'})
-        self.assertEqual('id=id, name=myname', str(net))
diff --git a/neutron/tests/unit/agent/linux/test_external_process.py b/neutron/tests/unit/agent/linux/test_external_process.py
deleted file mode 100644 (file)
index e5ff672..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import os.path
-
-from neutron.agent.linux import external_process as ep
-from neutron.common import utils as common_utils
-from neutron.tests import base
-from neutron.tests import tools
-
-
-TEST_UUID = 'test-uuid'
-TEST_SERVICE = 'testsvc'
-TEST_PID = 1234
-
-
-class BaseTestProcessMonitor(base.BaseTestCase):
-
-    def setUp(self):
-        super(BaseTestProcessMonitor, self).setUp()
-        self.log_patch = mock.patch("neutron.agent.linux.external_process."
-                                    "LOG.error")
-        self.error_log = self.log_patch.start()
-
-        self.spawn_patch = mock.patch("eventlet.spawn")
-        self.eventlet_spawn = self.spawn_patch.start()
-
-        # create a default process monitor
-        self.create_child_process_monitor('respawn')
-
-    def create_child_process_monitor(self, action):
-        conf = mock.Mock()
-        conf.AGENT.check_child_processes_action = action
-        conf.AGENT.check_child_processes = True
-        self.pmonitor = ep.ProcessMonitor(
-            config=conf,
-            resource_type='test')
-
-    def get_monitored_process(self, uuid, service=None):
-        monitored_process = mock.Mock()
-        self.pmonitor.register(uuid=uuid,
-                               service_name=service,
-                               monitored_process=monitored_process)
-        return monitored_process
-
-
-class TestProcessMonitor(BaseTestProcessMonitor):
-
-    def test_error_logged(self):
-        pm = self.get_monitored_process(TEST_UUID)
-        pm.active = False
-        self.pmonitor._check_child_processes()
-        self.assertTrue(self.error_log.called)
-
-    def test_exit_handler(self):
-        self.create_child_process_monitor('exit')
-        pm = self.get_monitored_process(TEST_UUID)
-        pm.active = False
-        with mock.patch.object(ep.ProcessMonitor,
-                               '_exit_handler') as exit_handler:
-            self.pmonitor._check_child_processes()
-            exit_handler.assert_called_once_with(TEST_UUID, None)
-
-    def test_register(self):
-        pm = self.get_monitored_process(TEST_UUID)
-        self.assertEqual(len(self.pmonitor._monitored_processes), 1)
-        self.assertIn(pm, self.pmonitor._monitored_processes.values())
-
-    def test_register_same_service_twice(self):
-        self.get_monitored_process(TEST_UUID)
-        self.get_monitored_process(TEST_UUID)
-        self.assertEqual(len(self.pmonitor._monitored_processes), 1)
-
-    def test_register_different_service_types(self):
-        self.get_monitored_process(TEST_UUID)
-        self.get_monitored_process(TEST_UUID, TEST_SERVICE)
-        self.assertEqual(len(self.pmonitor._monitored_processes), 2)
-
-    def test_unregister(self):
-        self.get_monitored_process(TEST_UUID)
-        self.pmonitor.unregister(TEST_UUID, None)
-        self.assertEqual(len(self.pmonitor._monitored_processes), 0)
-
-    def test_unregister_unknown_process(self):
-        self.pmonitor.unregister(TEST_UUID, None)
-        self.assertEqual(len(self.pmonitor._monitored_processes), 0)
-
-
-class TestProcessManager(base.BaseTestCase):
-    def setUp(self):
-        super(TestProcessManager, self).setUp()
-        self.execute_p = mock.patch('neutron.agent.common.utils.execute')
-        self.execute = self.execute_p.start()
-        self.delete_if_exists = mock.patch(
-            'oslo_utils.fileutils.delete_if_exists').start()
-        self.ensure_dir = mock.patch.object(
-            common_utils, 'ensure_dir').start()
-
-        self.conf = mock.Mock()
-        self.conf.external_pids = '/var/path'
-
-    def test_processmanager_ensures_pid_dir(self):
-        pid_file = os.path.join(self.conf.external_pids, 'pid')
-        ep.ProcessManager(self.conf, 'uuid', pid_file=pid_file)
-        self.ensure_dir.assert_called_once_with(self.conf.external_pids)
-
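-    # enable() obtains the command line from the supplied callback (which
-    # receives the pid file path) and runs it, either directly or inside
-    # the configured namespace via ip_lib.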
-    def test_enable_no_namespace(self):
-        callback = mock.Mock()
-        callback.return_value = ['the', 'cmd']
-
-        with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
-            name.return_value = 'pidfile'
-            with mock.patch.object(ep.ProcessManager, 'active') as active:
-                active.__get__ = mock.Mock(return_value=False)
-
-                manager = ep.ProcessManager(self.conf, 'uuid')
-                manager.enable(callback)
-                callback.assert_called_once_with('pidfile')
-                self.execute.assert_called_once_with(['the', 'cmd'],
-                                                     check_exit_code=True,
-                                                     extra_ok_codes=None,
-                                                     run_as_root=False,
-                                                     log_fail_as_error=True)
-
-    def test_enable_with_namespace(self):
-        callback = mock.Mock()
-        callback.return_value = ['the', 'cmd']
-
-        with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
-            name.return_value = 'pidfile'
-            with mock.patch.object(ep.ProcessManager, 'active') as active:
-                active.__get__ = mock.Mock(return_value=False)
-
-                manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
-                with mock.patch.object(ep, 'ip_lib') as ip_lib:
-                    manager.enable(callback)
-                    callback.assert_called_once_with('pidfile')
-                    ip_lib.assert_has_calls([
-                        mock.call.IPWrapper(namespace='ns'),
-                        mock.call.IPWrapper().netns.execute(
-                            ['the', 'cmd'], addl_env=None, run_as_root=False)])
-
-    def test_enable_with_namespace_process_active(self):
-        callback = mock.Mock()
-        callback.return_value = ['the', 'cmd']
-
-        with mock.patch.object(ep.ProcessManager, 'active') as active:
-            active.__get__ = mock.Mock(return_value=True)
-
-            manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
-            with mock.patch.object(ep, 'ip_lib'):
-                manager.enable(callback)
-                self.assertFalse(callback.called)
-
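-    # disable() sends 'kill -9 <pid>' to an active process; for an
-    # inactive one (or with no pid at all) it only logs a debug message.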
-    def test_disable_no_namespace(self):
-        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
-            pid.__get__ = mock.Mock(return_value=4)
-            with mock.patch.object(ep.ProcessManager, 'active') as active:
-                active.__get__ = mock.Mock(return_value=True)
-                manager = ep.ProcessManager(self.conf, 'uuid')
-
-                with mock.patch.object(ep, 'utils') as utils:
-                    manager.disable()
-                    utils.assert_has_calls([
-                        mock.call.execute(['kill', '-9', 4],
-                                          run_as_root=True)])
-
-    def test_disable_namespace(self):
-        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
-            pid.__get__ = mock.Mock(return_value=4)
-            with mock.patch.object(ep.ProcessManager, 'active') as active:
-                active.__get__ = mock.Mock(return_value=True)
-
-                manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
-
-                with mock.patch.object(ep, 'utils') as utils:
-                    manager.disable()
-                    utils.assert_has_calls([
-                        mock.call.execute(['kill', '-9', 4],
-                                          run_as_root=True)])
-
-    def test_disable_not_active(self):
-        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
-            pid.__get__ = mock.Mock(return_value=4)
-            with mock.patch.object(ep.ProcessManager, 'active') as active:
-                active.__get__ = mock.Mock(return_value=False)
-                with mock.patch.object(ep.LOG, 'debug') as debug:
-                    manager = ep.ProcessManager(self.conf, 'uuid')
-                    manager.disable()
-                    debug.assert_called_once_with(mock.ANY, mock.ANY)
-
-    def test_disable_no_pid(self):
-        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
-            pid.__get__ = mock.Mock(return_value=None)
-            with mock.patch.object(ep.ProcessManager, 'active') as active:
-                active.__get__ = mock.Mock(return_value=False)
-                with mock.patch.object(ep.LOG, 'debug') as debug:
-                    manager = ep.ProcessManager(self.conf, 'uuid')
-                    manager.disable()
-                    debug.assert_called_once_with(mock.ANY, mock.ANY)
-
-    def test_get_pid_file_name_default(self):
-        manager = ep.ProcessManager(self.conf, 'uuid')
-        retval = manager.get_pid_file_name()
-        self.assertEqual(retval, '/var/path/uuid.pid')
-
-    def test_pid(self):
-        self.useFixture(tools.OpenFixture('/var/path/uuid.pid', '5'))
-        manager = ep.ProcessManager(self.conf, 'uuid')
-        self.assertEqual(manager.pid, 5)
-
-    def test_pid_not_an_int(self):
-        self.useFixture(tools.OpenFixture('/var/path/uuid.pid', 'foo'))
-        manager = ep.ProcessManager(self.conf, 'uuid')
-        self.assertIsNone(manager.pid)
-
-    def test_pid_invalid_file(self):
-        with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
-            name.return_value = '.doesnotexist/pid'
-            manager = ep.ProcessManager(self.conf, 'uuid')
-            self.assertIsNone(manager.pid)
-
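-    # 'active' is derived from /proc/<pid>/cmdline: the process counts as
-    # running only if its command line still references this uuid.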
-    def test_active(self):
-        mock_open = self.useFixture(
-            tools.OpenFixture('/proc/4/cmdline', 'python foo --router_id=uuid')
-        ).mock_open
-        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
-            pid.__get__ = mock.Mock(return_value=4)
-            manager = ep.ProcessManager(self.conf, 'uuid')
-            self.assertTrue(manager.active)
-
-        mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
-
-    def test_active_none(self):
-        dummy_cmd_line = 'python foo --router_id=uuid'
-        self.execute.return_value = dummy_cmd_line
-        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
-            pid.__get__ = mock.Mock(return_value=None)
-            manager = ep.ProcessManager(self.conf, 'uuid')
-            self.assertFalse(manager.active)
-
-    def test_active_cmd_mismatch(self):
-        mock_open = self.useFixture(
-            tools.OpenFixture('/proc/4/cmdline',
-                              'python foo --router_id=anotherid')
-        ).mock_open
-        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
-            pid.__get__ = mock.Mock(return_value=4)
-            manager = ep.ProcessManager(self.conf, 'uuid')
-            self.assertFalse(manager.active)
-
-        mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
diff --git a/neutron/tests/unit/agent/linux/test_interface.py b/neutron/tests/unit/agent/linux/test_interface.py
deleted file mode 100644 (file)
index ce008cf..0000000
+++ /dev/null
@@ -1,655 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import testtools
-
-from neutron.agent.common import config
-from neutron.agent.common import ovs_lib
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import constants
-from neutron.tests import base
-
-
-class BaseChild(interface.LinuxInterfaceDriver):
-    def plug_new(*args):
-        pass
-
-    def unplug(*args):
-        pass
-
-
-class FakeNetwork(object):
-    id = '12345678-1234-5678-90ab-ba0987654321'
-
-
-class FakeSubnet(object):
-    cidr = '192.168.1.1/24'
-
-
-class FakeAllocation(object):
-    subnet = FakeSubnet()
-    ip_address = '192.168.1.2'
-    ip_version = 4
-
-
-class FakePort(object):
-    id = 'abcdef01-1234-5678-90ab-ba0987654321'
-    fixed_ips = [FakeAllocation]
-    device_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-    network = FakeNetwork()
-    network_id = network.id
-
-
-class TestBase(base.BaseTestCase):
-    def setUp(self):
-        super(TestBase, self).setUp()
-        self.conf = config.setup_conf()
-        self.conf.register_opts(interface.OPTS)
-        self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice')
-        self.ip_dev = self.ip_dev_p.start()
-        self.ip_p = mock.patch.object(ip_lib, 'IPWrapper')
-        self.ip = self.ip_p.start()
-        self.device_exists_p = mock.patch.object(ip_lib, 'device_exists')
-        self.device_exists = self.device_exists_p.start()
-
-
-class TestABCDriver(TestBase):
-    def setUp(self):
-        super(TestABCDriver, self).setUp()
-        mock_link_addr = mock.PropertyMock(return_value='aa:bb:cc:dd:ee:ff')
-        type(self.ip_dev().link).address = mock_link_addr
-
-    def test_get_device_name(self):
-        bc = BaseChild(self.conf)
-        device_name = bc.get_device_name(FakePort())
-        self.assertEqual('tapabcdef01-12', device_name)
-
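-    # init_router_port() should sync device addresses (add requested
-    # CIDRs, delete stale ones) and reconcile on-link routes for any
-    # extra_subnets.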
-    def test_init_router_port(self):
-        addresses = [dict(scope='global',
-                          dynamic=False, cidr='172.16.77.240/24')]
-        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
-        self.ip_dev().route.list_onlink_routes.return_value = []
-
-        bc = BaseChild(self.conf)
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns,
-                            extra_subnets=[{'cidr': '172.20.0.0/24'}])
-        self.ip_dev.assert_has_calls(
-            [mock.call('tap0', namespace=ns),
-             mock.call().addr.list(filters=['permanent']),
-             mock.call().addr.add('192.168.1.2/24'),
-             mock.call().addr.delete('172.16.77.240/24'),
-             mock.call('tap0', namespace=ns),
-             mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
-             mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
-             mock.call().route.add_onlink_route('172.20.0.0/24')])
-
-    def test_init_router_port_delete_onlink_routes(self):
-        addresses = [dict(scope='global',
-                          dynamic=False, cidr='172.16.77.240/24')]
-        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
-        self.ip_dev().route.list_onlink_routes.return_value = [
-            {'cidr': '172.20.0.0/24'}]
-
-        bc = BaseChild(self.conf)
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns)
-        self.ip_dev.assert_has_calls(
-            [mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
-             mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
-             mock.call().route.delete_onlink_route('172.20.0.0/24')])
-
-    def test_l3_init_with_preserve(self):
-        addresses = [dict(scope='global',
-                          dynamic=False, cidr='192.168.1.3/32')]
-        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
-
-        bc = BaseChild(self.conf)
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns,
-                   preserve_ips=['192.168.1.3/32'])
-        self.ip_dev.assert_has_calls(
-            [mock.call('tap0', namespace=ns),
-             mock.call().addr.list(filters=['permanent']),
-             mock.call().addr.add('192.168.1.2/24')])
-        self.assertFalse(self.ip_dev().addr.delete.called)
-        self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called)
-
-    def _test_l3_init_clean_connections(self, clean_connections):
-        addresses = [
-            dict(scope='global', dynamic=False, cidr='10.0.0.1/24'),
-            dict(scope='global', dynamic=False, cidr='10.0.0.3/32')]
-        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
-
-        bc = BaseChild(self.conf)
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        bc.init_l3('tap0', ['10.0.0.1/24'], namespace=ns,
-                   clean_connections=clean_connections)
-
-        delete = self.ip_dev().delete_addr_and_conntrack_state
-        if clean_connections:
-            delete.assert_called_once_with('10.0.0.3/32')
-        else:
-            self.assertFalse(delete.called)
-
-    def test_l3_init_with_clean_connections(self):
-        self._test_l3_init_clean_connections(True)
-
-    def test_l3_init_without_clean_connections(self):
-        self._test_l3_init_clean_connections(False)
-
-    def test_init_router_port_ipv6_with_gw_ip(self):
-        addresses = [dict(scope='global',
-                          dynamic=False,
-                          cidr='2001:db8:a::123/64')]
-        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
-        self.ip_dev().route.list_onlink_routes.return_value = []
-
-        bc = BaseChild(self.conf)
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        new_cidr = '2001:db8:a::124/64'
-        kwargs = {'namespace': ns,
-                  'extra_subnets': [{'cidr': '2001:db8:b::/64'}]}
-        bc.init_router_port('tap0', [new_cidr], **kwargs)
-        expected_calls = (
-            [mock.call('tap0', namespace=ns),
-             mock.call().addr.list(filters=['permanent']),
-             mock.call().addr.add('2001:db8:a::124/64'),
-             mock.call().addr.delete('2001:db8:a::123/64')])
-        expected_calls += (
-             [mock.call('tap0', namespace=ns),
-              mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
-              mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
-              mock.call().route.add_onlink_route('2001:db8:b::/64')])
-        self.ip_dev.assert_has_calls(expected_calls)
-
-    def test_init_router_port_ext_gw_with_dual_stack(self):
-        old_addrs = [dict(ip_version=4, scope='global',
-                          dynamic=False, cidr='172.16.77.240/24'),
-                     dict(ip_version=6, scope='global',
-                          dynamic=False, cidr='2001:db8:a::123/64')]
-        self.ip_dev().addr.list = mock.Mock(return_value=old_addrs)
-        self.ip_dev().route.list_onlink_routes.return_value = []
-        bc = BaseChild(self.conf)
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        new_cidrs = ['192.168.1.2/24', '2001:db8:a::124/64']
-        bc.init_router_port('tap0', new_cidrs, namespace=ns,
-            extra_subnets=[{'cidr': '172.20.0.0/24'}])
-        self.ip_dev.assert_has_calls(
-            [mock.call('tap0', namespace=ns),
-             mock.call().addr.list(filters=['permanent']),
-             mock.call().addr.add('192.168.1.2/24'),
-             mock.call().addr.add('2001:db8:a::124/64'),
-             mock.call().addr.delete('172.16.77.240/24'),
-             mock.call().addr.delete('2001:db8:a::123/64'),
-             mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
-             mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
-             mock.call().route.add_onlink_route('172.20.0.0/24')],
-            any_order=True)
-
-    def test_init_router_port_with_ipv6_delete_onlink_routes(self):
-        addresses = [dict(scope='global',
-                          dynamic=False, cidr='2001:db8:a::123/64')]
-        route = '2001:db8:a::/64'
-        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
-        self.ip_dev().route.list_onlink_routes.return_value = [{'cidr': route}]
-
-        bc = BaseChild(self.conf)
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        bc.init_router_port('tap0', ['2001:db8:a::124/64'], namespace=ns)
-        self.ip_dev.assert_has_calls(
-            [mock.call().route.list_onlink_routes(constants.IP_VERSION_4),
-             mock.call().route.list_onlink_routes(constants.IP_VERSION_6),
-             mock.call().route.delete_onlink_route(route)])
-
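-    # An address already present on the device must not be added again,
-    # even when the requested CIDR uses the uncompressed IPv6 notation.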
-    def test_l3_init_with_duplicated_ipv6(self):
-        addresses = [dict(scope='global',
-                          dynamic=False,
-                          cidr='2001:db8:a::123/64')]
-        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
-        bc = BaseChild(self.conf)
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        bc.init_l3('tap0', ['2001:db8:a::123/64'], namespace=ns)
-        self.assertFalse(self.ip_dev().addr.add.called)
-
-    def test_l3_init_with_duplicated_ipv6_uncompact(self):
-        addresses = [dict(scope='global',
-                          dynamic=False,
-                          cidr='2001:db8:a::123/64')]
-        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
-        bc = BaseChild(self.conf)
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        bc.init_l3('tap0',
-                   ['2001:db8:a:0000:0000:0000:0000:0123/64'],
-                   namespace=ns)
-        self.assertFalse(self.ip_dev().addr.add.called)
-
-    def test_add_ipv6_addr(self):
-        device_name = 'tap0'
-        cidr = '2001:db8::/64'
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        bc = BaseChild(self.conf)
-
-        bc.add_ipv6_addr(device_name, cidr, ns)
-
-        self.ip_dev.assert_has_calls(
-            [mock.call(device_name, namespace=ns),
-             mock.call().addr.add(cidr, 'global')])
-
-    def test_delete_ipv6_addr(self):
-        device_name = 'tap0'
-        cidr = '2001:db8::/64'
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        bc = BaseChild(self.conf)
-
-        bc.delete_ipv6_addr(device_name, cidr, ns)
-
-        self.ip_dev.assert_has_calls(
-            [mock.call(device_name, namespace=ns),
-             mock.call().delete_addr_and_conntrack_state(cidr)])
-
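-    # delete_ipv6_addr_with_prefix is exercised in three stages: an empty
-    # address list, an address outside the given prefix, and an address
-    # inside it - only the last stage should trigger a delete.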
-    def test_delete_ipv6_addr_with_prefix(self):
-        device_name = 'tap0'
-        prefix = '2001:db8::/48'
-        in_cidr = '2001:db8::/64'
-        out_cidr = '2001:db7::/64'
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        in_addresses = [dict(scope='global',
-                        dynamic=False,
-                        cidr=in_cidr)]
-        out_addresses = [dict(scope='global',
-                         dynamic=False,
-                         cidr=out_cidr)]
-        # Initially set the address list to be empty
-        self.ip_dev().addr.list = mock.Mock(return_value=[])
-
-        bc = BaseChild(self.conf)
-
-        # Call delete_ipv6_addr_with_prefix when the address list is empty
-        bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns)
-        # Assert that delete isn't called
-        self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called)
-
-        # Set the address list to contain only an address outside of the range
-        # of the given prefix
-        self.ip_dev().addr.list = mock.Mock(return_value=out_addresses)
-        bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns)
-        # Assert that delete isn't called
-        self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called)
-
-        # Set the address list to contain only an address inside of the range
-        # of the given prefix
-        self.ip_dev().addr.list = mock.Mock(return_value=in_addresses)
-        bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns)
-        # Assert that delete is called
-        self.ip_dev.assert_has_calls(
-            [mock.call(device_name, namespace=ns),
-             mock.call().addr.list(scope='global', filters=['permanent']),
-             mock.call().delete_addr_and_conntrack_state(in_cidr)])
-
-    def test_get_ipv6_llas(self):
-        ns = '12345678-1234-5678-90ab-ba0987654321'
-        addresses = [dict(scope='link',
-                          dynamic=False,
-                          cidr='fe80:cafe::/64')]
-        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
-        device_name = self.ip_dev().name
-        bc = BaseChild(self.conf)
-
-        llas = bc.get_ipv6_llas(device_name, ns)
-
-        self.assertEqual(addresses, llas)
-        self.ip_dev.assert_has_calls(
-            [mock.call(device_name, namespace=ns),
-             mock.call().addr.list(scope='link', ip_version=6)])
-
-
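-# None of the driver tests below touch a real system: ip_lib and ovs_lib
-# are replaced with mocks by the shared TestBase fixture, so each test
-# only verifies the sequence of calls recorded while plugging or
-# unplugging a port.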
-class TestOVSInterfaceDriver(TestBase):
-
-    def test_get_device_name(self):
-        br = interface.OVSInterfaceDriver(self.conf)
-        device_name = br.get_device_name(FakePort())
-        self.assertEqual('tapabcdef01-12', device_name)
-
-    def test_plug_no_ns(self):
-        self._test_plug()
-
-    def test_plug_with_ns(self):
-        self._test_plug(namespace='01234567-1234-1234-99')
-
-    def test_plug_alt_bridge(self):
-        self._test_plug(bridge='br-foo')
-
-    def test_plug_configured_bridge(self):
-        br = 'br-v'
-        self.conf.set_override('ovs_use_veth', False)
-        self.conf.set_override('ovs_integration_bridge', br)
-        self.assertEqual(self.conf.ovs_integration_bridge, br)
-
-        def device_exists(dev, namespace=None):
-            return dev == br
-
-        ovs = interface.OVSInterfaceDriver(self.conf)
-        with mock.patch.object(ovs, '_ovs_add_port') as add_port:
-            self.device_exists.side_effect = device_exists
-            ovs.plug('01234567-1234-1234-99',
-                     'port-1234',
-                     'tap0',
-                     'aa:bb:cc:dd:ee:ff',
-                     bridge=None,
-                     namespace=None)
-
-        add_port.assert_called_once_with('br-v',
-                                         'tap0',
-                                         'port-1234',
-                                         'aa:bb:cc:dd:ee:ff',
-                                         internal=True)
-
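-    # Shared helper for the plug tests above: device_exists is faked so
-    # that only the bridge appears to exist, forcing the driver to create
-    # the tap device, and the resulting ip_lib call sequence is verified.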
-    def _test_plug(self, additional_expectation=None, bridge=None,
-                   namespace=None):
-        additional_expectation = additional_expectation or []
-        if not bridge:
-            bridge = 'br-int'
-
-        def device_exists(dev, namespace=None):
-            return dev == bridge
-
-        with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace:
-            ovs = interface.OVSInterfaceDriver(self.conf)
-            self.device_exists.side_effect = device_exists
-            ovs.plug('01234567-1234-1234-99',
-                     'port-1234',
-                     'tap0',
-                     'aa:bb:cc:dd:ee:ff',
-                     bridge=bridge,
-                     namespace=namespace)
-            replace.assert_called_once_with(
-                'tap0',
-                ('type', 'internal'),
-                ('external_ids', {
-                    'iface-id': 'port-1234',
-                    'iface-status': 'active',
-                    'attached-mac': 'aa:bb:cc:dd:ee:ff'}))
-
-        expected = [mock.call(),
-                    mock.call().device('tap0'),
-                    mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')]
-        expected.extend(additional_expectation)
-        if namespace:
-            expected.extend(
-                [mock.call().ensure_namespace(namespace),
-                 mock.call().ensure_namespace().add_device_to_namespace(
-                     mock.ANY)])
-        expected.extend([mock.call().device().link.set_up()])
-
-        self.ip.assert_has_calls(expected)
-
-    def test_mtu_int(self):
-        self.assertIsNone(self.conf.network_device_mtu)
-        self.conf.set_override('network_device_mtu', 9000)
-        self.assertEqual(self.conf.network_device_mtu, 9000)
-
-    def test_validate_min_ipv6_mtu(self):
-        self.conf.set_override('network_device_mtu', 1200)
-        with mock.patch('neutron.common.ipv6_utils.is_enabled') as ipv6_status:
-            with testtools.ExpectedException(SystemExit):
-                ipv6_status.return_value = True
-                BaseChild(self.conf)
-
-    def test_plug_mtu(self):
-        self.conf.set_override('network_device_mtu', 9000)
-        self._test_plug([mock.call().device().link.set_mtu(9000)])
-
-    def test_unplug(self, bridge=None):
-        if not bridge:
-            bridge = 'br-int'
-        with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs_br:
-            ovs = interface.OVSInterfaceDriver(self.conf)
-            ovs.unplug('tap0')
-            ovs_br.assert_has_calls([mock.call(bridge),
-                                     mock.call().delete_port('tap0')])
-
-
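-# Re-runs the OVS driver tests with ovs_use_veth=True. plug() then creates
-# a veth pair instead of an internal OVS port, so _test_plug is overridden
-# to check both ends of the pair.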
-class TestOVSInterfaceDriverWithVeth(TestOVSInterfaceDriver):
-
-    def setUp(self):
-        super(TestOVSInterfaceDriverWithVeth, self).setUp()
-        self.conf.set_override('ovs_use_veth', True)
-
-    def test_get_device_name(self):
-        br = interface.OVSInterfaceDriver(self.conf)
-        device_name = br.get_device_name(FakePort())
-        self.assertEqual('ns-abcdef01-12', device_name)
-
-    def test_plug_with_prefix(self):
-        self._test_plug(devname='qr-0', prefix='qr-')
-
-    def _test_plug(self, devname=None, bridge=None, namespace=None,
-                   prefix=None, mtu=None):
-
-        if not devname:
-            devname = 'ns-0'
-        if not bridge:
-            bridge = 'br-int'
-
-        def device_exists(dev, namespace=None):
-            return dev == bridge
-
-        ovs = interface.OVSInterfaceDriver(self.conf)
-        self.device_exists.side_effect = device_exists
-
-        root_dev = mock.Mock()
-        ns_dev = mock.Mock()
-        self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev))
-        expected = [mock.call(),
-                    mock.call().add_veth('tap0', devname,
-                                         namespace2=namespace)]
-
-        with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace:
-            ovs.plug('01234567-1234-1234-99',
-                     'port-1234',
-                     devname,
-                     'aa:bb:cc:dd:ee:ff',
-                     bridge=bridge,
-                     namespace=namespace,
-                     prefix=prefix)
-            replace.assert_called_once_with(
-                'tap0',
-                ('external_ids', {
-                    'iface-id': 'port-1234',
-                    'iface-status': 'active',
-                    'attached-mac': 'aa:bb:cc:dd:ee:ff'}))
-
-        ns_dev.assert_has_calls(
-            [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')])
-        if mtu:
-            ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
-            root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
-
-        self.ip.assert_has_calls(expected)
-        root_dev.assert_has_calls([mock.call.link.set_up()])
-        ns_dev.assert_has_calls([mock.call.link.set_up()])
-
-    def test_plug_mtu(self):
-        self.conf.set_override('network_device_mtu', 9000)
-        self._test_plug(mtu=9000)
-
-    def test_unplug(self, bridge=None):
-        if not bridge:
-            bridge = 'br-int'
-        with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs_br:
-            ovs = interface.OVSInterfaceDriver(self.conf)
-            ovs.unplug('ns-0', bridge=bridge)
-            ovs_br.assert_has_calls([mock.call(bridge),
-                                     mock.call().delete_port('tap0')])
-        self.ip_dev.assert_has_calls([mock.call('ns-0', namespace=None),
-                                      mock.call().link.delete()])
-
-
-class TestBridgeInterfaceDriver(TestBase):
-    def test_get_device_name(self):
-        br = interface.BridgeInterfaceDriver(self.conf)
-        device_name = br.get_device_name(FakePort())
-        self.assertEqual('ns-abcdef01-12', device_name)
-
-    def test_plug_no_ns(self):
-        self._test_plug()
-
-    def test_plug_with_ns(self):
-        self._test_plug(namespace='01234567-1234-1234-99')
-
-    def _test_plug(self, namespace=None, mtu=None):
-        def device_exists(device, namespace=None):
-            return device.startswith('brq')
-
-        root_veth = mock.Mock()
-        ns_veth = mock.Mock()
-
-        self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth))
-
-        self.device_exists.side_effect = device_exists
-        br = interface.BridgeInterfaceDriver(self.conf)
-        mac_address = 'aa:bb:cc:dd:ee:ff'
-        br.plug('01234567-1234-1234-99',
-                'port-1234',
-                'ns-0',
-                mac_address,
-                namespace=namespace)
-
-        ip_calls = [mock.call(),
-                    mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)]
-        ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)])
-        if mtu:
-            ns_veth.assert_has_calls([mock.call.link.set_mtu(mtu)])
-            root_veth.assert_has_calls([mock.call.link.set_mtu(mtu)])
-
-        self.ip.assert_has_calls(ip_calls)
-
-        root_veth.assert_has_calls([mock.call.link.set_up()])
-        ns_veth.assert_has_calls([mock.call.link.set_up()])
-
-    def test_plug_dev_exists(self):
-        self.device_exists.return_value = True
-        with mock.patch('neutron.agent.linux.interface.LOG.info') as log:
-            br = interface.BridgeInterfaceDriver(self.conf)
-            br.plug('01234567-1234-1234-99',
-                    'port-1234',
-                    'tap0',
-                    'aa:bb:cc:dd:ee:ff')
-            self.assertFalse(self.ip_dev.called)
-            self.assertEqual(log.call_count, 1)
-
-    def test_plug_mtu(self):
-        self.device_exists.return_value = False
-        self.conf.set_override('network_device_mtu', 9000)
-        self._test_plug(mtu=9000)
-
-    def test_unplug_no_device(self):
-        self.device_exists.return_value = False
-        self.ip_dev().link.delete.side_effect = RuntimeError
-        with mock.patch('neutron.agent.linux.interface.LOG') as log:
-            br = interface.BridgeInterfaceDriver(self.conf)
-            br.unplug('tap0')
-            self.ip_dev.assert_has_calls([mock.call('tap0', namespace=None),
-                                          mock.call().link.delete()])
-            self.assertEqual(log.error.call_count, 1)
-
-    def test_unplug(self):
-        self.device_exists.return_value = True
-        with mock.patch('neutron.agent.linux.interface.LOG.debug') as log:
-            br = interface.BridgeInterfaceDriver(self.conf)
-            br.unplug('tap0')
-            self.assertEqual(log.call_count, 1)
-
-        self.ip_dev.assert_has_calls([mock.call('tap0', namespace=None),
-                                      mock.call().link.delete()])
-
-
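-# The IVS driver shells out to ivs-ctl instead of going through ovs_lib,
-# so these tests patch utils.execute and assert on the exact command line
-# that would have been run.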
-class TestIVSInterfaceDriver(TestBase):
-
-    def setUp(self):
-        super(TestIVSInterfaceDriver, self).setUp()
-
-    def test_get_device_name(self):
-        br = interface.IVSInterfaceDriver(self.conf)
-        device_name = br.get_device_name(FakePort())
-        self.assertEqual('ns-abcdef01-12', device_name)
-
-    def test_plug_with_prefix(self):
-        self._test_plug(devname='qr-0', prefix='qr-')
-
-    def _test_plug(self, devname=None, namespace=None,
-                   prefix=None, mtu=None):
-
-        if not devname:
-            devname = 'ns-0'
-
-        def device_exists(dev, namespace=None):
-            return dev == 'indigo'
-
-        ivs = interface.IVSInterfaceDriver(self.conf)
-        self.device_exists.side_effect = device_exists
-
-        root_dev = mock.Mock()
-        _ns_dev = mock.Mock()
-        ns_dev = mock.Mock()
-        self.ip().add_veth = mock.Mock(return_value=(root_dev, _ns_dev))
-        self.ip().device = mock.Mock(return_value=(ns_dev))
-        expected = [mock.call(), mock.call().add_veth('tap0', devname),
-                    mock.call().device(devname)]
-
-        ivsctl_cmd = ['ivs-ctl', 'add-port', 'tap0']
-
-        with mock.patch.object(utils, 'execute') as execute:
-            ivs.plug('01234567-1234-1234-99',
-                     'port-1234',
-                     devname,
-                     'aa:bb:cc:dd:ee:ff',
-                     namespace=namespace,
-                     prefix=prefix)
-            execute.assert_called_once_with(ivsctl_cmd, run_as_root=True)
-
-        ns_dev.assert_has_calls(
-            [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')])
-        if mtu:
-            ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
-            root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
-        if namespace:
-            expected.extend(
-                [mock.call().ensure_namespace(namespace),
-                 mock.call().ensure_namespace().add_device_to_namespace(
-                     mock.ANY)])
-
-        self.ip.assert_has_calls(expected)
-        root_dev.assert_has_calls([mock.call.link.set_up()])
-        ns_dev.assert_has_calls([mock.call.link.set_up()])
-
-    def test_plug_mtu(self):
-        self.conf.set_override('network_device_mtu', 9000)
-        self._test_plug(mtu=9000)
-
-    def test_plug_namespace(self):
-        self._test_plug(namespace='mynamespace')
-
-    def test_unplug(self):
-        ivs = interface.IVSInterfaceDriver(self.conf)
-        ivsctl_cmd = ['ivs-ctl', 'del-port', 'tap0']
-        with mock.patch.object(utils, 'execute') as execute:
-            ivs.unplug('ns-0')
-            execute.assert_called_once_with(ivsctl_cmd, run_as_root=True)
-            self.ip_dev.assert_has_calls([mock.call('ns-0', namespace=None),
-                                          mock.call().link.delete()])
diff --git a/neutron/tests/unit/agent/linux/test_ip_lib.py b/neutron/tests/unit/agent/linux/test_ip_lib.py
deleted file mode 100644 (file)
index bde8843..0000000
+++ /dev/null
@@ -1,1347 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import netaddr
-from oslo_config import cfg
-import testtools
-
-from neutron.agent.common import utils  # noqa
-from neutron.agent.linux import ip_lib
-from neutron.common import exceptions
-from neutron.tests import base
-
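-# The constants below are canned outputs of various 'ip' subcommands; the
-# tests feed them to the ip_lib parsers rather than running real commands.
-# NETNS_SAMPLE_IPROUTE2_4 mirrors iproute2 >= 4.0, which appends an
-# '(id: N)' suffix that get_namespaces() must strip.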
-NETNS_SAMPLE = [
-    '12345678-1234-5678-abcd-1234567890ab',
-    'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
-    'cccccccc-cccc-cccc-cccc-cccccccccccc']
-
-NETNS_SAMPLE_IPROUTE2_4 = [
-    '12345678-1234-5678-abcd-1234567890ab (id: 1)',
-    'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb (id: 0)',
-    'cccccccc-cccc-cccc-cccc-cccccccccccc (id: 2)']
-
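-# 'ip -o link' style output; entries 5-14 deliberately use device names
-# containing ':' and '@' to exercise the trickier parsing paths.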
-LINK_SAMPLE = [
-    '1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
-    'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0',
-    '2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
-    'qlen 1000\    link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
-    '\    alias openvswitch',
-    '3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
-    '\    link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff promiscuity 0',
-    '4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
-    'state DOWN \    link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff '
-    'promiscuity 0',
-    '5: foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
-    'UP qlen 1000\    link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
-    'promiscuity 0',
-    '6: foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
-    'UP qlen 1000\    link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
-    'promiscuity 0',
-    '7: foo:foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
-    'state UP qlen 1000'
-    '\    link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
-    '8: foo@foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
-    'state UP qlen 1000'
-    '\    link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
-    '9: bar.9@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
-    ' noqueue master brq0b24798c-07 state UP mode DEFAULT'
-    '\    link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
-    '\    vlan protocol 802.1q id 9 <REORDER_HDR>',
-    '10: bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
-    ' noqueue master brq0b24798c-07 state UP mode DEFAULT'
-    '\    link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
-    '\    vlan protocol 802.1Q id 10 <REORDER_HDR>',
-    '11: bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
-    'state UP qlen 1000'
-    '\    link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
-    '\    vlan id 11 <REORDER_HDR>',
-    '12: bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
-    'state UP qlen 1000'
-    '\    link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
-    '\    vlan id 12 <REORDER_HDR>',
-    '13: bar:bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
-    'qdisc mq state UP qlen 1000'
-    '\    link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
-    '\    vlan protocol 802.1q id 13 <REORDER_HDR>',
-    '14: bar@bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
-    'qdisc mq state UP qlen 1000'
-    '\    link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
-    '\    vlan protocol 802.1Q id 14 <REORDER_HDR>']
-
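-# 'ip addr show' style output mixing global and link scopes with
-# tentative, dadfailed, deprecated and dynamic IPv6 addresses, so that
-# IpAddrCommand.list() parses every flag combination.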
-ADDR_SAMPLE = ("""
-2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
-    link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
-    inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
-    inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
-       valid_lft 14187sec preferred_lft 3387sec
-    inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
-        valid_lft forever preferred_lft forever
-    inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
-        valid_lft forever preferred_lft forever
-    inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
-               """deprecated dynamic
-       valid_lft 14187sec preferred_lft 0sec
-    inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
-               """deprecated dynamic
-       valid_lft 14187sec preferred_lft 0sec
-    inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
-       valid_lft 14187sec preferred_lft 3387sec
-    inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
-       valid_lft forever preferred_lft forever
-""")
-
-ADDR_SAMPLE2 = ("""
-2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
-    link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
-    inet 172.16.77.240/24 scope global eth0
-    inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
-       valid_lft 14187sec preferred_lft 3387sec
-    inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
-        valid_lft forever preferred_lft forever
-    inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
-        valid_lft forever preferred_lft forever
-    inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
-                """deprecated dynamic
-       valid_lft 14187sec preferred_lft 0sec
-    inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
-                """deprecated dynamic
-       valid_lft 14187sec preferred_lft 0sec
-    inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
-       valid_lft 14187sec preferred_lft 3387sec
-    inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
-       valid_lft forever preferred_lft forever
-""")
-
-
-ADDR_SAMPLE3 = ("""
-2: eth0@NONE: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP
-    link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
-    inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
-""")
-
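-# Variations of 'ip route' default-route output (with and without a
-# metric, 'proto static', or a device-only route) used to drive
-# get_gateway().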
-GATEWAY_SAMPLE1 = ("""
-default via 10.35.19.254  metric 100
-10.35.16.0/22  proto kernel  scope link  src 10.35.17.97
-""")
-
-GATEWAY_SAMPLE2 = ("""
-default via 10.35.19.254  metric 100
-""")
-
-GATEWAY_SAMPLE3 = ("""
-10.35.16.0/22  proto kernel  scope link  src 10.35.17.97
-""")
-
-GATEWAY_SAMPLE4 = ("""
-default via 10.35.19.254
-""")
-
-GATEWAY_SAMPLE5 = ("""
-default via 192.168.99.1 proto static
-""")
-
-GATEWAY_SAMPLE6 = ("""
-default via 192.168.99.1 proto static metric 100
-""")
-
-GATEWAY_SAMPLE7 = ("""
-default dev qg-31cd36 metric 1
-""")
-
-IPv6_GATEWAY_SAMPLE1 = ("""
-default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
-2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
-""")
-
-IPv6_GATEWAY_SAMPLE2 = ("""
-default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
-""")
-
-IPv6_GATEWAY_SAMPLE3 = ("""
-2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
-""")
-
-IPv6_GATEWAY_SAMPLE4 = ("""
-default via fe80::dfcc:aaff:feb9:76ce
-""")
-
-IPv6_GATEWAY_SAMPLE5 = ("""
-default via 2001:470:9:1224:4508:b885:5fb:740b metric 1024
-""")
-
-DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24  scope link  src 10.0.0.2")
-
-SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2  scope link  src 10.0.0.1\n"
-                  "10.0.0.0/24 dev tap1d7888a7-10  scope link  src 10.0.0.2")
-SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10  scope link  src 10.0.0.2\n"
-                  "10.0.0.0/24 dev qr-23380d11-d2  scope link  src 10.0.0.1")
-
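-# 'ip rule' listings used to verify that IpRuleCommand.add() first checks
-# the existing rules and skips the add when a matching rule is present.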
-RULE_V4_SAMPLE = ("""
-0:      from all lookup local
-32766:  from all lookup main
-32767:  from all lookup default
-101:    from 192.168.45.100 lookup 2
-""")
-
-RULE_V6_SAMPLE = ("""
-0:      from all lookup local
-32766:  from all lookup main
-32767:  from all lookup default
-201:    from 2001:db8::1 lookup 3
-""")
-
-
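-# SubProcessBase assembles 'ip' command lines from (options, command,
-# args) tuples, e.g. _execute(['o'], 'link', ('list',)) runs
-# ['ip', '-o', 'link', 'list']; namespaced invocations are wrapped in
-# 'ip netns exec <ns>' and always require root.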
-class TestSubProcessBase(base.BaseTestCase):
-    def setUp(self):
-        super(TestSubProcessBase, self).setUp()
-        self.execute_p = mock.patch('neutron.agent.common.utils.execute')
-        self.execute = self.execute_p.start()
-
-    def test_execute_wrapper(self):
-        ip_lib.SubProcessBase._execute(['o'], 'link', ('list',),
-                                       run_as_root=True)
-
-        self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
-                                             run_as_root=True,
-                                             log_fail_as_error=True)
-
-    def test_execute_wrapper_int_options(self):
-        ip_lib.SubProcessBase._execute([4], 'link', ('list',))
-
-        self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
-                                             run_as_root=False,
-                                             log_fail_as_error=True)
-
-    def test_execute_wrapper_no_options(self):
-        ip_lib.SubProcessBase._execute([], 'link', ('list',))
-
-        self.execute.assert_called_once_with(['ip', 'link', 'list'],
-                                             run_as_root=False,
-                                             log_fail_as_error=True)
-
-    def test_run_no_namespace(self):
-        base = ip_lib.SubProcessBase()
-        base._run([], 'link', ('list',))
-        self.execute.assert_called_once_with(['ip', 'link', 'list'],
-                                             run_as_root=False,
-                                             log_fail_as_error=True)
-
-    def test_run_namespace(self):
-        base = ip_lib.SubProcessBase(namespace='ns')
-        base._run([], 'link', ('list',))
-        self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
-                                              'ip', 'link', 'list'],
-                                             run_as_root=True,
-                                             log_fail_as_error=True)
-
-    def test_as_root_namespace(self):
-        base = ip_lib.SubProcessBase(namespace='ns')
-        base._as_root([], 'link', ('list',))
-        self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
-                                              'ip', 'link', 'list'],
-                                             run_as_root=True,
-                                             log_fail_as_error=True)
-
-
-class TestIpWrapper(base.BaseTestCase):
-    def setUp(self):
-        super(TestIpWrapper, self).setUp()
-        self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
-        self.execute = self.execute_p.start()
-
-    @mock.patch('os.path.islink')
-    @mock.patch('os.listdir', return_value=['lo'])
-    def test_get_devices(self, mocked_listdir, mocked_islink):
-        retval = ip_lib.IPWrapper().get_devices()
-        mocked_islink.assert_called_once_with('/sys/class/net/lo')
-        self.assertEqual(retval, [ip_lib.IPDevice('lo')])
-
-    @mock.patch('neutron.agent.common.utils.execute')
-    def test_get_devices_namespaces(self, mocked_execute):
-        fake_str = mock.Mock()
-        fake_str.split.return_value = ['lo']
-        mocked_execute.return_value = fake_str
-        retval = ip_lib.IPWrapper(namespace='foo').get_devices()
-        mocked_execute.assert_called_once_with(
-                ['ip', 'netns', 'exec', 'foo', 'find', '/sys/class/net',
-                 '-maxdepth', '1', '-type', 'l', '-printf', '%f '],
-                run_as_root=True, log_fail_as_error=True)
-        self.assertTrue(fake_str.split.called)
-        self.assertEqual(retval, [ip_lib.IPDevice('lo', namespace='foo')])
-
-    def test_get_namespaces(self):
-        self.execute.return_value = '\n'.join(NETNS_SAMPLE)
-        cfg.CONF.AGENT.use_helper_for_ns_read = True
-        retval = ip_lib.IPWrapper.get_namespaces()
-        self.assertEqual(retval,
-                         ['12345678-1234-5678-abcd-1234567890ab',
-                          'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
-                          'cccccccc-cccc-cccc-cccc-cccccccccccc'])
-
-        self.execute.assert_called_once_with([], 'netns', ['list'],
-                                             run_as_root=True)
-
-    def test_get_namespaces_iproute2_4(self):
-        self.execute.return_value = '\n'.join(NETNS_SAMPLE_IPROUTE2_4)
-        cfg.CONF.AGENT.use_helper_for_ns_read = True
-        retval = ip_lib.IPWrapper.get_namespaces()
-        self.assertEqual(retval,
-                         ['12345678-1234-5678-abcd-1234567890ab',
-                          'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
-                          'cccccccc-cccc-cccc-cccc-cccccccccccc'])
-
-        self.execute.assert_called_once_with([], 'netns', ['list'],
-                                             run_as_root=True)
-
-    @mock.patch('os.listdir', return_value=NETNS_SAMPLE)
-    def test_get_namespaces_listdir(self, mocked_listdir):
-        cfg.CONF.AGENT.use_helper_for_ns_read = False
-        retval = ip_lib.IPWrapper.get_namespaces()
-        self.assertEqual(retval,
-                         ['12345678-1234-5678-abcd-1234567890ab',
-                          'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
-                          'cccccccc-cccc-cccc-cccc-cccccccccccc'])
-        mocked_listdir.assert_called_once_with(ip_lib.IP_NETNS_PATH)
-
-    def test_add_tuntap(self):
-        ip_lib.IPWrapper().add_tuntap('tap0')
-        self.execute.assert_called_once_with([], 'tuntap',
-                                             ('add', 'tap0', 'mode', 'tap'),
-                                             run_as_root=True, namespace=None,
-                                             log_fail_as_error=True)
-
-    def test_add_veth(self):
-        ip_lib.IPWrapper().add_veth('tap0', 'tap1')
-        self.execute.assert_called_once_with([], 'link',
-                                             ('add', 'tap0', 'type', 'veth',
-                                              'peer', 'name', 'tap1'),
-                                             run_as_root=True, namespace=None,
-                                             log_fail_as_error=True)
-
-    def test_del_veth(self):
-        ip_lib.IPWrapper().del_veth('fpr-1234')
-        self.execute.assert_called_once_with([], 'link',
-                                             ('del', 'fpr-1234'),
-                                             run_as_root=True, namespace=None,
-                                             log_fail_as_error=True)
-
-    def test_add_veth_with_namespaces(self):
-        ns2 = 'ns2'
-        with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
-            ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2)
-            en.assert_has_calls([mock.call(ns2)])
-        self.execute.assert_called_once_with([], 'link',
-                                             ('add', 'tap0', 'type', 'veth',
-                                              'peer', 'name', 'tap1',
-                                              'netns', ns2),
-                                             run_as_root=True, namespace=None,
-                                             log_fail_as_error=True)
-
-    def test_add_dummy(self):
-        ip_lib.IPWrapper().add_dummy('dummy0')
-        self.execute.assert_called_once_with([], 'link',
-                                             ('add', 'dummy0',
-                                              'type', 'dummy'),
-                                             run_as_root=True, namespace=None,
-                                             log_fail_as_error=True)
-
-    def test_get_device(self):
-        dev = ip_lib.IPWrapper(namespace='ns').device('eth0')
-        self.assertEqual(dev.namespace, 'ns')
-        self.assertEqual(dev.name, 'eth0')
-
-    def test_ensure_namespace(self):
-        with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
-            ip = ip_lib.IPWrapper()
-            with mock.patch.object(ip.netns, 'exists') as ns_exists:
-                with mock.patch('neutron.agent.common.utils.execute'):
-                    ns_exists.return_value = False
-                    ip.ensure_namespace('ns')
-                    self.execute.assert_has_calls(
-                        [mock.call([], 'netns', ('add', 'ns'),
-                                   run_as_root=True, namespace=None,
-                                   log_fail_as_error=True)])
-                    ip_dev.assert_has_calls([mock.call('lo', namespace='ns'),
-                                             mock.call().link.set_up()])
-
-    def test_ensure_namespace_existing(self):
-        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
-            ip_ns_cmd.exists.return_value = True
-            ns = ip_lib.IPWrapper().ensure_namespace('ns')
-            self.assertFalse(self.execute.called)
-            self.assertEqual(ns.namespace, 'ns')
-
-    def test_namespace_is_empty_no_devices(self):
-        ip = ip_lib.IPWrapper(namespace='ns')
-        with mock.patch.object(ip, 'get_devices') as get_devices:
-            get_devices.return_value = []
-
-            self.assertTrue(ip.namespace_is_empty())
-            get_devices.assert_called_once_with(exclude_loopback=True)
-
-    def test_namespace_is_empty(self):
-        ip = ip_lib.IPWrapper(namespace='ns')
-        with mock.patch.object(ip, 'get_devices') as get_devices:
-            get_devices.return_value = [mock.Mock()]
-
-            self.assertFalse(ip.namespace_is_empty())
-            get_devices.assert_called_once_with(exclude_loopback=True)
-
-    def test_garbage_collect_namespace_does_not_exist(self):
-        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
-            ip_ns_cmd_cls.return_value.exists.return_value = False
-            ip = ip_lib.IPWrapper(namespace='ns')
-            with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
-
-                self.assertFalse(ip.garbage_collect_namespace())
-                ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
-                self.assertNotIn(mock.call().delete('ns'),
-                                 ip_ns_cmd_cls.return_value.mock_calls)
-                self.assertEqual([], mock_is_empty.mock_calls)
-
-    def test_garbage_collect_namespace_existing_empty_ns(self):
-        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
-            ip_ns_cmd_cls.return_value.exists.return_value = True
-
-            ip = ip_lib.IPWrapper(namespace='ns')
-
-            with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
-                mock_is_empty.return_value = True
-                self.assertTrue(ip.garbage_collect_namespace())
-
-                mock_is_empty.assert_called_once_with()
-                expected = [mock.call().exists('ns'),
-                            mock.call().delete('ns')]
-                ip_ns_cmd_cls.assert_has_calls(expected)
-
-    def test_garbage_collect_namespace_existing_not_empty(self):
-        lo_device = mock.Mock()
-        lo_device.name = 'lo'
-        tap_device = mock.Mock()
-        tap_device.name = 'tap1'
-
-        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
-            ip_ns_cmd_cls.return_value.exists.return_value = True
-
-            ip = ip_lib.IPWrapper(namespace='ns')
-
-            with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
-                mock_is_empty.return_value = False
-
-                self.assertFalse(ip.garbage_collect_namespace())
-
-                mock_is_empty.assert_called_once_with()
-                expected = [mock.call(ip),
-                            mock.call().exists('ns')]
-                self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
-                self.assertNotIn(mock.call().delete('ns'),
-                                 ip_ns_cmd_cls.mock_calls)
-
-    def test_add_vxlan_valid_port_length(self):
-        retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0',
-                                              group='group0',
-                                              dev='dev0', ttl='ttl0',
-                                              tos='tos0',
-                                              local='local0', proxy=True,
-                                              port=('1', '2'))
-        self.assertIsInstance(retval, ip_lib.IPDevice)
-        self.assertEqual(retval.name, 'vxlan0')
-        self.execute.assert_called_once_with([], 'link',
-                                             ['add', 'vxlan0', 'type',
-                                              'vxlan', 'id', 'vni0', 'group',
-                                              'group0', 'dev', 'dev0',
-                                              'ttl', 'ttl0', 'tos', 'tos0',
-                                              'local', 'local0', 'proxy',
-                                              'port', '1', '2'],
-                                             run_as_root=True, namespace=None,
-                                             log_fail_as_error=True)
-
-    def test_add_vxlan_invalid_port_length(self):
-        wrapper = ip_lib.IPWrapper()
-        self.assertRaises(exceptions.NetworkVxlanPortRangeError,
-                          wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0',
-                          dev='dev0', ttl='ttl0', tos='tos0',
-                          local='local0', proxy=True,
-                          port=('1', '2', '3'))
-
-    def test_add_device_to_namespace(self):
-        dev = mock.Mock()
-        ip_lib.IPWrapper(namespace='ns').add_device_to_namespace(dev)
-        dev.assert_has_calls([mock.call.link.set_netns('ns')])
-
-    def test_add_device_to_namespace_is_none(self):
-        dev = mock.Mock()
-        ip_lib.IPWrapper().add_device_to_namespace(dev)
-        self.assertEqual([], dev.mock_calls)
-
-
-class TestIPDevice(base.BaseTestCase):
-    def test_eq_same_name(self):
-        dev1 = ip_lib.IPDevice('tap0')
-        dev2 = ip_lib.IPDevice('tap0')
-        self.assertEqual(dev1, dev2)
-
-    def test_eq_diff_name(self):
-        dev1 = ip_lib.IPDevice('tap0')
-        dev2 = ip_lib.IPDevice('tap1')
-        self.assertNotEqual(dev1, dev2)
-
-    def test_eq_same_namespace(self):
-        dev1 = ip_lib.IPDevice('tap0', 'ns1')
-        dev2 = ip_lib.IPDevice('tap0', 'ns1')
-        self.assertEqual(dev1, dev2)
-
-    def test_eq_diff_namespace(self):
-        dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
-        dev2 = ip_lib.IPDevice('tap0', namespace='ns2')
-        self.assertNotEqual(dev1, dev2)
-
-    def test_eq_other_is_none(self):
-        dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
-        self.assertIsNotNone(dev1)
-
-    def test_str(self):
-        self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
-
-
-class TestIPCommandBase(base.BaseTestCase):
-    def setUp(self):
-        super(TestIPCommandBase, self).setUp()
-        self.ip = mock.Mock()
-        self.ip.namespace = 'namespace'
-        self.ip_cmd = ip_lib.IpCommandBase(self.ip)
-        self.ip_cmd.COMMAND = 'foo'
-
-    def test_run(self):
-        self.ip_cmd._run([], ('link', 'show'))
-        self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])
-
-    def test_run_with_options(self):
-        self.ip_cmd._run(['o'], ('link',))
-        self.ip.assert_has_calls([mock.call._run(['o'], 'foo', ('link',))])
-
-    def test_as_root_namespace_false(self):
-        self.ip_cmd._as_root([], ('link',))
-        self.ip.assert_has_calls(
-            [mock.call._as_root([],
-                                'foo',
-                                ('link',),
-                                use_root_namespace=False)])
-
-    def test_as_root_namespace_true(self):
-        self.ip_cmd._as_root([], ('link',), use_root_namespace=True)
-        self.ip.assert_has_calls(
-            [mock.call._as_root([],
-                                'foo',
-                                ('link',),
-                                use_root_namespace=True)])
-
-    def test_as_root_namespace_true_with_options(self):
-        self.ip_cmd._as_root('o', ('link',), use_root_namespace=True)
-        self.ip.assert_has_calls(
-            [mock.call._as_root('o',
-                                'foo',
-                                ('link',),
-                                use_root_namespace=True)])
-
-
-class TestIPDeviceCommandBase(base.BaseTestCase):
-    def setUp(self):
-        super(TestIPDeviceCommandBase, self).setUp()
-        self.ip_dev = mock.Mock()
-        self.ip_dev.name = 'eth0'
-        self.ip_dev._execute = mock.Mock(return_value='executed')
-        self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
-        self.ip_cmd.COMMAND = 'foo'
-
-    def test_name_property(self):
-        self.assertEqual(self.ip_cmd.name, 'eth0')
-
-
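-# Shared fixture for the per-command tests below: _assert_call and
-# _assert_sudo verify that a command object delegates to its parent's
-# _run/_as_root with the expected options and arguments.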
-class TestIPCmdBase(base.BaseTestCase):
-    def setUp(self):
-        super(TestIPCmdBase, self).setUp()
-        self.parent = mock.Mock()
-        self.parent.name = 'eth0'
-
-    def _assert_call(self, options, args):
-        self.parent._run.assert_has_calls([
-            mock.call(options, self.command, args)])
-
-    def _assert_sudo(self, options, args, use_root_namespace=False):
-        self.parent._as_root.assert_has_calls(
-            [mock.call(options, self.command, args,
-                       use_root_namespace=use_root_namespace)])
-
-
-class TestIpRuleCommand(TestIPCmdBase):
-    def setUp(self):
-        super(TestIpRuleCommand, self).setUp()
-        self.parent._as_root.return_value = ''
-        self.command = 'rule'
-        self.rule_cmd = ip_lib.IpRuleCommand(self.parent)
-
-    def _test_add_rule(self, ip, table, priority):
-        ip_version = netaddr.IPNetwork(ip).version
-        self.rule_cmd.add(ip, table=table, priority=priority)
-        self._assert_sudo([ip_version], (['show']))
-        self._assert_sudo([ip_version], ('add', 'from', ip,
-                                         'priority', str(priority),
-                                         'table', str(table),
-                                         'type', 'unicast'))
-
-    def _test_add_rule_exists(self, ip, table, priority, output):
-        self.parent._as_root.return_value = output
-        ip_version = netaddr.IPNetwork(ip).version
-        self.rule_cmd.add(ip, table=table, priority=priority)
-        self._assert_sudo([ip_version], (['show']))
-
-    def _test_delete_rule(self, ip, table, priority):
-        ip_version = netaddr.IPNetwork(ip).version
-        self.rule_cmd.delete(ip, table=table, priority=priority)
-        self._assert_sudo([ip_version],
-                          ('del', 'priority', str(priority),
-                           'table', str(table), 'type', 'unicast'))
-
-    def test__parse_line(self):
-        def test(ip_version, line, expected):
-            actual = self.rule_cmd._parse_line(ip_version, line)
-            self.assertEqual(expected, actual)
-
-        test(4, "4030201:\tfrom 1.2.3.4/24 lookup 10203040",
-             {'from': '1.2.3.4/24',
-              'table': '10203040',
-              'type': 'unicast',
-              'priority': '4030201'})
-        test(6, "1024:    from all iif qg-c43b1928-48 lookup noscope",
-             {'priority': '1024',
-              'from': '::/0',
-              'type': 'unicast',
-              'iif': 'qg-c43b1928-48',
-              'table': 'noscope'})
-
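-    # _make_canonical normalizes the fields parsed from 'ip rule' output:
-    # 'all' becomes 0.0.0.0/0 or ::/0, a bare fwmark gains a full mask,
-    # and every rule gets an explicit 'unicast' type.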
-    def test__make_canonical_all_v4(self):
-        actual = self.rule_cmd._make_canonical(4, {'from': 'all'})
-        self.assertEqual({'from': '0.0.0.0/0', 'type': 'unicast'}, actual)
-
-    def test__make_canonical_all_v6(self):
-        actual = self.rule_cmd._make_canonical(6, {'from': 'all'})
-        self.assertEqual({'from': '::/0', 'type': 'unicast'}, actual)
-
-    def test__make_canonical_lookup(self):
-        actual = self.rule_cmd._make_canonical(6, {'lookup': 'table'})
-        self.assertEqual({'table': 'table', 'type': 'unicast'}, actual)
-
-    def test__make_canonical_iif(self):
-        actual = self.rule_cmd._make_canonical(6, {'iif': 'iface_name'})
-        self.assertEqual({'iif': 'iface_name', 'type': 'unicast'}, actual)
-
-    def test__make_canonical_fwmark(self):
-        actual = self.rule_cmd._make_canonical(6, {'fwmark': '0x400'})
-        self.assertEqual({'fwmark': '0x400/0xffffffff',
-                          'type': 'unicast'}, actual)
-
-    def test__make_canonical_fwmark_with_mask(self):
-        actual = self.rule_cmd._make_canonical(6, {'fwmark': '0x400/0x00ff'})
-        self.assertEqual({'fwmark': '0x400/0xff', 'type': 'unicast'}, actual)
-
-    def test__make_canonical_fwmark_integer(self):
-        actual = self.rule_cmd._make_canonical(6, {'fwmark': 0x400})
-        self.assertEqual({'fwmark': '0x400/0xffffffff',
-                          'type': 'unicast'}, actual)
-
-    def test__make_canonical_fwmark_iterable(self):
-        actual = self.rule_cmd._make_canonical(6, {'fwmark': (0x400, 0xffff)})
-        self.assertEqual({'fwmark': '0x400/0xffff', 'type': 'unicast'}, actual)
-
-    def test_add_rule_v4(self):
-        self._test_add_rule('192.168.45.100', 2, 100)
-
-    def test_add_rule_v4_exists(self):
-        self._test_add_rule_exists('192.168.45.100', 2, 101, RULE_V4_SAMPLE)
-
-    def test_add_rule_v6(self):
-        self._test_add_rule('2001:db8::1', 3, 200)
-
-    def test_add_rule_v6_exists(self):
-        self._test_add_rule_exists('2001:db8::1', 3, 201, RULE_V6_SAMPLE)
-
-    def test_delete_rule_v4(self):
-        self._test_delete_rule('192.168.45.100', 2, 100)
-
-    def test_delete_rule_v6(self):
-        self._test_delete_rule('2001:db8::1', 3, 200)
-
-
-class TestIpLinkCommand(TestIPCmdBase):
-    def setUp(self):
-        super(TestIpLinkCommand, self).setUp()
-        self.parent._run.return_value = LINK_SAMPLE[1]
-        self.command = 'link'
-        self.link_cmd = ip_lib.IpLinkCommand(self.parent)
-
-    def test_set_address(self):
-        self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
-        self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))
-
-    def test_set_mtu(self):
-        self.link_cmd.set_mtu(1500)
-        self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))
-
-    def test_set_up(self):
-        observed = self.link_cmd.set_up()
-        self.assertEqual(self.parent._as_root.return_value, observed)
-        self._assert_sudo([], ('set', 'eth0', 'up'))
-
-    def test_set_down(self):
-        observed = self.link_cmd.set_down()
-        self.assertEqual(self.parent._as_root.return_value, observed)
-        self._assert_sudo([], ('set', 'eth0', 'down'))
-
-    def test_set_netns(self):
-        self.link_cmd.set_netns('foo')
-        self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
-        self.assertEqual(self.parent.namespace, 'foo')
-
-    def test_set_name(self):
-        self.link_cmd.set_name('tap1')
-        self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
-        self.assertEqual(self.parent.name, 'tap1')
-
-    def test_set_alias(self):
-        self.link_cmd.set_alias('openvswitch')
-        self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))
-
-    def test_delete(self):
-        self.link_cmd.delete()
-        self._assert_sudo([], ('delete', 'eth0'))
-
-    def test_address_property(self):
-        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
-        self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')
-
-    def test_mtu_property(self):
-        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
-        self.assertEqual(self.link_cmd.mtu, 1500)
-
-    def test_qdisc_property(self):
-        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
-        self.assertEqual(self.link_cmd.qdisc, 'mq')
-
-    def test_qlen_property(self):
-        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
-        self.assertEqual(self.link_cmd.qlen, 1000)
-
-    def test_alias_property(self):
-        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
-        self.assertEqual(self.link_cmd.alias, 'openvswitch')
-
-    def test_state_property(self):
-        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
-        self.assertEqual(self.link_cmd.state, 'UP')
-
-    def test_settings_property(self):
-        expected = {'mtu': 1500,
-                    'qlen': 1000,
-                    'state': 'UP',
-                    'qdisc': 'mq',
-                    'brd': 'ff:ff:ff:ff:ff:ff',
-                    'link/ether': 'cc:dd:ee:ff:ab:cd',
-                    'alias': 'openvswitch'}
-        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
-        self.assertEqual(self.link_cmd.attributes, expected)
-        self._assert_call(['o'], ('show', 'eth0'))
-
-
-class TestIpAddrCommand(TestIPCmdBase):
-    def setUp(self):
-        super(TestIpAddrCommand, self).setUp()
-        self.parent.name = 'tap0'
-        self.command = 'addr'
-        self.addr_cmd = ip_lib.IpAddrCommand(self.parent)
-
-    def test_add_address(self):
-        self.addr_cmd.add('192.168.45.100/24')
-        self._assert_sudo([4],
-                          ('add', '192.168.45.100/24',
-                           'scope', 'global',
-                           'dev', 'tap0',
-                           'brd', '192.168.45.255'))
-
-    def test_add_address_scoped(self):
-        self.addr_cmd.add('192.168.45.100/24', scope='link')
-        self._assert_sudo([4],
-                          ('add', '192.168.45.100/24',
-                           'scope', 'link',
-                           'dev', 'tap0',
-                           'brd', '192.168.45.255'))
-
-    def test_del_address(self):
-        self.addr_cmd.delete('192.168.45.100/24')
-        self._assert_sudo([4],
-                          ('del', '192.168.45.100/24', 'dev', 'tap0'))
-
-    def test_flush(self):
-        self.addr_cmd.flush(6)
-        self._assert_sudo([6], ('flush', 'tap0'))
-
-    def test_list(self):
-        expected = [
-            dict(name='eth0', scope='global', dadfailed=False, tentative=False,
-                 dynamic=False, cidr='172.16.77.240/24'),
-            dict(name='eth0', scope='global', dadfailed=False, tentative=False,
-                 dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64'),
-            dict(name='eth0', scope='link', dadfailed=False, tentative=True,
-                 dynamic=False, cidr='fe80::3023:39ff:febc:22ae/64'),
-            dict(name='eth0', scope='link', dadfailed=True, tentative=True,
-                 dynamic=False, cidr='fe80::3023:39ff:febc:22af/64'),
-            dict(name='eth0', scope='global', dadfailed=False, tentative=False,
-                 dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64'),
-            dict(name='eth0', scope='global', dadfailed=False, tentative=False,
-                 dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64'),
-            dict(name='eth0', scope='global', dadfailed=False, tentative=False,
-                 dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64'),
-            dict(name='eth0', scope='link', dadfailed=False, tentative=False,
-                 dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64')]
-
-        test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
-
-        for test_case in test_cases:
-            self.parent._run = mock.Mock(return_value=test_case)
-            self.assertEqual(expected, self.addr_cmd.list())
-            self._assert_call([], ('show', 'tap0'))
-
-    def test_wait_until_address_ready(self):
-        self.parent._run.return_value = ADDR_SAMPLE
-        # this address is not tentative or failed so it should return
-        self.assertIsNone(self.addr_cmd.wait_until_address_ready(
-            '2001:470:9:1224:fd91:272:581e:3a32'))
-
-    def test_wait_until_address_ready_non_existent_address(self):
-        self.addr_cmd.list = mock.Mock(return_value=[])
-        with testtools.ExpectedException(ip_lib.AddressNotReady):
-            self.addr_cmd.wait_until_address_ready('abcd::1234')
-
-    def test_wait_until_address_ready_timeout(self):
-        tentative_address = 'fe80::3023:39ff:febc:22ae'
-        self.addr_cmd.list = mock.Mock(return_value=[
-            dict(scope='link', dadfailed=False, tentative=True, dynamic=False,
-                 cidr=tentative_address + '/64')])
-        with testtools.ExpectedException(ip_lib.AddressNotReady):
-            self.addr_cmd.wait_until_address_ready(tentative_address,
-                                                   wait_time=1)
-
-    def test_list_filtered(self):
-        expected = [
-            dict(name='eth0', scope='global', tentative=False, dadfailed=False,
-                 dynamic=False, cidr='172.16.77.240/24')]
-
-        test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
-
-        for test_case in test_cases:
-            output = '\n'.join(test_case.split('\n')[0:4])
-            self.parent._run.return_value = output
-            self.assertEqual(self.addr_cmd.list('global',
-                             filters=['permanent']), expected)
-            self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
-                              'global'))
-
-    def test_get_devices_with_ip(self):
-        self.parent._run.return_value = ADDR_SAMPLE3
-        devices = self.addr_cmd.get_devices_with_ip('172.16.77.240/24')
-        self.assertEqual(1, len(devices))
-        self.assertEqual('eth0', devices[0]['name'])
-
-
-class TestIpRouteCommand(TestIPCmdBase):
-    def setUp(self):
-        super(TestIpRouteCommand, self).setUp()
-        self.parent.name = 'eth0'
-        self.command = 'route'
-        self.route_cmd = ip_lib.IpRouteCommand(self.parent)
-        self.ip_version = 4
-        self.table = 14
-        self.metric = 100
-        self.cidr = '192.168.45.100/24'
-        self.ip = '10.0.0.1'
-        self.gateway = '192.168.45.100'
-        self.test_cases = [{'sample': GATEWAY_SAMPLE1,
-                            'expected': {'gateway': '10.35.19.254',
-                                         'metric': 100}},
-                           {'sample': GATEWAY_SAMPLE2,
-                            'expected': {'gateway': '10.35.19.254',
-                                         'metric': 100}},
-                           {'sample': GATEWAY_SAMPLE3,
-                            'expected': None},
-                           {'sample': GATEWAY_SAMPLE4,
-                            'expected': {'gateway': '10.35.19.254'}},
-                           {'sample': GATEWAY_SAMPLE5,
-                            'expected': {'gateway': '192.168.99.1'}},
-                           {'sample': GATEWAY_SAMPLE6,
-                            'expected': {'gateway': '192.168.99.1',
-                                         'metric': 100}},
-                           {'sample': GATEWAY_SAMPLE7,
-                            'expected': {'metric': 1}}]
-
-    def test_add_gateway(self):
-        self.route_cmd.add_gateway(self.gateway, self.metric, self.table)
-        self._assert_sudo([self.ip_version],
-                          ('replace', 'default',
-                           'via', self.gateway,
-                           'metric', self.metric,
-                           'dev', self.parent.name,
-                           'table', self.table))
-
-    def test_add_gateway_subtable(self):
-        self.route_cmd.table(self.table).add_gateway(self.gateway, self.metric)
-        self._assert_sudo([self.ip_version],
-                          ('replace', 'default',
-                           'via', self.gateway,
-                           'metric', self.metric,
-                           'dev', self.parent.name,
-                           'table', self.table))
-
-    def test_del_gateway_success(self):
-        self.route_cmd.delete_gateway(self.gateway, table=self.table)
-        self._assert_sudo([self.ip_version],
-                          ('del', 'default',
-                           'via', self.gateway,
-                           'dev', self.parent.name,
-                           'table', self.table))
-
-    def test_del_gateway_success_subtable(self):
-        self.route_cmd.table(table=self.table).delete_gateway(self.gateway)
-        self._assert_sudo([self.ip_version],
-                          ('del', 'default',
-                           'via', self.gateway,
-                           'dev', self.parent.name,
-                           'table', self.table))
-
-    def test_del_gateway_cannot_find_device(self):
-        self.parent._as_root.side_effect = RuntimeError("Cannot find device")
-
-        exc = self.assertRaises(exceptions.DeviceNotFoundError,
-                                self.route_cmd.delete_gateway,
-                                self.gateway, table=self.table)
-        self.assertIn(self.parent.name, str(exc))
-
-    def test_del_gateway_other_error(self):
-        self.parent._as_root.side_effect = RuntimeError()
-
-        self.assertRaises(RuntimeError, self.route_cmd.delete_gateway,
-                          self.gateway, table=self.table)
-
-    def test_get_gateway(self):
-        for test_case in self.test_cases:
-            self.parent._run = mock.Mock(return_value=test_case['sample'])
-            self.assertEqual(self.route_cmd.get_gateway(),
-                             test_case['expected'])
-
-    def test_pullup_route(self):
-        # NOTE(brian-haley) Currently we have no IPv6-specific use case for
-        # pullup_route, hence skipping.  Revisit in the future if required.
-        if self.ip_version == 6:
-            return
-        # interface is not the first in the list - requires
-        # deleting and creating existing entries
-        output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
-
-        def pullup_side_effect(*args):
-            return output.pop(0)
-
-        self.parent._run = mock.Mock(side_effect=pullup_side_effect)
-        self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
-        self._assert_sudo([4], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
-        self._assert_sudo([4], ('append', '10.0.0.0/24', 'proto', 'kernel',
-                                'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
-
-    def test_pullup_route_first(self):
-        # NOTE(brian-haley) Currently we have no IPv6-specific use case for
-        # pullup_route, hence skipping.  Revisit in the future if required.
-        if self.ip_version == 6:
-            return
-        # interface is first in the list - no changes
-        output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
-
-        def pullup_side_effect(*args):
-            return output.pop(0)
-
-        self.parent._run = mock.Mock(side_effect=pullup_side_effect)
-        self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
-        # Check two calls - device get and subnet get
-        self.assertEqual(2, len(self.parent._run.mock_calls))
-
-    def test_add_route(self):
-        self.route_cmd.add_route(self.cidr, self.ip, self.table)
-        self._assert_sudo([self.ip_version],
-                          ('replace', self.cidr,
-                           'via', self.ip,
-                           'dev', self.parent.name,
-                           'table', self.table))
-
-    def test_add_route_no_via(self):
-        self.route_cmd.add_route(self.cidr, table=self.table)
-        self._assert_sudo([self.ip_version],
-                          ('replace', self.cidr,
-                           'dev', self.parent.name,
-                           'table', self.table))
-
-    def test_add_route_with_scope(self):
-        self.route_cmd.add_route(self.cidr, scope='link')
-        self._assert_sudo([self.ip_version],
-                          ('replace', self.cidr,
-                           'dev', self.parent.name,
-                           'scope', 'link'))
-
-    def test_add_route_no_device(self):
-        self.parent._as_root.side_effect = RuntimeError("Cannot find device")
-        self.assertRaises(exceptions.DeviceNotFoundError,
-                          self.route_cmd.add_route,
-                          self.cidr, self.ip, self.table)
-
-    def test_delete_route(self):
-        self.route_cmd.delete_route(self.cidr, self.ip, self.table)
-        self._assert_sudo([self.ip_version],
-                          ('del', self.cidr,
-                           'via', self.ip,
-                           'dev', self.parent.name,
-                           'table', self.table))
-
-    def test_delete_route_no_via(self):
-        self.route_cmd.delete_route(self.cidr, table=self.table)
-        self._assert_sudo([self.ip_version],
-                          ('del', self.cidr,
-                           'dev', self.parent.name,
-                           'table', self.table))
-
-    def test_delete_route_with_scope(self):
-        self.route_cmd.delete_route(self.cidr, scope='link')
-        self._assert_sudo([self.ip_version],
-                          ('del', self.cidr,
-                           'dev', self.parent.name,
-                           'scope', 'link'))
-
-    def test_delete_route_no_device(self):
-        self.parent._as_root.side_effect = RuntimeError("Cannot find device")
-        self.assertRaises(exceptions.DeviceNotFoundError,
-                          self.route_cmd.delete_route,
-                          self.cidr, self.ip, self.table)
-
-    def test_list_routes(self):
-        self.parent._run.return_value = (
-            "default via 172.124.4.1 dev eth0 metric 100\n"
-            "10.0.0.0/22 dev eth0 scope link\n"
-            "172.24.4.0/24 dev eth0 proto kernel src 172.24.4.2\n")
-        routes = self.route_cmd.table(self.table).list_routes(self.ip_version)
-        self.assertEqual([{'cidr': '0.0.0.0/0',
-                           'dev': 'eth0',
-                           'metric': '100',
-                           'table': 14,
-                           'via': '172.124.4.1'},
-                          {'cidr': '10.0.0.0/22',
-                           'dev': 'eth0',
-                           'scope': 'link',
-                           'table': 14},
-                          {'cidr': '172.24.4.0/24',
-                           'dev': 'eth0',
-                           'proto': 'kernel',
-                           'src': '172.24.4.2',
-                           'table': 14}], routes)
-
-    def test_list_onlink_routes_subtable(self):
-        self.parent._run.return_value = (
-            "10.0.0.0/22\n"
-            "172.24.4.0/24 proto kernel src 172.24.4.2\n")
-        routes = self.route_cmd.table(self.table).list_onlink_routes(
-            self.ip_version)
-        self.assertEqual(['10.0.0.0/22'], [r['cidr'] for r in routes])
-        self._assert_call([self.ip_version],
-                          ('list', 'dev', self.parent.name,
-                           'table', self.table, 'scope', 'link'))
-
-    def test_add_onlink_route_subtable(self):
-        self.route_cmd.table(self.table).add_onlink_route(self.cidr)
-        self._assert_sudo([self.ip_version],
-                          ('replace', self.cidr,
-                           'dev', self.parent.name,
-                           'table', self.table,
-                           'scope', 'link'))
-
-    def test_delete_onlink_route_subtable(self):
-        self.route_cmd.table(self.table).delete_onlink_route(self.cidr)
-        self._assert_sudo([self.ip_version],
-                          ('del', self.cidr,
-                           'dev', self.parent.name,
-                           'table', self.table,
-                           'scope', 'link'))
-
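-
-# NOTE: The GATEWAY_SAMPLE cases above exercise default-route parsing.  The
-# sketch below shows the kind of parsing under test; it is a hypothetical
-# helper, not the real ip_lib implementation of get_gateway().
-def _parse_default_route(route_output):
-    """Pull 'via' and 'metric' out of the default route line, if any."""
-    for line in route_output.splitlines():
-        fields = line.strip().split()
-        if not fields or fields[0] != 'default':
-            continue
-        route = {}
-        if 'via' in fields:
-            route['gateway'] = fields[fields.index('via') + 1]
-        if 'metric' in fields:
-            route['metric'] = int(fields[fields.index('metric') + 1])
-        return route or None
-    return None
-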
-
-class TestIPv6IpRouteCommand(TestIpRouteCommand):
-    def setUp(self):
-        super(TestIPv6IpRouteCommand, self).setUp()
-        self.ip_version = 6
-        self.cidr = '2001:db8::/64'
-        self.ip = '2001:db8::100'
-        self.gateway = '2001:db8::1'
-        self.test_cases = [{'sample': IPv6_GATEWAY_SAMPLE1,
-                            'expected':
-                            {'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
-                             'metric': 100}},
-                           {'sample': IPv6_GATEWAY_SAMPLE2,
-                            'expected':
-                            {'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
-                             'metric': 100}},
-                           {'sample': IPv6_GATEWAY_SAMPLE3,
-                            'expected': None},
-                           {'sample': IPv6_GATEWAY_SAMPLE4,
-                            'expected':
-                            {'gateway': 'fe80::dfcc:aaff:feb9:76ce'}},
-                           {'sample': IPv6_GATEWAY_SAMPLE5,
-                            'expected':
-                            {'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
-                             'metric': 1024}}]
-
-    def test_list_routes(self):
-        self.parent._run.return_value = (
-            "default via 2001:db8::1 dev eth0 metric 100\n"
-            "2001:db8::/64 dev eth0 proto kernel src 2001:db8::2\n")
-        routes = self.route_cmd.table(self.table).list_routes(self.ip_version)
-        self.assertEqual([{'cidr': '::/0',
-                           'dev': 'eth0',
-                           'metric': '100',
-                           'table': 14,
-                           'via': '2001:db8::1'},
-                          {'cidr': '2001:db8::/64',
-                           'dev': 'eth0',
-                           'proto': 'kernel',
-                           'src': '2001:db8::2',
-                           'table': 14}], routes)
-
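-
-# NOTE: test_list_routes (in both the IPv4 and IPv6 variants above) expects
-# each route line to be flattened into a dict, with the bare 'default'
-# destination normalized to a cidr and the table id attached.  A rough
-# sketch of that flattening, assuming every field after the destination
-# comes in key/value pairs (hypothetical helper, not the real parser):
-def _route_line_to_dict(line, default_cidr, table):
-    fields = line.split()
-    cidr = fields.pop(0)
-    route = {'cidr': default_cidr if cidr == 'default' else cidr,
-             'table': table}
-    # Remaining fields alternate key, value: 'via' X, 'dev' Y, 'metric' Z...
-    route.update(dict(zip(fields[::2], fields[1::2])))
-    return route
-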
-
-class TestIPRoute(TestIpRouteCommand):
-    """Leverage existing tests for IpRouteCommand for IPRoute
-
-    This test leverages the tests written for IpRouteCommand.  The difference
-    is that the 'dev' argument should not be passed for each of the commands.
-    So, this test removes the dev argument from the expected arguments in each
-    assert.
-    """
-    def setUp(self):
-        super(TestIPRoute, self).setUp()
-        self.parent = ip_lib.IPRoute()
-        self.parent._run = mock.Mock()
-        self.parent._as_root = mock.Mock()
-        self.route_cmd = self.parent.route
-        self.check_dev_args = False
-
-    def _remove_dev_args(self, args):
-        def args_without_dev():
-            previous = None
-            for arg in args:
-                if 'dev' not in (arg, previous):
-                    yield arg
-                previous = arg
-
-        return tuple(args_without_dev())
-
-    def _assert_call(self, options, args):
-        if not self.check_dev_args:
-            args = self._remove_dev_args(args)
-        super(TestIPRoute, self)._assert_call(options, args)
-
-    def _assert_sudo(self, options, args, use_root_namespace=False):
-        if not self.check_dev_args:
-            args = self._remove_dev_args(args)
-        super(TestIPRoute, self)._assert_sudo(options, args)
-
-    def test_pullup_route(self):
-        # This method gets the interface name passed to it as an argument.  So,
-        # don't remove it from the expected arguments.
-        self.check_dev_args = True
-        super(TestIPRoute, self).test_pullup_route()
-
-    def test_del_gateway_cannot_find_device(self):
-        # This test doesn't make sense for this case since dev won't be passed
-        pass
-
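-
-# NOTE: _remove_dev_args above drops the 'dev' flag and its value from a
-# flat argument tuple.  The same pairwise-skip idea, generalized to any
-# 'flag value' pair (illustrative sketch only):
-def _strip_flag(args, flag):
-    result = []
-    skip = False
-    for arg in args:
-        if skip:
-            skip = False
-        elif arg == flag:
-            skip = True
-        else:
-            result.append(arg)
-    return tuple(result)
-
-# e.g. _strip_flag(('del', '10.0.0.0/24', 'dev', 'eth0'), 'dev')
-# returns ('del', '10.0.0.0/24').
-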
-
-class TestIpNetnsCommand(TestIPCmdBase):
-    def setUp(self):
-        super(TestIpNetnsCommand, self).setUp()
-        self.command = 'netns'
-        self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
-
-    def test_add_namespace(self):
-        with mock.patch('neutron.agent.common.utils.execute') as execute:
-            ns = self.netns_cmd.add('ns')
-            self._assert_sudo([], ('add', 'ns'), use_root_namespace=True)
-            self.assertEqual('ns', ns.namespace)
-            execute.assert_called_once_with(
-                ['ip', 'netns', 'exec', 'ns',
-                 'sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1'],
-                run_as_root=True, check_exit_code=True, extra_ok_codes=None,
-                log_fail_as_error=True)
-
-    def test_delete_namespace(self):
-        with mock.patch('neutron.agent.common.utils.execute'):
-            self.netns_cmd.delete('ns')
-            self._assert_sudo([], ('delete', 'ns'), use_root_namespace=True)
-
-    def test_execute(self):
-        self.parent.namespace = 'ns'
-        with mock.patch('neutron.agent.common.utils.execute') as execute:
-            self.netns_cmd.execute(['ip', 'link', 'list'])
-            execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
-                                             'link', 'list'],
-                                            run_as_root=True,
-                                            check_exit_code=True,
-                                            extra_ok_codes=None,
-                                            log_fail_as_error=True)
-
-    def test_execute_env_var_prepend(self):
-        self.parent.namespace = 'ns'
-        with mock.patch('neutron.agent.common.utils.execute') as execute:
-            env = dict(FOO=1, BAR=2)
-            self.netns_cmd.execute(['ip', 'link', 'list'], env)
-            execute.assert_called_once_with(
-                ['ip', 'netns', 'exec', 'ns', 'env'] +
-                ['%s=%s' % (k, v) for k, v in env.items()] +
-                ['ip', 'link', 'list'],
-                run_as_root=True, check_exit_code=True, extra_ok_codes=None,
-                log_fail_as_error=True)
-
-    def test_execute_nosudo_with_no_namespace(self):
-        with mock.patch('neutron.agent.common.utils.execute') as execute:
-            self.parent.namespace = None
-            self.netns_cmd.execute(['test'])
-            execute.assert_called_once_with(['test'],
-                                            check_exit_code=True,
-                                            extra_ok_codes=None,
-                                            run_as_root=False,
-                                            log_fail_as_error=True)
-
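-
-# NOTE: test_execute and test_execute_env_var_prepend above pin down how a
-# command is wrapped for a namespace: env(1) injects the variables after
-# the 'ip netns exec <ns>' prefix.  A minimal sketch of that wrapping (env
-# is sorted here only to keep the sketch deterministic):
-def _wrap_netns_cmd(cmd, namespace=None, env=None):
-    wrapped = list(cmd)
-    if env:
-        wrapped = ['env'] + ['%s=%s' % (k, v)
-                             for k, v in sorted(env.items())] + wrapped
-    if namespace:
-        wrapped = ['ip', 'netns', 'exec', namespace] + wrapped
-    return wrapped
-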
-
-class TestDeviceExists(base.BaseTestCase):
-    def test_device_exists(self):
-        with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
-            _execute.return_value = LINK_SAMPLE[1]
-            self.assertTrue(ip_lib.device_exists('eth0'))
-            _execute.assert_called_once_with(['o'], 'link', ('show', 'eth0'),
-                                             log_fail_as_error=False)
-
-    def test_device_exists_reset_fail(self):
-        device = ip_lib.IPDevice('eth0')
-        device.set_log_fail_as_error(True)
-        with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
-            _execute.return_value = LINK_SAMPLE[1]
-            self.assertTrue(device.exists())
-            self.assertTrue(device.get_log_fail_as_error())
-
-    def test_device_does_not_exist(self):
-        with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
-            _execute.return_value = ''
-            _execute.side_effect = RuntimeError
-            self.assertFalse(ip_lib.device_exists('eth0'))
-
-    def test_ensure_device_is_ready(self):
-        ip_lib_mock = mock.Mock()
-        with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock):
-            self.assertTrue(ip_lib.ensure_device_is_ready("eth0"))
-            self.assertTrue(ip_lib_mock.link.set_up.called)
-            ip_lib_mock.reset_mock()
-            # device doesn't exist
-            ip_lib_mock.link.set_up.side_effect = RuntimeError
-            self.assertFalse(ip_lib.ensure_device_is_ready("eth0"))
-
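-
-# NOTE: The tests above rely on a simple probe pattern: run
-# 'ip link show <device>' and treat a RuntimeError as "does not exist".
-# Sketched standalone, assuming `run` is any callable that raises
-# RuntimeError when the command fails (hypothetical helper):
-def _device_exists(device_name, run):
-    try:
-        run(['ip', 'link', 'show', device_name])
-    except RuntimeError:
-        return False
-    return True
-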
-
-class TestIpNeighCommand(TestIPCmdBase):
-    def setUp(self):
-        super(TestIpNeighCommand, self).setUp()
-        self.parent.name = 'tap0'
-        self.command = 'neigh'
-        self.neigh_cmd = ip_lib.IpNeighCommand(self.parent)
-
-    def test_add_entry(self):
-        self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
-        self._assert_sudo([4],
-                          ('replace', '192.168.45.100',
-                           'lladdr', 'cc:dd:ee:ff:ab:cd',
-                           'nud', 'permanent',
-                           'dev', 'tap0'))
-
-    def test_delete_entry(self):
-        self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
-        self._assert_sudo([4],
-                          ('del', '192.168.45.100',
-                           'lladdr', 'cc:dd:ee:ff:ab:cd',
-                           'dev', 'tap0'))
-
-    def test_flush(self):
-        self.neigh_cmd.flush(4, '192.168.0.1')
-        self._assert_sudo([4], ('flush', 'to', '192.168.0.1'))
-
-
-class TestArpPing(TestIPCmdBase):
-    @mock.patch.object(ip_lib, 'IPWrapper')
-    @mock.patch('eventlet.spawn_n')
-    def test_send_ipv4_addr_adv_notif(self, spawn_n, mIPWrapper):
-        spawn_n.side_effect = lambda f: f()
-        ARPING_COUNT = 3
-        address = '20.0.0.1'
-        config = mock.Mock()
-        config.send_arp_for_ha = ARPING_COUNT
-        ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
-                                      mock.sentinel.iface_name,
-                                      address,
-                                      config)
-
-        self.assertTrue(spawn_n.called)
-        mIPWrapper.assert_called_once_with(namespace=mock.sentinel.ns_name)
-
-        ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name)
-
-        # Just test that arping is called with the right arguments
-        arping_cmd = ['arping', '-A',
-                      '-I', mock.sentinel.iface_name,
-                      '-c', ARPING_COUNT,
-                      '-w', mock.ANY,
-                      address]
-        ip_wrapper.netns.execute.assert_any_call(arping_cmd,
-                                                 check_exit_code=True)
-
-    @mock.patch('eventlet.spawn_n')
-    def test_no_ipv6_addr_notif(self, spawn_n):
-        ipv6_addr = 'fd00::1'
-        config = mock.Mock()
-        config.send_arp_for_ha = 3
-        ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
-                                      mock.sentinel.iface_name,
-                                      ipv6_addr,
-                                      config)
-        self.assertFalse(spawn_n.called)
-
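-
-# NOTE: test_send_ipv4_addr_adv_notif above only asserts the arping
-# arguments.  The command shape it checks, built standalone: '-A' sends
-# unsolicited (gratuitous) ARP replies, '-c' sets the probe count, '-w'
-# the overall deadline (sketch mirroring the assertion, not production
-# code):
-def _build_arping_cmd(iface_name, address, count, deadline):
-    return ['arping', '-A',
-            '-I', iface_name,
-            '-c', count,
-            '-w', deadline,
-            address]
-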
-
-class TestAddNamespaceToCmd(base.BaseTestCase):
-    def test_add_namespace_to_cmd_with_namespace(self):
-        cmd = ['ping', '8.8.8.8']
-        self.assertEqual(['ip', 'netns', 'exec', 'tmp'] + cmd,
-                         ip_lib.add_namespace_to_cmd(cmd, 'tmp'))
-
-    def test_add_namespace_to_cmd_without_namespace(self):
-        cmd = ['ping', '8.8.8.8']
-        self.assertEqual(cmd, ip_lib.add_namespace_to_cmd(cmd, None))
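-
-# NOTE: The two tests above fully specify add_namespace_to_cmd's contract;
-# a matching minimal implementation would be (sketch consistent with the
-# assertions, not necessarily the real ip_lib code):
-def _add_namespace_to_cmd(cmd, namespace=None):
-    if not namespace:
-        return cmd
-    return ['ip', 'netns', 'exec', namespace] + list(cmd)
-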
diff --git a/neutron/tests/unit/agent/linux/test_ip_link_support.py b/neutron/tests/unit/agent/linux/test_ip_link_support.py
deleted file mode 100644 (file)
index cf29530..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import mock
-
-from neutron.agent.linux import ip_link_support as ip_link
-from neutron.tests import base
-
-
-class TestIpLinkSupport(base.BaseTestCase):
-    IP_LINK_HELP = """Usage: ip link add [link DEV] [ name ] NAME
-                   [ txqueuelen PACKETS ]
-                   [ address LLADDR ]
-                   [ broadcast LLADDR ]
-                   [ mtu MTU ] [index IDX ]
-                   [ numtxqueues QUEUE_COUNT ]
-                   [ numrxqueues QUEUE_COUNT ]
-                   type TYPE [ ARGS ]
-       ip link delete DEV type TYPE [ ARGS ]
-
-       ip link set { dev DEVICE | group DEVGROUP } [ { up | down } ]
-                          [ arp { on | off } ]
-                          [ dynamic { on | off } ]
-                          [ multicast { on | off } ]
-                          [ allmulticast { on | off } ]
-                          [ promisc { on | off } ]
-                          [ trailers { on | off } ]
-                          [ txqueuelen PACKETS ]
-                          [ name NEWNAME ]
-                          [ address LLADDR ]
-                          [ broadcast LLADDR ]
-                          [ mtu MTU ]
-                          [ netns PID ]
-                          [ netns NAME ]
-                          [ alias NAME ]
-                          [ vf NUM [ mac LLADDR ]
-                                   [ vlan VLANID [ qos VLAN-QOS ] ]
-                                   [ rate TXRATE ] ]
-                                   [ spoofchk { on | off} ] ]
-                                   [ state { auto | enable | disable} ] ]
-                          [ master DEVICE ]
-                          [ nomaster ]
-       ip link show [ DEVICE | group GROUP ] [up]
-
-TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | macvtap |
-          can | bridge | bond | ipoib | ip6tnl | ipip | sit |
-          vxlan | gre | gretap | ip6gre | ip6gretap | vti }
-    """
-
-    IP_LINK_HELP_NO_STATE = """Usage: ip link add link DEV [ name ] NAME
-                   [ txqueuelen PACKETS ]
-                   [ address LLADDR ]
-                   [ broadcast LLADDR ]
-                   [ mtu MTU ]
-                   type TYPE [ ARGS ]
-       ip link delete DEV type TYPE [ ARGS ]
-
-       ip link set DEVICE [ { up | down } ]
-                          [ arp { on | off } ]
-                          [ dynamic { on | off } ]
-                          [ multicast { on | off } ]
-                          [ allmulticast { on | off } ]
-                          [ promisc { on | off } ]
-                          [ trailers { on | off } ]
-                          [ txqueuelen PACKETS ]
-                          [ name NEWNAME ]
-                          [ address LLADDR ]
-                          [ broadcast LLADDR ]
-                          [ mtu MTU ]
-                          [ netns PID ]
-                          [ alias NAME ]
-                          [ vf NUM [ mac LLADDR ]
-                                   [ vlan VLANID [ qos VLAN-QOS ] ]
-                                   [ rate TXRATE ] ]
-       ip link show [ DEVICE ]
-
-TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | can }
-    """
-
-    IP_LINK_HELP_NO_SPOOFCHK = IP_LINK_HELP_NO_STATE
-
-    IP_LINK_HELP_NO_VF = """Usage: ip link set DEVICE { up | down |
-                             arp { on | off } |
-                             dynamic { on | off } |
-                             multicast { on | off } |
-                             allmulticast { on | off } |
-                             promisc { on | off } |
-                             trailers { on | off } |
-                             txqueuelen PACKETS |
-                             name NEWNAME |
-                             address LLADDR | broadcast LLADDR |
-                             mtu MTU }
-       ip link show [ DEVICE ]
-
-    """
-
-    def _test_capability(self, capability, subcapability=None,
-                         expected=True, stdout="", stderr=""):
-        with mock.patch("neutron.agent.linux.utils.execute") as mock_exec:
-            mock_exec.return_value = (stdout, stderr)
-            vf_section = ip_link.IpLinkSupport.get_vf_mgmt_section()
-            capable = ip_link.IpLinkSupport.vf_mgmt_capability_supported(
-                vf_section, capability, subcapability)
-            self.assertEqual(expected, capable)
-            mock_exec.assert_called_once_with(['ip', 'link', 'help'],
-                                              check_exit_code=False,
-                                              return_stderr=True,
-                                              log_fail_as_error=False)
-
-    def test_vf_mgmt(self):
-        self._test_capability(
-            ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
-            stderr=self.IP_LINK_HELP)
-
-    def test_execute_with_stdout(self):
-        self._test_capability(
-            ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
-            stdout=self.IP_LINK_HELP)
-
-    def test_vf_mgmt_no_state(self):
-        self._test_capability(
-            ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
-            expected=False,
-            stderr=self.IP_LINK_HELP_NO_STATE)
-
-    def test_vf_mgmt_no_spoofchk(self):
-        self._test_capability(
-            ip_link.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK,
-            expected=False,
-            stderr=self.IP_LINK_HELP_NO_SPOOFCHK)
-
-    def test_vf_mgmt_no_vf(self):
-        self._test_capability(
-            ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
-            expected=False,
-            stderr=self.IP_LINK_HELP_NO_VF)
-
-    def test_vf_mgmt_unknown_capability(self):
-        self._test_capability(
-            "state1",
-            expected=False,
-            stderr=self.IP_LINK_HELP)
-
-    def test_vf_mgmt_sub_capability(self):
-        self._test_capability(
-            ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN,
-            ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS,
-            stderr=self.IP_LINK_HELP)
-
-    def test_vf_mgmt_sub_capability_mismatch(self):
-        self._test_capability(
-            ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
-            ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS,
-            expected=False,
-            stderr=self.IP_LINK_HELP)
-
-    def test_vf_mgmt_sub_capability_invalid(self):
-        self._test_capability(
-            ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN,
-            "qos1",
-            expected=False,
-            stderr=self.IP_LINK_HELP)
-
-    def test_vf_mgmt_error(self):
-        with mock.patch("neutron.agent.linux.utils.execute") as mock_exec:
-            mock_exec.side_effect = Exception()
-            self.assertRaises(
-                ip_link.UnsupportedIpLinkCommand,
-                ip_link.IpLinkSupport.get_vf_mgmt_section)
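-
-# NOTE: The fixtures above capture the technique under test: capability
-# discovery by scanning 'ip link help' output (which iproute2 prints on
-# stderr) for the 'vf NUM' section and its keywords.  A crude standalone
-# sketch of that scan (the real IpLinkSupport bounds the section more
-# carefully; this is illustrative only):
-def _vf_capability_supported(help_text, capability):
-    start = help_text.find('vf NUM')
-    if start < 0:
-        return False
-    return ('[ %s' % capability) in help_text[start:]
-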
diff --git a/neutron/tests/unit/agent/linux/test_ip_monitor.py b/neutron/tests/unit/agent/linux/test_ip_monitor.py
deleted file mode 100644 (file)
index ea71237..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.linux import ip_monitor
-from neutron.tests import base
-
-
-class TestIPMonitorEvent(base.BaseTestCase):
-    def test_from_text_parses_added_line(self):
-        event = ip_monitor.IPMonitorEvent.from_text(
-            '3: wlp3s0    inet 192.168.3.59/24 brd 192.168.3.255 '
-            'scope global dynamic wlp3s0\       valid_lft 300sec '
-            'preferred_lft 300sec')
-        self.assertEqual('wlp3s0', event.interface)
-        self.assertTrue(event.added)
-        self.assertEqual('192.168.3.59/24', event.cidr)
-
-    def test_from_text_parses_deleted_line(self):
-        event = ip_monitor.IPMonitorEvent.from_text(
-            'Deleted 1: lo    inet 127.0.0.2/8 scope host secondary lo\''
-            '       valid_lft forever preferred_lft forever')
-        self.assertEqual('lo', event.interface)
-        self.assertFalse(event.added)
-        self.assertEqual('127.0.0.2/8', event.cidr)
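-
-# NOTE: Both tests above decode 'ip monitor address' lines of the form
-# '[Deleted] IDX: IFACE    inet CIDR ...'.  A minimal parse along those
-# lines, assuming the fields always appear in that order (sketch, not the
-# real IPMonitorEvent.from_text):
-def _parse_ip_monitor_line(line):
-    added = not line.startswith('Deleted')
-    fields = line.split()
-    if not added:
-        fields = fields[1:]   # drop the leading 'Deleted'
-    interface = fields[1]     # fields[0] is the 'IDX:' index
-    cidr = fields[fields.index('inet') + 1]
-    return interface, added, cidr
-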
diff --git a/neutron/tests/unit/agent/linux/test_ipset_manager.py b/neutron/tests/unit/agent/linux/test_ipset_manager.py
deleted file mode 100644 (file)
index bb79846..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-import mock
-
-from neutron.agent.linux import ipset_manager
-from neutron.tests import base
-
-TEST_SET_ID = 'fake_sgid'
-ETHERTYPE = 'IPv4'
-TEST_SET_NAME = ipset_manager.IpsetManager.get_name(TEST_SET_ID, ETHERTYPE)
-TEST_SET_NAME_NEW = TEST_SET_NAME + ipset_manager.SWAP_SUFFIX
-FAKE_IPS = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4',
-            '10.0.0.5', '10.0.0.6']
-
-
-class BaseIpsetManagerTest(base.BaseTestCase):
-    def setUp(self):
-        super(BaseIpsetManagerTest, self).setUp()
-        self.ipset = ipset_manager.IpsetManager()
-        self.execute = mock.patch.object(self.ipset, "execute").start()
-        self.expected_calls = []
-        self.expect_create()
-        self.force_sorted_get_set_ips()
-
-    def force_sorted_get_set_ips(self):
-        """Force sorted responses by self.ipset._get_new/deleted_set_ips.
-
-        _get_new/deleted_set_ips use sets internally, so they return their
-        responses in arbitrary order. This method sorts those responses in
-        order to guarantee the call order in self.ipset.set_members.
-        """
-        original_get_new_set_ips = self.ipset._get_new_set_ips
-        original_get_deleted_set_ips = self.ipset._get_deleted_set_ips
-
-        def sorted_get_new_set_ips(set_name, expected_ips):
-            unsorted = original_get_new_set_ips(set_name, expected_ips)
-            return sorted(unsorted)
-
-        def sorted_get_deleted_set_ips(set_name, expected_ips):
-            unsorted = original_get_deleted_set_ips(set_name, expected_ips)
-            return sorted(unsorted)
-
-        mock.patch.object(self.ipset, '_get_new_set_ips',
-                          side_effect=sorted_get_new_set_ips).start()
-        mock.patch.object(self.ipset, '_get_deleted_set_ips',
-                          side_effect=sorted_get_deleted_set_ips).start()
-
-    def verify_mock_calls(self):
-        self.execute.assert_has_calls(self.expected_calls, any_order=False)
-
-    def expect_set(self, addresses):
-        temp_input = ['create %s hash:net family inet' % TEST_SET_NAME_NEW]
-        temp_input.extend('add %s %s' % (TEST_SET_NAME_NEW, ip)
-                          for ip in self.ipset._sanitize_addresses(addresses))
-        restore_input = '\n'.join(temp_input)
-        self.expected_calls.extend([
-            mock.call(['ipset', 'restore', '-exist'],
-                      process_input=restore_input,
-                      run_as_root=True,
-                      check_exit_code=True),
-            mock.call(['ipset', 'swap', TEST_SET_NAME_NEW, TEST_SET_NAME],
-                      process_input=None,
-                      run_as_root=True,
-                      check_exit_code=True),
-            mock.call(['ipset', 'destroy', TEST_SET_NAME_NEW],
-                      process_input=None,
-                      run_as_root=True,
-                      check_exit_code=False)])
-
-    def expect_add(self, addresses):
-        self.expected_calls.extend(
-            mock.call(['ipset', 'add', '-exist', TEST_SET_NAME, ip],
-                      process_input=None,
-                      run_as_root=True,
-                      check_exit_code=True)
-            for ip in self.ipset._sanitize_addresses(addresses))
-
-    def expect_del(self, addresses):
-
-        self.expected_calls.extend(
-            mock.call(['ipset', 'del', TEST_SET_NAME, ip],
-                      process_input=None,
-                      run_as_root=True,
-                      check_exit_code=False)
-            for ip in self.ipset._sanitize_addresses(addresses))
-
-    def expect_create(self):
-        self.expected_calls.append(
-            mock.call(['ipset', 'create', '-exist', TEST_SET_NAME,
-                       'hash:net', 'family', 'inet'],
-                      process_input=None,
-                      run_as_root=True,
-                      check_exit_code=True))
-
-    def expect_destroy(self):
-        self.expected_calls.append(
-            mock.call(['ipset', 'destroy', TEST_SET_NAME],
-                      process_input=None,
-                      run_as_root=True,
-                      check_exit_code=False))
-
-    def add_first_ip(self):
-        self.expect_set([FAKE_IPS[0]])
-        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, [FAKE_IPS[0]])
-
-    def add_all_ips(self):
-        self.expect_set(FAKE_IPS)
-        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS)
-
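-
-# NOTE: expect_set above encodes the atomic-replace idiom these tests
-# verify: build a throwaway set via 'ipset restore', 'swap' it with the
-# live set, then 'destroy' the leftover.  The same sequence as a list of
-# (command, stdin) pairs (sketch; the '-n' suffix mirrors SWAP_SUFFIX):
-def _atomic_set_replace_cmds(set_name, addresses):
-    tmp = set_name + '-n'
-    restore = ['create %s hash:net family inet' % tmp]
-    restore.extend('add %s %s' % (tmp, ip) for ip in addresses)
-    return [(['ipset', 'restore', '-exist'], '\n'.join(restore)),
-            (['ipset', 'swap', tmp, set_name], None),
-            (['ipset', 'destroy', tmp], None)]
-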
-
-class IpsetManagerTestCase(BaseIpsetManagerTest):
-
-    def test_set_name_exists(self):
-        self.add_first_ip()
-        self.assertTrue(self.ipset.set_name_exists('N' + ETHERTYPE +
-                                                   TEST_SET_ID))
-
-    def test_set_members_with_first_add_member(self):
-        self.add_first_ip()
-        self.verify_mock_calls()
-
-    def test_set_members_adding_less_than_5(self):
-        self.add_first_ip()
-        self.expect_add(FAKE_IPS[1:5])
-        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:5])
-        self.verify_mock_calls()
-
-    def test_set_members_deleting_less_than_5(self):
-        self.add_all_ips()
-        self.expect_del(FAKE_IPS[3:])
-        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:3])
-        self.verify_mock_calls()
-
-    def test_set_members_adding_more_than_5(self):
-        self.add_first_ip()
-        self.expect_set(FAKE_IPS)
-        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS)
-        self.verify_mock_calls()
-
-    def test_set_members_adding_all_zero_ipv4(self):
-        self.expect_set(['0.0.0.0/0'])
-        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, ['0.0.0.0/0'])
-        self.verify_mock_calls()
-
-    def test_set_members_adding_all_zero_ipv6(self):
-        self.expect_set(['::/0'])
-        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, ['::/0'])
-        self.verify_mock_calls()
-
-    def test_destroy(self):
-        self.add_first_ip()
-        self.expect_destroy()
-        self.ipset.destroy(TEST_SET_ID, ETHERTYPE)
-        self.verify_mock_calls()
diff --git a/neutron/tests/unit/agent/linux/test_iptables_firewall.py b/neutron/tests/unit/agent/linux/test_iptables_firewall.py
deleted file mode 100644 (file)
index cdc9b61..0000000
+++ /dev/null
@@ -1,1901 +0,0 @@
-# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-import mock
-from oslo_config import cfg
-import six
-import testtools
-
-from neutron.agent.common import config as a_cfg
-from neutron.agent.linux import ipset_manager
-from neutron.agent.linux import iptables_comments as ic
-from neutron.agent.linux import iptables_firewall
-from neutron.agent import securitygroups_rpc as sg_cfg
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import utils
-from neutron.tests import base
-from neutron.tests.unit.api.v2 import test_base
-
-
-_uuid = test_base._uuid
-#TODO(mangelajo): replace all 'IPv4', 'IPv6' with constants
-FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
-               'IPv6': 'fe80::/48'}
-FAKE_IP = {'IPv4': '10.0.0.1',
-           'IPv6': 'fe80::1'}
-#TODO(mangelajo): replace all '*_sgid' strings with the constants
-FAKE_SGID = 'fake_sgid'
-OTHER_SGID = 'other_sgid'
-_IPv6 = constants.IPv6
-_IPv4 = constants.IPv4
-
-RAW_TABLE_OUTPUT = """
-# Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015
-*raw
-:PREROUTING ACCEPT [11561:3470468]
-:OUTPUT ACCEPT [11504:4064044]
-:neutron-openvswi-OUTPUT - [0:0]
-:neutron-openvswi-PREROUTING - [0:0]
--A PREROUTING -j neutron-openvswi-PREROUTING
--A OUTPUT -j neutron-openvswi-OUTPUT
--A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 1
--A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 1
--A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 2
--A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 2
--A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 2
--A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 2
--A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 9
--A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 9
-COMMIT
-# Completed on Fri Jul 31 16:13:28 2015
-"""  # noqa
-
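-
-# NOTE: RAW_TABLE_OUTPUT above feeds the driver's conntrack-zone
-# bookkeeping: each '-j CT --zone N' rule ties a port's devices to a zone.
-# Recovering that device-to-zone map from the dump might look like this
-# (sketch only, not the driver's actual parser):
-def _zones_from_raw_table(raw_lines):
-    zones = {}
-    for line in raw_lines:
-        fields = line.split()
-        if '--physdev-in' in fields and '--zone' in fields:
-            device = fields[fields.index('--physdev-in') + 1]
-            zones[device] = int(fields[fields.index('--zone') + 1])
-    return zones
-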
-
-class BaseIptablesFirewallTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(BaseIptablesFirewallTestCase, self).setUp()
-        cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
-        cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
-        cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
-        self.utils_exec_p = mock.patch(
-            'neutron.agent.linux.utils.execute')
-        self.utils_exec = self.utils_exec_p.start()
-        self.iptables_cls_p = mock.patch(
-            'neutron.agent.linux.iptables_manager.IptablesManager')
-        iptables_cls = self.iptables_cls_p.start()
-        self.iptables_inst = mock.Mock()
-        self.v4filter_inst = mock.Mock()
-        self.v6filter_inst = mock.Mock()
-        self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
-                                   'raw': self.v4filter_inst}
-        self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
-                                   'raw': self.v6filter_inst}
-        iptables_cls.return_value = self.iptables_inst
-
-        self.iptables_inst.get_rules_for_table.return_value = (
-            RAW_TABLE_OUTPUT.splitlines())
-        self.firewall = iptables_firewall.IptablesFirewallDriver()
-        self.firewall.iptables = self.iptables_inst
-
-
-class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
-
-    def _fake_port(self):
-        return {'device': 'tapfake_dev',
-                'mac_address': 'ff:ff:ff:ff:ff:ff',
-                'network_id': 'fake_net',
-                'fixed_ips': [FAKE_IP['IPv4'],
-                              FAKE_IP['IPv6']]}
-
-    def test_prepare_port_filter_with_no_sg(self):
-        port = self._fake_port()
-        self.firewall.prepare_port_filter(port)
-        calls = [mock.call.add_chain('sg-fallback'),
-                 mock.call.add_rule(
-                     'sg-fallback', '-j DROP',
-                     comment=ic.UNMATCH_DROP),
-                 mock.call.remove_chain('sg-chain'),
-                 mock.call.add_chain('sg-chain'),
-                 mock.call.add_chain('ifake_dev'),
-                 mock.call.add_rule('FORWARD',
-                                    '-m physdev --physdev-out tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $sg-chain', comment=ic.VM_INT_SG),
-                 mock.call.add_rule('sg-chain',
-                                    '-m physdev --physdev-out tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $ifake_dev',
-                                    comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state INVALID -j DROP',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-j $sg-fallback', comment=None),
-                 mock.call.add_chain('ofake_dev'),
-                 mock.call.add_rule('FORWARD',
-                                    '-m physdev --physdev-in tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $sg-chain', comment=ic.VM_INT_SG),
-                 mock.call.add_rule('sg-chain',
-                                    '-m physdev --physdev-in tapfake_dev '
-                                    '--physdev-is-bridged -j $ofake_dev',
-                                    comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule('INPUT',
-                                    '-m physdev --physdev-in tapfake_dev '
-                                    '--physdev-is-bridged -j $ofake_dev',
-                                    comment=ic.INPUT_TO_SG),
-                 mock.call.add_chain('sfake_dev'),
-                 mock.call.add_rule(
-                     'sfake_dev',
-                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
-                     '-j RETURN',
-                     comment=ic.PAIR_ALLOW),
-                 mock.call.add_rule(
-                     'sfake_dev', '-j DROP',
-                     comment=ic.PAIR_DROP),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 68 -m udp --dport 67 -j RETURN',
-                     comment=None),
-                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
-                                    comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-m state --state INVALID -j DROP', comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-j $sg-fallback',
-                     comment=None),
-                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_filter_ipv4_ingress(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress'}
-        ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule(
-            'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_tcp(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'tcp'}
-        ingress = mock.call.add_rule(
-            'ifake_dev', '-p tcp -j RETURN', comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_tcp_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'tcp',
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule('ifake_dev',
-                                     '-s %s -p tcp -j RETURN' % prefix,
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_icmp(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'icmp'}
-        ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN',
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_icmp_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'icmp',
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule(
-            'ifake_dev', '-s %s -p icmp -j RETURN' % prefix,
-            comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_tcp_port(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 10}
-        ingress = mock.call.add_rule('ifake_dev',
-                                     '-p tcp -m tcp --dport 10 -j RETURN',
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_tcp_mport(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 100}
-        ingress = mock.call.add_rule(
-            'ifake_dev',
-            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
-            comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_tcp_mport_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 100,
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule(
-            'ifake_dev',
-            '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
-            '-j RETURN' % prefix, comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_udp(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'udp'}
-        ingress = mock.call.add_rule(
-            'ifake_dev', '-p udp -j RETURN', comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_udp_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'udp',
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule('ifake_dev',
-                                     '-s %s -p udp -j RETURN' % prefix,
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_udp_port(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 10}
-        ingress = mock.call.add_rule('ifake_dev',
-                                     '-p udp -m udp --dport 10 -j RETURN',
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_udp_mport(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 100}
-        ingress = mock.call.add_rule(
-            'ifake_dev',
-            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
-            comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_ingress_udp_mport_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'ingress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 100,
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule(
-            'ifake_dev',
-            '-s %s -p udp -m udp -m multiport --dports 10:100 '
-            '-j RETURN' % prefix, comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress'}
-        egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_tcp(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'tcp'}
-        egress = mock.call.add_rule(
-            'ofake_dev', '-p tcp -j RETURN', comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_tcp_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'tcp',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule('ofake_dev',
-                                    '-s %s -p tcp -j RETURN' % prefix,
-                                    comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_icmp(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'icmp'}
-        egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN',
-                                    comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_icmp_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'icmp',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev', '-s %s -p icmp -j RETURN' % prefix,
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_icmp_type(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'icmp',
-                'source_port_range_min': 8,
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p icmp -m icmp --icmp-type 8 -j RETURN' % prefix,
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_icmp_type_name(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'icmp',
-                'source_port_range_min': 'echo-request',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p icmp -m icmp --icmp-type echo-request '
-            '-j RETURN' % prefix,
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_icmp_type_code(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'icmp',
-                'source_port_range_min': 8,
-                'source_port_range_max': 0,
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix,
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_tcp_port(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 10}
-        egress = mock.call.add_rule('ofake_dev',
-                                    '-p tcp -m tcp --dport 10 -j RETURN',
-                                    comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_tcp_mport(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 100}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_tcp_mport_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 100,
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
-            '-j RETURN' % prefix, comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_udp(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'udp'}
-        egress = mock.call.add_rule(
-            'ofake_dev', '-p udp -j RETURN', comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_udp_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'udp',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule('ofake_dev',
-                                    '-s %s -p udp -j RETURN' % prefix,
-                                    comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_udp_port(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 10}
-        egress = mock.call.add_rule('ofake_dev',
-                                    '-p udp -m udp --dport 10 -j RETURN',
-                                    comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_udp_mport(self):
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 100}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv4_egress_udp_mport_prefix(self):
-        prefix = FAKE_PREFIX['IPv4']
-        rule = {'ethertype': 'IPv4',
-                'direction': 'egress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 100,
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p udp -m udp -m multiport --dports 10:100 '
-            '-j RETURN' % prefix, comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress'}
-        ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule(
-            'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_tcp(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'tcp'}
-        ingress = mock.call.add_rule(
-            'ifake_dev', '-p tcp -j RETURN', comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_tcp_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'tcp',
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule('ifake_dev',
-                                     '-s %s -p tcp -j RETURN' % prefix,
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_tcp_port(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 10}
-        ingress = mock.call.add_rule('ifake_dev',
-                                     '-p tcp -m tcp --dport 10 -j RETURN',
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_icmp(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'icmp'}
-        ingress = mock.call.add_rule(
-            'ifake_dev', '-p ipv6-icmp -j RETURN', comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
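Note the protocol translation these IPv6 tests rely on: a rule written with
protocol 'icmp' against an IPv6 ethertype must be emitted as "-p ipv6-icmp",
since ip6tables has no plain 'icmp' match. A sketch of the substitution, with
the helper name assumed for illustration:

    def _ip6_protocol_name(ethertype, protocol):
        # ip6tables spells ICMP as 'ipv6-icmp'.
        if ethertype == 'IPv6' and protocol == 'icmp':
            return 'ipv6-icmp'
        return protocol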
-    def test_filter_ipv6_ingress_icmp_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'icmp',
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule(
-            'ifake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
-            comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_tcp_mport(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 100}
-        ingress = mock.call.add_rule(
-            'ifake_dev',
-            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
-            comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def _test_filter_ingress_tcp_min_port_0(self, ethertype):
-        rule = {'ethertype': ethertype,
-                'direction': 'ingress',
-                'protocol': 'tcp',
-                'port_range_min': 0,
-                'port_range_max': 100}
-        ingress = mock.call.add_rule(
-            'ifake_dev',
-            '-p tcp -m tcp -m multiport --dports 0:100 -j RETURN',
-            comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ingress_tcp_min_port_0_for_ipv4(self):
-        self._test_filter_ingress_tcp_min_port_0('IPv4')
-
-    def test_filter_ingress_tcp_min_port_0_for_ipv6(self):
-        self._test_filter_ingress_tcp_min_port_0('IPv6')
-
-    def test_filter_ipv6_ingress_tcp_mport_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 100,
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule(
-            'ifake_dev',
-            '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
-            '-j RETURN' % prefix, comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_udp(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'udp'}
-        ingress = mock.call.add_rule(
-            'ifake_dev', '-p udp -j RETURN', comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_udp_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'udp',
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule('ifake_dev',
-                                     '-s %s -p udp -j RETURN' % prefix,
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_udp_port(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 10}
-        ingress = mock.call.add_rule('ifake_dev',
-                                     '-p udp -m udp --dport 10 -j RETURN',
-                                     comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_udp_mport(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 100}
-        ingress = mock.call.add_rule(
-            'ifake_dev',
-            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
-            comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_ingress_udp_mport_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'ingress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 100,
-                'source_ip_prefix': prefix}
-        ingress = mock.call.add_rule(
-            'ifake_dev',
-            '-s %s -p udp -m udp -m multiport --dports 10:100 '
-            '-j RETURN' % prefix, comment=None)
-        egress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress'}
-        egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_tcp(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'tcp'}
-        egress = mock.call.add_rule(
-            'ofake_dev', '-p tcp -j RETURN', comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_tcp_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'tcp',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule('ofake_dev',
-                                    '-s %s -p tcp -j RETURN' % prefix,
-                                    comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_icmp(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'icmp'}
-        egress = mock.call.add_rule(
-            'ofake_dev', '-p ipv6-icmp -j RETURN', comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_icmp_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'icmp',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_icmp_type(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'icmp',
-                'source_port_range_min': 8,
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8 -j RETURN' % prefix,
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_icmp_type_name(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'icmp',
-                'source_port_range_min': 'echo-request',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p ipv6-icmp -m icmp6 --icmpv6-type echo-request '
-            '-j RETURN' % prefix,
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_icmp_type_code(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'icmp',
-                'source_port_range_min': 8,
-                'source_port_range_max': 0,
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix,
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
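The ICMPv6 variants follow the same port-field overloading as IPv4, but with
the icmp6 match module; the type may be numeric or a symbolic name such as
'echo-request', and both pass through verbatim. A sketch under the same
assumptions as the IPv4 helper above:

    def _icmpv6_arg(rule):
        icmp_type = rule.get('source_port_range_min')
        if icmp_type is None:
            return ''
        arg = '-m icmp6 --icmpv6-type %s' % icmp_type
        icmp_code = rule.get('source_port_range_max')
        if icmp_code is not None:
            arg += '/%s' % icmp_code
        return arg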
-    def test_filter_ipv6_egress_tcp_port(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 10}
-        egress = mock.call.add_rule('ofake_dev',
-                                    '-p tcp -m tcp --dport 10 -j RETURN',
-                                    comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_tcp_mport(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 100}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_tcp_mport_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'tcp',
-                'port_range_min': 10,
-                'port_range_max': 100,
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
-            '-j RETURN' % prefix, comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_udp(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'udp'}
-        egress = mock.call.add_rule(
-            'ofake_dev', '-p udp -j RETURN', comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_udp_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'udp',
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule('ofake_dev',
-                                    '-s %s -p udp -j RETURN' % prefix,
-                                    comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_udp_port(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 10}
-        egress = mock.call.add_rule('ofake_dev',
-                                    '-p udp -m udp --dport 10 -j RETURN',
-                                    comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_udp_mport(self):
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 100}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
-            comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def test_filter_ipv6_egress_udp_mport_prefix(self):
-        prefix = FAKE_PREFIX['IPv6']
-        rule = {'ethertype': 'IPv6',
-                'direction': 'egress',
-                'protocol': 'udp',
-                'port_range_min': 10,
-                'port_range_max': 100,
-                'source_ip_prefix': prefix}
-        egress = mock.call.add_rule(
-            'ofake_dev',
-            '-s %s -p udp -m udp -m multiport --dports 10:100 '
-            '-j RETURN' % prefix, comment=None)
-        ingress = None
-        self._test_prepare_port_filter(rule, ingress, egress)
-
-    def _test_prepare_port_filter(self,
-                                  rule,
-                                  ingress_expected_call=None,
-                                  egress_expected_call=None):
-        port = self._fake_port()
-        ethertype = rule['ethertype']
-        prefix = utils.ip_to_cidr(FAKE_IP[ethertype])
-        filter_inst = self.v4filter_inst
-        dhcp_rule = [mock.call.add_rule(
-            'ofake_dev',
-            '-p udp -m udp --sport 68 -m udp --dport 67 -j RETURN',
-            comment=None)]
-
-        if ethertype == 'IPv6':
-            filter_inst = self.v6filter_inst
-
-            dhcp_rule = [mock.call.add_rule('ofake_dev', '-p ipv6-icmp '
-                                            '-m icmp6 '
-                                            '--icmpv6-type %s -j DROP'
-                                            % constants.ICMPV6_TYPE_RA,
-                                            comment=None),
-                         mock.call.add_rule('ofake_dev',
-                                            '-p ipv6-icmp -j RETURN',
-                                            comment=None),
-                         mock.call.add_rule('ofake_dev', '-p udp -m udp '
-                                            '--sport 546 -m udp --dport 547 '
-                                            '-j RETURN', comment=None)]
-        sg = [rule]
-        port['security_group_rules'] = sg
-        self.firewall.prepare_port_filter(port)
-        calls = [mock.call.add_chain('sg-fallback'),
-                 mock.call.add_rule(
-                     'sg-fallback',
-                     '-j DROP',
-                     comment=ic.UNMATCH_DROP),
-                 mock.call.remove_chain('sg-chain'),
-                 mock.call.add_chain('sg-chain'),
-                 mock.call.add_chain('ifake_dev'),
-                 mock.call.add_rule('FORWARD',
-                                    '-m physdev --physdev-out tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $sg-chain', comment=ic.VM_INT_SG),
-                 mock.call.add_rule('sg-chain',
-                                    '-m physdev --physdev-out tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $ifake_dev',
-                                    comment=ic.SG_TO_VM_SG)
-                 ]
-        if ethertype == 'IPv6':
-            for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
-                calls.append(
-                    mock.call.add_rule('ifake_dev',
-                                       '-p ipv6-icmp -m icmp6 --icmpv6-type '
-                                       '%s -j RETURN' %
-                                       icmp6_type, comment=None))
-        calls += [
-            mock.call.add_rule(
-                'ifake_dev',
-                '-m state --state RELATED,ESTABLISHED -j RETURN',
-                comment=None
-            )
-        ]
-
-        if ingress_expected_call:
-            calls.append(ingress_expected_call)
-
-        calls += [mock.call.add_rule(
-                      'ifake_dev',
-                      '-m state --state INVALID -j DROP', comment=None),
-                  mock.call.add_rule('ifake_dev',
-                                     '-j $sg-fallback', comment=None),
-                  mock.call.add_chain('ofake_dev'),
-                  mock.call.add_rule('FORWARD',
-                                     '-m physdev --physdev-in tapfake_dev '
-                                     '--physdev-is-bridged '
-                                     '-j $sg-chain', comment=ic.VM_INT_SG),
-                  mock.call.add_rule('sg-chain',
-                                     '-m physdev --physdev-in tapfake_dev '
-                                     '--physdev-is-bridged -j $ofake_dev',
-                                     comment=ic.SG_TO_VM_SG),
-                  mock.call.add_rule('INPUT',
-                                     '-m physdev --physdev-in tapfake_dev '
-                                     '--physdev-is-bridged -j $ofake_dev',
-                                     comment=ic.INPUT_TO_SG),
-                  mock.call.add_chain('sfake_dev'),
-                  mock.call.add_rule(
-                      'sfake_dev',
-                      '-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN'
-                      % prefix,
-                      comment=ic.PAIR_ALLOW),
-                  mock.call.add_rule(
-                      'sfake_dev', '-j DROP',
-                      comment=ic.PAIR_DROP)]
-        calls += dhcp_rule
-        calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev',
-                                        comment=None))
-        if ethertype == 'IPv4':
-            calls.append(mock.call.add_rule(
-                'ofake_dev',
-                '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
-                comment=None))
-        if ethertype == 'IPv6':
-            calls.append(mock.call.add_rule(
-                'ofake_dev',
-                '-p udp -m udp --sport 547 -m udp --dport 546 -j DROP',
-                comment=None))
-
-        calls += [
-            mock.call.add_rule(
-                'ofake_dev',
-                '-m state --state RELATED,ESTABLISHED -j RETURN',
-                comment=None),
-        ]
-
-        if egress_expected_call:
-            calls.append(egress_expected_call)
-
-        calls += [mock.call.add_rule(
-                      'ofake_dev',
-                      '-m state --state INVALID -j DROP', comment=None),
-                  mock.call.add_rule('ofake_dev',
-                                     '-j $sg-fallback', comment=None),
-                  mock.call.add_rule('sg-chain', '-j ACCEPT')]
-        # Compare pairwise first so a mismatch reports the exact position
-        # of the first differing call; assert_has_calls alone only reports
-        # that the sequence was not found.
-        comb = zip(calls, filter_inst.mock_calls)
-        for (l, r) in comb:
-            self.assertEqual(l, r)
-        filter_inst.assert_has_calls(calls)
-
-    def _test_remove_conntrack_entries(self, ethertype, protocol,
-                                       direction):
-        port = self._fake_port()
-        port['security_groups'] = 'fake_sg_id'
-        self.firewall.filtered_ports[port['device']] = port
-        self.firewall.updated_rule_sg_ids = set(['fake_sg_id'])
-        self.firewall.sg_rules['fake_sg_id'] = [
-            {'direction': direction, 'ethertype': ethertype,
-             'protocol': protocol}]
-
-        self.firewall.filter_defer_apply_on()
-        self.firewall.sg_rules['fake_sg_id'] = []
-        self.firewall.filter_defer_apply_off()
-        cmd = ['conntrack', '-D']
-        if protocol:
-            cmd.extend(['-p', protocol])
-        if ethertype == 'IPv4':
-            cmd.extend(['-f', 'ipv4'])
-            if direction == 'ingress':
-                cmd.extend(['-d', '10.0.0.1'])
-            else:
-                cmd.extend(['-s', '10.0.0.1'])
-        else:
-            cmd.extend(['-f', 'ipv6'])
-            if direction == 'ingress':
-                cmd.extend(['-d', 'fe80::1'])
-            else:
-                cmd.extend(['-s', 'fe80::1'])
-        # initial data has 1, 2, and 9 in use, CT zone will start at 10.
-        cmd.extend(['-w', 10])
-        calls = [
-            mock.call(cmd, run_as_root=True, check_exit_code=True,
-                      extra_ok_codes=[1])]
-        self.utils_exec.assert_has_calls(calls)
-
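Concretely, for a deleted ingress TCP rule on an IPv4 port with fixed IP
10.0.0.1 in conntrack zone 10, the helper above expects exactly this flush
command (egress swaps '-d' for '-s'; IPv6 uses '-f ipv6' and fe80::1):

    cmd = ['conntrack', '-D', '-p', 'tcp', '-f', 'ipv4',
           '-d', '10.0.0.1', '-w', 10]

The extra_ok_codes=[1] in the assertion reflects that conntrack exits with 1
when no matching entries were found, which must not be treated as an error.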
-    def test_remove_conntrack_entries_for_delete_rule_ipv4(self):
-        for direction in ['ingress', 'egress']:
-            for pro in [None, 'tcp', 'icmp', 'udp']:
-                self._test_remove_conntrack_entries(
-                    'IPv4', pro, direction)
-
-    def test_remove_conntrack_entries_for_delete_rule_ipv6(self):
-        for direction in ['ingress', 'egress']:
-            for pro in [None, 'tcp', 'icmp', 'udp']:
-                self._test_remove_conntrack_entries(
-                    'IPv6', pro, direction)
-
-    def test_remove_conntrack_entries_for_port_sec_group_change(self):
-        port = self._fake_port()
-        port['security_groups'] = ['fake_sg_id']
-        self.firewall.filtered_ports[port['device']] = port
-        self.firewall.updated_sg_members = set(['tapfake_dev'])
-        self.firewall.filter_defer_apply_on()
-        new_port = copy.deepcopy(port)
-        new_port['security_groups'] = ['fake_sg_id2']
-        self.firewall.filtered_ports[port['device']] = new_port
-        self.firewall.filter_defer_apply_off()
-        calls = [
-            # initial data has 1, 2, and 9 in use, CT zone will start at 10.
-            mock.call(['conntrack', '-D', '-f', 'ipv4', '-d', '10.0.0.1',
-                       '-w', 10],
-                      run_as_root=True, check_exit_code=True,
-                      extra_ok_codes=[1]),
-            mock.call(['conntrack', '-D', '-f', 'ipv6', '-d', 'fe80::1',
-                       '-w', 10],
-                      run_as_root=True, check_exit_code=True,
-                      extra_ok_codes=[1])]
-        self.utils_exec.assert_has_calls(calls)
-
-    def test_update_delete_port_filter(self):
-        port = self._fake_port()
-        port['security_group_rules'] = [{'ethertype': 'IPv4',
-                                         'direction': 'ingress'}]
-        self.firewall.prepare_port_filter(port)
-        port['security_group_rules'] = [{'ethertype': 'IPv4',
-                                         'direction': 'egress'}]
-        self.firewall.update_port_filter(port)
-        self.firewall.update_port_filter({'device': 'no-exist-device'})
-        self.firewall.remove_port_filter(port)
-        self.firewall.remove_port_filter({'device': 'no-exist-device'})
-        calls = [mock.call.add_chain('sg-fallback'),
-                 mock.call.add_rule(
-                     'sg-fallback',
-                     '-j DROP',
-                     comment=ic.UNMATCH_DROP),
-                 mock.call.remove_chain('sg-chain'),
-                 mock.call.add_chain('sg-chain'),
-                 mock.call.add_chain('ifake_dev'),
-                 mock.call.add_rule(
-                     'FORWARD',
-                     '-m physdev --physdev-out tapfake_dev '
-                     '--physdev-is-bridged -j $sg-chain',
-                     comment=ic.VM_INT_SG),
-                 mock.call.add_rule(
-                     'sg-chain',
-                     '-m physdev --physdev-out tapfake_dev '
-                     '--physdev-is-bridged -j $ifake_dev',
-                     comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule('ifake_dev', '-j RETURN',
-                                    comment=None),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state INVALID -j DROP', comment=None),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-j $sg-fallback', comment=None),
-                 mock.call.add_chain('ofake_dev'),
-                 mock.call.add_rule(
-                     'FORWARD',
-                     '-m physdev --physdev-in tapfake_dev '
-                     '--physdev-is-bridged -j $sg-chain',
-                     comment=ic.VM_INT_SG),
-                 mock.call.add_rule(
-                     'sg-chain',
-                     '-m physdev --physdev-in tapfake_dev '
-                     '--physdev-is-bridged -j $ofake_dev',
-                     comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule(
-                     'INPUT',
-                     '-m physdev --physdev-in tapfake_dev '
-                     '--physdev-is-bridged -j $ofake_dev',
-                     comment=ic.INPUT_TO_SG),
-                 mock.call.add_chain('sfake_dev'),
-                 mock.call.add_rule(
-                     'sfake_dev',
-                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
-                     '-j RETURN',
-                     comment=ic.PAIR_ALLOW),
-                 mock.call.add_rule(
-                     'sfake_dev', '-j DROP',
-                     comment=ic.PAIR_DROP),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 68 -m udp --dport 67 -j RETURN',
-                     comment=None),
-                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
-                                    comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev', '-m state --state INVALID -j DROP',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-j $sg-fallback', comment=None),
-                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
-                 mock.call.remove_chain('ifake_dev'),
-                 mock.call.remove_chain('ofake_dev'),
-                 mock.call.remove_chain('sfake_dev'),
-                 mock.call.remove_chain('sg-chain'),
-                 mock.call.add_chain('sg-chain'),
-                 mock.call.add_chain('ifake_dev'),
-                 mock.call.add_rule(
-                     'FORWARD',
-                     '-m physdev --physdev-out tapfake_dev '
-                     '--physdev-is-bridged -j $sg-chain',
-                     comment=ic.VM_INT_SG),
-                 mock.call.add_rule(
-                     'sg-chain',
-                     '-m physdev --physdev-out tapfake_dev '
-                     '--physdev-is-bridged -j $ifake_dev',
-                     comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state INVALID -j DROP', comment=None),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-j $sg-fallback', comment=None),
-                 mock.call.add_chain('ofake_dev'),
-                 mock.call.add_rule(
-                     'FORWARD',
-                     '-m physdev --physdev-in tapfake_dev '
-                     '--physdev-is-bridged -j $sg-chain',
-                     comment=ic.VM_INT_SG),
-                 mock.call.add_rule(
-                     'sg-chain',
-                     '-m physdev --physdev-in tapfake_dev '
-                     '--physdev-is-bridged -j $ofake_dev',
-                     comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule(
-                     'INPUT',
-                     '-m physdev --physdev-in tapfake_dev '
-                     '--physdev-is-bridged -j $ofake_dev',
-                     comment=ic.INPUT_TO_SG),
-                 mock.call.add_chain('sfake_dev'),
-                 mock.call.add_rule(
-                     'sfake_dev',
-                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
-                     '-j RETURN',
-                     comment=ic.PAIR_ALLOW),
-                 mock.call.add_rule(
-                     'sfake_dev', '-j DROP',
-                     comment=ic.PAIR_DROP),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 68 -m udp --dport 67 -j RETURN',
-                     comment=None),
-                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
-                                    comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule('ofake_dev', '-j RETURN',
-                                    comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-m state --state INVALID -j DROP', comment=None),
-                 mock.call.add_rule('ofake_dev',
-                                    '-j $sg-fallback',
-                                    comment=None),
-                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
-                 mock.call.remove_chain('ifake_dev'),
-                 mock.call.remove_chain('ofake_dev'),
-                 mock.call.remove_chain('sfake_dev'),
-                 mock.call.remove_chain('sg-chain'),
-                 mock.call.add_chain('sg-chain')]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_remove_unknown_port(self):
-        port = self._fake_port()
-        self.firewall.remove_port_filter(port)
-        # checking no exception occurs
-        self.assertFalse(self.v4filter_inst.called)
-
-    def test_defer_apply(self):
-        with self.firewall.defer_apply():
-            pass
-        self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
-                                             mock.call.defer_apply_off()])
-
-    def test_filter_defer_with_exception(self):
-        try:
-            with self.firewall.defer_apply():
-                raise Exception("some exception")
-        except Exception:
-            pass
-        self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
-                                             mock.call.defer_apply_off()])
-
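Both tests hinge on defer_apply() pairing defer_apply_on with defer_apply_off
even when the body raises; a try/finally context manager gives exactly that
guarantee. A minimal sketch of the pattern being verified, not the driver's
actual code:

    from contextlib import contextmanager

    @contextmanager
    def defer_apply(iptables):
        iptables.defer_apply_on()
        try:
            yield
        finally:
            # Reached on normal exit and on exceptions alike, which is
            # exactly what test_filter_defer_with_exception checks.
            iptables.defer_apply_off()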
-    def _mock_chain_applies(self):
-        class CopyingMock(mock.MagicMock):
-            """Copies arguments so mutable arguments can be asserted on.
-
-            Copied verbatim from unittest.mock documentation.
-            """
-            def __call__(self, *args, **kwargs):
-                args = copy.deepcopy(args)
-                kwargs = copy.deepcopy(kwargs)
-                return super(CopyingMock, self).__call__(*args, **kwargs)
-        # Need to use CopyingMock because _{setup,remove}_chains_apply are
-        # usually called with a dict that's modified between calls (i.e.,
-        # self.firewall.filtered_ports).
-        chain_applies = CopyingMock()
-        self.firewall._setup_chains_apply = chain_applies.setup
-        self.firewall._remove_chains_apply = chain_applies.remove
-        return chain_applies
-
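The deep copy is what makes the assertions below meaningful: a plain
MagicMock records a reference to its arguments, so mutating
firewall.filtered_ports after the fact would silently rewrite every recorded
call. A standalone illustration of the difference:

    import copy
    from unittest import mock

    class CopyingMock(mock.MagicMock):
        def __call__(self, *args, **kwargs):
            return super(CopyingMock, self).__call__(
                *copy.deepcopy(args), **copy.deepcopy(kwargs))

    plain, copying = mock.MagicMock(), CopyingMock()
    d = {}
    plain(d)
    copying(d)
    d['d1'] = 'port'                               # mutate after the calls
    plain.assert_called_once_with({'d1': 'port'})  # reference was recorded
    copying.assert_called_once_with({})            # snapshot was recorded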
-    def test_mock_chain_applies(self):
-        chain_applies = self._mock_chain_applies()
-        port_prepare = {'device': 'd1', 'mac_address': 'prepare'}
-        port_update = {'device': 'd1', 'mac_address': 'update'}
-        self.firewall.prepare_port_filter(port_prepare)
-        self.firewall.update_port_filter(port_update)
-        self.firewall.remove_port_filter(port_update)
-        chain_applies.assert_has_calls(
-            [mock.call.remove({}, {}),
-             mock.call.setup({'d1': port_prepare}, {}),
-             mock.call.remove({'d1': port_prepare}, {}),
-             mock.call.setup({'d1': port_update}, {}),
-             mock.call.remove({'d1': port_update}, {}),
-             mock.call.setup({}, {})])
-
-    def test_defer_chain_apply_need_pre_defer_copy(self):
-        chain_applies = self._mock_chain_applies()
-        port = self._fake_port()
-        device2port = {port['device']: port}
-        self.firewall.prepare_port_filter(port)
-        with self.firewall.defer_apply():
-            self.firewall.remove_port_filter(port)
-        chain_applies.assert_has_calls([mock.call.remove({}, {}),
-                                        mock.call.setup(device2port, {}),
-                                        mock.call.remove(device2port, {}),
-                                        mock.call.setup({}, {})])
-
-    def test_defer_chain_apply_coalesce_simple(self):
-        chain_applies = self._mock_chain_applies()
-        port = self._fake_port()
-        with self.firewall.defer_apply():
-            self.firewall.prepare_port_filter(port)
-            self.firewall.update_port_filter(port)
-            self.firewall.remove_port_filter(port)
-        chain_applies.assert_has_calls([mock.call.remove({}, {}),
-                                        mock.call.setup({}, {})])
-
-    def test_defer_chain_apply_coalesce_multiple_ports(self):
-        chain_applies = self._mock_chain_applies()
-        port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'}
-        port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'}
-        device2port = {'d1': port1, 'd2': port2}
-        with self.firewall.defer_apply():
-            self.firewall.prepare_port_filter(port1)
-            self.firewall.prepare_port_filter(port2)
-        chain_applies.assert_has_calls([mock.call.remove({}, {}),
-                                        mock.call.setup(device2port, {})])
-
-    def test_ip_spoofing_filter_with_multiple_ips(self):
-        port = {'device': 'tapfake_dev',
-                'mac_address': 'ff:ff:ff:ff:ff:ff',
-                'network_id': 'fake_net',
-                'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
-        self.firewall.prepare_port_filter(port)
-        calls = [mock.call.add_chain('sg-fallback'),
-                 mock.call.add_rule(
-                     'sg-fallback', '-j DROP',
-                     comment=ic.UNMATCH_DROP),
-                 mock.call.remove_chain('sg-chain'),
-                 mock.call.add_chain('sg-chain'),
-                 mock.call.add_chain('ifake_dev'),
-                 mock.call.add_rule('FORWARD',
-                                    '-m physdev --physdev-out tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $sg-chain', comment=ic.VM_INT_SG),
-                 mock.call.add_rule('sg-chain',
-                                    '-m physdev --physdev-out tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $ifake_dev',
-                                    comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state INVALID -j DROP', comment=None),
-                 mock.call.add_rule('ifake_dev',
-                                    '-j $sg-fallback', comment=None),
-                 mock.call.add_chain('ofake_dev'),
-                 mock.call.add_rule('FORWARD',
-                                    '-m physdev --physdev-in tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $sg-chain', comment=ic.VM_INT_SG),
-                 mock.call.add_rule('sg-chain',
-                                    '-m physdev --physdev-in tapfake_dev '
-                                    '--physdev-is-bridged -j $ofake_dev',
-                                    comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule('INPUT',
-                                    '-m physdev --physdev-in tapfake_dev '
-                                    '--physdev-is-bridged -j $ofake_dev',
-                                    comment=ic.INPUT_TO_SG),
-                 mock.call.add_chain('sfake_dev'),
-                 mock.call.add_rule(
-                     'sfake_dev',
-                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
-                     '-j RETURN',
-                     comment=ic.PAIR_ALLOW),
-                 mock.call.add_rule(
-                     'sfake_dev',
-                     '-s 10.0.0.2/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
-                     '-j RETURN',
-                     comment=ic.PAIR_ALLOW),
-                 mock.call.add_rule(
-                     'sfake_dev', '-j DROP',
-                     comment=ic.PAIR_DROP),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 68 -m udp --dport 67 -j RETURN',
-                     comment=None),
-                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
-                                    comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-m state --state INVALID -j DROP', comment=None),
-                 mock.call.add_rule('ofake_dev',
-                                    '-j $sg-fallback', comment=None),
-                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_ip_spoofing_no_fixed_ips(self):
-        port = {'device': 'tapfake_dev',
-                'mac_address': 'ff:ff:ff:ff:ff:ff',
-                'network_id': 'fake_net',
-                'fixed_ips': []}
-        self.firewall.prepare_port_filter(port)
-        calls = [mock.call.add_chain('sg-fallback'),
-                 mock.call.add_rule(
-                     'sg-fallback', '-j DROP',
-                     comment=ic.UNMATCH_DROP),
-                 mock.call.remove_chain('sg-chain'),
-                 mock.call.add_chain('sg-chain'),
-                 mock.call.add_chain('ifake_dev'),
-                 mock.call.add_rule('FORWARD',
-                                    '-m physdev --physdev-out tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $sg-chain', comment=ic.VM_INT_SG),
-                 mock.call.add_rule('sg-chain',
-                                    '-m physdev --physdev-out tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $ifake_dev',
-                                    comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ifake_dev',
-                     '-m state --state INVALID -j DROP', comment=None),
-                 mock.call.add_rule('ifake_dev', '-j $sg-fallback',
-                                    comment=None),
-                 mock.call.add_chain('ofake_dev'),
-                 mock.call.add_rule('FORWARD',
-                                    '-m physdev --physdev-in tapfake_dev '
-                                    '--physdev-is-bridged '
-                                    '-j $sg-chain', comment=ic.VM_INT_SG),
-                 mock.call.add_rule('sg-chain',
-                                    '-m physdev --physdev-in tapfake_dev '
-                                    '--physdev-is-bridged -j $ofake_dev',
-                                    comment=ic.SG_TO_VM_SG),
-                 mock.call.add_rule('INPUT',
-                                    '-m physdev --physdev-in tapfake_dev '
-                                    '--physdev-is-bridged -j $ofake_dev',
-                                    comment=ic.INPUT_TO_SG),
-                 mock.call.add_chain('sfake_dev'),
-                 mock.call.add_rule(
-                     'sfake_dev',
-                     '-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN',
-                     comment=ic.PAIR_ALLOW),
-                 mock.call.add_rule(
-                     'sfake_dev', '-j DROP',
-                     comment=ic.PAIR_DROP),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 68 -m udp --dport 67 -j RETURN',
-                     comment=None),
-                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
-                                    comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-m state --state RELATED,ESTABLISHED -j RETURN',
-                     comment=None),
-                 mock.call.add_rule(
-                     'ofake_dev',
-                     '-m state --state INVALID -j DROP',
-                     comment=None),
-                 mock.call.add_rule('ofake_dev', '-j $sg-fallback',
-                                    comment=None),
-                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
-        self.v4filter_inst.assert_has_calls(calls)
-
-
-class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase):
-    def setUp(self):
-        super(IptablesFirewallEnhancedIpsetTestCase, self).setUp()
-        self.firewall.ipset = mock.Mock()
-        self.firewall.ipset.get_name.side_effect = (
-            ipset_manager.IpsetManager.get_name)
-        self.firewall.ipset.set_name_exists.return_value = True
-
-    def _fake_port(self, sg_id=FAKE_SGID):
-        return {'device': 'tapfake_dev',
-                'mac_address': 'ff:ff:ff:ff:ff:ff',
-                'network_id': 'fake_net',
-                'fixed_ips': [FAKE_IP['IPv4'],
-                              FAKE_IP['IPv6']],
-                'security_groups': [sg_id],
-                'security_group_source_groups': [sg_id]}
-
-    def _fake_sg_rule_for_ethertype(self, ethertype, remote_group):
-        return {'direction': 'ingress', 'remote_group_id': remote_group,
-                'ethertype': ethertype}
-
-    def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None):
-        remote_groups = remote_groups or {_IPv4: [FAKE_SGID],
-                                          _IPv6: [FAKE_SGID]}
-        rules = []
-        for ip_version, remote_group_list in six.iteritems(remote_groups):
-            for remote_group in remote_group_list:
-                rules.append(self._fake_sg_rule_for_ethertype(ip_version,
-                                                              remote_group))
-        return {sg_id: rules}
-
-    def _fake_sg_members(self, sg_ids=None):
-        return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])}
-
-    def test_prepare_port_filter_with_new_members(self):
-        self.firewall.sg_rules = self._fake_sg_rules()
-        self.firewall.sg_members = {'fake_sgid': {
-            'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}}
-        self.firewall.pre_sg_members = {}
-        port = self._fake_port()
-        self.firewall.prepare_port_filter(port)
-        calls = [
-            mock.call.set_members('fake_sgid', 'IPv4',
-                                  ['10.0.0.1', '10.0.0.2']),
-            mock.call.set_members('fake_sgid', 'IPv6',
-                                  ['fe80::1'])
-        ]
-        self.firewall.ipset.assert_has_calls(calls, any_order=True)
-
-    def _setup_fake_firewall_members_and_rules(self, firewall):
-        firewall.sg_rules = self._fake_sg_rules()
-        firewall.pre_sg_rules = self._fake_sg_rules()
-        firewall.sg_members = self._fake_sg_members()
-        firewall.pre_sg_members = firewall.sg_members
-
-    def _prepare_rules_and_members_for_removal(self):
-        self._setup_fake_firewall_members_and_rules(self.firewall)
-        self.firewall.pre_sg_members[OTHER_SGID] = (
-            self.firewall.pre_sg_members[FAKE_SGID])
-
-    def test_determine_remote_sgs_to_remove(self):
-        self._prepare_rules_and_members_for_removal()
-        ports = [self._fake_port()]
-
-        self.assertEqual(
-            {_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])},
-            self.firewall._determine_remote_sgs_to_remove(ports))
-
-    def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self):
-        self._prepare_rules_and_members_for_removal()
-        ports = [self._fake_port()]
-        self.firewall.sg_rules = self._fake_sg_rules(
-            remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID],
-                           _IPv6: [FAKE_SGID]})
-        self.assertEqual(
-            {_IPv4: set(), _IPv6: set([OTHER_SGID])},
-            self.firewall._determine_remote_sgs_to_remove(ports))
-
-    def test_get_remote_sg_ids_by_ipversion(self):
-        self.firewall.sg_rules = self._fake_sg_rules(
-            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]})
-
-        ports = [self._fake_port()]
-
-        self.assertEqual(
-            {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
-            self.firewall._get_remote_sg_ids_sets_by_ipversion(ports))
-
-    def test_get_remote_sg_ids(self):
-        self.firewall.sg_rules = self._fake_sg_rules(
-            remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID],
-                           _IPv6: [OTHER_SGID, OTHER_SGID, OTHER_SGID]})
-
-        port = self._fake_port()
-
-        self.assertEqual(
-            {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
-            self.firewall._get_remote_sg_ids(port))
-
-    def test_determine_sg_rules_to_remove(self):
-        self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID)
-        ports = [self._fake_port()]
-
-        self.assertEqual(set([OTHER_SGID]),
-                         self.firewall._determine_sg_rules_to_remove(ports))
-
-    def test_get_sg_ids_set_for_ports(self):
-        sg_ids = set([FAKE_SGID, OTHER_SGID])
-        ports = [self._fake_port(sg_id) for sg_id in sg_ids]
-
-        self.assertEqual(sg_ids,
-                         self.firewall._get_sg_ids_set_for_ports(ports))
-
-    def test_remove_sg_members(self):
-        self.firewall.sg_members = self._fake_sg_members([FAKE_SGID,
-                                                          OTHER_SGID])
-        remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]),
-                                _IPv6: set([FAKE_SGID, OTHER_SGID])}
-        self.firewall._remove_sg_members(remote_sgs_to_remove)
-
-        self.assertIn(OTHER_SGID, self.firewall.sg_members)
-        self.assertNotIn(FAKE_SGID, self.firewall.sg_members)
-
-    def test_remove_unused_security_group_info_clears_unused_rules(self):
-        self._setup_fake_firewall_members_and_rules(self.firewall)
-        self.firewall.prepare_port_filter(self._fake_port())
-
-        # create another SG which won't be referenced by any filtered port
-        fake_sg_rules = self.firewall.sg_rules['fake_sgid']
-        self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules
-        self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules
-
-        # call the cleanup function, and check that the unused sg_rules
-        # are removed
-        self.firewall._remove_unused_security_group_info()
-        self.assertNotIn(OTHER_SGID, self.firewall.sg_rules)
-
-    def test_remove_unused_security_group_info(self):
-        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
-        self.firewall.pre_sg_members = self.firewall.sg_members
-        self.firewall.sg_rules = self._fake_sg_rules(
-            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]})
-        self.firewall.pre_sg_rules = self.firewall.sg_rules
-        port = self._fake_port()
-        self.firewall.filtered_ports['tapfake_dev'] = port
-        self.firewall._remove_unused_security_group_info()
-        self.assertNotIn(OTHER_SGID, self.firewall.sg_members)
-
-    def test_not_remove_used_security_group_info(self):
-        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
-        self.firewall.pre_sg_members = self.firewall.sg_members
-        self.firewall.sg_rules = self._fake_sg_rules(
-            remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]})
-        self.firewall.pre_sg_rules = self.firewall.sg_rules
-        port = self._fake_port()
-        self.firewall.filtered_ports['tapfake_dev'] = port
-        self.firewall._remove_unused_security_group_info()
-        self.assertIn(OTHER_SGID, self.firewall.sg_members)
-
-    def test_remove_all_unused_info(self):
-        self._setup_fake_firewall_members_and_rules(self.firewall)
-        self.firewall.filtered_ports = {}
-        self.firewall._remove_unused_security_group_info()
-        self.assertFalse(self.firewall.sg_members)
-        self.assertFalse(self.firewall.sg_rules)
-
-    def test_single_fallback_accept_rule(self):
-        p1, p2 = self._fake_port(), self._fake_port()
-        self.firewall._setup_chains_apply(dict(p1=p1, p2=p2), {})
-        v4_adds = self.firewall.iptables.ipv4['filter'].add_rule.mock_calls
-        v6_adds = self.firewall.iptables.ipv6['filter'].add_rule.mock_calls
-        sg_chain_v4_accept = [call for call in v4_adds
-                              if call == mock.call('sg-chain', '-j ACCEPT')]
-        sg_chain_v6_accept = [call for call in v6_adds
-                              if call == mock.call('sg-chain', '-j ACCEPT')]
-        self.assertEqual(1, len(sg_chain_v4_accept))
-        self.assertEqual(1, len(sg_chain_v6_accept))
-
-    def test_prepare_port_filter_with_deleted_member(self):
-        self.firewall.sg_rules = self._fake_sg_rules()
-        self.firewall.pre_sg_rules = self._fake_sg_rules()
-        self.firewall.sg_members = {'fake_sgid': {
-            'IPv4': [
-                '10.0.0.1', '10.0.0.3', '10.0.0.4', '10.0.0.5'],
-            'IPv6': ['fe80::1']}}
-        self.firewall.pre_sg_members = {'fake_sgid': {
-            'IPv4': ['10.0.0.2'],
-            'IPv6': ['fe80::1']}}
-        self.firewall.prepare_port_filter(self._fake_port())
-        calls = [
-            mock.call.set_members('fake_sgid', 'IPv4',
-                                  ['10.0.0.1', '10.0.0.3', '10.0.0.4',
-                                   '10.0.0.5']),
-            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])]
-
-        self.firewall.ipset.assert_has_calls(calls, any_order=True)
-
-    def test_remove_port_filter_with_destroy_ipset_chain(self):
-        self.firewall.sg_rules = self._fake_sg_rules()
-        port = self._fake_port()
-        self.firewall.sg_members = {'fake_sgid': {
-            'IPv4': ['10.0.0.1'],
-            'IPv6': ['fe80::1']}}
-        self.firewall.pre_sg_members = {'fake_sgid': {
-            'IPv4': [],
-            'IPv6': []}}
-        self.firewall.prepare_port_filter(port)
-        self.firewall.filter_defer_apply_on()
-        self.firewall.sg_members = {'fake_sgid': {
-            'IPv4': [],
-            'IPv6': []}}
-        self.firewall.pre_sg_members = {'fake_sgid': {
-            'IPv4': ['10.0.0.1'],
-            'IPv6': ['fe80::1']}}
-        self.firewall.remove_port_filter(port)
-        self.firewall.filter_defer_apply_off()
-        calls = [
-            mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
-            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
-            mock.call.get_name('fake_sgid', 'IPv4'),
-            mock.call.set_name_exists('NIPv4fake_sgid'),
-            mock.call.get_name('fake_sgid', 'IPv6'),
-            mock.call.set_name_exists('NIPv6fake_sgid'),
-            mock.call.destroy('fake_sgid', 'IPv4'),
-            mock.call.destroy('fake_sgid', 'IPv6')]
-
-        self.firewall.ipset.assert_has_calls(calls, any_order=True)
-
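The NIPv4fake_sgid / NIPv6fake_sgid names asserted above come from
IpsetManager.get_name, which concatenates a prefix, the ethertype and the
security-group id. A sketch consistent with those assertions (the real
implementation also truncates to ipset's name-length limit; the 31 used here
is an assumption):

    def get_name(sg_id, ethertype):
        # 'N' + 'IPv4' + 'fake_sgid' -> 'NIPv4fake_sgid'
        return ('N' + ethertype + sg_id)[:31]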
-    def test_prepare_port_filter_with_sg_no_member(self):
-        self.firewall.sg_rules = self._fake_sg_rules()
-        self.firewall.sg_rules[FAKE_SGID].append(
-            {'direction': 'ingress', 'remote_group_id': 'fake_sgid2',
-             'ethertype': 'IPv4'})
-        self.firewall.sg_rules.update()
-        self.firewall.sg_members['fake_sgid'] = {
-            'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}
-        self.firewall.pre_sg_members = {}
-        port = self._fake_port()
-        port['security_group_source_groups'].append('fake_sgid2')
-        self.firewall.prepare_port_filter(port)
-        calls = [mock.call.set_members('fake_sgid', 'IPv4',
-                                       ['10.0.0.1', '10.0.0.2']),
-                 mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])]
-
-        self.firewall.ipset.assert_has_calls(calls, any_order=True)
-
-    def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
-        self.firewall.sg_rules = self._fake_sg_rules()
-        self.firewall.pre_sg_rules = self._fake_sg_rules()
-        self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
-                                      'IPv6fake_sgid': ['fe80::1']}
-        self.firewall.sg_members = {'fake_sgid': {
-            'IPv4': ['10.0.0.2'],
-            'IPv6': ['fe80::1']}}
-        self.firewall.pre_sg_members = {'fake_sgid': {
-            'IPv4': ['10.0.0.2'],
-            'IPv6': ['fe80::1']}}
-        self.firewall.sg_rules['fake_sgid'].remove(
-            {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
-             'ethertype': 'IPv4'})
-        self.firewall.sg_rules.update()
-        self.firewall._defer_apply = True
-        port = self._fake_port()
-        self.firewall.filtered_ports['tapfake_dev'] = port
-        self.firewall._pre_defer_filtered_ports = {}
-        self.firewall._pre_defer_unfiltered_ports = {}
-        self.firewall.filter_defer_apply_off()
-        calls = [mock.call.destroy('fake_sgid', 'IPv4')]
-
-        self.firewall.ipset.assert_has_calls(calls, any_order=True)
-
-    def test_sg_rule_expansion_with_remote_ips(self):
-        other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
-        self.firewall.sg_members = {'fake_sgid': {
-            'IPv4': [FAKE_IP['IPv4']] + other_ips,
-            'IPv6': [FAKE_IP['IPv6']]}}
-
-        port = self._fake_port()
-        rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
-        rules = self.firewall._expand_sg_rule_with_remote_ips(
-            rule, port, 'ingress')
-        self.assertEqual(list(rules),
-                         [dict(list(rule.items()) +
-                               [('source_ip_prefix', '%s/32' % ip)])
-                          for ip in other_ips])
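-        # Note that the port's own address (FAKE_IP['IPv4']) is left
-        # out of the expansion: only the other members' /32 source
-        # prefixes are emitted.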
-
-    def test_build_ipv4v6_mac_ip_list(self):
-        mac_oth = 'ffff-ff0f-ffff'
-        mac_unix = 'FF:FF:FF:0F:FF:FF'
-        ipv4 = FAKE_IP['IPv4']
-        ipv6 = FAKE_IP['IPv6']
-        fake_ipv4_pair = []
-        fake_ipv4_pair.append((mac_unix, ipv4))
-        fake_ipv6_pair = []
-        fake_ipv6_pair.append((mac_unix, ipv6))
-
-        mac_ipv4_pairs = []
-        mac_ipv6_pairs = []
-
-        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4,
-                                                mac_ipv4_pairs, mac_ipv6_pairs)
-        self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs)
-        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6,
-                                                mac_ipv4_pairs, mac_ipv6_pairs)
-        self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
-
-    def test_update_ipset_members(self):
-        self.firewall.sg_members[FAKE_SGID][_IPv4] = []
-        self.firewall.sg_members[FAKE_SGID][_IPv6] = []
-        sg_info = {constants.IPv4: [FAKE_SGID]}
-        self.firewall._update_ipset_members(sg_info)
-        calls = [mock.call.set_members(FAKE_SGID, constants.IPv4, [])]
-        self.firewall.ipset.assert_has_calls(calls)
-
-
-class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
-
-    def setUp(self):
-        super(OVSHybridIptablesFirewallTestCase, self).setUp()
-        self.firewall = iptables_firewall.OVSHybridIptablesFirewallDriver()
-        # initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
-        self._dev_zone_map = {'61634509-31': 2, '8f46cf18-12': 9,
-                              '95c24827-02': 2, 'e804433b-61': 1}
-
-    def test__populate_initial_zone_map(self):
-        self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)
-
-    def test__generate_device_zone(self):
-        # initial data has 1, 2, and 9 in use.
-        # new zones are allocated above the current maximum first.
-        self.assertEqual(10, self.firewall._generate_device_zone('test'))
-
-        # once it's maxed out, it scans for gaps
-        self.firewall._device_zone_map['someport'] = (
-            iptables_firewall.MAX_CONNTRACK_ZONES)
-        for i in range(3, 9):
-            self.assertEqual(i, self.firewall._generate_device_zone(i))
-
-        # 9 and 10 are taken so next should be 11
-        self.assertEqual(11, self.firewall._generate_device_zone('p11'))
-
-        # take out zone 1 and make sure it's selected
-        self.firewall._device_zone_map.pop('e804433b-61')
-        self.assertEqual(1, self.firewall._generate_device_zone('p1'))
-
-        # fill it up and then make sure an extra throws an error
-        for i in range(1, 65536):
-            self.firewall._device_zone_map['dev-%s' % i] = i
-        with testtools.ExpectedException(n_exc.CTZoneExhaustedError):
-            self.firewall._find_open_zone()
-
-        # with the map full, a retry should trigger a cleanup and return 1
-        self.assertEqual(1, self.firewall._generate_device_zone('p12'))
-        self.assertEqual({'p12': 1}, self.firewall._device_zone_map)
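-        # Taken together, these assertions pin down the allocation
-        # strategy. A minimal sketch of it (an assumption; the real
-        # logic lives in the driver under test):
-        #
-        #     def _find_open_zone(zones_in_use, max_zone):
-        #         top = max(zones_in_use) if zones_in_use else 0
-        #         if top < max_zone:
-        #             return top + 1              # fill above the max
-        #         for cand in range(1, max_zone + 1):
-        #             if cand not in zones_in_use:
-        #                 return cand             # then scan for gaps
-        #         raise n_exc.CTZoneExhaustedError()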
-
-    def test_get_device_zone(self):
-        # initial data has 1, 2, and 9 in use.
-        self.assertEqual(10,
-                         self.firewall.get_device_zone('12345678901234567'))
-        # should have been truncated to 11 chars
-        self._dev_zone_map.update({'12345678901': 10})
-        self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)
diff --git a/neutron/tests/unit/agent/linux/test_iptables_manager.py b/neutron/tests/unit/agent/linux/test_iptables_manager.py
deleted file mode 100644 (file)
index 685e66a..0000000
+++ /dev/null
@@ -1,1060 +0,0 @@
-# Copyright 2012 Locaweb.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-import os
-import sys
-
-import mock
-from oslo_config import cfg
-import testtools
-
-from neutron._i18n import _
-from neutron.agent.linux import iptables_comments as ic
-from neutron.agent.linux import iptables_manager
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.tests import base
-from neutron.tests import tools
-
-
-IPTABLES_ARG = {'bn': iptables_manager.binary_name,
-                'snat_out_comment': ic.SNAT_OUT,
-                'filter_rules': '',
-                'mark': constants.ROUTER_MARK_MASK}
-
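-# In the dump templates below, '%(bn)s' is the (16-char truncated)
-# binary name used to prefix every neutron-owned chain, and '%(mark)s'
-# is the router mark mask used by the mangle-table mark rule.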
-NAT_TEMPLATE = ('# Generated by iptables_manager\n'
-                '*nat\n'
-                ':OUTPUT - [0:0]\n'
-                ':POSTROUTING - [0:0]\n'
-                ':PREROUTING - [0:0]\n'
-                ':neutron-postrouting-bottom - [0:0]\n'
-                ':%(bn)s-OUTPUT - [0:0]\n'
-                ':%(bn)s-POSTROUTING - [0:0]\n'
-                ':%(bn)s-PREROUTING - [0:0]\n'
-                ':%(bn)s-float-snat - [0:0]\n'
-                ':%(bn)s-snat - [0:0]\n'
-                '-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
-                '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
-                '-I POSTROUTING 2 -j neutron-postrouting-bottom\n'
-                '-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
-                '-I neutron-postrouting-bottom 1 -j %(bn)s-snat\n'
-                '-I %(bn)s-snat 1 -j '
-                '%(bn)s-float-snat\n'
-                'COMMIT\n'
-                '# Completed by iptables_manager\n')
-
-NAT_DUMP = NAT_TEMPLATE % IPTABLES_ARG
-
-FILTER_TEMPLATE = ('# Generated by iptables_manager\n'
-                   '*filter\n'
-                   ':FORWARD - [0:0]\n'
-                   ':INPUT - [0:0]\n'
-                   ':OUTPUT - [0:0]\n'
-                   ':neutron-filter-top - [0:0]\n'
-                   ':%(bn)s-FORWARD - [0:0]\n'
-                   ':%(bn)s-INPUT - [0:0]\n'
-                   ':%(bn)s-OUTPUT - [0:0]\n'
-                   ':%(bn)s-local - [0:0]\n'
-                   '-I FORWARD 1 -j neutron-filter-top\n'
-                   '-I FORWARD 2 -j %(bn)s-FORWARD\n'
-                   '-I INPUT 1 -j %(bn)s-INPUT\n'
-                   '-I OUTPUT 1 -j neutron-filter-top\n'
-                   '-I OUTPUT 2 -j %(bn)s-OUTPUT\n'
-                   '-I neutron-filter-top 1 -j %(bn)s-local\n'
-                   'COMMIT\n'
-                   '# Completed by iptables_manager\n')
-
-FILTER_DUMP = FILTER_TEMPLATE % IPTABLES_ARG
-
-FILTER_WITH_RULES_TEMPLATE = (
-    '# Generated by iptables_manager\n'
-    '*filter\n'
-    ':FORWARD - [0:0]\n'
-    ':INPUT - [0:0]\n'
-    ':OUTPUT - [0:0]\n'
-    ':neutron-filter-top - [0:0]\n'
-    ':%(bn)s-FORWARD - [0:0]\n'
-    ':%(bn)s-INPUT - [0:0]\n'
-    ':%(bn)s-OUTPUT - [0:0]\n'
-    ':%(bn)s-filter - [0:0]\n'
-    ':%(bn)s-local - [0:0]\n'
-    '-I FORWARD 1 -j neutron-filter-top\n'
-    '-I FORWARD 2 -j %(bn)s-FORWARD\n'
-    '-I INPUT 1 -j %(bn)s-INPUT\n'
-    '-I OUTPUT 1 -j neutron-filter-top\n'
-    '-I OUTPUT 2 -j %(bn)s-OUTPUT\n'
-    '-I neutron-filter-top 1 -j %(bn)s-local\n'
-    '%(filter_rules)s'
-    'COMMIT\n'
-    '# Completed by iptables_manager\n')
-
-COMMENTED_NAT_DUMP = (
-    '# Generated by iptables_manager\n'
-    '*nat\n'
-    ':OUTPUT - [0:0]\n'
-    ':POSTROUTING - [0:0]\n'
-    ':PREROUTING - [0:0]\n'
-    ':neutron-postrouting-bottom - [0:0]\n'
-    ':%(bn)s-OUTPUT - [0:0]\n'
-    ':%(bn)s-POSTROUTING - [0:0]\n'
-    ':%(bn)s-PREROUTING - [0:0]\n'
-    ':%(bn)s-float-snat - [0:0]\n'
-    ':%(bn)s-snat - [0:0]\n'
-    '-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
-    '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
-    '-I POSTROUTING 2 -j neutron-postrouting-bottom\n'
-    '-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
-    '-I neutron-postrouting-bottom 1 '
-    '-m comment --comment "%(snat_out_comment)s" -j %(bn)s-snat\n'
-    '-I %(bn)s-snat 1 -j '
-    '%(bn)s-float-snat\n'
-    'COMMIT\n'
-    '# Completed by iptables_manager\n' % IPTABLES_ARG)
-
-TRAFFIC_COUNTERS_DUMP = (
-    'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n'
-    '    pkts      bytes target     prot opt in     out     source'
-    '               destination         \n'
-    '     400   65901 chain1     all  --  *      *       0.0.0.0/0'
-    '            0.0.0.0/0           \n'
-    '     400   65901 chain2     all  --  *      *       0.0.0.0/0'
-    '            0.0.0.0/0           \n')
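-
-# get_traffic_counters() is expected to sum the two chain rows above:
-# 2 * 400 = 800 packets and 2 * 65901 = 131802 bytes; see the
-# exp_packets/exp_bytes assertions further down.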
-
-
-class IptablesTestCase(base.BaseTestCase):
-
-    def test_get_binary_name_in_unittest(self):
-        # Corresponds to sys.argv content when running python -m unittest class
-        with mock.patch('sys.argv', ['python -m unittest', 'class']):
-            binary_name = iptables_manager.get_binary_name()
-            self.assertEqual('python_-m_unitte', binary_name)
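-            # i.e. presumably os.path.basename(sys.argv[0])[:16] with
-            # spaces replaced by underscores (an assumption consistent
-            # with this expectation and test_binary_name below).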
-
-
-class IptablesCommentsTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(IptablesCommentsTestCase, self).setUp()
-        cfg.CONF.set_override('comment_iptables_rules', True, 'AGENT')
-        self.iptables = iptables_manager.IptablesManager()
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-    def test_comments_short_enough(self):
-        for attr in dir(ic):
-            if not attr.startswith('__') and len(getattr(ic, attr)) > 255:
-                self.fail("Iptables comment %s is longer than 255 characters."
-                          % attr)
-
-    def test_reordering_of_jump_rule_comments(self):
-        # jump at the start
-        self.assertEqual(
-            '-m comment --comment "aloha" -j sg-chain',
-            iptables_manager.comment_rule('-j sg-chain', 'aloha'))
-        # jump in the middle
-        self.assertEqual(
-            '-s source -m comment --comment "aloha" -j sg-chain',
-            iptables_manager.comment_rule('-s source -j sg-chain', 'aloha'))
-        # no jump rule
-        self.assertEqual(
-            '-s source -m comment --comment "aloha"',
-            iptables_manager.comment_rule('-s source', 'aloha'))
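-        # A minimal sketch consistent with these three assertions
-        # (assumed, not necessarily the actual implementation): splice
-        # the comment in front of the '-j' jump if one is present,
-        # otherwise append it:
-        #
-        #     def comment_rule(rule, comment):
-        #         comment_str = '-m comment --comment "%s"' % comment
-        #         if '-j' in rule:
-        #             head, _, target = rule.partition('-j')
-        #             return '%s%s -j%s' % (head, comment_str, target)
-        #         return '%s %s' % (rule, comment_str)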
-
-    def test_add_filter_rule(self):
-        iptables_args = {}
-        iptables_args.update(IPTABLES_ARG)
-        filter_rules = ('-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j '
-                        '%(bn)s-filter\n-I %(bn)s-filter 1 -j DROP\n'
-                        % iptables_args)
-        iptables_args['filter_rules'] = filter_rules
-        filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args
-
-        raw_dump = _generate_raw_dump(IPTABLES_ARG)
-        mangle_dump = _generate_mangle_dump(IPTABLES_ARG)
-
-        expected_calls_and_values = [
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(filter_dump_mod + mangle_dump +
-                                      COMMENTED_NAT_DUMP + raw_dump),
-                       run_as_root=True),
-             None),
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + mangle_dump +
-                                      COMMENTED_NAT_DUMP + raw_dump),
-                       run_as_root=True
-                       ),
-             None),
-        ]
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.iptables.ipv4['filter'].add_chain('filter')
-        self.iptables.ipv4['filter'].add_rule('filter', '-j DROP')
-        self.iptables.ipv4['filter'].add_rule('INPUT',
-                                              '-s 0/0 -d 192.168.0.2 -j'
-                                              ' %(bn)s-filter' % IPTABLES_ARG)
-        self.iptables.apply()
-
-        self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP')
-        self.iptables.ipv4['filter'].remove_rule('INPUT',
-                                                 '-s 0/0 -d 192.168.0.2 -j'
-                                                 ' %(bn)s-filter'
-                                                 % IPTABLES_ARG)
-        self.iptables.ipv4['filter'].remove_chain('filter')
-
-        self.iptables.apply()
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-
-def _generate_mangle_dump(iptables_args):
-    return ('# Generated by iptables_manager\n'
-            '*mangle\n'
-            ':FORWARD - [0:0]\n'
-            ':INPUT - [0:0]\n'
-            ':OUTPUT - [0:0]\n'
-            ':POSTROUTING - [0:0]\n'
-            ':PREROUTING - [0:0]\n'
-            ':%(bn)s-FORWARD - [0:0]\n'
-            ':%(bn)s-INPUT - [0:0]\n'
-            ':%(bn)s-OUTPUT - [0:0]\n'
-            ':%(bn)s-POSTROUTING - [0:0]\n'
-            ':%(bn)s-PREROUTING - [0:0]\n'
-            ':%(bn)s-mark - [0:0]\n'
-            '-I FORWARD 1 -j %(bn)s-FORWARD\n'
-            '-I INPUT 1 -j %(bn)s-INPUT\n'
-            '-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
-            '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
-            '-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
-            '-I %(bn)s-PREROUTING 1 -j %(bn)s-mark\n'
-            'COMMIT\n'
-            '# Completed by iptables_manager\n' % iptables_args)
-
-
-def _generate_raw_dump(iptables_args):
-    return ('# Generated by iptables_manager\n'
-            '*raw\n'
-            ':OUTPUT - [0:0]\n'
-            ':PREROUTING - [0:0]\n'
-            ':%(bn)s-OUTPUT - [0:0]\n'
-            ':%(bn)s-PREROUTING - [0:0]\n'
-            '-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
-            '-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
-            'COMMIT\n'
-            '# Completed by iptables_manager\n' % iptables_args)
-
-
-MANGLE_DUMP = _generate_mangle_dump(IPTABLES_ARG)
-RAW_DUMP = _generate_raw_dump(IPTABLES_ARG)
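-
-# Every apply() in the state-ful tests below feeds all four tables to a
-# single 'iptables-restore -n' call, so the expected process_input is
-# always the concatenation filter + mangle + nat + raw.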
-
-
-class IptablesManagerStateFulTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(IptablesManagerStateFulTestCase, self).setUp()
-        cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
-        self.iptables = iptables_manager.IptablesManager()
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-    def test_binary_name(self):
-        expected = os.path.basename(sys.argv[0])[:16]
-        self.assertEqual(expected, iptables_manager.binary_name)
-
-    def test_get_chain_name(self):
-        name = '0123456789' * 5
-        # 28 chars is the maximum length of an iptables chain name.
-        self.assertEqual(iptables_manager.get_chain_name(name, wrap=False),
-                         name[:28])
-        # 11 chars is the maximum wrapped chain name length once the
-        # 16-char binary_name and a '-' are prepended (16 + 1 + 11 = 28).
-        self.assertEqual(iptables_manager.get_chain_name(name, wrap=True),
-                         name[:11])
-
-    def test_defer_apply_with_exception(self):
-        self.iptables._apply = mock.Mock(side_effect=Exception)
-        with testtools.ExpectedException(n_exc.IpTablesApplyException):
-            with self.iptables.defer_apply():
-                pass
-
-    def _extend_with_ip6tables_filter(self, expected_calls, filter_dump):
-        expected_calls.insert(2, (
-            mock.call(['ip6tables-save'],
-                      run_as_root=True),
-            ''))
-        expected_calls.insert(3, (
-            mock.call(['ip6tables-restore', '-n'],
-                      process_input=filter_dump,
-                      run_as_root=True),
-            None))
-        expected_calls.extend([
-            (mock.call(['ip6tables-save'],
-                      run_as_root=True),
-             ''),
-            (mock.call(['ip6tables-restore', '-n'],
-                      process_input=filter_dump,
-                      run_as_root=True),
-             None)])
-
-    def _test_add_and_remove_chain_custom_binary_name_helper(self, use_ipv6):
-        bn = ("xbcdef" * 5)
-
-        self.iptables = iptables_manager.IptablesManager(
-            binary_name=bn,
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-        iptables_args = {'bn': bn[:16], 'filter_rules': ''}
-
-        filter_dump = FILTER_WITH_RULES_TEMPLATE % iptables_args
-
-        filter_dump_ipv6 = FILTER_TEMPLATE % iptables_args
-
-        filter_dump_mod = filter_dump
-
-        nat_dump = NAT_TEMPLATE % iptables_args
-
-        raw_dump = _generate_raw_dump(iptables_args)
-        mangle_dump = _generate_mangle_dump(iptables_args)
-
-        expected_calls_and_values = [
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(filter_dump_mod + mangle_dump +
-                                      nat_dump + raw_dump),
-                       run_as_root=True),
-             None),
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(filter_dump + mangle_dump +
-                                      nat_dump + raw_dump),
-                       run_as_root=True),
-             None),
-        ]
-        if use_ipv6:
-            self._extend_with_ip6tables_filter(expected_calls_and_values,
-                                               filter_dump_ipv6 + raw_dump)
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.iptables.ipv4['filter'].add_chain('filter')
-        self.iptables.apply()
-
-        self.iptables.ipv4['filter'].empty_chain('filter')
-        self.iptables.apply()
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_add_and_remove_chain_custom_binary_name(self):
-        self._test_add_and_remove_chain_custom_binary_name_helper(False)
-
-    def test_add_and_remove_chain_custom_binary_name_with_ipv6(self):
-        self._test_add_and_remove_chain_custom_binary_name_helper(True)
-
-    def _test_empty_chain_custom_binary_name_helper(self, use_ipv6):
-        bn = ("xbcdef" * 5)[:16]
-
-        self.iptables = iptables_manager.IptablesManager(
-            binary_name=bn,
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-        iptables_args = {'bn': bn}
-
-        filter_dump = FILTER_TEMPLATE % iptables_args
-
-        filter_rules = ('-I %(bn)s-filter 1 -s 0/0 -d 192.168.0.2\n'
-                        % iptables_args)
-        iptables_args['filter_rules'] = filter_rules
-        filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args
-
-        nat_dump = NAT_TEMPLATE % iptables_args
-
-        raw_dump = _generate_raw_dump(iptables_args)
-        mangle_dump = _generate_mangle_dump(iptables_args)
-
-        expected_calls_and_values = [
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(filter_dump_mod + mangle_dump +
-                                      nat_dump + raw_dump),
-                       run_as_root=True),
-             None),
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(filter_dump + mangle_dump +
-                                      nat_dump + raw_dump),
-                       run_as_root=True),
-             None),
-        ]
-        if use_ipv6:
-            self._extend_with_ip6tables_filter(expected_calls_and_values,
-                                               filter_dump + raw_dump)
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.iptables.ipv4['filter'].add_chain('filter')
-        self.iptables.ipv4['filter'].add_rule('filter',
-                                              '-s 0/0 -d 192.168.0.2')
-        self.iptables.apply()
-
-        self.iptables.ipv4['filter'].remove_chain('filter')
-        self.iptables.apply()
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_empty_chain_custom_binary_name(self):
-        self._test_empty_chain_custom_binary_name_helper(False)
-
-    def test_empty_chain_custom_binary_name_with_ipv6(self):
-        self._test_empty_chain_custom_binary_name_helper(True)
-
-    def _test_add_and_remove_chain_helper(self, use_ipv6):
-        self.iptables = iptables_manager.IptablesManager(
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-        filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % IPTABLES_ARG
-
-        expected_calls_and_values = [
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(filter_dump_mod + MANGLE_DUMP +
-                                      NAT_DUMP + RAW_DUMP),
-                       run_as_root=True),
-             None),
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP +
-                                      RAW_DUMP),
-                       run_as_root=True),
-             None),
-        ]
-        if use_ipv6:
-            self._extend_with_ip6tables_filter(expected_calls_and_values,
-                                               FILTER_DUMP + RAW_DUMP)
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.iptables.ipv4['filter'].add_chain('filter')
-        self.iptables.apply()
-
-        self.iptables.ipv4['filter'].remove_chain('filter')
-        self.iptables.apply()
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_add_and_remove_chain(self):
-        self._test_add_and_remove_chain_helper(False)
-
-    def test_add_and_remove_chain_with_ipv6(self):
-        self._test_add_and_remove_chain_helper(True)
-
-    def _test_add_filter_rule_helper(self, use_ipv6):
-        self.iptables = iptables_manager.IptablesManager(
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-        iptables_args = {}
-        iptables_args.update(IPTABLES_ARG)
-        filter_rules = ('-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j '
-                        '%(bn)s-filter\n-I %(bn)s-filter 1 -j DROP\n'
-                        % iptables_args)
-        iptables_args['filter_rules'] = filter_rules
-        filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args
-
-        raw_dump = RAW_DUMP  # already interpolated at module level
-
-        expected_calls_and_values = [
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(filter_dump_mod + MANGLE_DUMP +
-                                      NAT_DUMP + RAW_DUMP),
-                       run_as_root=True),
-             None),
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP +
-                                      RAW_DUMP),
-                       run_as_root=True
-                       ),
-             None),
-        ]
-        if use_ipv6:
-            self._extend_with_ip6tables_filter(expected_calls_and_values,
-                                               FILTER_DUMP + raw_dump)
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.iptables.ipv4['filter'].add_chain('filter')
-        self.iptables.ipv4['filter'].add_rule('filter', '-j DROP')
-        self.iptables.ipv4['filter'].add_rule('INPUT',
-                                              '-s 0/0 -d 192.168.0.2 -j'
-                                              ' %(bn)s-filter' % IPTABLES_ARG)
-        self.iptables.apply()
-
-        self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP')
-        self.iptables.ipv4['filter'].remove_rule('INPUT',
-                                                 '-s 0/0 -d 192.168.0.2 -j'
-                                                 ' %(bn)s-filter'
-                                                 % IPTABLES_ARG)
-        self.iptables.ipv4['filter'].remove_chain('filter')
-
-        self.iptables.apply()
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_add_filter_rule(self):
-        self._test_add_filter_rule_helper(False)
-
-    def test_add_filter_rule_with_ipv6(self):
-        self._test_add_filter_rule_helper(True)
-
-    def _test_rule_with_wrap_target_helper(self, use_ipv6):
-        self.iptables = iptables_manager.IptablesManager(
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-        name = '0123456789' * 5
-        wrap = "%s-%s" % (iptables_manager.binary_name,
-                          iptables_manager.get_chain_name(name))
-
-        iptables_args = {'bn': iptables_manager.binary_name,
-                         'wrap': wrap}
-
-        filter_dump_mod = ('# Generated by iptables_manager\n'
-                           '*filter\n'
-                           ':FORWARD - [0:0]\n'
-                           ':INPUT - [0:0]\n'
-                           ':OUTPUT - [0:0]\n'
-                           ':neutron-filter-top - [0:0]\n'
-                           ':%(wrap)s - [0:0]\n'
-                           ':%(bn)s-FORWARD - [0:0]\n'
-                           ':%(bn)s-INPUT - [0:0]\n'
-                           ':%(bn)s-OUTPUT - [0:0]\n'
-                           ':%(bn)s-local - [0:0]\n'
-                           '-I FORWARD 1 -j neutron-filter-top\n'
-                           '-I FORWARD 2 -j %(bn)s-FORWARD\n'
-                           '-I INPUT 1 -j %(bn)s-INPUT\n'
-                           '-I OUTPUT 1 -j neutron-filter-top\n'
-                           '-I OUTPUT 2 -j %(bn)s-OUTPUT\n'
-                           '-I neutron-filter-top 1 -j %(bn)s-local\n'
-                           '-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j '
-                           '%(wrap)s\n'
-                           'COMMIT\n'
-                           '# Completed by iptables_manager\n'
-                           % iptables_args)
-
-        raw_dump = RAW_DUMP  # already interpolated at module level
-
-        expected_calls_and_values = [
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(filter_dump_mod + MANGLE_DUMP +
-                                      NAT_DUMP + RAW_DUMP),
-                       run_as_root=True),
-             None),
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + MANGLE_DUMP +
-                                      NAT_DUMP + RAW_DUMP),
-                       run_as_root=True),
-             None),
-        ]
-        if use_ipv6:
-            self._extend_with_ip6tables_filter(expected_calls_and_values,
-                                               FILTER_DUMP + raw_dump)
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.iptables.ipv4['filter'].add_chain(name)
-        self.iptables.ipv4['filter'].add_rule('INPUT',
-                                              '-s 0/0 -d 192.168.0.2 -j'
-                                              ' $%s' % name)
-        self.iptables.apply()
-
-        self.iptables.ipv4['filter'].remove_rule('INPUT',
-                                                 '-s 0/0 -d 192.168.0.2 -j'
-                                                 ' $%s' % name)
-        self.iptables.ipv4['filter'].remove_chain(name)
-
-        self.iptables.apply()
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_rule_with_wrap_target(self):
-        self._test_rule_with_wrap_target_helper(False)
-
-    def test_rule_with_wrap_target_with_ipv6(self):
-        self._test_rule_with_wrap_target_helper(True)
-
-    def _test_add_mangle_rule_helper(self, use_ipv6):
-        self.iptables = iptables_manager.IptablesManager(
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-        mangle_dump_mod = (
-            '# Generated by iptables_manager\n'
-            '*mangle\n'
-            ':FORWARD - [0:0]\n'
-            ':INPUT - [0:0]\n'
-            ':OUTPUT - [0:0]\n'
-            ':POSTROUTING - [0:0]\n'
-            ':PREROUTING - [0:0]\n'
-            ':%(bn)s-FORWARD - [0:0]\n'
-            ':%(bn)s-INPUT - [0:0]\n'
-            ':%(bn)s-OUTPUT - [0:0]\n'
-            ':%(bn)s-POSTROUTING - [0:0]\n'
-            ':%(bn)s-PREROUTING - [0:0]\n'
-            ':%(bn)s-mangle - [0:0]\n'
-            ':%(bn)s-mark - [0:0]\n'
-            '-I FORWARD 1 -j %(bn)s-FORWARD\n'
-            '-I INPUT 1 -j %(bn)s-INPUT\n'
-            '-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
-            '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
-            '-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
-            '-I %(bn)s-PREROUTING 1 -j %(bn)s-mark\n'
-            '-I %(bn)s-PREROUTING 2 -j MARK --set-xmark 0x1/%(mark)s\n'
-            'COMMIT\n'
-            '# Completed by iptables_manager\n' % IPTABLES_ARG)
-
-        expected_calls_and_values = [
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + mangle_dump_mod +
-                                      NAT_DUMP + RAW_DUMP),
-                       run_as_root=True),
-             None),
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + MANGLE_DUMP +
-                                      NAT_DUMP + RAW_DUMP),
-                       run_as_root=True),
-             None),
-        ]
-        if use_ipv6:
-            self._extend_with_ip6tables_filter(expected_calls_and_values,
-                                               FILTER_DUMP + RAW_DUMP)
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.iptables.ipv4['mangle'].add_chain('mangle')
-        self.iptables.ipv4['mangle'].add_rule(
-            'PREROUTING',
-            '-j MARK --set-xmark 0x1/%s' % constants.ROUTER_MARK_MASK)
-
-        self.iptables.apply()
-
-        self.iptables.ipv4['mangle'].remove_rule(
-            'PREROUTING',
-            '-j MARK --set-xmark 0x1/%s' % constants.ROUTER_MARK_MASK)
-        self.iptables.ipv4['mangle'].remove_chain('mangle')
-
-        self.iptables.apply()
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_add_mangle_rule(self):
-        self._test_add_mangle_rule_helper(False)
-
-    def test_add_mangle_rule_with_ipv6(self):
-        self._test_add_mangle_rule_helper(True)
-
-    def _test_add_nat_rule_helper(self, use_ipv6):
-        self.iptables = iptables_manager.IptablesManager(
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-        nat_dump = NAT_TEMPLATE % IPTABLES_ARG
-
-        nat_dump_mod = ('# Generated by iptables_manager\n'
-                        '*nat\n'
-                        ':OUTPUT - [0:0]\n'
-                        ':POSTROUTING - [0:0]\n'
-                        ':PREROUTING - [0:0]\n'
-                        ':neutron-postrouting-bottom - [0:0]\n'
-                        ':%(bn)s-OUTPUT - [0:0]\n'
-                        ':%(bn)s-POSTROUTING - [0:0]\n'
-                        ':%(bn)s-PREROUTING - [0:0]\n'
-                        ':%(bn)s-float-snat - [0:0]\n'
-                        ':%(bn)s-nat - [0:0]\n'
-                        ':%(bn)s-snat - [0:0]\n'
-                        '-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
-                        '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
-                        '-I POSTROUTING 2 -j neutron-postrouting-bottom\n'
-                        '-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
-                        '-I neutron-postrouting-bottom 1 -j %(bn)s-snat\n'
-                        '-I %(bn)s-PREROUTING 1 -d 192.168.0.3 -j '
-                        '%(bn)s-nat\n'
-                        '-I %(bn)s-nat 1 -p tcp --dport 8080 -j '
-                        'REDIRECT --to-port 80\n'
-                        '-I %(bn)s-snat 1 -j %(bn)s-float-snat\n'
-                        'COMMIT\n'
-                        '# Completed by iptables_manager\n' % IPTABLES_ARG)
-
-        raw_dump = RAW_DUMP  # already interpolated at module level
-
-        expected_calls_and_values = [
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + MANGLE_DUMP +
-                                      nat_dump_mod + RAW_DUMP),
-                       run_as_root=True),
-             None),
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + MANGLE_DUMP + nat_dump +
-                                      RAW_DUMP),
-                       run_as_root=True),
-             None),
-        ]
-        if use_ipv6:
-            self._extend_with_ip6tables_filter(expected_calls_and_values,
-                                               FILTER_DUMP + raw_dump)
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.iptables.ipv4['nat'].add_chain('nat')
-        self.iptables.ipv4['nat'].add_rule('PREROUTING',
-                                           '-d 192.168.0.3 -j '
-                                           '%(bn)s-nat' % IPTABLES_ARG)
-        self.iptables.ipv4['nat'].add_rule('nat',
-                                           '-p tcp --dport 8080' +
-                                           ' -j REDIRECT --to-port 80')
-
-        self.iptables.apply()
-
-        self.iptables.ipv4['nat'].remove_rule('nat',
-                                              '-p tcp --dport 8080 -j'
-                                              ' REDIRECT --to-port 80')
-        self.iptables.ipv4['nat'].remove_rule('PREROUTING',
-                                              '-d 192.168.0.3 -j '
-                                              '%(bn)s-nat' % IPTABLES_ARG)
-        self.iptables.ipv4['nat'].remove_chain('nat')
-
-        self.iptables.apply()
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_add_nat_rule(self):
-        self._test_add_nat_rule_helper(False)
-
-    def test_add_nat_rule_with_ipv6(self):
-        self._test_add_nat_rule_helper(True)
-
-    def _test_add_raw_rule_helper(self, use_ipv6):
-        self.iptables = iptables_manager.IptablesManager(
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-
-        raw_dump_mod = ('# Generated by iptables_manager\n'
-                        '*raw\n'
-                        ':OUTPUT - [0:0]\n'
-                        ':PREROUTING - [0:0]\n'
-                        ':%(bn)s-OUTPUT - [0:0]\n'
-                        ':%(bn)s-PREROUTING - [0:0]\n'
-                        ':%(bn)s-raw - [0:0]\n'
-                        '-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
-                        '-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
-                        '-I %(bn)s-PREROUTING 1 -j CT --notrack\n'
-                        'COMMIT\n'
-                        '# Completed by iptables_manager\n'
-                        % IPTABLES_ARG)
-
-        expected_calls_and_values = [
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP +
-                                      raw_dump_mod),
-                       run_as_root=True),
-             None),
-            (mock.call(['iptables-save'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables-restore', '-n'],
-                       process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP +
-                                      RAW_DUMP),
-                       run_as_root=True),
-             None),
-        ]
-        if use_ipv6:
-            self._extend_with_ip6tables_filter(expected_calls_and_values,
-                                               FILTER_DUMP + RAW_DUMP)
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        self.iptables.ipv4['raw'].add_chain('raw')
-        self.iptables.ipv4['raw'].add_rule('PREROUTING',
-                                           '-j CT --notrack')
-
-        self.iptables.apply()
-
-        self.iptables.ipv4['raw'].remove_rule('PREROUTING',
-                                              '-j CT --notrack')
-        self.iptables.ipv4['raw'].remove_chain('raw')
-
-        self.iptables.apply()
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values)
-
-    def test_add_raw_rule(self):
-        self._test_add_raw_rule_helper(False)
-
-    def test_add_raw_rule_with_ipv6(self):
-        self._test_add_raw_rule_helper(True)
-
-    def test_add_rule_to_a_nonexistent_chain(self):
-        self.assertRaises(LookupError, self.iptables.ipv4['filter'].add_rule,
-                          'nonexistent', '-j DROP')
-
-    def test_remove_nonexistent_chain(self):
-        with mock.patch.object(iptables_manager, "LOG") as log:
-            self.iptables.ipv4['filter'].remove_chain('nonexistent')
-        log.debug.assert_called_once_with(
-            'Attempted to remove chain %s which does not exist',
-            'nonexistent')
-
-    def test_remove_nonexistent_rule(self):
-        with mock.patch.object(iptables_manager, "LOG") as log:
-            self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP')
-        log.warn.assert_called_once_with(
-            'Tried to remove rule that was not there: '
-            '%(chain)r %(rule)r %(wrap)r %(top)r',
-            {'wrap': True, 'top': False, 'rule': '-j DROP',
-             'chain': 'nonexistent'})
-
-    def test_iptables_failure_with_no_failing_line_number(self):
-        with mock.patch.object(iptables_manager, "LOG") as log:
-            # generate Runtime errors on iptables-restore calls
-            def iptables_restore_failer(*args, **kwargs):
-                if 'iptables-restore' in args[0]:
-                    self.input_lines = kwargs['process_input'].split('\n')
-                    # don't provide a specific failure message so all lines
-                    # are logged
-                    raise RuntimeError()
-                return FILTER_DUMP
-            self.execute.side_effect = iptables_restore_failer
-            # _apply_synchronized calls iptables-restore so it should raise
-            # a RuntimeError
-            self.assertRaises(RuntimeError,
-                              self.iptables._apply_synchronized)
-        # The RuntimeError should have triggered a log of the input to the
-        # process that it failed to execute. Verify by comparing the log
-        # call to the 'process_input' arg given to the failed iptables-restore
-        # call.
-        # Failure without a specific line number in the error should cause
-        # all lines to be logged with numbers.
-        logged = ['%7d. %s' % (n, l)
-                  for n, l in enumerate(self.input_lines, 1)]
-        log.error.assert_called_once_with(_(
-            'IPTablesManager.apply failed to apply the '
-            'following set of iptables rules:\n%s'),
-            '\n'.join(logged)
-        )
-
-    def test_iptables_failure_on_specific_line(self):
-        with mock.patch.object(iptables_manager, "LOG") as log:
-            # generate Runtime errors on iptables-restore calls
-            def iptables_restore_failer(*args, **kwargs):
-                if 'iptables-restore' in args[0]:
-                    self.input_lines = kwargs['process_input'].split('\n')
-                    # pretend line 11 failed
-                    msg = ("Exit code: 1\nStdout: ''\n"
-                           "Stderr: 'iptables-restore: line 11 failed\n'")
-                    raise RuntimeError(msg)
-                return FILTER_DUMP
-            self.execute.side_effect = iptables_restore_failer
-            # _apply_synchronized calls iptables-restore so it should raise
-            # a RuntimeError
-            self.assertRaises(RuntimeError,
-                              self.iptables._apply_synchronized)
-        # The RuntimeError should have triggered a log of the input to the
-        # process that it failed to execute. Verify by comparing the log
-        # call to the 'process_input' arg given to the failed iptables-restore
-        # call.
-        # Line 11 of the input was marked as failing so lines (11 - context)
-        # to (11 + context) should be logged
-        ctx = iptables_manager.IPTABLES_ERROR_LINES_OF_CONTEXT
-        log_start = max(0, 11 - ctx)
-        log_end = 11 + ctx
-        logged = ['%7d. %s' % (n, l)
-                  for n, l in enumerate(self.input_lines[log_start:log_end],
-                                        log_start + 1)]
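-        # e.g. with a context of 5 this slice covers input lines 7-16,
-        # a 2 * ctx window around the failing line 11.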
-        log.error.assert_called_once_with(_(
-            'IPTablesManager.apply failed to apply the '
-            'following set of iptables rules:\n%s'),
-            '\n'.join(logged)
-        )
-
-    def test_get_traffic_counters_chain_notexists(self):
-        with mock.patch.object(iptables_manager, "LOG") as log:
-            acc = self.iptables.get_traffic_counters('chain1')
-            self.assertIsNone(acc)
-        self.assertEqual(0, self.execute.call_count)
-        log.warn.assert_called_once_with(
-            'Attempted to get traffic counters of chain %s which '
-            'does not exist', 'chain1')
-
-    def _test_get_traffic_counters_helper(self, use_ipv6):
-        self.iptables = iptables_manager.IptablesManager(
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-        exp_packets = 800
-        exp_bytes = 131802
-
-        expected_calls_and_values = [
-            (mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT',
-                        '-n', '-v', '-x'],
-                       run_as_root=True),
-             TRAFFIC_COUNTERS_DUMP),
-            (mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n',
-                        '-v', '-x'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n',
-                        '-v', '-x'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
-                        '-v', '-x'],
-                       run_as_root=True),
-             ''),
-        ]
-        if use_ipv6:
-            expected_calls_and_values.append(
-                (mock.call(['ip6tables', '-t', 'raw', '-L', 'OUTPUT',
-                           '-n', '-v', '-x'], run_as_root=True),
-                 ''))
-            expected_calls_and_values.append(
-                (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
-                           '-n', '-v', '-x'],
-                           run_as_root=True),
-                 TRAFFIC_COUNTERS_DUMP))
-            exp_packets *= 2
-            exp_bytes *= 2
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        acc = self.iptables.get_traffic_counters('OUTPUT')
-        self.assertEqual(acc['pkts'], exp_packets)
-        self.assertEqual(acc['bytes'], exp_bytes)
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values,
-                                any_order=True)
-
-    def test_get_traffic_counters(self):
-        self._test_get_traffic_counters_helper(False)
-
-    def test_get_traffic_counters_with_ipv6(self):
-        self._test_get_traffic_counters_helper(True)
-
-    def _test_get_traffic_counters_with_zero_helper(self, use_ipv6):
-        self.iptables = iptables_manager.IptablesManager(
-            use_ipv6=use_ipv6)
-        self.execute = mock.patch.object(self.iptables, "execute").start()
-        exp_packets = 800
-        exp_bytes = 131802
-
-        expected_calls_and_values = [
-            (mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT',
-                        '-n', '-v', '-x', '-Z'],
-                       run_as_root=True),
-             TRAFFIC_COUNTERS_DUMP),
-            (mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n',
-                        '-v', '-x', '-Z'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n',
-                        '-v', '-x', '-Z'],
-                       run_as_root=True),
-             ''),
-            (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
-                        '-v', '-x', '-Z'],
-                       run_as_root=True),
-             '')
-        ]
-        if use_ipv6:
-            expected_calls_and_values.append(
-                (mock.call(['ip6tables', '-t', 'raw', '-L', 'OUTPUT',
-                            '-n', '-v', '-x', '-Z'], run_as_root=True),
-                 ''))
-            expected_calls_and_values.append(
-                (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
-                            '-n', '-v', '-x', '-Z'],
-                           run_as_root=True),
-                 TRAFFIC_COUNTERS_DUMP))
-            exp_packets *= 2
-            exp_bytes *= 2
-
-        tools.setup_mock_calls(self.execute, expected_calls_and_values)
-
-        acc = self.iptables.get_traffic_counters('OUTPUT', zero=True)
-        self.assertEqual(acc['pkts'], exp_packets)
-        self.assertEqual(acc['bytes'], exp_bytes)
-
-        tools.verify_mock_calls(self.execute, expected_calls_and_values,
-                                any_order=True)
-
-    def test_get_traffic_counters_with_zero(self):
-        self._test_get_traffic_counters_with_zero_helper(False)
-
-    def test_get_traffic_counters_with_zero_with_ipv6(self):
-        self._test_get_traffic_counters_with_zero_helper(True)
-
-
-class IptablesManagerStateLessTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(IptablesManagerStateLessTestCase, self).setUp()
-        cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
-        self.iptables = iptables_manager.IptablesManager(state_less=True)
-
-    def test_nat_not_found(self):
-        self.assertNotIn('nat', self.iptables.ipv4)
-
-    def test_mangle_not_found(self):
-        self.assertNotIn('mangle', self.iptables.ipv4)
diff --git a/neutron/tests/unit/agent/linux/test_keepalived.py b/neutron/tests/unit/agent/linux/test_keepalived.py
deleted file mode 100644 (file)
index 01eb469..0000000
+++ /dev/null
@@ -1,341 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import testtools
-
-from neutron.agent.linux import keepalived
-from neutron.common import constants as n_consts
-from neutron.tests import base
-
-# Keepalived user guide:
-# http://www.keepalived.org/pdf/UserGuide.pdf
-
-
-class KeepalivedGetFreeRangeTestCase(base.BaseTestCase):
-    def test_get_free_range(self):
-        free_range = keepalived.get_free_range(
-            parent_range='169.254.0.0/16',
-            excluded_ranges=['169.254.0.0/24',
-                             '169.254.1.0/24',
-                             '169.254.2.0/24'],
-            size=24)
-        self.assertEqual('169.254.3.0/24', free_range)
-
-    def test_get_free_range_without_excluded(self):
-        free_range = keepalived.get_free_range(
-            parent_range='169.254.0.0/16',
-            excluded_ranges=[],
-            size=20)
-        self.assertEqual('169.254.0.0/20', free_range)
-
-    def test_get_free_range_excluded_out_of_parent(self):
-        free_range = keepalived.get_free_range(
-            parent_range='169.254.0.0/16',
-            excluded_ranges=['255.255.255.0/24'],
-            size=24)
-        self.assertEqual('169.254.0.0/24', free_range)
-
-    def test_get_free_range_not_found(self):
-        tiny_parent_range = '192.168.1.0/24'
-        huge_size = 8
-        with testtools.ExpectedException(ValueError):
-            keepalived.get_free_range(
-                parent_range=tiny_parent_range,
-                excluded_ranges=[],
-                size=huge_size)
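-    # A rough stdlib equivalent of the behaviour pinned down above (an
-    # illustrative sketch only; the real helper may differ):
-    #
-    #     import ipaddress
-    #
-    #     def get_free_range(parent_range, excluded_ranges, size):
-    #         parent = ipaddress.ip_network(parent_range)
-    #         excluded = [ipaddress.ip_network(x)
-    #                     for x in excluded_ranges]
-    #         for cand in parent.subnets(new_prefix=size):
-    #             if not any(cand.overlaps(x) for x in excluded):
-    #                 return str(cand)
-    #         raise ValueError('no free range of size %s' % size)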
-
-
-class KeepalivedConfBaseMixin(object):
-
-    def _get_config(self):
-        config = keepalived.KeepalivedConf()
-
-        instance1 = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
-                                                  ['169.254.192.0/18'],
-                                                  advert_int=5)
-        instance1.set_authentication('AH', 'pass123')
-        instance1.track_interfaces.append("eth0")
-
-        vip_address1 = keepalived.KeepalivedVipAddress('192.168.1.0/24',
-                                                       'eth1')
-
-        vip_address2 = keepalived.KeepalivedVipAddress('192.168.2.0/24',
-                                                       'eth2')
-
-        vip_address3 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
-                                                       'eth2')
-
-        vip_address_ex = keepalived.KeepalivedVipAddress('192.168.55.0/24',
-                                                         'eth10')
-
-        instance1.vips.append(vip_address1)
-        instance1.vips.append(vip_address2)
-        instance1.vips.append(vip_address3)
-        instance1.vips.append(vip_address_ex)
-
-        virtual_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv4_ANY,
-                                                          "192.168.1.1",
-                                                          "eth1")
-        instance1.virtual_routes.gateway_routes = [virtual_route]
-
-        instance2 = keepalived.KeepalivedInstance('MASTER', 'eth4', 2,
-                                                  ['169.254.192.0/18'],
-                                                  mcast_src_ip='224.0.0.1')
-        instance2.track_interfaces.append("eth4")
-
-        vip_address1 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
-                                                       'eth6')
-
-        instance2.vips.append(vip_address1)
-        instance2.vips.append(vip_address2)
-        instance2.vips.append(vip_address_ex)
-
-        config.add_instance(instance1)
-        config.add_instance(instance2)
-
-        return config
-
-
-class KeepalivedConfTestCase(base.BaseTestCase,
-                             KeepalivedConfBaseMixin):
-
-    expected = """vrrp_instance VR_1 {
-    state MASTER
-    interface eth0
-    virtual_router_id 1
-    priority 50
-    garp_master_repeat 5
-    garp_master_refresh 10
-    advert_int 5
-    authentication {
-        auth_type AH
-        auth_pass pass123
-    }
-    track_interface {
-        eth0
-    }
-    virtual_ipaddress {
-        169.254.0.1/24 dev eth0
-    }
-    virtual_ipaddress_excluded {
-        192.168.1.0/24 dev eth1
-        192.168.2.0/24 dev eth2
-        192.168.3.0/24 dev eth2
-        192.168.55.0/24 dev eth10
-    }
-    virtual_routes {
-        0.0.0.0/0 via 192.168.1.1 dev eth1
-    }
-}
-vrrp_instance VR_2 {
-    state MASTER
-    interface eth4
-    virtual_router_id 2
-    priority 50
-    garp_master_repeat 5
-    garp_master_refresh 10
-    mcast_src_ip 224.0.0.1
-    track_interface {
-        eth4
-    }
-    virtual_ipaddress {
-        169.254.0.2/24 dev eth4
-    }
-    virtual_ipaddress_excluded {
-        192.168.2.0/24 dev eth2
-        192.168.3.0/24 dev eth6
-        192.168.55.0/24 dev eth10
-    }
-}"""
-
-    def test_config_generation(self):
-        config = self._get_config()
-        self.assertEqual(self.expected, config.get_config_str())
-
-    def test_config_with_reset(self):
-        config = self._get_config()
-        self.assertEqual(self.expected, config.get_config_str())
-
-        config.reset()
-        self.assertEqual('', config.get_config_str())
-
-    def test_get_existing_vip_ip_addresses_returns_list(self):
-        config = self._get_config()
-        instance = config.get_instance(1)
-        current_vips = sorted(instance.get_existing_vip_ip_addresses('eth2'))
-        self.assertEqual(['192.168.2.0/24', '192.168.3.0/24'], current_vips)
-
-
-class KeepalivedStateExceptionTestCase(base.BaseTestCase):
-    def test_state_exception(self):
-        invalid_vrrp_state = 'a seal walks'
-        self.assertRaises(keepalived.InvalidInstanceStateException,
-                          keepalived.KeepalivedInstance,
-                          invalid_vrrp_state, 'eth0', 33,
-                          ['169.254.192.0/18'])
-
-        invalid_auth_type = 'into a club'
-        instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
-                                                 ['169.254.192.0/18'])
-        self.assertRaises(keepalived.InvalidAuthenticationTypeException,
-                          instance.set_authentication,
-                          invalid_auth_type, 'some_password')
-
-
-class KeepalivedInstanceRoutesTestCase(base.BaseTestCase):
-    @classmethod
-    def _get_instance_routes(cls):
-        routes = keepalived.KeepalivedInstanceRoutes()
-        default_gw_eth0 = keepalived.KeepalivedVirtualRoute(
-            '0.0.0.0/0', '1.0.0.254', 'eth0')
-        default_gw_eth1 = keepalived.KeepalivedVirtualRoute(
-            '::/0', 'fe80::3e97:eff:fe26:3bfa/64', 'eth1')
-        routes.gateway_routes = [default_gw_eth0, default_gw_eth1]
-        extra_routes = [
-            keepalived.KeepalivedVirtualRoute('10.0.0.0/8', '1.0.0.1'),
-            keepalived.KeepalivedVirtualRoute('20.0.0.0/8', '2.0.0.2')]
-        routes.extra_routes = extra_routes
-        extra_subnets = [
-            keepalived.KeepalivedVirtualRoute(
-                '30.0.0.0/8', None, 'eth0', scope='link')]
-        routes.extra_subnets = extra_subnets
-        return routes
-
-    def test_routes(self):
-        routes = self._get_instance_routes()
-        self.assertEqual(len(routes.routes), 5)
-
-    def test_remove_routes_on_interface(self):
-        routes = self._get_instance_routes()
-        routes.remove_routes_on_interface('eth0')
-        self.assertEqual(len(routes.routes), 3)
-        routes.remove_routes_on_interface('eth1')
-        self.assertEqual(len(routes.routes), 2)
-
-    def test_build_config(self):
-        expected = """    virtual_routes {
-        0.0.0.0/0 via 1.0.0.254 dev eth0
-        ::/0 via fe80::3e97:eff:fe26:3bfa/64 dev eth1
-        10.0.0.0/8 via 1.0.0.1
-        20.0.0.0/8 via 2.0.0.2
-        30.0.0.0/8 dev eth0 scope link
-    }"""
-        routes = self._get_instance_routes()
-        self.assertEqual(expected, '\n'.join(routes.build_config()))
-
-
-class KeepalivedInstanceTestCase(base.BaseTestCase,
-                                 KeepalivedConfBaseMixin):
-    def test_get_primary_vip(self):
-        instance = keepalived.KeepalivedInstance('MASTER', 'ha0', 42,
-                                                 ['169.254.192.0/18'])
-        self.assertEqual('169.254.0.42/24', instance.get_primary_vip())
-
-    def test_remove_addresses_by_interface(self):
-        config = self._get_config()
-        instance = config.get_instance(1)
-        instance.remove_vips_vroutes_by_interface('eth2')
-        instance.remove_vips_vroutes_by_interface('eth10')
-
-        expected = """vrrp_instance VR_1 {
-    state MASTER
-    interface eth0
-    virtual_router_id 1
-    priority 50
-    garp_master_repeat 5
-    garp_master_refresh 10
-    advert_int 5
-    authentication {
-        auth_type AH
-        auth_pass pass123
-    }
-    track_interface {
-        eth0
-    }
-    virtual_ipaddress {
-        169.254.0.1/24 dev eth0
-    }
-    virtual_ipaddress_excluded {
-        192.168.1.0/24 dev eth1
-    }
-    virtual_routes {
-        0.0.0.0/0 via 192.168.1.1 dev eth1
-    }
-}
-vrrp_instance VR_2 {
-    state MASTER
-    interface eth4
-    virtual_router_id 2
-    priority 50
-    garp_master_repeat 5
-    garp_master_refresh 10
-    mcast_src_ip 224.0.0.1
-    track_interface {
-        eth4
-    }
-    virtual_ipaddress {
-        169.254.0.2/24 dev eth4
-    }
-    virtual_ipaddress_excluded {
-        192.168.2.0/24 dev eth2
-        192.168.3.0/24 dev eth6
-        192.168.55.0/24 dev eth10
-    }
-}"""
-
-        self.assertEqual(expected, config.get_config_str())
-
-    def test_build_config_no_vips(self):
-        expected = """vrrp_instance VR_1 {
-    state MASTER
-    interface eth0
-    virtual_router_id 1
-    priority 50
-    garp_master_repeat 5
-    garp_master_refresh 10
-    virtual_ipaddress {
-        169.254.0.1/24 dev eth0
-    }
-}"""
-        instance = keepalived.KeepalivedInstance(
-            'MASTER', 'eth0', 1, ['169.254.192.0/18'])
-        self.assertEqual(expected, '\n'.join(instance.build_config()))
-
-
-class KeepalivedVipAddressTestCase(base.BaseTestCase):
-    def test_vip_with_scope(self):
-        vip = keepalived.KeepalivedVipAddress('fe80::3e97:eff:fe26:3bfa/64',
-                                              'eth1',
-                                              'link')
-        self.assertEqual('fe80::3e97:eff:fe26:3bfa/64 dev eth1 scope link',
-                         vip.build_config())
-
-    def test_add_vip_idempotent(self):
-        instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
-                                                 ['169.254.192.0/18'])
-        instance.add_vip('192.168.222.1/32', 'eth11', None)
-        instance.add_vip('192.168.222.1/32', 'eth12', 'link')
-        self.assertEqual(1, len(instance.vips))
-
-
-class KeepalivedVirtualRouteTestCase(base.BaseTestCase):
-    def test_virtual_route_with_dev(self):
-        route = keepalived.KeepalivedVirtualRoute(n_consts.IPv4_ANY, '1.2.3.4',
-                                                  'eth0')
-        self.assertEqual('0.0.0.0/0 via 1.2.3.4 dev eth0',
-                         route.build_config())
-
-    def test_virtual_route_without_dev(self):
-        route = keepalived.KeepalivedVirtualRoute('50.0.0.0/8', '1.2.3.4')
-        self.assertEqual('50.0.0.0/8 via 1.2.3.4', route.build_config())
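
The keepalived test cases above pin exact config strings. For reference, the
line-building pattern they exercise looks roughly like the sketch below: each
route object renders a single line, and the container indents the lines and
wraps them in a virtual_routes block. Class and method names mirror the tests;
this is an illustrative sketch, not the neutron.agent.linux.keepalived code.

    class VirtualRoute(object):
        def __init__(self, destination, nexthop, interface=None):
            self.destination = destination
            self.nexthop = nexthop
            self.interface = interface

        def build_config(self):
            # Renders e.g. '0.0.0.0/0 via 1.2.3.4 dev eth0'
            output = self.destination
            if self.nexthop:
                output += ' via %s' % self.nexthop
            if self.interface:
                output += ' dev %s' % self.interface
            return output


    def build_virtual_routes(routes):
        lines = ['    virtual_routes {']
        lines.extend('        %s' % r.build_config() for r in routes)
        lines.append('    }')
        return '\n'.join(lines)


    print(build_virtual_routes([VirtualRoute('0.0.0.0/0', '1.2.3.4', 'eth0')]))
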
diff --git a/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py b/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py
deleted file mode 100644 (file)
index 73a3a08..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.agent.common import ovs_lib
-from neutron.agent.linux import ovsdb_monitor
-from neutron.tests import base
-
-
-class TestOvsdbMonitor(base.BaseTestCase):
-
-    def test___init__(self):
-        ovsdb_monitor.OvsdbMonitor('Interface')
-
-    def test___init___with_columns(self):
-        columns = ['col1', 'col2']
-        with mock.patch(
-            'neutron.agent.linux.async_process.AsyncProcess.__init__') as init:
-            ovsdb_monitor.OvsdbMonitor('Interface', columns=columns)
-            cmd = init.call_args_list[0][0][0]
-            self.assertEqual('col1,col2', cmd[-1])
-
-    def test___init___with_format(self):
-        with mock.patch(
-            'neutron.agent.linux.async_process.AsyncProcess.__init__') as init:
-            ovsdb_monitor.OvsdbMonitor('Interface', format='blob')
-            cmd = init.call_args_list[0][0][0]
-            self.assertEqual('--format=blob', cmd[-1])
-
-
-class TestSimpleInterfaceMonitor(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestSimpleInterfaceMonitor, self).setUp()
-        self.monitor = ovsdb_monitor.SimpleInterfaceMonitor()
-
-    def test_has_updates_is_false_if_active_with_no_output(self):
-        target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor'
-                  '.is_active')
-        with mock.patch(target, return_value=True):
-            self.assertFalse(self.monitor.has_updates)
-
-    def test_has_updates_after_calling_get_events_is_false(self):
-        with mock.patch.object(
-                self.monitor, 'process_events') as process_events:
-            self.monitor.new_events = {'added': ['foo'], 'removed': ['foo1']}
-            self.assertTrue(self.monitor.has_updates)
-            self.monitor.get_events()
-            self.assertTrue(process_events.called)
-            self.assertFalse(self.monitor.has_updates)
-
-    def test_process_event_unassigned_of_port(self):
-        output = '{"data":[["e040fbec-0579-4990-8324-d338da33ae88","insert",'
-        output += '"m50",["set",[]],["map",[]]]],"headings":["row","action",'
-        output += '"name","ofport","external_ids"]}'
-        with mock.patch.object(
-                self.monitor, 'iter_stdout', return_value=[output]):
-            self.monitor.process_events()
-            self.assertEqual(self.monitor.new_events['added'][0]['ofport'],
-                             ovs_lib.UNASSIGNED_OFPORT)
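
The sample output fed to process_events above is one JSON row emitted by
'ovsdb-client monitor'. Decoding such a row needs only the stdlib; the field
layout follows the "headings" list in the sample. A standalone illustration,
not the SimpleInterfaceMonitor parser itself:

    import json

    line = ('{"data":[["e040fbec-0579-4990-8324-d338da33ae88","insert",'
            '"m50",["set",[]],["map",[]]]],"headings":["row","action",'
            '"name","ofport","external_ids"]}')
    event = json.loads(line)
    # Pair each data row with the headings to get dict-shaped records.
    rows = [dict(zip(event['headings'], row)) for row in event['data']]
    assert rows[0]['action'] == 'insert'
    assert rows[0]['name'] == 'm50'
    assert rows[0]['ofport'] == ['set', []]  # empty set: no ofport assigned
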
diff --git a/neutron/tests/unit/agent/linux/test_pd.py b/neutron/tests/unit/agent/linux/test_pd.py
deleted file mode 100644 (file)
index e121067..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.agent.linux import pd
-from neutron.tests import base as tests_base
-
-
-class FakeRouter(object):
-    def __init__(self, router_id):
-        self.router_id = router_id
-
-
-class TestPrefixDelegation(tests_base.DietTestCase):
-    def test_remove_router(self):
-        l3_agent = mock.Mock()
-        router_id = 1
-        l3_agent.pd.routers = {router_id: pd.get_router_entry(None)}
-        pd.remove_router(None, None, l3_agent, router=FakeRouter(router_id))
-        self.assertTrue(l3_agent.pd.delete_router_pd.called)
-        self.assertEqual({}, l3_agent.pd.routers)
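
These tests lean on mock.Mock auto-creating attributes: l3_agent.pd and its
methods exist without any setup, and every call is recorded for later
assertions. A minimal standalone illustration (unittest.mock in the Python 3
stdlib exposes the same API as the external mock package imported above):

    from unittest import mock

    l3_agent = mock.Mock()
    # Accessing l3_agent.pd.delete_router_pd creates the child mock on the
    # fly; calling it records the call for the assertion below.
    l3_agent.pd.delete_router_pd('router-1')
    l3_agent.pd.delete_router_pd.assert_called_once_with('router-1')
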
diff --git a/neutron/tests/unit/agent/linux/test_polling.py b/neutron/tests/unit/agent/linux/test_polling.py
deleted file mode 100644 (file)
index b38c86b..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.agent.common import base_polling
-from neutron.agent.linux import polling
-from neutron.tests import base
-
-
-class TestGetPollingManager(base.BaseTestCase):
-
-    def test_return_always_poll_by_default(self):
-        with polling.get_polling_manager() as pm:
-            self.assertEqual(pm.__class__, base_polling.AlwaysPoll)
-
-    def test_manage_polling_minimizer(self):
-        mock_target = 'neutron.agent.linux.polling.InterfacePollingMinimizer'
-        with mock.patch('%s.start' % mock_target) as mock_start:
-            with mock.patch('%s.stop' % mock_target) as mock_stop:
-                with polling.get_polling_manager(minimize_polling=True) as pm:
-                    self.assertEqual(pm.__class__,
-                                     polling.InterfacePollingMinimizer)
-                mock_stop.assert_has_calls([mock.call()])
-            mock_start.assert_has_calls([mock.call()])
-
-
-class TestInterfacePollingMinimizer(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestInterfacePollingMinimizer, self).setUp()
-        self.pm = polling.InterfacePollingMinimizer()
-
-    def test_start_calls_monitor_start(self):
-        with mock.patch.object(self.pm._monitor, 'start') as mock_start:
-            self.pm.start()
-        mock_start.assert_called_with()
-
-    def test_stop_calls_monitor_stop(self):
-        with mock.patch.object(self.pm._monitor, 'stop') as mock_stop:
-            self.pm.stop()
-        mock_stop.assert_called_with()
-
-    def mock_has_updates(self, return_value):
-        target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor'
-                  '.has_updates')
-        return mock.patch(
-            target,
-            new_callable=mock.PropertyMock(return_value=return_value),
-        )
-
-    def test__is_polling_required_returns_when_updates_are_present(self):
-        with self.mock_has_updates(True):
-            self.assertTrue(self.pm._is_polling_required())
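
get_polling_manager is used above as a context manager that starts the chosen
manager on entry and stops it on exit, which is exactly what the mock
assertions verify. A hedged sketch of that contract using contextlib; the
names mirror the tests but this is not the neutron implementation:

    import contextlib


    class AlwaysPoll(object):
        """Trivial polling manager: polling is always required."""

        def start(self):
            pass

        def stop(self):
            pass

        @property
        def is_polling_required(self):
            return True


    @contextlib.contextmanager
    def get_polling_manager(minimize_polling=False):
        # A real implementation would hand back an interface-monitoring
        # minimizer when minimize_polling=True; the sketch always uses
        # the trivial manager.
        pm = AlwaysPoll()
        pm.start()
        try:
            yield pm
        finally:
            pm.stop()


    with get_polling_manager() as pm:
        assert pm.is_polling_required
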
diff --git a/neutron/tests/unit/agent/linux/test_utils.py b/neutron/tests/unit/agent/linux/test_utils.py
deleted file mode 100644 (file)
index 7a53ee4..0000000
+++ /dev/null
@@ -1,416 +0,0 @@
-# Copyright 2012, VMware, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import socket
-
-import mock
-import six
-import testtools
-
-import oslo_i18n
-
-from neutron.agent.linux import utils
-from neutron.tests import base
-from neutron.tests.common import helpers
-
-
-_marker = object()
-
-
-class AgentUtilsExecuteTest(base.BaseTestCase):
-    def setUp(self):
-        super(AgentUtilsExecuteTest, self).setUp()
-        self.test_file = self.get_temp_file_path('test_execute.tmp')
-        open(self.test_file, 'w').close()
-        self.process = mock.patch('eventlet.green.subprocess.Popen').start()
-        self.process.return_value.returncode = 0
-        self.mock_popen = self.process.return_value.communicate
-
-    def test_without_helper(self):
-        expected = "%s\n" % self.test_file
-        self.mock_popen.return_value = [expected, ""]
-        result = utils.execute(["ls", self.test_file])
-        self.assertEqual(result, expected)
-
-    def test_with_helper(self):
-        expected = "ls %s\n" % self.test_file
-        self.mock_popen.return_value = [expected, ""]
-        self.config(group='AGENT', root_helper='echo')
-        result = utils.execute(["ls", self.test_file], run_as_root=True)
-        self.assertEqual(result, expected)
-
-    def test_stderr_true(self):
-        expected = "%s\n" % self.test_file
-        self.mock_popen.return_value = [expected, ""]
-        out = utils.execute(["ls", self.test_file], return_stderr=True)
-        self.assertIsInstance(out, tuple)
-        self.assertEqual(out, (expected, ""))
-
-    def test_check_exit_code(self):
-        self.mock_popen.return_value = ["", ""]
-        stdout = utils.execute(["ls", self.test_file[:-1]],
-                               check_exit_code=False)
-        self.assertEqual("", stdout)
-
-    def test_execute_raises(self):
-        self.mock_popen.side_effect = RuntimeError
-        self.assertRaises(RuntimeError, utils.execute,
-                          ["ls", self.test_file[:-1]])
-
-    def test_process_input(self):
-        expected = "%s\n" % self.test_file[:-1]
-        self.mock_popen.return_value = [expected, ""]
-        result = utils.execute(["cat"], process_input="%s\n" %
-                               self.test_file[:-1])
-        self.assertEqual(result, expected)
-
-    def test_with_addl_env(self):
-        expected = "%s\n" % self.test_file
-        self.mock_popen.return_value = [expected, ""]
-        result = utils.execute(["ls", self.test_file],
-                               addl_env={'foo': 'bar'})
-        self.assertEqual(result, expected)
-
-    def test_return_code_log_error_raise_runtime(self):
-        self.mock_popen.return_value = ('', '')
-        self.process.return_value.returncode = 1
-        with mock.patch.object(utils, 'LOG') as log:
-            self.assertRaises(RuntimeError, utils.execute,
-                              ['ls'])
-            self.assertTrue(log.error.called)
-
-    def test_return_code_log_error_no_raise_runtime(self):
-        self.mock_popen.return_value = ('', '')
-        self.process.return_value.returncode = 1
-        with mock.patch.object(utils, 'LOG') as log:
-            utils.execute(['ls'], check_exit_code=False)
-            self.assertTrue(log.error.called)
-
-    def test_return_code_log_debug(self):
-        self.mock_popen.return_value = ('', '')
-        with mock.patch.object(utils, 'LOG') as log:
-            utils.execute(['ls'])
-            self.assertTrue(log.debug.called)
-
-    def test_return_code_log_error_change_locale(self):
-        ja_output = 'std_out in Japanese'
-        ja_error = 'std_err in Japanese'
-        ja_message_out = oslo_i18n._message.Message(ja_output)
-        ja_message_err = oslo_i18n._message.Message(ja_error)
-        ja_translate_out = oslo_i18n._translate.translate(ja_message_out, 'ja')
-        ja_translate_err = oslo_i18n._translate.translate(ja_message_err, 'ja')
-        self.mock_popen.return_value = (ja_translate_out, ja_translate_err)
-        self.process.return_value.returncode = 1
-
-        with mock.patch.object(utils, 'LOG') as log:
-            utils.execute(['ls'], check_exit_code=False)
-            self.assertIn(ja_translate_out, str(log.error.call_args_list))
-            self.assertIn(ja_translate_err, str(log.error.call_args_list))
-
-    def test_return_code_raise_runtime_do_not_log_fail_as_error(self):
-        self.mock_popen.return_value = ('', '')
-        self.process.return_value.returncode = 1
-        with mock.patch.object(utils, 'LOG') as log:
-            self.assertRaises(RuntimeError, utils.execute,
-                              ['ls'], log_fail_as_error=False)
-            self.assertFalse(log.error.called)
-
-    def test_encode_process_input(self):
-        str_idata = "%s\n" % self.test_file[:-1]
-        str_odata = "%s\n" % self.test_file
-        if six.PY3:
-            bytes_idata = str_idata.encode(encoding='utf-8')
-            bytes_odata = str_odata.encode(encoding='utf-8')
-            self.mock_popen.return_value = [bytes_odata, b'']
-            result = utils.execute(['cat'], process_input=str_idata)
-            self.mock_popen.assert_called_once_with(bytes_idata)
-            self.assertEqual(str_odata, result)
-        else:
-            self.mock_popen.return_value = [str_odata, '']
-            result = utils.execute(['cat'], process_input=str_idata)
-            self.mock_popen.assert_called_once_with(str_idata)
-            self.assertEqual(str_odata, result)
-
-    def test_return_str_data(self):
-        str_data = "%s\n" % self.test_file
-        self.mock_popen.return_value = [str_data, '']
-        result = utils.execute(['ls', self.test_file], return_stderr=True)
-        self.assertEqual((str_data, ''), result)
-
-    @helpers.requires_py3
-    def test_surrogateescape_in_decoding_out_data(self):
-        bytes_err_data = b'\xed\xa0\xbd'
-        err_data = bytes_err_data.decode('utf-8', 'surrogateescape')
-        out_data = "%s\n" % self.test_file
-        bytes_out_data = out_data.encode(encoding='utf-8')
-        self.mock_popen.return_value = [bytes_out_data, bytes_err_data]
-        result = utils.execute(['ls', self.test_file], return_stderr=True)
-        self.assertEqual((out_data, err_data), result)
-
-
-class AgentUtilsExecuteEncodeTest(base.BaseTestCase):
-    def setUp(self):
-        super(AgentUtilsExecuteEncodeTest, self).setUp()
-        self.test_file = self.get_temp_file_path('test_execute.tmp')
-        open(self.test_file, 'w').close()
-
-    def test_decode_return_data(self):
-        str_data = "%s\n" % self.test_file
-        result = utils.execute(['ls', self.test_file], return_stderr=True)
-        self.assertEqual((str_data, ''), result)
-
-
-class AgentUtilsGetInterfaceMAC(base.BaseTestCase):
-    def test_get_interface_mac(self):
-        expect_val = '01:02:03:04:05:06'
-        with mock.patch('fcntl.ioctl') as ioctl:
-            ioctl.return_value = ''.join(['\x00' * 18,
-                                          '\x01\x02\x03\x04\x05\x06',
-                                          '\x00' * 232])
-            actual_val = utils.get_interface_mac('eth0')
-        self.assertEqual(actual_val, expect_val)
-
-
-class AgentUtilsReplaceFile(base.BaseTestCase):
-    def _test_replace_file_helper(self, explicit_perms=None):
-        # make file to replace
-        with mock.patch('tempfile.NamedTemporaryFile') as ntf:
-            ntf.return_value.name = '/baz'
-            with mock.patch('os.chmod') as chmod:
-                with mock.patch('os.rename') as rename:
-                    if explicit_perms is None:
-                        expected_perms = 0o644
-                        utils.replace_file('/foo', 'bar')
-                    else:
-                        expected_perms = explicit_perms
-                        utils.replace_file('/foo', 'bar', explicit_perms)
-
-                    expected = [mock.call('w+', dir='/', delete=False),
-                                mock.call().write('bar'),
-                                mock.call().close()]
-
-                    ntf.assert_has_calls(expected)
-                    chmod.assert_called_once_with('/baz', expected_perms)
-                    rename.assert_called_once_with('/baz', '/foo')
-
-    def test_replace_file_with_default_perms(self):
-        self._test_replace_file_helper()
-
-    def test_replace_file_with_0o600_perms(self):
-        self._test_replace_file_helper(0o600)
-
-
-class TestFindChildPids(base.BaseTestCase):
-
-    def test_returns_empty_list_for_exit_code_1(self):
-        with mock.patch.object(utils, 'execute',
-                               side_effect=RuntimeError('Exit code: 1')):
-            self.assertEqual([], utils.find_child_pids(-1))
-
-    def test_returns_empty_list_for_no_output(self):
-        with mock.patch.object(utils, 'execute', return_value=''):
-            self.assertEqual([], utils.find_child_pids(-1))
-
-    def test_returns_list_of_child_process_ids_for_good_output(self):
-        with mock.patch.object(utils, 'execute', return_value=' 123 \n 185\n'):
-            self.assertEqual(utils.find_child_pids(-1), ['123', '185'])
-
-    def test_raises_unknown_exception(self):
-        with testtools.ExpectedException(RuntimeError):
-            with mock.patch.object(utils, 'execute',
-                                   side_effect=RuntimeError()):
-                utils.find_child_pids(-1)
-
-
-class TestGetRoothelperChildPid(base.BaseTestCase):
-    def _test_get_root_helper_child_pid(self, expected=_marker,
-                                        run_as_root=False, pids=None):
-        def _find_child_pids(x):
-            if not pids:
-                return []
-            pids.pop(0)
-            return pids
-
-        mock_pid = object()
-        with mock.patch.object(utils, 'find_child_pids',
-                               side_effect=_find_child_pids):
-            actual = utils.get_root_helper_child_pid(mock_pid, run_as_root)
-        if expected is _marker:
-            expected = str(mock_pid)
-        self.assertEqual(expected, actual)
-
-    def test_returns_process_pid_not_root(self):
-        self._test_get_root_helper_child_pid()
-
-    def test_returns_child_pid_as_root(self):
-        self._test_get_root_helper_child_pid(expected='2', pids=['1', '2'],
-                                             run_as_root=True)
-
-    def test_returns_last_child_pid_as_root(self):
-        self._test_get_root_helper_child_pid(expected='3',
-                                             pids=['1', '2', '3'],
-                                             run_as_root=True)
-
-    def test_returns_none_as_root(self):
-        self._test_get_root_helper_child_pid(expected=None, run_as_root=True)
-
-
-class TestPathUtilities(base.BaseTestCase):
-    def test_remove_abs_path(self):
-        self.assertEqual(['ping', '8.8.8.8'],
-                         utils.remove_abs_path(['/usr/bin/ping', '8.8.8.8']))
-
-    def test_cmd_matches_expected_matches_abs_path(self):
-        cmd = ['/bar/../foo']
-        self.assertTrue(utils.cmd_matches_expected(cmd, cmd))
-
-    def test_cmd_matches_expected_matches_script(self):
-        self.assertTrue(utils.cmd_matches_expected(['python', 'script'],
-                                                   ['script']))
-
-    def test_cmd_matches_expected_doesnt_match(self):
-        self.assertFalse(utils.cmd_matches_expected('foo', 'bar'))
-
-
-class FakeUser(object):
-    def __init__(self, name):
-        self.pw_name = name
-
-
-class FakeGroup(object):
-    def __init__(self, name):
-        self.gr_name = name
-
-
-class TestBaseOSUtils(base.BaseTestCase):
-
-    EUID = 123
-    EUNAME = 'user'
-    EGID = 456
-    EGNAME = 'group'
-
-    @mock.patch('os.geteuid', return_value=EUID)
-    @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
-    def test_is_effective_user_id(self, getpwuid, geteuid):
-        self.assertTrue(utils.is_effective_user(self.EUID))
-        geteuid.assert_called_once_with()
-        self.assertFalse(getpwuid.called)
-
-    @mock.patch('os.geteuid', return_value=EUID)
-    @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
-    def test_is_effective_user_str_id(self, getpwuid, geteuid):
-        self.assertTrue(utils.is_effective_user(str(self.EUID)))
-        geteuid.assert_called_once_with()
-        self.assertFalse(getpwuid.called)
-
-    @mock.patch('os.geteuid', return_value=EUID)
-    @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
-    def test_is_effective_user_name(self, getpwuid, geteuid):
-        self.assertTrue(utils.is_effective_user(self.EUNAME))
-        geteuid.assert_called_once_with()
-        getpwuid.assert_called_once_with(self.EUID)
-
-    @mock.patch('os.geteuid', return_value=EUID)
-    @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
-    def test_is_not_effective_user(self, getpwuid, geteuid):
-        self.assertFalse(utils.is_effective_user('wrong'))
-        geteuid.assert_called_once_with()
-        getpwuid.assert_called_once_with(self.EUID)
-
-    @mock.patch('os.getegid', return_value=EGID)
-    @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
-    def test_is_effective_group_id(self, getgrgid, getegid):
-        self.assertTrue(utils.is_effective_group(self.EGID))
-        getegid.assert_called_once_with()
-        self.assertFalse(getgrgid.called)
-
-    @mock.patch('os.getegid', return_value=EGID)
-    @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
-    def test_is_effective_group_str_id(self, getgrgid, getegid):
-        self.assertTrue(utils.is_effective_group(str(self.EGID)))
-        getegid.assert_called_once_with()
-        self.assertFalse(getgrgid.called)
-
-    @mock.patch('os.getegid', return_value=EGID)
-    @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
-    def test_is_effective_group_name(self, getgrgid, getegid):
-        self.assertTrue(utils.is_effective_group(self.EGNAME))
-        getegid.assert_called_once_with()
-        getgrgid.assert_called_once_with(self.EGID)
-
-    @mock.patch('os.getegid', return_value=EGID)
-    @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
-    def test_is_not_effective_group(self, getgrgid, getegid):
-        self.assertFalse(utils.is_effective_group('wrong'))
-        getegid.assert_called_once_with()
-        getgrgid.assert_called_once_with(self.EGID)
-
-
-class TestUnixDomainHttpConnection(base.BaseTestCase):
-    def test_connect(self):
-        with mock.patch.object(utils, 'cfg') as cfg:
-            cfg.CONF.metadata_proxy_socket = '/the/path'
-            with mock.patch('socket.socket') as socket_create:
-                conn = utils.UnixDomainHTTPConnection('169.254.169.254',
-                                                      timeout=3)
-                conn.connect()
-
-                socket_create.assert_has_calls([
-                    mock.call(socket.AF_UNIX, socket.SOCK_STREAM),
-                    mock.call().settimeout(3),
-                    mock.call().connect('/the/path')]
-                )
-                self.assertEqual(conn.timeout, 3)
-
-
-class TestUnixDomainHttpProtocol(base.BaseTestCase):
-    def test_init_empty_client(self):
-        u = utils.UnixDomainHttpProtocol(mock.Mock(), '', mock.Mock())
-        self.assertEqual(u.client_address, ('<local>', 0))
-
-    def test_init_with_client(self):
-        u = utils.UnixDomainHttpProtocol(mock.Mock(), 'foo', mock.Mock())
-        self.assertEqual(u.client_address, 'foo')
-
-
-class TestUnixDomainWSGIServer(base.BaseTestCase):
-    def setUp(self):
-        super(TestUnixDomainWSGIServer, self).setUp()
-        self.eventlet_p = mock.patch.object(utils, 'eventlet')
-        self.eventlet = self.eventlet_p.start()
-        self.server = utils.UnixDomainWSGIServer('test')
-
-    def test_start(self):
-        mock_app = mock.Mock()
-        with mock.patch.object(self.server, '_launch') as launcher:
-            self.server.start(mock_app, '/the/path', workers=5, backlog=128)
-            self.eventlet.assert_has_calls([
-                mock.call.listen(
-                    '/the/path',
-                    family=socket.AF_UNIX,
-                    backlog=128
-                )]
-            )
-            launcher.assert_called_once_with(mock_app, workers=5)
-
-    def test_run(self):
-        self.server._run('app', 'sock')
-
-        self.eventlet.wsgi.server.assert_called_once_with(
-            'sock',
-            'app',
-            protocol=utils.UnixDomainHttpProtocol,
-            log=mock.ANY,
-            max_size=self.server.num_threads
-        )
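
The surrogateescape test above relies on Python 3's lossless bytes round
trip: bytes that are invalid UTF-8 decode into the surrogate range
U+DC80..U+DCFF and encode back to the original bytes. A standalone
demonstration:

    bad_bytes = b'\xed\xa0\xbd'  # not valid UTF-8 (encodes a lone surrogate)
    text = bad_bytes.decode('utf-8', 'surrogateescape')
    assert text == '\udced\udca0\udcbd'
    assert text.encode('utf-8', 'surrogateescape') == bad_bytes
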
diff --git a/neutron/tests/unit/agent/metadata/__init__.py b/neutron/tests/unit/agent/metadata/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/metadata/test_agent.py b/neutron/tests/unit/agent/metadata/test_agent.py
deleted file mode 100644 (file)
index 212d08e..0000000
+++ /dev/null
@@ -1,508 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import testtools
-import webob
-
-from neutron.agent.linux import utils as agent_utils
-from neutron.agent.metadata import agent
-from neutron.agent.metadata import config
-from neutron.agent import metadata_agent
-from neutron.common import constants as n_const
-from neutron.common import utils
-from neutron.tests import base
-
-
-class FakeConf(object):
-    auth_ca_cert = None
-    nova_metadata_ip = '9.9.9.9'
-    nova_metadata_port = 8775
-    metadata_proxy_shared_secret = 'secret'
-    nova_metadata_protocol = 'http'
-    nova_metadata_insecure = True
-    nova_client_cert = 'nova_cert'
-    nova_client_priv_key = 'nova_priv_key'
-    cache_url = ''
-
-
-class FakeConfCache(FakeConf):
-    cache_url = 'memory://?default_ttl=5'
-
-
-class TestMetadataProxyHandlerBase(base.BaseTestCase):
-    fake_conf = FakeConf
-
-    def setUp(self):
-        super(TestMetadataProxyHandlerBase, self).setUp()
-        self.log_p = mock.patch.object(agent, 'LOG')
-        self.log = self.log_p.start()
-        self.handler = agent.MetadataProxyHandler(self.fake_conf)
-        self.handler.plugin_rpc = mock.Mock()
-        self.handler.context = mock.Mock()
-
-
-class TestMetadataProxyHandlerRpc(TestMetadataProxyHandlerBase):
-    def test_get_port_filters(self):
-        router_id = 'test_router_id'
-        ip = '1.2.3.4'
-        networks = ('net_id1', 'net_id2')
-        expected = {'device_id': [router_id],
-                    'device_owner': n_const.ROUTER_INTERFACE_OWNERS,
-                    'network_id': networks,
-                    'fixed_ips': {'ip_address': [ip]}}
-        actual = self.handler._get_port_filters(router_id, ip, networks)
-        self.assertEqual(expected, actual)
-
-    def test_get_router_networks(self):
-        router_id = 'router-id'
-        expected = ('network_id1', 'network_id2')
-        ports = [{'network_id': 'network_id1', 'something': 42},
-                 {'network_id': 'network_id2', 'something_else': 32}]
-        self.handler.plugin_rpc.get_ports.return_value = ports
-        networks = self.handler._get_router_networks(router_id)
-        self.assertEqual(expected, networks)
-
-    def test_get_ports_for_remote_address(self):
-        ip = '1.1.1.1'
-        networks = ('network_id1', 'network_id2')
-        expected = [{'port_id': 'port_id1'},
-                    {'port_id': 'port_id2'}]
-        self.handler.plugin_rpc.get_ports.return_value = expected
-        ports = self.handler._get_ports_for_remote_address(ip, networks)
-        self.assertEqual(expected, ports)
-
-
-class TestMetadataProxyHandlerCache(TestMetadataProxyHandlerBase):
-    fake_conf = FakeConfCache
-
-    def test_call(self):
-        req = mock.Mock()
-        with mock.patch.object(self.handler,
-                               '_get_instance_and_tenant_id') as get_ids:
-            get_ids.return_value = ('instance_id', 'tenant_id')
-            with mock.patch.object(self.handler, '_proxy_request') as proxy:
-                proxy.return_value = 'value'
-
-                retval = self.handler(req)
-                self.assertEqual(retval, 'value')
-
-    def test_call_no_instance_match(self):
-        req = mock.Mock()
-        with mock.patch.object(self.handler,
-                               '_get_instance_and_tenant_id') as get_ids:
-            get_ids.return_value = None, None
-            retval = self.handler(req)
-            self.assertIsInstance(retval, webob.exc.HTTPNotFound)
-
-    def test_call_internal_server_error(self):
-        req = mock.Mock()
-        with mock.patch.object(self.handler,
-                               '_get_instance_and_tenant_id') as get_ids:
-            get_ids.side_effect = Exception
-            retval = self.handler(req)
-            self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
-            self.assertEqual(len(self.log.mock_calls), 2)
-
-    def test_get_router_networks(self):
-        router_id = 'router-id'
-        expected = ('network_id1', 'network_id2')
-        ports = [{'network_id': 'network_id1', 'something': 42},
-                 {'network_id': 'network_id2', 'something_else': 32}]
-        mock_get_ports = self.handler.plugin_rpc.get_ports
-        mock_get_ports.return_value = ports
-        networks = self.handler._get_router_networks(router_id)
-        mock_get_ports.assert_called_once_with(
-            mock.ANY,
-            {'device_id': [router_id],
-             'device_owner': n_const.ROUTER_INTERFACE_OWNERS})
-        self.assertEqual(expected, networks)
-
-    def _test_get_router_networks_twice_helper(self):
-        router_id = 'router-id'
-        ports = [{'network_id': 'network_id1', 'something': 42}]
-        expected_networks = ('network_id1',)
-        with mock.patch(
-            'oslo_utils.timeutils.utcnow_ts', return_value=0):
-            mock_get_ports = self.handler.plugin_rpc.get_ports
-            mock_get_ports.return_value = ports
-            networks = self.handler._get_router_networks(router_id)
-            mock_get_ports.assert_called_once_with(
-                mock.ANY,
-                {'device_id': [router_id],
-                 'device_owner': n_const.ROUTER_INTERFACE_OWNERS})
-            self.assertEqual(expected_networks, networks)
-            networks = self.handler._get_router_networks(router_id)
-
-    def test_get_router_networks_twice(self):
-        self._test_get_router_networks_twice_helper()
-        self.assertEqual(
-            1, self.handler.plugin_rpc.get_ports.call_count)
-
-    def _get_ports_for_remote_address_cache_hit_helper(self):
-        remote_address = 'remote_address'
-        networks = ('net1', 'net2')
-        mock_get_ports = self.handler.plugin_rpc.get_ports
-        mock_get_ports.return_value = [{'network_id': 'net1', 'something': 42}]
-        self.handler._get_ports_for_remote_address(remote_address, networks)
-        mock_get_ports.assert_called_once_with(
-            mock.ANY,
-            {'network_id': networks,
-             'fixed_ips': {'ip_address': [remote_address]}}
-        )
-        self.assertEqual(1, mock_get_ports.call_count)
-        self.handler._get_ports_for_remote_address(remote_address,
-                                                   networks)
-
-    def test_get_ports_for_remote_address_cache_hit(self):
-        self._get_ports_for_remote_address_cache_hit_helper()
-        self.assertEqual(
-            1, self.handler.plugin_rpc.get_ports.call_count)
-
-    def test_get_ports_network_id(self):
-        network_id = 'network-id'
-        router_id = 'router-id'
-        remote_address = 'remote-address'
-        expected = ['port1']
-        networks = (network_id,)
-        with mock.patch.object(self.handler,
-                               '_get_ports_for_remote_address'
-                               ) as mock_get_ip_addr,\
-                mock.patch.object(self.handler,
-                                  '_get_router_networks'
-                                  ) as mock_get_router_networks:
-            mock_get_ip_addr.return_value = expected
-            ports = self.handler._get_ports(remote_address, network_id,
-                                            router_id)
-            mock_get_ip_addr.assert_called_once_with(remote_address,
-                                                     networks)
-            self.assertFalse(mock_get_router_networks.called)
-        self.assertEqual(expected, ports)
-
-    def test_get_ports_router_id(self):
-        router_id = 'router-id'
-        remote_address = 'remote-address'
-        expected = ['port1']
-        networks = ('network1', 'network2')
-        with mock.patch.object(self.handler,
-                               '_get_ports_for_remote_address',
-                               return_value=expected
-                               ) as mock_get_ip_addr,\
-                mock.patch.object(self.handler,
-                                  '_get_router_networks',
-                                  return_value=networks
-                                  ) as mock_get_router_networks:
-            ports = self.handler._get_ports(remote_address,
-                                            router_id=router_id)
-            mock_get_router_networks.assert_called_once_with(router_id)
-        mock_get_ip_addr.assert_called_once_with(remote_address, networks)
-        self.assertEqual(expected, ports)
-
-    def test_get_ports_no_id(self):
-        self.assertRaises(TypeError, self.handler._get_ports, 'remote_address')
-
-    def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,
-                                           networks=None, router_id=None):
-        remote_address = '192.168.1.1'
-        headers['X-Forwarded-For'] = remote_address
-        req = mock.Mock(headers=headers)
-
-        def mock_get_ports(*args, **kwargs):
-            return list_ports_retval.pop(0)
-
-        self.handler.plugin_rpc.get_ports.side_effect = mock_get_ports
-        instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)
-
-        expected = []
-
-        if router_id:
-            expected.append(
-                mock.call(
-                    mock.ANY,
-                    {'device_id': [router_id],
-                     'device_owner': n_const.ROUTER_INTERFACE_OWNERS}
-                )
-            )
-
-        expected.append(
-            mock.call(
-                mock.ANY,
-                {'network_id': networks,
-                 'fixed_ips': {'ip_address': ['192.168.1.1']}}
-            )
-        )
-
-        self.handler.plugin_rpc.get_ports.assert_has_calls(expected)
-
-        return (instance_id, tenant_id)
-
-    def test_get_instance_id_router_id(self):
-        router_id = 'the_id'
-        headers = {
-            'X-Neutron-Router-ID': router_id
-        }
-
-        networks = ('net1', 'net2')
-        ports = [
-            [{'network_id': 'net1'}, {'network_id': 'net2'}],
-            [{'device_id': 'device_id', 'tenant_id': 'tenant_id',
-              'network_id': 'net1'}]
-        ]
-
-        self.assertEqual(
-            self._get_instance_and_tenant_id_helper(headers, ports,
-                                                    networks=networks,
-                                                    router_id=router_id),
-            ('device_id', 'tenant_id')
-        )
-
-    def test_get_instance_id_router_id_no_match(self):
-        router_id = 'the_id'
-        headers = {
-            'X-Neutron-Router-ID': router_id
-        }
-
-        networks = ('net1', 'net2')
-        ports = [
-            [{'network_id': 'net1'}, {'network_id': 'net2'}],
-            []
-        ]
-        self.assertEqual(
-            self._get_instance_and_tenant_id_helper(headers, ports,
-                                                    networks=networks,
-                                                    router_id=router_id),
-            (None, None)
-        )
-
-    def test_get_instance_id_network_id(self):
-        network_id = 'the_id'
-        headers = {
-            'X-Neutron-Network-ID': network_id
-        }
-
-        ports = [
-            [{'device_id': 'device_id',
-              'tenant_id': 'tenant_id',
-              'network_id': 'the_id'}]
-        ]
-
-        self.assertEqual(
-            self._get_instance_and_tenant_id_helper(headers, ports,
-                                                    networks=('the_id',)),
-            ('device_id', 'tenant_id')
-        )
-
-    def test_get_instance_id_network_id_no_match(self):
-        network_id = 'the_id'
-        headers = {
-            'X-Neutron-Network-ID': network_id
-        }
-
-        ports = [[]]
-
-        self.assertEqual(
-            self._get_instance_and_tenant_id_helper(headers, ports,
-                                                    networks=('the_id',)),
-            (None, None)
-        )
-
-    def _proxy_request_test_helper(self, response_code=200, method='GET'):
-        hdrs = {'X-Forwarded-For': '8.8.8.8'}
-        body = 'body'
-
-        req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,
-                        method=method, body=body)
-        resp = mock.MagicMock(status=response_code)
-        req.response = resp
-        with mock.patch.object(self.handler, '_sign_instance_id') as sign:
-            sign.return_value = 'signed'
-            with mock.patch('httplib2.Http') as mock_http:
-                resp.__getitem__.return_value = "text/plain"
-                mock_http.return_value.request.return_value = (resp, 'content')
-
-                retval = self.handler._proxy_request('the_id', 'tenant_id',
-                                                     req)
-                mock_http.assert_called_once_with(
-                    ca_certs=None, disable_ssl_certificate_validation=True)
-                mock_http.assert_has_calls([
-                    mock.call().add_certificate(
-                        FakeConf.nova_client_priv_key,
-                        FakeConf.nova_client_cert,
-                        "%s:%s" % (FakeConf.nova_metadata_ip,
-                                   FakeConf.nova_metadata_port)
-                    ),
-                    mock.call().request(
-                        'http://9.9.9.9:8775/the_path',
-                        method=method,
-                        headers={
-                            'X-Forwarded-For': '8.8.8.8',
-                            'X-Instance-ID-Signature': 'signed',
-                            'X-Instance-ID': 'the_id',
-                            'X-Tenant-ID': 'tenant_id'
-                        },
-                        body=body
-                    )]
-                )
-
-                return retval
-
-    def test_proxy_request_post(self):
-        response = self._proxy_request_test_helper(method='POST')
-        self.assertEqual(response.content_type, "text/plain")
-        self.assertEqual(response.body, 'content')
-
-    def test_proxy_request_200(self):
-        response = self._proxy_request_test_helper(200)
-        self.assertEqual(response.content_type, "text/plain")
-        self.assertEqual(response.body, 'content')
-
-    def test_proxy_request_400(self):
-        self.assertIsInstance(self._proxy_request_test_helper(400),
-                              webob.exc.HTTPBadRequest)
-
-    def test_proxy_request_403(self):
-        self.assertIsInstance(self._proxy_request_test_helper(403),
-                              webob.exc.HTTPForbidden)
-
-    def test_proxy_request_404(self):
-        self.assertIsInstance(self._proxy_request_test_helper(404),
-                              webob.exc.HTTPNotFound)
-
-    def test_proxy_request_409(self):
-        self.assertIsInstance(self._proxy_request_test_helper(409),
-                              webob.exc.HTTPConflict)
-
-    def test_proxy_request_500(self):
-        self.assertIsInstance(self._proxy_request_test_helper(500),
-                              webob.exc.HTTPInternalServerError)
-
-    def test_proxy_request_other_code(self):
-        with testtools.ExpectedException(Exception):
-            self._proxy_request_test_helper(302)
-
-    def test_sign_instance_id(self):
-        self.assertEqual(
-            self.handler._sign_instance_id('foo'),
-            '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
-        )
-
-
-class TestMetadataProxyHandlerNoCache(TestMetadataProxyHandlerCache):
-    fake_conf = FakeConf
-
-    def test_get_router_networks_twice(self):
-        self._test_get_router_networks_twice_helper()
-        self.assertEqual(
-            2, self.handler.plugin_rpc.get_ports.call_count)
-
-    def test_get_ports_for_remote_address_cache_hit(self):
-        self._get_ports_for_remote_address_cache_hit_helper()
-        self.assertEqual(
-            2, self.handler.plugin_rpc.get_ports.call_count)
-
-
-class TestUnixDomainMetadataProxy(base.BaseTestCase):
-    def setUp(self):
-        super(TestUnixDomainMetadataProxy, self).setUp()
-        self.cfg_p = mock.patch.object(agent, 'cfg')
-        self.cfg = self.cfg_p.start()
-        looping_call_p = mock.patch(
-            'oslo_service.loopingcall.FixedIntervalLoopingCall')
-        self.looping_mock = looping_call_p.start()
-        self.cfg.CONF.metadata_proxy_socket = '/the/path'
-        self.cfg.CONF.metadata_workers = 0
-        self.cfg.CONF.metadata_backlog = 128
-        self.cfg.CONF.metadata_proxy_socket_mode = config.USER_MODE
-
-    @mock.patch.object(utils, 'ensure_dir')
-    def test_init_doesnot_exists(self, ensure_dir):
-        agent.UnixDomainMetadataProxy(mock.Mock())
-        ensure_dir.assert_called_once_with('/the')
-
-    def test_init_exists(self):
-        with mock.patch('os.path.isdir') as isdir:
-            with mock.patch('os.unlink') as unlink:
-                isdir.return_value = True
-                agent.UnixDomainMetadataProxy(mock.Mock())
-                unlink.assert_called_once_with('/the/path')
-
-    def test_init_exists_unlink_no_file(self):
-        with mock.patch('os.path.isdir') as isdir:
-            with mock.patch('os.unlink') as unlink:
-                with mock.patch('os.path.exists') as exists:
-                    isdir.return_value = True
-                    exists.return_value = False
-                    unlink.side_effect = OSError
-
-                    agent.UnixDomainMetadataProxy(mock.Mock())
-                    unlink.assert_called_once_with('/the/path')
-
-    def test_init_exists_unlink_fails_file_still_exists(self):
-        with mock.patch('os.path.isdir') as isdir:
-            with mock.patch('os.unlink') as unlink:
-                with mock.patch('os.path.exists') as exists:
-                    isdir.return_value = True
-                    exists.return_value = True
-                    unlink.side_effect = OSError
-
-                    with testtools.ExpectedException(OSError):
-                        agent.UnixDomainMetadataProxy(mock.Mock())
-                    unlink.assert_called_once_with('/the/path')
-
-    @mock.patch.object(agent, 'MetadataProxyHandler')
-    @mock.patch.object(agent_utils, 'UnixDomainWSGIServer')
-    @mock.patch.object(utils, 'ensure_dir')
-    def test_run(self, ensure_dir, server, handler):
-        p = agent.UnixDomainMetadataProxy(self.cfg.CONF)
-        p.run()
-
-        ensure_dir.assert_called_once_with('/the')
-        server.assert_has_calls([
-            mock.call('neutron-metadata-agent'),
-            mock.call().start(handler.return_value,
-                              '/the/path', workers=0,
-                              backlog=128, mode=0o644),
-            mock.call().wait()]
-        )
-
-    def test_main(self):
-        with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy:
-            with mock.patch.object(metadata_agent, 'config') as config:
-                with mock.patch.object(metadata_agent, 'cfg') as cfg:
-                    with mock.patch.object(utils, 'cfg'):
-                        metadata_agent.main()
-
-                        self.assertTrue(config.setup_logging.called)
-                        proxy.assert_has_calls([
-                            mock.call(cfg.CONF),
-                            mock.call().run()]
-                        )
-
-    def test_init_state_reporting(self):
-        with mock.patch('os.makedirs'):
-            proxy = agent.UnixDomainMetadataProxy(mock.Mock())
-            self.looping_mock.assert_called_once_with(proxy._report_state)
-            self.looping_mock.return_value.start.assert_called_once_with(
-                interval=mock.ANY)
-
-    def test_report_state(self):
-        with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api:
-            with mock.patch('os.makedirs'):
-                proxy = agent.UnixDomainMetadataProxy(mock.Mock())
-                self.assertTrue(proxy.agent_state['start_flag'])
-                proxy._report_state()
-                self.assertNotIn('start_flag', proxy.agent_state)
-                state_api_inst = state_api.return_value
-                state_api_inst.report_state.assert_called_once_with(
-                    proxy.context, proxy.agent_state, use_call=True)
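
FakeConf sets metadata_proxy_shared_secret to 'secret', and
test_sign_instance_id pins the digest for instance id 'foo'. That digest is
consistent with an HMAC-SHA256 keyed with the shared secret and is
reproducible with the stdlib:

    import hashlib
    import hmac

    signed = hmac.new(b'secret', b'foo', hashlib.sha256).hexdigest()
    assert signed == ('773ba44693c7553d6ee20f61ea5d2757'
                      'a9a4f4a44d2841ae4e95b52e4cd62db4')
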
diff --git a/neutron/tests/unit/agent/metadata/test_driver.py b/neutron/tests/unit/agent/metadata/test_driver.py
deleted file mode 100644 (file)
index ea6047e..0000000
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from neutron.agent.common import config as agent_config
-from neutron.agent.l3 import agent as l3_agent
-from neutron.agent.l3 import config as l3_config
-from neutron.agent.l3 import ha as l3_ha_agent
-from neutron.agent.metadata import config
-from neutron.agent.metadata import driver as metadata_driver
-from neutron.common import constants
-from neutron.tests import base
-
-
-_uuid = uuidutils.generate_uuid
-
-
-class TestMetadataDriverRules(base.BaseTestCase):
-
-    def test_metadata_nat_rules(self):
-        rules = ('PREROUTING', '-d 169.254.169.254/32 -i qr-+ '
-                 '-p tcp -m tcp --dport 80 -j REDIRECT --to-port 8775')
-        self.assertEqual(
-            [rules],
-            metadata_driver.MetadataDriver.metadata_nat_rules(8775))
-
-    def test_metadata_filter_rules(self):
-        rules = [('INPUT', '-m mark --mark 0x1/%s -j ACCEPT' %
-                  constants.ROUTER_MARK_MASK),
-                 ('INPUT', '-p tcp -m tcp --dport 8775 -j DROP')]
-        self.assertEqual(
-            rules,
-            metadata_driver.MetadataDriver.metadata_filter_rules(8775, '0x1'))
-
-    def test_metadata_mangle_rules(self):
-        rule = ('PREROUTING', '-d 169.254.169.254/32 -i qr-+ '
-                '-p tcp -m tcp --dport 80 '
-                '-j MARK --set-xmark 0x1/%s' %
-                constants.ROUTER_MARK_MASK)
-        self.assertEqual(
-            [rule],
-            metadata_driver.MetadataDriver.metadata_mangle_rules('0x1'))
-
-
-class TestMetadataDriverProcess(base.BaseTestCase):
-
-    EUID = 123
-    EGID = 456
-    EUNAME = 'neutron'
-
-    def setUp(self):
-        super(TestMetadataDriverProcess, self).setUp()
-        mock.patch('eventlet.spawn').start()
-        agent_config.register_interface_driver_opts_helper(cfg.CONF)
-        cfg.CONF.set_override('interface_driver',
-                              'neutron.agent.linux.interface.NullDriver')
-
-        mock.patch('neutron.agent.l3.agent.L3PluginApi').start()
-        mock.patch('neutron.agent.l3.ha.AgentMixin'
-                   '._init_ha_conf_path').start()
-
-        cfg.CONF.register_opts(l3_config.OPTS)
-        cfg.CONF.register_opts(l3_ha_agent.OPTS)
-        cfg.CONF.register_opts(config.SHARED_OPTS)
-        cfg.CONF.register_opts(config.DRIVER_OPTS)
-
-    def _test_spawn_metadata_proxy(self, expected_user, expected_group,
-                                   user='', group='', watch_log=True):
-        router_id = _uuid()
-        router_ns = 'qrouter-%s' % router_id
-        metadata_port = 8080
-        ip_class_path = 'neutron.agent.linux.ip_lib.IPWrapper'
-        is_effective_user = 'neutron.agent.linux.utils.is_effective_user'
-        fake_is_effective_user = lambda x: x in [self.EUNAME, str(self.EUID)]
-
-        cfg.CONF.set_override('metadata_proxy_user', user)
-        cfg.CONF.set_override('metadata_proxy_group', group)
-        cfg.CONF.set_override('log_file', 'test.log')
-        cfg.CONF.set_override('debug', True)
-
-        agent = l3_agent.L3NATAgent('localhost')
-        with mock.patch('os.geteuid', return_value=self.EUID),\
-                mock.patch('os.getegid', return_value=self.EGID),\
-                mock.patch(is_effective_user,
-                           side_effect=fake_is_effective_user),\
-                mock.patch(ip_class_path) as ip_mock:
-            agent.metadata_driver.spawn_monitored_metadata_proxy(
-                agent.process_monitor,
-                router_ns,
-                metadata_port,
-                agent.conf,
-                router_id=router_id)
-            netns_execute_args = [
-                'neutron-ns-metadata-proxy',
-                mock.ANY,
-                mock.ANY,
-                '--router_id=%s' % router_id,
-                mock.ANY,
-                '--metadata_port=%s' % metadata_port,
-                '--metadata_proxy_user=%s' % expected_user,
-                '--metadata_proxy_group=%s' % expected_group,
-                '--debug',
-                '--verbose',
-                '--log-file=neutron-ns-metadata-proxy-%s.log' %
-                router_id]
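-            # When the proxy runs as a user other than the agent's own
-            # effective user, the driver is expected to disable log watching.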
-            if not watch_log:
-                netns_execute_args.append(
-                    '--nometadata_proxy_watch_log')
-            ip_mock.assert_has_calls([
-                mock.call(namespace=router_ns),
-                mock.call().netns.execute(netns_execute_args, addl_env=None,
-                                          run_as_root=False)
-            ])
-
-    def test_spawn_metadata_proxy_with_agent_user(self):
-        self._test_spawn_metadata_proxy(
-            self.EUNAME, str(self.EGID), user=self.EUNAME)
-
-    def test_spawn_metadata_proxy_with_nonagent_user(self):
-        self._test_spawn_metadata_proxy(
-            'notneutron', str(self.EGID), user='notneutron', watch_log=False)
-
-    def test_spawn_metadata_proxy_with_agent_uid(self):
-        self._test_spawn_metadata_proxy(
-            str(self.EUID), str(self.EGID), user=str(self.EUID))
-
-    def test_spawn_metadata_proxy_with_nonagent_uid(self):
-        self._test_spawn_metadata_proxy(
-            '321', str(self.EGID), user='321', watch_log=False)
-
-    def test_spawn_metadata_proxy_with_group(self):
-        self._test_spawn_metadata_proxy(str(self.EUID), 'group', group='group')
-
-    def test_spawn_metadata_proxy_with_gid(self):
-        self._test_spawn_metadata_proxy(str(self.EUID), '654', group='654')
-
-    def test_spawn_metadata_proxy(self):
-        self._test_spawn_metadata_proxy(str(self.EUID), str(self.EGID))
diff --git a/neutron/tests/unit/agent/metadata/test_namespace_proxy.py b/neutron/tests/unit/agent/metadata/test_namespace_proxy.py
deleted file mode 100644 (file)
index fda4e02..0000000
+++ /dev/null
@@ -1,313 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import testtools
-import webob
-
-from neutron.agent.linux import utils as agent_utils
-from neutron.agent.metadata import namespace_proxy as ns_proxy
-from neutron.common import exceptions
-from neutron.common import utils
-from neutron.tests import base
-from neutron import wsgi
-
-
-class TestNetworkMetadataProxyHandler(base.BaseTestCase):
-    def setUp(self):
-        super(TestNetworkMetadataProxyHandler, self).setUp()
-        self.handler = ns_proxy.NetworkMetadataProxyHandler('router_id')
-
-    def test_call(self):
-        req = mock.Mock(headers={})
-        with mock.patch.object(self.handler, '_proxy_request') as proxy_req:
-            proxy_req.return_value = 'value'
-
-            retval = self.handler(req)
-            self.assertEqual(retval, 'value')
-            proxy_req.assert_called_once_with(req.remote_addr,
-                                              req.method,
-                                              req.path_info,
-                                              req.query_string,
-                                              req.body)
-
-    def test_no_argument_passed_to_init(self):
-        with testtools.ExpectedException(
-                exceptions.NetworkIdOrRouterIdRequiredError):
-            ns_proxy.NetworkMetadataProxyHandler()
-
-    def test_call_internal_server_error(self):
-        req = mock.Mock(headers={})
-        with mock.patch.object(self.handler, '_proxy_request') as proxy_req:
-            proxy_req.side_effect = Exception
-            retval = self.handler(req)
-            self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
-            self.assertTrue(proxy_req.called)
-
-    def test_proxy_request_router_200(self):
-        self.handler.router_id = 'router_id'
-
-        resp = mock.MagicMock(status=200)
-        with mock.patch('httplib2.Http') as mock_http:
-            resp.__getitem__.return_value = "text/plain"
-            mock_http.return_value.request.return_value = (resp, 'content')
-
-            retval = self.handler._proxy_request('192.168.1.1',
-                                                 'GET',
-                                                 '/latest/meta-data',
-                                                 '',
-                                                 '')
-
-            mock_http.assert_has_calls([
-                mock.call().request(
-                    'http://169.254.169.254/latest/meta-data',
-                    method='GET',
-                    headers={
-                        'X-Forwarded-For': '192.168.1.1',
-                        'X-Neutron-Router-ID': 'router_id'
-                    },
-                    connection_type=agent_utils.UnixDomainHTTPConnection,
-                    body=''
-                )]
-            )
-
-            self.assertEqual(retval.headers['Content-Type'], 'text/plain')
-            self.assertEqual(b'content', retval.body)
-
-    def _test_proxy_request_network_200(self, content):
-        self.handler.network_id = 'network_id'
-
-        resp = mock.MagicMock(status=200)
-        with mock.patch('httplib2.Http') as mock_http:
-            resp.__getitem__.return_value = "application/json"
-            mock_http.return_value.request.return_value = (resp, content)
-
-            retval = self.handler._proxy_request('192.168.1.1',
-                                                 'GET',
-                                                 '/latest/meta-data',
-                                                 '',
-                                                 '')
-
-            mock_http.assert_has_calls([
-                mock.call().request(
-                    'http://169.254.169.254/latest/meta-data',
-                    method='GET',
-                    headers={
-                        'X-Forwarded-For': '192.168.1.1',
-                        'X-Neutron-Network-ID': 'network_id'
-                    },
-                    connection_type=agent_utils.UnixDomainHTTPConnection,
-                    body=''
-                )]
-            )
-
-            self.assertEqual(retval.headers['Content-Type'],
-                             'application/json')
-            self.assertEqual(wsgi.encode_body(content), retval.body)
-
-    def test_proxy_request_network_200(self):
-        self._test_proxy_request_network_200('{}')
-
-    def test_proxy_request_network_200_unicode_in_content(self):
-        self._test_proxy_request_network_200('Gl\xfcck')
-
-    def _test_proxy_request_network_4xx(self, status, method, expected):
-        self.handler.network_id = 'network_id'
-
-        resp = mock.Mock(status=status)
-        with mock.patch('httplib2.Http') as mock_http:
-            mock_http.return_value.request.return_value = (resp, '')
-
-            retval = self.handler._proxy_request('192.168.1.1',
-                                                 method,
-                                                 '/latest/meta-data',
-                                                 '',
-                                                 '')
-
-            mock_http.assert_has_calls([
-                mock.call().request(
-                    'http://169.254.169.254/latest/meta-data',
-                    method=method,
-                    headers={
-                        'X-Forwarded-For': '192.168.1.1',
-                        'X-Neutron-Network-ID': 'network_id'
-                    },
-                    connection_type=agent_utils.UnixDomainHTTPConnection,
-                    body=''
-                )]
-            )
-
-            self.assertIsInstance(retval, expected)
-
-    def test_proxy_request_network_400(self):
-        self._test_proxy_request_network_4xx(
-            400, 'GET', webob.exc.HTTPBadRequest)
-
-    def test_proxy_request_network_404(self):
-        self._test_proxy_request_network_4xx(
-            404, 'GET', webob.exc.HTTPNotFound)
-
-    def test_proxy_request_network_409(self):
-        self._test_proxy_request_network_4xx(
-            409, 'POST', webob.exc.HTTPConflict)
-
-    def test_proxy_request_network_500(self):
-        self.handler.network_id = 'network_id'
-
-        resp = mock.Mock(status=500)
-        with mock.patch('httplib2.Http') as mock_http:
-            mock_http.return_value.request.return_value = (resp, '')
-
-            retval = self.handler._proxy_request('192.168.1.1',
-                                                 'GET',
-                                                 '/latest/meta-data',
-                                                 '',
-                                                 '')
-
-            mock_http.assert_has_calls([
-                mock.call().request(
-                    'http://169.254.169.254/latest/meta-data',
-                    method='GET',
-                    headers={
-                        'X-Forwarded-For': '192.168.1.1',
-                        'X-Neutron-Network-ID': 'network_id'
-                    },
-                    connection_type=agent_utils.UnixDomainHTTPConnection,
-                    body=''
-                )]
-            )
-
-            self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
-
-    def test_proxy_request_network_418(self):
-        self.handler.network_id = 'network_id'
-
-        resp = mock.Mock(status=418)
-        with mock.patch('httplib2.Http') as mock_http:
-            mock_http.return_value.request.return_value = (resp, '')
-
-            with testtools.ExpectedException(Exception):
-                self.handler._proxy_request('192.168.1.1',
-                                            'GET',
-                                            '/latest/meta-data',
-                                            '',
-                                            '')
-
-            mock_http.assert_has_calls([
-                mock.call().request(
-                    'http://169.254.169.254/latest/meta-data',
-                    method='GET',
-                    headers={
-                        'X-Forwarded-For': '192.168.1.1',
-                        'X-Neutron-Network-ID': 'network_id'
-                    },
-                    connection_type=agent_utils.UnixDomainHTTPConnection,
-                    body=''
-                )]
-            )
-
-    def test_proxy_request_network_exception(self):
-        self.handler.network_id = 'network_id'
-
-        with mock.patch('httplib2.Http') as mock_http:
-            mock_http.return_value.request.side_effect = Exception
-
-            with testtools.ExpectedException(Exception):
-                self.handler._proxy_request('192.168.1.1',
-                                            'GET',
-                                            '/latest/meta-data',
-                                            '',
-                                            '')
-
-            mock_http.assert_has_calls([
-                mock.call().request(
-                    'http://169.254.169.254/latest/meta-data',
-                    method='GET',
-                    headers={
-                        'X-Forwarded-For': '192.168.1.1',
-                        'X-Neutron-Network-ID': 'network_id'
-                    },
-                    connection_type=agent_utils.UnixDomainHTTPConnection,
-                    body=''
-                )]
-            )
-
-
-class TestProxyDaemon(base.BaseTestCase):
-    def test_init(self):
-        with mock.patch('neutron.agent.linux.daemon.Pidfile'):
-            pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id', 'router_id')
-            self.assertEqual(pd.router_id, 'router_id')
-            self.assertEqual(pd.network_id, 'net_id')
-
-    def test_run(self):
-        with mock.patch('neutron.agent.linux.daemon.Pidfile'):
-            with mock.patch('neutron.wsgi.Server') as Server:
-                pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id',
-                                          'router_id')
-                pd.run()
-                Server.assert_has_calls([
-                    mock.call('neutron-network-metadata-proxy'),
-                    mock.call().start(mock.ANY, 9697),
-                    mock.call().wait()]
-                )
-
-    def test_main(self):
-        with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon:
-            with mock.patch.object(ns_proxy, 'config') as config:
-                with mock.patch.object(ns_proxy, 'cfg') as cfg:
-                    with mock.patch.object(utils, 'cfg') as utils_cfg:
-                        cfg.CONF.router_id = 'router_id'
-                        cfg.CONF.network_id = None
-                        cfg.CONF.metadata_port = 9697
-                        cfg.CONF.pid_file = 'pidfile'
-                        cfg.CONF.daemonize = True
-                        utils_cfg.CONF.log_opt_values.return_value = None
-                        ns_proxy.main()
-
-                        self.assertTrue(config.setup_logging.called)
-                        daemon.assert_has_calls([
-                            mock.call('pidfile', 9697,
-                                      router_id='router_id',
-                                      network_id=None,
-                                      user=mock.ANY,
-                                      group=mock.ANY,
-                                      watch_log=mock.ANY),
-                            mock.call().start()]
-                        )
-
-    def test_main_dont_fork(self):
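-        # With daemonize=False the proxy is expected to call run() in the
-        # foreground instead of forking via start().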
-        with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon:
-            with mock.patch.object(ns_proxy, 'config') as config:
-                with mock.patch.object(ns_proxy, 'cfg') as cfg:
-                    with mock.patch.object(utils, 'cfg') as utils_cfg:
-                        cfg.CONF.router_id = 'router_id'
-                        cfg.CONF.network_id = None
-                        cfg.CONF.metadata_port = 9697
-                        cfg.CONF.pid_file = 'pidfile'
-                        cfg.CONF.daemonize = False
-                        utils_cfg.CONF.log_opt_values.return_value = None
-                        ns_proxy.main()
-
-                        self.assertTrue(config.setup_logging.called)
-                        daemon.assert_has_calls([
-                            mock.call('pidfile', 9697,
-                                      router_id='router_id',
-                                      network_id=None,
-                                      user=mock.ANY,
-                                      group=mock.ANY,
-                                      watch_log=mock.ANY),
-                            mock.call().run()]
-                        )
diff --git a/neutron/tests/unit/agent/ovsdb/__init__.py b/neutron/tests/unit/agent/ovsdb/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/ovsdb/native/__init__.py b/neutron/tests/unit/agent/ovsdb/native/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/agent/ovsdb/native/test_helpers.py b/neutron/tests/unit/agent/ovsdb/native/test_helpers.py
deleted file mode 100644 (file)
index 41495e5..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2015, Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.agent.ovsdb.native import helpers
-from neutron.tests import base
-
-
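-# Client connection URIs and the passive ("p"-prefixed) manager URIs that
-# ovs-vsctl set-manager expects; note that tcp/ssl swap host and port in
-# the manager form.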
-CONNECTION_TO_MANAGER_URI_MAP = (
-    ('unix:/path/to/file', 'punix:/path/to/file'),
-    ('tcp:127.0.0.1:6640', 'ptcp:6640:127.0.0.1'),
-    ('ssl:192.168.1.1:8080', 'pssl:8080:192.168.1.1'))
-
-
-class TestOVSNativeHelpers(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestOVSNativeHelpers, self).setUp()
-        self.execute = mock.patch('neutron.agent.common.utils.execute').start()
-
-    def test__connection_to_manager_uri(self):
-        for conn_uri, expected in CONNECTION_TO_MANAGER_URI_MAP:
-            self.assertEqual(expected,
-                             helpers._connection_to_manager_uri(conn_uri))
-
-    def test_enable_connection_uri(self):
-        for conn_uri, manager_uri in CONNECTION_TO_MANAGER_URI_MAP:
-            helpers.enable_connection_uri(conn_uri)
-            self.execute.assert_called_with(
-                ['ovs-vsctl', 'set-manager', manager_uri],
-                run_as_root=True)
diff --git a/neutron/tests/unit/agent/test_rpc.py b/neutron/tests/unit/agent/test_rpc.py
deleted file mode 100644 (file)
index 9435b3e..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import datetime
-import mock
-from oslo_context import context as oslo_context
-import oslo_messaging
-
-from neutron.agent import rpc
-from neutron.tests import base
-
-
-class AgentRPCPluginApi(base.BaseTestCase):
-    def _test_rpc_call(self, method):
-        agent = rpc.PluginApi('fake_topic')
-        ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
-        expect_val = 'foo'
-        with mock.patch.object(agent.client, 'call') as mock_call,\
-                mock.patch.object(agent.client, 'prepare') as mock_prepare:
-            mock_prepare.return_value = agent.client
-            mock_call.return_value = expect_val
-            func_obj = getattr(agent, method)
-            if method == 'tunnel_sync':
-                actual_val = func_obj(ctxt, 'fake_tunnel_ip')
-            else:
-                actual_val = func_obj(ctxt, 'fake_device', 'fake_agent_id')
-        self.assertEqual(actual_val, expect_val)
-
-    def test_get_device_details(self):
-        self._test_rpc_call('get_device_details')
-
-    def test_get_devices_details_list(self):
-        self._test_rpc_call('get_devices_details_list')
-
-    def test_devices_details_list_unsupported(self):
-        agent = rpc.PluginApi('fake_topic')
-        ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
-        expect_val_get_device_details = 'foo'
-        expect_val = [expect_val_get_device_details]
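-        # If the server reports UnsupportedVersion for the list RPC, the
-        # agent is expected to fall back to one get_device_details call per
-        # device and wrap the results in a list.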
-        with mock.patch.object(agent.client, 'call') as mock_call, \
-                mock.patch.object(agent.client, 'prepare') as mock_prepare:
-            mock_prepare.return_value = agent.client
-            mock_call.side_effect = [oslo_messaging.UnsupportedVersion('1.2'),
-                                     expect_val_get_device_details]
-            func_obj = getattr(agent, 'get_devices_details_list')
-            actual_val = func_obj(ctxt, ['fake_device'], 'fake_agent_id')
-        self.assertEqual(actual_val, expect_val)
-
-    def test_update_device_down(self):
-        self._test_rpc_call('update_device_down')
-
-    def test_tunnel_sync(self):
-        self._test_rpc_call('tunnel_sync')
-
-
-class AgentPluginReportState(base.BaseTestCase):
-    def test_plugin_report_state_use_call(self):
-        topic = 'test'
-        reportStateAPI = rpc.PluginReportStateAPI(topic)
-        expected_agent_state = {'agent': 'test'}
-        with mock.patch.object(reportStateAPI.client, 'call') as mock_call, \
-                mock.patch.object(reportStateAPI.client, 'cast'), \
-                mock.patch.object(reportStateAPI.client, 'prepare'
-                                  ) as mock_prepare:
-            mock_prepare.return_value = reportStateAPI.client
-            ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
-            reportStateAPI.report_state(ctxt, expected_agent_state,
-                                        use_call=True)
-            self.assertEqual(mock_call.call_args[0][0], ctxt)
-            self.assertEqual(mock_call.call_args[0][1], 'report_state')
-            self.assertEqual(mock_call.call_args[1]['agent_state'],
-                             {'agent_state': expected_agent_state})
-            self.assertIsInstance(mock_call.call_args[1]['time'], str)
-
-    def test_plugin_report_state_cast(self):
-        topic = 'test'
-        reportStateAPI = rpc.PluginReportStateAPI(topic)
-        expected_agent_state = {'agent': 'test'}
-        with mock.patch.object(reportStateAPI.client, 'call'), \
-                mock.patch.object(reportStateAPI.client, 'cast'
-                                  ) as mock_cast, \
-                mock.patch.object(reportStateAPI.client, 'prepare'
-                                  ) as mock_prepare:
-            mock_prepare.return_value = reportStateAPI.client
-            ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
-            reportStateAPI.report_state(ctxt, expected_agent_state)
-            self.assertEqual(mock_cast.call_args[0][0], ctxt)
-            self.assertEqual(mock_cast.call_args[0][1], 'report_state')
-            self.assertEqual(mock_cast.call_args[1]['agent_state'],
-                             {'agent_state': expected_agent_state})
-            self.assertIsInstance(mock_cast.call_args[1]['time'], str)
-
-    def test_plugin_report_state_microsecond_is_0(self):
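-        # A report timestamp landing exactly on a whole second must still
-        # be serialized with explicit .000000 microseconds so the time
-        # format stays fixed-width.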
-        topic = 'test'
-        expected_time = datetime.datetime(2015, 7, 27, 15, 33, 30, 0)
-        expected_time_str = '2015-07-27T15:33:30.000000'
-        expected_agent_state = {'agent': 'test'}
-        with mock.patch('neutron.agent.rpc.datetime') as mock_datetime:
-            reportStateAPI = rpc.PluginReportStateAPI(topic)
-            mock_datetime.utcnow.return_value = expected_time
-            with mock.patch.object(reportStateAPI.client, 'call'), \
-                    mock.patch.object(reportStateAPI.client, 'cast'
-                                      ) as mock_cast, \
-                    mock.patch.object(reportStateAPI.client, 'prepare'
-                                      ) as mock_prepare:
-                mock_prepare.return_value = reportStateAPI.client
-                ctxt = oslo_context.RequestContext('fake_user',
-                                                   'fake_project')
-                reportStateAPI.report_state(ctxt, expected_agent_state)
-                self.assertEqual(expected_time_str,
-                                 mock_cast.call_args[1]['time'])
-
-
-class AgentRPCMethods(base.BaseTestCase):
-
-    def _test_create_consumers(
-            self, endpoints, method, expected, topics, listen):
-        call_to_patch = 'neutron.common.rpc.create_connection'
-        with mock.patch(call_to_patch) as create_connection:
-            rpc.create_consumers(
-                endpoints, method, topics, start_listening=listen)
-            create_connection.assert_has_calls(expected)
-
-    def test_create_consumers_start_listening(self):
-        endpoints = [mock.Mock()]
-        expected = [
-            mock.call(),
-            mock.call().create_consumer('foo-topic-op', endpoints,
-                                        fanout=True),
-            mock.call().consume_in_threads()
-        ]
-        method = 'foo'
-        topics = [('topic', 'op')]
-        self._test_create_consumers(
-            endpoints, method, expected, topics, True)
-
-    def test_create_consumers_do_not_listen(self):
-        endpoints = [mock.Mock()]
-        expected = [
-            mock.call(),
-            mock.call().create_consumer('foo-topic-op', endpoints,
-                                        fanout=True),
-        ]
-        method = 'foo'
-        topics = [('topic', 'op')]
-        self._test_create_consumers(
-            endpoints, method, expected, topics, False)
-
-    def test_create_consumers_with_node_name(self):
-        endpoints = [mock.Mock()]
-        expected = [
-            mock.call(),
-            mock.call().create_consumer('foo-topic-op', endpoints,
-                                        fanout=True),
-            mock.call().create_consumer('foo-topic-op.node1', endpoints,
-                                        fanout=False),
-            mock.call().consume_in_threads()
-        ]
-
-        call_to_patch = 'neutron.common.rpc.create_connection'
-        with mock.patch(call_to_patch) as create_connection:
-            rpc.create_consumers(endpoints, 'foo', [('topic', 'op', 'node1')])
-            create_connection.assert_has_calls(expected)
diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py
deleted file mode 100644 (file)
index 132b3aa..0000000
+++ /dev/null
@@ -1,3177 +0,0 @@
-# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import contextlib
-
-import mock
-from oslo_config import cfg
-import oslo_messaging
-from testtools import matchers
-import webob.exc
-
-from neutron.agent import firewall as firewall_base
-from neutron.agent.linux import iptables_manager
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.api.rpc.handlers import securitygroups_rpc
-from neutron.common import constants as const
-from neutron.common import ipv6_utils as ipv6
-from neutron.common import rpc as n_rpc
-from neutron import context
-from neutron.db import securitygroups_rpc_base as sg_db_rpc
-from neutron.extensions import allowedaddresspairs as addr_pair
-from neutron.extensions import securitygroup as ext_sg
-from neutron import manager
-from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent
-from neutron.tests import base
-from neutron.tests import tools
-from neutron.tests.unit.extensions import test_securitygroup as test_sg
-
-FAKE_PREFIX = {const.IPv4: '10.0.0.0/24',
-               const.IPv6: '2001:db8::/64'}
-FAKE_IP = {const.IPv4: '10.0.0.1',
-           const.IPv6: 'fe80::1',
-           'IPv6_GLOBAL': '2001:db8::1',
-           'IPv6_LLA': 'fe80::123',
-           'IPv6_DHCP': '2001:db8::3'}
-
-TEST_PLUGIN_CLASS = ('neutron.tests.unit.agent.test_securitygroups_rpc.'
-                     'SecurityGroupRpcTestPlugin')
-
-
-FIREWALL_BASE_PACKAGE = 'neutron.agent.linux.iptables_firewall.'
-FIREWALL_IPTABLES_DRIVER = FIREWALL_BASE_PACKAGE + 'IptablesFirewallDriver'
-FIREWALL_HYBRID_DRIVER = (FIREWALL_BASE_PACKAGE +
-                          'OVSHybridIptablesFirewallDriver')
-FIREWALL_NOOP_DRIVER = 'neutron.agent.firewall.NoopFirewallDriver'
-
-
-def set_enable_security_groups(enabled):
-    cfg.CONF.set_override('enable_security_group', enabled,
-                          group='SECURITYGROUP')
-
-
-def set_firewall_driver(firewall_driver):
-    cfg.CONF.set_override('firewall_driver', firewall_driver,
-                          group='SECURITYGROUP')
-
-
-class FakeFirewallDriver(firewall_base.FirewallDriver):
-    """Fake FirewallDriver
-
-    FirewallDriver is the base class for the other driver types. To use it
-    in tests, all of its abstract methods must be overridden.
-    """
-    def prepare_port_filter(self, port):
-        raise NotImplementedError()
-
-    def update_port_filter(self, port):
-        raise NotImplementedError()
-
-
-class SecurityGroupRpcTestPlugin(test_sg.SecurityGroupTestPlugin,
-                                 sg_db_rpc.SecurityGroupServerRpcMixin):
-    def __init__(self):
-        super(SecurityGroupRpcTestPlugin, self).__init__()
-        self.notifier = mock.Mock()
-        self.devices = {}
-
-    def create_port(self, context, port):
-        result = super(SecurityGroupRpcTestPlugin,
-                       self).create_port(context, port)
-        self.devices[result['id']] = result
-        self.notify_security_groups_member_updated(context, result)
-        return result
-
-    def update_port(self, context, id, port):
-        original_port = self.get_port(context, id)
-        updated_port = super(SecurityGroupRpcTestPlugin,
-                             self).update_port(context, id, port)
-        self.devices[id] = updated_port
-        self.update_security_group_on_port(
-            context, id, port, original_port, updated_port)
-        return updated_port
-
-    def delete_port(self, context, id):
-        port = self.get_port(context, id)
-        super(SecurityGroupRpcTestPlugin, self).delete_port(context, id)
-        self.notify_security_groups_member_updated(context, port)
-        del self.devices[id]
-
-    def get_port_from_device(self, context, device):
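-        # The RPC layer repopulates the security group fields, so the fake
-        # resets them and flattens fixed_ips to bare addresses on lookup.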
-        device = self.devices.get(device)
-        if device:
-            device['security_group_rules'] = []
-            device['security_group_source_groups'] = []
-            device['fixed_ips'] = [ip['ip_address']
-                                   for ip in device['fixed_ips']]
-        return device
-
-
-class SGServerRpcCallBackTestCase(test_sg.SecurityGroupDBTestCase):
-    def setUp(self, plugin=None):
-        plugin = plugin or TEST_PLUGIN_CLASS
-        set_firewall_driver(FIREWALL_NOOP_DRIVER)
-        super(SGServerRpcCallBackTestCase, self).setUp(plugin)
-        self.notifier = manager.NeutronManager.get_plugin().notifier
-        self.rpc = securitygroups_rpc.SecurityGroupServerRpcCallback()
-
-    def _test_security_group_port(self, device_owner, gw_ip,
-                                  cidr, ip_version, ip_address):
-        with self.network() as net:
-            with self.subnet(net,
-                             gateway_ip=gw_ip,
-                             cidr=cidr,
-                             ip_version=ip_version) as subnet:
-                kwargs = {
-                    'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
-                                   'ip_address': ip_address}]}
-                if device_owner:
-                    kwargs['device_owner'] = device_owner
-                res = self._create_port(
-                    self.fmt, net['network']['id'], **kwargs)
-                res = self.deserialize(self.fmt, res)
-                port_id = res['port']['id']
-                if device_owner in const.ROUTER_INTERFACE_OWNERS:
-                    data = {'port': {'fixed_ips': []}}
-                    req = self.new_update_request('ports', data, port_id)
-                    res = self.deserialize(self.fmt,
-                                           req.get_response(self.api))
-                self._delete('ports', port_id)
-
-    def test_notify_security_group_ipv6_gateway_port_added(self):
-        self._test_security_group_port(
-            const.DEVICE_OWNER_ROUTER_INTF,
-            '2001:0db8::1',
-            '2001:0db8::/64',
-            6,
-            '2001:0db8::1')
-        self.assertTrue(self.notifier.security_groups_provider_updated.called)
-
-    def test_notify_security_group_dvr_ipv6_gateway_port_added(self):
-        self._test_security_group_port(
-            const.DEVICE_OWNER_DVR_INTERFACE,
-            '2001:0db8::1',
-            '2001:0db8::/64',
-            6,
-            '2001:0db8::2')
-        self.assertTrue(self.notifier.security_groups_provider_updated.called)
-
-    def test_notify_security_group_ipv6_normal_port_added(self):
-        self._test_security_group_port(
-            None,
-            '2001:0db8::1',
-            '2001:0db8::/64',
-            6,
-            '2001:0db8::3')
-        self.assertFalse(self.notifier.security_groups_provider_updated.called)
-
-    def test_notify_security_group_ipv4_dhcp_port_added(self):
-        self._test_security_group_port(
-            const.DEVICE_OWNER_DHCP,
-            '192.168.1.1',
-            '192.168.1.0/24',
-            4,
-            '192.168.1.2')
-        self.assertTrue(self.notifier.security_groups_provider_updated.called)
-
-    def test_notify_security_group_ipv4_gateway_port_added(self):
-        self._test_security_group_port(
-            const.DEVICE_OWNER_ROUTER_INTF,
-            '192.168.1.1',
-            '192.168.1.0/24',
-            4,
-            '192.168.1.1')
-        self.assertFalse(self.notifier.security_groups_provider_updated.called)
-
-    def test_notify_security_group_ipv4_normal_port_added(self):
-        self._test_security_group_port(
-            None,
-            '192.168.1.1',
-            '192.168.1.0/24',
-            4,
-            '192.168.1.3')
-        self.assertFalse(self.notifier.security_groups_provider_updated.called)
-
-    def _test_sg_rules_for_devices_ipv4_ingress_port_range(
-            self, min_port, max_port):
-        fake_prefix = FAKE_PREFIX[const.IPv4]
-        with self.network() as n,\
-                self.subnet(n),\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, str(min_port),
-                str(max_port))
-            rule2 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '23',
-                '23', fake_prefix)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule'],
-                                         rule2['security_group_rule']]}
-            res = self._create_security_group_rule(self.fmt, rules)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-
-            res1 = self._create_port(
-                self.fmt, n['network']['id'],
-                security_groups=[sg1_id])
-            ports_rest1 = self.deserialize(self.fmt, res1)
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv4,
-                         'port_range_max': max_port,
-                         'security_group_id': sg1_id,
-                         'port_range_min': min_port},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv4,
-                         'port_range_max': 23, 'security_group_id': sg1_id,
-                         'port_range_min': 23,
-                         'source_ip_prefix': fake_prefix},
-                        ]
-            self.assertEqual(port_rpc['security_group_rules'],
-                             expected)
-            self._delete('ports', port_id1)
-
-    def test_sg_rules_for_devices_ipv4_ingress_port_range_min_port_1(self):
-        self._test_sg_rules_for_devices_ipv4_ingress_port_range(1, 10)
-
-    def test_security_group_info_for_ports_with_no_rules(self):
-        with self.network() as n,\
-                self.subnet(n),\
-                self.security_group() as sg:
-            sg_id = sg['security_group']['id']
-            self._delete_default_security_group_egress_rules(sg_id)
-
-            res = self._create_port(
-                self.fmt, n['network']['id'],
-                security_groups=[sg_id])
-            ports_rest = self.deserialize(self.fmt, res)
-            port_id = ports_rest['port']['id']
-            self.rpc.devices = {port_id: ports_rest['port']}
-            devices = [port_id]
-            ctx = context.get_admin_context()
-            sg_info = self.rpc.security_group_info_for_devices(
-                ctx, devices=devices)
-
-            expected = {sg_id: []}
-            self.assertEqual(expected, sg_info['security_groups'])
-            self._delete('ports', port_id)
-
-    @contextlib.contextmanager
-    def _port_with_addr_pairs_and_security_group(self):
-        plugin_obj = manager.NeutronManager.get_plugin()
-        if ('allowed-address-pairs'
-            not in plugin_obj.supported_extension_aliases):
-            self.skipTest("Test depends on allowed-address-pairs extension")
-        fake_prefix = FAKE_PREFIX[const.IPv4]
-        with self.network() as n,\
-                self.subnet(n),\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', 'tcp', '22',
-                '22', remote_group_id=sg1_id)
-            rule2 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', 'tcp', '23',
-                '23', fake_prefix)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule'],
-                                         rule2['security_group_rule']]}
-            res = self._create_security_group_rule(self.fmt, rules)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-            address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                              'ip_address': '10.0.1.0/24'},
-                             {'mac_address': '00:00:00:00:00:01',
-                              'ip_address': '11.0.0.1'}]
-            res1 = self._create_port(
-                self.fmt, n['network']['id'],
-                security_groups=[sg1_id],
-                arg_list=(addr_pair.ADDRESS_PAIRS,),
-                allowed_address_pairs=address_pairs)
-            yield self.deserialize(self.fmt, res1)
-
-    def test_security_group_info_for_devices_ipv4_addr_pair(self):
-        with self._port_with_addr_pairs_and_security_group() as port:
-            port_id = port['port']['id']
-            sg_id = port['port']['security_groups'][0]
-            devices = [port_id, 'no_exist_device']
-            ctx = context.get_admin_context()
-            # verify that address pairs are included in remote SG IPs
-            sg_member_ips = self.rpc.security_group_info_for_devices(
-                ctx, devices=devices)['sg_member_ips']
-            expected_member_ips = [
-                '10.0.1.0/24', '11.0.0.1',
-                port['port']['fixed_ips'][0]['ip_address']]
-            self.assertEqual(sorted(expected_member_ips),
-                             sorted(sg_member_ips[sg_id]['IPv4']))
-            self._delete('ports', port_id)
-
-    def test_security_group_rules_for_devices_ipv4_ingress_addr_pair(self):
-        fake_prefix = FAKE_PREFIX[const.IPv4]
-        with self._port_with_addr_pairs_and_security_group() as port:
-            port_id = port['port']['id']
-            sg_id = port['port']['security_groups'][0]
-            devices = [port_id, 'no_exist_device']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-
-            port_rpc = ports_rpc[port_id]
-            expected = [{'direction': 'egress', 'ethertype': 'IPv4',
-                         'security_group_id': sg_id},
-                        {'direction': 'egress', 'ethertype': 'IPv6',
-                         'security_group_id': sg_id},
-                        {'direction': 'ingress',
-                         'protocol': 'tcp', 'ethertype': 'IPv4',
-                         'port_range_max': 22,
-                         'remote_group_id': sg_id,
-                         'security_group_id': sg_id,
-                         'source_ip_prefix': '11.0.0.1/32',
-                         'port_range_min': 22},
-                        {'direction': 'ingress',
-                         'protocol': 'tcp', 'ethertype': 'IPv4',
-                         'port_range_max': 22,
-                         'remote_group_id': sg_id,
-                         'security_group_id': sg_id,
-                         'source_ip_prefix': '10.0.1.0/24',
-                         'port_range_min': 22},
-                        {'direction': 'ingress', 'protocol': 'tcp',
-                         'ethertype': 'IPv4',
-                         'port_range_max': 23, 'security_group_id': sg_id,
-                         'port_range_min': 23,
-                         'source_ip_prefix': fake_prefix},
-                        ]
-            expected = tools.UnorderedList(expected)
-            self.assertEqual(expected,
-                             port_rpc['security_group_rules'])
-            self.assertEqual(port['port']['allowed_address_pairs'],
-                             port_rpc['allowed_address_pairs'])
-            self._delete('ports', port_id)
-
-    def test_security_group_rules_for_devices_ipv4_egress(self):
-        fake_prefix = FAKE_PREFIX[const.IPv4]
-        with self.network() as n,\
-                self.subnet(n),\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'egress', const.PROTO_NAME_TCP, '22',
-                '22')
-            rule2 = self._build_security_group_rule(
-                sg1_id,
-                'egress', const.PROTO_NAME_UDP, '23',
-                '23', fake_prefix)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule'],
-                                         rule2['security_group_rule']]}
-            res = self._create_security_group_rule(self.fmt, rules)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-
-            res1 = self._create_port(
-                self.fmt, n['network']['id'],
-                security_groups=[sg1_id])
-            ports_rest1 = self.deserialize(self.fmt, res1)
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv4,
-                         'port_range_max': 22,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 22},
-                        {'direction': 'egress',
-                         'protocol': const.PROTO_NAME_UDP,
-                         'ethertype': const.IPv4,
-                         'port_range_max': 23, 'security_group_id': sg1_id,
-                         'port_range_min': 23,
-                         'dest_ip_prefix': fake_prefix},
-                        ]
-            self.assertEqual(port_rpc['security_group_rules'],
-                             expected)
-            self._delete('ports', port_id1)
-
-    def test_security_group_rules_for_devices_ipv4_source_group(self):
-
-        with self.network() as n,\
-                self.subnet(n),\
-                self.security_group() as sg1,\
-                self.security_group() as sg2:
-            sg1_id = sg1['security_group']['id']
-            sg2_id = sg2['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '24',
-                '25', remote_group_id=sg2['security_group']['id'])
-            rules = {
-                'security_group_rules': [rule1['security_group_rule']]}
-            res = self._create_security_group_rule(self.fmt, rules)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-
-            res1 = self._create_port(
-                self.fmt, n['network']['id'],
-                security_groups=[sg1_id,
-                                 sg2_id])
-            ports_rest1 = self.deserialize(self.fmt, res1)
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-
-            res2 = self._create_port(
-                self.fmt, n['network']['id'],
-                security_groups=[sg2_id])
-            ports_rest2 = self.deserialize(self.fmt, res2)
-            port_id2 = ports_rest2['port']['id']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg2_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg2_id},
-                        {'direction': u'ingress',
-                         'source_ip_prefix': u'10.0.0.3/32',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv4,
-                         'port_range_max': 25, 'port_range_min': 24,
-                         'remote_group_id': sg2_id,
-                         'security_group_id': sg1_id},
-                        ]
-            self.assertEqual(port_rpc['security_group_rules'],
-                             expected)
-            self._delete('ports', port_id1)
-            self._delete('ports', port_id2)
-
-    def test_security_group_info_for_devices_ipv4_source_group(self):
-
-        with self.network() as n,\
-                self.subnet(n),\
-                self.security_group() as sg1,\
-                self.security_group() as sg2:
-            sg1_id = sg1['security_group']['id']
-            sg2_id = sg2['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '24',
-                '25', remote_group_id=sg2['security_group']['id'])
-            rules = {
-                'security_group_rules': [rule1['security_group_rule']]}
-            res = self._create_security_group_rule(self.fmt, rules)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
-
-            res1 = self._create_port(
-                self.fmt, n['network']['id'],
-                security_groups=[sg1_id])
-            ports_rest1 = self.deserialize(self.fmt, res1)
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-
-            res2 = self._create_port(
-                self.fmt, n['network']['id'],
-                security_groups=[sg2_id])
-            ports_rest2 = self.deserialize(self.fmt, res2)
-            port_id2 = ports_rest2['port']['id']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_info_for_devices(
-                ctx, devices=devices)
-            expected = {
-                'security_groups': {sg1_id: [
-                    {'direction': 'egress', 'ethertype': const.IPv4},
-                    {'direction': 'egress', 'ethertype': const.IPv6},
-                    {'direction': u'ingress',
-                     'protocol': const.PROTO_NAME_TCP,
-                     'ethertype': const.IPv4,
-                     'port_range_max': 25, 'port_range_min': 24,
-                     'remote_group_id': sg2_id}
-                ]},
-                'sg_member_ips': {sg2_id: {
-                    'IPv4': set([u'10.0.0.3']),
-                    'IPv6': set(),
-                }}
-            }
-            self.assertEqual(expected['security_groups'],
-                             ports_rpc['security_groups'])
-            self.assertEqual(expected['sg_member_ips'][sg2_id]['IPv4'],
-                             ports_rpc['sg_member_ips'][sg2_id]['IPv4'])
-            self._delete('ports', port_id1)
-            self._delete('ports', port_id2)
-
-    def test_security_group_rules_for_devices_ipv6_ingress(self):
-        fake_prefix = FAKE_PREFIX[const.IPv6]
-        fake_gateway = FAKE_IP[const.IPv6]
-        with self.network() as n,\
-                self.subnet(n, gateway_ip=fake_gateway,
-                            cidr=fake_prefix, ip_version=6
-                            ) as subnet_v6,\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '22',
-                '22',
-                ethertype=const.IPv6)
-            rule2 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_UDP, '23',
-                '23', fake_prefix,
-                ethertype=const.IPv6)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule'],
-                                         rule2['security_group_rule']]}
-            res = self._create_security_group_rule(self.fmt, rules)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-
-            dhcp_port = self._create_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
-                            'ip_address': FAKE_IP['IPv6_DHCP']}],
-                device_owner=const.DEVICE_OWNER_DHCP,
-                security_groups=[sg1_id])
-            dhcp_rest = self.deserialize(self.fmt, dhcp_port)
-            dhcp_mac = dhcp_rest['port']['mac_address']
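-            # DHCPv6 replies originate from the server's EUI-64 link-local
-            # address, so the implicit DHCP rule is keyed on that LLA.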
-            dhcp_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
-                const.IPV6_LLA_PREFIX,
-                dhcp_mac))
-
-            res1 = self._create_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                security_groups=[sg1_id])
-            ports_rest1 = self.deserialize(self.fmt, res1)
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
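-            # DHCP_RULE_PORT[6] holds the (server port, client port,
-            # ethertype) triple used for the implicit DHCPv6 allow rule.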
-            source_port, dest_port, ethertype = sg_db_rpc.DHCP_RULE_PORT[6]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 22,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 22},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_UDP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 23,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 23,
-                         'source_ip_prefix': fake_prefix},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_ICMP_V6,
-                         'ethertype': const.IPv6,
-                         'source_ip_prefix': fake_gateway,
-                         'source_port_range_min': const.ICMPV6_TYPE_RA},
-                        {'direction': 'ingress',
-                         'ethertype': ethertype,
-                         'port_range_max': dest_port,
-                         'port_range_min': dest_port,
-                         'protocol': const.PROTO_NAME_UDP,
-                         'source_ip_prefix': dhcp_lla_ip,
-                         'source_port_range_max': source_port,
-                         'source_port_range_min': source_port}
-                        ]
-            self.assertEqual(port_rpc['security_group_rules'],
-                             expected)
-            self._delete('ports', port_id1)
-
-    def test_security_group_info_for_devices_only_ipv6_rule(self):
-        with self.network() as n,\
-                self.subnet(n),\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '22',
-                '22', remote_group_id=sg1_id,
-                ethertype=const.IPv6)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule']]}
-            self._make_security_group_rule(self.fmt, rules)
-
-            res1 = self._create_port(
-                self.fmt, n['network']['id'],
-                security_groups=[sg1_id])
-            ports_rest1 = self.deserialize(self.fmt, res1)
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_info_for_devices(
-                ctx, devices=devices)
-            expected = {
-                'security_groups': {sg1_id: [
-                    {'direction': 'egress', 'ethertype': const.IPv4},
-                    {'direction': 'egress', 'ethertype': const.IPv6},
-                    {'direction': u'ingress',
-                     'protocol': const.PROTO_NAME_TCP,
-                     'ethertype': const.IPv6,
-                     'port_range_max': 22, 'port_range_min': 22,
-                     'remote_group_id': sg1_id}
-                ]},
-                'sg_member_ips': {sg1_id: {
-                    'IPv6': set(),
-                }}
-            }
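-            # The port sits on an IPv4 subnet, so no IPv6 member address is
-            # expected for the remote group.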
-            self.assertEqual(expected['security_groups'],
-                             ports_rpc['security_groups'])
-            self.assertEqual(expected['sg_member_ips'][sg1_id]['IPv6'],
-                             ports_rpc['sg_member_ips'][sg1_id]['IPv6'])
-            self._delete('ports', port_id1)
-
-    def test_security_group_ra_rules_for_devices_ipv6_gateway_global(self):
-        fake_prefix = FAKE_PREFIX[const.IPv6]
-        fake_gateway = FAKE_IP['IPv6_GLOBAL']
-        with self.network() as n,\
-                self.subnet(n, gateway_ip=fake_gateway,
-                            cidr=fake_prefix, ip_version=6,
-                            ipv6_ra_mode=const.IPV6_SLAAC
-                            ) as subnet_v6,\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '22',
-                '22',
-                ethertype=const.IPv6)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule']]}
-            self._make_security_group_rule(self.fmt, rules)
-
-            # Create gateway port
-            gateway_res = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
-                            'ip_address': fake_gateway}],
-                device_owner=const.DEVICE_OWNER_ROUTER_INTF)
-            gateway_mac = gateway_res['port']['mac_address']
-            gateway_port_id = gateway_res['port']['id']
-            gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
-                const.IPV6_LLA_PREFIX,
-                gateway_mac))
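-            # The RA rule is expected to use the gateway port's link-local
-            # address, derived from its MAC via EUI-64, not the global
-            # gateway address configured on the subnet. For example, MAC
-            # fa:16:3e:00:11:22 yields fe80::f816:3eff:fe00:1122.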
-
-            ports_rest1 = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                security_groups=[sg1_id])
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 22,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 22},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_ICMP_V6,
-                         'ethertype': const.IPv6,
-                         'source_ip_prefix': gateway_lla_ip,
-                         'source_port_range_min': const.ICMPV6_TYPE_RA},
-                        ]
-            self.assertEqual(expected,
-                             port_rpc['security_group_rules'])
-            self._delete('ports', port_id1)
-            # Note(xuhanp): remove the gateway port's fixed_ips, or deletion
-            # of the gateway port will be prevented.
-            data = {'port': {'fixed_ips': []}}
-            req = self.new_update_request('ports', data, gateway_port_id)
-            self.deserialize(self.fmt, req.get_response(self.api))
-            self._delete('ports', gateway_port_id)
-
-    def test_security_group_rule_for_device_ipv6_multi_router_interfaces(self):
-        fake_prefix = FAKE_PREFIX[const.IPv6]
-        fake_gateway = FAKE_IP['IPv6_GLOBAL']
-        with self.network() as n,\
-                self.subnet(n, gateway_ip=fake_gateway,
-                            cidr=fake_prefix, ip_version=6,
-                            ipv6_ra_mode=const.IPV6_SLAAC
-                            ) as subnet_v6,\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '22',
-                '22',
-                ethertype=const.IPv6)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule']]}
-            self._make_security_group_rule(self.fmt, rules)
-
-            # Create gateway port
-            gateway_res = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
-                            'ip_address': fake_gateway}],
-                device_owner=const.DEVICE_OWNER_ROUTER_INTF)
-            gateway_mac = gateway_res['port']['mac_address']
-            gateway_port_id = gateway_res['port']['id']
-            gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
-                const.IPV6_LLA_PREFIX,
-                gateway_mac))
-            # Create another router interface port
-            interface_res = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                device_owner=const.DEVICE_OWNER_ROUTER_INTF)
-            interface_port_id = interface_res['port']['id']
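-            # Only the subnet gateway port should contribute an RA rule;
-            # this additional interface port must not add one.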
-
-            ports_rest1 = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                security_groups=[sg1_id])
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 22,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 22},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_ICMP_V6,
-                         'ethertype': const.IPv6,
-                         'source_ip_prefix': gateway_lla_ip,
-                         'source_port_range_min': const.ICMPV6_TYPE_RA},
-                        ]
-            self.assertEqual(expected,
-                             port_rpc['security_group_rules'])
-            self._delete('ports', port_id1)
-            data = {'port': {'fixed_ips': []}}
-            req = self.new_update_request('ports', data, gateway_port_id)
-            self.deserialize(self.fmt, req.get_response(self.api))
-            req = self.new_update_request('ports', data, interface_port_id)
-            self.deserialize(self.fmt, req.get_response(self.api))
-            self._delete('ports', gateway_port_id)
-            self._delete('ports', interface_port_id)
-
-    def test_security_group_ra_rules_for_devices_ipv6_dvr(self):
-        fake_prefix = FAKE_PREFIX[const.IPv6]
-        fake_gateway = FAKE_IP['IPv6_GLOBAL']
-        with self.network() as n,\
-                self.subnet(n, gateway_ip=fake_gateway,
-                            cidr=fake_prefix, ip_version=6,
-                            ipv6_ra_mode=const.IPV6_SLAAC
-                            ) as subnet_v6,\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '22',
-                '22',
-                ethertype=const.IPv6)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule']]}
-            self._make_security_group_rule(self.fmt, rules)
-
-            # Create DVR router interface port
-            gateway_res = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
-                            'ip_address': fake_gateway}],
-                device_owner=const.DEVICE_OWNER_DVR_INTERFACE)
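-            # A DVR-owned interface port should produce an RA rule just
-            # like a regular router interface port.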
-            gateway_mac = gateway_res['port']['mac_address']
-            gateway_port_id = gateway_res['port']['id']
-            gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
-                const.IPV6_LLA_PREFIX,
-                gateway_mac))
-
-            ports_rest1 = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                security_groups=[sg1_id])
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 22,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 22},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_ICMP_V6,
-                         'ethertype': const.IPv6,
-                         'source_ip_prefix': gateway_lla_ip,
-                         'source_port_range_min': const.ICMPV6_TYPE_RA},
-                        ]
-            self.assertEqual(expected,
-                             port_rpc['security_group_rules'])
-            self._delete('ports', port_id1)
-            # Note(xuhanp): remove the gateway port's fixed_ips, or deletion
-            # of the gateway port will be prevented.
-            data = {'port': {'fixed_ips': []}}
-            req = self.new_update_request('ports', data, gateway_port_id)
-            self.deserialize(self.fmt, req.get_response(self.api))
-            self._delete('ports', gateway_port_id)
-
-    def test_security_group_ra_rules_for_devices_ipv6_gateway_lla(self):
-        fake_prefix = FAKE_PREFIX[const.IPv6]
-        fake_gateway = FAKE_IP['IPv6_LLA']
-        with self.network() as n,\
-                self.subnet(n, gateway_ip=fake_gateway,
-                            cidr=fake_prefix, ip_version=6,
-                            ipv6_ra_mode=const.IPV6_SLAAC
-                            ) as subnet_v6,\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '22',
-                '22',
-                ethertype=const.IPv6)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule']]}
-            self._make_security_group_rule(self.fmt, rules)
-
-            ports_rest1 = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                security_groups=[sg1_id])
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 22,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 22},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_ICMP_V6,
-                         'ethertype': const.IPv6,
-                         'source_ip_prefix': fake_gateway,
-                         'source_port_range_min': const.ICMPV6_TYPE_RA},
-                        ]
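-            # With a link-local gateway_ip the address is used directly as
-            # the RA source, so no gateway port or EUI-64 derivation is
-            # involved.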
-            self.assertEqual(expected,
-                             port_rpc['security_group_rules'])
-            self._delete('ports', port_id1)
-
-    def test_security_group_ra_rules_for_devices_ipv6_no_gateway_port(self):
-        fake_prefix = FAKE_PREFIX[const.IPv6]
-        with self.network() as n,\
-                self.subnet(n, gateway_ip=None, cidr=fake_prefix,
-                            ip_version=6, ipv6_ra_mode=const.IPV6_SLAAC
-                            ) as subnet_v6,\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '22',
-                '22',
-                ethertype=const.IPv6)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule']]}
-            self._make_security_group_rule(self.fmt, rules)
-
-            ports_rest1 = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                security_groups=[sg1_id])
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 22,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 22},
-                        ]
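-            # The subnet has no gateway port, so no ICMPv6 RA rule is
-            # expected.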
-            self.assertEqual(expected,
-                             port_rpc['security_group_rules'])
-            self._delete('ports', port_id1)
-
-    def test_security_group_rules_for_devices_ipv6_egress(self):
-        fake_prefix = FAKE_PREFIX[const.IPv6]
-        fake_gateway = FAKE_IP[const.IPv6]
-        with self.network() as n,\
-                self.subnet(n, gateway_ip=fake_gateway,
-                            cidr=fake_prefix, ip_version=6
-                            ) as subnet_v6,\
-                self.security_group() as sg1:
-            sg1_id = sg1['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'egress', const.PROTO_NAME_TCP, '22',
-                '22',
-                ethertype=const.IPv6)
-            rule2 = self._build_security_group_rule(
-                sg1_id,
-                'egress', const.PROTO_NAME_UDP, '23',
-                '23', fake_prefix,
-                ethertype=const.IPv6)
-            rules = {
-                'security_group_rules': [rule1['security_group_rule'],
-                                         rule2['security_group_rule']]}
-            self._make_security_group_rule(self.fmt, rules)
-
-            ports_rest1 = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                security_groups=[sg1_id])
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 22,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 22},
-                        {'direction': 'egress',
-                         'protocol': const.PROTO_NAME_UDP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 23,
-                         'security_group_id': sg1_id,
-                         'port_range_min': 23,
-                         'dest_ip_prefix': fake_prefix},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_ICMP_V6,
-                         'ethertype': const.IPv6,
-                         'source_ip_prefix': fake_gateway,
-                         'source_port_range_min': const.ICMPV6_TYPE_RA},
-                        ]
-            self.assertEqual(expected,
-                             port_rpc['security_group_rules'])
-            self._delete('ports', port_id1)
-
-    def test_security_group_rules_for_devices_ipv6_source_group(self):
-        fake_prefix = FAKE_PREFIX[const.IPv6]
-        fake_gateway = FAKE_IP[const.IPv6]
-        with self.network() as n,\
-                self.subnet(n, gateway_ip=fake_gateway,
-                            cidr=fake_prefix, ip_version=6
-                            ) as subnet_v6,\
-                self.security_group() as sg1,\
-                self.security_group() as sg2:
-            sg1_id = sg1['security_group']['id']
-            sg2_id = sg2['security_group']['id']
-            rule1 = self._build_security_group_rule(
-                sg1_id,
-                'ingress', const.PROTO_NAME_TCP, '24',
-                '25',
-                ethertype=const.IPv6,
-                remote_group_id=sg2['security_group']['id'])
-            rules = {
-                'security_group_rules': [rule1['security_group_rule']]}
-            self._make_security_group_rule(self.fmt, rules)
-
-            ports_rest1 = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                security_groups=[sg1_id,
-                                 sg2_id])
-            port_id1 = ports_rest1['port']['id']
-            self.rpc.devices = {port_id1: ports_rest1['port']}
-            devices = [port_id1, 'no_exist_device']
-
-            ports_rest2 = self._make_port(
-                self.fmt, n['network']['id'],
-                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
-                security_groups=[sg2_id])
-            port_id2 = ports_rest2['port']['id']
-
-            ctx = context.get_admin_context()
-            ports_rpc = self.rpc.security_group_rules_for_devices(
-                ctx, devices=devices)
-            port_rpc = ports_rpc[port_id1]
-            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg1_id},
-                        {'direction': 'egress', 'ethertype': const.IPv4,
-                         'security_group_id': sg2_id},
-                        {'direction': 'egress', 'ethertype': const.IPv6,
-                         'security_group_id': sg2_id},
-                        {'direction': 'ingress',
-                         'source_ip_prefix': '2001:db8::2/128',
-                         'protocol': const.PROTO_NAME_TCP,
-                         'ethertype': const.IPv6,
-                         'port_range_max': 25, 'port_range_min': 24,
-                         'remote_group_id': sg2_id,
-                         'security_group_id': sg1_id},
-                        {'direction': 'ingress',
-                         'protocol': const.PROTO_NAME_ICMP_V6,
-                         'ethertype': const.IPv6,
-                         'source_ip_prefix': fake_gateway,
-                         'source_port_range_min': const.ICMPV6_TYPE_RA},
-                        ]
-            self.assertEqual(expected,
-                             port_rpc['security_group_rules'])
-            self._delete('ports', port_id1)
-            self._delete('ports', port_id2)
-
-
-class SecurityGroupAgentRpcTestCaseForNoneDriver(base.BaseTestCase):
-    def test_init_firewall_with_none_driver(self):
-        set_enable_security_groups(False)
-        agent = sg_rpc.SecurityGroupAgentRpc(
-                context=None, plugin_rpc=mock.Mock())
-        self.assertEqual('NoopFirewallDriver',
-                         agent.firewall.__class__.__name__)
-
-
-class BaseSecurityGroupAgentRpcTestCase(base.BaseTestCase):
-    def setUp(self, defer_refresh_firewall=False):
-        super(BaseSecurityGroupAgentRpcTestCase, self).setUp()
-        set_firewall_driver(FIREWALL_NOOP_DRIVER)
-        self.agent = sg_rpc.SecurityGroupAgentRpc(
-                context=None, plugin_rpc=mock.Mock(),
-                defer_refresh_firewall=defer_refresh_firewall)
-        mock.patch('neutron.agent.linux.iptables_manager').start()
-        self.default_firewall = self.agent.firewall
-        self.firewall = mock.Mock()
-        firewall_object = FakeFirewallDriver()
-        self.firewall.defer_apply.side_effect = firewall_object.defer_apply
-        self.agent.firewall = self.firewall
-        self.fake_device = {'device': 'fake_device',
-                            'network_id': 'fake_net',
-                            'security_groups': ['fake_sgid1', 'fake_sgid2'],
-                            'security_group_source_groups': ['fake_sgid2'],
-                            'security_group_rules': [
-                                {'security_group_id': 'fake_sgid1',
-                                 'remote_group_id': 'fake_sgid2'}]}
-        self.firewall.ports = {'fake_device': self.fake_device}
-        self.firewall.security_group_updated = mock.Mock()
-
-
-class SecurityGroupAgentRpcTestCase(BaseSecurityGroupAgentRpcTestCase):
-    def setUp(self, defer_refresh_firewall=False):
-        super(SecurityGroupAgentRpcTestCase, self).setUp(
-            defer_refresh_firewall)
-        rpc = self.agent.plugin_rpc
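-        # Simulate an old server: UnsupportedVersion from the enhanced info
-        # RPC makes the agent fall back to the legacy
-        # security_group_rules_for_devices call.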
-        rpc.security_group_info_for_devices.side_effect = (
-                oslo_messaging.UnsupportedVersion('1.2'))
-        rpc.security_group_rules_for_devices.return_value = (
-            self.firewall.ports)
-
-    def test_prepare_and_remove_devices_filter(self):
-        self.agent.prepare_devices_filter(['fake_device'])
-        self.agent.remove_devices_filter(['fake_device'])
-        # a device which is not filtered is ignored
-        self.firewall.assert_has_calls([mock.call.defer_apply(),
-                                        mock.call.prepare_port_filter(
-                                            self.fake_device),
-                                        mock.call.defer_apply(),
-                                        mock.call.remove_port_filter(
-                                            self.fake_device),
-                                        ])
-
-    def test_prepare_devices_filter_with_noopfirewall(self):
-        self.agent.firewall = self.default_firewall
-        self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
-        self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_device'])
-        self.assertFalse(self.agent.plugin_rpc.
-                         security_group_info_for_devices.called)
-        self.assertFalse(self.agent.plugin_rpc.
-                         security_group_rules_for_devices.called)
-
-    def test_prepare_devices_filter_with_firewall_disabled(self):
-        cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
-        self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
-        self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_device'])
-        self.assertFalse(self.agent.plugin_rpc.
-                         security_group_info_for_devices.called)
-        self.assertFalse(self.agent.plugin_rpc.
-                         security_group_rules_for_devices.called)
-
-    def test_security_groups_rule_updated(self):
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
-        self.agent.refresh_firewall.assert_called_once_with(
-            [self.fake_device['device']])
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_rule_not_updated(self):
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4'])
-        self.assertFalse(self.agent.refresh_firewall.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_member_updated(self):
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3'])
-        self.agent.refresh_firewall.assert_called_once_with(
-            [self.fake_device['device']])
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_member_not_updated(self):
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.security_groups_member_updated(['fake_sgid3', 'fake_sgid4'])
-        self.assertFalse(self.agent.refresh_firewall.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_provider_updated(self):
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.security_groups_provider_updated(None)
-        self.agent.refresh_firewall.assert_called_once_with(None)
-
-    def test_refresh_firewall(self):
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.refresh_firewall()
-        calls = [mock.call.defer_apply(),
-                 mock.call.prepare_port_filter(self.fake_device),
-                 mock.call.defer_apply(),
-                 mock.call.update_port_filter(self.fake_device)]
-        self.firewall.assert_has_calls(calls)
-
-    def test_refresh_firewall_devices(self):
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.refresh_firewall([self.fake_device])
-        calls = [mock.call.defer_apply(),
-                 mock.call.prepare_port_filter(self.fake_device),
-                 mock.call.defer_apply(),
-                 mock.call.update_port_filter(self.fake_device)]
-        self.firewall.assert_has_calls(calls)
-
-    def test_refresh_firewall_none(self):
-        self.agent.refresh_firewall([])
-        self.assertFalse(self.firewall.called)
-
-    def test_refresh_firewall_with_firewall_disabled(self):
-        cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
-        self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
-        self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
-        self.agent.firewall.defer_apply = mock.Mock()
-        self.agent.refresh_firewall([self.fake_device])
-        self.assertFalse(self.agent.plugin_rpc.
-                         security_group_info_for_devices.called)
-        self.assertFalse(self.agent.plugin_rpc.
-                         security_group_rules_for_devices.called)
-        self.assertFalse(self.agent.firewall.defer_apply.called)
-
-    def test_refresh_firewall_with_noopfirewall(self):
-        self.agent.firewall = self.default_firewall
-        self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
-        self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
-        self.agent.firewall.defer_apply = mock.Mock()
-        self.agent.refresh_firewall([self.fake_device])
-        self.assertFalse(self.agent.plugin_rpc.
-                         security_group_info_for_devices.called)
-        self.assertFalse(self.agent.plugin_rpc.
-                         security_group_rules_for_devices.called)
-        self.assertFalse(self.agent.firewall.defer_apply.called)
-
-
-class SecurityGroupAgentEnhancedRpcTestCase(
-        BaseSecurityGroupAgentRpcTestCase):
-
-    def setUp(self, defer_refresh_firewall=False):
-        super(SecurityGroupAgentEnhancedRpcTestCase, self).setUp(
-            defer_refresh_firewall=defer_refresh_firewall)
-        fake_sg_info = {
-            'security_groups': collections.OrderedDict([
-                ('fake_sgid2', []),
-                ('fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])]),
-            'sg_member_ips': {'fake_sgid2': {'IPv4': [], 'IPv6': []}},
-            'devices': self.firewall.ports}
-        self.agent.plugin_rpc.security_group_info_for_devices.return_value = (
-            fake_sg_info)
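-        # The OrderedDict keeps the update_security_group_rules calls in a
-        # deterministic order, which the in-order assert_has_calls checks
-        # below rely on.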
-
-    def test_prepare_and_remove_devices_filter_enhanced_rpc(self):
-        self.agent.prepare_devices_filter(['fake_device'])
-        self.agent.remove_devices_filter(['fake_device'])
-        # these two expected calls are long, so bind them to short names
-        tmp_mock1 = mock.call.update_security_group_rules(
-            'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])
-        tmp_mock2 = mock.call.update_security_group_members(
-            'fake_sgid2', {'IPv4': [], 'IPv6': []})
-        # a device which is not filtered is ignored
-        self.firewall.assert_has_calls([mock.call.defer_apply(),
-                                        mock.call.prepare_port_filter(
-                                            self.fake_device),
-                                        mock.call.update_security_group_rules(
-                                            'fake_sgid2', []),
-                                        tmp_mock1,
-                                        tmp_mock2,
-                                        mock.call.defer_apply(),
-                                        mock.call.remove_port_filter(
-                                            self.fake_device),
-                                        ])
-
-    def test_security_groups_rule_updated_enhanced_rpc(self):
-        sg_list = ['fake_sgid1', 'fake_sgid3']
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.security_groups_rule_updated(sg_list)
-        self.agent.refresh_firewall.assert_called_once_with(
-            [self.fake_device['device']])
-        self.firewall.security_group_updated.assert_called_once_with(
-            'sg_rule', set(sg_list))
-
-    def test_security_groups_rule_not_updated_enhanced_rpc(self):
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4'])
-        self.assertFalse(self.agent.refresh_firewall.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_member_updated_enhanced_rpc(self):
-        sg_list = ['fake_sgid2', 'fake_sgid3']
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.security_groups_member_updated(sg_list)
-        self.agent.refresh_firewall.assert_called_once_with(
-            [self.fake_device['device']])
-        self.firewall.security_group_updated.assert_called_once_with(
-            'sg_member', set(sg_list))
-
-    def test_security_groups_member_not_updated_enhanced_rpc(self):
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.security_groups_member_updated(
-            ['fake_sgid3', 'fake_sgid4'])
-        self.assertFalse(self.agent.refresh_firewall.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_provider_updated_enhanced_rpc(self):
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.security_groups_provider_updated(None)
-        self.agent.refresh_firewall.assert_called_once_with(None)
-
-    def test_refresh_firewall_enhanced_rpc(self):
-        self.agent.prepare_devices_filter(['fake_port_id'])
-        self.agent.refresh_firewall()
-        calls = [mock.call.defer_apply(),
-                 mock.call.prepare_port_filter(self.fake_device),
-                 mock.call.update_security_group_rules('fake_sgid2', []),
-                 mock.call.update_security_group_rules(
-                     'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]),
-                 mock.call.update_security_group_members(
-                     'fake_sgid2', {'IPv4': [], 'IPv6': []}),
-                 mock.call.defer_apply(),
-                 mock.call.update_port_filter(self.fake_device),
-                 mock.call.update_security_group_rules('fake_sgid2', []),
-                 mock.call.update_security_group_rules(
-                     'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]),
-                 mock.call.update_security_group_members(
-                     'fake_sgid2', {'IPv4': [], 'IPv6': []})]
-        self.firewall.assert_has_calls(calls)
-
-    def test_refresh_firewall_devices_enhanced_rpc(self):
-        self.agent.prepare_devices_filter(['fake_device'])
-        self.agent.refresh_firewall([self.fake_device])
-        calls = [mock.call.defer_apply(),
-                 mock.call.prepare_port_filter(self.fake_device),
-                 mock.call.update_security_group_rules('fake_sgid2', []),
-                 mock.call.update_security_group_rules('fake_sgid1', [
-                     {'remote_group_id': 'fake_sgid2'}]),
-                 mock.call.update_security_group_members('fake_sgid2', {
-                     'IPv4': [], 'IPv6': []
-                 }),
-                 mock.call.defer_apply(),
-                 mock.call.update_port_filter(self.fake_device),
-                 mock.call.update_security_group_rules('fake_sgid2', []),
-                 mock.call.update_security_group_rules('fake_sgid1', [
-                     {'remote_group_id': 'fake_sgid2'}]),
-                 mock.call.update_security_group_members('fake_sgid2', {
-                     'IPv4': [], 'IPv6': []})
-                 ]
-        self.firewall.assert_has_calls(calls)
-
-    def test_refresh_firewall_none_enhanced_rpc(self):
-        self.agent.refresh_firewall([])
-        self.assertFalse(self.firewall.called)
-
-
-class SecurityGroupAgentRpcWithDeferredRefreshTestCase(
-        SecurityGroupAgentRpcTestCase):
-
-    def setUp(self):
-        super(SecurityGroupAgentRpcWithDeferredRefreshTestCase, self).setUp(
-            defer_refresh_firewall=True)
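-        # With defer_refresh_firewall=True, updates only queue devices in
-        # devices_to_refilter (or set global_refresh_firewall); the actual
-        # refresh happens later, in setup_port_filters().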
-
-    @contextlib.contextmanager
-    def add_fake_device(self, device, sec_groups, source_sec_groups=None):
-        fake_device = {'device': device,
-                       'security_groups': sec_groups,
-                       'security_group_source_groups': source_sec_groups or [],
-                       'security_group_rules': [
-                           {'security_group_id': 'fake_sgid1',
-                            'remote_group_id': 'fake_sgid2'}]}
-        self.firewall.ports[device] = fake_device
-        yield
-        del self.firewall.ports[device]
-
-    def test_security_groups_rule_updated(self):
-        self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
-        self.assertIn('fake_device', self.agent.devices_to_refilter)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_multiple_security_groups_rule_updated_same_port(self):
-        with self.add_fake_device(device='fake_device_2',
-                                  sec_groups=['fake_sgidX']):
-            self.agent.refresh_firewall = mock.Mock()
-            self.agent.security_groups_rule_updated(['fake_sgid1'])
-            self.agent.security_groups_rule_updated(['fake_sgid2'])
-            self.assertIn('fake_device', self.agent.devices_to_refilter)
-            self.assertNotIn('fake_device_2', self.agent.devices_to_refilter)
-            self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_rule_updated_multiple_ports(self):
-        with self.add_fake_device(device='fake_device_2',
-                                  sec_groups=['fake_sgid2']):
-            self.agent.refresh_firewall = mock.Mock()
-            self.agent.security_groups_rule_updated(['fake_sgid1',
-                                                     'fake_sgid2'])
-            self.assertIn('fake_device', self.agent.devices_to_refilter)
-            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
-            self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_multiple_security_groups_rule_updated_multiple_ports(self):
-        with self.add_fake_device(device='fake_device_2',
-                                  sec_groups=['fake_sgid2']):
-            self.agent.refresh_firewall = mock.Mock()
-            self.agent.security_groups_rule_updated(['fake_sgid1'])
-            self.agent.security_groups_rule_updated(['fake_sgid2'])
-            self.assertIn('fake_device', self.agent.devices_to_refilter)
-            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
-            self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_member_updated(self):
-        self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3'])
-        self.assertIn('fake_device', self.agent.devices_to_refilter)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_multiple_security_groups_member_updated_same_port(self):
-        with self.add_fake_device(device='fake_device_2',
-                                  sec_groups=['fake_sgid1', 'fake_sgid1B'],
-                                  source_sec_groups=['fake_sgidX']):
-            self.agent.refresh_firewall = mock.Mock()
-            self.agent.security_groups_member_updated(['fake_sgid1',
-                                                       'fake_sgid3'])
-            self.agent.security_groups_member_updated(['fake_sgid2',
-                                                       'fake_sgid3'])
-            self.assertIn('fake_device', self.agent.devices_to_refilter)
-            self.assertNotIn('fake_device_2', self.agent.devices_to_refilter)
-            self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_member_updated_multiple_ports(self):
-        with self.add_fake_device(device='fake_device_2',
-                                  sec_groups=['fake_sgid1', 'fake_sgid1B'],
-                                  source_sec_groups=['fake_sgid2']):
-            self.agent.security_groups_member_updated(['fake_sgid2'])
-            self.assertIn('fake_device', self.agent.devices_to_refilter)
-            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
-            self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_multiple_security_groups_member_updated_multiple_ports(self):
-        with self.add_fake_device(device='fake_device_2',
-                                  sec_groups=['fake_sgid1', 'fake_sgid1B'],
-                                  source_sec_groups=['fake_sgid1B']):
-            self.agent.security_groups_member_updated(['fake_sgid1B'])
-            self.agent.security_groups_member_updated(['fake_sgid2'])
-            self.assertIn('fake_device', self.agent.devices_to_refilter)
-            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
-            self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_security_groups_provider_updated(self):
-        self.agent.security_groups_provider_updated(None)
-        self.assertTrue(self.agent.global_refresh_firewall)
-
-    def test_security_groups_provider_updated_devices_specified(self):
-        self.agent.security_groups_provider_updated(
-            ['fake_device_1', 'fake_device_2'])
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.assertIn('fake_device_1', self.agent.devices_to_refilter)
-        self.assertIn('fake_device_2', self.agent.devices_to_refilter)
-
-    def test_setup_port_filters_new_ports_only(self):
-        self.agent.prepare_devices_filter = mock.Mock()
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set()
-        self.agent.global_refresh_firewall = False
-        self.agent.setup_port_filters(set(['fake_new_device']), set())
-        self.assertFalse(self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.agent.prepare_devices_filter.assert_called_once_with(
-            set(['fake_new_device']))
-        self.assertFalse(self.agent.refresh_firewall.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_setup_port_filters_updated_ports_only(self):
-        self.agent.prepare_devices_filter = mock.Mock()
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set()
-        self.agent.global_refresh_firewall = False
-        self.agent.setup_port_filters(set(), set(['fake_updated_device']))
-        self.assertFalse(self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.agent.refresh_firewall.assert_called_once_with(
-            set(['fake_updated_device']))
-        self.assertFalse(self.agent.prepare_devices_filter.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_setup_port_filter_new_and_updated_ports(self):
-        self.agent.prepare_devices_filter = mock.Mock()
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set()
-        self.agent.global_refresh_firewall = False
-        self.agent.setup_port_filters(set(['fake_new_device']),
-                                      set(['fake_updated_device']))
-        self.assertFalse(self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.agent.prepare_devices_filter.assert_called_once_with(
-            set(['fake_new_device']))
-        self.agent.refresh_firewall.assert_called_once_with(
-            set(['fake_updated_device']))
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_setup_port_filters_sg_updates_only(self):
-        self.agent.prepare_devices_filter = mock.Mock()
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set(['fake_device'])
-        self.agent.global_refresh_firewall = False
-        self.agent.setup_port_filters(set(), set())
-        self.assertFalse(self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.agent.refresh_firewall.assert_called_once_with(
-            set(['fake_device']))
-        self.assertFalse(self.agent.prepare_devices_filter.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_setup_port_filters_sg_updates_and_new_ports(self):
-        self.agent.prepare_devices_filter = mock.Mock()
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set(['fake_device'])
-        self.agent.global_refresh_firewall = False
-        self.agent.setup_port_filters(set(['fake_new_device']), set())
-        self.assertFalse(self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.agent.prepare_devices_filter.assert_called_once_with(
-            set(['fake_new_device']))
-        self.agent.refresh_firewall.assert_called_once_with(
-            set(['fake_device']))
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def _test_prepare_devices_filter(self, devices):
-        # simulate an RPC arriving and calling _security_group_updated()
-        self.agent.devices_to_refilter |= set(['fake_new_device'])
-
-    def test_setup_port_filters_new_port_and_rpc(self):
-        # Make sure that if an RPC arrives and adds a device to
-        # devices_to_refilter while we are in setup_port_filters(), the
-        # device is not cleared and will be processed later.
-        self.agent.prepare_devices_filter = self._test_prepare_devices_filter
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set(['new_device', 'fake_device'])
-        self.agent.global_refresh_firewall = False
-        self.agent.setup_port_filters(set(['new_device']), set())
-        self.assertEqual(set(['fake_new_device']),
-                         self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.agent.refresh_firewall.assert_called_once_with(
-            set(['fake_device']))
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_setup_port_filters_sg_updates_and_updated_ports(self):
-        self.agent.prepare_devices_filter = mock.Mock()
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2'])
-        self.agent.global_refresh_firewall = False
-        self.agent.setup_port_filters(
-            set(), set(['fake_device', 'fake_updated_device']))
-        self.assertFalse(self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.agent.refresh_firewall.assert_called_once_with(
-            set(['fake_device', 'fake_device_2', 'fake_updated_device']))
-        self.assertFalse(self.agent.prepare_devices_filter.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_setup_port_filters_all_updates(self):
-        self.agent.prepare_devices_filter = mock.Mock()
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2'])
-        self.agent.global_refresh_firewall = False
-        self.agent.setup_port_filters(
-            set(['fake_new_device']),
-            set(['fake_device', 'fake_updated_device']))
-        self.assertFalse(self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.agent.prepare_devices_filter.assert_called_once_with(
-            set(['fake_new_device']))
-        self.agent.refresh_firewall.assert_called_once_with(
-            set(['fake_device', 'fake_device_2', 'fake_updated_device']))
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_setup_port_filters_no_update(self):
-        self.agent.prepare_devices_filter = mock.Mock()
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set()
-        self.agent.global_refresh_firewall = False
-        self.agent.setup_port_filters(set(), set())
-        self.assertFalse(self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.assertFalse(self.agent.refresh_firewall.called)
-        self.assertFalse(self.agent.prepare_devices_filter.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-    def test_setup_port_filters_with_global_refresh(self):
-        self.agent.prepare_devices_filter = mock.Mock()
-        self.agent.refresh_firewall = mock.Mock()
-        self.agent.devices_to_refilter = set()
-        self.agent.global_refresh_firewall = True
-        self.agent.setup_port_filters(set(), set())
-        self.assertFalse(self.agent.devices_to_refilter)
-        self.assertFalse(self.agent.global_refresh_firewall)
-        self.agent.refresh_firewall.assert_called_once_with()
-        self.assertFalse(self.agent.prepare_devices_filter.called)
-        self.assertFalse(self.firewall.security_group_updated.called)
-
-
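-# FakeSGNotifierAPI attaches the notifier mixin to a real RPC client so the
-# tests below can patch prepare/cast and observe outgoing casts without a
-# message bus.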
-class FakeSGNotifierAPI(sg_rpc.SecurityGroupAgentRpcApiMixin):
-    def __init__(self):
-        self.topic = 'fake'
-        target = oslo_messaging.Target(topic=self.topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-
-class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(SecurityGroupAgentRpcApiTestCase, self).setUp()
-        self.notifier = FakeSGNotifierAPI()
-        self.mock_prepare = mock.patch.object(
-            self.notifier.client, 'prepare',
-            return_value=self.notifier.client).start()
-        self.mock_cast = mock.patch.object(
-            self.notifier.client, 'cast').start()
-
-    def test_security_groups_provider_updated(self):
-        self.notifier.security_groups_provider_updated(None)
-        self.mock_cast.assert_has_calls(
-            [mock.call(None, 'security_groups_provider_updated',
-                       devices_to_update=None)])
-
-    def test_security_groups_rule_updated(self):
-        self.notifier.security_groups_rule_updated(
-            None, security_groups=['fake_sgid'])
-        self.mock_cast.assert_has_calls(
-            [mock.call(None, 'security_groups_rule_updated',
-                       security_groups=['fake_sgid'])])
-
-    def test_security_groups_member_updated(self):
-        self.notifier.security_groups_member_updated(
-            None, security_groups=['fake_sgid'])
-        self.mock_cast.assert_has_calls(
-            [mock.call(None, 'security_groups_member_updated',
-                       security_groups=['fake_sgid'])])
-
-    def test_security_groups_rule_not_updated(self):
-        self.notifier.security_groups_rule_updated(
-            None, security_groups=[])
-        self.assertFalse(self.mock_cast.called)
-
-    def test_security_groups_member_not_updated(self):
-        self.notifier.security_groups_member_updated(
-            None, security_groups=[])
-        self.assertFalse(self.mock_cast.called)
-
-# Note(nati): bn -> binary_name
-# id -> device_id
-
-PHYSDEV_MOD = '-m physdev'
-PHYSDEV_IS_BRIDGED = '--physdev-is-bridged'
-
-IPTABLES_ARG = {'bn': iptables_manager.binary_name,
-                'physdev_mod': PHYSDEV_MOD,
-                'physdev_is_bridged': PHYSDEV_IS_BRIDGED}
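-
-# Note: in the expected iptables output below, the parenthesized %(chains)s
-# alternations and the --physdev-INGRESS/--physdev-EGRESS tokens are
-# presumably expanded or regex-matched by helpers elsewhere in this file
-# (not shown here); they are not literal iptables options.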
-
-CHAINS_MANGLE = 'FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING|mark'
-IPTABLES_ARG['chains'] = CHAINS_MANGLE
-
-IPTABLES_MANGLE = """# Generated by iptables_manager
-*mangle
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:POSTROUTING - [0:0]
-:PREROUTING - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j %(bn)s-OUTPUT
--I POSTROUTING 1 -j %(bn)s-POSTROUTING
--I PREROUTING 1 -j %(bn)s-PREROUTING
--I %(bn)s-PREROUTING 1 -j %(bn)s-mark
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-CHAINS_NAT = 'OUTPUT|POSTROUTING|PREROUTING|float-snat|snat'
-
-IPTABLES_ARG['port1'] = 'port1'
-IPTABLES_ARG['port2'] = 'port2'
-IPTABLES_ARG['mac1'] = '12:34:56:78:9A:BC'
-IPTABLES_ARG['mac2'] = '12:34:56:78:9A:BD'
-IPTABLES_ARG['ip1'] = '10.0.0.3/32'
-IPTABLES_ARG['ip2'] = '10.0.0.4/32'
-IPTABLES_ARG['chains'] = CHAINS_NAT
-
-IPTABLES_RAW_DEFAULT = """# Generated by iptables_manager
-*raw
-:OUTPUT - [0:0]
-:PREROUTING - [0:0]
-:%(bn)s-OUTPUT - [0:0]
-:%(bn)s-PREROUTING - [0:0]
--I OUTPUT 1 -j %(bn)s-OUTPUT
--I PREROUTING 1 -j %(bn)s-PREROUTING
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_RAW_DEVICE_1 = """# Generated by iptables_manager
-*raw
-:OUTPUT - [0:0]
-:PREROUTING - [0:0]
-:%(bn)s-OUTPUT - [0:0]
-:%(bn)s-PREROUTING - [0:0]
--I OUTPUT 1 -j %(bn)s-OUTPUT
--I PREROUTING 1 -j %(bn)s-PREROUTING
--I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_port1 -j CT --zone 1
--I %(bn)s-PREROUTING 2 -m physdev --physdev-in tap_port1 -j CT --zone 1
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_RAW_DEVICE_2 = """# Generated by iptables_manager
-*raw
-:OUTPUT - [0:0]
-:PREROUTING - [0:0]
-:%(bn)s-OUTPUT - [0:0]
-:%(bn)s-PREROUTING - [0:0]
--I OUTPUT 1 -j %(bn)s-OUTPUT
--I PREROUTING 1 -j %(bn)s-PREROUTING
--I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_%(port1)s \
--j CT --zone 1
--I %(bn)s-PREROUTING 2 -m physdev --physdev-in tap_%(port1)s -j CT --zone 1
--I %(bn)s-PREROUTING 3 -m physdev --physdev-in qvbtap_%(port2)s \
--j CT --zone 2
--I %(bn)s-PREROUTING 4 -m physdev --physdev-in tap_%(port2)s -j CT --zone 2
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_NAT = """# Generated by iptables_manager
-*nat
-:OUTPUT - [0:0]
-:POSTROUTING - [0:0]
-:PREROUTING - [0:0]
-:neutron-postrouting-bottom - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I OUTPUT 1 -j %(bn)s-OUTPUT
--I POSTROUTING 1 -j %(bn)s-POSTROUTING
--I POSTROUTING 2 -j neutron-postrouting-bottom
--I PREROUTING 1 -j %(bn)s-PREROUTING
--I neutron-postrouting-bottom 1 -j %(bn)s-snat
--I %(bn)s-snat 1 -j %(bn)s-float-snat
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-CHAINS_RAW = 'OUTPUT|PREROUTING'
-IPTABLES_ARG['chains'] = CHAINS_RAW
-
-IPTABLES_RAW = """# Generated by iptables_manager
-*raw
-:OUTPUT - [0:0]
-:PREROUTING - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I OUTPUT 1 -j %(bn)s-OUTPUT
--I PREROUTING 1 -j %(bn)s-PREROUTING
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-CHAINS_EMPTY = 'FORWARD|INPUT|OUTPUT|local|sg-chain|sg-fallback'
-CHAINS_1 = CHAINS_EMPTY + '|i_port1|o_port1|s_port1'
-CHAINS_2 = CHAINS_1 + '|i_port2|o_port2|s_port2'
-
-IPTABLES_ARG['chains'] = CHAINS_1
-
-IPSET_FILTER_1 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-o_port1
--I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 -m udp \
---dport 68 -j RETURN
--I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_port1 4 -m set --match-set NIPv4security_group1 src -j \
-RETURN
--I %(bn)s-i_port1 5 -m state --state INVALID -j DROP
--I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback
--I %(bn)s-o_port1 1 -p udp -m udp --sport 68 -m udp --dport 67 \
--j RETURN
--I %(bn)s-o_port1 2 -j %(bn)s-s_port1
--I %(bn)s-o_port1 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_port1 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_port1 5 -j RETURN
--I %(bn)s-o_port1 6 -m state --state INVALID -j DROP
--I %(bn)s-o_port1 7 -j %(bn)s-sg-fallback
--I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
--j RETURN
--I %(bn)s-s_port1 2 -j DROP
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-i_port1
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-o_port1
--I %(bn)s-sg-chain 3 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_FILTER_1 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-o_port1
--I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 -m udp \
---dport 68 -j RETURN
--I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_port1 4 -m state --state INVALID -j DROP
--I %(bn)s-i_port1 5 -j %(bn)s-sg-fallback
--I %(bn)s-o_port1 1 -p udp -m udp --sport 68 -m udp --dport 67 \
--j RETURN
--I %(bn)s-o_port1 2 -j %(bn)s-s_port1
--I %(bn)s-o_port1 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_port1 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_port1 5 -j RETURN
--I %(bn)s-o_port1 6 -m state --state INVALID -j DROP
--I %(bn)s-o_port1 7 -j %(bn)s-sg-fallback
--I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
--j RETURN
--I %(bn)s-s_port1 2 -j DROP
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-i_port1
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-o_port1
--I %(bn)s-sg-chain 3 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-
-IPTABLES_FILTER_1_2 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-o_port1
--I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 -m udp \
---dport 68 -j RETURN
--I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_port1 4 -s 10.0.0.4/32 -j RETURN
--I %(bn)s-i_port1 5 -m state --state INVALID -j DROP
--I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback
--I %(bn)s-o_port1 1 -p udp -m udp --sport 68 -m udp --dport 67 \
--j RETURN
--I %(bn)s-o_port1 2 -j %(bn)s-s_port1
--I %(bn)s-o_port1 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_port1 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_port1 5 -j RETURN
--I %(bn)s-o_port1 6 -m state --state INVALID -j DROP
--I %(bn)s-o_port1 7 -j %(bn)s-sg-fallback
--I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
--j RETURN
--I %(bn)s-s_port1 2 -j DROP
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-i_port1
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-o_port1
--I %(bn)s-sg-chain 3 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_ARG['chains'] = CHAINS_2
-
-IPSET_FILTER_2 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
--I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback
--I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
--I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port1)s 1 -p udp -m udp --sport 68 -m udp --dport 67 -j RETURN
--I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
--I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port1)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port1)s 5 -j RETURN
--I %(bn)s-o_%(port1)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port1)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port2)s 1 -p udp -m udp --sport 68 -m udp --dport 67 -j RETURN
--I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
--I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port2)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port2)s 5 -j RETURN
--I %(bn)s-o_%(port2)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port2)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
--I %(bn)s-s_%(port1)s 2 -j DROP
--I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
--I %(bn)s-s_%(port2)s 2 -j DROP
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
--I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-sg-chain 5 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPSET_FILTER_2_3 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
--I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN
--I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
--I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN
--I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port1)s 1 -p udp -m udp --sport 68 -m udp --dport 67 -j RETURN
--I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
--I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port1)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port1)s 5 -j RETURN
--I %(bn)s-o_%(port1)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port1)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port2)s 1 -p udp -m udp --sport 68 -m udp --dport 67 -j RETURN
--I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
--I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port2)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port2)s 5 -j RETURN
--I %(bn)s-o_%(port2)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port2)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
--I %(bn)s-s_%(port1)s 2 -j DROP
--I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
--I %(bn)s-s_%(port2)s 2 -j DROP
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
--I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-sg-chain 5 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_FILTER_2 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN
--I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback
--I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN
--I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port1)s 1 -p udp -m udp --sport 68 -m udp --dport 67 \
--j RETURN
--I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
--I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port1)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port1)s 5 -j RETURN
--I %(bn)s-o_%(port1)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port1)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port2)s 1 -p udp -m udp --sport 68 -m udp --dport 67 \
--j RETURN
--I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
--I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port2)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port2)s 5 -j RETURN
--I %(bn)s-o_%(port2)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port2)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
--I %(bn)s-s_%(port1)s 2 -j DROP
--I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
--I %(bn)s-s_%(port2)s 2 -j DROP
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
--I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-sg-chain 5 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_FILTER_2_2 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port1)s 4 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port1)s 5 -j %(bn)s-sg-fallback
--I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN
--I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port1)s 1 -p udp -m udp --sport 68 -m udp --dport 67 -j RETURN
--I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
--I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port1)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port1)s 5 -j RETURN
--I %(bn)s-o_%(port1)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port1)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port2)s 1 -p udp -m udp --sport 68 -m udp --dport 67 -j RETURN
--I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
--I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port2)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port2)s 5 -j RETURN
--I %(bn)s-o_%(port2)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port2)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
--I %(bn)s-s_%(port1)s 2 -j DROP
--I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
--I %(bn)s-s_%(port2)s 2 -j DROP
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
--I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-sg-chain 5 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_FILTER_2_3 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN
--I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN
--I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--m udp --dport 68 -j RETURN
--I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
--I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN
--I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN
--I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port1)s 1 -p udp -m udp --sport 68 -m udp --dport 67 -j RETURN
--I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
--I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port1)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port1)s 5 -j RETURN
--I %(bn)s-o_%(port1)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port1)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port2)s 1 -p udp -m udp --sport 68 -m udp --dport 67 -j RETURN
--I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
--I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP
--I %(bn)s-o_%(port2)s 4 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port2)s 5 -j RETURN
--I %(bn)s-o_%(port2)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port2)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
--I %(bn)s-s_%(port1)s 2 -j DROP
--I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
--I %(bn)s-s_%(port2)s 2 -j DROP
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
--I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-sg-chain 5 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-
-IPTABLES_ARG['chains'] = CHAINS_EMPTY
-IPTABLES_FILTER_EMPTY = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-sg-chain 1 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_ARG['chains'] = CHAINS_1
-IPTABLES_FILTER_V6_1 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-o_port1
--I %(bn)s-i_port1 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN
--I %(bn)s-i_port1 2 -p ipv6-icmp -m icmp6 --icmpv6-type 131 -j RETURN
--I %(bn)s-i_port1 3 -p ipv6-icmp -m icmp6 --icmpv6-type 132 -j RETURN
--I %(bn)s-i_port1 4 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN
--I %(bn)s-i_port1 5 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN
--I %(bn)s-i_port1 6 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_port1 7 -m state --state INVALID -j DROP
--I %(bn)s-i_port1 8 -j %(bn)s-sg-fallback
--I %(bn)s-o_port1 1 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP
--I %(bn)s-o_port1 2 -p ipv6-icmp -j RETURN
--I %(bn)s-o_port1 3 -p udp -m udp --sport 546 -m udp --dport 547 -j RETURN
--I %(bn)s-o_port1 4 -p udp -m udp --sport 547 -m udp --dport 546 -j DROP
--I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_port1 6 -m state --state INVALID -j DROP
--I %(bn)s-o_port1 7 -j %(bn)s-sg-fallback
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-i_port1
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
-%(physdev_is_bridged)s -j %(bn)s-o_port1
--I %(bn)s-sg-chain 3 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-
-IPTABLES_ARG['chains'] = CHAINS_2
-
-IPTABLES_FILTER_V6_2 = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-sg-chain
--I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-i_%(port1)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN
--I %(bn)s-i_%(port1)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 131 -j RETURN
--I %(bn)s-i_%(port1)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 132 -j RETURN
--I %(bn)s-i_%(port1)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN
--I %(bn)s-i_%(port1)s 5 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN
--I %(bn)s-i_%(port1)s 6 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port1)s 7 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port1)s 8 -j %(bn)s-sg-fallback
--I %(bn)s-i_%(port2)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN
--I %(bn)s-i_%(port2)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 131 -j RETURN
--I %(bn)s-i_%(port2)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 132 -j RETURN
--I %(bn)s-i_%(port2)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN
--I %(bn)s-i_%(port2)s 5 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN
--I %(bn)s-i_%(port2)s 6 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-i_%(port2)s 7 -m state --state INVALID -j DROP
--I %(bn)s-i_%(port2)s 8 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port1)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP
--I %(bn)s-o_%(port1)s 2 -p ipv6-icmp -j RETURN
--I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 546 -m udp --dport 547 -j RETURN
--I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 547 -m udp --dport 546 -j DROP
--I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port1)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port1)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-o_%(port2)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP
--I %(bn)s-o_%(port2)s 2 -p ipv6-icmp -j RETURN
--I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 546 -m udp --dport 547 -j RETURN
--I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 547 -m udp --dport 546 -j DROP
--I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
--I %(bn)s-o_%(port2)s 6 -m state --state INVALID -j DROP
--I %(bn)s-o_%(port2)s 7 -j %(bn)s-sg-fallback
--I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
--I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
--I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
--I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
-%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
--I %(bn)s-sg-chain 5 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-IPTABLES_ARG['chains'] = CHAINS_EMPTY
-IPTABLES_FILTER_V6_EMPTY = """# Generated by iptables_manager
-*filter
-:FORWARD - [0:0]
-:INPUT - [0:0]
-:OUTPUT - [0:0]
-:neutron-filter-top - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
-:%(bn)s-(%(chains)s) - [0:0]
--I FORWARD 1 -j neutron-filter-top
--I FORWARD 2 -j %(bn)s-FORWARD
--I INPUT 1 -j %(bn)s-INPUT
--I OUTPUT 1 -j neutron-filter-top
--I OUTPUT 2 -j %(bn)s-OUTPUT
--I neutron-filter-top 1 -j %(bn)s-local
--I %(bn)s-sg-chain 1 -j ACCEPT
--I %(bn)s-sg-fallback 1 -j DROP
-COMMIT
-# Completed by iptables_manager
-""" % IPTABLES_ARG
-
-
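-# The tests below never touch real iptables: every expected iptables-save /
-# iptables-restore invocation is pre-registered with _register_mock_call(),
-# and the process_input handed to *-restore is matched against the regex
-# templates defined above.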
-class TestSecurityGroupAgentWithIptables(base.BaseTestCase):
-    FIREWALL_DRIVER = FIREWALL_IPTABLES_DRIVER
-    PHYSDEV_INGRESS = 'physdev-out'
-    PHYSDEV_EGRESS = 'physdev-in'
-
-    def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True):
-        super(TestSecurityGroupAgentWithIptables, self).setUp()
-        set_firewall_driver(self.FIREWALL_DRIVER)
-        cfg.CONF.set_override('enable_ipset', False, group='SECURITYGROUP')
-        cfg.CONF.set_override('comment_iptables_rules', False, group='AGENT')
-
-        self.utils_exec = mock.patch(
-            'neutron.agent.linux.utils.execute').start()
-
-        self.rpc = mock.Mock()
-        self._init_agent(defer_refresh_firewall)
-
-        if test_rpc_v1_1:
-            self.rpc.security_group_info_for_devices.side_effect = (
-                oslo_messaging.UnsupportedVersion('1.2'))
-
-        self.iptables = self.agent.firewall.iptables
-        # TODO(jlibosva) Get rid of mocking iptables execute and mock out
-        # firewall instead
-        self.iptables.use_ipv6 = True
-        self.iptables_execute = mock.patch.object(self.iptables,
-                                                  "execute").start()
-        self.iptables_execute_return_values = []
-        self.expected_call_count = 0
-        self.expected_calls = []
-        self.expected_process_inputs = []
-        self.iptables_execute.side_effect = self.iptables_execute_return_values
-
-        rule1 = [{'direction': 'ingress',
-                  'protocol': const.PROTO_NAME_UDP,
-                  'ethertype': const.IPv4,
-                  'source_ip_prefix': '10.0.0.2/32',
-                  'source_port_range_min': 67,
-                  'source_port_range_max': 67,
-                  'port_range_min': 68,
-                  'port_range_max': 68},
-                 {'direction': 'ingress',
-                  'protocol': const.PROTO_NAME_TCP,
-                  'ethertype': const.IPv4,
-                  'port_range_min': 22,
-                  'port_range_max': 22},
-                 {'direction': 'egress',
-                  'ethertype': const.IPv4}]
-        rule2 = rule1[:]
-        rule2 += [{'direction': 'ingress',
-                  'source_ip_prefix': '10.0.0.4/32',
-                  'ethertype': const.IPv4}]
-        rule3 = rule2[:]
-        rule3 += [{'direction': 'ingress',
-                  'protocol': const.PROTO_NAME_ICMP,
-                  'ethertype': const.IPv4}]
-        rule4 = rule1[:]
-        rule4 += [{'direction': 'ingress',
-                  'source_ip_prefix': '10.0.0.3/32',
-                  'ethertype': const.IPv4}]
-        rule5 = rule4[:]
-        rule5 += [{'direction': 'ingress',
-                  'protocol': const.PROTO_NAME_ICMP,
-                  'ethertype': const.IPv4}]
-
-        self.devices1 = {'tap_port1': self._device('tap_port1',
-                                                   '10.0.0.3/32',
-                                                   '12:34:56:78:9a:bc',
-                                                   rule1)}
-        self.devices2 = collections.OrderedDict([
-            ('tap_port1', self._device('tap_port1',
-                                       '10.0.0.3/32',
-                                       '12:34:56:78:9a:bc',
-                                       rule2)),
-            ('tap_port2', self._device('tap_port2',
-                                       '10.0.0.4/32',
-                                       '12:34:56:78:9a:bd',
-                                       rule4))
-        ])
-        self.devices3 = collections.OrderedDict([
-            ('tap_port1', self._device('tap_port1',
-                                       '10.0.0.3/32',
-                                       '12:34:56:78:9a:bc',
-                                       rule3)),
-            ('tap_port2', self._device('tap_port2',
-                                       '10.0.0.4/32',
-                                       '12:34:56:78:9a:bd',
-                                       rule5))
-        ])
-        self.agent.firewall.security_group_updated = mock.Mock()
-
-    @staticmethod
-    def _enforce_order_in_firewall(firewall):
-        # for the sake of the test, eliminate any order randomness:
-        # it helps to match iptables output against regexps consistently
-        for attr in ('filtered_ports', 'unfiltered_ports'):
-            setattr(firewall, attr, collections.OrderedDict())
-
-    def _init_agent(self, defer_refresh_firewall):
-        self.agent = sg_rpc.SecurityGroupAgentRpc(
-            context=None, plugin_rpc=self.rpc,
-            defer_refresh_firewall=defer_refresh_firewall)
-        self._enforce_order_in_firewall(self.agent.firewall)
-
-    def _device(self, device, ip, mac_address, rule):
-        return {'device': device,
-                'network_id': 'fakenet',
-                'fixed_ips': [ip],
-                'mac_address': mac_address,
-                'security_groups': ['security_group1'],
-                'security_group_rules': rule,
-                'security_group_source_groups': [
-                    'security_group1']}
-
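-    # Escape the regex metacharacters that appear literally in the templates
-    # ('[', ']', '*') and swap in the driver-specific physdev match names;
-    # '(', ')' and '|' are left untouched so the chain alternations keep
-    # working as regexes.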
-    def _regex(self, value):
-        value = value.replace('physdev-INGRESS', self.PHYSDEV_INGRESS)
-        value = value.replace('physdev-EGRESS', self.PHYSDEV_EGRESS)
-        value = value.replace('\n', '\\n')
-        value = value.replace('[', r'\[')
-        value = value.replace(']', r'\]')
-        value = value.replace('*', r'\*')
-        return value
-
-    def _register_mock_call(self, *args, **kwargs):
-        return_value = kwargs.pop('return_value', None)
-        self.iptables_execute_return_values.append(return_value)
-
-        has_process_input = 'process_input' in kwargs
-        process_input = kwargs.get('process_input')
-        self.expected_process_inputs.append((has_process_input, process_input))
-
-        if has_process_input:
-            kwargs['process_input'] = mock.ANY
-        self.expected_calls.append(mock.call(*args, **kwargs))
-        self.expected_call_count += 1
-
-    def _verify_mock_calls(self, exp_fw_sg_updated_call=False):
-        self.assertEqual(self.expected_call_count,
-                         self.iptables_execute.call_count)
-        self.iptables_execute.assert_has_calls(self.expected_calls)
-
-        for i, expected in enumerate(self.expected_process_inputs):
-            check, expected_regex = expected
-            if not check:
-                continue
-            # All arguments after the first to self.iptables.execute are
-            # keyword parameters, so extract them via call_args_list[i][1]
-            kwargs = self.iptables_execute.call_args_list[i][1]
-            self.assertThat(kwargs['process_input'],
-                            matchers.MatchesRegex(expected_regex))
-
-        expected = ['net.bridge.bridge-nf-call-arptables=1',
-                    'net.bridge.bridge-nf-call-ip6tables=1',
-                    'net.bridge.bridge-nf-call-iptables=1']
-        for e in expected:
-            self.utils_exec.assert_any_call(['sysctl', '-w', e],
-                                            run_as_root=True)
-        self.assertEqual(exp_fw_sg_updated_call,
-                         self.agent.firewall.security_group_updated.called)
-
-    def _replay_iptables(self, v4_filter, v6_filter, raw):
-        self._register_mock_call(
-            ['iptables-save'],
-            run_as_root=True,
-            return_value='')
-        self._register_mock_call(
-            ['iptables-restore', '-n'],
-            process_input=self._regex(v4_filter + IPTABLES_MANGLE +
-                                      IPTABLES_NAT + raw),
-            run_as_root=True,
-            return_value='')
-        self._register_mock_call(
-            ['ip6tables-save'],
-            run_as_root=True,
-            return_value='')
-        self._register_mock_call(
-            ['ip6tables-restore', '-n'],
-            process_input=self._regex(v6_filter + raw),
-            run_as_root=True,
-            return_value='')
-
-    def test_prepare_remove_port(self):
-        self.rpc.security_group_rules_for_devices.return_value = self.devices1
-        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1'])
-        self.agent.remove_devices_filter(['tap_port1'])
-
-        self._verify_mock_calls()
-
-    def test_security_group_member_updated(self):
-        self.rpc.security_group_rules_for_devices.return_value = self.devices1
-        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1'])
-        self.rpc.security_group_rules_for_devices.return_value = self.devices2
-        self.agent.security_groups_member_updated(['security_group1'])
-        self.agent.prepare_devices_filter(['tap_port2'])
-        self.rpc.security_group_rules_for_devices.return_value = self.devices1
-        self.agent.security_groups_member_updated(['security_group1'])
-        self.agent.remove_devices_filter(['tap_port2'])
-        self.agent.remove_devices_filter(['tap_port1'])
-
-        self._verify_mock_calls()
-
-    def test_security_group_rule_updated(self):
-        self.rpc.security_group_rules_for_devices.return_value = self.devices2
-        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
-        self.rpc.security_group_rules_for_devices.return_value = self.devices3
-        self.agent.security_groups_rule_updated(['security_group1'])
-
-        self._verify_mock_calls()
-
-
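-# Variant driven by the enhanced security_group_info_for_devices RPC;
-# setUp(test_rpc_v1_1=False) keeps it from raising UnsupportedVersion, so
-# the agent never falls back to security_group_rules_for_devices.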
-class TestSecurityGroupAgentEnhancedRpcWithIptables(
-        TestSecurityGroupAgentWithIptables):
-    def setUp(self, defer_refresh_firewall=False):
-        super(TestSecurityGroupAgentEnhancedRpcWithIptables, self).setUp(
-            defer_refresh_firewall=defer_refresh_firewall, test_rpc_v1_1=False)
-        self.sg_info = self.rpc.security_group_info_for_devices
-
-        rule1 = [{'direction': 'ingress',
-                  'protocol': const.PROTO_NAME_UDP,
-                  'ethertype': const.IPv4,
-                  'source_ip_prefix': '10.0.0.2/32',
-                  'source_port_range_min': 67,
-                  'source_port_range_max': 67,
-                  'port_range_min': 68,
-                  'port_range_max': 68},
-                 {'direction': 'ingress',
-                  'protocol': const.PROTO_NAME_TCP,
-                  'ethertype': const.IPv4,
-                  'port_range_min': 22,
-                  'port_range_max': 22},
-                 {'direction': 'egress',
-                  'ethertype': const.IPv4},
-                 {'direction': 'ingress',
-                  'remote_group_id': 'security_group1',
-                  'ethertype': const.IPv4}]
-        rule2 = rule1[:]
-        rule2 += [{'direction': 'ingress',
-                  'protocol': const.PROTO_NAME_ICMP,
-                  'ethertype': const.IPv4}]
-
-        devices_info1 = {'tap_port1': self._device('tap_port1',
-                                                   '10.0.0.3/32',
-                                                   '12:34:56:78:9a:bc',
-                                                   [])}
-        self.devices_info1 = {'security_groups': {'security_group1': rule1},
-                              'sg_member_ips': {
-                                  'security_group1': {
-                                      'IPv4': ['10.0.0.3/32'], 'IPv6': []}},
-                              'devices': devices_info1}
-        devices_info2 = collections.OrderedDict([
-            ('tap_port1', self._device('tap_port1',
-                                       '10.0.0.3/32',
-                                       '12:34:56:78:9a:bc',
-                                       [])),
-            ('tap_port2', self._device('tap_port2',
-                                       '10.0.0.4/32',
-                                       '12:34:56:78:9a:bd',
-                                       []))
-        ])
-        self.devices_info2 = {'security_groups': {'security_group1': rule1},
-                              'sg_member_ips': {
-                                  'security_group1': {
-                                      'IPv4': ['10.0.0.3/32', '10.0.0.4/32'],
-                                      'IPv6': []}},
-                              'devices': devices_info2}
-        self.devices_info3 = {'security_groups': {'security_group1': rule2},
-                              'sg_member_ips': {
-                                  'security_group1': {
-                                      'IPv4': ['10.0.0.3/32', '10.0.0.4/32'],
-                                      'IPv6': []}},
-                              # Same membership as devices_info2; only the
-                              # rule set (rule2 adds ICMP) differs.
-                              'devices': devices_info2}
-
-    def test_prepare_remove_port(self):
-        self.sg_info.return_value = self.devices_info1
-        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1'])
-        self.agent.remove_devices_filter(['tap_port1'])
-
-        self._verify_mock_calls()
-
-    def test_security_group_member_updated(self):
-        self.sg_info.return_value = self.devices_info1
-        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1'])
-        self.sg_info.return_value = self.devices_info2
-        self.agent.security_groups_member_updated(['security_group1'])
-        self.agent.prepare_devices_filter(['tap_port2'])
-        self.sg_info.return_value = self.devices_info1
-        self.agent.security_groups_member_updated(['security_group1'])
-        self.agent.remove_devices_filter(['tap_port2'])
-        self.agent.remove_devices_filter(['tap_port1'])
-
-        self._verify_mock_calls(True)
-        self.assertEqual(
-            2, self.agent.firewall.security_group_updated.call_count)
-
-    def test_security_group_rule_updated(self):
-        self.sg_info.return_value = self.devices_info2
-        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
-        self.sg_info.return_value = self.devices_info3
-        self.agent.security_groups_rule_updated(['security_group1'])
-
-        self._verify_mock_calls(True)
-        self.agent.firewall.security_group_updated.assert_called_with(
-            'sg_rule', set(['security_group1']))
-
-
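-# Same enhanced-RPC scenarios with ipset enabled, so the expected IPv4
-# filter output switches to the IPSET_FILTER_* templates.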
-class TestSecurityGroupAgentEnhancedIpsetWithIptables(
-        TestSecurityGroupAgentEnhancedRpcWithIptables):
-    def setUp(self, defer_refresh_firewall=False):
-        super(TestSecurityGroupAgentEnhancedIpsetWithIptables, self).setUp(
-            defer_refresh_firewall)
-        self.agent.firewall.enable_ipset = True
-        self.ipset = self.agent.firewall.ipset
-        self.ipset_execute = mock.patch.object(self.ipset,
-                                               "execute").start()
-
-    def test_prepare_remove_port(self):
-        self.sg_info.return_value = self.devices_info1
-        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1'])
-        self.agent.remove_devices_filter(['tap_port1'])
-
-        self._verify_mock_calls()
-
-    def test_security_group_member_updated(self):
-        self.sg_info.return_value = self.devices_info1
-        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1'])
-        self.sg_info.return_value = self.devices_info2
-        self.agent.security_groups_member_updated(['security_group1'])
-        self.agent.prepare_devices_filter(['tap_port2'])
-        self.sg_info.return_value = self.devices_info1
-        self.agent.security_groups_member_updated(['security_group1'])
-        self.agent.remove_devices_filter(['tap_port2'])
-        self.agent.remove_devices_filter(['tap_port1'])
-
-        self._verify_mock_calls(True)
-        self.assertEqual(
-            2, self.agent.firewall.security_group_updated.call_count)
-
-    def test_security_group_rule_updated(self):
-        self.sg_info.return_value = self.devices_info2
-        self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-        self._replay_iptables(IPSET_FILTER_2_3, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
-        self.sg_info.return_value = self.devices_info3
-        self.agent.security_groups_rule_updated(['security_group1'])
-
-        self._verify_mock_calls(True)
-        self.agent.firewall.security_group_updated.assert_called_with(
-            'sg_rule', set(['security_group1']))
-
-
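-# Mixin verifying that rule and member changes on the API side emit the
-# matching security_groups_*_updated agent notifications.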
-class SGNotificationTestMixin(object):
-    def test_security_group_rule_updated(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            with self.security_group(name, description):
-                security_group_id = sg['security_group']['id']
-
-                rule = self._build_security_group_rule(
-                    security_group_id,
-                    direction='ingress',
-                    proto=const.PROTO_NAME_TCP)
-                security_group_rule = self._make_security_group_rule(self.fmt,
-                                                                     rule)
-                self._delete('security-group-rules',
-                             security_group_rule['security_group_rule']['id'])
-
-            self.notifier.assert_has_calls(
-                [mock.call.security_groups_rule_updated(mock.ANY,
-                                                        [security_group_id]),
-                 mock.call.security_groups_rule_updated(mock.ANY,
-                                                        [security_group_id])])
-
-    def test_security_group_member_updated(self):
-        with self.network() as n:
-            with self.subnet(n):
-                with self.security_group() as sg:
-                    security_group_id = sg['security_group']['id']
-                    res = self._create_port(self.fmt, n['network']['id'])
-                    port = self.deserialize(self.fmt, res)
-
-                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
-                                     'name': port['port']['name'],
-                                     ext_sg.SECURITYGROUPS:
-                                     [security_group_id]}}
-
-                    req = self.new_update_request('ports', data,
-                                                  port['port']['id'])
-                    res = self.deserialize(self.fmt,
-                                           req.get_response(self.api))
-                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
-                                     security_group_id)
-                    self._delete('ports', port['port']['id'])
-                    self.notifier.assert_has_calls(
-                        [mock.call.security_groups_member_updated(
-                            mock.ANY, [mock.ANY])])
-
-
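-# Hybrid OVS/iptables variant: device names gain a 'tap' prefix (see the
-# _regex override below) and the *raw templates with per-port CT zones are
-# expected instead of the defaults.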
-class TestSecurityGroupAgentWithOVSIptables(
-        TestSecurityGroupAgentWithIptables):
-
-    FIREWALL_DRIVER = FIREWALL_HYBRID_DRIVER
-
-    def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True):
-        super(TestSecurityGroupAgentWithOVSIptables, self).setUp(
-                                                    defer_refresh_firewall,
-                                                    test_rpc_v1_1)
-
-    def _init_agent(self, defer_refresh_firewall):
-        fake_map = ovs_neutron_agent.LocalVLANMapping(1, 'network_type',
-                                                      'physical_network', 1)
-        local_vlan_map = {'fakenet': fake_map}
-        self.agent = sg_rpc.SecurityGroupAgentRpc(
-            context=None, plugin_rpc=self.rpc,
-            local_vlan_map=local_vlan_map,
-            defer_refresh_firewall=defer_refresh_firewall)
-        self._enforce_order_in_firewall(self.agent.firewall)
-
-    def test_prepare_remove_port(self):
-        self.rpc.security_group_rules_for_devices.return_value = self.devices1
-        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEVICE_1)
-        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1'])
-        self.agent.remove_devices_filter(['tap_port1'])
-
-        self._verify_mock_calls()
-
-    def test_security_group_member_updated(self):
-        self.rpc.security_group_rules_for_devices.return_value = self.devices1
-        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEVICE_1)
-        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEVICE_1)
-        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEVICE_2)
-        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEVICE_2)
-        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
-                              IPTABLES_RAW_DEVICE_1)
-        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
-                              IPTABLES_RAW_DEFAULT)
-
-        self.agent.prepare_devices_filter(['tap_port1'])
-        self.rpc.security_group_rules_for_devices.return_value = self.devices2
-        self.agent.security_groups_member_updated(['security_group1'])
-        self.agent.prepare_devices_filter(['tap_port2'])
-        self.rpc.security_group_rules_for_devices.return_value = self.devices1
-        self.agent.security_groups_member_updated(['security_group1'])
-        self.agent.remove_devices_filter(['tap_port2'])
-        self.agent.remove_devices_filter(['tap_port1'])
-
-        self._verify_mock_calls()
-
-    def test_security_group_rule_updated(self):
-        self.rpc.security_group_rules_for_devices.return_value = self.devices2
-        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEVICE_2)
-        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
-                              IPTABLES_RAW_DEVICE_2)
-
-        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
-        self.rpc.security_group_rules_for_devices.return_value = self.devices3
-        self.agent.security_groups_rule_updated(['security_group1'])
-
-        self._verify_mock_calls()
-
-    def _regex(self, value):
-        # NOTE(nati): the OVSHybridIptablesFirewallDriver prefixes 'tap'
-        # to the device name, so the expected names are rewritten here.
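-        # The blanket 'tap_port' -> 'taptap_port' replace below also hits
-        # the 'qvbtap_port' names, so those are restored on the next line;
-        # the o_/i_/s_ chain names then get the tap prefix inserted.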
-
-        value = value.replace('tap_port', 'taptap_port')
-        value = value.replace('qvbtaptap_port', 'qvbtap_port')
-        value = value.replace('o_port', 'otap_port')
-        value = value.replace('i_port', 'itap_port')
-        value = value.replace('s_port', 'stap_port')
-        return super(
-            TestSecurityGroupAgentWithOVSIptables,
-            self)._regex(value)
-
-
-class TestSecurityGroupExtensionControl(base.BaseTestCase):
-    def test_disable_security_group_extension_by_config(self):
-        set_enable_security_groups(False)
-        exp_aliases = ['dummy1', 'dummy2']
-        ext_aliases = ['dummy1', 'security-group', 'dummy2']
-        sg_rpc.disable_security_group_extension_by_config(ext_aliases)
-        self.assertEqual(ext_aliases, exp_aliases)
-
-    def test_enable_security_group_extension_by_config(self):
-        set_enable_security_groups(True)
-        exp_aliases = ['dummy1', 'security-group', 'dummy2']
-        ext_aliases = ['dummy1', 'security-group', 'dummy2']
-        sg_rpc.disable_security_group_extension_by_config(ext_aliases)
-        self.assertEqual(ext_aliases, exp_aliases)
-
-    def test_is_invalid_driver_combination_sg_enabled(self):
-        set_enable_security_groups(True)
-        set_firewall_driver(FIREWALL_NOOP_DRIVER)
-        self.assertFalse(sg_rpc._is_valid_driver_combination())
-
-    def test_is_invalid_driver_combination_sg_enabled_with_none(self):
-        set_enable_security_groups(True)
-        set_firewall_driver(None)
-        self.assertFalse(sg_rpc._is_valid_driver_combination())
-
-    def test_is_invalid_driver_combination_sg_disabled(self):
-        set_enable_security_groups(False)
-        set_firewall_driver('NonNoopDriver')
-        self.assertFalse(sg_rpc._is_valid_driver_combination())
-
-    def test_is_valid_driver_combination_sg_enabled(self):
-        set_enable_security_groups(True)
-        set_firewall_driver('NonNoopDriver')
-        self.assertTrue(sg_rpc._is_valid_driver_combination())
-
-    def test_is_valid_driver_combination_sg_disabled(self):
-        set_enable_security_groups(False)
-        set_firewall_driver(FIREWALL_NOOP_DRIVER)
-        self.assertTrue(sg_rpc._is_valid_driver_combination())
-
-    def test_is_valid_driver_combination_sg_disabled_with_none(self):
-        set_enable_security_groups(False)
-        set_firewall_driver(None)
-        self.assertTrue(sg_rpc._is_valid_driver_combination())
diff --git a/neutron/tests/unit/api/__init__.py b/neutron/tests/unit/api/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/api/rpc/__init__.py b/neutron/tests/unit/api/rpc/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/api/rpc/agentnotifiers/__init__.py b/neutron/tests/unit/api/rpc/agentnotifiers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py b/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py
deleted file mode 100644 (file)
index cd2ab2a..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-# Copyright (c) 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import datetime
-import mock
-
-from oslo_utils import timeutils
-
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.common import utils
-from neutron.db import agents_db
-from neutron.db.agentschedulers_db import cfg
-from neutron.tests import base
-
-
-class TestDhcpAgentNotifyAPI(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestDhcpAgentNotifyAPI, self).setUp()
-        self.notifier = (
-            dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock()))
-
-        mock_util_p = mock.patch.object(utils, 'is_extension_supported')
-        mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG')
-        mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message')
-        mock_cast_p = mock.patch.object(self.notifier, '_cast_message')
-        self.mock_util = mock_util_p.start()
-        self.mock_log = mock_log_p.start()
-        self.mock_fanout = mock_fanout_p.start()
-        self.mock_cast = mock_cast_p.start()
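-        # With _fanout_message and _cast_message patched out, the tests
-        # below only assert how often each delivery path is used.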
-
-    def _test__schedule_network(self, network,
-                                new_agents=None, existing_agents=None,
-                                expected_casts=0, expected_warnings=0):
-        self.notifier.plugin.schedule_network.return_value = new_agents
-        agents = self.notifier._schedule_network(
-            mock.ANY, network, existing_agents)
-        if new_agents is None:
-            new_agents = []
-        self.assertEqual(new_agents + existing_agents, agents)
-        self.assertEqual(expected_casts, self.mock_cast.call_count)
-        self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
-
-    def test__schedule_network(self):
-        agent = agents_db.Agent()
-        agent.admin_state_up = True
-        agent.heartbeat_timestamp = timeutils.utcnow()
-        network = {'id': 'foo_net_id'}
-        self._test__schedule_network(network,
-                                     new_agents=[agent], existing_agents=[],
-                                     expected_casts=1, expected_warnings=0)
-
-    def test__schedule_network_no_existing_agents(self):
-        agent = agents_db.Agent()
-        agent.admin_state_up = True
-        agent.heartbeat_timestamp = timeutils.utcnow()
-        network = {'id': 'foo_net_id'}
-        self._test__schedule_network(network,
-                                     new_agents=None, existing_agents=[agent],
-                                     expected_casts=0, expected_warnings=0)
-
-    def test__schedule_network_no_new_agents(self):
-        network = {'id': 'foo_net_id'}
-        self._test__schedule_network(network,
-                                     new_agents=None, existing_agents=[],
-                                     expected_casts=0, expected_warnings=1)
-
-    def _test__get_enabled_agents(self, network,
-                                  agents=None, port_count=0,
-                                  expected_warnings=0, expected_errors=0):
-        self.notifier.plugin.get_ports_count.return_value = port_count
-        enabled_agents = self.notifier._get_enabled_agents(
-            mock.ANY, network, agents, mock.ANY, mock.ANY)
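-        # Agents that are administratively down are filtered out unless
-        # the operator has explicitly opted in via configuration.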
-        if not cfg.CONF.enable_services_on_agents_with_admin_state_down:
-            agents = [x for x in agents if x.admin_state_up]
-        self.assertEqual(agents, enabled_agents)
-        self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
-        self.assertEqual(expected_errors, self.mock_log.error.call_count)
-
-    def test__get_enabled_agents(self):
-        agent1 = agents_db.Agent()
-        agent1.admin_state_up = True
-        agent1.heartbeat_timestamp = timeutils.utcnow()
-        agent2 = agents_db.Agent()
-        agent2.admin_state_up = False
-        agent2.heartbeat_timestamp = timeutils.utcnow()
-        network = {'id': 'foo_network_id'}
-        self._test__get_enabled_agents(network, agents=[agent1])
-
-    def test__get_enabled_agents_with_inactive_ones(self):
-        agent1 = agents_db.Agent()
-        agent1.admin_state_up = True
-        agent1.heartbeat_timestamp = timeutils.utcnow()
-        agent2 = agents_db.Agent()
-        agent2.admin_state_up = True
-        # This is effectively an inactive agent
-        agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0)
-        network = {'id': 'foo_network_id'}
-        self._test__get_enabled_agents(network,
-                                       agents=[agent1, agent2],
-                                       expected_warnings=1, expected_errors=0)
-
-    def test__get_enabled_agents_with_notification_required(self):
-        network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']}
-        agent = agents_db.Agent()
-        agent.admin_state_up = False
-        agent.heartbeat_timestamp = timeutils.utcnow()
-        self._test__get_enabled_agents(network, [agent], port_count=20,
-                                       expected_warnings=0, expected_errors=1)
-
-    def test__get_enabled_agents_with_admin_state_down(self):
-        cfg.CONF.set_override(
-            'enable_services_on_agents_with_admin_state_down', True)
-        agent1 = agents_db.Agent()
-        agent1.admin_state_up = True
-        agent1.heartbeat_timestamp = timeutils.utcnow()
-        agent2 = agents_db.Agent()
-        agent2.admin_state_up = False
-        agent2.heartbeat_timestamp = timeutils.utcnow()
-        network = {'id': 'foo_network_id'}
-        self._test__get_enabled_agents(network, agents=[agent1, agent2])
-
-    def test__notify_agents_fanout_required(self):
-        self.notifier._notify_agents(mock.ANY,
-                                     'network_delete_end',
-                                     mock.ANY, 'foo_network_id')
-        self.assertEqual(1, self.mock_fanout.call_count)
-
-    def _test__notify_agents(self, method,
-                             expected_scheduling=0, expected_casts=0):
-        with mock.patch.object(self.notifier, '_schedule_network') as f:
-            with mock.patch.object(self.notifier, '_get_enabled_agents') as g:
-                agent = agents_db.Agent()
-                agent.admin_state_up = True
-                agent.heartbeat_timestamp = timeutils.utcnow()
-                g.return_value = [agent]
-                dummy_payload = {'port': {}}
-                self.notifier._notify_agents(mock.Mock(), method,
-                                             dummy_payload, 'foo_network_id')
-                self.assertEqual(expected_scheduling, f.call_count)
-                self.assertEqual(expected_casts, self.mock_cast.call_count)
-
-    def test__notify_agents_cast_required_with_scheduling(self):
-        self._test__notify_agents('port_create_end',
-                                  expected_scheduling=1, expected_casts=1)
-
-    def test__notify_agents_cast_required_wo_scheduling_on_port_update(self):
-        self._test__notify_agents('port_update_end',
-                                  expected_scheduling=0, expected_casts=1)
-
-    def test__notify_agents_cast_required_with_scheduling_subnet_create(self):
-        self._test__notify_agents('subnet_create_end',
-                                  expected_scheduling=1, expected_casts=1)
-
-    def test__notify_agents_no_action(self):
-        self._test__notify_agents('network_create_end',
-                                  expected_scheduling=0, expected_casts=0)
-
-    def test__fanout_message(self):
-        self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY)
-        self.assertEqual(1, self.mock_fanout.call_count)
-
-    def test__cast_message(self):
-        self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY)
-        self.assertEqual(1, self.mock_cast.call_count)
diff --git a/neutron/tests/unit/api/rpc/callbacks/__init__.py b/neutron/tests/unit/api/rpc/callbacks/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/api/rpc/callbacks/consumer/__init__.py b/neutron/tests/unit/api/rpc/callbacks/consumer/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py b/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py
deleted file mode 100644 (file)
index d07b49c..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.api.rpc.callbacks.consumer import registry
-from neutron.tests import base
-
-
-class ConsumerRegistryTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(ConsumerRegistryTestCase, self).setUp()
-
-    def test__get_manager_is_singleton(self):
-        self.assertIs(registry._get_manager(), registry._get_manager())
-
-    @mock.patch.object(registry, '_get_manager')
-    def test_subscribe(self, manager_mock):
-        callback = lambda: None
-        registry.subscribe(callback, 'TYPE')
-        manager_mock().register.assert_called_with(callback, 'TYPE')
-
-    @mock.patch.object(registry, '_get_manager')
-    def test_unsubscribe(self, manager_mock):
-        callback = lambda: None
-        registry.unsubscribe(callback, 'TYPE')
-        manager_mock().unregister.assert_called_with(callback, 'TYPE')
-
-    @mock.patch.object(registry, '_get_manager')
-    def test_clear(self, manager_mock):
-        registry.clear()
-        manager_mock().clear.assert_called_with()
-
-    @mock.patch.object(registry, '_get_manager')
-    def test_push(self, manager_mock):
-        resource_type_ = object()
-        resource_ = object()
-        event_type_ = object()
-
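-        # push() should fan the resource out to every callback that the
-        # manager reports for this resource type.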
-        callback1 = mock.Mock()
-        callback2 = mock.Mock()
-        callbacks = {callback1, callback2}
-        manager_mock().get_callbacks.return_value = callbacks
-        registry.push(resource_type_, resource_, event_type_)
-        for callback in callbacks:
-            callback.assert_called_with(resource_, event_type_)
diff --git a/neutron/tests/unit/api/rpc/callbacks/producer/__init__.py b/neutron/tests/unit/api/rpc/callbacks/producer/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py b/neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py
deleted file mode 100644 (file)
index 5b7b049..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.rpc.callbacks import exceptions
-from neutron.api.rpc.callbacks.producer import registry
-from neutron.api.rpc.callbacks import resources
-from neutron.objects.qos import policy
-from neutron.tests.unit.services.qos import base
-
-
-class ProducerRegistryTestCase(base.BaseQosTestCase):
-
-    def test_pull_returns_callback_result(self):
-        policy_obj = policy.QosPolicy(context=None)
-
-        def _fake_policy_cb(*args, **kwargs):
-            return policy_obj
-
-        registry.provide(_fake_policy_cb, resources.QOS_POLICY)
-
-        self.assertEqual(
-            policy_obj,
-            registry.pull(resources.QOS_POLICY, 'fake_id'))
-
-    def test_pull_does_not_raise_on_none(self):
-        def _none_cb(*args, **kwargs):
-            pass
-
-        registry.provide(_none_cb, resources.QOS_POLICY)
-
-        obj = registry.pull(resources.QOS_POLICY, 'fake_id')
-        self.assertIsNone(obj)
-
-    def test_pull_raises_on_wrong_object_type(self):
-        def _wrong_type_cb(*args, **kwargs):
-            return object()
-
-        registry.provide(_wrong_type_cb, resources.QOS_POLICY)
-
-        self.assertRaises(
-            exceptions.CallbackWrongResourceType,
-            registry.pull, resources.QOS_POLICY, 'fake_id')
-
-    def test_pull_raises_on_callback_not_found(self):
-        self.assertRaises(
-            exceptions.CallbackNotFound,
-            registry.pull, resources.QOS_POLICY, 'fake_id')
-
-    def test__get_manager_is_singleton(self):
-        self.assertIs(registry._get_manager(), registry._get_manager())
-
-    def test_unprovide(self):
-        def _fake_policy_cb(*args, **kwargs):
-            pass
-
-        registry.provide(_fake_policy_cb, resources.QOS_POLICY)
-        registry.unprovide(_fake_policy_cb, resources.QOS_POLICY)
-
-        self.assertRaises(
-            exceptions.CallbackNotFound,
-            registry.pull, resources.QOS_POLICY, 'fake_id')
-
-    def test_clear_unprovides_all_producers(self):
-        def _fake_policy_cb(*args, **kwargs):
-            pass
-
-        registry.provide(_fake_policy_cb, resources.QOS_POLICY)
-        registry.clear()
-
-        self.assertRaises(
-            exceptions.CallbackNotFound,
-            registry.pull, resources.QOS_POLICY, 'fake_id')
diff --git a/neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py b/neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py
deleted file mode 100644 (file)
index 79d5ed5..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.api.rpc.callbacks import exceptions as rpc_exc
-from neutron.api.rpc.callbacks import resource_manager
-from neutron.callbacks import exceptions
-from neutron.tests.unit.services.qos import base
-
-IS_VALID_RESOURCE_TYPE = (
-    'neutron.api.rpc.callbacks.resources.is_valid_resource_type')
-
-
-class ResourceCallbacksManagerTestCaseMixin(object):
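-    # Behaviour shared by the producer and consumer manager test cases;
-    # each concrete case provides self.mgr in its setUp().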
-
-    def test_register_fails_on_invalid_type(self):
-        self.assertRaises(
-            exceptions.Invalid,
-            self.mgr.register, lambda: None, 'TYPE')
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_clear_unregisters_all_callbacks(self, *mocks):
-        self.mgr.register(lambda: None, 'TYPE1')
-        self.mgr.register(lambda: None, 'TYPE2')
-        self.mgr.clear()
-        self.assertEqual([], self.mgr.get_subscribed_types())
-
-    def test_unregister_fails_on_invalid_type(self):
-        self.assertRaises(
-            exceptions.Invalid,
-            self.mgr.unregister, lambda: None, 'TYPE')
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_unregister_fails_on_unregistered_callback(self, *mocks):
-        self.assertRaises(
-            rpc_exc.CallbackNotFound,
-            self.mgr.unregister, lambda: None, 'TYPE')
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_unregister_unregisters_callback(self, *mocks):
-        callback = lambda: None
-        self.mgr.register(callback, 'TYPE')
-        self.mgr.unregister(callback, 'TYPE')
-        self.assertEqual([], self.mgr.get_subscribed_types())
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test___init___does_not_reset_callbacks(self, *mocks):
-        callback = lambda: None
-        self.mgr.register(callback, 'TYPE')
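-        # Instantiating a second manager must not wipe the existing
-        # registration, which suggests state is kept per manager class.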
-        resource_manager.ProducerResourceCallbacksManager()
-        self.assertEqual(['TYPE'], self.mgr.get_subscribed_types())
-
-
-class ProducerResourceCallbacksManagerTestCase(
-    base.BaseQosTestCase, ResourceCallbacksManagerTestCaseMixin):
-
-    def setUp(self):
-        super(ProducerResourceCallbacksManagerTestCase, self).setUp()
-        self.mgr = self.prod_mgr
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_register_registers_callback(self, *mocks):
-        callback = lambda: None
-        self.mgr.register(callback, 'TYPE')
-        self.assertEqual(callback, self.mgr.get_callback('TYPE'))
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_register_fails_on_multiple_calls(self, *mocks):
-        self.mgr.register(lambda: None, 'TYPE')
-        self.assertRaises(
-            rpc_exc.CallbacksMaxLimitReached,
-            self.mgr.register, lambda: None, 'TYPE')
-
-    def test_get_callback_fails_on_invalid_type(self):
-        self.assertRaises(
-            exceptions.Invalid,
-            self.mgr.get_callback, 'TYPE')
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_get_callback_fails_on_unregistered_callback(
-            self, *mocks):
-        self.assertRaises(
-            rpc_exc.CallbackNotFound,
-            self.mgr.get_callback, 'TYPE')
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_get_callback_returns_proper_callback(self, *mocks):
-        callback1 = lambda: None
-        callback2 = lambda: None
-        self.mgr.register(callback1, 'TYPE1')
-        self.mgr.register(callback2, 'TYPE2')
-        self.assertEqual(callback1, self.mgr.get_callback('TYPE1'))
-        self.assertEqual(callback2, self.mgr.get_callback('TYPE2'))
-
-
-class ConsumerResourceCallbacksManagerTestCase(
-    base.BaseQosTestCase, ResourceCallbacksManagerTestCaseMixin):
-
-    def setUp(self):
-        super(ConsumerResourceCallbacksManagerTestCase, self).setUp()
-        self.mgr = self.cons_mgr
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_register_registers_callback(self, *mocks):
-        callback = lambda: None
-        self.mgr.register(callback, 'TYPE')
-        self.assertEqual({callback}, self.mgr.get_callbacks('TYPE'))
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_register_succeeds_on_multiple_calls(self, *mocks):
-        callback1 = lambda: None
-        callback2 = lambda: None
-        self.mgr.register(callback1, 'TYPE')
-        self.mgr.register(callback2, 'TYPE')
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_get_callbacks_fails_on_unregistered_callback(
-        self, *mocks):
-        self.assertRaises(
-            rpc_exc.CallbackNotFound,
-            self.mgr.get_callbacks, 'TYPE')
-
-    @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True)
-    def test_get_callbacks_returns_proper_callbacks(self, *mocks):
-        callback1 = lambda: None
-        callback2 = lambda: None
-        self.mgr.register(callback1, 'TYPE1')
-        self.mgr.register(callback2, 'TYPE2')
-        self.assertEqual(set([callback1]), self.mgr.get_callbacks('TYPE1'))
-        self.assertEqual(set([callback2]), self.mgr.get_callbacks('TYPE2'))
diff --git a/neutron/tests/unit/api/rpc/callbacks/test_resources.py b/neutron/tests/unit/api/rpc/callbacks/test_resources.py
deleted file mode 100644 (file)
index 78d8e5d..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.rpc.callbacks import resources
-from neutron.objects.qos import policy
-from neutron.tests import base
-
-
-class GetResourceTypeTestCase(base.BaseTestCase):
-
-    def test_get_resource_type_none(self):
-        self.assertIsNone(resources.get_resource_type(None))
-
-    def test_get_resource_type_wrong_type(self):
-        self.assertIsNone(resources.get_resource_type(object()))
-
-    def test_get_resource_type(self):
-        # we could use any other registered NeutronObject type here
-        self.assertEqual(policy.QosPolicy.obj_name(),
-                         resources.get_resource_type(policy.QosPolicy()))
-
-
-class IsValidResourceTypeTestCase(base.BaseTestCase):
-
-    def test_known_type(self):
-        # it could be any other NeutronObject, assuming it's known to RPC
-        # callbacks
-        self.assertTrue(resources.is_valid_resource_type(
-            policy.QosPolicy.obj_name()))
-
-    def test_unknown_type(self):
-        self.assertFalse(
-            resources.is_valid_resource_type('unknown-resource-type'))
-
-
-class GetResourceClsTestCase(base.BaseTestCase):
-
-    def test_known_type(self):
-        # it could be any other NeutronObject, assuming it's known to RPC
-        # callbacks
-        self.assertEqual(policy.QosPolicy,
-                         resources.get_resource_cls(resources.QOS_POLICY))
-
-    def test_unknown_type(self):
-        self.assertIsNone(resources.get_resource_cls('unknown-resource-type'))
diff --git a/neutron/tests/unit/api/rpc/handlers/__init__.py b/neutron/tests/unit/api/rpc/handlers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py
deleted file mode 100644 (file)
index 1786b73..0000000
+++ /dev/null
@@ -1,251 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo_db import exception as db_exc
-
-from neutron.api.rpc.handlers import dhcp_rpc
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import utils
-from neutron.extensions import portbindings
-from neutron.tests import base
-
-
-class TestDhcpRpcCallback(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestDhcpRpcCallback, self).setUp()
-        self.plugin_p = mock.patch('neutron.manager.NeutronManager.get_plugin')
-        get_plugin = self.plugin_p.start()
-        self.plugin = mock.MagicMock()
-        get_plugin.return_value = self.plugin
-        self.callbacks = dhcp_rpc.DhcpRpcCallback()
-        self.log_p = mock.patch('neutron.api.rpc.handlers.dhcp_rpc.LOG')
-        self.log = self.log_p.start()
-        set_dirty_p = mock.patch('neutron.quota.resource_registry.'
-                                 'set_resources_dirty')
-        self.mock_set_dirty = set_dirty_p.start()
-        self.utils_p = mock.patch('neutron.plugins.common.utils.create_port')
-        self.utils = self.utils_p.start()
-
-    def test_get_active_networks(self):
-        plugin_retval = [dict(id='a'), dict(id='b')]
-        self.plugin.get_networks.return_value = plugin_retval
-
-        networks = self.callbacks.get_active_networks(mock.Mock(), host='host')
-
-        self.assertEqual(networks, ['a', 'b'])
-        self.plugin.assert_has_calls(
-            [mock.call.get_networks(mock.ANY,
-                                    filters=dict(admin_state_up=[True]))])
-
-        self.assertEqual(len(self.log.mock_calls), 1)
-
-    def test_group_by_network_id(self):
-        port1 = {'network_id': 'a'}
-        port2 = {'network_id': 'b'}
-        port3 = {'network_id': 'a'}
-        grouped_ports = self.callbacks._group_by_network_id(
-                                                        [port1, port2, port3])
-        expected = {'a': [port1, port3], 'b': [port2]}
-        self.assertEqual(expected, grouped_ports)
-
-    def test_get_active_networks_info(self):
-        plugin_retval = [{'id': 'a'}, {'id': 'b'}]
-        self.plugin.get_networks.return_value = plugin_retval
-        port = {'network_id': 'a'}
-        subnet = {'network_id': 'b'}
-        self.plugin.get_ports.return_value = [port]
-        self.plugin.get_subnets.return_value = [subnet]
-        networks = self.callbacks.get_active_networks_info(mock.Mock(),
-                                                           host='host')
-        expected = [{'id': 'a', 'subnets': [], 'ports': [port]},
-                    {'id': 'b', 'subnets': [subnet], 'ports': []}]
-        self.assertEqual(expected, networks)
-
-    def _test__port_action_with_failures(self, exc=None, action=None):
-        port = {
-            'network_id': 'foo_network_id',
-            'device_owner': constants.DEVICE_OWNER_DHCP,
-            'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
-        }
-        self.plugin.create_port.side_effect = exc
-        self.utils.side_effect = exc
-        self.assertIsNone(self.callbacks._port_action(self.plugin,
-                                                      mock.Mock(),
-                                                      {'port': port},
-                                                      action))
-
-    def _test__port_action_good_action(self, action, port, expected_call):
-        self.callbacks._port_action(self.plugin, mock.Mock(),
-                                    port, action)
-        if action == 'create_port':
-            self.utils.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
-        else:
-            self.plugin.assert_has_calls([expected_call])
-
-    def test_port_action_create_port(self):
-        self._test__port_action_good_action(
-            'create_port', mock.Mock(),
-            mock.call.create_port(mock.ANY, mock.ANY))
-
-    def test_port_action_update_port(self):
-        fake_port = {'id': 'foo_port_id', 'port': mock.Mock()}
-        self._test__port_action_good_action(
-            'update_port', fake_port,
-            mock.call.update_port(mock.ANY, 'foo_port_id', mock.ANY))
-
-    def test__port_action_bad_action(self):
-        self.assertRaises(
-            n_exc.Invalid,
-            self._test__port_action_with_failures,
-            exc=None,
-            action='foo_action')
-
-    def test_create_port_catch_network_not_found(self):
-        self._test__port_action_with_failures(
-            exc=n_exc.NetworkNotFound(net_id='foo_network_id'),
-            action='create_port')
-
-    def test_create_port_catch_subnet_not_found(self):
-        self._test__port_action_with_failures(
-            exc=n_exc.SubnetNotFound(subnet_id='foo_subnet_id'),
-            action='create_port')
-
-    def test_create_port_catch_db_error(self):
-        self._test__port_action_with_failures(exc=db_exc.DBError(),
-                                              action='create_port')
-
-    def test_create_port_catch_ip_generation_failure_reraise(self):
-        self.assertRaises(
-            n_exc.IpAddressGenerationFailure,
-            self._test__port_action_with_failures,
-            exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'),
-            action='create_port')
-
-    def test_create_port_catch_and_handle_ip_generation_failure(self):
-        self.plugin.get_subnet.side_effect = (
-            n_exc.SubnetNotFound(subnet_id='foo_subnet_id'))
-        self._test__port_action_with_failures(
-            exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'),
-            action='create_port')
-
-    def test_get_network_info_return_none_on_not_found(self):
-        self.plugin.get_network.side_effect = n_exc.NetworkNotFound(net_id='a')
-        retval = self.callbacks.get_network_info(mock.Mock(), network_id='a')
-        self.assertIsNone(retval)
-
-    def test_get_network_info(self):
-        network_retval = dict(id='a')
-
-        subnet_retval = mock.Mock()
-        port_retval = mock.Mock()
-
-        self.plugin.get_network.return_value = network_retval
-        self.plugin.get_subnets.return_value = subnet_retval
-        self.plugin.get_ports.return_value = port_retval
-
-        retval = self.callbacks.get_network_info(mock.Mock(), network_id='a')
-        self.assertEqual(retval, network_retval)
-        self.assertEqual(retval['subnets'], subnet_retval)
-        self.assertEqual(retval['ports'], port_retval)
-
-    def test_update_dhcp_port_verify_port_action_port_dict(self):
-        port = {'port': {'network_id': 'foo_network_id',
-                         'device_owner': constants.DEVICE_OWNER_DHCP,
-                         'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
-                }
-        expected_port = {'port': {'network_id': 'foo_network_id',
-                                  'device_owner': constants.DEVICE_OWNER_DHCP,
-                                  portbindings.HOST_ID: 'foo_host',
-                                  'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
-                                  },
-                         'id': 'foo_port_id'
-                         }
-
-        def _fake_port_action(plugin, context, port, action):
-            self.assertEqual(expected_port, port)
-
-        self.plugin.get_port.return_value = {
-            'device_id': constants.DEVICE_ID_RESERVED_DHCP_PORT}
-        self.callbacks._port_action = _fake_port_action
-        self.callbacks.update_dhcp_port(mock.Mock(),
-                                        host='foo_host',
-                                        port_id='foo_port_id',
-                                        port=port)
-
-    def test_update_reserved_dhcp_port(self):
-        port = {'port': {'network_id': 'foo_network_id',
-                         'device_owner': constants.DEVICE_OWNER_DHCP,
-                         'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
-                }
-        expected_port = {'port': {'network_id': 'foo_network_id',
-                                  'device_owner': constants.DEVICE_OWNER_DHCP,
-                                  portbindings.HOST_ID: 'foo_host',
-                                  'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
-                                  },
-                         'id': 'foo_port_id'
-                         }
-
-        def _fake_port_action(plugin, context, port, action):
-            self.assertEqual(expected_port, port)
-
-        self.plugin.get_port.return_value = {
-            'device_id': utils.get_dhcp_agent_device_id('foo_network_id',
-                                                        'foo_host')}
-        self.callbacks._port_action = _fake_port_action
-        self.callbacks.update_dhcp_port(
-            mock.Mock(), host='foo_host', port_id='foo_port_id', port=port)
-
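-        # A DHCP port already owned by another device must not be
-        # reassigned: the callback is expected to raise DhcpPortInUse.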
-        self.plugin.get_port.return_value = {
-            'device_id': 'other_id'}
-        self.assertRaises(n_exc.DhcpPortInUse,
-                          self.callbacks.update_dhcp_port,
-                          mock.Mock(),
-                          host='foo_host',
-                          port_id='foo_port_id',
-                          port=port)
-
-    def test_update_dhcp_port(self):
-        port = {'port': {'network_id': 'foo_network_id',
-                         'device_owner': constants.DEVICE_OWNER_DHCP,
-                         'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
-                }
-        expected_port = {'port': {'network_id': 'foo_network_id',
-                                  'device_owner': constants.DEVICE_OWNER_DHCP,
-                                  portbindings.HOST_ID: 'foo_host',
-                                  'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
-                                  },
-                         'id': 'foo_port_id'
-                         }
-        self.plugin.get_port.return_value = {
-            'device_id': constants.DEVICE_ID_RESERVED_DHCP_PORT}
-        self.callbacks.update_dhcp_port(mock.Mock(),
-                                        host='foo_host',
-                                        port_id='foo_port_id',
-                                        port=port)
-        self.plugin.assert_has_calls([
-            mock.call.update_port(mock.ANY, 'foo_port_id', expected_port)])
-
-    def test_release_dhcp_port(self):
-        port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')])
-        self.plugin.get_ports.return_value = [port_retval]
-
-        self.callbacks.release_dhcp_port(mock.ANY, network_id='netid',
-                                         device_id='devid')
-
-        self.plugin.assert_has_calls([
-            mock.call.delete_ports_by_device_id(mock.ANY, 'devid', 'netid')])
diff --git a/neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py
deleted file mode 100644 (file)
index 0931604..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from neutron.api.rpc.handlers import dvr_rpc
-from neutron.tests import base
-
-
-class DVRServerRpcApiTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        self.client_p = mock.patch.object(dvr_rpc.n_rpc, "get_client")
-        self.client = self.client_p.start()
-        self.rpc = dvr_rpc.DVRServerRpcApi('fake_topic')
-        self.mock_cctxt = self.rpc.client.prepare.return_value
-        self.ctxt = mock.ANY
-        super(DVRServerRpcApiTestCase, self).setUp()
-
-    def test_get_dvr_mac_address_by_host(self):
-        self.rpc.get_dvr_mac_address_by_host(self.ctxt, 'foo_host')
-        self.mock_cctxt.call.assert_called_with(
-            self.ctxt, 'get_dvr_mac_address_by_host', host='foo_host')
-
-    def test_get_dvr_mac_address_list(self):
-        self.rpc.get_dvr_mac_address_list(self.ctxt)
-        self.mock_cctxt.call.assert_called_with(
-            self.ctxt, 'get_dvr_mac_address_list')
-
-    def test_get_ports_on_host_by_subnet(self):
-        self.rpc.get_ports_on_host_by_subnet(
-            self.ctxt, 'foo_host', 'foo_subnet')
-        self.mock_cctxt.call.assert_called_with(
-            self.ctxt, 'get_ports_on_host_by_subnet',
-            host='foo_host', subnet='foo_subnet')
-
-    def test_get_subnet_for_dvr(self):
-        self.rpc.get_subnet_for_dvr(
-            self.ctxt, 'foo_subnet', fixed_ips='foo_fixed_ips')
-        self.mock_cctxt.call.assert_called_with(
-            self.ctxt, 'get_subnet_for_dvr',
-            subnet='foo_subnet',
-            fixed_ips='foo_fixed_ips')
diff --git a/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py
deleted file mode 100644 (file)
index eac6d97..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright (c) 2015 Cisco Systems
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_config import cfg
-
-from neutron.api.rpc.handlers import l3_rpc
-from neutron.common import constants
-from neutron import context
-from neutron import manager
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit import testlib_api
-
-
-class TestL3RpcCallback(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(TestL3RpcCallback, self).setUp()
-        self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS)
-        self.plugin = manager.NeutronManager.get_plugin()
-        self.ctx = context.get_admin_context()
-        cfg.CONF.set_override('ipv6_pd_enabled', True)
-        self.callbacks = l3_rpc.L3RpcCallback()
-        self.network = self._prepare_network()
-
-    def _prepare_network(self):
-        network = {'network': {'name': 'abc',
-                               'shared': False,
-                               'tenant_id': 'tenant_id',
-                               'admin_state_up': True}}
-        return self.plugin.create_network(self.ctx, network)
-
-    def _prepare_ipv6_pd_subnet(self):
-        subnet = {'subnet': {'network_id': self.network['id'],
-                             'tenant_id': 'tenant_id',
-                             'cidr': None,
-                             'ip_version': 6,
-                             'name': 'ipv6_pd',
-                             'enable_dhcp': True,
-                             'host_routes': None,
-                             'dns_nameservers': None,
-                             'allocation_pools': None,
-                             'ipv6_ra_mode': constants.IPV6_SLAAC,
-                             'ipv6_address_mode': constants.IPV6_SLAAC}}
-        return self.plugin.create_subnet(self.ctx, subnet)
-
-    def test_process_prefix_update(self):
-        subnet = self._prepare_ipv6_pd_subnet()
-        data = {subnet['id']: '2001:db8::/64'}
-        allocation_pools = [{'start': '2001:db8::2',
-                             'end': '2001:db8::ffff:ffff:ffff:ffff'}]
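-        # A prefix update should rewrite both the subnet CIDR and its
-        # allocation pools to match the newly delegated /64.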
-        res = self.callbacks.process_prefix_update(self.ctx, subnets=data)
-        updated_subnet = res[0]
-        self.assertEqual(updated_subnet['cidr'], data[subnet['id']])
-        self.assertEqual(updated_subnet['allocation_pools'], allocation_pools)
diff --git a/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py
deleted file mode 100755 (executable)
index bd32fe0..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-# Copyright (c) 2015 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-
-import mock
-from oslo_versionedobjects import base as obj_base
-from oslo_versionedobjects import fields as obj_fields
-import testtools
-
-from neutron.api.rpc.callbacks import resources
-from neutron.api.rpc.handlers import resources_rpc
-from neutron.common import topics
-from neutron import context
-from neutron.objects import base as objects_base
-from neutron.tests import base
-
-
-def _create_test_dict():
-    return {'id': 'uuid',
-            'field': 'foo'}
-
-
-def _create_test_resource(context=None):
-    resource_dict = _create_test_dict()
-    resource = FakeResource(context, **resource_dict)
-    resource.obj_reset_changes()
-    return resource
-
-
-class FakeResource(objects_base.NeutronObject):
-    # Version 1.0: Initial version
-    VERSION = '1.0'
-
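-    # A minimal stand-in object: just enough fields to exercise the
-    # serialization round-trips in the RPC handler tests below.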
-    fields = {
-        'id': obj_fields.UUIDField(),
-        'field': obj_fields.StringField()
-    }
-
-    @classmethod
-    def get_objects(cls, context, **kwargs):
-        return list()
-
-
-class ResourcesRpcBaseTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(ResourcesRpcBaseTestCase, self).setUp()
-
-        # TODO(mhickey) This is using temp registry pattern. The
-        # pattern solution is to backup the object registry, register
-        # a class locally, and then restore the original registry.
-        # Refer to https://review.openstack.org/#/c/263800/ for more
-        # details. This code should be updated when the patch is merged.
-        self._base_test_backup = copy.copy(
-            obj_base.VersionedObjectRegistry._registry._obj_classes)
-        self.addCleanup(self._restore_obj_registry)
-
-        self.context = context.get_admin_context()
-
-    def _restore_obj_registry(self):
-        obj_base.VersionedObjectRegistry._registry._obj_classes = (
-            self._base_test_backup)
-
-
-class _ValidateResourceTypeTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(_ValidateResourceTypeTestCase, self).setUp()
-        self.is_valid_mock = mock.patch.object(
-            resources_rpc.resources, 'is_valid_resource_type').start()
-
-    def test_valid_type(self):
-        self.is_valid_mock.return_value = True
-        resources_rpc._validate_resource_type('foo')
-
-    def test_invalid_type(self):
-        self.is_valid_mock.return_value = False
-        with testtools.ExpectedException(
-                resources_rpc.InvalidResourceTypeClass):
-            resources_rpc._validate_resource_type('foo')
-
-
-class _ResourceTypeVersionedTopicTestCase(base.BaseTestCase):
-
-    @mock.patch.object(resources_rpc, '_validate_resource_type')
-    def test_resource_type_versioned_topic(self, validate_mock):
-        obj_name = FakeResource.obj_name()
-        expected = topics.RESOURCE_TOPIC_PATTERN % {
-            'resource_type': 'FakeResource', 'version': '1.0'}
-        with mock.patch.object(resources_rpc.resources, 'get_resource_cls',
-                return_value=FakeResource):
-            observed = resources_rpc.resource_type_versioned_topic(obj_name)
-        self.assertEqual(expected, observed)
-
-
-class ResourcesPullRpcApiTestCase(ResourcesRpcBaseTestCase):
-
-    def setUp(self):
-        super(ResourcesPullRpcApiTestCase, self).setUp()
-        mock.patch.object(resources_rpc, '_validate_resource_type').start()
-        mock.patch('neutron.api.rpc.callbacks.resources.get_resource_cls',
-                   return_value=FakeResource).start()
-        self.rpc = resources_rpc.ResourcesPullRpcApi()
-        mock.patch.object(self.rpc, 'client').start()
-        self.cctxt_mock = self.rpc.client.prepare.return_value
-
-    def test_is_singleton(self):
-        self.assertIs(self.rpc, resources_rpc.ResourcesPullRpcApi())
-
-    def test_pull(self):
-        obj_base.VersionedObjectRegistry.register(FakeResource)
-        expected_obj = _create_test_resource(self.context)
-        resource_id = expected_obj.id
-        self.cctxt_mock.call.return_value = expected_obj.obj_to_primitive()
-
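-        # pull() is expected to rebuild the versioned object from the
-        # primitive form returned over RPC.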
-        result = self.rpc.pull(
-            self.context, FakeResource.obj_name(), resource_id)
-
-        self.cctxt_mock.call.assert_called_once_with(
-            self.context, 'pull', resource_type='FakeResource',
-            version=FakeResource.VERSION, resource_id=resource_id)
-        self.assertEqual(expected_obj, result)
-
-    def test_pull_resource_not_found(self):
-        resource_dict = _create_test_dict()
-        resource_id = resource_dict['id']
-        self.cctxt_mock.call.return_value = None
-        with testtools.ExpectedException(resources_rpc.ResourceNotFound):
-            self.rpc.pull(self.context, FakeResource.obj_name(),
-                          resource_id)
-
-
-class ResourcesPullRpcCallbackTestCase(ResourcesRpcBaseTestCase):
-
-    def setUp(self):
-        super(ResourcesPullRpcCallbackTestCase, self).setUp()
-        obj_base.VersionedObjectRegistry.register(FakeResource)
-        self.callbacks = resources_rpc.ResourcesPullRpcCallback()
-        self.resource_obj = _create_test_resource(self.context)
-
-    def test_pull(self):
-        resource_dict = _create_test_dict()
-        with mock.patch.object(
-                resources_rpc.prod_registry, 'pull',
-                return_value=self.resource_obj) as registry_mock:
-            primitive = self.callbacks.pull(
-                self.context, resource_type=FakeResource.obj_name(),
-                version=FakeResource.VERSION,
-                resource_id=self.resource_obj.id)
-        registry_mock.assert_called_once_with(
-            'FakeResource', self.resource_obj.id, context=self.context)
-        self.assertEqual(resource_dict,
-                         primitive['versioned_object.data'])
-        self.assertEqual(self.resource_obj.obj_to_primitive(), primitive)
-
-    @mock.patch.object(FakeResource, 'obj_to_primitive')
-    def test_pull_backports_to_older_version(self, to_prim_mock):
-        with mock.patch.object(resources_rpc.prod_registry, 'pull',
-                               return_value=self.resource_obj):
-            self.callbacks.pull(
-                self.context, resource_type=FakeResource.obj_name(),
-                version='0.9',  # less than initial version 1.0
-                resource_id=self.resource_obj.id)
-            to_prim_mock.assert_called_with(target_version='0.9')
-
-
-class ResourcesPushRpcApiTestCase(ResourcesRpcBaseTestCase):
-
-    def setUp(self):
-        super(ResourcesPushRpcApiTestCase, self).setUp()
-        mock.patch.object(resources_rpc.n_rpc, 'get_client').start()
-        mock.patch.object(resources_rpc, '_validate_resource_type').start()
-        self.rpc = resources_rpc.ResourcesPushRpcApi()
-        self.cctxt_mock = self.rpc.client.prepare.return_value
-        self.resource_obj = _create_test_resource(self.context)
-
-    def test__prepare_object_fanout_context(self):
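-        # The fanout topic embeds the resource type name and version, so
-        # each object version appears to get its own topic.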
-        expected_topic = topics.RESOURCE_TOPIC_PATTERN % {
-            'resource_type': resources.get_resource_type(self.resource_obj),
-            'version': self.resource_obj.VERSION}
-
-        with mock.patch.object(resources_rpc.resources, 'get_resource_cls',
-                return_value=FakeResource):
-            observed = self.rpc._prepare_object_fanout_context(
-                self.resource_obj)
-
-        self.rpc.client.prepare.assert_called_once_with(
-            fanout=True, topic=expected_topic)
-        self.assertEqual(self.cctxt_mock, observed)
-
-    def test_push(self):
-        with mock.patch.object(resources_rpc.resources, 'get_resource_cls',
-                return_value=FakeResource):
-            self.rpc.push(
-                self.context, self.resource_obj, 'TYPE')
-
-        self.cctxt_mock.cast.assert_called_once_with(
-            self.context, 'push',
-            resource=self.resource_obj.obj_to_primitive(),
-            event_type='TYPE')
-
-
-class ResourcesPushRpcCallbackTestCase(ResourcesRpcBaseTestCase):
-
-    def setUp(self):
-        super(ResourcesPushRpcCallbackTestCase, self).setUp()
-        mock.patch.object(resources_rpc, '_validate_resource_type').start()
-        mock.patch.object(
-            resources_rpc.resources,
-            'get_resource_cls', return_value=FakeResource).start()
-        self.resource_obj = _create_test_resource(self.context)
-        self.resource_prim = self.resource_obj.obj_to_primitive()
-        self.callbacks = resources_rpc.ResourcesPushRpcCallback()
-
-    @mock.patch.object(resources_rpc.cons_registry, 'push')
-    def test_push(self, reg_push_mock):
-        obj_base.VersionedObjectRegistry.register(FakeResource)
-        self.callbacks.push(self.context, self.resource_prim, 'TYPE')
-        reg_push_mock.assert_called_once_with(self.resource_obj.obj_name(),
-                                              self.resource_obj, 'TYPE')
diff --git a/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py
deleted file mode 100644 (file)
index 5cff29a..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.api.rpc.handlers import securitygroups_rpc
-from neutron.tests import base
-
-
-class SecurityGroupServerRpcApiTestCase(base.BaseTestCase):
-
-    def test_security_group_rules_for_devices(self):
-        rpcapi = securitygroups_rpc.SecurityGroupServerRpcApi('fake_topic')
-
-        with mock.patch.object(rpcapi.client, 'call') as rpc_mock,\
-                mock.patch.object(rpcapi.client, 'prepare') as prepare_mock:
-            prepare_mock.return_value = rpcapi.client
-            rpcapi.security_group_rules_for_devices('context', ['fake_device'])
-
-            rpc_mock.assert_called_once_with(
-                    'context',
-                    'security_group_rules_for_devices',
-                    devices=['fake_device'])
-
-
-class SGAgentRpcCallBackMixinTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(SGAgentRpcCallBackMixinTestCase, self).setUp()
-        self.rpc = securitygroups_rpc.SecurityGroupAgentRpcCallbackMixin()
-        self.rpc.sg_agent = mock.Mock()
-
-    def test_security_groups_rule_updated(self):
-        self.rpc.security_groups_rule_updated(None,
-                                              security_groups=['fake_sgid'])
-        self.rpc.sg_agent.assert_has_calls(
-            [mock.call.security_groups_rule_updated(['fake_sgid'])])
-
-    def test_security_groups_member_updated(self):
-        self.rpc.security_groups_member_updated(None,
-                                                security_groups=['fake_sgid'])
-        self.rpc.sg_agent.assert_has_calls(
-            [mock.call.security_groups_member_updated(['fake_sgid'])])
-
-    def test_security_groups_provider_updated(self):
-        self.rpc.security_groups_provider_updated(None)
-        self.rpc.sg_agent.assert_has_calls(
-            [mock.call.security_groups_provider_updated(None)])
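A minimal sketch of how the mixin above is wired in practice: an RPC endpoint
sets sg_agent to the agent that owns the firewall logic, and each notification
is simply forwarded to it. FakeFirewallAgent is hypothetical, for illustration
only.

    from neutron.api.rpc.handlers import securitygroups_rpc

    class FakeFirewallAgent(object):
        # hypothetical agent, for illustration only
        def security_groups_rule_updated(self, security_groups):
            print('refreshing firewall for %s' % security_groups)

    endpoint = securitygroups_rpc.SecurityGroupAgentRpcCallbackMixin()
    endpoint.sg_agent = FakeFirewallAgent()
    # invoked by oslo.messaging when a rule-update notification arrives
    endpoint.security_groups_rule_updated(None, security_groups=['sg-1'])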
diff --git a/neutron/tests/unit/api/test_api_common.py b/neutron/tests/unit/api/test_api_common.py
deleted file mode 100644 (file)
index 5ccf489..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (c) 2013 Intel Corporation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from testtools import matchers
-from webob import exc
-
-from neutron.api import api_common as common
-from neutron.tests import base
-
-
-class FakeController(common.NeutronController):
-    _resource_name = 'fake'
-
-
-class APICommonTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(APICommonTestCase, self).setUp()
-        self.controller = FakeController(None)
-
-    def test_prepare_request_body(self):
-        body = {
-            'fake': {
-                'name': 'terminator',
-                'model': 'T-800',
-            }
-        }
-        params = [
-            {'param-name': 'name',
-             'required': True},
-            {'param-name': 'model',
-             'required': True},
-            {'param-name': 'quote',
-             'required': False,
-             'default-value': "i'll be back"},
-        ]
-        expect = {
-            'fake': {
-                'name': 'terminator',
-                'model': 'T-800',
-                'quote': "i'll be back",
-            }
-        }
-        actual = self.controller._prepare_request_body(body, params)
-        self.assertThat(expect, matchers.Equals(actual))
-
-    def test_prepare_request_body_none(self):
-        body = None
-        params = [
-            {'param-name': 'quote',
-             'required': False,
-             'default-value': "I'll be back"},
-        ]
-        expect = {
-            'fake': {
-                'quote': "I'll be back",
-            }
-        }
-        actual = self.controller._prepare_request_body(body, params)
-        self.assertThat(expect, matchers.Equals(actual))
-
-    def test_prepare_request_body_keyerror(self):
-        body = {'t2': {}}
-        params = []
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller._prepare_request_body,
-                          body,
-                          params)
-
-    def test_prepare_request_param_value_none(self):
-        body = {
-            'fake': {
-                'name': None,
-            }
-        }
-        params = [
-            {'param-name': 'name',
-             'required': True},
-        ]
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller._prepare_request_body,
-                          body,
-                          params)
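A minimal sketch of the params schema consumed by _prepare_request_body in the
tests above: each entry names a parameter, marks it required or not, and may
carry a default that is filled in when the request body omits it. The
controller is the FakeController defined at the top of this file.

    controller = FakeController(None)
    params = [
        {'param-name': 'name', 'required': True},
        {'param-name': 'quote', 'required': False,
         'default-value': "i'll be back"},
    ]
    body = {'fake': {'name': 'terminator'}}
    prepared = controller._prepare_request_body(body, params)
    # prepared == {'fake': {'name': 'terminator', 'quote': "i'll be back"}}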
diff --git a/neutron/tests/unit/api/test_extensions.py b/neutron/tests/unit/api/test_extensions.py
deleted file mode 100644 (file)
index 62d4ef9..0000000
+++ /dev/null
@@ -1,884 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import mock
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_service import wsgi as base_wsgi
-import routes
-import six
-import webob
-import webob.exc as webexc
-import webtest
-
-import neutron
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.common import config
-from neutron.common import exceptions
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron import quota
-from neutron.tests import base
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit import extension_stubs as ext_stubs
-import neutron.tests.unit.extensions
-from neutron.tests.unit.extensions import extendedattribute as extattr
-from neutron.tests.unit import testlib_api
-from neutron import wsgi
-
-
-LOG = logging.getLogger(__name__)
-_uuid = test_base._uuid
-_get_path = test_base._get_path
-extensions_path = ':'.join(neutron.tests.unit.extensions.__path__)
-
-
-class ExtensionsTestApp(base_wsgi.Router):
-
-    def __init__(self, options=None):
-        options = options or {}
-        mapper = routes.Mapper()
-        controller = ext_stubs.StubBaseAppController()
-        mapper.resource("dummy_resource", "/dummy_resources",
-                        controller=controller)
-        super(ExtensionsTestApp, self).__init__(mapper)
-
-
-class FakePluginWithExtension(object):
-    """A fake plugin used only for extension testing in this file."""
-
-    supported_extension_aliases = ["FOXNSOX"]
-
-    def method_to_support_foxnsox_extension(self, context):
-        self._log("method_to_support_foxnsox_extension", context)
-
-
-class ExtensionPathTest(base.BaseTestCase):
-
-    def setUp(self):
-        self.base_path = extensions.get_extensions_path()
-        super(ExtensionPathTest, self).setUp()
-
-    def test_get_extensions_path_with_plugins(self):
-        path = extensions.get_extensions_path(
-            {constants.CORE: FakePluginWithExtension()})
-        self.assertEqual(path,
-                         '%s:neutron/tests/unit/extensions' % self.base_path)
-
-    def test_get_extensions_path_no_extensions(self):
-        # Reset to default value, as it's overridden by base class
-        cfg.CONF.set_override('api_extensions_path', '')
-        path = extensions.get_extensions_path()
-        self.assertEqual(path, self.base_path)
-
-    def test_get_extensions_path_single_extension(self):
-        cfg.CONF.set_override('api_extensions_path', 'path1')
-        path = extensions.get_extensions_path()
-        self.assertEqual(path, '%s:path1' % self.base_path)
-
-    def test_get_extensions_path_multiple_extensions(self):
-        cfg.CONF.set_override('api_extensions_path', 'path1:path2')
-        path = extensions.get_extensions_path()
-        self.assertEqual(path, '%s:path1:path2' % self.base_path)
-
-    def test_get_extensions_path_duplicate_extensions(self):
-        cfg.CONF.set_override('api_extensions_path', 'path1:path1')
-        path = extensions.get_extensions_path()
-        self.assertEqual(path, '%s:path1' % self.base_path)
-
-
-class PluginInterfaceTest(base.BaseTestCase):
-    def test_issubclass_hook(self):
-        class A(object):
-            def f(self):
-                pass
-
-        class B(extensions.PluginInterface):
-            @abc.abstractmethod
-            def f(self):
-                pass
-
-        self.assertTrue(issubclass(A, B))
-
-    def test_issubclass_hook_class_without_abstract_methods(self):
-        class A(object):
-            def f(self):
-                pass
-
-        class B(extensions.PluginInterface):
-            def f(self):
-                pass
-
-        self.assertFalse(issubclass(A, B))
-
-    def test_issubclass_hook_not_all_methods_implemented(self):
-        class A(object):
-            def f(self):
-                pass
-
-        class B(extensions.PluginInterface):
-            @abc.abstractmethod
-            def f(self):
-                pass
-
-            @abc.abstractmethod
-            def g(self):
-                pass
-
-        self.assertFalse(issubclass(A, B))
-
-
-class ResourceExtensionTest(base.BaseTestCase):
-
-    class ResourceExtensionController(wsgi.Controller):
-
-        def index(self, request):
-            return "resource index"
-
-        def show(self, request, id):
-            return {'data': {'id': id}}
-
-        def notimplemented_function(self, request, id):
-            return webob.exc.HTTPNotImplemented()
-
-        def custom_member_action(self, request, id):
-            return {'member_action': 'value'}
-
-        def custom_collection_action(self, request, **kwargs):
-            return {'collection': 'value'}
-
-    class DummySvcPlugin(wsgi.Controller):
-        def get_plugin_type(self):
-            return constants.DUMMY
-
-        def index(self, request, **kwargs):
-            return "resource index"
-
-        def custom_member_action(self, request, **kwargs):
-            return {'member_action': 'value'}
-
-        def collection_action(self, request, **kwargs):
-            return {'collection': 'value'}
-
-        def show(self, request, id):
-            return {'data': {'id': id}}
-
-    def test_exceptions_notimplemented(self):
-        controller = self.ResourceExtensionController()
-        member = {'notimplemented_function': "GET"}
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               member_actions=member)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        # Ideally we would check for a 501 code here, but webtest raises
-        # webtest.AppError for any status below 200 or above 400, so we
-        # can't inspect the status code directly.
-        try:
-            test_app.get("/tweedles/some_id/notimplemented_function")
-            self.fail("Shouldn't be reached")
-        except webtest.AppError as e:
-            self.assertIn('501', str(e))
-
-    def test_resource_can_be_added_as_extension(self):
-        res_ext = extensions.ResourceExtension(
-            'tweedles', self.ResourceExtensionController())
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-        index_response = test_app.get("/tweedles")
-        self.assertEqual(200, index_response.status_int)
-        self.assertEqual(b"resource index", index_response.body)
-
-        show_response = test_app.get("/tweedles/25266")
-        self.assertEqual({'data': {'id': "25266"}}, show_response.json)
-
-    def test_resource_gets_prefix_of_plugin(self):
-        class DummySvcPlugin(wsgi.Controller):
-            def index(self, request):
-                return ""
-
-            def get_plugin_type(self):
-                return constants.DUMMY
-
-        res_ext = extensions.ResourceExtension(
-            'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc")
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-        index_response = test_app.get("/dummy_svc/tweedles")
-        self.assertEqual(200, index_response.status_int)
-
-    def test_resource_extension_with_custom_member_action(self):
-        controller = self.ResourceExtensionController()
-        member = {'custom_member_action': "GET"}
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               member_actions=member)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        response = test_app.get("/tweedles/some_id/custom_member_action")
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['member_action'],
-                         "value")
-
-    def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self):
-        controller = self.DummySvcPlugin()
-        member = {'custom_member_action': "GET"}
-        collections = {'collection_action': "GET"}
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               path_prefix="/dummy_svc",
-                                               member_actions=member,
-                                               collection_actions=collections)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        response = test_app.get("/dummy_svc/tweedles/1/custom_member_action")
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['member_action'],
-                         "value")
-
-        response = test_app.get("/dummy_svc/tweedles/collection_action")
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['collection'],
-                         "value")
-
-    def test_plugin_prefix_with_parent_resource(self):
-        controller = self.DummySvcPlugin()
-        parent = dict(member_name="tenant",
-                      collection_name="tenants")
-        member = {'custom_member_action': "GET"}
-        collections = {'collection_action': "GET"}
-        res_ext = extensions.ResourceExtension('tweedles', controller, parent,
-                                               path_prefix="/dummy_svc",
-                                               member_actions=member,
-                                               collection_actions=collections)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        index_response = test_app.get("/dummy_svc/tenants/1/tweedles")
-        self.assertEqual(200, index_response.status_int)
-
-        response = test_app.get("/dummy_svc/tenants/1/"
-                                "tweedles/1/custom_member_action")
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['member_action'],
-                         "value")
-
-        response = test_app.get("/dummy_svc/tenants/2/"
-                                "tweedles/collection_action")
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['collection'],
-                         "value")
-
-    def test_resource_extension_for_get_custom_collection_action(self):
-        controller = self.ResourceExtensionController()
-        collections = {'custom_collection_action': "GET"}
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               collection_actions=collections)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        response = test_app.get("/tweedles/custom_collection_action")
-        self.assertEqual(200, response.status_int)
-        LOG.debug(jsonutils.loads(response.body))
-        self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
-
-    def test_resource_extension_for_put_custom_collection_action(self):
-        controller = self.ResourceExtensionController()
-        collections = {'custom_collection_action': "PUT"}
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               collection_actions=collections)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        response = test_app.put("/tweedles/custom_collection_action")
-
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
-
-    def test_resource_extension_for_post_custom_collection_action(self):
-        controller = self.ResourceExtensionController()
-        collections = {'custom_collection_action': "POST"}
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               collection_actions=collections)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        response = test_app.post("/tweedles/custom_collection_action")
-
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
-
-    def test_resource_extension_for_delete_custom_collection_action(self):
-        controller = self.ResourceExtensionController()
-        collections = {'custom_collection_action': "DELETE"}
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               collection_actions=collections)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        response = test_app.delete("/tweedles/custom_collection_action")
-
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
-
-    def test_resource_ext_for_formatted_req_on_custom_collection_action(self):
-        controller = self.ResourceExtensionController()
-        collections = {'custom_collection_action': "GET"}
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               collection_actions=collections)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        response = test_app.get("/tweedles/custom_collection_action.json")
-
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
-
-    def test_resource_ext_for_nested_resource_custom_collection_action(self):
-        controller = self.ResourceExtensionController()
-        collections = {'custom_collection_action': "GET"}
-        parent = dict(collection_name='beetles', member_name='beetle')
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               collection_actions=collections,
-                                               parent=parent)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        response = test_app.get("/beetles/beetle_id"
-                                "/tweedles/custom_collection_action")
-
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
-
-    def test_resource_extension_with_custom_member_action_and_attr_map(self):
-        controller = self.ResourceExtensionController()
-        member = {'custom_member_action': "GET"}
-        params = {
-            'tweedles': {
-                'id': {'allow_post': False, 'allow_put': False,
-                       'validate': {'type:uuid': None},
-                       'is_visible': True},
-                'name': {'allow_post': True, 'allow_put': True,
-                         'validate': {'type:string': None},
-                         'default': '', 'is_visible': True},
-            }
-        }
-        res_ext = extensions.ResourceExtension('tweedles', controller,
-                                               member_actions=member,
-                                               attr_map=params)
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
-
-        response = test_app.get("/tweedles/some_id/custom_member_action")
-        self.assertEqual(200, response.status_int)
-        self.assertEqual(jsonutils.loads(response.body)['member_action'],
-                         "value")
-
-    def test_returns_404_for_non_existent_extension(self):
-        test_app = _setup_extensions_test_app(SimpleExtensionManager(None))
-
-        response = test_app.get("/non_existent_extension", status='*')
-
-        self.assertEqual(404, response.status_int)
-
-
-class ActionExtensionTest(base.BaseTestCase):
-
-    def setUp(self):
-        super(ActionExtensionTest, self).setUp()
-        self.extension_app = _setup_extensions_test_app()
-
-    def test_extended_action_for_adding_extra_data(self):
-        action_name = 'FOXNSOX:add_tweedle'
-        action_params = dict(name='Beetle')
-        req_body = jsonutils.dumps({action_name: action_params})
-        response = self.extension_app.post('/dummy_resources/1/action',
-                                           req_body,
-                                           content_type='application/json')
-        self.assertEqual(b"Tweedle Beetle Added.", response.body)
-
-    def test_extended_action_for_deleting_extra_data(self):
-        action_name = 'FOXNSOX:delete_tweedle'
-        action_params = dict(name='Bailey')
-        req_body = jsonutils.dumps({action_name: action_params})
-        response = self.extension_app.post("/dummy_resources/1/action",
-                                           req_body,
-                                           content_type='application/json')
-        self.assertEqual(b"Tweedle Bailey Deleted.", response.body)
-
-    def test_returns_404_for_non_existent_action(self):
-        non_existent_action = 'blah_action'
-        action_params = dict(name="test")
-        req_body = jsonutils.dumps({non_existent_action: action_params})
-
-        response = self.extension_app.post("/dummy_resources/1/action",
-                                           req_body,
-                                           content_type='application/json',
-                                           status='*')
-
-        self.assertEqual(404, response.status_int)
-
-    def test_returns_404_for_non_existent_resource(self):
-        action_name = 'add_tweedle'
-        action_params = dict(name='Beetle')
-        req_body = jsonutils.dumps({action_name: action_params})
-
-        response = self.extension_app.post("/asdf/1/action", req_body,
-                                           content_type='application/json',
-                                           status='*')
-        self.assertEqual(404, response.status_int)
-
-
-class RequestExtensionTest(base.BaseTestCase):
-
-    def test_headers_can_be_extended(self):
-        def extend_headers(req, res):
-            assert req.headers['X-NEW-REQUEST-HEADER'] == "sox"
-            res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data"
-            return res
-
-        app = self._setup_app_with_request_handler(extend_headers, 'GET')
-        response = app.get("/dummy_resources/1",
-                           headers={'X-NEW-REQUEST-HEADER': "sox"})
-
-        self.assertEqual(response.headers['X-NEW-RESPONSE-HEADER'],
-                         "response_header_data")
-
-    def test_extend_get_resource_response(self):
-        def extend_response_data(req, res):
-            data = jsonutils.loads(res.body)
-            data['FOXNSOX:extended_key'] = req.GET.get('extended_key')
-            res.body = jsonutils.dumps(data).encode('utf-8')
-            return res
-
-        app = self._setup_app_with_request_handler(extend_response_data, 'GET')
-        response = app.get("/dummy_resources/1?extended_key=extended_data")
-
-        self.assertEqual(200, response.status_int)
-        response_data = jsonutils.loads(response.body)
-        self.assertEqual('extended_data',
-                         response_data['FOXNSOX:extended_key'])
-        self.assertEqual('knox', response_data['fort'])
-
-    def test_get_resources(self):
-        app = _setup_extensions_test_app()
-
-        response = app.get("/dummy_resources/1?chewing=newblue")
-
-        response_data = jsonutils.loads(response.body)
-        self.assertEqual('newblue', response_data['FOXNSOX:googoose'])
-        self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands'])
-
-    def test_edit_previously_uneditable_field(self):
-
-        def _update_handler(req, res):
-            data = jsonutils.loads(res.body)
-            data['uneditable'] = req.params['uneditable']
-            res.body = jsonutils.dumps(data).encode('utf-8')
-            return res
-
-        base_app = webtest.TestApp(setup_base_app(self))
-        response = base_app.put("/dummy_resources/1",
-                                {'uneditable': "new_value"})
-        self.assertEqual(response.json['uneditable'], "original_value")
-
-        ext_app = self._setup_app_with_request_handler(_update_handler,
-                                                       'PUT')
-        ext_response = ext_app.put("/dummy_resources/1",
-                                   {'uneditable': "new_value"})
-        self.assertEqual(ext_response.json['uneditable'], "new_value")
-
-    def _setup_app_with_request_handler(self, handler, verb):
-        req_ext = extensions.RequestExtension(verb,
-                                              '/dummy_resources/:(id)',
-                                              handler)
-        manager = SimpleExtensionManager(None, None, req_ext)
-        return _setup_extensions_test_app(manager)
-
-
-class ExtensionManagerTest(base.BaseTestCase):
-
-    def test_invalid_extensions_are_not_registered(self):
-
-        class InvalidExtension(object):
-            """Invalid extension.
-
-            This Extension doesn't implement extension methods :
-            get_name, get_description and get_updated
-            """
-            def get_alias(self):
-                return "invalid_extension"
-
-        ext_mgr = extensions.ExtensionManager('')
-        ext_mgr.add_extension(InvalidExtension())
-        ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension"))
-
-        self.assertIn('valid_extension', ext_mgr.extensions)
-        self.assertNotIn('invalid_extension', ext_mgr.extensions)
-
-    def test_assignment_of_attr_map(self):
-        """Unit test for bug 1443342
-
-        In this bug, an extension that extended multiple resources with the
-        same dict would cause future extensions to inadvertently modify the
-        resources of all of the resources since they were referencing the same
-        dictionary.
-        """
-
-        class MultiResourceExtension(ext_stubs.StubExtension):
-            """Generated Extended Resources.
-
-            This extension's extended resource will assign
-            to more than one resource.
-            """
-
-            def get_extended_resources(self, version):
-                EXTENDED_TIMESTAMP = {
-                    'created_at': {'allow_post': False, 'allow_put': False,
-                                   'is_visible': True}}
-                EXTENDED_RESOURCES = ["ext1", "ext2"]
-                attrs = {}
-                for resources in EXTENDED_RESOURCES:
-                    attrs[resources] = EXTENDED_TIMESTAMP
-
-                return attrs
-
-        class AttrExtension(ext_stubs.StubExtension):
-            def get_extended_resources(self, version):
-                attrs = {
-                    self.alias: {
-                        '%s-attr' % self.alias: {'allow_post': False,
-                                                 'allow_put': False,
-                                                 'is_visible': True}}}
-                return attrs
-
-        ext_mgr = extensions.ExtensionManager('')
-        attr_map = {}
-        ext_mgr.add_extension(MultiResourceExtension('timestamp'))
-        ext_mgr.extend_resources("2.0", attr_map)
-        ext_mgr.add_extension(AttrExtension("ext1"))
-        ext_mgr.add_extension(AttrExtension("ext2"))
-        ext_mgr.extend_resources("2.0", attr_map)
-        self.assertIn('created_at', attr_map['ext2'])
-        self.assertIn('created_at', attr_map['ext1'])
-        # now we need to make sure the attrextensions didn't leak across
-        self.assertNotIn('ext1-attr', attr_map['ext2'])
-        self.assertNotIn('ext2-attr', attr_map['ext1'])
-
-
-class PluginAwareExtensionManagerTest(base.BaseTestCase):
-
-    def test_unsupported_extensions_are_not_loaded(self):
-        stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1", "e3"])
-        plugin_info = {constants.CORE: stub_plugin}
-        with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
-                        "check_if_plugin_extensions_loaded"):
-            ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
-
-            ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
-            ext_mgr.add_extension(ext_stubs.StubExtension("e2"))
-            ext_mgr.add_extension(ext_stubs.StubExtension("e3"))
-
-            self.assertIn("e1", ext_mgr.extensions)
-            self.assertNotIn("e2", ext_mgr.extensions)
-            self.assertIn("e3", ext_mgr.extensions)
-
-    def test_extensions_are_not_loaded_for_plugins_unaware_of_extensions(self):
-        class ExtensionUnawarePlugin(object):
-            """This plugin does not implement supports_extension method.
-
-            Extensions will not be loaded when this plugin is used.
-            """
-            pass
-
-        plugin_info = {constants.CORE: ExtensionUnawarePlugin()}
-        ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
-        ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
-
-        self.assertNotIn("e1", ext_mgr.extensions)
-
-    def test_extensions_not_loaded_for_plugin_without_expected_interface(self):
-
-        class PluginWithoutExpectedIface(object):
-            """Does not implement get_foo method as expected by extension."""
-            supported_extension_aliases = ["supported_extension"]
-
-        plugin_info = {constants.CORE: PluginWithoutExpectedIface()}
-        with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
-                        "check_if_plugin_extensions_loaded"):
-            ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
-            ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
-                "supported_extension"))
-
-            self.assertNotIn("e1", ext_mgr.extensions)
-
-    def test_extensions_are_loaded_for_plugin_with_expected_interface(self):
-
-        class PluginWithExpectedInterface(object):
-            """Implements get_foo method as expected by extension."""
-            supported_extension_aliases = ["supported_extension"]
-
-            def get_foo(self, bar=None):
-                pass
-
-        plugin_info = {constants.CORE: PluginWithExpectedInterface()}
-        with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
-                        "check_if_plugin_extensions_loaded"):
-            ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
-            ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
-                "supported_extension"))
-
-            self.assertIn("supported_extension", ext_mgr.extensions)
-
-    def test_extensions_expecting_neutron_plugin_interface_are_loaded(self):
-        class ExtensionForQuantumPluginInterface(ext_stubs.StubExtension):
-            """This extension does not implement get_plugin_interface.
-
-            It will work with any plugin implementing NeutronPluginBase.
-            """
-            pass
-        stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
-        plugin_info = {constants.CORE: stub_plugin}
-
-        with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
-                        "check_if_plugin_extensions_loaded"):
-            ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
-            ext_mgr.add_extension(ExtensionForQuantumPluginInterface("e1"))
-
-            self.assertIn("e1", ext_mgr.extensions)
-
-    def test_extensions_without_need_for_plugin_interface_are_loaded(self):
-        class ExtensionWithNoNeedForPluginInterface(ext_stubs.StubExtension):
-            """This Extension does not need any plugin interface.
-
-            This will work with any plugin implementing NeutronPluginBase
-            """
-            def get_plugin_interface(self):
-                return None
-
-        stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
-        plugin_info = {constants.CORE: stub_plugin}
-        with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
-                        "check_if_plugin_extensions_loaded"):
-            ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
-            ext_mgr.add_extension(ExtensionWithNoNeedForPluginInterface("e1"))
-
-            self.assertIn("e1", ext_mgr.extensions)
-
-    def test_extension_loaded_for_non_core_plugin(self):
-        class NonCorePluginExtension(ext_stubs.StubExtension):
-            def get_plugin_interface(self):
-                return None
-
-        stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
-        plugin_info = {constants.DUMMY: stub_plugin}
-        with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
-                        "check_if_plugin_extensions_loaded"):
-            ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
-            ext_mgr.add_extension(NonCorePluginExtension("e1"))
-
-            self.assertIn("e1", ext_mgr.extensions)
-
-    def test_unloaded_supported_extensions_raises_exception(self):
-        stub_plugin = ext_stubs.StubPlugin(
-            supported_extensions=["unloaded_extension"])
-        plugin_info = {constants.CORE: stub_plugin}
-        self.assertRaises(exceptions.ExtensionsNotFound,
-                          extensions.PluginAwareExtensionManager,
-                          '', plugin_info)
-
-
-class ExtensionControllerTest(testlib_api.WebTestCase):
-
-    def setUp(self):
-        super(ExtensionControllerTest, self).setUp()
-        self.test_app = _setup_extensions_test_app()
-
-    def test_index_gets_all_registered_extensions(self):
-        response = self.test_app.get("/extensions." + self.fmt)
-        res_body = self.deserialize(response)
-        foxnsox = res_body["extensions"][0]
-
-        self.assertEqual(foxnsox["alias"], "FOXNSOX")
-
-    def test_extension_can_be_accessed_by_alias(self):
-        response = self.test_app.get("/extensions/FOXNSOX." + self.fmt)
-        foxnsox_extension = self.deserialize(response)
-        foxnsox_extension = foxnsox_extension['extension']
-        self.assertEqual(foxnsox_extension["alias"], "FOXNSOX")
-
-    def test_show_returns_not_found_for_non_existent_extension(self):
-        response = self.test_app.get("/extensions/non_existent" + self.fmt,
-                                     status="*")
-
-        self.assertEqual(response.status_int, 404)
-
-
-def app_factory(global_conf, **local_conf):
-    conf = global_conf.copy()
-    conf.update(local_conf)
-    return ExtensionsTestApp(conf)
-
-
-def setup_base_app(test):
-    base.BaseTestCase.config_parse()
-    app = config.load_paste_app('extensions_test_app')
-    return app
-
-
-def setup_extensions_middleware(extension_manager=None):
-    extension_manager = (extension_manager or
-                         extensions.PluginAwareExtensionManager(
-                             extensions_path,
-                             {constants.CORE: FakePluginWithExtension()}))
-    base.BaseTestCase.config_parse()
-    app = config.load_paste_app('extensions_test_app')
-    return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager)
-
-
-def _setup_extensions_test_app(extension_manager=None):
-    return webtest.TestApp(setup_extensions_middleware(extension_manager))
-
-
-class SimpleExtensionManager(object):
-
-    def __init__(self, resource_ext=None, action_ext=None, request_ext=None):
-        self.resource_ext = resource_ext
-        self.action_ext = action_ext
-        self.request_ext = request_ext
-
-    def get_resources(self):
-        resource_exts = []
-        if self.resource_ext:
-            resource_exts.append(self.resource_ext)
-        return resource_exts
-
-    def get_actions(self):
-        action_exts = []
-        if self.action_ext:
-            action_exts.append(self.action_ext)
-        return action_exts
-
-    def get_request_extensions(self):
-        request_extensions = []
-        if self.request_ext:
-            request_extensions.append(self.request_ext)
-        return request_extensions
-
-
-class ExtensionExtendedAttributeTestPlugin(object):
-
-    supported_extension_aliases = [
-        'ext-obj-test', "extended-ext-attr"
-    ]
-
-    def __init__(self, configfile=None):
-        super(ExtensionExtendedAttributeTestPlugin, self).__init__()
-        self.objs = []
-        self.objh = {}
-
-    def create_ext_test_resource(self, context, ext_test_resource):
-        obj = ext_test_resource['ext_test_resource']
-        id = _uuid()
-        obj['id'] = id
-        self.objs.append(obj)
-        self.objh.update({id: obj})
-        return obj
-
-    def get_ext_test_resources(self, context, filters=None, fields=None):
-        return self.objs
-
-    def get_ext_test_resource(self, context, id, fields=None):
-        return self.objh[id]
-
-
-class ExtensionExtendedAttributeTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(ExtensionExtendedAttributeTestCase, self).setUp()
-        plugin = (
-            "neutron.tests.unit.api.test_extensions."
-            "ExtensionExtendedAttributeTestPlugin"
-        )
-
-        # point config file to: neutron/tests/etc/neutron.conf
-        self.config_parse()
-
-        self.setup_coreplugin(plugin)
-
-        ext_mgr = extensions.PluginAwareExtensionManager(
-            extensions_path,
-            {constants.CORE: ExtensionExtendedAttributeTestPlugin()}
-        )
-        ext_mgr.extend_resources("2.0", {})
-        extensions.PluginAwareExtensionManager._instance = ext_mgr
-
-        app = config.load_paste_app('extensions_test_app')
-        self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
-
-        self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1"
-        # Save the global RESOURCE_ATTRIBUTE_MAP
-        self.saved_attr_map = {}
-        for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
-            self.saved_attr_map[res] = attrs.copy()
-        # Add the resources to the global attribute map
-        # This is done here as the setup process won't
-        # initialize the main API router which extends
-        # the global attribute map
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            extattr.EXTENDED_ATTRIBUTES_2_0)
-        self.agentscheduler_dbMixin = manager.NeutronManager.get_plugin()
-        self.addCleanup(self.restore_attribute_map)
-
-        quota.QUOTAS._driver = None
-        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
-                              group='QUOTAS')
-
-    def restore_attribute_map(self):
-        # Restore the original RESOURCE_ATTRIBUTE_MAP
-        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
-
-    def _do_request(self, method, path, data=None, params=None, action=None):
-        content_type = 'application/json'
-        body = None
-        if data is not None:  # empty dict is valid
-            body = wsgi.Serializer().serialize(data, content_type)
-
-        req = testlib_api.create_request(
-            path, body, content_type,
-            method, query_string=params)
-        res = req.get_response(self._api)
-        if res.status_code >= 400:
-            raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
-        if res.status_code != webexc.HTTPNoContent.code:
-            return res.json
-
-    def _ext_test_resource_create(self, attr=None):
-        data = {
-            "ext_test_resource": {
-                "tenant_id": self._tenant_id,
-                "name": "test",
-                extattr.EXTENDED_ATTRIBUTE: attr
-            }
-        }
-
-        res = self._do_request('POST', _get_path('ext_test_resources'), data)
-        return res['ext_test_resource']
-
-    def test_ext_test_resource_create(self):
-        # First create without the extended attribute, then with it.
-        self._ext_test_resource_create()
-        attr = _uuid()
-        ext_test_resource = self._ext_test_resource_create(attr)
-        self.assertEqual(ext_test_resource[extattr.EXTENDED_ATTRIBUTE], attr)
-
-    def test_ext_test_resource_get(self):
-        attr = _uuid()
-        obj = self._ext_test_resource_create(attr)
-        obj_id = obj['id']
-        res = self._do_request('GET', _get_path(
-            'ext_test_resources/{0}'.format(obj_id)))
-        obj2 = res['ext_test_resource']
-        self.assertEqual(obj2[extattr.EXTENDED_ATTRIBUTE], attr)
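A minimal pure-Python sketch of the aliasing pitfall that
test_assignment_of_attr_map above guards against (bug 1443342): when one
attribute dict is assigned to several resources, a later mutation through any
one resource leaks into all of them, so the attribute map must be copied per
resource.

    timestamp = {'created_at': {'allow_post': False, 'allow_put': False}}

    shared = {'ext1': timestamp, 'ext2': timestamp}   # same dict twice
    shared['ext1']['ext1-attr'] = {'allow_post': False}
    assert 'ext1-attr' in shared['ext2']              # leaked across resources

    # Copying per resource keeps the attribute maps independent.
    timestamp = {'created_at': {'allow_post': False, 'allow_put': False}}
    safe = {res: dict(timestamp) for res in ('ext1', 'ext2')}
    safe['ext1']['ext1-attr'] = {'allow_post': False}
    assert 'ext1-attr' not in safe['ext2']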
diff --git a/neutron/tests/unit/api/v2/__init__.py b/neutron/tests/unit/api/v2/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/api/v2/test_attributes.py b/neutron/tests/unit/api/v2/test_attributes.py
deleted file mode 100644 (file)
index 198247a..0000000
+++ /dev/null
@@ -1,1076 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import string
-
-import mock
-import netaddr
-import testtools
-import webob.exc
-
-from oslo_utils import uuidutils
-
-from neutron._i18n import _
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.tests import base
-from neutron.tests import tools
-
-
-class TestAttributes(base.BaseTestCase):
-
-    def _construct_dict_and_constraints(self):
-        """Constructs a test dictionary and a definition of constraints.
-        :return: A (dictionary, constraint) tuple
-        """
-        constraints = {'key1': {'type:values': ['val1', 'val2'],
-                                'required': True},
-                       'key2': {'type:string': None,
-                                'required': False},
-                       'key3': {'type:dict': {'k4': {'type:string': None,
-                                                     'required': True}},
-                                'required': True}}
-
-        dictionary = {'key1': 'val1',
-                      'key2': 'a string value',
-                      'key3': {'k4': 'a string value'}}
-
-        return dictionary, constraints
-
-    def test_is_attr_set(self):
-        data = attributes.ATTR_NOT_SPECIFIED
-        self.assertIs(attributes.is_attr_set(data), False)
-
-        data = None
-        self.assertIs(attributes.is_attr_set(data), False)
-
-        data = "I'm set"
-        self.assertIs(attributes.is_attr_set(data), True)
-
-    def test_validate_values(self):
-        msg = attributes._validate_values(4, [4, 6])
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_values(4, (4, 6))
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_values(7, [4, 6])
-        self.assertEqual("'7' is not in [4, 6]", msg)
-
-        msg = attributes._validate_values(7, (4, 6))
-        self.assertEqual("'7' is not in (4, 6)", msg)
-
-    def test_validate_not_empty_string(self):
-        msg = attributes._validate_not_empty_string('    ', None)
-        self.assertEqual(u"'    ' Blank strings are not permitted", msg)
-
-    def test_validate_not_empty_string_or_none(self):
-        msg = attributes._validate_not_empty_string_or_none('    ', None)
-        self.assertEqual(u"'    ' Blank strings are not permitted", msg)
-
-        msg = attributes._validate_not_empty_string_or_none(None, None)
-        self.assertIsNone(msg)
-
-    def test_validate_string_or_none(self):
-        msg = attributes._validate_not_empty_string_or_none('test', None)
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_not_empty_string_or_none(None, None)
-        self.assertIsNone(msg)
-
-    def test_validate_string(self):
-        msg = attributes._validate_string(None, None)
-        self.assertEqual("'None' is not a valid string", msg)
-
-        # 0 == len(data) == max_len
-        msg = attributes._validate_string("", 0)
-        self.assertIsNone(msg)
-
-        # 0 == len(data) < max_len
-        msg = attributes._validate_string("", 9)
-        self.assertIsNone(msg)
-
-        # 0 < len(data) < max_len
-        msg = attributes._validate_string("123456789", 10)
-        self.assertIsNone(msg)
-
-        # 0 < len(data) == max_len
-        msg = attributes._validate_string("123456789", 9)
-        self.assertIsNone(msg)
-
-        # 0 < max_len < len(data)
-        msg = attributes._validate_string("1234567890", 9)
-        self.assertEqual("'1234567890' exceeds maximum length of 9", msg)
-
-        msg = attributes._validate_string("123456789", None)
-        self.assertIsNone(msg)
-
-    def test_validate_list_of_unique_strings(self):
-        data = "TEST"
-        msg = attributes.validate_list_of_unique_strings(data, None)
-        self.assertEqual("'TEST' is not a list", msg)
-
-        data = ["TEST01", "TEST02", "TEST01"]
-        msg = attributes.validate_list_of_unique_strings(data, None)
-        self.assertEqual(
-            "Duplicate items in the list: 'TEST01, TEST02, TEST01'", msg)
-
-        data = ["12345678", "123456789"]
-        msg = attributes.validate_list_of_unique_strings(data, 8)
-        self.assertEqual("'123456789' exceeds maximum length of 8", msg)
-
-        data = ["TEST01", "TEST02", "TEST03"]
-        msg = attributes.validate_list_of_unique_strings(data, None)
-        self.assertIsNone(msg)
-
-    def test_validate_no_whitespace(self):
-        data = 'no_white_space'
-        result = attributes._validate_no_whitespace(data)
-        self.assertEqual(data, result)
-
-        self.assertRaises(n_exc.InvalidInput,
-                          attributes._validate_no_whitespace,
-                          'i have whitespace')
-
-        self.assertRaises(n_exc.InvalidInput,
-                          attributes._validate_no_whitespace,
-                          'i\thave\twhitespace')
-
-        for ws in string.whitespace:
-            self.assertRaises(n_exc.InvalidInput,
-                              attributes._validate_no_whitespace,
-                              '%swhitespace-at-head' % ws)
-            self.assertRaises(n_exc.InvalidInput,
-                              attributes._validate_no_whitespace,
-                              'whitespace-at-tail%s' % ws)
-
-    def test_validate_range(self):
-        msg = attributes._validate_range(1, [1, 9])
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_range(5, [1, 9])
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_range(9, [1, 9])
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_range(1, (1, 9))
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_range(5, (1, 9))
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_range(9, (1, 9))
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_range(0, [1, 9])
-        self.assertEqual("'0' is too small - must be at least '1'", msg)
-
-        msg = attributes._validate_range(10, (1, 9))
-        self.assertEqual("'10' is too large - must be no larger than '9'", msg)
-
-        msg = attributes._validate_range("bogus", (1, 9))
-        self.assertEqual("'bogus' is not an integer", msg)
-
-        msg = attributes._validate_range(10, (attributes.UNLIMITED,
-                                              attributes.UNLIMITED))
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_range(10, (1, attributes.UNLIMITED))
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_range(1, (attributes.UNLIMITED, 9))
-        self.assertIsNone(msg)
-
-        msg = attributes._validate_range(-1, (0, attributes.UNLIMITED))
-        self.assertEqual("'-1' is too small - must be at least '0'", msg)
-
-        msg = attributes._validate_range(10, (attributes.UNLIMITED, 9))
-        self.assertEqual("'10' is too large - must be no larger than '9'", msg)
-
-    def _test_validate_mac_address(self, validator, allow_none=False):
-        mac_addr = "ff:16:3e:4f:00:00"
-        msg = validator(mac_addr)
-        self.assertIsNone(msg)
-
-        mac_addr = "ffa:16:3e:4f:00:00"
-        msg = validator(mac_addr)
-        err_msg = "'%s' is not a valid MAC address"
-        self.assertEqual(err_msg % mac_addr, msg)
-
-        for invalid_mac_addr in constants.INVALID_MAC_ADDRESSES:
-            msg = validator(invalid_mac_addr)
-            self.assertEqual(err_msg % invalid_mac_addr, msg)
-
-        mac_addr = "123"
-        msg = validator(mac_addr)
-        self.assertEqual(err_msg % mac_addr, msg)
-
-        mac_addr = None
-        msg = validator(mac_addr)
-        if allow_none:
-            self.assertIsNone(msg)
-        else:
-            self.assertEqual(err_msg % mac_addr, msg)
-
-        mac_addr = "ff:16:3e:4f:00:00\r"
-        msg = validator(mac_addr)
-        self.assertEqual(err_msg % mac_addr, msg)
-
-    def test_validate_mac_address(self):
-        self._test_validate_mac_address(attributes._validate_mac_address)
-
-    def test_validate_mac_address_or_none(self):
-        self._test_validate_mac_address(
-            attributes._validate_mac_address_or_none, allow_none=True)
-
-    def test_validate_ip_address(self):
-        ip_addr = '1.1.1.1'
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertIsNone(msg)
-
-        ip_addr = '1111.1.1.1'
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg)
-
-        # Depending on the platform running the UTs, this case may or may
-        # not be equivalent to test_validate_ip_address_bsd.
-        ip_addr = '1' * 59
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg)
-
-        ip_addr = '1.1.1.1 has whitespace'
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg)
-
-        ip_addr = '111.1.1.1\twhitespace'
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg)
-
-        ip_addr = '111.1.1.1\nwhitespace'
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg)
-
-        for ws in string.whitespace:
-            ip_addr = '%s111.1.1.1' % ws
-            msg = attributes._validate_ip_address(ip_addr)
-            self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg)
-
-        for ws in string.whitespace:
-            ip_addr = '111.1.1.1%s' % ws
-            msg = attributes._validate_ip_address(ip_addr)
-            self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg)
-
-    def test_validate_ip_address_with_leading_zero(self):
-        ip_addr = '1.1.1.01'
-        expected_msg = ("'%(data)s' is not an accepted IP address, "
-                        "'%(ip)s' is recommended")
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertEqual(expected_msg % {"data": ip_addr, "ip": '1.1.1.1'},
-                         msg)
-
-        ip_addr = '1.1.1.011'
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertEqual(expected_msg % {"data": ip_addr, "ip": '1.1.1.11'},
-                         msg)
-
-        ip_addr = '1.1.1.09'
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertEqual(expected_msg % {"data": ip_addr, "ip": '1.1.1.9'},
-                         msg)
-
-        ip_addr = "fe80:0:0:0:0:0:0:0001"
-        msg = attributes._validate_ip_address(ip_addr)
-        self.assertIsNone(msg)
-
-    def test_validate_ip_address_bsd(self):
-        # NOTE(yamamoto): On NetBSD and OS X, netaddr.IPAddress() accepts
-        # '1' * 59 as a valid address.  The behaviour is inherited from the
-        # libc behaviour there.  This test ensures that our validator rejects
-        # such addresses on such platforms by mocking netaddr to emulate
-        # that behaviour.
-        ip_addr = '1' * 59
-        with mock.patch('netaddr.IPAddress') as ip_address_cls:
-            msg = attributes._validate_ip_address(ip_addr)
-        ip_address_cls.assert_called_once_with(ip_addr,
-                                               flags=netaddr.core.ZEROFILL)
-        self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg)
-
-    def test_validate_ip_pools(self):
-        pools = [[{'end': '10.0.0.254'}],
-                 [{'start': '10.0.0.254'}],
-                 [{'start': '1000.0.0.254',
-                   'end': '1.1.1.1'}],
-                 [{'start': '10.0.0.2', 'end': '10.0.0.254',
-                   'forza': 'juve'}],
-                 [{'start': '10.0.0.2', 'end': '10.0.0.254'},
-                  {'end': '10.0.0.254'}],
-                 [None],
-                 None]
-        for pool in pools:
-            msg = attributes._validate_ip_pools(pool)
-            self.assertIsNotNone(msg)
-
-        pools = [[{'end': '10.0.0.254', 'start': '10.0.0.2'},
-                  {'start': '11.0.0.2', 'end': '11.1.1.1'}],
-                 [{'start': '11.0.0.2', 'end': '11.0.0.100'}]]
-        for pool in pools:
-            msg = attributes._validate_ip_pools(pool)
-            self.assertIsNone(msg)
-
-        invalid_ip = '10.0.0.2\r'
-        pools = [[{'end': '10.0.0.254', 'start': invalid_ip}]]
-        for pool in pools:
-            msg = attributes._validate_ip_pools(pool)
-            self.assertEqual("'%s' is not a valid IP address" % invalid_ip,
-                             msg)
-
-    def test_validate_fixed_ips(self):
-        fixed_ips = [
-            {'data': [{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
-                       'ip_address': '1111.1.1.1'}],
-             'error_msg': "'1111.1.1.1' is not a valid IP address"},
-            {'data': [{'subnet_id': 'invalid',
-                       'ip_address': '1.1.1.1'}],
-             'error_msg': "'invalid' is not a valid UUID"},
-            {'data': None,
-             'error_msg': "Invalid data format for fixed IP: 'None'"},
-            {'data': "1.1.1.1",
-             'error_msg': "Invalid data format for fixed IP: '1.1.1.1'"},
-            {'data': ['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1'],
-             'error_msg': "Invalid data format for fixed IP: "
-                          "'00000000-ffff-ffff-ffff-000000000000'"},
-            {'data': [['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1']],
-             'error_msg': "Invalid data format for fixed IP: "
-                          "'['00000000-ffff-ffff-ffff-000000000000', "
-                          "'1.1.1.1']'"},
-            {'data': [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000',
-                       'ip_address': '1.1.1.1'},
-                      {'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
-                       'ip_address': '1.1.1.1'}],
-             'error_msg': "Duplicate IP address '1.1.1.1'"}]
-        for fixed in fixed_ips:
-            msg = attributes._validate_fixed_ips(fixed['data'])
-            self.assertEqual(fixed['error_msg'], msg)
-
-        fixed_ips = [[{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
-                       'ip_address': '1.1.1.1'}],
-                     [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000',
-                       'ip_address': '1.1.1.1'},
-                      {'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
-                       'ip_address': '1.1.1.2'}]]
-        for fixed in fixed_ips:
-            msg = attributes._validate_fixed_ips(fixed)
-            self.assertIsNone(msg)
-
-    def test_validate_nameservers(self):
-        ns_pools = [['1.1.1.2', '1.1.1.2'],
-                    ['www.hostname.com', 'www.hostname.com'],
-                    ['1000.0.0.1'],
-                    ['www.hostname.com'],
-                    ['www.great.marathons.to.travel'],
-                    ['valid'],
-                    ['77.hostname.com'],
-                    ['1' * 59],
-                    ['www.internal.hostname.com'],
-                    None]
-
-        for ns in ns_pools:
-            msg = attributes._validate_nameservers(ns, None)
-            self.assertIsNotNone(msg)
-
-        ns_pools = [['100.0.0.2'],
-                    ['1.1.1.1', '1.1.1.2']]
-
-        for ns in ns_pools:
-            msg = attributes._validate_nameservers(ns, None)
-            self.assertIsNone(msg)
-
-    def test_validate_hostroutes(self):
-        hostroute_pools = [[{'destination': '100.0.0.0/24'}],
-                           [{'nexthop': '10.0.2.20'}],
-                           [{'nexthop': '10.0.2.20',
-                             'forza': 'juve',
-                             'destination': '100.0.0.0/8'}],
-                           [{'nexthop': '1110.0.2.20',
-                             'destination': '100.0.0.0/8'}],
-                           [{'nexthop': '10.0.2.20',
-                             'destination': '100.0.0.0'}],
-                           [{'nexthop': '10.0.2.20',
-                             'destination': '100.0.0.0/8'},
-                            {'nexthop': '10.0.2.20',
-                             'destination': '100.0.0.0/8'}],
-                           [None],
-                           None]
-        for host_routes in hostroute_pools:
-            msg = attributes._validate_hostroutes(host_routes, None)
-            self.assertIsNotNone(msg)
-
-        hostroute_pools = [[{'destination': '100.0.0.0/24',
-                             'nexthop': '10.0.2.20'}],
-                           [{'nexthop': '10.0.2.20',
-                             'destination': '100.0.0.0/8'},
-                            {'nexthop': '10.0.2.20',
-                             'destination': '101.0.0.0/8'}]]
-        for host_routes in hostroute_pools:
-            msg = attributes._validate_hostroutes(host_routes, None)
-            self.assertIsNone(msg)
-
-    def test_validate_ip_address_or_none(self):
-        ip_addr = None
-        msg = attributes._validate_ip_address_or_none(ip_addr)
-        self.assertIsNone(msg)
-
-        ip_addr = '1.1.1.1'
-        msg = attributes._validate_ip_address_or_none(ip_addr)
-        self.assertIsNone(msg)
-
-        ip_addr = '1111.1.1.1'
-        msg = attributes._validate_ip_address_or_none(ip_addr)
-        self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg)
-
-    def test_uuid_pattern(self):
-        data = 'garbage'
-        msg = attributes._validate_regex(data, attributes.UUID_PATTERN)
-        self.assertIsNotNone(msg)
-
-        data = '00000000-ffff-ffff-ffff-000000000000'
-        msg = attributes._validate_regex(data, attributes.UUID_PATTERN)
-        self.assertIsNone(msg)
-
-    def test_mac_pattern(self):
-        # Valid - 3 octets
-        base_mac = "fa:16:3e:00:00:00"
-        msg = attributes._validate_regex(base_mac,
-                                         attributes.MAC_PATTERN)
-        self.assertIsNone(msg)
-
-        # Valid - 4 octets
-        base_mac = "fa:16:3e:4f:00:00"
-        msg = attributes._validate_regex(base_mac,
-                                         attributes.MAC_PATTERN)
-        self.assertIsNone(msg)
-
-        # Invalid - not unicast
-        base_mac = "01:16:3e:4f:00:00"
-        msg = attributes._validate_regex(base_mac,
-                                         attributes.MAC_PATTERN)
-        self.assertIsNotNone(msg)
-
-        # Invalid - invalid format
-        base_mac = "a:16:3e:4f:00:00"
-        msg = attributes._validate_regex(base_mac,
-                                         attributes.MAC_PATTERN)
-        self.assertIsNotNone(msg)
-
-        # Invalid - invalid format
-        base_mac = "ffa:16:3e:4f:00:00"
-        msg = attributes._validate_regex(base_mac,
-                                         attributes.MAC_PATTERN)
-        self.assertIsNotNone(msg)
-
-        # Invalid - invalid format
-        base_mac = "01163e4f0000"
-        msg = attributes._validate_regex(base_mac,
-                                         attributes.MAC_PATTERN)
-        self.assertIsNotNone(msg)
-
-        # Invalid - invalid format
-        base_mac = "01-16-3e-4f-00-00"
-        msg = attributes._validate_regex(base_mac,
-                                         attributes.MAC_PATTERN)
-        self.assertIsNotNone(msg)
-
-        # Invalid - invalid format
-        base_mac = "00:16:3:f:00:00"
-        msg = attributes._validate_regex(base_mac,
-                                         attributes.MAC_PATTERN)
-        self.assertIsNotNone(msg)
-
-        # Invalid - invalid format
-        base_mac = "12:3:4:5:67:89ab"
-        msg = attributes._validate_regex(base_mac,
-                                         attributes.MAC_PATTERN)
-        self.assertIsNotNone(msg)
-
-    def _test_validate_subnet(self, validator, allow_none=False):
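-        # Drives a subnet validator through valid and invalid CIDRs;
-        # allow_none controls whether None is accepted as input.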
-        # Valid - IPv4
-        cidr = "10.0.2.0/24"
-        msg = validator(cidr, None)
-        self.assertIsNone(msg)
-
-        # Valid - IPv6 without final octets
-        cidr = "fe80::/24"
-        msg = validator(cidr, None)
-        self.assertIsNone(msg)
-
-        # Valid - IPv6 with final octets
-        cidr = "fe80::0/24"
-        msg = validator(cidr, None)
-        self.assertIsNone(msg)
-
-        # Valid - uncompressed IPv6 address
-        cidr = "fe80:0:0:0:0:0:0:0/128"
-        msg = validator(cidr, None)
-        self.assertIsNone(msg)
-
-        # Valid - IPv6 address with multiple consecutive zeros
-        cidr = "2001:0db8:0:0:1::1/128"
-        msg = validator(cidr, None)
-        self.assertIsNone(msg)
-
-        # Valid - IPv6 address with multiple consecutive zeros
-        cidr = "2001:0db8::1:0:0:1/128"
-        msg = validator(cidr, None)
-        self.assertIsNone(msg)
-
-        # Valid - IPv6 address with multiple consecutive zeros
-        cidr = "2001::0:1:0:0:1100/120"
-        msg = validator(cidr, None)
-        self.assertIsNone(msg)
-
-        # Invalid - abbreviated IPv4 address
-        cidr = "10/24"
-        msg = validator(cidr, None)
-        error = _("'%(data)s' isn't a recognized IP subnet cidr,"
-                  " '%(cidr)s' is recommended") % {"data": cidr,
-                                                   "cidr": "10.0.0.0/24"}
-        self.assertEqual(error, msg)
-
-        # Invalid - IPv4 missing mask
-        cidr = "10.0.2.0"
-        msg = validator(cidr, None)
-        error = _("'%(data)s' isn't a recognized IP subnet cidr,"
-                  " '%(cidr)s' is recommended") % {"data": cidr,
-                                                   "cidr": "10.0.2.0/32"}
-        self.assertEqual(error, msg)
-
-        # Valid - IPv4 with non-zero masked bits is ok
-        for i in range(1, 255):
-            cidr = "192.168.1.%s/24" % i
-            msg = validator(cidr, None)
-            self.assertIsNone(msg)
-
-        # Invalid - IPv6 without final octets, missing mask
-        cidr = "fe80::"
-        msg = validator(cidr, None)
-        error = _("'%(data)s' isn't a recognized IP subnet cidr,"
-                  " '%(cidr)s' is recommended") % {"data": cidr,
-                                                   "cidr": "fe80::/128"}
-        self.assertEqual(error, msg)
-
-        # Invalid - IPv6 with final octets, missing mask
-        cidr = "fe80::0"
-        msg = validator(cidr, None)
-        error = _("'%(data)s' isn't a recognized IP subnet cidr,"
-                  " '%(cidr)s' is recommended") % {"data": cidr,
-                                                   "cidr": "fe80::/128"}
-        self.assertEqual(error, msg)
-
-        # Invalid - Address format error
-        cidr = 'invalid'
-        msg = validator(cidr, None)
-        error = "'%s' is not a valid IP subnet" % cidr
-        self.assertEqual(error, msg)
-
-        cidr = None
-        msg = validator(cidr, None)
-        if allow_none:
-            self.assertIsNone(msg)
-        else:
-            error = "'%s' is not a valid IP subnet" % cidr
-            self.assertEqual(error, msg)
-
-        # Invalid - IPv4 with trailing CR
-        cidr = "10.0.2.0/24\r"
-        msg = validator(cidr, None)
-        error = "'%s' is not a valid IP subnet" % cidr
-        self.assertEqual(error, msg)
-
-    def test_validate_subnet(self):
-        self._test_validate_subnet(attributes._validate_subnet)
-
-    def test_validate_subnet_or_none(self):
-        self._test_validate_subnet(attributes._validate_subnet_or_none,
-                                   allow_none=True)
-
-    def _test_validate_regex(self, validator, allow_none=False):
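-        # The pattern '[hc]at' matches 'hat' and 'cat' but not 'bat';
-        # allow_none controls whether None passes validation.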
-        pattern = '[hc]at'
-
-        data = None
-        msg = validator(data, pattern)
-        if allow_none:
-            self.assertIsNone(msg)
-        else:
-            self.assertEqual("'None' is not a valid input", msg)
-
-        data = 'bat'
-        msg = validator(data, pattern)
-        self.assertEqual("'%s' is not a valid input" % data, msg)
-
-        data = 'hat'
-        msg = validator(data, pattern)
-        self.assertIsNone(msg)
-
-        data = 'cat'
-        msg = validator(data, pattern)
-        self.assertIsNone(msg)
-
-    def test_validate_regex(self):
-        self._test_validate_regex(attributes._validate_regex)
-
-    def test_validate_regex_or_none(self):
-        self._test_validate_regex(attributes._validate_regex_or_none,
-                                  allow_none=True)
-
-    def test_validate_uuid(self):
-        invalid_uuids = [None,
-                         123,
-                         '123',
-                         't5069610-744b-42a7-8bd8-ceac1a229cd4',
-                         'e5069610-744bb-42a7-8bd8-ceac1a229cd4']
-        for uuid in invalid_uuids:
-            msg = attributes._validate_uuid(uuid)
-            error = "'%s' is not a valid UUID" % uuid
-            self.assertEqual(error, msg)
-
-        msg = attributes._validate_uuid('00000000-ffff-ffff-ffff-000000000000')
-        self.assertIsNone(msg)
-
-    def test__validate_list_of_items(self):
-        # check not a list
-        items = [None,
-                 123,
-                 'e5069610-744b-42a7-8bd8-ceac1a229cd4',
-                 '12345678123456781234567812345678',
-                 {'uuid': 'e5069610-744b-42a7-8bd8-ceac1a229cd4'}]
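-        # mock.Mock() stands in as the per-item validator; it is never invoked
-        # here because non-list input fails the type check first.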
-        for item in items:
-            msg = attributes._validate_list_of_items(mock.Mock(), item)
-            error = "'%s' is not a list" % item
-            self.assertEqual(error, msg)
-
-        # check duplicate items in a list
-        duplicate_items = ['e5069610-744b-42a7-8bd8-ceac1a229cd4',
-                           'f3eeab00-8367-4524-b662-55e64d4cacb5',
-                           'e5069610-744b-42a7-8bd8-ceac1a229cd4']
-        msg = attributes._validate_list_of_items(mock.Mock(), duplicate_items)
-        error = ("Duplicate items in the list: "
-                 "'%s'" % ', '.join(duplicate_items))
-        self.assertEqual(error, msg)
-
-        # check valid lists
-        valid_lists = [[],
-                       [1, 2, 3],
-                       ['a', 'b', 'c']]
-        for list_obj in valid_lists:
-            msg = attributes._validate_list_of_items(
-                mock.Mock(return_value=None), list_obj)
-            self.assertIsNone(msg)
-
-    def test_validate_dict_type(self):
-        for value in (None, True, '1', []):
-            self.assertEqual("'%s' is not a dictionary" % value,
-                             attributes._validate_dict(value))
-
-    def test_validate_dict_without_constraints(self):
-        msg = attributes._validate_dict({})
-        self.assertIsNone(msg)
-
-        # A non-empty dictionary is also valid when no constraints are given.
-        msg = attributes._validate_dict({'key': 'value'})
-        self.assertIsNone(msg)
-
-    def test_validate_a_valid_dict_with_constraints(self):
-        dictionary, constraints = self._construct_dict_and_constraints()
-
-        msg = attributes._validate_dict(dictionary, constraints)
-        self.assertIsNone(msg, 'Validation of a valid dictionary failed.')
-
-    def test_validate_dict_with_invalid_validator(self):
-        dictionary, constraints = self._construct_dict_and_constraints()
-
-        constraints['key1'] = {'type:unsupported': None, 'required': True}
-        msg = attributes._validate_dict(dictionary, constraints)
-        self.assertEqual("Validator 'type:unsupported' does not exist.", msg)
-
-    def test_validate_dict_not_required_keys(self):
-        dictionary, constraints = self._construct_dict_and_constraints()
-
-        del dictionary['key2']
-        msg = attributes._validate_dict(dictionary, constraints)
-        self.assertIsNone(msg, 'Field that was not required by the specs was '
-                               'required by the validator.')
-
-    def test_validate_dict_required_keys(self):
-        dictionary, constraints = self._construct_dict_and_constraints()
-
-        del dictionary['key1']
-        msg = attributes._validate_dict(dictionary, constraints)
-        self.assertIn('Expected keys:', msg)
-
-    def test_validate_dict_wrong_values(self):
-        dictionary, constraints = self._construct_dict_and_constraints()
-
-        dictionary['key1'] = 'UNSUPPORTED'
-        msg = attributes._validate_dict(dictionary, constraints)
-        self.assertIsNotNone(msg)
-
-    def test_validate_dict_convert_boolean(self):
-        dictionary, constraints = self._construct_dict_and_constraints()
-
-        constraints['key_bool'] = {
-            'type:boolean': None,
-            'required': False,
-            'convert_to': attributes.convert_to_boolean}
-        dictionary['key_bool'] = 'true'
-        msg = attributes._validate_dict(dictionary, constraints)
-        self.assertIsNone(msg)
-        # Compare explicitly with the literal True: assertTrue would
-        # also pass for the truthy string 'true'.
-        self.assertIs(True, dictionary['key_bool'])
-
-    def test_subdictionary(self):
-        dictionary, constraints = self._construct_dict_and_constraints()
-
-        del dictionary['key3']['k4']
-        dictionary['key3']['k5'] = 'a string value'
-        msg = attributes._validate_dict(dictionary, constraints)
-        self.assertIn('Expected keys:', msg)
-
-    def test_validate_dict_or_none(self):
-        dictionary, constraints = self._construct_dict_and_constraints()
-
-        # Check whether None is a valid value.
-        msg = attributes._validate_dict_or_none(None, constraints)
-        self.assertIsNone(msg, 'Validation of a None dictionary failed.')
-
-        # Check validation of a regular dictionary.
-        msg = attributes._validate_dict_or_none(dictionary, constraints)
-        self.assertIsNone(msg, 'Validation of a valid dictionary failed.')
-
-    def test_validate_dict_or_empty(self):
-        dictionary, constraints = self._construct_dict_and_constraints()
-
-        # Check whether an empty dictionary is valid.
-        msg = attributes._validate_dict_or_empty({}, constraints)
-        self.assertIsNone(msg, 'Validation of an empty dictionary failed.')
-
-        # Check validation of a regular dictionary.
-        msg = attributes._validate_dict_or_empty(dictionary, constraints)
-        self.assertIsNone(msg, 'Validation of a valid dictionary failed.')
-
-    def test_validate_non_negative(self):
-        for value in (-1, '-2'):
-            self.assertEqual("'%s' should be non-negative" % value,
-                             attributes._validate_non_negative(value))
-
-        for value in (0, 1, '2', True, False):
-            msg = attributes._validate_non_negative(value)
-            self.assertIsNone(msg)
-
-
-class TestConvertToBoolean(base.BaseTestCase):
-
-    def test_convert_to_boolean_bool(self):
-        self.assertIs(attributes.convert_to_boolean(True), True)
-        self.assertIs(attributes.convert_to_boolean(False), False)
-
-    def test_convert_to_boolean_int(self):
-        self.assertIs(attributes.convert_to_boolean(0), False)
-        self.assertIs(attributes.convert_to_boolean(1), True)
-        self.assertRaises(n_exc.InvalidInput,
-                          attributes.convert_to_boolean,
-                          7)
-
-    def test_convert_to_boolean_str(self):
-        self.assertIs(attributes.convert_to_boolean('True'), True)
-        self.assertIs(attributes.convert_to_boolean('true'), True)
-        self.assertIs(attributes.convert_to_boolean('False'), False)
-        self.assertIs(attributes.convert_to_boolean('false'), False)
-        self.assertIs(attributes.convert_to_boolean('0'), False)
-        self.assertIs(attributes.convert_to_boolean('1'), True)
-        self.assertRaises(n_exc.InvalidInput,
-                          attributes.convert_to_boolean,
-                          '7')
-
-
-class TestConvertToInt(base.BaseTestCase):
-
-    def test_convert_to_int_int(self):
-        self.assertEqual(-1, attributes.convert_to_int(-1))
-        self.assertEqual(0, attributes.convert_to_int(0))
-        self.assertEqual(1, attributes.convert_to_int(1))
-
-    def test_convert_to_int_if_not_none(self):
-        self.assertEqual(-1, attributes.convert_to_int_if_not_none(-1))
-        self.assertEqual(0, attributes.convert_to_int_if_not_none(0))
-        self.assertEqual(1, attributes.convert_to_int_if_not_none(1))
-        self.assertIsNone(attributes.convert_to_int_if_not_none(None))
-
-    def test_convert_to_int_str(self):
-        self.assertEqual(4, attributes.convert_to_int('4'))
-        self.assertEqual(6, attributes.convert_to_int('6'))
-        self.assertRaises(n_exc.InvalidInput,
-                          attributes.convert_to_int,
-                          'garbage')
-
-    def test_convert_to_int_none(self):
-        self.assertRaises(n_exc.InvalidInput,
-                          attributes.convert_to_int,
-                          None)
-
-    def test_convert_none_to_empty_list_none(self):
-        self.assertEqual([], attributes.convert_none_to_empty_list(None))
-
-    def test_convert_none_to_empty_dict(self):
-        self.assertEqual({}, attributes.convert_none_to_empty_dict(None))
-
-    def test_convert_none_to_empty_list_value(self):
-        values = ['1', 3, [], [1], {}, {'a': 3}]
-        for value in values:
-            self.assertEqual(
-                value, attributes.convert_none_to_empty_list(value))
-
-
-class TestConvertToFloat(base.BaseTestCase):
-    # NOTE: the routine being tested here is a plugin-specific extension
-    # module. As the plugin split proceeds towards its second phase, this
-    # test should either be removed, or the validation routine moved into
-    # neutron.api.v2.attributes.
-
-    def test_convert_to_float_positive_value(self):
-        self.assertEqual(
-            1.111, attributes.convert_to_positive_float_or_none(1.111))
-        self.assertEqual(1, attributes.convert_to_positive_float_or_none(1))
-        self.assertEqual(0, attributes.convert_to_positive_float_or_none(0))
-
-    def test_convert_to_float_negative_value(self):
-        self.assertRaises(n_exc.InvalidInput,
-                          attributes.convert_to_positive_float_or_none,
-                          -1.11)
-
-    def test_convert_to_float_string(self):
-        self.assertEqual(4, attributes.convert_to_positive_float_or_none('4'))
-        self.assertEqual(
-            4.44, attributes.convert_to_positive_float_or_none('4.44'))
-        self.assertRaises(n_exc.InvalidInput,
-                          attributes.convert_to_positive_float_or_none,
-                          'garbage')
-
-    def test_convert_to_float_none_value(self):
-        self.assertIsNone(attributes.convert_to_positive_float_or_none(None))
-
-
-class TestConvertKvp(base.BaseTestCase):
-
-    def test_convert_kvp_list_to_dict_succeeds_for_missing_values(self):
-        result = attributes.convert_kvp_list_to_dict(['True'])
-        self.assertEqual({}, result)
-
-    def test_convert_kvp_list_to_dict_succeeds_for_multiple_values(self):
-        result = attributes.convert_kvp_list_to_dict(
-            ['a=b', 'a=c', 'a=c', 'b=a'])
-        expected = {'a': tools.UnorderedList(['c', 'b']), 'b': ['a']}
-        self.assertEqual(expected, result)
-
-    def test_convert_kvp_list_to_dict_succeeds_for_values(self):
-        result = attributes.convert_kvp_list_to_dict(['a=b', 'c=d'])
-        self.assertEqual({'a': ['b'], 'c': ['d']}, result)
-
-    def test_convert_kvp_str_to_list_fails_for_missing_key(self):
-        with testtools.ExpectedException(n_exc.InvalidInput):
-            attributes.convert_kvp_str_to_list('=a')
-
-    def test_convert_kvp_str_to_list_fails_for_missing_equals(self):
-        with testtools.ExpectedException(n_exc.InvalidInput):
-            attributes.convert_kvp_str_to_list('a')
-
-    def test_convert_kvp_str_to_list_succeeds_for_one_equals(self):
-        result = attributes.convert_kvp_str_to_list('a=')
-        self.assertEqual(['a', ''], result)
-
-    def test_convert_kvp_str_to_list_succeeds_for_two_equals(self):
-        result = attributes.convert_kvp_str_to_list('a=a=a')
-        self.assertEqual(['a', 'a=a'], result)
-
-
-class TestConvertToList(base.BaseTestCase):
-
-    def test_convert_to_empty_list(self):
-        for item in (None, [], (), {}):
-            self.assertEqual([], attributes.convert_to_list(item))
-
-    def test_convert_to_list_string(self):
-        for item in ('', 'foo'):
-            self.assertEqual([item], attributes.convert_to_list(item))
-
-    def test_convert_to_list_iterable(self):
-        for item in ([None], [1, 2, 3], (1, 2, 3), set([1, 2, 3]), ['foo']):
-            self.assertEqual(list(item), attributes.convert_to_list(item))
-
-    def test_convert_to_list_non_iterable(self):
-        for item in (True, False, 1, 1.2, object()):
-            self.assertEqual([item], attributes.convert_to_list(item))
-
-
-class TestResDict(base.BaseTestCase):
-    class _MyException(Exception):
-        pass
-    _EXC_CLS = _MyException
-
-    def _test_fill_default_value(self, attr_info, expected, res_dict):
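-        # fill_default_value mutates res_dict in place; compare the result
-        # against the expected dictionary.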
-        attributes.fill_default_value(attr_info, res_dict)
-        self.assertEqual(expected, res_dict)
-
-    def test_fill_default_value(self):
-        attr_info = {
-            'key': {
-                'allow_post': True,
-                'default': attributes.ATTR_NOT_SPECIFIED,
-            },
-        }
-        self._test_fill_default_value(attr_info, {'key': 'X'}, {'key': 'X'})
-        self._test_fill_default_value(
-            attr_info, {'key': attributes.ATTR_NOT_SPECIFIED}, {})
-
-        attr_info = {
-            'key': {
-                'allow_post': True,
-            },
-        }
-        self._test_fill_default_value(attr_info, {'key': 'X'}, {'key': 'X'})
-        self.assertRaises(ValueError, self._test_fill_default_value,
-                          attr_info, {'key': 'X'}, {})
-        self.assertRaises(self._EXC_CLS, attributes.fill_default_value,
-                          attr_info, {}, self._EXC_CLS)
-        attr_info = {
-            'key': {
-                'allow_post': False,
-            },
-        }
-        self.assertRaises(ValueError, self._test_fill_default_value,
-                          attr_info, {'key': 'X'}, {'key': 'X'})
-        self._test_fill_default_value(attr_info, {}, {})
-        self.assertRaises(self._EXC_CLS, attributes.fill_default_value,
-                          attr_info, {'key': 'X'}, self._EXC_CLS)
-
-    def _test_convert_value(self, attr_info, expected, res_dict):
-        attributes.convert_value(attr_info, res_dict)
-        self.assertEqual(expected, res_dict)
-
-    def test_convert_value(self):
-        attr_info = {
-            'key': {
-            },
-        }
-        self._test_convert_value(attr_info,
-                                 {'key': attributes.ATTR_NOT_SPECIFIED},
-                                 {'key': attributes.ATTR_NOT_SPECIFIED})
-        self._test_convert_value(attr_info, {'key': 'X'}, {'key': 'X'})
-        self._test_convert_value(attr_info,
-                                 {'other_key': 'X'}, {'other_key': 'X'})
-
-        attr_info = {
-            'key': {
-                'convert_to': attributes.convert_to_int,
-            },
-        }
-        self._test_convert_value(attr_info,
-                                 {'key': attributes.ATTR_NOT_SPECIFIED},
-                                 {'key': attributes.ATTR_NOT_SPECIFIED})
-        self._test_convert_value(attr_info, {'key': 1}, {'key': '1'})
-        self._test_convert_value(attr_info, {'key': 1}, {'key': 1})
-        self.assertRaises(n_exc.InvalidInput, self._test_convert_value,
-                          attr_info, {'key': 1}, {'key': 'a'})
-
-        attr_info = {
-            'key': {
-                'validate': {'type:uuid': None},
-            },
-        }
-        self._test_convert_value(attr_info,
-                                 {'key': attributes.ATTR_NOT_SPECIFIED},
-                                 {'key': attributes.ATTR_NOT_SPECIFIED})
-        uuid_str = '01234567-1234-1234-1234-1234567890ab'
-        self._test_convert_value(attr_info,
-                                 {'key': uuid_str}, {'key': uuid_str})
-        self.assertRaises(ValueError, self._test_convert_value,
-                          attr_info, {'key': 1}, {'key': 1})
-        self.assertRaises(self._EXC_CLS, attributes.convert_value,
-                          attr_info, {'key': 1}, self._EXC_CLS)
-
-    def test_populate_tenant_id(self):
-        tenant_id_1 = uuidutils.generate_uuid()
-        tenant_id_2 = uuidutils.generate_uuid()
-        # apart from the admin, nobody can create a resource on behalf of
-        # another tenant
-        ctx = context.Context(user_id=None, tenant_id=tenant_id_1)
-        res_dict = {'tenant_id': tenant_id_2}
-        self.assertRaises(webob.exc.HTTPBadRequest,
-                          attributes.populate_tenant_id,
-                          ctx, res_dict, None, None)
-        ctx.is_admin = True
-        self.assertIsNone(attributes.populate_tenant_id(ctx, res_dict,
-                                                        None, None))
-
-        # for each create request, the tenant_id should be added to the
-        # request body
-        res_dict2 = {}
-        attributes.populate_tenant_id(ctx, res_dict2, None, True)
-        self.assertEqual({'tenant_id': ctx.tenant_id}, res_dict2)
-
-        # if the tenant_id is mandatory for the resource and specified neither
-        # in the request nor in the context, an exception should be raised
-        res_dict3 = {}
-        attr_info = {'tenant_id': {'allow_post': True}, }
-        ctx.tenant_id = None
-        self.assertRaises(webob.exc.HTTPBadRequest,
-                          attributes.populate_tenant_id,
-                          ctx, res_dict3, attr_info, True)
-
-
-class TestHelpers(base.DietTestCase):
-
-    def _verify_port_attributes(self, attrs):
-        for test_attribute in ('id', 'name', 'mac_address', 'network_id',
-                               'tenant_id', 'fixed_ips', 'status'):
-            self.assertIn(test_attribute, attrs)
-
-    def test_get_collection_info(self):
-        attrs = attributes.get_collection_info('ports')
-        self._verify_port_attributes(attrs)
-
-    def test_get_collection_info_missing(self):
-        self.assertFalse(attributes.get_collection_info('meh'))
-
-    def test_get_resource_info(self):
-        attributes.REVERSED_PLURALS.pop('port', None)
-        attrs = attributes.get_resource_info('port')
-        self._verify_port_attributes(attrs)
-        # verify side effect
-        self.assertIn('port', attributes.REVERSED_PLURALS)
-
-    def test_get_resource_info_missing(self):
-        self.assertFalse(attributes.get_resource_info('meh'))
-
-    def test_get_resource_info_cached(self):
-        with mock.patch('neutron.api.v2.attributes.PLURALS') as mock_plurals:
-            attributes.REVERSED_PLURALS['port'] = 'ports'
-            attrs = attributes.get_resource_info('port')
-            self._verify_port_attributes(attrs)
-        self.assertEqual(0, mock_plurals.items.call_count)
diff --git a/neutron/tests/unit/api/v2/test_base.py b/neutron/tests/unit/api/v2/test_base.py
deleted file mode 100644 (file)
index 7e3dbc3..0000000
+++ /dev/null
@@ -1,1599 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-import mock
-from oslo_config import cfg
-from oslo_policy import policy as oslo_policy
-from oslo_utils import uuidutils
-import six
-from six import moves
-import six.moves.urllib.parse as urlparse
-import webob
-from webob import exc
-import webtest
-
-from neutron.api import api_common
-from neutron.api import extensions
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.v2 import attributes
-from neutron.api.v2 import base as v2_base
-from neutron.api.v2 import router
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron import manager
-from neutron import policy
-from neutron import quota
-from neutron.quota import resource_registry
-from neutron.tests import base
-from neutron.tests import fake_notifier
-from neutron.tests import tools
-from neutron.tests.unit import testlib_api
-
-
-EXTDIR = os.path.join(base.ROOTDIR, 'unit/extensions')
-
-_uuid = uuidutils.generate_uuid
-
-
-def _get_path(resource, id=None, action=None, fmt=None):
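-    # Build a request path such as '/networks/<id>/<action>.<fmt>',
-    # omitting any part that is not supplied.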
-    path = '/%s' % resource
-
-    if id is not None:
-        path = path + '/%s' % id
-
-    if action is not None:
-        path = path + '/%s' % action
-
-    if fmt is not None:
-        path = path + '.%s' % fmt
-
-    return path
-
-
-class ResourceIndexTestCase(base.BaseTestCase):
-    def test_index_json(self):
-        index = webtest.TestApp(router.Index({'foo': 'bar'}))
-        res = index.get('')
-
-        self.assertIn('resources', res.json)
-        self.assertEqual(len(res.json['resources']), 1)
-
-        resource = res.json['resources'][0]
-        self.assertIn('collection', resource)
-        self.assertEqual(resource['collection'], 'bar')
-
-        self.assertIn('name', resource)
-        self.assertEqual(resource['name'], 'foo')
-
-        self.assertIn('links', resource)
-        self.assertEqual(len(resource['links']), 1)
-
-        link = resource['links'][0]
-        self.assertIn('href', link)
-        self.assertEqual(link['href'], 'http://localhost/bar')
-        self.assertIn('rel', link)
-        self.assertEqual(link['rel'], 'self')
-
-
-class APIv2TestBase(base.BaseTestCase):
-    def setUp(self):
-        super(APIv2TestBase, self).setUp()
-
-        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
-        # Ensure existing ExtensionManager is not used
-        extensions.PluginAwareExtensionManager._instance = None
-        # Create the default configurations
-        self.config_parse()
-        # Update the plugin
-        self.setup_coreplugin(plugin)
-        cfg.CONF.set_override('allow_pagination', True)
-        cfg.CONF.set_override('allow_sorting', True)
-        self._plugin_patcher = mock.patch(plugin, autospec=True)
-        self.plugin = self._plugin_patcher.start()
-        instance = self.plugin.return_value
-        instance._NeutronPluginBaseV2__native_pagination_support = True
-        instance._NeutronPluginBaseV2__native_sorting_support = True
-
-        api = router.APIRouter()
-        self.api = webtest.TestApp(api)
-
-        quota.QUOTAS._driver = None
-        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
-                              group='QUOTAS')
-
-        # APIRouter initialization resets the policy module; re-initialize it
-        policy.init()
-
-
-class _ArgMatcher(object):
-    """An adapter for mock assertions that compares via a custom callable."""
-
-    def __init__(self, cmp, obj):
-        self.cmp = cmp
-        self.obj = obj
-
-    def __eq__(self, other):
-        return self.cmp(self.obj, other)
-
-
-def _list_cmp(l1, l2):
-    return set(l1) == set(l2)
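-
-# Example (illustrative): _ArgMatcher(_list_cmp, ['a', 'b']) compares equal
-# to ['b', 'a'], because _list_cmp treats both sequences as sets.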
-
-
-class APIv2TestCase(APIv2TestBase):
-    def _do_field_list(self, resource, base_fields):
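-        # The API always fetches policy-required attributes and primary keys,
-        # so extend the requested fields with them when building expectations.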
-        attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource]
-        policy_attrs = [name for (name, info) in attr_info.items()
-                        if info.get('required_by_policy')]
-        for name, info in attr_info.items():
-            if info.get('primary_key'):
-                policy_attrs.append(name)
-        fields = base_fields
-        fields.extend(policy_attrs)
-        return fields
-
-    def _get_collection_kwargs(self, skipargs=None, **kwargs):
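-        # Expected kwargs for a get_<collection> call: every standard argument
-        # not skipped defaults to mock.ANY; explicit kwargs override it.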
-        skipargs = skipargs or []
-        args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
-                     'page_reverse']
-        args_dict = dict(
-            (arg, mock.ANY) for arg in set(args_list) - set(skipargs))
-        args_dict.update(kwargs)
-        return args_dict
-
-    def test_fields(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'fields': 'foo'})
-        fields = self._do_field_list('networks', ['foo'])
-        kwargs = self._get_collection_kwargs(fields=fields)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_fields_multiple(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        fields = self._do_field_list('networks', ['foo', 'bar'])
-        self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
-        kwargs = self._get_collection_kwargs(fields=fields)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_fields_multiple_with_empty(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        fields = self._do_field_list('networks', ['foo'])
-        self.api.get(_get_path('networks'), {'fields': ['foo', '']})
-        kwargs = self._get_collection_kwargs(fields=fields)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_fields_empty(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'fields': ''})
-        kwargs = self._get_collection_kwargs(fields=[])
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_fields_multiple_empty(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'fields': ['', '']})
-        kwargs = self._get_collection_kwargs(fields=[])
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_filters(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'name': 'bar'})
-        filters = {'name': ['bar']}
-        kwargs = self._get_collection_kwargs(filters=filters)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_filters_empty(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'name': ''})
-        filters = {}
-        kwargs = self._get_collection_kwargs(filters=filters)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_filters_multiple_empty(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'name': ['', '']})
-        filters = {}
-        kwargs = self._get_collection_kwargs(filters=filters)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_filters_multiple_with_empty(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'name': ['bar', '']})
-        filters = {'name': ['bar']}
-        kwargs = self._get_collection_kwargs(filters=filters)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_filters_multiple_values(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
-        filters = {'name': ['bar', 'bar2']}
-        kwargs = self._get_collection_kwargs(filters=filters)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_filters_multiple(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'name': 'bar',
-                                             'tenant_id': 'bar2'})
-        filters = {'name': ['bar'], 'tenant_id': ['bar2']}
-        kwargs = self._get_collection_kwargs(filters=filters)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_filters_with_fields(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
-        filters = {'name': ['bar']}
-        fields = self._do_field_list('networks', ['foo'])
-        kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_filters_with_convert_to(self):
-        instance = self.plugin.return_value
-        instance.get_ports.return_value = []
-
-        self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
-        filters = {'admin_state_up': [True]}
-        kwargs = self._get_collection_kwargs(filters=filters)
-        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_filters_with_convert_list_to(self):
-        instance = self.plugin.return_value
-        instance.get_ports.return_value = []
-
-        self.api.get(_get_path('ports'),
-                     {'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
-        filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
-        kwargs = self._get_collection_kwargs(filters=filters)
-        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_limit(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'),
-                     {'limit': '10'})
-        kwargs = self._get_collection_kwargs(limit=10)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_limit_with_greater_than_max_limit(self):
-        cfg.CONF.set_default('pagination_max_limit', '1000')
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'),
-                     {'limit': '1001'})
-        kwargs = self._get_collection_kwargs(limit=1000)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_limit_with_zero(self):
-        cfg.CONF.set_default('pagination_max_limit', '1000')
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'), {'limit': '0'})
-        kwargs = self._get_collection_kwargs(limit=1000)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_limit_with_unspecified(self):
-        cfg.CONF.set_default('pagination_max_limit', '1000')
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'))
-        kwargs = self._get_collection_kwargs(limit=1000)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_limit_with_negative_value(self):
-        cfg.CONF.set_default('pagination_max_limit', '1000')
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        res = self.api.get(_get_path('networks'), {'limit': -1},
-                           expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_limit_with_non_integer(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        res = self.api.get(_get_path('networks'),
-                           {'limit': 'abc'}, expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_limit_with_infinite_pagination_max_limit(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-        cfg.CONF.set_override('pagination_max_limit', 'Infinite')
-        self.api.get(_get_path('networks'))
-        kwargs = self._get_collection_kwargs(limit=None)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_limit_with_negative_pagination_max_limit(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-        cfg.CONF.set_default('pagination_max_limit', '-1')
-        self.api.get(_get_path('networks'))
-        kwargs = self._get_collection_kwargs(limit=None)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_limit_with_non_integer_pagination_max_limit(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-        cfg.CONF.set_default('pagination_max_limit', 'abc')
-        self.api.get(_get_path('networks'))
-        kwargs = self._get_collection_kwargs(limit=None)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_marker(self):
-        cfg.CONF.set_override('pagination_max_limit', '1000')
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-        marker = _uuid()
-        self.api.get(_get_path('networks'),
-                     {'marker': marker})
-        kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_page_reverse(self):
-        calls = []
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'),
-                     {'page_reverse': 'True'})
-        kwargs = self._get_collection_kwargs(page_reverse=True)
-        calls.append(mock.call.get_networks(mock.ANY, **kwargs))
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-        instance.get_networks.reset_mock()
-
-        self.api.get(_get_path('networks'),
-                     {'page_reverse': 'False'})
-        kwargs = self._get_collection_kwargs(page_reverse=False)
-        calls.append(mock.call.get_networks(mock.ANY, **kwargs))
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_page_reverse_with_non_bool(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'),
-                     {'page_reverse': 'abc'})
-        kwargs = self._get_collection_kwargs(page_reverse=False)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_page_reverse_with_unspecified(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'))
-        kwargs = self._get_collection_kwargs(page_reverse=False)
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_sort(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'),
-                     {'sort_key': ['name', 'admin_state_up'],
-                      'sort_dir': ['desc', 'asc']})
-        kwargs = self._get_collection_kwargs(sorts=[('name', False),
-                                                    ('admin_state_up', True),
-                                                    ('id', True)])
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_sort_with_primary_key(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        self.api.get(_get_path('networks'),
-                     {'sort_key': ['name', 'admin_state_up', 'id'],
-                      'sort_dir': ['desc', 'asc', 'desc']})
-        kwargs = self._get_collection_kwargs(sorts=[('name', False),
-                                                    ('admin_state_up', True),
-                                                    ('id', False)])
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_sort_without_direction(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
-                           expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_sort_with_invalid_attribute(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        res = self.api.get(_get_path('networks'),
-                           {'sort_key': 'abc',
-                            'sort_dir': 'asc'},
-                           expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_sort_with_invalid_dirs(self):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-
-        res = self.api.get(_get_path('networks'),
-                           {'sort_key': 'name',
-                            'sort_dir': 'abc'},
-                           expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_emulated_sort(self):
-        instance = self.plugin.return_value
-        instance._NeutronPluginBaseV2__native_pagination_support = False
-        instance._NeutronPluginBaseV2__native_sorting_support = False
-        instance.get_networks.return_value = []
-        api = webtest.TestApp(router.APIRouter())
-        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
-                                        'sort_dir': ['desc', 'asc']})
-        kwargs = self._get_collection_kwargs(
-            skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_emulated_sort_without_sort_field(self):
-        instance = self.plugin.return_value
-        instance._NeutronPluginBaseV2__native_pagination_support = False
-        instance._NeutronPluginBaseV2__native_sorting_support = False
-        instance.get_networks.return_value = []
-        api = webtest.TestApp(router.APIRouter())
-        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
-                                        'sort_dir': ['desc', 'asc'],
-                                        'fields': ['subnets']})
-        kwargs = self._get_collection_kwargs(
-            skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
-            fields=_ArgMatcher(_list_cmp, ['name',
-                                           'status',
-                                           'id',
-                                           'subnets',
-                                           'shared',
-                                           'tenant_id']))
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_emulated_pagination(self):
-        instance = self.plugin.return_value
-        instance._NeutronPluginBaseV2__native_pagination_support = False
-        instance.get_networks.return_value = []
-        api = webtest.TestApp(router.APIRouter())
-        api.get(_get_path('networks'), {'limit': 10,
-                                        'marker': 'foo',
-                                        'page_reverse': False})
-        kwargs = self._get_collection_kwargs(skipargs=['limit',
-                                                       'marker',
-                                                       'page_reverse'])
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-    def test_native_pagination_without_native_sorting(self):
-        instance = self.plugin.return_value
-        instance._NeutronPluginBaseV2__native_sorting_support = False
-        self.assertRaises(n_exc.Invalid, router.APIRouter)
-
-    def test_native_pagination_without_allow_sorting(self):
-        cfg.CONF.set_override('allow_sorting', False)
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = []
-        api = webtest.TestApp(router.APIRouter())
-        api.get(_get_path('networks'),
-                {'sort_key': ['name', 'admin_state_up'],
-                 'sort_dir': ['desc', 'asc']})
-        kwargs = self._get_collection_kwargs(sorts=[('name', False),
-                                                    ('admin_state_up', True),
-                                                    ('id', True)])
-        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
-
-
-# Note: since all resources use the same controller and validation
-# logic, testing networks alone gives good coverage.
-class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
-
-    def _test_list(self, req_tenant_id, real_tenant_id):
-        env = {}
-        if req_tenant_id:
-            env = {'neutron.context': context.Context('', req_tenant_id)}
-        input_dict = {'id': uuidutils.generate_uuid(),
-                      'name': 'net1',
-                      'admin_state_up': True,
-                      'status': "ACTIVE",
-                      'tenant_id': real_tenant_id,
-                      'shared': False,
-                      'subnets': []}
-        return_value = [input_dict]
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = return_value
-
-        res = self.api.get(_get_path('networks',
-                                     fmt=self.fmt), extra_environ=env)
-        res = self.deserialize(res)
-        self.assertIn('networks', res)
-        if not req_tenant_id or req_tenant_id == real_tenant_id:
-            # expect full list returned
-            self.assertEqual(len(res['networks']), 1)
-            output_dict = res['networks'][0]
-            input_dict['shared'] = False
-            self.assertEqual(len(input_dict), len(output_dict))
-            for k, v in six.iteritems(input_dict):
-                self.assertEqual(v, output_dict[k])
-        else:
-            # expect no results
-            self.assertEqual(len(res['networks']), 0)
-
-    def test_list_noauth(self):
-        self._test_list(None, _uuid())
-
-    def test_list_keystone(self):
-        tenant_id = _uuid()
-        self._test_list(tenant_id, tenant_id)
-
-    def test_list_keystone_bad(self):
-        tenant_id = _uuid()
-        self._test_list(tenant_id + "bad", tenant_id)
-
-    def test_list_pagination(self):
-        id1 = str(_uuid())
-        id2 = str(_uuid())
-        input_dict1 = {'id': id1,
-                       'name': 'net1',
-                       'admin_state_up': True,
-                       'status': "ACTIVE",
-                       'tenant_id': '',
-                       'shared': False,
-                       'subnets': []}
-        input_dict2 = {'id': id2,
-                       'name': 'net2',
-                       'admin_state_up': True,
-                       'status': "ACTIVE",
-                       'tenant_id': '',
-                       'shared': False,
-                       'subnets': []}
-        return_value = [input_dict1, input_dict2]
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = return_value
-        params = {'limit': ['2'],
-                  'marker': [str(_uuid())],
-                  'sort_key': ['name'],
-                  'sort_dir': ['asc']}
-        res = self.api.get(_get_path('networks'),
-                           params=params).json
-
-        self.assertEqual(len(res['networks']), 2)
-        self.assertEqual(sorted([id1, id2]),
-                         sorted([res['networks'][0]['id'],
-                                res['networks'][1]['id']]))
-
-        self.assertIn('networks_links', res)
-        next_links = []
-        previous_links = []
-        for r in res['networks_links']:
-            if r['rel'] == 'next':
-                next_links.append(r)
-            if r['rel'] == 'previous':
-                previous_links.append(r)
-        self.assertEqual(len(next_links), 1)
-        self.assertEqual(len(previous_links), 1)
-
-        url = urlparse.urlparse(next_links[0]['href'])
-        self.assertEqual(url.path, _get_path('networks'))
-        params['marker'] = [id2]
-        self.assertEqual(urlparse.parse_qs(url.query), params)
-
-        url = urlparse.urlparse(previous_links[0]['href'])
-        self.assertEqual(url.path, _get_path('networks'))
-        params['marker'] = [id1]
-        params['page_reverse'] = ['True']
-        self.assertEqual(urlparse.parse_qs(url.query), params)
-
-    def test_list_pagination_with_last_page(self):
-        id = str(_uuid())
-        input_dict = {'id': id,
-                      'name': 'net1',
-                      'admin_state_up': True,
-                      'status': "ACTIVE",
-                      'tenant_id': '',
-                      'shared': False,
-                      'subnets': []}
-        return_value = [input_dict]
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = return_value
-        params = {'limit': ['2'],
-                  'marker': str(_uuid())}
-        res = self.api.get(_get_path('networks'),
-                           params=params).json
-
-        self.assertEqual(len(res['networks']), 1)
-        self.assertEqual(id, res['networks'][0]['id'])
-
-        self.assertIn('networks_links', res)
-        previous_links = []
-        for r in res['networks_links']:
-            self.assertNotEqual(r['rel'], 'next')
-            if r['rel'] == 'previous':
-                previous_links.append(r)
-        self.assertEqual(len(previous_links), 1)
-
-        url = urlparse.urlparse(previous_links[0]['href'])
-        self.assertEqual(url.path, _get_path('networks'))
-        expect_params = params.copy()
-        expect_params['marker'] = [id]
-        expect_params['page_reverse'] = ['True']
-        self.assertEqual(urlparse.parse_qs(url.query), expect_params)
-
-    def test_list_pagination_with_empty_page(self):
-        return_value = []
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = return_value
-        params = {'limit': ['2'],
-                  'marker': str(_uuid())}
-        res = self.api.get(_get_path('networks'),
-                           params=params).json
-
-        self.assertEqual([], res['networks'])
-
-        previous_links = []
-        if 'networks_links' in res:
-            for r in res['networks_links']:
-                self.assertNotEqual(r['rel'], 'next')
-                if r['rel'] == 'previous':
-                    previous_links.append(r)
-        self.assertEqual(len(previous_links), 1)
-
-        url = urlparse.urlparse(previous_links[0]['href'])
-        self.assertEqual(url.path, _get_path('networks'))
-        expect_params = params.copy()
-        del expect_params['marker']
-        expect_params['page_reverse'] = ['True']
-        self.assertEqual(urlparse.parse_qs(url.query), expect_params)
-
-    def test_list_pagination_reverse_with_last_page(self):
-        id = str(_uuid())
-        input_dict = {'id': id,
-                      'name': 'net1',
-                      'admin_state_up': True,
-                      'status': "ACTIVE",
-                      'tenant_id': '',
-                      'shared': False,
-                      'subnets': []}
-        return_value = [input_dict]
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = return_value
-        params = {'limit': ['2'],
-                  'marker': [str(_uuid())],
-                  'page_reverse': ['True']}
-        res = self.api.get(_get_path('networks'),
-                           params=params).json
-
-        self.assertEqual(len(res['networks']), 1)
-        self.assertEqual(id, res['networks'][0]['id'])
-
-        self.assertIn('networks_links', res)
-        next_links = []
-        for r in res['networks_links']:
-            self.assertNotEqual(r['rel'], 'previous')
-            if r['rel'] == 'next':
-                next_links.append(r)
-        self.assertEqual(len(next_links), 1)
-
-        url = urlparse.urlparse(next_links[0]['href'])
-        self.assertEqual(url.path, _get_path('networks'))
-        expected_params = params.copy()
-        del expected_params['page_reverse']
-        expected_params['marker'] = [id]
-        self.assertEqual(urlparse.parse_qs(url.query),
-                         expected_params)
-
-    def test_list_pagination_reverse_with_empty_page(self):
-        return_value = []
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = return_value
-        params = {'limit': ['2'],
-                  'marker': [str(_uuid())],
-                  'page_reverse': ['True']}
-        res = self.api.get(_get_path('networks'),
-                           params=params).json
-        self.assertEqual([], res['networks'])
-
-        next_links = []
-        if 'networks_links' in res:
-            for r in res['networks_links']:
-                self.assertNotEqual(r['rel'], 'previous')
-                if r['rel'] == 'next':
-                    next_links.append(r)
-        self.assertEqual(len(next_links), 1)
-
-        url = urlparse.urlparse(next_links[0]['href'])
-        self.assertEqual(url.path, _get_path('networks'))
-        expect_params = params.copy()
-        del expect_params['marker']
-        del expect_params['page_reverse']
-        self.assertEqual(urlparse.parse_qs(url.query), expect_params)
-
-    def test_create(self):
-        net_id = _uuid()
-        data = {'network': {'name': 'net1', 'admin_state_up': True,
-                            'tenant_id': _uuid()}}
-        return_value = {'subnets': [], 'status': "ACTIVE",
-                        'id': net_id}
-        return_value.update(data['network'].copy())
-
-        instance = self.plugin.return_value
-        instance.create_network.return_value = return_value
-        instance.get_networks_count.return_value = 0
-
-        res = self.api.post(_get_path('networks', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/' + self.fmt)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('network', res)
-        net = res['network']
-        self.assertEqual(net['id'], net_id)
-        self.assertEqual(net['status'], "ACTIVE")
-
-    def test_create_use_defaults(self):
-        net_id = _uuid()
-        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
-        full_input = {'network': {'admin_state_up': True,
-                                  'shared': False}}
-        full_input['network'].update(initial_input['network'])
-
-        return_value = {'id': net_id, 'status': "ACTIVE"}
-        return_value.update(full_input['network'])
-
-        instance = self.plugin.return_value
-        instance.create_network.return_value = return_value
-        instance.get_networks_count.return_value = 0
-
-        res = self.api.post(_get_path('networks', fmt=self.fmt),
-                            self.serialize(initial_input),
-                            content_type='application/' + self.fmt)
-        instance.create_network.assert_called_with(mock.ANY,
-                                                   network=full_input)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('network', res)
-        net = res['network']
-        self.assertEqual(net['id'], net_id)
-        self.assertTrue(net['admin_state_up'])
-        self.assertEqual(net['status'], "ACTIVE")
-
-    def test_create_no_keystone_env(self):
-        data = {'name': 'net1'}
-        self._test_create_failure_bad_request('networks', data)
-
-    def test_create_with_keystone_env(self):
-        tenant_id = _uuid()
-        net_id = _uuid()
-        env = {'neutron.context': context.Context('', tenant_id)}
-        # tenant_id should be fetched from the request environment
-        initial_input = {'network': {'name': 'net1'}}
-        full_input = {'network': {'admin_state_up': True,
-                      'shared': False, 'tenant_id': tenant_id}}
-        full_input['network'].update(initial_input['network'])
-
-        return_value = {'id': net_id, 'status': "ACTIVE"}
-        return_value.update(full_input['network'])
-
-        instance = self.plugin.return_value
-        instance.create_network.return_value = return_value
-        instance.get_networks_count.return_value = 0
-
-        res = self.api.post(_get_path('networks', fmt=self.fmt),
-                            self.serialize(initial_input),
-                            content_type='application/' + self.fmt,
-                            extra_environ=env)
-
-        instance.create_network.assert_called_with(mock.ANY,
-                                                   network=full_input)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-
-    def test_create_bad_keystone_tenant(self):
-        tenant_id = _uuid()
-        data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
-        env = {'neutron.context': context.Context('', tenant_id + "bad")}
-        self._test_create_failure_bad_request('networks', data,
-                                              extra_environ=env)
-
-    def test_create_no_body(self):
-        data = {'whoa': None}
-        self._test_create_failure_bad_request('networks', data)
-
-    def test_create_body_string_not_json(self):
-        data = 'a string'
-        self._test_create_failure_bad_request('networks', data)
-
-    def test_create_body_boolean_not_json(self):
-        data = True
-        self._test_create_failure_bad_request('networks', data)
-
-    def test_create_no_resource(self):
-        data = {}
-        self._test_create_failure_bad_request('networks', data)
-
-    def test_create_missing_attr(self):
-        data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
-        self._test_create_failure_bad_request('ports', data)
-
-    def test_create_readonly_attr(self):
-        data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
-                            'status': "ACTIVE"}}
-        self._test_create_failure_bad_request('networks', data)
-
-    def test_create_with_too_long_name(self):
-        data = {'network': {'name': "12345678" * 32,
-                            'admin_state_up': True,
-                            'tenant_id': _uuid()}}
-        res = self.api.post(_get_path('networks', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/' + self.fmt,
-                            expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_create_bulk(self):
-        data = {'networks': [{'name': 'net1',
-                              'admin_state_up': True,
-                              'tenant_id': _uuid()},
-                             {'name': 'net2',
-                              'admin_state_up': True,
-                              'tenant_id': _uuid()}]}
-
-        def side_effect(context, network):
-            net = network.copy()
-            net['network'].update({'subnets': []})
-            return net['network']
-
-        instance = self.plugin.return_value
-        instance.create_network.side_effect = side_effect
-        instance.get_networks_count.return_value = 0
-        res = self.api.post(_get_path('networks', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/' + self.fmt)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-
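-    # Helper: POST a malformed body to the given collection and assert that
-    # the API answers with HTTP 400 (Bad Request).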
-    def _test_create_failure_bad_request(self, resource, data, **kwargs):
-        res = self.api.post(_get_path(resource, fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/' + self.fmt,
-                            expect_errors=True, **kwargs)
-        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_create_bulk_networks_none(self):
-        self._test_create_failure_bad_request('networks', {'networks': None})
-
-    def test_create_bulk_networks_empty_list(self):
-        self._test_create_failure_bad_request('networks', {'networks': []})
-
-    def test_create_bulk_missing_attr(self):
-        data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
-        self._test_create_failure_bad_request('ports', data)
-
-    def test_create_bulk_partial_body(self):
-        data = {'ports': [{'device_id': 'device_1',
-                           'tenant_id': _uuid()},
-                          {'tenant_id': _uuid()}]}
-        self._test_create_failure_bad_request('ports', data)
-
-    def test_create_attr_not_specified(self):
-        net_id = _uuid()
-        tenant_id = _uuid()
-        device_id = _uuid()
-        initial_input = {'port': {'name': '', 'network_id': net_id,
-                                  'tenant_id': tenant_id,
-                                  'device_id': device_id,
-                                  'admin_state_up': True}}
-        full_input = {'port': {'admin_state_up': True,
-                               'mac_address': attributes.ATTR_NOT_SPECIFIED,
-                               'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
-                               'device_owner': ''}}
-        full_input['port'].update(initial_input['port'])
-        return_value = {'id': _uuid(), 'status': 'ACTIVE',
-                        'admin_state_up': True,
-                        'mac_address': 'ca:fe:de:ad:be:ef',
-                        'device_id': device_id,
-                        'device_owner': ''}
-        return_value.update(initial_input['port'])
-
-        instance = self.plugin.return_value
-        instance.get_network.return_value = {
-            'tenant_id': six.text_type(tenant_id)
-        }
-        instance.get_ports_count.return_value = 1
-        instance.create_port.return_value = return_value
-        res = self.api.post(_get_path('ports', fmt=self.fmt),
-                            self.serialize(initial_input),
-                            content_type='application/' + self.fmt)
-        instance.create_port.assert_called_with(mock.ANY, port=full_input)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('port', res)
-        port = res['port']
-        self.assertEqual(port['network_id'], net_id)
-        self.assertEqual(port['mac_address'], 'ca:fe:de:ad:be:ef')
-
-    def test_create_return_extra_attr(self):
-        net_id = _uuid()
-        data = {'network': {'name': 'net1', 'admin_state_up': True,
-                            'tenant_id': _uuid()}}
-        return_value = {'subnets': [], 'status': "ACTIVE",
-                        'id': net_id, 'v2attrs:something': "123"}
-        return_value.update(data['network'].copy())
-
-        instance = self.plugin.return_value
-        instance.create_network.return_value = return_value
-        instance.get_networks_count.return_value = 0
-
-        res = self.api.post(_get_path('networks', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/' + self.fmt)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('network', res)
-        net = res['network']
-        self.assertEqual(net['id'], net_id)
-        self.assertEqual(net['status'], "ACTIVE")
-        self.assertNotIn('v2attrs:something', net)
-
-    def test_fields(self):
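-        # Smoke test: a show request against a plugin that returns only a
-        # subset of attributes must complete successfully.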
-        return_value = {'name': 'net1', 'admin_state_up': True,
-                        'subnets': []}
-
-        instance = self.plugin.return_value
-        instance.get_network.return_value = return_value
-
-        self.api.get(_get_path('networks',
-                               id=uuidutils.generate_uuid(),
-                               fmt=self.fmt))
-
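-    # Helper: issue a DELETE as req_tenant_id against a network owned by
-    # real_tenant_id and assert the expected status code.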
-    def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
-                     expect_errors=False):
-        env = {}
-        if req_tenant_id:
-            env = {'neutron.context': context.Context('', req_tenant_id)}
-        instance = self.plugin.return_value
-        instance.get_network.return_value = {'tenant_id': real_tenant_id,
-                                             'shared': False}
-        instance.delete_network.return_value = None
-
-        res = self.api.delete(_get_path('networks',
-                                        id=uuidutils.generate_uuid(),
-                                        fmt=self.fmt),
-                              extra_environ=env,
-                              expect_errors=expect_errors)
-        self.assertEqual(res.status_int, expected_code)
-
-    def test_delete_noauth(self):
-        self._test_delete(None, _uuid(), exc.HTTPNoContent.code)
-
-    def test_delete_keystone(self):
-        tenant_id = _uuid()
-        self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)
-
-    def test_delete_keystone_bad_tenant(self):
-        tenant_id = _uuid()
-        self._test_delete(tenant_id + "bad", tenant_id,
-                          exc.HTTPNotFound.code, expect_errors=True)
-
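-    # Helper: issue a GET as req_tenant_id; a req_tenant_id ending in
-    # 'another' marks the network as shared and grants the tenant_admin role.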
-    def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
-                  expect_errors=False):
-        env = {}
-        shared = False
-        if req_tenant_id:
-            env = {'neutron.context': context.Context('', req_tenant_id)}
-            if req_tenant_id.endswith('another'):
-                shared = True
-                env['neutron.context'].roles = ['tenant_admin']
-
-        data = {'tenant_id': real_tenant_id, 'shared': shared}
-        instance = self.plugin.return_value
-        instance.get_network.return_value = data
-
-        res = self.api.get(_get_path('networks',
-                                     id=uuidutils.generate_uuid(),
-                                     fmt=self.fmt),
-                           extra_environ=env,
-                           expect_errors=expect_errors)
-        self.assertEqual(res.status_int, expected_code)
-        return res
-
-    def test_get_noauth(self):
-        self._test_get(None, _uuid(), 200)
-
-    def test_get_keystone(self):
-        tenant_id = _uuid()
-        self._test_get(tenant_id, tenant_id, 200)
-
-    def test_get_keystone_bad_tenant(self):
-        tenant_id = _uuid()
-        self._test_get(tenant_id + "bad", tenant_id,
-                       exc.HTTPNotFound.code, expect_errors=True)
-
-    def test_get_keystone_shared_network(self):
-        tenant_id = _uuid()
-        self._test_get(tenant_id + "another", tenant_id, 200)
-
-    def test_get_keystone_strip_admin_only_attribute(self):
-        tenant_id = _uuid()
-        # Inject a rule into the policy engine
-        rules = oslo_policy.Rules.from_dict(
-            {'get_network:name': "rule:admin_only"})
-        policy.set_rules(rules, overwrite=False)
-        res = self._test_get(tenant_id, tenant_id, 200)
-        res = self.deserialize(res)
-        self.assertNotIn('name', res['network'])
-
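-    # Helper: issue a PUT as req_tenant_id against a network owned by
-    # real_tenant_id and assert the expected status code.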
-    def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
-                     expect_errors=False):
-        env = {}
-        if req_tenant_id:
-            env = {'neutron.context': context.Context('', req_tenant_id)}
-        # leave out 'name' field intentionally
-        data = {'network': {'admin_state_up': True}}
-        return_value = {'subnets': []}
-        return_value.update(data['network'].copy())
-
-        instance = self.plugin.return_value
-        instance.get_network.return_value = {'tenant_id': real_tenant_id,
-                                             'shared': False}
-        instance.update_network.return_value = return_value
-
-        res = self.api.put(_get_path('networks',
-                                     id=uuidutils.generate_uuid(),
-                                     fmt=self.fmt),
-                           self.serialize(data),
-                           extra_environ=env,
-                           expect_errors=expect_errors)
-        # Ensure the 'id' attribute is included in the fields returned by
-        # the GET call issued during the update procedure.
-        self.assertEqual(1, instance.get_network.call_count)
-        self.assertIn('id', instance.get_network.call_args[1]['fields'])
-        self.assertEqual(res.status_int, expected_code)
-
-    def test_update_noauth(self):
-        self._test_update(None, _uuid(), 200)
-
-    def test_update_keystone(self):
-        tenant_id = _uuid()
-        self._test_update(tenant_id, tenant_id, 200)
-
-    def test_update_keystone_bad_tenant(self):
-        tenant_id = _uuid()
-        self._test_update(tenant_id + "bad", tenant_id,
-                          exc.HTTPNotFound.code, expect_errors=True)
-
-    def test_update_readonly_field(self):
-        data = {'network': {'status': "NANANA"}}
-        res = self.api.put(_get_path('networks', id=_uuid()),
-                           self.serialize(data),
-                           content_type='application/' + self.fmt,
-                           expect_errors=True)
-        self.assertEqual(res.status_int, 400)
-
-    def test_invalid_attribute_field(self):
-        data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
-        res = self.api.put(_get_path('networks', id=_uuid()),
-                           self.serialize(data),
-                           content_type='application/' + self.fmt,
-                           expect_errors=True)
-        self.assertEqual(res.status_int, 400)
-
-
-class SubresourceTest(base.BaseTestCase):
-    def setUp(self):
-        super(SubresourceTest, self).setUp()
-
-        plugin = 'neutron.tests.unit.api.v2.test_base.TestSubresourcePlugin'
-        extensions.PluginAwareExtensionManager._instance = None
-
-        self.useFixture(tools.AttributeMapMemento())
-
-        self.config_parse()
-        self.setup_coreplugin(plugin)
-
-        self._plugin_patcher = mock.patch(plugin, autospec=True)
-        self.plugin = self._plugin_patcher.start()
-
-        api = router.APIRouter()
-
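-        # Manually wire a 'dummies' sub-resource under
-        # /networks/{network_id}/ so the tests below can exercise CRUD
-        # operations and member actions on it.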
-        SUB_RESOURCES = {}
-        RESOURCE_ATTRIBUTE_MAP = {}
-        SUB_RESOURCES['dummy'] = {
-            'collection_name': 'dummies',
-            'parent': {'collection_name': 'networks',
-                       'member_name': 'network'}
-        }
-        RESOURCE_ATTRIBUTE_MAP['dummies'] = {
-            'foo': {'allow_post': True, 'allow_put': True,
-                    'validate': {'type:string': None},
-                    'default': '', 'is_visible': True},
-            'tenant_id': {'allow_post': True, 'allow_put': False,
-                          'validate': {'type:string': None},
-                          'required_by_policy': True,
-                          'is_visible': True}
-        }
-        collection_name = SUB_RESOURCES['dummy'].get('collection_name')
-        resource_name = 'dummy'
-        parent = SUB_RESOURCES['dummy'].get('parent')
-        params = RESOURCE_ATTRIBUTE_MAP['dummies']
-        member_actions = {'mactions': 'GET'}
-        _plugin = manager.NeutronManager.get_plugin()
-        controller = v2_base.create_resource(collection_name, resource_name,
-                                             _plugin, params,
-                                             member_actions=member_actions,
-                                             parent=parent,
-                                             allow_bulk=True,
-                                             allow_pagination=True,
-                                             allow_sorting=True)
-
-        path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
-                                          parent['member_name'],
-                                          collection_name)
-        mapper_kwargs = dict(controller=controller,
-                             path_prefix=path_prefix)
-        api.map.collection(collection_name, resource_name, **mapper_kwargs)
-        api.map.resource(collection_name, collection_name,
-                         controller=controller,
-                         parent_resource=parent,
-                         member=member_actions)
-        self.api = webtest.TestApp(api)
-
-    def tearDown(self):
-        super(SubresourceTest, self).tearDown()
-
-    def test_index_sub_resource(self):
-        instance = self.plugin.return_value
-
-        self.api.get('/networks/id1/dummies')
-        instance.get_network_dummies.assert_called_once_with(mock.ANY,
-                                                             filters=mock.ANY,
-                                                             fields=mock.ANY,
-                                                             network_id='id1')
-
-    def test_show_sub_resource(self):
-        instance = self.plugin.return_value
-
-        dummy_id = _uuid()
-        self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
-        instance.get_network_dummy.assert_called_once_with(mock.ANY,
-                                                           dummy_id,
-                                                           network_id='id1',
-                                                           fields=mock.ANY)
-
-    def test_create_sub_resource(self):
-        instance = self.plugin.return_value
-
-        body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}}
-        self.api.post_json('/networks/id1/dummies', body)
-        instance.create_network_dummy.assert_called_once_with(mock.ANY,
-                                                              network_id='id1',
-                                                              dummy=body)
-
-    def test_update_sub_resource(self):
-        instance = self.plugin.return_value
-
-        dummy_id = _uuid()
-        body = {'dummy': {'foo': 'bar'}}
-        self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
-                          body)
-        instance.update_network_dummy.assert_called_once_with(mock.ANY,
-                                                              dummy_id,
-                                                              network_id='id1',
-                                                              dummy=body)
-
-    def test_update_subresource_to_none(self):
-        instance = self.plugin.return_value
-
-        dummy_id = _uuid()
-        body = {'dummy': {}}
-        self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
-                          body)
-        instance.update_network_dummy.assert_called_once_with(mock.ANY,
-                                                              dummy_id,
-                                                              network_id='id1',
-                                                              dummy=body)
-
-    def test_delete_sub_resource(self):
-        instance = self.plugin.return_value
-
-        dummy_id = _uuid()
-        self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
-        instance.delete_network_dummy.assert_called_once_with(mock.ANY,
-                                                              dummy_id,
-                                                              network_id='id1')
-
-    def test_sub_resource_member_actions(self):
-        instance = self.plugin.return_value
-
-        dummy_id = _uuid()
-        self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id,
-                                                 action='mactions'))
-        instance.mactions.assert_called_once_with(mock.ANY,
-                                                  dummy_id,
-                                                  network_id='id1')
-
-
-# Note: since all resources share the same controller and validation
-# logic, testing networks alone provides very good coverage.
-class V2Views(base.BaseTestCase):
-    def _view(self, keys, collection, resource):
-        data = dict((key, 'value') for key in keys)
-        data['fake'] = 'value'
-        attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection]
-        controller = v2_base.Controller(None, collection, resource, attr_info)
-        res = controller._view(context.get_admin_context(), data)
-        self.assertNotIn('fake', res)
-        for key in keys:
-            self.assertIn(key, res)
-
-    def test_network(self):
-        keys = ('id', 'name', 'subnets', 'admin_state_up', 'status',
-                'tenant_id')
-        self._view(keys, 'networks', 'network')
-
-    def test_port(self):
-        keys = ('id', 'network_id', 'mac_address', 'fixed_ips',
-                'device_id', 'admin_state_up', 'tenant_id', 'status')
-        self._view(keys, 'ports', 'port')
-
-    def test_subnet(self):
-        keys = ('id', 'network_id', 'tenant_id', 'gateway_ip',
-                'ip_version', 'cidr', 'enable_dhcp')
-        self._view(keys, 'subnets', 'subnet')
-
-
-class NotificationTest(APIv2TestBase):
-
-    def setUp(self):
-        super(NotificationTest, self).setUp()
-        fake_notifier.reset()
-
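-    # Helper: perform the given operation and assert that exactly one
-    # <resource>.<opname>.start and one .end notification were emitted,
-    # both at INFO priority.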
-    def _resource_op_notifier(self, opname, resource, expected_errors=False):
-        initial_input = {resource: {'name': 'myname'}}
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = initial_input
-        instance.get_networks_count.return_value = 0
-        expected_code = exc.HTTPCreated.code
-        if opname == 'create':
-            initial_input[resource]['tenant_id'] = _uuid()
-            res = self.api.post_json(
-                _get_path('networks'),
-                initial_input, expect_errors=expected_errors)
-        if opname == 'update':
-            res = self.api.put_json(
-                _get_path('networks', id=_uuid()),
-                initial_input, expect_errors=expected_errors)
-            expected_code = exc.HTTPOk.code
-        if opname == 'delete':
-            initial_input[resource]['tenant_id'] = _uuid()
-            res = self.api.delete(
-                _get_path('networks', id=_uuid()),
-                expect_errors=expected_errors)
-            expected_code = exc.HTTPNoContent.code
-
-        expected_events = ('.'.join([resource, opname, "start"]),
-                           '.'.join([resource, opname, "end"]))
-        self.assertEqual(len(fake_notifier.NOTIFICATIONS),
-                         len(expected_events))
-        for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events):
-            self.assertEqual('INFO', msg['priority'])
-            self.assertEqual(event, msg['event_type'])
-
-        self.assertEqual(res.status_int, expected_code)
-
-    def test_network_create_notifier(self):
-        self._resource_op_notifier('create', 'network')
-
-    def test_network_delete_notifier(self):
-        self._resource_op_notifier('delete', 'network')
-
-    def test_network_update_notifier(self):
-        self._resource_op_notifier('update', 'network')
-
-
-class DHCPNotificationTest(APIv2TestBase):
-
-    def setUp(self):
-        # These tests do not have database support, so tracking cannot be used
-        cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
-        super(DHCPNotificationTest, self).setUp()
-
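-    # Helper: patch DhcpAgentNotifyAPI.notify and assert that exactly one
-    # <resource>.<opname>.end notification is sent per affected network
-    # (bulk creates yield one notification per item).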
-    def _test_dhcp_notifier(self, opname, resource, initial_input=None):
-        instance = self.plugin.return_value
-        instance.get_networks.return_value = initial_input
-        instance.get_networks_count.return_value = 0
-        expected_code = exc.HTTPCreated.code
-        with mock.patch.object(dhcp_rpc_agent_api.DhcpAgentNotifyAPI,
-                               'notify') as dhcp_notifier:
-            if opname == 'create':
-                res = self.api.post_json(
-                    _get_path('networks'),
-                    initial_input)
-            if opname == 'update':
-                res = self.api.put_json(
-                    _get_path('networks', id=_uuid()),
-                    initial_input)
-                expected_code = exc.HTTPOk.code
-            if opname == 'delete':
-                res = self.api.delete(_get_path('networks', id=_uuid()))
-                expected_code = exc.HTTPNoContent.code
-            expected_item = mock.call(mock.ANY, mock.ANY,
-                                      resource + "." + opname + ".end")
-            if initial_input and resource not in initial_input:
-                resource += 's'
-            num = len(initial_input[resource]) if initial_input and isinstance(
-                initial_input[resource], list) else 1
-            expected = [expected_item for x in moves.range(num)]
-            self.assertEqual(expected, dhcp_notifier.call_args_list)
-            self.assertEqual(num, dhcp_notifier.call_count)
-        self.assertEqual(expected_code, res.status_int)
-
-    def test_network_create_dhcp_notifier(self):
-        input = {'network': {'name': 'net',
-                             'tenant_id': _uuid()}}
-        self._test_dhcp_notifier('create', 'network', input)
-
-    def test_network_delete_dhcp_notifier(self):
-        self._test_dhcp_notifier('delete', 'network')
-
-    def test_network_update_dhcp_notifier(self):
-        input = {'network': {'name': 'net'}}
-        self._test_dhcp_notifier('update', 'network', input)
-
-    def test_networks_create_bulk_dhcp_notifier(self):
-        input = {'networks': [{'name': 'net1',
-                               'tenant_id': _uuid()},
-                              {'name': 'net2',
-                               'tenant_id': _uuid()}]}
-        self._test_dhcp_notifier('create', 'network', input)
-
-
-class QuotaTest(APIv2TestBase):
-
-    def setUp(self):
-        # These tests do not have database support, so tracking cannot be used
-        cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
-        super(QuotaTest, self).setUp()
-        # Use mock to let the API use a different QuotaEngine instance for
-        # the unit tests in this class. This ensures resources are registered
-        # again and instantiated with neutron.quota.resource.CountableResource
-        replacement_registry = resource_registry.ResourceRegistry()
-        registry_patcher = mock.patch('neutron.quota.resource_registry.'
-                                      'ResourceRegistry.get_instance')
-        mock_registry = registry_patcher.start().return_value
-        mock_registry.get_resource = replacement_registry.get_resource
-        mock_registry.resources = replacement_registry.resources
-        # Register a resource
-        replacement_registry.register_resource_by_name('network')
-
-    def test_create_network_quota(self):
-        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
-        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
-        full_input = {'network': {'admin_state_up': True, 'subnets': []}}
-        full_input['network'].update(initial_input['network'])
-
-        instance = self.plugin.return_value
-        instance.get_networks_count.return_value = 1
-        res = self.api.post_json(
-            _get_path('networks'), initial_input, expect_errors=True)
-        instance.get_networks_count.assert_called_with(mock.ANY,
-                                                       filters=mock.ANY)
-        self.assertIn("Quota exceeded for resources",
-                      res.json['NeutronError']['message'])
-
-    def test_create_network_quota_no_counts(self):
-        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
-        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
-        full_input = {'network': {'admin_state_up': True, 'subnets': []}}
-        full_input['network'].update(initial_input['network'])
-
-        instance = self.plugin.return_value
-        instance.get_networks_count.side_effect = (
-            NotImplementedError())
-        instance.get_networks.return_value = ["foo"]
-        res = self.api.post_json(
-            _get_path('networks'), initial_input, expect_errors=True)
-        instance.get_networks_count.assert_called_with(mock.ANY,
-                                                       filters=mock.ANY)
-        self.assertIn("Quota exceeded for resources",
-                      res.json['NeutronError']['message'])
-
-    def test_create_network_quota_without_limit(self):
-        cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
-        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
-        instance = self.plugin.return_value
-        instance.get_networks_count.return_value = 3
-        res = self.api.post_json(
-            _get_path('networks'), initial_input)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-
-
-class ExtensionTestCase(base.BaseTestCase):
-    def setUp(self):
-        # These tests do not have database support, so tracking cannot be used
-        cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
-        super(ExtensionTestCase, self).setUp()
-        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
-        # Ensure existing ExtensionManager is not used
-        extensions.PluginAwareExtensionManager._instance = None
-
-        self.useFixture(tools.AttributeMapMemento())
-
-        # Create the default configurations
-        self.config_parse()
-
-        # Update the plugin and extensions path
-        self.setup_coreplugin(plugin)
-        cfg.CONF.set_override('api_extensions_path', EXTDIR)
-
-        self._plugin_patcher = mock.patch(plugin, autospec=True)
-        self.plugin = self._plugin_patcher.start()
-
-        # Instantiate mock plugin and enable the V2attributes extension
-        manager.NeutronManager.get_plugin().supported_extension_aliases = (
-            ["v2attrs"])
-
-        api = router.APIRouter()
-        self.api = webtest.TestApp(api)
-
-        quota.QUOTAS._driver = None
-        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
-                              group='QUOTAS')
-
-    def tearDown(self):
-        super(ExtensionTestCase, self).tearDown()
-        self.api = None
-        self.plugin = None
-
-    def test_extended_create(self):
-        net_id = _uuid()
-        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(),
-                                     'v2attrs:something_else': "abc"}}
-        data = {'network': {'admin_state_up': True, 'shared': False}}
-        data['network'].update(initial_input['network'])
-
-        return_value = {'subnets': [], 'status': "ACTIVE",
-                        'id': net_id,
-                        'v2attrs:something': "123"}
-        return_value.update(data['network'].copy())
-
-        instance = self.plugin.return_value
-        instance.create_network.return_value = return_value
-        instance.get_networks_count.return_value = 0
-
-        res = self.api.post_json(_get_path('networks'), initial_input)
-
-        instance.create_network.assert_called_with(mock.ANY,
-                                                   network=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        self.assertIn('network', res.json)
-        net = res.json['network']
-        self.assertEqual(net['id'], net_id)
-        self.assertEqual(net['status'], "ACTIVE")
-        self.assertEqual(net['v2attrs:something'], "123")
-        self.assertNotIn('v2attrs:something_else', net)
-
-
-class TestSubresourcePlugin(object):
-    def get_network_dummies(self, context, network_id,
-                            filters=None, fields=None):
-        return []
-
-    def get_network_dummy(self, context, id, network_id,
-                          fields=None):
-        return {}
-
-    def create_network_dummy(self, context, network_id, dummy):
-        return {}
-
-    def update_network_dummy(self, context, id, network_id, dummy):
-        return {}
-
-    def delete_network_dummy(self, context, id, network_id):
-        return
-
-    def mactions(self, context, id, network_id):
-        return
-
-
-class ListArgsTestCase(base.BaseTestCase):
-    def test_list_args(self):
-        path = '/?fields=4&foo=3&fields=2&bar=1'
-        request = webob.Request.blank(path)
-        expect_val = ['2', '4']
-        actual_val = api_common.list_args(request, 'fields')
-        self.assertEqual(sorted(actual_val), expect_val)
-
-    def test_list_args_with_empty(self):
-        path = '/?foo=4&bar=3&baz=2&qux=1'
-        request = webob.Request.blank(path)
-        self.assertEqual([], api_common.list_args(request, 'fields'))
-
-
-class FiltersTestCase(base.BaseTestCase):
-    def test_all_skip_args(self):
-        path = '/?fields=4&fields=3&fields=2&fields=1'
-        request = webob.Request.blank(path)
-        self.assertEqual({}, api_common.get_filters(request, None,
-                                                    ["fields"]))
-
-    def test_blank_values(self):
-        path = '/?foo=&bar=&baz=&qux='
-        request = webob.Request.blank(path)
-        self.assertEqual({}, api_common.get_filters(request, {}))
-
-    def test_no_attr_info(self):
-        path = '/?foo=4&bar=3&baz=2&qux=1'
-        request = webob.Request.blank(path)
-        expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
-        actual_val = api_common.get_filters(request, {})
-        self.assertEqual(actual_val, expect_val)
-
-    def test_attr_info_without_conversion(self):
-        path = '/?foo=4&bar=3&baz=2&qux=1'
-        request = webob.Request.blank(path)
-        attr_info = {'foo': {'key': 'val'}}
-        expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
-        actual_val = api_common.get_filters(request, attr_info)
-        self.assertEqual(actual_val, expect_val)
-
-    def test_attr_info_with_convert_list_to(self):
-        path = '/?foo=key=4&bar=3&foo=key=2&qux=1'
-        request = webob.Request.blank(path)
-        attr_info = {
-            'foo': {
-                'convert_list_to': attributes.convert_kvp_list_to_dict,
-            }
-        }
-        expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
-        actual_val = api_common.get_filters(request, attr_info)
-        self.assertOrderedEqual(expect_val, actual_val)
-
-    def test_attr_info_with_convert_to(self):
-        path = '/?foo=4&bar=3&baz=2&qux=1'
-        request = webob.Request.blank(path)
-        attr_info = {'foo': {'convert_to': attributes.convert_to_int}}
-        expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
-        actual_val = api_common.get_filters(request, attr_info)
-        self.assertEqual(actual_val, expect_val)
-
-
-class CreateResourceTestCase(base.BaseTestCase):
-    def test_resource_creation(self):
-        resource = v2_base.create_resource('fakes', 'fake', None, {})
-        self.assertIsInstance(resource, webob.dec.wsgify)
diff --git a/neutron/tests/unit/api/v2/test_resource.py b/neutron/tests/unit/api/v2/test_resource.py
deleted file mode 100644 (file)
index 6b675c3..0000000
+++ /dev/null
@@ -1,350 +0,0 @@
-# Copyright (c) 2012 Intel Corporation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import oslo_i18n
-from webob import exc
-import webtest
-
-from neutron._i18n import _
-from neutron.api.v2 import resource as wsgi_resource
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.tests import base
-from neutron import wsgi
-
-
-class RequestTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(RequestTestCase, self).setUp()
-        self.req = wsgi_resource.Request({'foo': 'bar'})
-
-    def test_content_type_missing(self):
-        request = wsgi.Request.blank('/tests/123', method='POST')
-        request.body = b"<body />"
-        self.assertIsNone(request.get_content_type())
-
-    def test_content_type_with_charset(self):
-        request = wsgi.Request.blank('/tests/123')
-        request.headers["Content-Type"] = "application/json; charset=UTF-8"
-        result = request.get_content_type()
-        self.assertEqual(result, "application/json")
-
-    def test_content_type_from_accept(self):
-        content_type = 'application/json'
-        request = wsgi.Request.blank('/tests/123')
-        request.headers["Accept"] = content_type
-        result = request.best_match_content_type()
-        self.assertEqual(result, content_type)
-
-    def test_content_type_from_accept_best(self):
-        request = wsgi.Request.blank('/tests/123')
-        request.headers["Accept"] = "application/json"
-        result = request.best_match_content_type()
-        self.assertEqual(result, "application/json")
-
-        request = wsgi.Request.blank('/tests/123')
-        request.headers["Accept"] = ("application/json; q=0.3, "
-                                     "application/xml; q=0.9")
-        result = request.best_match_content_type()
-        self.assertEqual(result, "application/json")
-
-    def test_content_type_from_query_extension(self):
-        request = wsgi.Request.blank('/tests/123.json')
-        result = request.best_match_content_type()
-        self.assertEqual(result, "application/json")
-
-        request = wsgi.Request.blank('/tests/123.invalid')
-        result = request.best_match_content_type()
-        self.assertEqual(result, "application/json")
-
-    def test_content_type_accept_and_query_extension(self):
-        request = wsgi.Request.blank('/tests/123.json')
-        request.headers["Accept"] = "application/xml"
-        result = request.best_match_content_type()
-        self.assertEqual(result, "application/json")
-
-    def test_content_type_accept_default(self):
-        request = wsgi.Request.blank('/tests/123.unsupported')
-        request.headers["Accept"] = "application/unsupported1"
-        result = request.best_match_content_type()
-        self.assertEqual(result, "application/json")
-
-    def test_context_with_neutron_context(self):
-        ctxt = context.Context('fake_user', 'fake_tenant')
-        self.req.environ['neutron.context'] = ctxt
-        self.assertEqual(self.req.context, ctxt)
-
-    def test_context_without_neutron_context(self):
-        self.assertTrue(self.req.context.is_admin)
-
-    def test_request_context_elevated(self):
-        user_context = context.Context(
-            'fake_user', 'fake_project', admin=False)
-        self.assertFalse(user_context.is_admin)
-        admin_context = user_context.elevated()
-        self.assertFalse(user_context.is_admin)
-        self.assertTrue(admin_context.is_admin)
-        self.assertNotIn('admin', user_context.roles)
-        self.assertIn('admin', admin_context.roles)
-
-    def test_best_match_language(self):
-        # Test that we are actually invoking language negotiation via webob
-        request = wsgi.Request.blank('/')
-        oslo_i18n.get_available_languages = mock.MagicMock()
-        oslo_i18n.get_available_languages.return_value = ['known-language',
-                                                          'es', 'zh']
-        request.headers['Accept-Language'] = 'known-language'
-        language = request.best_match_language()
-        self.assertEqual(language, 'known-language')
-
-        # If the Accept-Language header is an unknown language, missing or
-        # empty, the best match locale should be None
-        request.headers['Accept-Language'] = 'unknown-language'
-        language = request.best_match_language()
-        self.assertIsNone(language)
-        request.headers['Accept-Language'] = ''
-        language = request.best_match_language()
-        self.assertIsNone(language)
-        request.headers.pop('Accept-Language')
-        language = request.best_match_language()
-        self.assertIsNone(language)
-
-
-class ResourceTestCase(base.BaseTestCase):
-
-    @staticmethod
-    def _get_deserializer():
-        return wsgi.JSONDeserializer()
-
-    def test_unmapped_neutron_error_with_json(self):
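-        # A non-ASCII message (u'\u7f51\u7edc', "network" in Chinese)
-        # exercises unicode handling in the error path.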
-        msg = u'\u7f51\u7edc'
-
-        class TestException(n_exc.NeutronException):
-            message = msg
-        expected_res = {'body': {
-            'NeutronError': {
-                'type': 'TestException',
-                'message': msg,
-                'detail': ''}}}
-        controller = mock.MagicMock()
-        controller.test.side_effect = TestException()
-
-        resource = webtest.TestApp(wsgi_resource.Resource(controller))
-
-        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
-                                                   'format': 'json'})}
-        res = resource.get('', extra_environ=environ, expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
-        self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
-                         expected_res)
-
-    @mock.patch('oslo_i18n.translate')
-    def test_unmapped_neutron_error_localized(self, mock_translation):
-        msg_translation = 'Translated error'
-        mock_translation.return_value = msg_translation
-        msg = _('Unmapped error')
-
-        class TestException(n_exc.NeutronException):
-            message = msg
-
-        controller = mock.MagicMock()
-        controller.test.side_effect = TestException()
-        resource = webtest.TestApp(wsgi_resource.Resource(controller))
-
-        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
-                                                   'format': 'json'})}
-
-        res = resource.get('', extra_environ=environ, expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
-        self.assertIn(msg_translation,
-                      str(wsgi.JSONDeserializer().deserialize(res.body)))
-
-    def test_mapped_neutron_error_with_json(self):
-        msg = u'\u7f51\u7edc'
-
-        class TestException(n_exc.NeutronException):
-            message = msg
-        expected_res = {'body': {
-            'NeutronError': {
-                'type': 'TestException',
-                'message': msg,
-                'detail': ''}}}
-        controller = mock.MagicMock()
-        controller.test.side_effect = TestException()
-
-        faults = {TestException: exc.HTTPGatewayTimeout}
-        resource = webtest.TestApp(wsgi_resource.Resource(controller,
-                                                          faults=faults))
-
-        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
-                                                   'format': 'json'})}
-        res = resource.get('', extra_environ=environ, expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
-        self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
-                         expected_res)
-
-    @mock.patch('oslo_i18n.translate')
-    def test_mapped_neutron_error_localized(self, mock_translation):
-        msg_translation = 'Translated error'
-        mock_translation.return_value = msg_translation
-        msg = _('Unmapped error')
-
-        class TestException(n_exc.NeutronException):
-            message = msg
-
-        controller = mock.MagicMock()
-        controller.test.side_effect = TestException()
-        faults = {TestException: exc.HTTPGatewayTimeout}
-        resource = webtest.TestApp(wsgi_resource.Resource(controller,
-                                                          faults=faults))
-
-        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
-                                                   'format': 'json'})}
-
-        res = resource.get('', extra_environ=environ, expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
-        self.assertIn(msg_translation,
-                      str(wsgi.JSONDeserializer().deserialize(res.body)))
-
-    @staticmethod
-    def _make_request_with_side_effect(side_effect):
-        controller = mock.MagicMock()
-        controller.test.side_effect = side_effect
-
-        resource = webtest.TestApp(wsgi_resource.Resource(controller))
-
-        routing_args = {'action': 'test'}
-        environ = {'wsgiorg.routing_args': (None, routing_args)}
-        res = resource.get('', extra_environ=environ, expect_errors=True)
-        return res
-
-    def test_http_error(self):
-        res = self._make_request_with_side_effect(exc.HTTPGatewayTimeout())
-
-        # verify that the exception structure is the one expected
-        # by the python-neutronclient
-        self.assertEqual(exc.HTTPGatewayTimeout().explanation,
-                         res.json['NeutronError']['message'])
-        self.assertEqual('HTTPGatewayTimeout',
-                         res.json['NeutronError']['type'])
-        self.assertEqual('', res.json['NeutronError']['detail'])
-        self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int)
-
-    def test_unhandled_error(self):
-        expected_res = {'body': {'NeutronError':
-                                {'detail': '',
-                                 'message': _(
-                                     'Request Failed: internal server '
-                                     'error while processing your request.'),
-                                 'type': 'HTTPInternalServerError'}}}
-        res = self._make_request_with_side_effect(side_effect=Exception())
-        self.assertEqual(exc.HTTPInternalServerError.code,
-                         res.status_int)
-        self.assertEqual(expected_res,
-                         self._get_deserializer().deserialize(res.body))
-
-    def test_not_implemented_error(self):
-        expected_res = {'body': {'NeutronError':
-                                {'detail': '',
-                                 'message': _(
-                                     'The server has either erred or is '
-                                     'incapable of performing the requested '
-                                     'operation.'),
-                                 'type': 'HTTPNotImplemented'}}}
-
-        res = self._make_request_with_side_effect(exc.HTTPNotImplemented())
-        self.assertEqual(exc.HTTPNotImplemented.code, res.status_int)
-        self.assertEqual(expected_res,
-                         self._get_deserializer().deserialize(res.body))
-
-    def test_status_200(self):
-        controller = mock.MagicMock()
-        controller.test = lambda request: {'foo': 'bar'}
-
-        resource = webtest.TestApp(wsgi_resource.Resource(controller))
-
-        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
-        res = resource.get('', extra_environ=environ)
-        self.assertEqual(res.status_int, 200)
-
-    def test_status_204(self):
-        controller = mock.MagicMock()
-        controller.test = lambda request: {'foo': 'bar'}
-
-        resource = webtest.TestApp(wsgi_resource.Resource(controller))
-
-        environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})}
-        res = resource.delete('', extra_environ=environ)
-        self.assertEqual(res.status_int, 204)
-
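-    # Helper: raise the given exception from the controller and verify which
-    # log level the resource wrapper uses: expect_log_info selects INFO,
-    # otherwise the exception level; use_fault_map controls whether the
-    # raised exception is mapped to a webob fault.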
-    def _test_error_log_level(self, expected_webob_exc, expect_log_info=False,
-                              use_fault_map=True, exc_raised=None):
-        if not exc_raised:
-            class TestException(n_exc.NeutronException):
-                message = 'Test Exception'
-            exc_raised = TestException
-
-        controller = mock.MagicMock()
-        controller.test.side_effect = exc_raised()
-        faults = {exc_raised: expected_webob_exc} if use_fault_map else {}
-        resource = webtest.TestApp(wsgi_resource.Resource(controller, faults))
-        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
-        with mock.patch.object(wsgi_resource, 'LOG') as log:
-            res = resource.get('', extra_environ=environ, expect_errors=True)
-            self.assertEqual(res.status_int, expected_webob_exc.code)
-        self.assertEqual(expect_log_info, log.info.called)
-        self.assertNotEqual(expect_log_info, log.exception.called)
-
-    def test_4xx_error_logged_info_level(self):
-        self._test_error_log_level(exc.HTTPNotFound, expect_log_info=True)
-
-    def test_non_4xx_error_logged_exception_level(self):
-        self._test_error_log_level(exc.HTTPServiceUnavailable,
-                                   expect_log_info=False)
-
-    def test_unmapped_error_logged_exception_level(self):
-        self._test_error_log_level(exc.HTTPInternalServerError,
-                                   expect_log_info=False, use_fault_map=False)
-
-    def test_webob_4xx_logged_info_level(self):
-        self._test_error_log_level(exc.HTTPNotFound,
-                                   use_fault_map=False, expect_log_info=True,
-                                   exc_raised=exc.HTTPNotFound)
-
-    def test_webob_5xx_logged_info_level(self):
-        self._test_error_log_level(exc.HTTPServiceUnavailable,
-                                   use_fault_map=False, expect_log_info=False,
-                                   exc_raised=exc.HTTPServiceUnavailable)
-
-    def test_no_route_args(self):
-        controller = mock.MagicMock()
-
-        resource = webtest.TestApp(wsgi_resource.Resource(controller))
-
-        environ = {}
-        res = resource.get('', extra_environ=environ, expect_errors=True)
-        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
-
-    def test_post_with_body(self):
-        controller = mock.MagicMock()
-        controller.test = lambda request, body: {'foo': 'bar'}
-
-        resource = webtest.TestApp(wsgi_resource.Resource(controller))
-
-        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
-        res = resource.post('', params='{"key": "val"}',
-                            extra_environ=environ)
-        self.assertEqual(res.status_int, 200)
diff --git a/neutron/tests/unit/callbacks/__init__.py b/neutron/tests/unit/callbacks/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/callbacks/test_manager.py b/neutron/tests/unit/callbacks/test_manager.py
deleted file mode 100644 (file)
index f013afa..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-# Copyright 2015 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.callbacks import events
-from neutron.callbacks import exceptions
-from neutron.callbacks import manager
-from neutron.callbacks import resources
-from neutron.tests import base
-
-
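-# Module-level callbacks keep invocation counters so tests can assert how
-# many times the manager fired them; manager._get_id() yields the key the
-# registry uses to index each callback.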
-def callback_1(*args, **kwargs):
-    callback_1.counter += 1
-callback_id_1 = manager._get_id(callback_1)
-
-
-def callback_2(*args, **kwargs):
-    callback_2.counter += 1
-callback_id_2 = manager._get_id(callback_2)
-
-
-def callback_raise(*args, **kwargs):
-    raise Exception()
-
-
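-# A minimal sketch of the flow these tests exercise (names as imported
-# above; illustrative only):
-#
-#     mgr = manager.CallbacksManager()
-#     mgr.subscribe(callback_1, resources.PORT, events.BEFORE_CREATE)
-#     mgr.notify(resources.PORT, events.BEFORE_CREATE, trigger)
-#
-# _get_id() returns the key a callback is registered under, which is why
-# callback_id_1 and callback_id_2 can be looked up directly below.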
-class CallBacksManagerTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(CallBacksManagerTestCase, self).setUp()
-        self.manager = manager.CallbacksManager()
-        callback_1.counter = 0
-        callback_2.counter = 0
-
-    def test_subscribe(self):
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.assertIsNotNone(
-            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
-        self.assertIn(callback_id_1, self.manager._index)
-
-    def test_subscribe_unknown(self):
-        self.manager.subscribe(
-            callback_1, 'my_resource', 'my-event')
-        self.assertIsNotNone(
-            self.manager._callbacks['my_resource']['my-event'])
-        self.assertIn(callback_id_1, self.manager._index)
-
-    def test_subscribe_is_idempotent(self):
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.assertEqual(
-            1,
-            len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]))
-        callbacks = self.manager._index[callback_id_1][resources.PORT]
-        self.assertEqual(1, len(callbacks))
-
-    def test_subscribe_multiple_callbacks(self):
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.manager.subscribe(
-            callback_2, resources.PORT, events.BEFORE_CREATE)
-        self.assertEqual(2, len(self.manager._index))
-        self.assertEqual(
-            2,
-            len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]))
-
-    def test_unsubscribe(self):
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.manager.unsubscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.assertNotIn(
-            callback_id_1,
-            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
-        self.assertNotIn(callback_id_1, self.manager._index)
-
-    def test_unsubscribe_unknown_callback(self):
-        self.manager.subscribe(
-            callback_2, resources.PORT, events.BEFORE_CREATE)
-        self.manager.unsubscribe(callback_1, mock.ANY, mock.ANY)
-        self.assertEqual(1, len(self.manager._index))
-
-    def test_unsubscribe_is_idempotent(self):
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.manager.unsubscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.manager.unsubscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.assertNotIn(callback_id_1, self.manager._index)
-        self.assertNotIn(
-            callback_id_1,
-            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
-
-    def test_unsubscribe_by_resource(self):
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_DELETE)
-        self.manager.subscribe(
-            callback_2, resources.PORT, events.BEFORE_DELETE)
-        self.manager.unsubscribe_by_resource(callback_1, resources.PORT)
-        self.assertNotIn(
-            callback_id_1,
-            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
-        self.assertIn(
-            callback_id_2,
-            self.manager._callbacks[resources.PORT][events.BEFORE_DELETE])
-        self.assertNotIn(callback_id_1, self.manager._index)
-
-    def test_unsubscribe_all(self):
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_DELETE)
-        self.manager.subscribe(
-            callback_1, resources.ROUTER, events.BEFORE_CREATE)
-        self.manager.unsubscribe_all(callback_1)
-        self.assertNotIn(
-            callback_id_1,
-            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
-        self.assertNotIn(callback_id_1, self.manager._index)
-
-    def test_notify_none(self):
-        self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY)
-        self.assertEqual(0, callback_1.counter)
-        self.assertEqual(0, callback_2.counter)
-
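-    # The lambda below keeps no outside strong reference; this checks that
-    # notify() still works for such a feebly (weakly) referenced callback.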
-    def test_feebly_referenced_callback(self):
-        self.manager.subscribe(lambda *x, **y: None, resources.PORT,
-                               events.BEFORE_CREATE)
-        self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY)
-
-    def test_notify_with_exception(self):
-        with mock.patch.object(self.manager, '_notify_loop') as n:
-            n.return_value = ['error']
-            self.assertRaises(exceptions.CallbackFailure,
-                              self.manager.notify,
-                              mock.ANY, events.BEFORE_CREATE,
-                              'trigger', params={'a': 1})
-            expected_calls = [
-                mock.call(mock.ANY, 'before_create',
-                          'trigger', params={'a': 1}),
-                mock.call(mock.ANY, 'abort_create',
-                          'trigger', params={'a': 1})
-            ]
-            n.assert_has_calls(expected_calls)
-
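-    # An exception raised inside a subscriber is wrapped in
-    # NotificationError and surfaced through CallbackFailure.errors.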
-    def test_notify_handle_exception(self):
-        self.manager.subscribe(
-            callback_raise, resources.PORT, events.BEFORE_CREATE)
-        e = self.assertRaises(exceptions.CallbackFailure, self.manager.notify,
-                              resources.PORT, events.BEFORE_CREATE, self)
-        self.assertIsInstance(e.errors[0], exceptions.NotificationError)
-
-    def test_notify_called_once_with_no_failures(self):
-        with mock.patch.object(self.manager, '_notify_loop') as n:
-            n.return_value = False
-            self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY)
-            n.assert_called_once_with(
-                resources.PORT, events.BEFORE_CREATE, mock.ANY)
-
-    def test__notify_loop_single_event(self):
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.manager.subscribe(
-            callback_2, resources.PORT, events.BEFORE_CREATE)
-        self.manager._notify_loop(
-            resources.PORT, events.BEFORE_CREATE, mock.ANY)
-        self.assertEqual(1, callback_1.counter)
-        self.assertEqual(1, callback_2.counter)
-
-    def test__notify_loop_multiple_events(self):
-        self.manager.subscribe(
-            callback_1, resources.PORT, events.BEFORE_CREATE)
-        self.manager.subscribe(
-            callback_1, resources.ROUTER, events.BEFORE_DELETE)
-        self.manager.subscribe(
-            callback_2, resources.PORT, events.BEFORE_CREATE)
-        self.manager._notify_loop(
-            resources.PORT, events.BEFORE_CREATE, mock.ANY)
-        self.manager._notify_loop(
-            resources.ROUTER, events.BEFORE_DELETE, mock.ANY)
-        self.assertEqual(2, callback_1.counter)
-        self.assertEqual(1, callback_2.counter)
diff --git a/neutron/tests/unit/cmd/__init__.py b/neutron/tests/unit/cmd/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/cmd/server/__init__.py b/neutron/tests/unit/cmd/server/__init__.py
deleted file mode 100644 (file)
index 24090cc..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-
-from neutron.cmd.eventlet import server
-from neutron.tests import base
-
-
-@mock.patch('neutron.cmd.eventlet.server.main_wsgi_eventlet')
-@mock.patch('neutron.cmd.eventlet.server.main_wsgi_pecan')
-class TestNeutronServer(base.BaseTestCase):
-
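-    # The web_framework option picks the entry point: 'legacy' starts the
-    # eventlet WSGI server, 'pecan' the Pecan-based server.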
-    def test_legacy_server(self, pecan_mock, legacy_mock):
-        cfg.CONF.set_override('web_framework', 'legacy')
-        server.main()
-        pecan_mock.assert_not_called()
-        legacy_mock.assert_called_with()
-
-    def test_pecan_server(self, pecan_mock, legacy_mock):
-        cfg.CONF.set_override('web_framework', 'pecan')
-        server.main()
-        pecan_mock.assert_called_with()
-        legacy_mock.assert_not_called()
diff --git a/neutron/tests/unit/cmd/test_netns_cleanup.py b/neutron/tests/unit/cmd/test_netns_cleanup.py
deleted file mode 100644 (file)
index e12292d..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.cmd import netns_cleanup as util
-from neutron.tests import base
-
-
-class TestNetnsCleanup(base.BaseTestCase):
-
-    def test_kill_dhcp(self, dhcp_active=True):
-        conf = mock.Mock()
-        conf.dhcp_driver = 'driver'
-
-        method_to_patch = 'oslo_utils.importutils.import_object'
-
-        with mock.patch(method_to_patch) as import_object:
-            driver = mock.Mock()
-            driver.active = dhcp_active
-            import_object.return_value = driver
-
-            util.kill_dhcp(conf, 'ns')
-
-            expected_params = {'conf': conf, 'network': mock.ANY,
-                               'process_monitor': mock.ANY,
-                               'plugin': mock.ANY}
-            import_object.assert_called_once_with('driver', **expected_params)
-
-            if dhcp_active:
-                driver.assert_has_calls([mock.call.disable()])
-            else:
-                self.assertFalse(driver.called)
-
-    def test_kill_dhcp_no_active(self):
-        self.test_kill_dhcp(False)
-
-    def test_eligible_for_deletion_ns_not_uuid(self):
-        ns = 'not_a_uuid'
-        self.assertFalse(util.eligible_for_deletion(mock.Mock(), ns))
-
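-    # Namespaces are named <agent prefix><UUID> (qrouter-, qdhcp-, fip-,
-    # qlbaas-, snat-); force=True skips the emptiness check, as the cases
-    # below show.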
-    def _test_eligible_for_deletion_helper(self, prefix, force, is_empty,
-                                           expected):
-        ns = prefix + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
-        conf = mock.Mock()
-
-        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
-            ip_wrap.return_value.namespace_is_empty.return_value = is_empty
-            self.assertEqual(util.eligible_for_deletion(conf, ns, force),
-                             expected)
-
-            expected_calls = [mock.call(namespace=ns)]
-            if not force:
-                expected_calls.append(mock.call().namespace_is_empty())
-            ip_wrap.assert_has_calls(expected_calls)
-
-    def test_eligible_for_deletion_empty(self):
-        self._test_eligible_for_deletion_helper('qrouter-', False, True, True)
-
-    def test_eligible_for_deletion_not_empty(self):
-        self._test_eligible_for_deletion_helper('qdhcp-', False, False, False)
-
-    def test_eligible_for_deletion_not_empty_forced(self):
-        self._test_eligible_for_deletion_helper('qdhcp-', True, False, True)
-
-    def test_eligible_for_deletion_fip_namespace(self):
-        self._test_eligible_for_deletion_helper('fip-', False, True, True)
-
-    def test_eligible_for_deletion_lbaas_namespace(self):
-        self._test_eligible_for_deletion_helper('qlbaas-', False, True, True)
-
-    def test_eligible_for_deletion_snat_namespace(self):
-        self._test_eligible_for_deletion_helper('snat-', False, True, True)
-
-    def test_unplug_device_regular_device(self):
-        conf = mock.Mock()
-        device = mock.Mock()
-
-        util.unplug_device(conf, device)
-        device.assert_has_calls([mock.call.link.delete()])
-
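-    # When link.delete() raises RuntimeError, the device is treated as an
-    # OVS port and removed via delete_port() on the bridge reported by
-    # get_bridge_for_iface().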
-    def test_unplug_device_ovs_port(self):
-        conf = mock.Mock()
-        conf.ovs_integration_bridge = 'br-int'
-
-        device = mock.Mock()
-        device.name = 'tap1'
-        device.link.delete.side_effect = RuntimeError
-
-        with mock.patch(
-                'neutron.agent.common.ovs_lib.OVSBridge') as ovs_br_cls:
-            br_patch = mock.patch(
-                'neutron.agent.common.ovs_lib.BaseOVS.get_bridge_for_iface')
-            with br_patch as mock_get_bridge_for_iface:
-                mock_get_bridge_for_iface.return_value = 'br-int'
-                ovs_bridge = mock.Mock()
-                ovs_br_cls.return_value = ovs_bridge
-
-                util.unplug_device(conf, device)
-
-                mock_get_bridge_for_iface.assert_called_once_with('tap1')
-                ovs_br_cls.assert_called_once_with('br-int')
-                ovs_bridge.assert_has_calls(
-                    [mock.call.delete_port(device.name)])
-
-    def test_unplug_device_cannot_determine_bridge_port(self):
-        conf = mock.Mock()
-        conf.ovs_integration_bridge = 'br-int'
-
-        device = mock.Mock()
-        device.name = 'tap1'
-        device.link.delete.side_effect = RuntimeError
-
-        with mock.patch(
-                'neutron.agent.common.ovs_lib.OVSBridge') as ovs_br_cls:
-            br_patch = mock.patch(
-                'neutron.agent.common.ovs_lib.BaseOVS.get_bridge_for_iface')
-            with br_patch as mock_get_bridge_for_iface:
-                with mock.patch.object(util.LOG, 'debug') as debug:
-                    mock_get_bridge_for_iface.return_value = None
-                    ovs_bridge = mock.Mock()
-                    ovs_br_cls.return_value = ovs_bridge
-
-                    util.unplug_device(conf, device)
-
-                    mock_get_bridge_for_iface.assert_called_once_with('tap1')
-                    self.assertEqual([], ovs_br_cls.mock_calls)
-                    self.assertTrue(debug.called)
-
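-    # With force=True, destroy_namespace() kills DHCP and unplugs every
-    # device except the loopback before garbage-collecting the namespace;
-    # without force only garbage collection is attempted.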
-    def _test_destroy_namespace_helper(self, force, num_devices):
-        ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
-        conf = mock.Mock()
-
-        lo_device = mock.Mock()
-        lo_device.name = 'lo'
-
-        devices = [lo_device]
-
-        while num_devices:
-            dev = mock.Mock()
-            dev.name = 'tap%d' % num_devices
-            devices.append(dev)
-            num_devices -= 1
-
-        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
-            ip_wrap.return_value.get_devices.return_value = devices
-            ip_wrap.return_value.netns.exists.return_value = True
-
-            with mock.patch.object(util, 'unplug_device') as unplug:
-
-                with mock.patch.object(util, 'kill_dhcp') as kill_dhcp:
-                    util.destroy_namespace(conf, ns, force)
-                    expected = [mock.call(namespace=ns)]
-
-                    if force:
-                        expected.extend([
-                            mock.call().netns.exists(ns),
-                            mock.call().get_devices(exclude_loopback=True)])
-                        self.assertTrue(kill_dhcp.called)
-                        unplug.assert_has_calls(
-                            [mock.call(conf, d) for d in
-                             devices[1:]])
-
-                    expected.append(mock.call().garbage_collect_namespace())
-                    ip_wrap.assert_has_calls(expected)
-
-    def test_destroy_namespace_empty(self):
-        self._test_destroy_namespace_helper(False, 0)
-
-    def test_destroy_namespace_not_empty(self):
-        self._test_destroy_namespace_helper(False, 1)
-
-    def test_destroy_namespace_not_empty_forced(self):
-        self._test_destroy_namespace_helper(True, 2)
-
-    def test_destroy_namespace_exception(self):
-        ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
-        conf = mock.Mock()
-        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
-            ip_wrap.side_effect = Exception()
-            util.destroy_namespace(conf, ns)
-
-    def test_main(self):
-        namespaces = ['ns1', 'ns2']
-        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
-            ip_wrap.get_namespaces.return_value = namespaces
-
-            with mock.patch('time.sleep') as time_sleep:
-                conf = mock.Mock()
-                conf.force = False
-                methods_to_mock = dict(
-                    eligible_for_deletion=mock.DEFAULT,
-                    destroy_namespace=mock.DEFAULT,
-                    setup_conf=mock.DEFAULT)
-
-                with mock.patch.multiple(util, **methods_to_mock) as mocks:
-                    mocks['eligible_for_deletion'].return_value = True
-                    mocks['setup_conf'].return_value = conf
-                    with mock.patch('neutron.common.config.setup_logging'):
-                        util.main()
-
-                        mocks['eligible_for_deletion'].assert_has_calls(
-                            [mock.call(conf, 'ns1', False),
-                             mock.call(conf, 'ns2', False)])
-
-                        mocks['destroy_namespace'].assert_has_calls(
-                            [mock.call(conf, 'ns1', False),
-                             mock.call(conf, 'ns2', False)])
-
-                        ip_wrap.assert_has_calls(
-                            [mock.call.get_namespaces()])
-
-                        time_sleep.assert_called_once_with(2)
-
-    def test_main_no_candidates(self):
-        namespaces = ['ns1', 'ns2']
-        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
-            ip_wrap.get_namespaces.return_value = namespaces
-
-            with mock.patch('time.sleep') as time_sleep:
-                conf = mock.Mock()
-                conf.force = False
-                methods_to_mock = dict(
-                    eligible_for_deletion=mock.DEFAULT,
-                    destroy_namespace=mock.DEFAULT,
-                    setup_conf=mock.DEFAULT)
-
-                with mock.patch.multiple(util, **methods_to_mock) as mocks:
-                    mocks['eligible_for_deletion'].return_value = False
-                    mocks['setup_conf'].return_value = conf
-                    with mock.patch('neutron.common.config.setup_logging'):
-                        util.main()
-
-                        ip_wrap.assert_has_calls(
-                            [mock.call.get_namespaces()])
-
-                        mocks['eligible_for_deletion'].assert_has_calls(
-                            [mock.call(conf, 'ns1', False),
-                             mock.call(conf, 'ns2', False)])
-
-                        self.assertFalse(mocks['destroy_namespace'].called)
-
-                        self.assertFalse(time_sleep.called)
diff --git a/neutron/tests/unit/cmd/test_ovs_cleanup.py b/neutron/tests/unit/cmd/test_ovs_cleanup.py
deleted file mode 100644 (file)
index 4d6b27c..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import itertools
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.agent.common import ovs_lib
-from neutron.agent.linux import ip_lib
-from neutron.cmd import ovs_cleanup as util
-from neutron.tests import base
-
-
-class TestOVSCleanup(base.BaseTestCase):
-
-    @mock.patch('neutron.common.config.setup_logging')
-    @mock.patch('neutron.cmd.ovs_cleanup.setup_conf')
-    @mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges')
-    @mock.patch('neutron.agent.common.ovs_lib.OVSBridge')
-    @mock.patch.object(util, 'collect_neutron_ports')
-    @mock.patch.object(util, 'delete_neutron_ports')
-    def test_main(self, mock_delete, mock_collect, mock_ovs,
-                  mock_get_bridges, mock_conf, mock_logging):
-        bridges = ['br-int', 'br-ex']
-        ports = ['p1', 'p2', 'p3']
-        conf = mock.Mock()
-        conf.ovs_all_ports = False
-        conf.ovs_integration_bridge = 'br-int'
-        conf.external_network_bridge = 'br-ex'
-        mock_conf.return_value = conf
-        mock_get_bridges.return_value = bridges
-        mock_collect.return_value = ports
-
-        util.main()
-        mock_ovs.assert_has_calls([mock.call().delete_ports(
-            all_ports=False)])
-        mock_collect.assert_called_once_with(set(bridges))
-        mock_delete.assert_called_once_with(ports)
-
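-    # Full VifPort objects are built here, but collect_neutron_ports() only
-    # consumes their port_name attribute.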
-    def test_collect_neutron_ports(self):
-        port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
-                                '11:22:33:44:55:66', 'br')
-        port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
-                                '77:88:99:aa:bb:cc', 'br')
-        port3 = ovs_lib.VifPort('tap90ab', 3, uuidutils.generate_uuid(),
-                                '99:00:aa:bb:cc:dd', 'br')
-        ports = [[port1, port2], [port3]]
-        portnames = [p.port_name for p in itertools.chain(*ports)]
-        with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs:
-            ovs.return_value.get_vif_ports.side_effect = ports
-            bridges = ['br-int', 'br-ex']
-            ret = util.collect_neutron_ports(bridges)
-            self.assertEqual(ret, portnames)
-
-    @mock.patch.object(ip_lib, 'IPDevice')
-    def test_delete_neutron_ports(self, mock_ip):
-        ports = ['tap1234', 'tap5678', 'tap09ab']
-        port_found = [True, False, True]
-
-        mock_ip.return_value.exists.side_effect = port_found
-        util.delete_neutron_ports(ports)
-        mock_ip.assert_has_calls(
-            [mock.call('tap1234'),
-             mock.call().exists(),
-             mock.call().link.delete(),
-             mock.call('tap5678'),
-             mock.call().exists(),
-             mock.call('tap09ab'),
-             mock.call().exists(),
-             mock.call().link.delete()])
diff --git a/neutron/tests/unit/cmd/test_sanity_check.py b/neutron/tests/unit/cmd/test_sanity_check.py
deleted file mode 100644 (file)
index c30c2d5..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.cmd import sanity_check
-from neutron.tests import base
-
-
-class TestSanityCheck(base.BaseTestCase):
-
-    def test_setup_conf(self):
-        # verify that the configuration can be set up successfully
-        sanity_check.setup_conf()
diff --git a/neutron/tests/unit/common/__init__.py b/neutron/tests/unit/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/common/test_ipv6_utils.py b/neutron/tests/unit/common/test_ipv6_utils.py
deleted file mode 100644 (file)
index 4f891a1..0000000
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import mock
-
-from neutron.common import constants
-from neutron.common import ipv6_utils
-from neutron.tests import base
-from neutron.tests import tools
-
-
-class IPv6byEUI64TestCase(base.BaseTestCase):
-    """Unit tests for generate IPv6 by EUI-64 operations."""
-
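-    # An EUI-64 interface ID inserts ff:fe into the middle of the MAC and
-    # flips the universal/local bit, e.g. 00:16:3e:33:44:55 with prefix
-    # 2001:db8:: yields 2001:db8::216:3eff:fe33:4455.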
-    def test_generate_IPv6_by_EUI64(self):
-        addr = ipv6_utils.get_ipv6_addr_by_EUI64('2001:db8::',
-                                                 '00:16:3e:33:44:55')
-        self.assertEqual('2001:db8::216:3eff:fe33:4455', addr.format())
-
-    def test_generate_IPv6_with_IPv4_prefix(self):
-        ipv4_prefix = '10.0.8'
-        mac = '00:16:3e:33:44:55'
-        self.assertRaises(TypeError, lambda:
-                          ipv6_utils.get_ipv6_addr_by_EUI64(ipv4_prefix, mac))
-
-    def test_generate_IPv6_with_bad_mac(self):
-        bad_mac = '00:16:3e:33:44:5Z'
-        prefix = '2001:db8::'
-        self.assertRaises(TypeError, lambda:
-                          ipv6_utils.get_ipv6_addr_by_EUI64(prefix, bad_mac))
-
-    def test_generate_IPv6_with_bad_prefix(self):
-        mac = '00:16:3e:33:44:55'
-        bad_prefix = 'bb'
-        self.assertRaises(TypeError, lambda:
-                          ipv6_utils.get_ipv6_addr_by_EUI64(bad_prefix, mac))
-
-    def test_generate_IPv6_with_error_prefix_type(self):
-        mac = '00:16:3e:33:44:55'
-        prefix = 123
-        self.assertRaises(TypeError, lambda:
-                          ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac))
-
-
-class TestIsEnabled(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestIsEnabled, self).setUp()
-
-        def reset_detection_flag():
-            ipv6_utils._IS_IPV6_ENABLED = None
-        reset_detection_flag()
-        self.addCleanup(reset_detection_flag)
-        self.mock_exists = mock.patch("os.path.exists",
-                                      return_value=True).start()
-        self.proc_path = '/proc/sys/net/ipv6/conf/default/disable_ipv6'
-
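-    # disable_ipv6 reads '0' when IPv6 is enabled and '1' when disabled; the
-    # result is memoized in _IS_IPV6_ENABLED, hence the reset in setUp().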
-    def test_enabled(self):
-        self.useFixture(tools.OpenFixture(self.proc_path, '0'))
-        enabled = ipv6_utils.is_enabled()
-        self.assertTrue(enabled)
-
-    def test_disabled(self):
-        self.useFixture(tools.OpenFixture(self.proc_path, '1'))
-        enabled = ipv6_utils.is_enabled()
-        self.assertFalse(enabled)
-
-    def test_disabled_non_exists(self):
-        mo = self.useFixture(tools.OpenFixture(self.proc_path, '1')).mock_open
-        self.mock_exists.return_value = False
-        enabled = ipv6_utils.is_enabled()
-        self.assertFalse(enabled)
-        self.assertFalse(mo.called)
-
-    def test_memoize(self):
-        mo = self.useFixture(tools.OpenFixture(self.proc_path, '0')).mock_open
-        ipv6_utils.is_enabled()
-        enabled = ipv6_utils.is_enabled()
-        self.assertTrue(enabled)
-        mo.assert_called_once_with(self.proc_path, 'r')
-
-
-class TestIsAutoAddressSubnet(base.BaseTestCase):
-
-    def setUp(self):
-        self.subnet = {
-            'cidr': '2001:200::/64',
-            'gateway_ip': '2001:200::1',
-            'ip_version': 6,
-            'ipv6_address_mode': None,
-            'ipv6_ra_mode': None
-        }
-        super(TestIsAutoAddressSubnet, self).setUp()
-
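-    # A subnet is auto-addressed when either mode is SLAAC or DHCPv6
-    # stateless, regardless of the other field, per the table below.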
-    def test_combinations(self):
-        Mode = collections.namedtuple('Mode', "addr_mode ra_mode "
-                                              "is_auto_address")
-        subnets = [
-            Mode(None, None, False),
-            Mode(constants.DHCPV6_STATEFUL, None, False),
-            Mode(constants.DHCPV6_STATELESS, None, True),
-            Mode(constants.IPV6_SLAAC, None, True),
-            Mode(None, constants.DHCPV6_STATEFUL, False),
-            Mode(None, constants.DHCPV6_STATELESS, True),
-            Mode(None, constants.IPV6_SLAAC, True),
-            Mode(constants.DHCPV6_STATEFUL, constants.DHCPV6_STATEFUL, False),
-            Mode(constants.DHCPV6_STATELESS, constants.DHCPV6_STATELESS, True),
-            Mode(constants.IPV6_SLAAC, constants.IPV6_SLAAC, True),
-        ]
-        for subnet in subnets:
-            self.subnet['ipv6_address_mode'] = subnet.addr_mode
-            self.subnet['ipv6_ra_mode'] = subnet.ra_mode
-            self.assertEqual(subnet.is_auto_address,
-                             ipv6_utils.is_auto_address_subnet(self.subnet))
-
-
-class TestIsEui64Address(base.BaseTestCase):
-
-    def _test_eui_64(self, ips, expected):
-        for ip in ips:
-            self.assertEqual(expected, ipv6_utils.is_eui64_address(ip),
-                             "Error on %s" % ip)
-
-    def test_valid_eui64_addresses(self):
-        ips = ('fffe::0cad:12ff:fe44:5566',
-               ipv6_utils.get_ipv6_addr_by_EUI64('2001:db8::',
-                                                 '00:16:3e:33:44:55'))
-        self._test_eui_64(ips, True)
-
-    def test_invalid_eui64_addresses(self):
-        ips = ('192.168.1.1',
-               '192.168.1.0',
-               '255.255.255.255',
-               '0.0.0.0',
-               'fffe::',
-               'ff80::1',
-               'fffe::0cad:12ff:ff44:5566',
-               'fffe::0cad:12fe:fe44:5566',
-               'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
-        self._test_eui_64(ips, False)
diff --git a/neutron/tests/unit/common/test_rpc.py b/neutron/tests/unit/common/test_rpc.py
deleted file mode 100644 (file)
index ae2e1e2..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-from oslo_messaging import conffixture as messaging_conffixture
-
-from neutron.common import rpc
-from neutron.tests import base
-
-
-CONF = cfg.CONF
-CONF.import_opt('state_path', 'neutron.common.config')
-
-
-class ServiceTestCase(base.DietTestCase):
-    # the class cannot be based on BaseTestCase since it mocks rpc.Connection
-
-    def setUp(self):
-        super(ServiceTestCase, self).setUp()
-        self.host = 'foo'
-        self.topic = 'neutron-agent'
-
-        self.target_mock = mock.patch('oslo_messaging.Target')
-        self.target_mock.start()
-
-        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
-        self.messaging_conf.transport_driver = 'fake'
-        self.messaging_conf.response_timeout = 0
-        self.useFixture(self.messaging_conf)
-
-        self.addCleanup(rpc.cleanup)
-        rpc.init(CONF)
-
-    def test_operations(self):
-        with mock.patch('oslo_messaging.get_rpc_server') as get_rpc_server:
-            rpc_server = get_rpc_server.return_value
-
-            service = rpc.Service(self.host, self.topic)
-            service.start()
-            rpc_server.start.assert_called_once_with()
-
-            service.stop()
-            rpc_server.stop.assert_called_once_with()
-            rpc_server.wait.assert_called_once_with()
diff --git a/neutron/tests/unit/common/test_utils.py b/neutron/tests/unit/common/test_utils.py
deleted file mode 100644 (file)
index f9bc4b2..0000000
+++ /dev/null
@@ -1,741 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import errno
-import re
-
-import eventlet
-import mock
-import netaddr
-import six
-import testtools
-
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import utils
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.common import utils as plugin_utils
-from neutron.tests import base
-from neutron.tests.common import helpers
-
-from oslo_log import log as logging
-
-
-class TestParseMappings(base.BaseTestCase):
-    def parse(self, mapping_list, unique_values=True):
-        return utils.parse_mappings(mapping_list, unique_values)
-
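-    # Mappings are 'key:value' strings (e.g. physnet:bridge pairs);
-    # unique_values=False additionally permits duplicate values.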
-    def test_parse_mappings_fails_for_missing_separator(self):
-        with testtools.ExpectedException(ValueError):
-            self.parse(['key'])
-
-    def test_parse_mappings_fails_for_missing_key(self):
-        with testtools.ExpectedException(ValueError):
-            self.parse([':val'])
-
-    def test_parse_mappings_fails_for_missing_value(self):
-        with testtools.ExpectedException(ValueError):
-            self.parse(['key:'])
-
-    def test_parse_mappings_fails_for_extra_separator(self):
-        with testtools.ExpectedException(ValueError):
-            self.parse(['key:val:junk'])
-
-    def test_parse_mappings_fails_for_duplicate_key(self):
-        with testtools.ExpectedException(ValueError):
-            self.parse(['key:val1', 'key:val2'])
-
-    def test_parse_mappings_fails_for_duplicate_value(self):
-        with testtools.ExpectedException(ValueError):
-            self.parse(['key1:val', 'key2:val'])
-
-    def test_parse_mappings_succeeds_for_one_mapping(self):
-        self.assertEqual(self.parse(['key:val']), {'key': 'val'})
-
-    def test_parse_mappings_succeeds_for_n_mappings(self):
-        self.assertEqual(self.parse(['key1:val1', 'key2:val2']),
-                         {'key1': 'val1', 'key2': 'val2'})
-
-    def test_parse_mappings_succeeds_for_duplicate_value(self):
-        self.assertEqual(self.parse(['key1:val', 'key2:val'], False),
-                         {'key1': 'val', 'key2': 'val'})
-
-    def test_parse_mappings_succeeds_for_no_mappings(self):
-        self.assertEqual({}, self.parse(['']))
-
-
-class TestParseTunnelRangesMixin(object):
-    TUN_MIN = None
-    TUN_MAX = None
-    TYPE = None
-    _err_prefix = "Invalid network tunnel range: '%d:%d' - "
-    _err_suffix = "%s is not a valid %s identifier."
-    _err_range = "End of tunnel range is less than start of tunnel range."
-
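-    # TUN_MIN/TUN_MAX/TYPE are supplied by the GRE and VXLAN subclasses
-    # below; a range is valid only when both ends fall within those bounds.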
-    def _build_invalid_tunnel_range_msg(self, t_range_tuple, n):
-        bad_id = t_range_tuple[n - 1]
-        return (self._err_prefix % t_range_tuple) + (self._err_suffix
-                                                     % (bad_id, self.TYPE))
-
-    def _build_range_reversed_msg(self, t_range_tuple):
-        return (self._err_prefix % t_range_tuple) + self._err_range
-
-    def _verify_range(self, tunnel_range):
-        return plugin_utils.verify_tunnel_range(tunnel_range, self.TYPE)
-
-    def _check_range_valid_ranges(self, tunnel_range):
-        self.assertIsNone(self._verify_range(tunnel_range))
-
-    def _check_range_invalid_ranges(self, bad_range, which):
-        expected_msg = self._build_invalid_tunnel_range_msg(bad_range, which)
-        err = self.assertRaises(n_exc.NetworkTunnelRangeError,
-                                self._verify_range, bad_range)
-        self.assertEqual(expected_msg, str(err))
-
-    def _check_range_reversed(self, bad_range):
-        err = self.assertRaises(n_exc.NetworkTunnelRangeError,
-                                self._verify_range, bad_range)
-        expected_msg = self._build_range_reversed_msg(bad_range)
-        self.assertEqual(expected_msg, str(err))
-
-    def test_range_tunnel_id_valid(self):
-        self._check_range_valid_ranges((self.TUN_MIN, self.TUN_MAX))
-
-    def test_range_tunnel_id_invalid(self):
-        self._check_range_invalid_ranges((-1, self.TUN_MAX), 1)
-        self._check_range_invalid_ranges((self.TUN_MIN,
-                                          self.TUN_MAX + 1), 2)
-        self._check_range_invalid_ranges((self.TUN_MIN - 1,
-                                          self.TUN_MAX + 1), 1)
-
-    def test_range_tunnel_id_reversed(self):
-        self._check_range_reversed((self.TUN_MAX, self.TUN_MIN))
-
-
-class TestGreTunnelRangeVerifyValid(TestParseTunnelRangesMixin,
-                                    base.BaseTestCase):
-    TUN_MIN = p_const.MIN_GRE_ID
-    TUN_MAX = p_const.MAX_GRE_ID
-    TYPE = p_const.TYPE_GRE
-
-
-class TestVxlanTunnelRangeVerifyValid(TestParseTunnelRangesMixin,
-                                      base.BaseTestCase):
-    TUN_MIN = p_const.MIN_VXLAN_VNI
-    TUN_MAX = p_const.MAX_VXLAN_VNI
-    TYPE = p_const.TYPE_VXLAN
-
-
-class UtilTestParseVlanRanges(base.BaseTestCase):
-    _err_prefix = "Invalid network VLAN range: '"
-    _err_too_few = "' - 'need more than 2 values to unpack'."
-    _err_too_many_prefix = "' - 'too many values to unpack"
-    _err_not_int = "' - 'invalid literal for int() with base 10: '%s''."
-    _err_bad_vlan = "' - '%s is not a valid VLAN tag'."
-    _err_range = "' - 'End of VLAN range is less than start of VLAN range'."
-
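-    # Entries take the form 'physnet[:vlan_min:vlan_max]' with VLAN tags in
-    # 1-4094, as the parsing tests below demonstrate.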
-    def _range_too_few_err(self, nv_range):
-        return self._err_prefix + nv_range + self._err_too_few
-
-    def _range_too_many_err_prefix(self, nv_range):
-        return self._err_prefix + nv_range + self._err_too_many_prefix
-
-    def _vlan_not_int_err(self, nv_range, vlan):
-        return self._err_prefix + nv_range + (self._err_not_int % vlan)
-
-    def _nrange_invalid_vlan(self, nv_range, n):
-        vlan = nv_range.split(':')[n]
-        v_range = ':'.join(nv_range.split(':')[1:])
-        return self._err_prefix + v_range + (self._err_bad_vlan % vlan)
-
-    def _vrange_invalid_vlan(self, v_range_tuple, n):
-        vlan = v_range_tuple[n - 1]
-        v_range_str = '%d:%d' % v_range_tuple
-        return self._err_prefix + v_range_str + (self._err_bad_vlan % vlan)
-
-    def _vrange_invalid(self, v_range_tuple):
-        v_range_str = '%d:%d' % v_range_tuple
-        return self._err_prefix + v_range_str + self._err_range
-
-
-class TestVlanNetworkNameValid(base.BaseTestCase):
-    def parse_vlan_ranges(self, vlan_range):
-        return plugin_utils.parse_network_vlan_ranges(vlan_range)
-
-    def test_validate_provider_phynet_name_mixed(self):
-        self.assertRaises(n_exc.PhysicalNetworkNameError,
-                          self.parse_vlan_ranges,
-                          ['', ':23:30', 'physnet1',
-                           'tenant_net:100:200'])
-
-    def test_validate_provider_phynet_name_bad(self):
-        self.assertRaises(n_exc.PhysicalNetworkNameError,
-                          self.parse_vlan_ranges,
-                          [':1:34'])
-
-
-class TestVlanRangeVerifyValid(UtilTestParseVlanRanges):
-    def verify_range(self, vlan_range):
-        return plugin_utils.verify_vlan_range(vlan_range)
-
-    def test_range_valid_ranges(self):
-        self.assertIsNone(self.verify_range((1, 2)))
-        self.assertIsNone(self.verify_range((1, 1999)))
-        self.assertIsNone(self.verify_range((100, 100)))
-        self.assertIsNone(self.verify_range((100, 200)))
-        self.assertIsNone(self.verify_range((4001, 4094)))
-        self.assertIsNone(self.verify_range((1, 4094)))
-
-    def check_one_vlan_invalid(self, bad_range, which):
-        expected_msg = self._vrange_invalid_vlan(bad_range, which)
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.verify_range, bad_range)
-        self.assertEqual(str(err), expected_msg)
-
-    def test_range_first_vlan_invalid_negative(self):
-        self.check_one_vlan_invalid((-1, 199), 1)
-
-    def test_range_first_vlan_invalid_zero(self):
-        self.check_one_vlan_invalid((0, 199), 1)
-
-    def test_range_first_vlan_invalid_limit_plus_one(self):
-        self.check_one_vlan_invalid((4095, 199), 1)
-
-    def test_range_first_vlan_invalid_too_big(self):
-        self.check_one_vlan_invalid((9999, 199), 1)
-
-    def test_range_second_vlan_invalid_negative(self):
-        self.check_one_vlan_invalid((299, -1), 2)
-
-    def test_range_second_vlan_invalid_zero(self):
-        self.check_one_vlan_invalid((299, 0), 2)
-
-    def test_range_second_vlan_invalid_limit_plus_one(self):
-        self.check_one_vlan_invalid((299, 4095), 2)
-
-    def test_range_second_vlan_invalid_too_big(self):
-        self.check_one_vlan_invalid((299, 9999), 2)
-
-    def test_range_both_vlans_invalid_01(self):
-        self.check_one_vlan_invalid((-1, 0), 1)
-
-    def test_range_both_vlans_invalid_02(self):
-        self.check_one_vlan_invalid((0, 4095), 1)
-
-    def test_range_both_vlans_invalid_03(self):
-        self.check_one_vlan_invalid((4095, 9999), 1)
-
-    def test_range_both_vlans_invalid_04(self):
-        self.check_one_vlan_invalid((9999, -1), 1)
-
-    def test_range_reversed(self):
-        bad_range = (95, 10)
-        expected_msg = self._vrange_invalid(bad_range)
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.verify_range, bad_range)
-        self.assertEqual(str(err), expected_msg)
-
-
-class TestParseOneVlanRange(UtilTestParseVlanRanges):
-    def parse_one(self, cfg_entry):
-        return plugin_utils.parse_network_vlan_range(cfg_entry)
-
-    def test_parse_one_net_no_vlan_range(self):
-        config_str = "net1"
-        expected_networks = ("net1", None)
-        self.assertEqual(self.parse_one(config_str), expected_networks)
-
-    def test_parse_one_net_and_vlan_range(self):
-        config_str = "net1:100:199"
-        expected_networks = ("net1", (100, 199))
-        self.assertEqual(self.parse_one(config_str), expected_networks)
-
-    def test_parse_one_net_incomplete_range(self):
-        config_str = "net1:100"
-        expected_msg = self._range_too_few_err(config_str)
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.parse_one, config_str)
-        self.assertEqual(str(err), expected_msg)
-
-    def test_parse_one_net_range_too_many(self):
-        config_str = "net1:100:150:200"
-        expected_msg_prefix = self._range_too_many_err_prefix(config_str)
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.parse_one, config_str)
-        # The error message is not the same in Python 2 and Python 3. In
-        # Python 3 it depends on the number of values being unpacked, so it
-        # cannot be predicted as a fixed string.
-        self.assertTrue(str(err).startswith(expected_msg_prefix))
-
-    def test_parse_one_net_vlan1_not_int(self):
-        config_str = "net1:foo:199"
-        expected_msg = self._vlan_not_int_err(config_str, 'foo')
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.parse_one, config_str)
-        self.assertEqual(str(err), expected_msg)
-
-    def test_parse_one_net_vlan2_not_int(self):
-        config_str = "net1:100:bar"
-        expected_msg = self._vlan_not_int_err(config_str, 'bar')
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.parse_one, config_str)
-        self.assertEqual(str(err), expected_msg)
-
-    def test_parse_one_net_and_max_range(self):
-        config_str = "net1:1:4094"
-        expected_networks = ("net1", (1, 4094))
-        self.assertEqual(self.parse_one(config_str), expected_networks)
-
-    def test_parse_one_net_range_bad_vlan1(self):
-        config_str = "net1:9000:150"
-        expected_msg = self._nrange_invalid_vlan(config_str, 1)
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.parse_one, config_str)
-        self.assertEqual(str(err), expected_msg)
-
-    def test_parse_one_net_range_bad_vlan2(self):
-        config_str = "net1:4000:4999"
-        expected_msg = self._nrange_invalid_vlan(config_str, 2)
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.parse_one, config_str)
-        self.assertEqual(str(err), expected_msg)
-
-
-class TestParseVlanRangeList(UtilTestParseVlanRanges):
-    def parse_list(self, cfg_entries):
-        return plugin_utils.parse_network_vlan_ranges(cfg_entries)
-
-    def test_parse_list_one_net_no_vlan_range(self):
-        config_list = ["net1"]
-        expected_networks = {"net1": []}
-        self.assertEqual(self.parse_list(config_list), expected_networks)
-
-    def test_parse_list_one_net_vlan_range(self):
-        config_list = ["net1:100:199"]
-        expected_networks = {"net1": [(100, 199)]}
-        self.assertEqual(self.parse_list(config_list), expected_networks)
-
-    def test_parse_two_nets_no_vlan_range(self):
-        config_list = ["net1",
-                       "net2"]
-        expected_networks = {"net1": [],
-                             "net2": []}
-        self.assertEqual(self.parse_list(config_list), expected_networks)
-
-    def test_parse_two_nets_range_and_no_range(self):
-        config_list = ["net1:100:199",
-                       "net2"]
-        expected_networks = {"net1": [(100, 199)],
-                             "net2": []}
-        self.assertEqual(self.parse_list(config_list), expected_networks)
-
-    def test_parse_two_nets_no_range_and_range(self):
-        config_list = ["net1",
-                       "net2:200:299"]
-        expected_networks = {"net1": [],
-                             "net2": [(200, 299)]}
-        self.assertEqual(self.parse_list(config_list), expected_networks)
-
-    def test_parse_two_nets_bad_vlan_range1(self):
-        config_list = ["net1:100",
-                       "net2:200:299"]
-        expected_msg = self._range_too_few_err(config_list[0])
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.parse_list, config_list)
-        self.assertEqual(str(err), expected_msg)
-
-    def test_parse_two_nets_vlan_not_int2(self):
-        config_list = ["net1:100:199",
-                       "net2:200:0x200"]
-        expected_msg = self._vlan_not_int_err(config_list[1], '0x200')
-        err = self.assertRaises(n_exc.NetworkVlanRangeError,
-                                self.parse_list, config_list)
-        self.assertEqual(str(err), expected_msg)
-
-    def test_parse_two_nets_and_append_1_2(self):
-        config_list = ["net1:100:199",
-                       "net1:1000:1099",
-                       "net2:200:299"]
-        expected_networks = {"net1": [(100, 199),
-                                      (1000, 1099)],
-                             "net2": [(200, 299)]}
-        self.assertEqual(self.parse_list(config_list), expected_networks)
-
-    def test_parse_two_nets_and_append_1_3(self):
-        config_list = ["net1:100:199",
-                       "net2:200:299",
-                       "net1:1000:1099"]
-        expected_networks = {"net1": [(100, 199),
-                                      (1000, 1099)],
-                             "net2": [(200, 299)]}
-        self.assertEqual(self.parse_list(config_list), expected_networks)
-
-
-class TestDictUtils(base.BaseTestCase):
-    def test_dict2str(self):
-        dic = {"key1": "value1", "key2": "value2", "key3": "value3"}
-        expected = "key1=value1,key2=value2,key3=value3"
-        self.assertEqual(utils.dict2str(dic), expected)
-
-    def test_str2dict(self):
-        string = "key1=value1,key2=value2,key3=value3"
-        expected = {"key1": "value1", "key2": "value2", "key3": "value3"}
-        self.assertEqual(utils.str2dict(string), expected)
-
-    def test_dict_str_conversion(self):
-        dic = {"key1": "value1", "key2": "value2"}
-        self.assertEqual(utils.str2dict(utils.dict2str(dic)), dic)
-
-    def test_diff_list_of_dict(self):
-        old_list = [{"key1": "value1"},
-                    {"key2": "value2"},
-                    {"key3": "value3"}]
-        new_list = [{"key1": "value1"},
-                    {"key2": "value2"},
-                    {"key4": "value4"}]
-        added, removed = utils.diff_list_of_dict(old_list, new_list)
-        self.assertEqual(added, [dict(key4="value4")])
-        self.assertEqual(removed, [dict(key3="value3")])
-
-
-class _CachingDecorator(object):
-    def __init__(self):
-        self.func_retval = 'bar'
-        self._cache = mock.Mock()
-
-    @utils.cache_method_results
-    def func(self, *args, **kwargs):
-        return self.func_retval
-
-
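-# The cache key asserted below is (qualified function name, *args, tuple of
-# kwargs items); a missing _cache attribute raises NotImplementedError,
-# while a falsy _cache disables caching altogether.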
-class TestCachingDecorator(base.BaseTestCase):
-    def setUp(self):
-        super(TestCachingDecorator, self).setUp()
-        self.decor = _CachingDecorator()
-        self.func_name = '%(module)s._CachingDecorator.func' % {
-            'module': self.__module__
-        }
-        self.not_cached = self.decor.func.func.__self__._not_cached
-
-    def test_cache_miss(self):
-        expected_key = (self.func_name, 1, 2, ('foo', 'bar'))
-        args = (1, 2)
-        kwargs = {'foo': 'bar'}
-        self.decor._cache.get.return_value = self.not_cached
-        retval = self.decor.func(*args, **kwargs)
-        self.decor._cache.set.assert_called_once_with(
-            expected_key, self.decor.func_retval, None)
-        self.assertEqual(self.decor.func_retval, retval)
-
-    def test_cache_hit(self):
-        expected_key = (self.func_name, 1, 2, ('foo', 'bar'))
-        args = (1, 2)
-        kwargs = {'foo': 'bar'}
-        retval = self.decor.func(*args, **kwargs)
-        self.assertFalse(self.decor._cache.set.called)
-        self.assertEqual(self.decor._cache.get.return_value, retval)
-        self.decor._cache.get.assert_called_once_with(expected_key,
-                                                      self.not_cached)
-
-    def test_get_unhashable(self):
-        expected_key = (self.func_name, [1], 2)
-        self.decor._cache.get.side_effect = TypeError
-        retval = self.decor.func([1], 2)
-        self.assertFalse(self.decor._cache.set.called)
-        self.assertEqual(self.decor.func_retval, retval)
-        self.decor._cache.get.assert_called_once_with(expected_key,
-                                                      self.not_cached)
-
-    def test_missing_cache(self):
-        delattr(self.decor, '_cache')
-        self.assertRaises(NotImplementedError, self.decor.func, (1, 2))
-
-    def test_no_cache(self):
-        self.decor._cache = False
-        retval = self.decor.func((1, 2))
-        self.assertEqual(self.decor.func_retval, retval)
-
-
-class TestDict2Tuples(base.BaseTestCase):
-    def test_dict(self):
-        input_dict = {'foo': 'bar', '42': 'baz', 'aaa': 'zzz'}
-        expected = (('42', 'baz'), ('aaa', 'zzz'), ('foo', 'bar'))
-        output_tuple = utils.dict2tuple(input_dict)
-        self.assertEqual(expected, output_tuple)
-
-
-class TestExceptionLogger(base.BaseTestCase):
-    def test_normal_call(self):
-        result = "Result"
-
-        @utils.exception_logger()
-        def func():
-            return result
-
-        self.assertEqual(result, func())
-
-    def test_raise(self):
-        result = "Result"
-
-        @utils.exception_logger()
-        def func():
-            raise RuntimeError(result)
-
-        self.assertRaises(RuntimeError, func)
-
-    def test_spawn_normal(self):
-        result = "Result"
-        logger = mock.Mock()
-
-        @utils.exception_logger(logger=logger)
-        def func():
-            return result
-
-        gt = eventlet.spawn(func)
-        self.assertEqual(result, gt.wait())
-        self.assertFalse(logger.called)
-
-    def test_spawn_raise(self):
-        result = "Result"
-        logger = mock.Mock()
-
-        @utils.exception_logger(logger=logger)
-        def func():
-            raise RuntimeError(result)
-
-        gt = eventlet.spawn(func)
-        self.assertRaises(RuntimeError, gt.wait)
-        self.assertTrue(logger.called)
-
-    def test_pool_spawn_normal(self):
-        logger = mock.Mock()
-        calls = mock.Mock()
-
-        @utils.exception_logger(logger=logger)
-        def func(i):
-            calls(i)
-
-        pool = eventlet.GreenPool(4)
-        for i in range(0, 4):
-            pool.spawn(func, i)
-        pool.waitall()
-
-        calls.assert_has_calls([mock.call(0), mock.call(1),
-                                mock.call(2), mock.call(3)],
-                               any_order=True)
-        self.assertFalse(logger.called)
-
-    def test_pool_spawn_raise(self):
-        logger = mock.Mock()
-        calls = mock.Mock()
-
-        @utils.exception_logger(logger=logger)
-        def func(i):
-            if i == 2:
-                raise RuntimeError(2)
-            else:
-                calls(i)
-
-        pool = eventlet.GreenPool(4)
-        for i in range(0, 4):
-            pool.spawn(func, i)
-        pool.waitall()
-
-        calls.assert_has_calls([mock.call(0), mock.call(1), mock.call(3)],
-                               any_order=True)
-        self.assertTrue(logger.called)
-
-
-class TestDvrServices(base.BaseTestCase):
-
-    def _test_is_dvr_serviced(self, device_owner, expected):
-        self.assertEqual(expected, utils.is_dvr_serviced(device_owner))
-
-    def test_is_dvr_serviced_with_lb_port(self):
-        self._test_is_dvr_serviced(constants.DEVICE_OWNER_LOADBALANCER, True)
-
-    def test_is_dvr_serviced_with_lbv2_port(self):
-        self._test_is_dvr_serviced(constants.DEVICE_OWNER_LOADBALANCERV2, True)
-
-    def test_is_dvr_serviced_with_dhcp_port(self):
-        self._test_is_dvr_serviced(constants.DEVICE_OWNER_DHCP, True)
-
-    def test_is_dvr_serviced_with_vm_port(self):
-        self._test_is_dvr_serviced(constants.DEVICE_OWNER_COMPUTE_PREFIX, True)
-
-
-class TestIpToCidr(base.BaseTestCase):
-    def test_ip_to_cidr_ipv4_default(self):
-        self.assertEqual('15.1.2.3/32', utils.ip_to_cidr('15.1.2.3'))
-
-    def test_ip_to_cidr_ipv4_prefix(self):
-        self.assertEqual('15.1.2.3/24', utils.ip_to_cidr('15.1.2.3', 24))
-
-    def test_ip_to_cidr_ipv4_netaddr(self):
-        ip_address = netaddr.IPAddress('15.1.2.3')
-        self.assertEqual('15.1.2.3/32', utils.ip_to_cidr(ip_address))
-
-    def test_ip_to_cidr_ipv4_bad_prefix(self):
-        self.assertRaises(netaddr.core.AddrFormatError,
-                          utils.ip_to_cidr, '15.1.2.3', 33)
-
-    def test_ip_to_cidr_ipv6_default(self):
-        self.assertEqual('::1/128', utils.ip_to_cidr('::1'))
-
-    def test_ip_to_cidr_ipv6_prefix(self):
-        self.assertEqual('::1/64', utils.ip_to_cidr('::1', 64))
-
-    def test_ip_to_cidr_ipv6_bad_prefix(self):
-        self.assertRaises(netaddr.core.AddrFormatError,
-                          utils.ip_to_cidr, '2000::1', 129)
-
-
-class TestCidrIsHost(base.BaseTestCase):
-    def test_is_cidr_host_ipv4(self):
-        self.assertTrue(utils.is_cidr_host('15.1.2.3/32'))
-
-    def test_is_cidr_host_ipv4_not_cidr(self):
-        self.assertRaises(ValueError,
-                          utils.is_cidr_host,
-                          '15.1.2.3')
-
-    def test_is_cidr_host_ipv6(self):
-        self.assertTrue(utils.is_cidr_host('2000::1/128'))
-
-    def test_is_cidr_host_ipv6_netaddr(self):
-        net = netaddr.IPNetwork("2000::1")
-        self.assertTrue(utils.is_cidr_host(net))
-
-    def test_is_cidr_host_ipv6_32(self):
-        self.assertFalse(utils.is_cidr_host('2000::1/32'))
-
-    def test_is_cidr_host_ipv6_not_cidr(self):
-        self.assertRaises(ValueError,
-                          utils.is_cidr_host,
-                          '2000::1')
-
-    def test_is_cidr_host_ipv6_not_cidr_netaddr(self):
-        ip_address = netaddr.IPAddress("2000::3")
-        self.assertRaises(ValueError,
-                          utils.is_cidr_host,
-                          ip_address)
-
-
-class TestIpVersionFromInt(base.BaseTestCase):
-    def test_ip_version_from_int_ipv4(self):
-        self.assertEqual(utils.ip_version_from_int(4),
-                         constants.IPv4)
-
-    def test_ip_version_from_int_ipv6(self):
-        self.assertEqual(utils.ip_version_from_int(6),
-                         constants.IPv6)
-
-    def test_ip_version_from_int_illegal_int(self):
-        self.assertRaises(ValueError,
-                          utils.ip_version_from_int,
-                          8)
-
-
-class TestDelayedStringRenderer(base.BaseTestCase):
-    def test_call_deferred_until_str(self):
-        my_func = mock.MagicMock(return_value='Brie cheese!')
-        delayed = utils.DelayedStringRenderer(my_func, 1, 2, key_arg=44)
-        self.assertFalse(my_func.called)
-        string = "Type: %s" % delayed
-        my_func.assert_called_once_with(1, 2, key_arg=44)
-        self.assertEqual("Type: Brie cheese!", string)
-
-    def test_not_called_with_low_log_level(self):
-        LOG = logging.getLogger(__name__)
-        # make sure the previous logging level is restored on cleanup
-        current_log_level = LOG.logger.getEffectiveLevel()
-        self.addCleanup(LOG.logger.setLevel, current_log_level)
-
-        my_func = mock.MagicMock()
-        delayed = utils.DelayedStringRenderer(my_func)
-
-        # set to warning so we shouldn't be logging debug messages
-        LOG.logger.setLevel(logging.logging.WARNING)
-        LOG.debug("Hello %s", delayed)
-        self.assertFalse(my_func.called)
-
-        # but it should be called with the debug level
-        LOG.logger.setLevel(logging.logging.DEBUG)
-        LOG.debug("Hello %s", delayed)
-        self.assertTrue(my_func.called)
-
-
-class TestEnsureDir(base.BaseTestCase):
-    @mock.patch('os.makedirs')
-    def test_ensure_dir_no_fail_if_exists(self, makedirs):
-        error = OSError()
-        error.errno = errno.EEXIST
-        makedirs.side_effect = error
-        utils.ensure_dir("/etc/create/concurrently")
-
-    @mock.patch('os.makedirs')
-    def test_ensure_dir_calls_makedirs(self, makedirs):
-        utils.ensure_dir("/etc/create/directory")
-        makedirs.assert_called_once_with("/etc/create/directory", 0o755)
-
-
-class TestCamelize(base.BaseTestCase):
-    def test_camelize(self):
-        data = {'bandwidth_limit': 'BandwidthLimit',
-                'test': 'Test',
-                'some__more__dashes': 'SomeMoreDashes',
-                'a_penguin_walks_into_a_bar': 'APenguinWalksIntoABar'}
-
-        for s, expected in data.items():
-            self.assertEqual(expected, utils.camelize(s))
-
-
-class TestRoundVal(base.BaseTestCase):
-    def test_round_val_ok(self):
-        for expected, value in ((0, 0),
-                                (0, 0.1),
-                                (1, 0.5),
-                                (1, 1.49),
-                                (2, 1.5)):
-            self.assertEqual(expected, utils.round_val(value))
-
-
-class TestGetRandomString(base.BaseTestCase):
-    def test_get_random_string(self):
-        length = 127
-        random_string = utils.get_random_string(length)
-        self.assertEqual(length, len(random_string))
-        regex = re.compile('^[0-9a-fA-F]+$')
-        self.assertIsNotNone(regex.match(random_string))
-
-
-class TestSafeDecodeUtf8(base.BaseTestCase):
-
-    @helpers.requires_py2
-    def test_py2_does_nothing(self):
-        s = 'test-py2'
-        self.assertIs(s, utils.safe_decode_utf8(s))
-
-    @helpers.requires_py3
-    def test_py3_decoded_valid_bytes(self):
-        s = bytes('test-py2', 'utf-8')
-        decoded_str = utils.safe_decode_utf8(s)
-        self.assertIsInstance(decoded_str, six.text_type)
-        self.assertEqual(s, decoded_str.encode('utf-8'))
-
-    @helpers.requires_py3
-    def test_py3_decoded_invalid_bytes(self):
-        s = bytes('test-py2', 'utf_16')
-        decoded_str = utils.safe_decode_utf8(s)
-        self.assertIsInstance(decoded_str, six.text_type)
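
The CIDR helpers exercised by TestIpToCidr and TestCidrIsHost above are thin
wrappers over netaddr. A minimal sketch consistent with those assertions (an
illustration, not the removed implementation itself):

    import netaddr

    def ip_to_cidr(ip, prefix=None):
        # Default to a host route (/32 or /128); netaddr raises
        # AddrFormatError for out-of-range prefixes such as /33 or /129.
        net = netaddr.IPNetwork(ip)
        if prefix is not None:
            net = netaddr.IPNetwork('%s/%s' % (net.ip, prefix))
        return str(net)

    def is_cidr_host(cidr):
        # Bare addresses without a '/' are rejected, as the tests expect.
        if '/' not in str(cidr):
            raise ValueError("Not a CIDR: %s" % cidr)
        net = netaddr.IPNetwork(cidr)
        return net.prefixlen == net.max_prefixlen

Likewise, the DelayedStringRenderer tests rely on deferring a potentially
expensive call until a log line is actually rendered; a sketch of that
pattern, again inferred from the assertions rather than copied:

    class DelayedStringRenderer(object):
        def __init__(self, function, *args, **kwargs):
            # Only store the callable; it runs in __str__, so it is skipped
            # entirely when the log level filters the message out.
            self.function = function
            self.args = args
            self.kwargs = kwargs

        def __str__(self):
            return str(self.function(*self.args, **self.kwargs))
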
diff --git a/neutron/tests/unit/core_extensions/__init__.py b/neutron/tests/unit/core_extensions/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/core_extensions/test_qos.py b/neutron/tests/unit/core_extensions/test_qos.py
deleted file mode 100644 (file)
index 07ba639..0000000
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright (c) 2015 Red Hat Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron import context
-from neutron.core_extensions import base as base_core
-from neutron.core_extensions import qos as qos_core
-from neutron.plugins.common import constants as plugin_constants
-from neutron.services.qos import qos_consts
-from neutron.tests import base
-
-
-def _get_test_dbdata(qos_policy_id):
-    return {'id': None, 'qos_policy_binding': {'policy_id': qos_policy_id,
-                                               'network_id': 'fake_net_id'}}
-
-
-class QosCoreResourceExtensionTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(QosCoreResourceExtensionTestCase, self).setUp()
-        self.core_extension = qos_core.QosCoreResourceExtension()
-        policy_p = mock.patch('neutron.objects.qos.policy.QosPolicy')
-        self.policy_m = policy_p.start()
-        self.context = context.get_admin_context()
-
-    def test_process_fields_no_qos_policy_id(self):
-        self.core_extension.process_fields(
-            self.context, base_core.PORT, {}, None)
-        self.assertFalse(self.policy_m.called)
-
-    def _mock_plugin_loaded(self, plugin_loaded):
-        plugins = {}
-        if plugin_loaded:
-            plugins[plugin_constants.QOS] = None
-        return mock.patch('neutron.manager.NeutronManager.get_service_plugins',
-                          return_value=plugins)
-
-    def test_process_fields_no_qos_plugin_loaded(self):
-        with self._mock_plugin_loaded(False):
-            self.core_extension.process_fields(
-                self.context, base_core.PORT,
-                {qos_consts.QOS_POLICY_ID: None}, None)
-            self.assertFalse(self.policy_m.called)
-
-    def test_process_fields_port_new_policy(self):
-        with self._mock_plugin_loaded(True):
-            qos_policy_id = mock.Mock()
-            actual_port = {'id': mock.Mock(),
-                           qos_consts.QOS_POLICY_ID: qos_policy_id}
-            qos_policy = mock.MagicMock()
-            self.policy_m.get_by_id = mock.Mock(return_value=qos_policy)
-            self.core_extension.process_fields(
-                self.context, base_core.PORT,
-                {qos_consts.QOS_POLICY_ID: qos_policy_id},
-                actual_port)
-
-            qos_policy.attach_port.assert_called_once_with(actual_port['id'])
-
-    def test_process_fields_port_updated_policy(self):
-        with self._mock_plugin_loaded(True):
-            qos_policy1_id = mock.Mock()
-            qos_policy2_id = mock.Mock()
-            port_id = mock.Mock()
-            actual_port = {'id': port_id,
-                           qos_consts.QOS_POLICY_ID: qos_policy1_id}
-            old_qos_policy = mock.MagicMock()
-            self.policy_m.get_port_policy = mock.Mock(
-                return_value=old_qos_policy)
-            new_qos_policy = mock.MagicMock()
-            self.policy_m.get_by_id = mock.Mock(return_value=new_qos_policy)
-            self.core_extension.process_fields(
-                self.context, base_core.PORT,
-                {qos_consts.QOS_POLICY_ID: qos_policy2_id},
-                actual_port)
-
-            old_qos_policy.detach_port.assert_called_once_with(port_id)
-            new_qos_policy.attach_port.assert_called_once_with(port_id)
-            self.assertEqual(qos_policy2_id, actual_port['qos_policy_id'])
-
-    def test_process_resource_port_updated_no_policy(self):
-        with self._mock_plugin_loaded(True):
-            port_id = mock.Mock()
-            qos_policy_id = mock.Mock()
-            actual_port = {'id': port_id,
-                           qos_consts.QOS_POLICY_ID: qos_policy_id}
-            old_qos_policy = mock.MagicMock()
-            self.policy_m.get_port_policy = mock.Mock(
-                return_value=old_qos_policy)
-            new_qos_policy = mock.MagicMock()
-            self.policy_m.get_by_id = mock.Mock(return_value=new_qos_policy)
-            self.core_extension.process_fields(
-                self.context, base_core.PORT,
-                {qos_consts.QOS_POLICY_ID: None},
-                actual_port)
-
-            old_qos_policy.detach_port.assert_called_once_with(port_id)
-            self.assertIsNone(actual_port['qos_policy_id'])
-
-    def test_process_resource_network_updated_no_policy(self):
-        with self._mock_plugin_loaded(True):
-            network_id = mock.Mock()
-            qos_policy_id = mock.Mock()
-            actual_network = {'id': network_id,
-                              qos_consts.QOS_POLICY_ID: qos_policy_id}
-            old_qos_policy = mock.MagicMock()
-            self.policy_m.get_network_policy = mock.Mock(
-                return_value=old_qos_policy)
-            new_qos_policy = mock.MagicMock()
-            self.policy_m.get_by_id = mock.Mock(return_value=new_qos_policy)
-            self.core_extension.process_fields(
-                self.context, base_core.NETWORK,
-                {qos_consts.QOS_POLICY_ID: None},
-                actual_network)
-
-            old_qos_policy.detach_network.assert_called_once_with(network_id)
-            self.assertIsNone(actual_network['qos_policy_id'])
-
-    def test_process_fields_network_new_policy(self):
-        with self._mock_plugin_loaded(True):
-            qos_policy_id = mock.Mock()
-            actual_network = {'id': mock.Mock(),
-                              qos_consts.QOS_POLICY_ID: qos_policy_id}
-            qos_policy = mock.MagicMock()
-            self.policy_m.get_by_id = mock.Mock(return_value=qos_policy)
-            self.core_extension.process_fields(
-                self.context, base_core.NETWORK,
-                {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_network)
-
-            qos_policy.attach_network.assert_called_once_with(
-                actual_network['id'])
-
-    def test_process_fields_network_updated_policy(self):
-        with self._mock_plugin_loaded(True):
-            qos_policy_id = mock.Mock()
-            network_id = mock.Mock()
-            actual_network = {'id': network_id,
-                              qos_consts.QOS_POLICY_ID: qos_policy_id}
-            old_qos_policy = mock.MagicMock()
-            self.policy_m.get_network_policy = mock.Mock(
-                return_value=old_qos_policy)
-            new_qos_policy = mock.MagicMock()
-            self.policy_m.get_by_id = mock.Mock(return_value=new_qos_policy)
-            self.core_extension.process_fields(
-                self.context, base_core.NETWORK,
-                {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_network)
-
-            old_qos_policy.detach_network.assert_called_once_with(network_id)
-            new_qos_policy.attach_network.assert_called_once_with(network_id)
-
-    def test_extract_fields_plugin_not_loaded(self):
-        with self._mock_plugin_loaded(False):
-            fields = self.core_extension.extract_fields(None, None)
-            self.assertEqual({}, fields)
-
-    def _test_extract_fields_for_port(self, qos_policy_id):
-        with self._mock_plugin_loaded(True):
-            fields = self.core_extension.extract_fields(
-                base_core.PORT, _get_test_dbdata(qos_policy_id))
-            self.assertEqual({qos_consts.QOS_POLICY_ID: qos_policy_id}, fields)
-
-    def test_extract_fields_no_port_policy(self):
-        self._test_extract_fields_for_port(None)
-
-    def test_extract_fields_port_policy_exists(self):
-        qos_policy_id = mock.Mock()
-        self._test_extract_fields_for_port(qos_policy_id)
-
-    def _test_extract_fields_for_network(self, qos_policy_id):
-        with self._mock_plugin_loaded(True):
-            fields = self.core_extension.extract_fields(
-                base_core.NETWORK, _get_test_dbdata(qos_policy_id))
-            self.assertEqual({qos_consts.QOS_POLICY_ID: qos_policy_id}, fields)
-
-    def test_extract_fields_no_network_policy(self):
-        self._test_extract_fields_for_network(None)
-
-    def test_extract_fields_network_policy_exists(self):
-        qos_policy_id = mock.Mock()
-        qos_policy = mock.Mock()
-        qos_policy.id = qos_policy_id
-        self._test_extract_fields_for_network(qos_policy_id)
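
For orientation, the port-side flow these tests pin down reduces to: detach
the currently bound policy if there is one, attach the newly requested policy
unless the id is None, and record the id on the resource. A hypothetical
condensation of that flow (QosPolicy stands in for
neutron.objects.qos.policy.QosPolicy, which the tests mock out):

    def _update_port_policy(context, port, port_changes):
        # Drop whatever policy is bound to the port right now, if any.
        old_policy = QosPolicy.get_port_policy(context, port['id'])
        if old_policy:
            old_policy.detach_port(port['id'])
        # Bind the requested policy; None simply leaves the port unbound.
        policy_id = port_changes['qos_policy_id']
        if policy_id is not None:
            policy = QosPolicy.get_by_id(context, policy_id)
            policy.attach_port(port['id'])
        port['qos_policy_id'] = policy_id

The network tests assert the mirror image of this flow through
get_network_policy, detach_network and attach_network.
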
diff --git a/neutron/tests/unit/db/__init__.py b/neutron/tests/unit/db/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/db/metering/__init__.py b/neutron/tests/unit/db/metering/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/db/metering/test_metering_db.py b/neutron/tests/unit/db/metering/test_metering_db.py
deleted file mode 100644 (file)
index f2db17f..0000000
+++ /dev/null
@@ -1,313 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import webob.exc
-
-from neutron.api import extensions
-from neutron.common import config
-from neutron.common import constants as n_consts
-from neutron import context
-import neutron.extensions
-from neutron.extensions import metering
-from neutron.plugins.common import constants
-from neutron.services.metering import metering_plugin
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-DB_METERING_PLUGIN_KLASS = (
-    "neutron.services.metering."
-    "metering_plugin.MeteringPlugin"
-)
-
-extensions_path = ':'.join(neutron.extensions.__path__)
-
-
-class MeteringPluginDbTestCaseMixin(object):
-    def _create_metering_label(self, fmt, name, description, **kwargs):
-        data = {'metering_label': {'name': name,
-                                   'tenant_id': kwargs.get('tenant_id',
-                                                           'test-tenant'),
-                                   'shared': kwargs.get('shared', False),
-                                   'description': description}}
-        req = self.new_create_request('metering-labels', data,
-                                      fmt)
-
-        if kwargs.get('set_context') and 'tenant_id' in kwargs:
-            # create a specific auth context for this request
-            req.environ['neutron.context'] = (
-                context.Context('', kwargs['tenant_id'],
-                                is_admin=kwargs.get('is_admin', True)))
-
-        return req.get_response(self.ext_api)
-
-    def _make_metering_label(self, fmt, name, description, **kwargs):
-        res = self._create_metering_label(fmt, name, description, **kwargs)
-        if res.status_int >= 400:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(fmt, res)
-
-    def _create_metering_label_rule(self, fmt, metering_label_id, direction,
-                                    remote_ip_prefix, excluded, **kwargs):
-        data = {'metering_label_rule':
-                {'metering_label_id': metering_label_id,
-                 'tenant_id': kwargs.get('tenant_id', 'test-tenant'),
-                 'direction': direction,
-                 'excluded': excluded,
-                 'remote_ip_prefix': remote_ip_prefix}}
-        req = self.new_create_request('metering-label-rules',
-                                      data, fmt)
-
-        if kwargs.get('set_context') and 'tenant_id' in kwargs:
-            # create a specific auth context for this request
-            req.environ['neutron.context'] = (
-                context.Context('', kwargs['tenant_id']))
-
-        return req.get_response(self.ext_api)
-
-    def _make_metering_label_rule(self, fmt, metering_label_id, direction,
-                                  remote_ip_prefix, excluded, **kwargs):
-        res = self._create_metering_label_rule(fmt, metering_label_id,
-                                               direction, remote_ip_prefix,
-                                               excluded, **kwargs)
-        if res.status_int >= 400:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(fmt, res)
-
-    @contextlib.contextmanager
-    def metering_label(self, name='label', description='desc',
-                       fmt=None, **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        metering_label = self._make_metering_label(fmt, name,
-                                                   description, **kwargs)
-        yield metering_label
-
-    @contextlib.contextmanager
-    def metering_label_rule(self, metering_label_id=None, direction='ingress',
-                            remote_ip_prefix='10.0.0.0/24',
-                            excluded='false', fmt=None):
-        if not fmt:
-            fmt = self.fmt
-        metering_label_rule = self._make_metering_label_rule(fmt,
-                                                             metering_label_id,
-                                                             direction,
-                                                             remote_ip_prefix,
-                                                             excluded)
-        yield metering_label_rule
-
-
-class MeteringPluginDbTestCase(
-        test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
-        MeteringPluginDbTestCaseMixin):
-    fmt = 'json'
-
-    resource_prefix_map = dict(
-        (k.replace('_', '-'), "/metering")
-        for k in metering.RESOURCE_ATTRIBUTE_MAP.keys()
-    )
-
-    def setUp(self, plugin=None):
-        service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS}
-
-        super(MeteringPluginDbTestCase, self).setUp(
-            plugin=plugin,
-            service_plugins=service_plugins
-        )
-
-        self.plugin = metering_plugin.MeteringPlugin()
-        ext_mgr = extensions.PluginAwareExtensionManager(
-            extensions_path,
-            {constants.METERING: self.plugin}
-        )
-        app = config.load_paste_app('extensions_test_app')
-        self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
-
-
-class TestMetering(MeteringPluginDbTestCase):
-    def test_create_metering_label(self):
-        name = 'my label'
-        description = 'my metering label'
-        keys = [('name', name,), ('description', description)]
-        with self.metering_label(name, description) as metering_label:
-            for k, v in keys:
-                self.assertEqual(metering_label['metering_label'][k], v)
-
-    def test_create_metering_label_shared(self):
-        name = 'my label'
-        description = 'my metering label'
-        shared = True
-        keys = [('name', name,), ('description', description),
-                ('shared', shared)]
-        with self.metering_label(name, description,
-                                 shared=shared) as metering_label:
-            for k, v in keys:
-                self.assertEqual(metering_label['metering_label'][k], v)
-
-    def test_delete_metering_label(self):
-        name = 'my label'
-        description = 'my metering label'
-
-        with self.metering_label(name, description) as metering_label:
-            metering_label_id = metering_label['metering_label']['id']
-            self._delete('metering-labels', metering_label_id, 204)
-
-    def test_list_metering_label(self):
-        name = 'my label'
-        description = 'my metering label'
-
-        with self.metering_label(name, description) as v1,\
-                self.metering_label(name, description) as v2:
-            metering_label = (v1, v2)
-
-            self._test_list_resources('metering-label', metering_label)
-
-    def test_create_metering_label_rule(self):
-        name = 'my label'
-        description = 'my metering label'
-
-        with self.metering_label(name, description) as metering_label:
-            metering_label_id = metering_label['metering_label']['id']
-
-            direction = 'egress'
-            remote_ip_prefix = '192.168.0.0/24'
-            excluded = True
-
-            keys = [('metering_label_id', metering_label_id),
-                    ('direction', direction),
-                    ('excluded', excluded),
-                    ('remote_ip_prefix', remote_ip_prefix)]
-            with self.metering_label_rule(metering_label_id,
-                                          direction,
-                                          remote_ip_prefix,
-                                          excluded) as label_rule:
-                for k, v in keys:
-                    self.assertEqual(label_rule['metering_label_rule'][k], v)
-
-    def test_delete_metering_label_rule(self):
-        name = 'my label'
-        description = 'my metering label'
-
-        with self.metering_label(name, description) as metering_label:
-            metering_label_id = metering_label['metering_label']['id']
-
-            direction = 'egress'
-            remote_ip_prefix = '192.168.0.0/24'
-            excluded = True
-
-            with self.metering_label_rule(metering_label_id,
-                                          direction,
-                                          remote_ip_prefix,
-                                          excluded) as label_rule:
-                rule_id = label_rule['metering_label_rule']['id']
-                self._delete('metering-label-rules', rule_id, 204)
-
-    def test_list_metering_label_rule(self):
-        name = 'my label'
-        description = 'my metering label'
-
-        with self.metering_label(name, description) as metering_label:
-            metering_label_id = metering_label['metering_label']['id']
-
-            direction = 'egress'
-            remote_ip_prefix = '192.168.0.0/24'
-            excluded = True
-
-            with self.metering_label_rule(metering_label_id,
-                                          direction,
-                                          remote_ip_prefix,
-                                          excluded) as v1,\
-                    self.metering_label_rule(metering_label_id,
-                                             'ingress',
-                                             remote_ip_prefix,
-                                             excluded) as v2:
-                metering_label_rule = (v1, v2)
-
-                self._test_list_resources('metering-label-rule',
-                                          metering_label_rule)
-
-    def test_create_metering_label_rules(self):
-        name = 'my label'
-        description = 'my metering label'
-
-        with self.metering_label(name, description) as metering_label:
-            metering_label_id = metering_label['metering_label']['id']
-
-            direction = 'egress'
-            remote_ip_prefix = '192.168.0.0/24'
-            excluded = True
-
-            with self.metering_label_rule(metering_label_id,
-                                          direction,
-                                          remote_ip_prefix,
-                                          excluded) as v1,\
-                    self.metering_label_rule(metering_label_id,
-                                             direction,
-                                             n_consts.IPv4_ANY,
-                                             False) as v2:
-                metering_label_rule = (v1, v2)
-
-                self._test_list_resources('metering-label-rule',
-                                          metering_label_rule)
-
-    def test_create_overlap_metering_label_rules(self):
-        name = 'my label'
-        description = 'my metering label'
-
-        with self.metering_label(name, description) as metering_label:
-            metering_label_id = metering_label['metering_label']['id']
-
-            direction = 'egress'
-            remote_ip_prefix1 = '192.168.0.0/24'
-            remote_ip_prefix2 = '192.168.0.0/16'
-            excluded = True
-
-            with self.metering_label_rule(metering_label_id,
-                                          direction,
-                                          remote_ip_prefix1,
-                                          excluded):
-                res = self._create_metering_label_rule(self.fmt,
-                                                       metering_label_id,
-                                                       direction,
-                                                       remote_ip_prefix2,
-                                                       excluded)
-                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_create_metering_label_rule_two_labels(self):
-        name1 = 'my label 1'
-        name2 = 'my label 2'
-        description = 'my metering label'
-
-        with self.metering_label(name1, description) as metering_label1:
-            metering_label_id1 = metering_label1['metering_label']['id']
-
-            with self.metering_label(name2, description) as metering_label2:
-                metering_label_id2 = metering_label2['metering_label']['id']
-
-                direction = 'egress'
-                remote_ip_prefix = '192.168.0.0/24'
-                excluded = True
-
-                with self.metering_label_rule(metering_label_id1,
-                                              direction,
-                                              remote_ip_prefix,
-                                              excluded) as v1,\
-                        self.metering_label_rule(metering_label_id2,
-                                                 direction,
-                                                 remote_ip_prefix,
-                                                 excluded) as v2:
-                    metering_label_rule = (v1, v2)
-
-                    self._test_list_resources('metering-label-rule',
-                                              metering_label_rule)
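
As a quick reference, the request bodies assembled by the mixin helpers above
look like this once the test defaults are filled in (the label id is a
placeholder for whatever uuid the first request returns):

    metering_label = {'metering_label': {
        'name': 'label',
        'tenant_id': 'test-tenant',
        'shared': False,
        'description': 'desc'}}

    metering_label_rule = {'metering_label_rule': {
        'metering_label_id': '<label uuid>',
        'tenant_id': 'test-tenant',
        'direction': 'ingress',
        'excluded': 'false',
        'remote_ip_prefix': '10.0.0.0/24'}}

Posting a second rule whose remote_ip_prefix overlaps an existing rule on the
same label is rejected with a 409 Conflict, as
test_create_overlap_metering_label_rules asserts.
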
diff --git a/neutron/tests/unit/db/quota/__init__.py b/neutron/tests/unit/db/quota/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/db/quota/test_api.py b/neutron/tests/unit/db/quota/test_api.py
deleted file mode 100644 (file)
index 15647f9..0000000
+++ /dev/null
@@ -1,322 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import datetime
-
-import mock
-
-from neutron import context
-from neutron.db.quota import api as quota_api
-from neutron.tests.unit import testlib_api
-
-
-class TestQuotaDbApi(testlib_api.SqlTestCaseLight):
-
-    def _set_context(self):
-        self.tenant_id = 'Higuain'
-        self.context = context.Context('Gonzalo', self.tenant_id,
-                                       is_admin=False, is_advsvc=False)
-
-    def _create_reservation(self, resource_deltas,
-                            tenant_id=None, expiration=None):
-        tenant_id = tenant_id or self.tenant_id
-        return quota_api.create_reservation(
-            self.context, tenant_id, resource_deltas, expiration)
-
-    def _create_quota_usage(self, resource, used, tenant_id=None):
-        tenant_id = tenant_id or self.tenant_id
-        return quota_api.set_quota_usage(
-            self.context, resource, tenant_id, in_use=used)
-
-    def _verify_quota_usage(self, usage_info,
-                            expected_resource=None,
-                            expected_used=None,
-                            expected_dirty=None):
-        self.assertEqual(self.tenant_id, usage_info.tenant_id)
-        if expected_resource:
-            self.assertEqual(expected_resource, usage_info.resource)
-        if expected_dirty is not None:
-            self.assertEqual(expected_dirty, usage_info.dirty)
-        if expected_used is not None:
-            self.assertEqual(expected_used, usage_info.used)
-
-    def setUp(self):
-        super(TestQuotaDbApi, self).setUp()
-        self._set_context()
-
-    def test_create_quota_usage(self):
-        usage_info = self._create_quota_usage('goals', 26)
-        self._verify_quota_usage(usage_info,
-                                 expected_resource='goals',
-                                 expected_used=26)
-
-    def test_update_quota_usage(self):
-        self._create_quota_usage('goals', 26)
-        # Higuain scores a double
-        usage_info_1 = quota_api.set_quota_usage(
-            self.context, 'goals', self.tenant_id,
-            in_use=28)
-        self._verify_quota_usage(usage_info_1,
-                                 expected_used=28)
-        usage_info_2 = quota_api.set_quota_usage(
-            self.context, 'goals', self.tenant_id,
-            in_use=24)
-        self._verify_quota_usage(usage_info_2,
-                                 expected_used=24)
-
-    def test_update_quota_usage_with_deltas(self):
-        self._create_quota_usage('goals', 26)
-        # Higuain scores a double
-        usage_info_1 = quota_api.set_quota_usage(
-            self.context, 'goals', self.tenant_id,
-            in_use=2, delta=True)
-        self._verify_quota_usage(usage_info_1,
-                                 expected_used=28)
-
-    def test_set_quota_usage_dirty(self):
-        self._create_quota_usage('goals', 26)
-        # Higuain needs a shower after the match
-        self.assertEqual(1, quota_api.set_quota_usage_dirty(
-            self.context, 'goals', self.tenant_id))
-        usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'goals', self.tenant_id)
-        self._verify_quota_usage(usage_info,
-                                 expected_dirty=True)
-        # Higuain is clean now
-        self.assertEqual(1, quota_api.set_quota_usage_dirty(
-            self.context, 'goals', self.tenant_id, dirty=False))
-        usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'goals', self.tenant_id)
-        self._verify_quota_usage(usage_info,
-                                 expected_dirty=False)
-
-    def test_set_dirty_non_existing_quota_usage(self):
-        self.assertEqual(0, quota_api.set_quota_usage_dirty(
-            self.context, 'meh', self.tenant_id))
-
-    def test_set_resources_quota_usage_dirty(self):
-        self._create_quota_usage('goals', 26)
-        self._create_quota_usage('assists', 11)
-        self._create_quota_usage('bookings', 3)
-        self.assertEqual(2, quota_api.set_resources_quota_usage_dirty(
-            self.context, ['goals', 'bookings'], self.tenant_id))
-        usage_info_goals = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'goals', self.tenant_id)
-        usage_info_assists = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'assists', self.tenant_id)
-        usage_info_bookings = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'bookings', self.tenant_id)
-        self._verify_quota_usage(usage_info_goals, expected_dirty=True)
-        self._verify_quota_usage(usage_info_assists, expected_dirty=False)
-        self._verify_quota_usage(usage_info_bookings, expected_dirty=True)
-
-    def test_set_resources_quota_usage_dirty_with_empty_list(self):
-        self._create_quota_usage('goals', 26)
-        self._create_quota_usage('assists', 11)
-        self._create_quota_usage('bookings', 3)
-        # Expect all the resources for the tenant to be set dirty
-        self.assertEqual(3, quota_api.set_resources_quota_usage_dirty(
-            self.context, [], self.tenant_id))
-        usage_info_goals = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'goals', self.tenant_id)
-        usage_info_assists = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'assists', self.tenant_id)
-        usage_info_bookings = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'bookings', self.tenant_id)
-        self._verify_quota_usage(usage_info_goals, expected_dirty=True)
-        self._verify_quota_usage(usage_info_assists, expected_dirty=True)
-        self._verify_quota_usage(usage_info_bookings, expected_dirty=True)
-
-        # Higuain is clean now
-        self.assertEqual(1, quota_api.set_quota_usage_dirty(
-            self.context, 'goals', self.tenant_id, dirty=False))
-        usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'goals', self.tenant_id)
-        self._verify_quota_usage(usage_info,
-                                 expected_dirty=False)
-
-    def _test_set_all_quota_usage_dirty(self, expected):
-        self._create_quota_usage('goals', 26)
-        self._create_quota_usage('goals', 12, tenant_id='Callejon')
-        self.assertEqual(expected, quota_api.set_all_quota_usage_dirty(
-            self.context, 'goals'))
-
-    def test_set_all_quota_usage_dirty(self):
-        # All goal scorers need a shower after the match, but since this is
-        # not an admin context we can mark only one of them dirty
-        self._test_set_all_quota_usage_dirty(expected=1)
-
-    def test_get_quota_usage_by_tenant(self):
-        self._create_quota_usage('goals', 26)
-        self._create_quota_usage('assists', 11)
-        # Create a resource for a different tenant
-        self._create_quota_usage('mehs', 99, tenant_id='buffon')
-        usage_infos = quota_api.get_quota_usage_by_tenant_id(
-            self.context, self.tenant_id)
-
-        self.assertEqual(2, len(usage_infos))
-        resources = [info.resource for info in usage_infos]
-        self.assertIn('goals', resources)
-        self.assertIn('assists', resources)
-
-    def test_get_quota_usage_by_resource(self):
-        self._create_quota_usage('goals', 26)
-        self._create_quota_usage('assists', 11)
-        self._create_quota_usage('goals', 12, tenant_id='Callejon')
-        usage_infos = quota_api.get_quota_usage_by_resource(
-            self.context, 'goals')
-        # Only 1 result expected in tenant context
-        self.assertEqual(1, len(usage_infos))
-        self._verify_quota_usage(usage_infos[0],
-                                 expected_resource='goals',
-                                 expected_used=26)
-
-    def test_get_quota_usage_by_tenant_and_resource(self):
-        self._create_quota_usage('goals', 26)
-        usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'goals', self.tenant_id)
-        self._verify_quota_usage(usage_info,
-                                 expected_resource='goals',
-                                 expected_used=26)
-
-    def test_get_non_existing_quota_usage_returns_none(self):
-        self.assertIsNone(quota_api.get_quota_usage_by_resource_and_tenant(
-            self.context, 'goals', self.tenant_id))
-
-    def _verify_reserved_resources(self, expected, actual):
-        for (resource, delta) in actual.items():
-            self.assertIn(resource, expected)
-            self.assertEqual(delta, expected[resource])
-            del expected[resource]
-        self.assertFalse(expected)
-
-    def test_create_reservation(self):
-        resources = {'goals': 2, 'assists': 1}
-        resv = self._create_reservation(resources)
-        self.assertEqual(self.tenant_id, resv.tenant_id)
-        self._verify_reserved_resources(resources, resv.deltas)
-
-    def test_create_reservation_with_expiration(self):
-        resources = {'goals': 2, 'assists': 1}
-        exp_date = datetime.datetime(2016, 3, 31, 14, 30)
-        resv = self._create_reservation(resources, expiration=exp_date)
-        self.assertEqual(self.tenant_id, resv.tenant_id)
-        self.assertEqual(exp_date, resv.expiration)
-        self._verify_reserved_resources(resources, resv.deltas)
-
-    def test_remove_non_existent_reservation(self):
-        self.assertIsNone(quota_api.remove_reservation(self.context, 'meh'))
-
-    def _get_reservations_for_resource_helper(self):
-        # create three reservations, one of them already expired
-        resources_1 = {'goals': 2, 'assists': 1}
-        resources_2 = {'goals': 3, 'bookings': 1}
-        resources_3 = {'bookings': 2, 'assists': 2}
-        exp_date_1 = datetime.datetime(2016, 3, 31, 14, 30)
-        exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30)
-        self._create_reservation(resources_1, expiration=exp_date_1)
-        self._create_reservation(resources_2, expiration=exp_date_1)
-        self._create_reservation(resources_3, expiration=exp_date_2)
-
-    def test_get_reservations_for_resources(self):
-        with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow:
-            mock_utcnow.return_value = datetime.datetime(
-                2015, 5, 20, 0, 0)
-            self._get_reservations_for_resource_helper()
-            deltas = quota_api.get_reservations_for_resources(
-                self.context, self.tenant_id, ['goals', 'assists', 'bookings'])
-            self.assertIn('goals', deltas)
-            self.assertEqual(5, deltas['goals'])
-            self.assertIn('assists', deltas)
-            self.assertEqual(1, deltas['assists'])
-            self.assertIn('bookings', deltas)
-            self.assertEqual(1, deltas['bookings'])
-            self.assertEqual(3, len(deltas))
-
-    def test_get_expired_reservations_for_resources(self):
-        with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow:
-            mock_utcnow.return_value = datetime.datetime(
-                2015, 5, 20, 0, 0)
-            self._get_reservations_for_resource_helper()
-            deltas = quota_api.get_reservations_for_resources(
-                self.context, self.tenant_id,
-                ['goals', 'assists', 'bookings'],
-                expired=True)
-            self.assertIn('assists', deltas)
-            self.assertEqual(2, deltas['assists'])
-            self.assertIn('bookings', deltas)
-            self.assertEqual(2, deltas['bookings'])
-            self.assertEqual(2, len(deltas))
-
-    def test_get_reservation_for_resources_with_empty_list(self):
-        self.assertIsNone(quota_api.get_reservations_for_resources(
-            self.context, self.tenant_id, []))
-
-    def test_remove_expired_reservations(self):
-        with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow:
-            mock_utcnow.return_value = datetime.datetime(
-                2015, 5, 20, 0, 0)
-            resources = {'goals': 2, 'assists': 1}
-            exp_date_1 = datetime.datetime(2016, 3, 31, 14, 30)
-            resv_1 = self._create_reservation(resources, expiration=exp_date_1)
-            exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30)
-            resv_2 = self._create_reservation(resources, expiration=exp_date_2)
-            self.assertEqual(1, quota_api.remove_expired_reservations(
-                self.context, self.tenant_id))
-            self.assertIsNone(quota_api.get_reservation(
-                self.context, resv_2.reservation_id))
-            self.assertIsNotNone(quota_api.get_reservation(
-                self.context, resv_1.reservation_id))
-
-    def test_remove_expired_reservations_no_tenant(self):
-        with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow:
-            mock_utcnow.return_value = datetime.datetime(
-                2015, 5, 20, 0, 0)
-            resources = {'goals': 2, 'assists': 1}
-            exp_date_1 = datetime.datetime(2014, 3, 31, 14, 30)
-            resv_1 = self._create_reservation(resources, expiration=exp_date_1)
-            exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30)
-            resv_2 = self._create_reservation(resources,
-                                              expiration=exp_date_2,
-                                              tenant_id='Callejon')
-            self.assertEqual(2, quota_api.remove_expired_reservations(
-                self.context))
-            self.assertIsNone(quota_api.get_reservation(
-                self.context, resv_2.reservation_id))
-            self.assertIsNone(quota_api.get_reservation(
-                self.context, resv_1.reservation_id))
-
-
-class TestQuotaDbApiAdminContext(TestQuotaDbApi):
-
-    def _set_context(self):
-        self.tenant_id = 'Higuain'
-        self.context = context.Context('Gonzalo', self.tenant_id,
-                                       is_admin=True, is_advsvc=True)
-
-    def test_get_quota_usage_by_resource(self):
-        self._create_quota_usage('goals', 26)
-        self._create_quota_usage('assists', 11)
-        self._create_quota_usage('goals', 12, tenant_id='Callejon')
-        usage_infos = quota_api.get_quota_usage_by_resource(
-            self.context, 'goals')
-        # 2 results expected in admin context
-        self.assertEqual(2, len(usage_infos))
-        for usage_info in usage_infos:
-            self.assertEqual('goals', usage_info.resource)
-
-    def test_set_all_quota_usage_dirty(self):
-        # All goal scorers need a shower after the match, and with an admin
-        # context we can mark all of them dirty
-        self._test_set_all_quota_usage_dirty(expected=2)
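
Taken together, the reservation tests above trace the API's lifecycle. A
condensed usage example built only from the quota_api calls they exercise
(ctx and tenant_id stand for the test's context fixtures):

    resources = {'goals': 2, 'assists': 1}
    resv = quota_api.create_reservation(
        ctx, tenant_id, resources,
        expiration=datetime.datetime(2016, 3, 31, 14, 30))

    # Sum the deltas of unexpired reservations held against these resources;
    # pass expired=True to total the expired ones instead.
    deltas = quota_api.get_reservations_for_resources(
        ctx, tenant_id, ['goals', 'assists'])

    # Purge expired reservations. The number of removed reservations is
    # returned; omitting the tenant purges across all tenants.
    quota_api.remove_expired_reservations(ctx, tenant_id)
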
diff --git a/neutron/tests/unit/db/quota/test_driver.py b/neutron/tests/unit/db/quota/test_driver.py
deleted file mode 100644 (file)
index c505331..0000000
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from neutron.common import exceptions
-from neutron import context
-from neutron.db import db_base_plugin_v2 as base_plugin
-from neutron.db.quota import driver
-from neutron.tests.unit import testlib_api
-
-
-class FakePlugin(base_plugin.NeutronDbPluginV2, driver.DbQuotaDriver):
-    """A fake plugin class containing all DB methods."""
-
-
-class TestResource(object):
-    """Describe a test resource for quota checking."""
-
-    def __init__(self, name, default, fake_count=0):
-        self.name = name
-        self.quota = default
-        self.fake_count = fake_count
-
-    @property
-    def default(self):
-        return self.quota
-
-    def count(self, *args, **kwargs):
-        return self.fake_count
-
-
-PROJECT = 'prj_test'
-RESOURCE = 'res_test'
-ALT_RESOURCE = 'res_test_meh'
-
-
-class TestDbQuotaDriver(testlib_api.SqlTestCase):
-    def setUp(self):
-        super(TestDbQuotaDriver, self).setUp()
-        self.plugin = FakePlugin()
-        self.context = context.get_admin_context()
-
-    def test_create_quota_limit(self):
-        defaults = {RESOURCE: TestResource(RESOURCE, 4)}
-
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT)
-        self.assertEqual(2, quotas[RESOURCE])
-
-    def test_update_quota_limit(self):
-        defaults = {RESOURCE: TestResource(RESOURCE, 4)}
-
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 3)
-        quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT)
-        self.assertEqual(3, quotas[RESOURCE])
-
-    def test_delete_tenant_quota_restores_default_limit(self):
-        defaults = {RESOURCE: TestResource(RESOURCE, 4)}
-
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        self.plugin.delete_tenant_quota(self.context, PROJECT)
-        quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT)
-        self.assertEqual(4, quotas[RESOURCE])
-
-    def test_get_tenant_quotas(self):
-        user_ctx = context.Context(user_id=PROJECT, tenant_id=PROJECT)
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        quotas = self.plugin.get_tenant_quotas(user_ctx, {}, PROJECT)
-        self.assertEqual(2, quotas[RESOURCE])
-
-    def test_get_tenant_quotas_different_tenant(self):
-        user_ctx = context.Context(user_id=PROJECT,
-                                   tenant_id='another_project')
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        # assertFalse is appropriate here: the expected return value is an
-        # empty dict, i.e. the defaults passed to get_tenant_quotas in the
-        # call below
-        self.assertFalse(self.plugin.get_tenant_quotas(user_ctx, {}, PROJECT))
-
-    def test_get_all_quotas(self):
-        project_1 = 'prj_test_1'
-        project_2 = 'prj_test_2'
-        resource_1 = 'res_test_1'
-        resource_2 = 'res_test_2'
-
-        resources = {resource_1: TestResource(resource_1, 3),
-                     resource_2: TestResource(resource_2, 5)}
-
-        self.plugin.update_quota_limit(self.context, project_1, resource_1, 7)
-        self.plugin.update_quota_limit(self.context, project_2, resource_2, 9)
-        quotas = self.plugin.get_all_quotas(self.context, resources)
-
-        # Expect two tenants' quotas
-        self.assertEqual(2, len(quotas))
-        # But not quotas for the same tenant twice
-        self.assertNotEqual(quotas[0]['tenant_id'], quotas[1]['tenant_id'])
-
-        # Check the expected limits. The quotas can be in any order.
-        for quota in quotas:
-            self.assertEqual(3, len(quota))
-            project = quota['tenant_id']
-            self.assertIn(project, (project_1, project_2))
-            if project == project_1:
-                expected_limit_r1 = 7
-                expected_limit_r2 = 5
-            if project == project_2:
-                expected_limit_r1 = 3
-                expected_limit_r2 = 9
-            self.assertEqual(expected_limit_r1, quota[resource_1])
-            self.assertEqual(expected_limit_r2, quota[resource_2])
-
-    def test_limit_check(self):
-        resources = {RESOURCE: TestResource(RESOURCE, 2)}
-        values = {RESOURCE: 1}
-
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        self.plugin.limit_check(self.context, PROJECT, resources, values)
-
-    def test_limit_check_over_quota(self):
-        resources = {RESOURCE: TestResource(RESOURCE, 2)}
-        values = {RESOURCE: 3}
-
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-
-        self.assertRaises(exceptions.OverQuota, self.plugin.limit_check,
-                          context.get_admin_context(), PROJECT, resources,
-                          values)
-
-    def test_limit_check_equals_to_quota(self):
-        resources = {RESOURCE: TestResource(RESOURCE, 2)}
-        values = {RESOURCE: 2}
-
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        self.plugin.limit_check(self.context, PROJECT, resources, values)
-
-    def test_limit_check_value_lower_than_zero(self):
-        resources = {RESOURCE: TestResource(RESOURCE, 2)}
-        values = {RESOURCE: -1}
-
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        self.assertRaises(exceptions.InvalidQuotaValue,
-                          self.plugin.limit_check, context.get_admin_context(),
-                          PROJECT, resources, values)
-
-    def _test_make_reservation_success(self, quota_driver,
-                                       resource_name, deltas):
-        resources = {resource_name: TestResource(resource_name, 2)}
-        self.plugin.update_quota_limit(self.context, PROJECT, resource_name, 2)
-        reservation = quota_driver.make_reservation(
-            self.context,
-            self.context.tenant_id,
-            resources,
-            deltas,
-            self.plugin)
-        self.assertIn(resource_name, reservation.deltas)
-        self.assertEqual(deltas[resource_name],
-                         reservation.deltas[resource_name])
-        self.assertEqual(self.context.tenant_id,
-                         reservation.tenant_id)
-
-    def test_make_reservation_single_resource(self):
-        quota_driver = driver.DbQuotaDriver()
-        self._test_make_reservation_success(
-            quota_driver, RESOURCE, {RESOURCE: 1})
-
-    def test_make_reservation_fill_quota(self):
-        quota_driver = driver.DbQuotaDriver()
-        self._test_make_reservation_success(
-            quota_driver, RESOURCE, {RESOURCE: 2})
-
-    def test_make_reservation_multiple_resources(self):
-        quota_driver = driver.DbQuotaDriver()
-        resources = {RESOURCE: TestResource(RESOURCE, 2),
-                     ALT_RESOURCE: TestResource(ALT_RESOURCE, 2)}
-        deltas = {RESOURCE: 1, ALT_RESOURCE: 2}
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        self.plugin.update_quota_limit(self.context, PROJECT, ALT_RESOURCE, 2)
-        reservation = quota_driver.make_reservation(
-            self.context,
-            self.context.tenant_id,
-            resources,
-            deltas,
-            self.plugin)
-        self.assertIn(RESOURCE, reservation.deltas)
-        self.assertIn(ALT_RESOURCE, reservation.deltas)
-        self.assertEqual(1, reservation.deltas[RESOURCE])
-        self.assertEqual(2, reservation.deltas[ALT_RESOURCE])
-        self.assertEqual(self.context.tenant_id,
-                         reservation.tenant_id)
-
-    def test_make_reservation_over_quota_fails(self):
-        quota_driver = driver.DbQuotaDriver()
-        resources = {RESOURCE: TestResource(RESOURCE, 2,
-                                            fake_count=2)}
-        deltas = {RESOURCE: 1}
-        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
-        self.assertRaises(exceptions.OverQuota,
-                          quota_driver.make_reservation,
-                          self.context,
-                          self.context.tenant_id,
-                          resources,
-                          deltas,
-                          self.plugin)
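
The driver tests above pin down the over-quota rule: a reservation succeeds
only while current usage plus the requested delta stays within the limit, so
a fake_count of 2 against a limit of 2 leaves no headroom for a delta of 1.
A minimal sketch of that check (not the driver's actual code; negative
limits conventionally mean unlimited):

    def would_exceed_quota(count, delta, limit):
        # Unlimited (negative) quotas never block a reservation.
        return limit >= 0 and count + delta > limit

    would_exceed_quota(2, 1, 2)  # True: OverQuota, as in the last test
    would_exceed_quota(0, 2, 2)  # False: a reservation may fill the quota
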
diff --git a/neutron/tests/unit/db/test_agents_db.py b/neutron/tests/unit/db/test_agents_db.py
deleted file mode 100644 (file)
index cabae43..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import datetime
-import mock
-
-from oslo_config import cfg
-from oslo_db import exception as exc
-from oslo_utils import timeutils
-import testscenarios
-
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import db_base_plugin_v2 as base_plugin
-from neutron.tests.unit import testlib_api
-
-# testscenarios multiplies tests according to their 'scenarios' attribute.
-# Assigning its load_tests hook below makes that happen automatically for
-# every test in this module. Quoting the testscenarios documentation:
-#
-#     Multiply tests depending on their 'scenarios' attribute. This can be
-#     assigned to 'load_tests' in any test module to make this automatically
-#     work across tests in the module.
-load_tests = testscenarios.load_tests_apply_scenarios
-
-
-class FakePlugin(base_plugin.NeutronDbPluginV2, agents_db.AgentDbMixin):
-    """A fake plugin class containing all DB methods."""
-
-
-class TestAgentsDbBase(testlib_api.SqlTestCase):
-    def setUp(self):
-        super(TestAgentsDbBase, self).setUp()
-        self.context = context.get_admin_context()
-        self.plugin = FakePlugin()
-
-    def _get_agents(self, hosts, agent_type):
-        return [
-            agents_db.Agent(
-                binary='foo-agent',
-                host=host,
-                agent_type=agent_type,
-                topic='foo_topic',
-                configurations="",
-                created_at=timeutils.utcnow(),
-                started_at=timeutils.utcnow(),
-                heartbeat_timestamp=timeutils.utcnow())
-            for host in hosts
-        ]
-
-    def _save_agents(self, agents):
-        for agent in agents:
-            with self.context.session.begin(subtransactions=True):
-                self.context.session.add(agent)
-
-    def _create_and_save_agents(self, hosts, agent_type, down_agents_count=0):
-        agents = self._get_agents(hosts, agent_type)
-        # bring down the specified agents
-        for agent in agents[:down_agents_count]:
-            agent['heartbeat_timestamp'] -= datetime.timedelta(minutes=60)
-
-        self._save_agents(agents)
-        return agents
-
-
-class TestAgentsDbMixin(TestAgentsDbBase):
-    def setUp(self):
-        super(TestAgentsDbMixin, self).setUp()
-
-        self.agent_status = {
-            'agent_type': 'Open vSwitch agent',
-            'binary': 'neutron-openvswitch-agent',
-            'host': 'overcloud-notcompute',
-            'topic': 'N/A'
-        }
-
-    def test_get_enabled_agent_on_host_found(self):
-        agents = self._create_and_save_agents(['foo_host'],
-                                              constants.AGENT_TYPE_L3)
-        expected = self.plugin.get_enabled_agent_on_host(
-            self.context, constants.AGENT_TYPE_L3, 'foo_host')
-        self.assertEqual(expected, agents[0])
-
-    def test_get_enabled_agent_on_host_not_found(self):
-        with mock.patch.object(agents_db.LOG, 'debug') as mock_log:
-            agent = self.plugin.get_enabled_agent_on_host(
-                self.context, constants.AGENT_TYPE_L3, 'foo_agent')
-        self.assertIsNone(agent)
-        self.assertTrue(mock_log.called)
-
-    def _assert_ref_fields_are_equal(self, reference, result):
-        """Compare (key, value) pairs of a reference dict with the result
-
-           Note: the result MAY have additional keys
-        """
-
-        for field, value in reference.items():
-            self.assertEqual(value, result[field], field)
-
-    def test_create_or_update_agent_new_entry(self):
-        self.plugin.create_or_update_agent(self.context, self.agent_status)
-
-        agent = self.plugin.get_agents(self.context)[0]
-        self._assert_ref_fields_are_equal(self.agent_status, agent)
-
-    def test_create_or_update_agent_existing_entry(self):
-        self.plugin.create_or_update_agent(self.context, self.agent_status)
-        self.plugin.create_or_update_agent(self.context, self.agent_status)
-        self.plugin.create_or_update_agent(self.context, self.agent_status)
-
-        agents = self.plugin.get_agents(self.context)
-        self.assertEqual(1, len(agents))
-
-        agent = agents[0]
-        self._assert_ref_fields_are_equal(self.agent_status, agent)
-
-    def test_create_or_update_agent_logs_heartbeat(self):
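-        # heartbeats show up in the info log only when the agent opts in
-        # via the log_agent_heartbeats configuration flag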
-        status = self.agent_status.copy()
-        status['configurations'] = {'log_agent_heartbeats': True}
-
-        with mock.patch.object(agents_db.LOG, 'info') as info:
-            self.plugin.create_or_update_agent(self.context, status)
-            self.assertTrue(info.called)
-            status['configurations'] = {'log_agent_heartbeats': False}
-            info.reset_mock()
-            self.plugin.create_or_update_agent(self.context, status)
-            self.assertFalse(info.called)
-
-    def test_create_or_update_agent_concurrent_insert(self):
-        # NOTE(rpodolyaka): emulate violation of the unique constraint caused
-        #                   by a concurrent insert. Ensure we make another
-        #                   attempt on fail
-        with mock.patch('sqlalchemy.orm.Session.add') as add_mock:
-            add_mock.side_effect = [
-                exc.DBDuplicateEntry(),
-                None
-            ]
-
-            self.plugin.create_or_update_agent(self.context, self.agent_status)
-
-            self.assertEqual(2, add_mock.call_count,
-                             "Agent entry creation hasn't been retried")
-
-    def test_create_or_update_agent_disable_new_agents(self):
-        cfg.CONF.set_override('enable_new_agents', False)
-        self.plugin.create_or_update_agent(self.context, self.agent_status)
-        agent = self.plugin.get_agents(self.context)[0]
-        self.assertFalse(agent['admin_state_up'])
-
-    def test_agent_health_check(self):
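-        # agent_health_check() logs at debug while all agents are alive and
-        # emits a warning with a formatted table once one is seen as dead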
-        agents = [{'agent_type': "DHCP Agent",
-                   'heartbeat_timestamp': '2015-05-06 22:40:40.432295',
-                   'host': 'some.node',
-                   'alive': True}]
-        with mock.patch.object(self.plugin, 'get_agents',
-                               return_value=agents),\
-                mock.patch.object(agents_db.LOG, 'warn') as warn,\
-                mock.patch.object(agents_db.LOG, 'debug') as debug:
-            self.plugin.agent_health_check()
-            self.assertTrue(debug.called)
-            self.assertFalse(warn.called)
-            agents[0]['alive'] = False
-            self.plugin.agent_health_check()
-            warn.assert_called_once_with(
-                mock.ANY,
-                {'count': 1, 'total': 1,
-                 'data': "                Type       Last heartbeat host\n"
-                 "          DHCP Agent 2015-05-06 22:40:40.432295 some.node"}
-            )
-
-
-class TestAgentsDbGetAgents(TestAgentsDbBase):
-    scenarios = [
-        ('Get all agents', dict(agents=5, down_agents=2,
-                                agents_alive=None,
-                                expected_agents=5)),
-
-        ('Get alive agents (True)', dict(agents=5, down_agents=2,
-                                         agents_alive='True',
-                                         expected_agents=3)),
-
-        ('Get down agents (False)', dict(agents=5, down_agents=2,
-                                         agents_alive='False',
-                                         expected_agents=2)),
-
-        ('Get alive agents (true)', dict(agents=5, down_agents=2,
-                                         agents_alive='true',
-                                         expected_agents=3)),
-
-        ('Get down agents (false)', dict(agents=5, down_agents=2,
-                                         agents_alive='false',
-                                         expected_agents=2)),
-
-        ('Get agents invalid alive filter', dict(agents=5, down_agents=2,
-                                                 agents_alive='invalid',
-                                                 expected_agents=None)),
-    ]
-
-    def setUp(self):
-        # ensure that the first scenario will execute with nosetests
-        if not hasattr(self, 'agents'):
-            self.__dict__.update(self.scenarios[0][1])
-        super(TestAgentsDbGetAgents, self).setUp()
-
-    def test_get_agents(self):
-        hosts = ['host-%s' % i for i in range(self.agents)]
-        self._create_and_save_agents(hosts, constants.AGENT_TYPE_L3,
-                                     down_agents_count=self.down_agents)
-        if self.agents_alive == 'invalid':
-            self.assertRaises(n_exc.InvalidInput, self.plugin.get_agents,
-                              self.context,
-                              filters={'alive': [self.agents_alive]})
-        else:
-            filters = ({'alive': [self.agents_alive]}
-                       if self.agents_alive else None)
-            returned_agents = self.plugin.get_agents(self.context,
-                                                     filters=filters)
-            self.assertEqual(self.expected_agents, len(returned_agents))
-            if self.agents_alive:
-                alive = self.agents_alive in ('True', 'true')
-                for agent in returned_agents:
-                    self.assertEqual(alive, agent['alive'])
diff --git a/neutron/tests/unit/db/test_agentschedulers_db.py b/neutron/tests/unit/db/test_agentschedulers_db.py
deleted file mode 100644 (file)
index c878422..0000000
+++ /dev/null
@@ -1,1645 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import datetime
-
-import mock
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-import oslo_messaging
-from oslo_utils import uuidutils
-from webob import exc
-
-from neutron.api import extensions
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.rpc.handlers import dhcp_rpc
-from neutron.api.rpc.handlers import l3_rpc
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import l3_agentschedulers_db
-from neutron.extensions import agent
-from neutron.extensions import dhcpagentscheduler
-from neutron.extensions import l3agentscheduler
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-from neutron.tests.common import helpers
-from neutron.tests import fake_notifier
-from neutron.tests import tools
-from neutron.tests.unit.api import test_extensions
-from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
-from neutron.tests.unit.extensions import test_agent
-from neutron.tests.unit.extensions import test_l3
-from neutron.tests.unit import testlib_api
-from neutron import wsgi
-
-L3_HOSTA = 'hosta'
-DHCP_HOSTA = 'hosta'
-L3_HOSTB = 'hostb'
-DHCP_HOSTC = 'hostc'
-
-DEVICE_OWNER_COMPUTE = (constants.DEVICE_OWNER_COMPUTE_PREFIX +
-                        'test:' + DHCP_HOSTA)
-
-
-class AgentSchedulerTestMixIn(object):
-
-    def _request_list(self, path, admin_context=True,
-                      expected_code=exc.HTTPOk.code):
-        req = self._path_req(path, admin_context=admin_context)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(expected_code, res.status_int)
-        return self.deserialize(self.fmt, res)
-
-    def _path_req(self, path, method='GET', data=None,
-                  query_string=None,
-                  admin_context=True):
-        content_type = 'application/%s' % self.fmt
-        body = None
-        if data is not None:  # empty dict is valid
-            body = wsgi.Serializer().serialize(data, content_type)
-        if admin_context:
-            return testlib_api.create_request(
-                path, body, content_type, method, query_string=query_string)
-        else:
-            return testlib_api.create_request(
-                path, body, content_type, method, query_string=query_string,
-                context=context.Context('', 'tenant_id'))
-
-    def _path_create_request(self, path, data, admin_context=True):
-        return self._path_req(path, method='POST', data=data,
-                              admin_context=admin_context)
-
-    def _path_show_request(self, path, admin_context=True):
-        return self._path_req(path, admin_context=admin_context)
-
-    def _path_delete_request(self, path, admin_context=True):
-        return self._path_req(path, method='DELETE',
-                              admin_context=admin_context)
-
-    def _path_update_request(self, path, data, admin_context=True):
-        return self._path_req(path, method='PUT', data=data,
-                              admin_context=admin_context)
-
-    def _list_routers_hosted_by_l3_agent(self, agent_id,
-                                         expected_code=exc.HTTPOk.code,
-                                         admin_context=True):
-        path = "/agents/%s/%s.%s" % (agent_id,
-                                     l3agentscheduler.L3_ROUTERS,
-                                     self.fmt)
-        return self._request_list(path, expected_code=expected_code,
-                                  admin_context=admin_context)
-
-    def _list_networks_hosted_by_dhcp_agent(self, agent_id,
-                                            expected_code=exc.HTTPOk.code,
-                                            admin_context=True):
-        path = "/agents/%s/%s.%s" % (agent_id,
-                                     dhcpagentscheduler.DHCP_NETS,
-                                     self.fmt)
-        return self._request_list(path, expected_code=expected_code,
-                                  admin_context=admin_context)
-
-    def _list_l3_agents_hosting_router(self, router_id,
-                                       expected_code=exc.HTTPOk.code,
-                                       admin_context=True):
-        path = "/routers/%s/%s.%s" % (router_id,
-                                      l3agentscheduler.L3_AGENTS,
-                                      self.fmt)
-        return self._request_list(path, expected_code=expected_code,
-                                  admin_context=admin_context)
-
-    def _list_dhcp_agents_hosting_network(self, network_id,
-                                          expected_code=exc.HTTPOk.code,
-                                          admin_context=True):
-        path = "/networks/%s/%s.%s" % (network_id,
-                                       dhcpagentscheduler.DHCP_AGENTS,
-                                       self.fmt)
-        return self._request_list(path, expected_code=expected_code,
-                                  admin_context=admin_context)
-
-    def _add_router_to_l3_agent(self, id, router_id,
-                                expected_code=exc.HTTPCreated.code,
-                                admin_context=True):
-        path = "/agents/%s/%s.%s" % (id,
-                                     l3agentscheduler.L3_ROUTERS,
-                                     self.fmt)
-        req = self._path_create_request(path,
-                                        {'router_id': router_id},
-                                        admin_context=admin_context)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(expected_code, res.status_int)
-
-    def _add_network_to_dhcp_agent(self, id, network_id,
-                                   expected_code=exc.HTTPCreated.code,
-                                   admin_context=True):
-        path = "/agents/%s/%s.%s" % (id,
-                                     dhcpagentscheduler.DHCP_NETS,
-                                     self.fmt)
-        req = self._path_create_request(path,
-                                        {'network_id': network_id},
-                                        admin_context=admin_context)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(expected_code, res.status_int)
-
-    def _remove_network_from_dhcp_agent(self, id, network_id,
-                                        expected_code=exc.HTTPNoContent.code,
-                                        admin_context=True):
-        path = "/agents/%s/%s/%s.%s" % (id,
-                                        dhcpagentscheduler.DHCP_NETS,
-                                        network_id,
-                                        self.fmt)
-        req = self._path_delete_request(path,
-                                        admin_context=admin_context)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(expected_code, res.status_int)
-
-    def _remove_router_from_l3_agent(self, id, router_id,
-                                     expected_code=exc.HTTPNoContent.code,
-                                     admin_context=True):
-        path = "/agents/%s/%s/%s.%s" % (id,
-                                        l3agentscheduler.L3_ROUTERS,
-                                        router_id,
-                                        self.fmt)
-        req = self._path_delete_request(path, admin_context=admin_context)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(expected_code, res.status_int)
-
-    def _assert_notify(self, notifications, expected_event_type):
-        event_types = [event['event_type'] for event in notifications]
-        self.assertIn(expected_event_type, event_types)
-
-    def test_agent_registration_bad_timestamp(self):
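-        # a timestamp a full day in the past is well beyond any reasonable
-        # clock drift; report_state should absorb it without raising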
-        callback = agents_db.AgentExtRpcCallback()
-        delta_time = datetime.datetime.now() - datetime.timedelta(days=1)
-        str_time = delta_time.strftime('%Y-%m-%dT%H:%M:%S.%f')
-        callback.report_state(
-            self.adminContext,
-            agent_state={
-                'agent_state': helpers._get_dhcp_agent_dict(DHCP_HOSTA)},
-            time=str_time)
-
-    def test_agent_registration_invalid_timestamp_allowed(self):
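-        # a small (10 second) skew is within tolerance and must be accepted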
-        callback = agents_db.AgentExtRpcCallback()
-        utc_time = datetime.datetime.utcnow()
-        delta_time = utc_time - datetime.timedelta(seconds=10)
-        str_time = delta_time.strftime('%Y-%m-%dT%H:%M:%S.%f')
-        callback.report_state(
-            self.adminContext,
-            agent_state={
-                'agent_state': helpers._get_dhcp_agent_dict(DHCP_HOSTA)},
-            time=str_time)
-
-    def _disable_agent(self, agent_id, admin_state_up=False):
-        new_agent = {'agent': {'admin_state_up': admin_state_up}}
-        self._update('agents', agent_id, new_agent)
-
-    def _get_agent_id(self, agent_type, host):
-        agents = self._list_agents()
-        for agent_data in agents['agents']:
-            if (agent_data['agent_type'] == agent_type and
-                agent_data['host'] == host):
-                return agent_data['id']
-
-
-class OvsAgentSchedulerTestCaseBase(test_l3.L3NatTestCaseMixin,
-                                    test_agent.AgentDBTestMixIn,
-                                    AgentSchedulerTestMixIn,
-                                    test_plugin.NeutronDbPluginV2TestCase):
-    fmt = 'json'
-    plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-    l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
-                 'TestL3NatAgentSchedulingServicePlugin')
-
-    def setUp(self):
-        self.useFixture(tools.AttributeMapMemento())
-        if self.l3_plugin:
-            service_plugins = {'l3_plugin_name': self.l3_plugin}
-        else:
-            service_plugins = None
-        mock.patch('neutron.common.rpc.get_client').start()
-        super(OvsAgentSchedulerTestCaseBase, self).setUp(
-            self.plugin_str, service_plugins=service_plugins)
-        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
-        self.adminContext = context.get_admin_context()
-        # Add the resources to the global attribute map
-        # This is done here as the setup process won't
-        # initialize the main API router which extends
-        # the global attribute map
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            agent.RESOURCE_ATTRIBUTE_MAP)
-        self.l3plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        self.l3_notify_p = mock.patch(
-            'neutron.extensions.l3agentscheduler.notify')
-        self.patched_l3_notify = self.l3_notify_p.start()
-        self.l3_periodic_p = mock.patch('neutron.db.l3_agentschedulers_db.'
-                                        'L3AgentSchedulerDbMixin.'
-                                        'start_periodic_l3_agent_status_check')
-        self.patched_l3_periodic = self.l3_periodic_p.start()
-        self.dhcp_notify_p = mock.patch(
-            'neutron.extensions.dhcpagentscheduler.notify')
-        self.patched_dhcp_notify = self.dhcp_notify_p.start()
-
-
-class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
-
-    def test_report_states(self):
-        self._register_agent_states()
-        agents = self._list_agents()
-        self.assertEqual(4, len(agents['agents']))
-
-    def test_network_scheduling_on_network_creation(self):
-        self._register_agent_states()
-        with self.network() as net:
-            dhcp_agents = self._list_dhcp_agents_hosting_network(
-                net['network']['id'])
-        self.assertEqual(0, len(dhcp_agents['agents']))
-
-    def test_network_auto_schedule_with_disabled(self):
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-        with self.subnet(), self.subnet():
-            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTA)
-            hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTC)
-            self._disable_agent(hosta_id)
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
-            # second agent will host all the networks since first is disabled.
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
-            networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
-            num_hostc_nets = len(networks['networks'])
-            networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
-            num_hosta_nets = len(networks['networks'])
-        self.assertEqual(0, num_hosta_nets)
-        self.assertEqual(2, num_hostc_nets)
-
-    def test_network_auto_schedule_with_no_dhcp(self):
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-        with self.subnet(enable_dhcp=False), self.subnet(enable_dhcp=False):
-            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTA)
-            hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTC)
-            self._disable_agent(hosta_id)
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
-            networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
-            num_hostc_nets = len(networks['networks'])
-            networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
-            num_hosta_nets = len(networks['networks'])
-        self.assertEqual(0, num_hosta_nets)
-        self.assertEqual(0, num_hostc_nets)
-
-    def test_network_auto_schedule_with_multiple_agents(self):
-        cfg.CONF.set_override('dhcp_agents_per_network', 2)
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-        with self.subnet(), self.subnet():
-            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTA)
-            hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTC)
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
-            networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
-            num_hostc_nets = len(networks['networks'])
-            networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
-            num_hosta_nets = len(networks['networks'])
-        self.assertEqual(2, num_hosta_nets)
-        self.assertEqual(2, num_hostc_nets)
-
-    def test_network_auto_schedule_restart_dhcp_agent(self):
-        cfg.CONF.set_override('dhcp_agents_per_network', 2)
-        with self.subnet() as sub1:
-            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
-            self._register_agent_states()
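-            # reporting in twice emulates an agent restart; the network
-            # must stay bound once rather than being scheduled again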
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
-            dhcp_agents = self._list_dhcp_agents_hosting_network(
-                sub1['subnet']['network_id'])
-        self.assertEqual(1, len(dhcp_agents['agents']))
-
-    def test_network_auto_schedule_with_hosted(self):
-        # one agent hosts all the networks, other hosts none
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-        with self.subnet() as sub1, self.subnet():
-            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
-            self._register_agent_states()
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
-            # the second agent will not host the network since the first
-            # one already hosts it
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
-            dhcp_agents = self._list_dhcp_agents_hosting_network(
-                sub1['subnet']['network_id'])
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTA)
-            hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTC)
-            hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id)
-            num_hosta_nets = len(hosta_nets['networks'])
-            hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id)
-            num_hostc_nets = len(hostc_nets['networks'])
-
-        self.assertEqual(2, num_hosta_nets)
-        self.assertEqual(0, num_hostc_nets)
-        self.assertEqual(1, len(dhcp_agents['agents']))
-        self.assertEqual(DHCP_HOSTA, dhcp_agents['agents'][0]['host'])
-
-    def test_network_auto_schedule_with_hosted_2(self):
-        # one agent hosts one network
-        dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-        with self.subnet() as sub1:
-            helpers.register_dhcp_agent(DHCP_HOSTA)
-            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTA)
-            self._disable_agent(hosta_id, admin_state_up=False)
-            with self.subnet() as sub2:
-                helpers.register_dhcp_agent(DHCP_HOSTC)
-                dhcp_rpc_cb.get_active_networks(self.adminContext,
-                                                host=DHCP_HOSTC)
-                dhcp_agents_1 = self._list_dhcp_agents_hosting_network(
-                    sub1['subnet']['network_id'])
-                dhcp_agents_2 = self._list_dhcp_agents_hosting_network(
-                    sub2['subnet']['network_id'])
-                hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id)
-                num_hosta_nets = len(hosta_nets['networks'])
-                hostc_id = self._get_agent_id(
-                    constants.AGENT_TYPE_DHCP,
-                    DHCP_HOSTC)
-                hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id)
-                num_hostc_nets = len(hostc_nets['networks'])
-
-        self.assertEqual(1, num_hosta_nets)
-        self.assertEqual(1, num_hostc_nets)
-        self.assertEqual(1, len(dhcp_agents_1['agents']))
-        self.assertEqual(1, len(dhcp_agents_2['agents']))
-        self.assertEqual(DHCP_HOSTA, dhcp_agents_1['agents'][0]['host'])
-        self.assertEqual(DHCP_HOSTC, dhcp_agents_2['agents'][0]['host'])
-
-    def test_network_scheduling_on_port_creation(self):
-        with self.subnet() as subnet:
-            dhcp_agents = self._list_dhcp_agents_hosting_network(
-                subnet['subnet']['network_id'])
-            result0 = len(dhcp_agents['agents'])
-            self._register_agent_states()
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE) as port:
-                dhcp_agents = self._list_dhcp_agents_hosting_network(
-                    port['port']['network_id'])
-                result1 = len(dhcp_agents['agents'])
-        self.assertEqual(0, result0)
-        self.assertEqual(1, result1)
-
-    def test_network_ha_scheduling_on_port_creation(self):
-        cfg.CONF.set_override('dhcp_agents_per_network', 2)
-        with self.subnet() as subnet:
-            dhcp_agents = self._list_dhcp_agents_hosting_network(
-                subnet['subnet']['network_id'])
-            result0 = len(dhcp_agents['agents'])
-            self._register_agent_states()
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE) as port:
-                dhcp_agents = self._list_dhcp_agents_hosting_network(
-                    port['port']['network_id'])
-                result1 = len(dhcp_agents['agents'])
-        self.assertEqual(0, result0)
-        self.assertEqual(2, result1)
-
-    def test_network_ha_scheduling_on_port_creation_with_new_agent(self):
-        cfg.CONF.set_override('dhcp_agents_per_network', 3)
-        with self.subnet() as subnet:
-            dhcp_agents = self._list_dhcp_agents_hosting_network(
-                subnet['subnet']['network_id'])
-            result0 = len(dhcp_agents['agents'])
-            self._register_agent_states()
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE) as port:
-                dhcp_agents = self._list_dhcp_agents_hosting_network(
-                    port['port']['network_id'])
-                result1 = len(dhcp_agents['agents'])
-            helpers.register_dhcp_agent('host1')
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE) as port:
-                dhcp_agents = self._list_dhcp_agents_hosting_network(
-                    port['port']['network_id'])
-                result2 = len(dhcp_agents['agents'])
-        self.assertEqual(0, result0)
-        self.assertEqual(2, result1)
-        self.assertEqual(3, result2)
-
-    def test_network_scheduler_with_disabled_agent(self):
-        helpers.register_dhcp_agent(DHCP_HOSTA)
-        with self.port() as port1:
-            dhcp_agents = self._list_dhcp_agents_hosting_network(
-                port1['port']['network_id'])
-        self._delete('ports', port1['port']['id'])
-        self._delete('networks', port1['port']['network_id'])
-        self.assertEqual(1, len(dhcp_agents['agents']))
-        agents = self._list_agents()
-        self._disable_agent(agents['agents'][0]['id'])
-        with self.port() as port2:
-            dhcp_agents = self._list_dhcp_agents_hosting_network(
-                port2['port']['network_id'])
-        self._delete('ports', port2['port']['id'])
-        self.assertEqual(0, len(dhcp_agents['agents']))
-
-    def test_is_eligible_agent(self):
-        agent_startup = ('neutron.db.agentschedulers_db.'
-                         'DhcpAgentSchedulerDbMixin.agent_starting_up')
-        is_eligible_agent = ('neutron.db.agentschedulers_db.'
-                             'AgentSchedulerDbMixin.is_eligible_agent')
-        dhcp_mixin = agentschedulers_db.DhcpAgentSchedulerDbMixin()
-        with mock.patch(agent_startup) as startup,\
-                mock.patch(is_eligible_agent) as elig:
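-            # the mixin counts an agent as eligible if it is still starting
-            # up OR the base check passes; verify the full truth table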
-            tests = [(True, True),
-                     (True, False),
-                     (False, True),
-                     (False, False)]
-            for rv1, rv2 in tests:
-                startup.return_value = rv1
-                elig.return_value = rv2
-                self.assertEqual(rv1 or rv2,
-                                 dhcp_mixin.is_eligible_agent(None,
-                                                              None, None))
-
-    def test_network_scheduler_with_down_agent(self):
-        helpers.register_dhcp_agent(DHCP_HOSTA)
-        eligible_agent_str = ('neutron.db.agentschedulers_db.'
-                              'DhcpAgentSchedulerDbMixin.is_eligible_agent')
-        with mock.patch(eligible_agent_str) as eligible_agent:
-            eligible_agent.return_value = True
-            with self.port() as port:
-                dhcp_agents = self._list_dhcp_agents_hosting_network(
-                    port['port']['network_id'])
-            self._delete('ports', port['port']['id'])
-            self._delete('networks', port['port']['network_id'])
-            self.assertEqual(1, len(dhcp_agents['agents']))
-
-        with mock.patch(eligible_agent_str) as eligible_agent:
-            eligible_agent.return_value = False
-            with self.port() as port:
-                dhcp_agents = self._list_dhcp_agents_hosting_network(
-                    port['port']['network_id'])
-            self._delete('ports', port['port']['id'])
-            self.assertEqual(0, len(dhcp_agents['agents']))
-
-    def test_network_scheduler_with_hosted_network(self):
-        plugin = manager.NeutronManager.get_plugin()
-        helpers.register_dhcp_agent(DHCP_HOSTA)
-        with self.port() as port1:
-            dhcp_agents = self._list_dhcp_agents_hosting_network(
-                port1['port']['network_id'])
-            self.assertEqual(1, len(dhcp_agents['agents']))
-        with mock.patch.object(plugin,
-                               'get_dhcp_agents_hosting_networks',
-                               autospec=True) as mock_hosting_agents:
-
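-            # pretend every agent already hosts the networks so the
-            # scheduler skips the new network entirely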
-            mock_hosting_agents.return_value = plugin.get_agents_db(
-                self.adminContext)
-            with self.network('test') as net1:
-                pass
-            with self.subnet(network=net1,
-                             cidr='10.0.1.0/24') as subnet1:
-                pass
-            with self.port(subnet=subnet1) as port2:
-                pass
-        dhcp_agents = self._list_dhcp_agents_hosting_network(
-            port2['port']['network_id'])
-        self.assertEqual(0, len(dhcp_agents['agents']))
-
-    def test_network_policy(self):
-        with self.network() as net1:
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTA)
-            self._list_networks_hosted_by_dhcp_agent(
-                hosta_id, expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
-            self._add_network_to_dhcp_agent(
-                hosta_id, net1['network']['id'],
-                expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
-            self._add_network_to_dhcp_agent(hosta_id,
-                                            net1['network']['id'])
-            self._remove_network_from_dhcp_agent(
-                hosta_id, net1['network']['id'],
-                expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
-            self._list_dhcp_agents_hosting_network(
-                net1['network']['id'],
-                expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
-
-    def _test_network_add_to_dhcp_agent(self, admin_state_up=True):
-        with self.network() as net1:
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTA)
-            if not admin_state_up:
-                self._set_agent_admin_state_up(DHCP_HOSTA, False)
-            num_before_add = len(
-                self._list_networks_hosted_by_dhcp_agent(
-                    hosta_id)['networks'])
-            self._add_network_to_dhcp_agent(hosta_id,
-                                            net1['network']['id'])
-            num_after_add = len(
-                self._list_networks_hosted_by_dhcp_agent(
-                    hosta_id)['networks'])
-        self.assertEqual(0, num_before_add)
-        self.assertEqual(1, num_after_add)
-
-    def test_network_add_to_dhcp_agent(self):
-        self._test_network_add_to_dhcp_agent()
-
-    def test_network_add_to_dhcp_agent_with_admin_state_down(self):
-        cfg.CONF.set_override(
-            'enable_services_on_agents_with_admin_state_down', True)
-        self._test_network_add_to_dhcp_agent(admin_state_up=False)
-
-    def test_network_remove_from_dhcp_agent(self):
-        agent = helpers.register_dhcp_agent(DHCP_HOSTA)
-        hosta_id = agent.id
-        with self.port() as port1:
-            num_before_remove = len(
-                self._list_networks_hosted_by_dhcp_agent(
-                    hosta_id)['networks'])
-            self._remove_network_from_dhcp_agent(hosta_id,
-                                                 port1['port']['network_id'])
-            num_after_remove = len(
-                self._list_networks_hosted_by_dhcp_agent(
-                    hosta_id)['networks'])
-        self.assertEqual(1, num_before_remove)
-        self.assertEqual(0, num_after_remove)
-
-    def test_list_active_networks_on_not_registered_yet_dhcp_agent(self):
-        plugin = manager.NeutronManager.get_plugin()
-        nets = plugin.list_active_networks_on_active_dhcp_agent(
-            self.adminContext, host=DHCP_HOSTA)
-        self.assertEqual([], nets)
-
-    def test_reserved_port_after_network_remove_from_dhcp_agent(self):
-        helpers.register_dhcp_agent(DHCP_HOSTA)
-        hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                      DHCP_HOSTA)
-        with self.port(device_owner=constants.DEVICE_OWNER_DHCP,
-                       host=DHCP_HOSTA) as port1:
-            self._remove_network_from_dhcp_agent(hosta_id,
-                                                 port1['port']['network_id'])
-            port_res = self._list_ports(
-                'json',
-                200,
-                network_id=port1['port']['network_id'])
-            port_list = self.deserialize('json', port_res)
-            self.assertEqual(constants.DEVICE_ID_RESERVED_DHCP_PORT,
-                             port_list['ports'][0]['device_id'])
-
-    def _test_get_active_networks_from_admin_state_down_agent(self,
-                                                              keep_services):
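-        # with enable_services_on_agents_with_admin_state_down set, an
-        # admin-down agent keeps serving its networks; otherwise it gets
-        # an empty network list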
-        if keep_services:
-            cfg.CONF.set_override(
-                'enable_services_on_agents_with_admin_state_down', True)
-        helpers.register_dhcp_agent(DHCP_HOSTA)
-        dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
-        with self.port():
-            nets = dhcp_rpc_cb.get_active_networks(self.adminContext,
-                                                   host=DHCP_HOSTA)
-            self.assertEqual(1, len(nets))
-            self._set_agent_admin_state_up(DHCP_HOSTA, False)
-            nets = dhcp_rpc_cb.get_active_networks(self.adminContext,
-                                                   host=DHCP_HOSTA)
-            if keep_services:
-                self.assertEqual(1, len(nets))
-            else:
-                self.assertEqual(0, len(nets))
-
-    def test_dhcp_agent_keep_services_off(self):
-        self._test_get_active_networks_from_admin_state_down_agent(False)
-
-    def test_dhcp_agent_keep_services_on(self):
-        self._test_get_active_networks_from_admin_state_down_agent(True)
-
-    def _take_down_agent_and_run_reschedule(self, host):
-        # age the agent's heartbeat so it is considered dead, then trigger
-        # the router rescheduling logic
-        self.adminContext.session.begin(subtransactions=True)
-        query = self.adminContext.session.query(agents_db.Agent)
-        agt = query.filter_by(host=host).first()
-        agt.heartbeat_timestamp = (
-            agt.heartbeat_timestamp - datetime.timedelta(hours=1))
-        self.adminContext.session.commit()
-
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-
-        plugin.reschedule_routers_from_down_agents()
-
-    def _set_agent_admin_state_up(self, host, state):
-        self.adminContext.session.begin(subtransactions=True)
-        query = self.adminContext.session.query(agents_db.Agent)
-        agt_db = query.filter_by(host=host).first()
-        agt_db.admin_state_up = state
-        self.adminContext.session.commit()
-
-    def test_router_rescheduler_catches_rpc_db_and_reschedule_exceptions(self):
-        with self.router():
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
-            self._register_agent_states()
-            # schedule the router to host A
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-
-            plugin = manager.NeutronManager.get_service_plugins().get(
-                service_constants.L3_ROUTER_NAT)
-            mock.patch.object(
-                plugin, 'reschedule_router',
-                side_effect=[
-                    db_exc.DBError(), oslo_messaging.RemoteError(),
-                    l3agentscheduler.RouterReschedulingFailed(router_id='f',
-                                                              agent_id='f'),
-                    ValueError('this raises'),
-                    Exception()
-                ]).start()
-            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # DBError
-            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # RemoteError
-            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # schedule err
-            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # Value error
-            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # Exception
-
-    def test_router_rescheduler_iterates_after_reschedule_failure(self):
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        l3_rpc_cb = l3_rpc.L3RpcCallback()
-        self._register_agent_states()
-        with self.router() as r1, self.router() as r2:
-            # schedule the routers to host A
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-
-            rs_mock = mock.patch.object(
-                plugin, 'reschedule_router',
-                side_effect=l3agentscheduler.RouterReschedulingFailed(
-                    router_id='f', agent_id='f'),
-            ).start()
-            self._take_down_agent_and_run_reschedule(L3_HOSTA)
-            # make sure both had a reschedule attempt even though first failed
-            rs_mock.assert_has_calls([mock.call(mock.ANY, r1['router']['id']),
-                                      mock.call(mock.ANY, r2['router']['id'])],
-                                     any_order=True)
-
-    def test_router_is_not_rescheduled_from_alive_agent(self):
-        with self.router():
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
-            self._register_agent_states()
-
-            # schedule the router to host A
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            with mock.patch('neutron.db.l3_agentschedulers_db.'
-                            'L3AgentSchedulerDbMixin.reschedule_router') as rr:
-                # take down some unrelated agent and run reschedule check
-                self._take_down_agent_and_run_reschedule(DHCP_HOSTC)
-                self.assertFalse(rr.called)
-
-    def test_router_is_not_rescheduled_if_agent_is_back_online(self):
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        l3_rpc_cb = l3_rpc.L3RpcCallback()
-        agent = helpers.register_l3_agent(host=L3_HOSTA)
-        with self.router(),\
-                self.router(),\
-                mock.patch.object(plugin, 'reschedule_router') as rs_mock,\
-                mock.patch.object(plugin, '_get_agent') as get_agent_mock:
-
-            # schedule the routers to the agent
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            self._take_down_agent_and_run_reschedule(L3_HOSTA)
-            # since _get_agent is mocked it returns a Mock object whose
-            # is_active is truthy, so no rescheduling is done
-            # _get_agent should be called only once: for the second router
-            # the alive agent's id is already in the cache
-            get_agent_mock.assert_called_once_with(mock.ANY, agent['id'])
-
-    def test_router_reschedule_from_dead_agent(self):
-        with self.router():
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
-            self._register_agent_states()
-
-            # schedule the router to host A
-            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            self._take_down_agent_and_run_reschedule(L3_HOSTA)
-
-            # B should now pick up the router
-            ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
-        self.assertEqual(ret_a, ret_b)
-
-    def test_router_no_reschedule_from_dead_admin_down_agent(self):
-        with self.router() as r:
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
-            self._register_agent_states()
-
-            # schedule the router to host A
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            self._set_agent_admin_state_up(L3_HOSTA, False)
-            self._take_down_agent_and_run_reschedule(L3_HOSTA)
-
-            # A should still have it even though it was inactive due to the
-            # admin_state being down
-            rab = l3_agentschedulers_db.RouterL3AgentBinding
-            binding = (self.adminContext.session.query(rab).
-                       filter(rab.router_id == r['router']['id']).first())
-            self.assertEqual(L3_HOSTA, binding.l3_agent.host)
-
-            # B should not pick up the router
-            ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
-            self.assertFalse(ret_b)
-
-    def test_router_is_not_rescheduled_from_dvr_agent(self):
-        with self.subnet() as s, \
-                mock.patch.object(
-                        self.l3plugin,
-                        'check_dvr_serviceable_ports_on_host') as port_exists:
-            net_id = s['subnet']['network_id']
-            self._set_net_external(net_id)
-            router = {'name': 'router1',
-                      'admin_state_up': True,
-                      'tenant_id': 'tenant_id',
-                      'external_gateway_info': {'network_id': net_id},
-                      'distributed': True}
-            r = self.l3plugin.create_router(
-                self.adminContext, {'router': router})
-            dvr_snat_agent, dvr_agent = self._register_dvr_agents()
-
-            port_exists.return_value = True
-            self.l3plugin.schedule_router(
-                self.adminContext, r['id'])
-            agents = self._list_l3_agents_hosting_router(r['id'])
-            self.assertEqual(2, len(agents['agents']))
-            self.assertIn(dvr_agent['host'],
-                          [a['host'] for a in agents['agents']])
-            # router should not be unscheduled from dvr agent
-            self._take_down_agent_and_run_reschedule(dvr_agent['host'])
-            agents = self._list_l3_agents_hosting_router(r['id'])
-            self.assertEqual(2, len(agents['agents']))
-            self.assertIn(dvr_agent['host'],
-                          [a['host'] for a in agents['agents']])
-
-            # another dvr_snat agent is needed to test that router is not
-            # unscheduled from dead dvr agent in case rescheduling between
-            # dvr_snat agents happens
-            helpers.register_l3_agent(
-                host='hostC', agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
-            self._take_down_agent_and_run_reschedule(dvr_snat_agent['host'])
-            agents = self._list_l3_agents_hosting_router(r['id'])
-            self.assertEqual(2, len(agents['agents']))
-            self.assertIn(dvr_agent['host'],
-                          [a['host'] for a in agents['agents']])
-
-    def test_router_reschedule_succeeded_after_failed_notification(self):
-        l3_plugin = (manager.NeutronManager.get_service_plugins()
-                     [service_constants.L3_ROUTER_NAT])
-        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
-        l3_rpc_cb = l3_rpc.L3RpcCallback()
-        self._register_agent_states()
-        with self.router() as router:
-            # schedule the router to host A
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            with mock.patch.object(
-                    l3_notifier, 'router_added_to_agent') as notification_mock:
-                notification_mock.side_effect = [
-                    oslo_messaging.MessagingTimeout, None]
-                self._take_down_agent_and_run_reschedule(L3_HOSTA)
-                self.assertEqual(
-                    2, l3_notifier.router_added_to_agent.call_count)
-                # make sure router was rescheduled even when first attempt
-                # failed to notify l3 agent
-                l3_agents = self._list_l3_agents_hosting_router(
-                    router['router']['id'])['agents']
-                self.assertEqual(1, len(l3_agents))
-                self.assertEqual(L3_HOSTB, l3_agents[0]['host'])
-
-    def test_router_reschedule_failed_notification_all_attempts(self):
-        l3_plugin = (manager.NeutronManager.get_service_plugins()
-                     [service_constants.L3_ROUTER_NAT])
-        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
-        l3_rpc_cb = l3_rpc.L3RpcCallback()
-        self._register_agent_states()
-        with self.router() as router:
-            # schedule the router to host A
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            with mock.patch.object(
-                    l3_notifier, 'router_added_to_agent') as notification_mock:
-                notification_mock.side_effect = oslo_messaging.MessagingTimeout
-                self._take_down_agent_and_run_reschedule(L3_HOSTA)
-                self.assertEqual(
-                    l3_agentschedulers_db.AGENT_NOTIFY_MAX_ATTEMPTS,
-                    l3_notifier.router_added_to_agent.call_count)
-                l3_agents = self._list_l3_agents_hosting_router(
-                    router['router']['id'])['agents']
-                self.assertEqual(0, len(l3_agents))
-
-    def test_router_auto_schedule_with_invalid_router(self):
-        with self.router() as router:
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
-            self._register_agent_states()
-        self._delete('routers', router['router']['id'])
-
-        # deleted router
-        ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
-                                       router_ids=[router['router']['id']])
-        self.assertFalse(ret_a)
-        # non-existent router
-        ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
-                                       router_ids=[uuidutils.generate_uuid()])
-        self.assertFalse(ret_a)
-
-    def test_router_auto_schedule_with_hosted(self):
-        with self.router() as router:
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
-            self._register_agent_states()
-            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
-            l3_agents = self._list_l3_agents_hosting_router(
-                router['router']['id'])
-            self.assertEqual(1, len(ret_a))
-            self.assertIn(router['router']['id'], [r['id'] for r in ret_a])
-            self.assertFalse(len(ret_b))
-        self.assertEqual(1, len(l3_agents['agents']))
-        self.assertEqual(L3_HOSTA, l3_agents['agents'][0]['host'])
-
-    def test_router_auto_schedule_restart_l3_agent(self):
-        with self.router():
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
-            self._register_agent_states()
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-
-    def test_router_auto_schedule_with_hosted_2(self):
-        # one agent hosts one router
-        l3_rpc_cb = l3_rpc.L3RpcCallback()
-        with self.router() as router1:
-            hosta_id = helpers.register_l3_agent(host=L3_HOSTA).id
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            self._disable_agent(hosta_id, admin_state_up=False)
-            with self.router() as router2:
-                hostb_id = helpers.register_l3_agent(host=L3_HOSTB).id
-                l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
-                l3_agents_1 = self._list_l3_agents_hosting_router(
-                    router1['router']['id'])
-                l3_agents_2 = self._list_l3_agents_hosting_router(
-                    router2['router']['id'])
-                hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
-                num_hosta_routers = len(hosta_routers['routers'])
-                hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id)
-                num_hostb_routers = len(hostb_routers['routers'])
-
-        self.assertEqual(1, num_hosta_routers)
-        self.assertEqual(1, num_hostb_routers)
-        self.assertEqual(1, len(l3_agents_1['agents']))
-        self.assertEqual(1, len(l3_agents_2['agents']))
-        self.assertEqual(L3_HOSTA, l3_agents_1['agents'][0]['host'])
-        self.assertEqual(L3_HOSTB, l3_agents_2['agents'][0]['host'])
-
-    def test_router_auto_schedule_with_disabled(self):
-        with self.router(), self.router():
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTA)
-            hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTB)
-            self._disable_agent(hosta_id)
-            # first agent will not host router since it is disabled
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            # second agent will host all the routers since first is disabled.
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
-            hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id)
-            num_hostb_routers = len(hostb_routers['routers'])
-            hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
-            num_hosta_routers = len(hosta_routers['routers'])
-        self.assertEqual(2, num_hostb_routers)
-        self.assertEqual(0, num_hosta_routers)
-
-    def test_router_auto_schedule_with_candidates(self):
-        with self.router() as router1, self.router() as router2:
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
-            agent = helpers.register_l3_agent(
-                host=L3_HOSTA, router_id=router1['router']['id'])
-            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            hosta_routers = self._list_routers_hosted_by_l3_agent(agent.id)
-            num_hosta_routers = len(hosta_routers['routers'])
-            l3_agents_1 = self._list_l3_agents_hosting_router(
-                router1['router']['id'])
-            l3_agents_2 = self._list_l3_agents_hosting_router(
-                router2['router']['id'])
-        # L3 agent will host only the compatible router.
-        self.assertEqual(1, num_hosta_routers)
-        self.assertEqual(1, len(l3_agents_1['agents']))
-        self.assertEqual(0, len(l3_agents_2['agents']))
-
-    def test_rpc_sync_routers(self):
-        l3_rpc_cb = l3_rpc.L3RpcCallback()
-        self._register_agent_states()
-
-        # No routers
-        ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-        self.assertEqual(0, len(ret_a))
-
-        with self.router() as v1, self.router() as v2, self.router() as v3:
-            routers = (v1, v2, v3)
-            router_ids = [r['router']['id'] for r in routers]
-
-            # Get all routers
-            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            self.assertEqual(3, len(ret_a))
-            self.assertEqual(set(router_ids), set([r['id'] for r in ret_a]))
-
-            # Get all routers (router_ids=None)
-            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
-                                           router_ids=None)
-            self.assertEqual(3, len(ret_a))
-            self.assertEqual(set(router_ids), set([r['id'] for r in ret_a]))
-
-            # Get router2 only
-            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
-                                           router_ids=[router_ids[1]])
-            self.assertEqual(1, len(ret_a))
-            self.assertIn(router_ids[1], [r['id'] for r in ret_a])
-
-            # Get router1 and router3
-            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
-                                           router_ids=[router_ids[0],
-                                                       router_ids[2]])
-            self.assertEqual(2, len(ret_a))
-            self.assertIn(router_ids[0], [r['id'] for r in ret_a])
-            self.assertIn(router_ids[2], [r['id'] for r in ret_a])
-
-    def test_router_auto_schedule_for_specified_routers(self):
-
-        def _sync_router_with_ids(router_ids, exp_synced, exp_hosted, host_id):
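-            # exp_synced: number of routers returned by this sync call
-            # exp_hosted: total routers bound to the agent afterwards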
-            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
-                                           router_ids=router_ids)
-            self.assertEqual(exp_synced, len(ret_a))
-            for router_id in router_ids:
-                self.assertIn(router_id, [r['id'] for r in ret_a])
-            host_routers = self._list_routers_hosted_by_l3_agent(host_id)
-            num_host_routers = len(host_routers['routers'])
-            self.assertEqual(exp_hosted, num_host_routers)
-
-        l3_rpc_cb = l3_rpc.L3RpcCallback()
-        self._register_agent_states()
-        hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA)
-
-        with self.router() as v1,\
-                self.router() as v2,\
-                self.router() as v3,\
-                self.router() as v4:
-            routers = (v1, v2, v3, v4)
-            router_ids = [r['router']['id'] for r in routers]
-            # Sync router1 (router1 is scheduled)
-            _sync_router_with_ids([router_ids[0]], 1, 1, hosta_id)
-            # Sync router1 only (no router is scheduled)
-            _sync_router_with_ids([router_ids[0]], 1, 1, hosta_id)
-            # Schedule router2
-            _sync_router_with_ids([router_ids[1]], 1, 2, hosta_id)
-            # Sync router2 and router4 (router4 is scheduled)
-            _sync_router_with_ids([router_ids[1], router_ids[3]],
-                                  2, 3, hosta_id)
-            # Sync all routers (router3 is scheduled)
-            _sync_router_with_ids(router_ids, 4, 4, hosta_id)
-
-    def test_router_schedule_with_candidates(self):
-        with self.router() as router1,\
-                self.router() as router2,\
-                self.subnet() as subnet1,\
-                self.subnet(cidr='10.0.3.0/24') as subnet2:
-            agent = helpers.register_l3_agent(
-                host=L3_HOSTA, router_id=router1['router']['id'])
-            self._router_interface_action('add',
-                                          router1['router']['id'],
-                                          subnet1['subnet']['id'],
-                                          None)
-            self._router_interface_action('add',
-                                          router2['router']['id'],
-                                          subnet2['subnet']['id'],
-                                          None)
-            hosta_routers = self._list_routers_hosted_by_l3_agent(agent.id)
-            num_hosta_routers = len(hosta_routers['routers'])
-            l3_agents_1 = self._list_l3_agents_hosting_router(
-                router1['router']['id'])
-            l3_agents_2 = self._list_l3_agents_hosting_router(
-                router2['router']['id'])
-            # safe cleanup
-            self._router_interface_action('remove',
-                                          router1['router']['id'],
-                                          subnet1['subnet']['id'],
-                                          None)
-            self._router_interface_action('remove',
-                                          router2['router']['id'],
-                                          subnet2['subnet']['id'],
-                                          None)
-
-        # L3 agent will host only the compatible router.
-        self.assertEqual(1, num_hosta_routers)
-        self.assertEqual(1, len(l3_agents_1['agents']))
-        self.assertEqual(0, len(l3_agents_2['agents']))
-
-    def test_router_without_l3_agents(self):
-        with self.subnet() as s:
-            self._set_net_external(s['subnet']['network_id'])
-            data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
-            data['router']['name'] = 'router1'
-            data['router']['external_gateway_info'] = {
-                'network_id': s['subnet']['network_id']}
-            router_req = self.new_create_request('routers', data, self.fmt)
-            res = router_req.get_response(self.ext_api)
-            router = self.deserialize(self.fmt, res)
-            l3agents = (
-                self.l3plugin.get_l3_agents_hosting_routers(
-                    self.adminContext, [router['router']['id']]))
-            self._delete('routers', router['router']['id'])
-        self.assertEqual(0, len(l3agents))
-
-    def test_dvr_router_scheduling_to_all_needed_agents(self):
-        self._register_dvr_agents()
-        with self.subnet() as s:
-            net_id = s['subnet']['network_id']
-            self._set_net_external(net_id)
-
-            router = {'name': 'router1',
-                      'external_gateway_info': {'network_id': net_id},
-                      'tenant_id': 'tenant_id',
-                      'admin_state_up': True,
-                      'distributed': True}
-            r = self.l3plugin.create_router(self.adminContext,
-                                            {'router': router})
-            with mock.patch.object(
-                    self.l3plugin,
-                    'check_dvr_serviceable_ports_on_host') as ports_exist:
-                # emulate DVR serviceable ports existing on the compute node
-                ports_exist.return_value = True
-                self.l3plugin.schedule_router(
-                    self.adminContext, r['id'])
-
-        l3agents = self._list_l3_agents_hosting_router(r['id'])
-        self.assertEqual(2, len(l3agents['agents']))
-        self.assertEqual({'dvr', 'dvr_snat'},
-                         set([a['configurations']['agent_mode'] for a in
-                              l3agents['agents']]))
-
-    def test_dvr_router_snat_scheduling_late_ext_gw_add(self):
-        """Test snat scheduling for the case when dvr router is already
-        scheduled to all dvr_snat agents and then external gateway is added.
-        """
-        helpers.register_l3_agent(
-            host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
-        helpers.register_l3_agent(
-            host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
-        with self.subnet() as s_int,\
-                self.subnet(cidr='20.0.0.0/24') as s_ext:
-            net_id = s_ext['subnet']['network_id']
-            self._set_net_external(net_id)
-
-            router = {'name': 'router1',
-                      'tenant_id': 'tenant_id',
-                      'admin_state_up': True,
-                      'distributed': True}
-            r = self.l3plugin.create_router(self.adminContext,
-                                            {'router': router})
-            # add router interface first
-            self.l3plugin.add_router_interface(self.adminContext, r['id'],
-                {'subnet_id': s_int['subnet']['id']})
-            # Check that the router is not scheduled to any of the agents
-            l3agents = self._list_l3_agents_hosting_router(r['id'])
-            self.assertEqual(0, len(l3agents['agents']))
-            # check that SNAT is not scheduled, as the router is not
-            # connected to an external network
-            snat_agents = self.l3plugin.get_snat_bindings(
-                self.adminContext, [r['id']])
-            self.assertEqual(0, len(snat_agents))
-
-            # connect router to external network
-            self.l3plugin.update_router(self.adminContext, r['id'],
-                {'router': {'external_gateway_info': {'network_id': net_id}}})
-            # the router should now be scheduled to one of the dvr_snat agents
-            l3agents = self._list_l3_agents_hosting_router(r['id'])
-            self.assertEqual(1, len(l3agents['agents']))
-            # the SNAT portion should now be scheduled, as the router is
-            # connected to an external network
-            snat_agents = self.l3plugin.get_snat_bindings(
-                self.adminContext, [r['id']])
-            self.assertEqual(1, len(snat_agents))
-
-    def test_dvr_router_csnat_rescheduling(self):
-        helpers.register_l3_agent(
-            host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
-        helpers.register_l3_agent(
-            host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
-        with self.subnet() as s:
-            net_id = s['subnet']['network_id']
-            self._set_net_external(net_id)
-
-            router = {'name': 'router1',
-                      'external_gateway_info': {'network_id': net_id},
-                      'tenant_id': 'tenant_id',
-                      'admin_state_up': True,
-                      'distributed': True}
-            r = self.l3plugin.create_router(self.adminContext,
-                                            {'router': router})
-            self.l3plugin.schedule_router(
-                    self.adminContext, r['id'])
-            l3agents = self._list_l3_agents_hosting_router(r['id'])
-            self.assertEqual(1, len(l3agents['agents']))
-            csnat_agent_host = self.l3plugin.get_snat_bindings(
-                self.adminContext, [r['id']])[0]['l3_agent']['host']
-            self._take_down_agent_and_run_reschedule(csnat_agent_host)
-            l3agents = self._list_l3_agents_hosting_router(r['id'])
-            self.assertEqual(1, len(l3agents['agents']))
-            new_csnat_agent_host = self.l3plugin.get_snat_bindings(
-                self.adminContext, [r['id']])[0]['l3_agent']['host']
-            self.assertNotEqual(csnat_agent_host, new_csnat_agent_host)
-
-    def test_dvr_router_csnat_manual_rescheduling(self):
-        helpers.register_l3_agent(
-            host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
-        helpers.register_l3_agent(
-            host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
-        with self.subnet() as s:
-            net_id = s['subnet']['network_id']
-            self._set_net_external(net_id)
-
-            router = {'name': 'router1',
-                      'external_gateway_info': {'network_id': net_id},
-                      'tenant_id': 'tenant_id',
-                      'admin_state_up': True,
-                      'distributed': True}
-            r = self.l3plugin.create_router(self.adminContext,
-                                            {'router': router})
-            self.l3plugin.schedule_router(
-                    self.adminContext, r['id'])
-            l3agents = self.l3plugin.list_l3_agents_hosting_router(
-                self.adminContext, r['id'])
-            self.assertEqual(1, len(l3agents['agents']))
-            csnat_agent = self.l3plugin.get_snat_bindings(
-                self.adminContext, [r['id']])[0]['l3_agent']
-            # NOTE: Removing the router from the l3_agent will remove all
-            # the namespaces, since there are no other serviceable ports on
-            # the node that require them.
-            self.l3plugin.remove_router_from_l3_agent(
-                self.adminContext, csnat_agent['id'], r['id'])
-
-            l3agents = self.l3plugin.list_l3_agents_hosting_router(
-                self.adminContext, r['id'])
-            self.assertEqual(0, len(l3agents['agents']))
-            self.assertFalse(self.l3plugin.get_snat_bindings(
-                self.adminContext, [r['id']]))
-
-            self.l3plugin.add_router_to_l3_agent(
-                self.adminContext, csnat_agent['id'], r['id'])
-
-            l3agents = self.l3plugin.list_l3_agents_hosting_router(
-                self.adminContext, r['id'])
-            self.assertEqual(1, len(l3agents['agents']))
-            new_csnat_agent = self.l3plugin.get_snat_bindings(
-                self.adminContext, [r['id']])[0]['l3_agent']
-            self.assertEqual(csnat_agent['id'], new_csnat_agent['id'])
-
-    def test_router_sync_data(self):
-        with self.subnet() as s1,\
-                self.subnet(cidr='10.0.2.0/24') as s2,\
-                self.subnet(cidr='10.0.3.0/24') as s3:
-            self._register_agent_states()
-            self._set_net_external(s1['subnet']['network_id'])
-            data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
-            data['router']['name'] = 'router1'
-            data['router']['external_gateway_info'] = {
-                'network_id': s1['subnet']['network_id']}
-            router_req = self.new_create_request('routers', data, self.fmt)
-            res = router_req.get_response(self.ext_api)
-            router = self.deserialize(self.fmt, res)
-            self._router_interface_action('add',
-                                          router['router']['id'],
-                                          s2['subnet']['id'],
-                                          None)
-            self._router_interface_action('add',
-                                          router['router']['id'],
-                                          s3['subnet']['id'],
-                                          None)
-            l3agents = self._list_l3_agents_hosting_router(
-                router['router']['id'])
-            self.assertEqual(1, len(l3agents['agents']))
-            agents = self._list_agents()
-            another_l3_agent_id = None
-            another_l3_agent_host = None
-            default = l3agents['agents'][0]['id']
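-            # Find another L3 agent that is not the one currently hosting
-            # the router.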
-            for com in agents['agents']:
-                if (com['id'] != default and
-                    com['agent_type'] == constants.AGENT_TYPE_L3):
-                    another_l3_agent_id = com['id']
-                    another_l3_agent_host = com['host']
-                    break
-            self.assertIsNotNone(another_l3_agent_id)
-            self._add_router_to_l3_agent(another_l3_agent_id,
-                                         router['router']['id'],
-                                         expected_code=exc.HTTPConflict.code)
-            self._remove_router_from_l3_agent(default,
-                                              router['router']['id'])
-            self._add_router_to_l3_agent(another_l3_agent_id,
-                                         router['router']['id'])
-            l3agents = self._list_l3_agents_hosting_router(
-                router['router']['id'])
-            self.assertEqual(another_l3_agent_host,
-                             l3agents['agents'][0]['host'])
-            self._remove_router_from_l3_agent(another_l3_agent_id,
-                                              router['router']['id'])
-            self._router_interface_action('remove',
-                                          router['router']['id'],
-                                          s2['subnet']['id'],
-                                          None)
-            l3agents = self._list_l3_agents_hosting_router(
-                router['router']['id'])
-            self.assertEqual(1,
-                             len(l3agents['agents']))
-            self._router_interface_action('remove',
-                                          router['router']['id'],
-                                          s3['subnet']['id'],
-                                          None)
-            self._delete('routers', router['router']['id'])
-
-    def _test_router_add_to_l3_agent(self, admin_state_up=True):
-        with self.router() as router1:
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTA)
-            if not admin_state_up:
-                self._set_agent_admin_state_up(L3_HOSTA, False)
-            num_before_add = len(
-                self._list_routers_hosted_by_l3_agent(
-                    hosta_id)['routers'])
-            self._add_router_to_l3_agent(hosta_id,
-                                         router1['router']['id'])
-            hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTB)
-            self._add_router_to_l3_agent(hostb_id,
-                                         router1['router']['id'],
-                                         expected_code=exc.HTTPConflict.code)
-            num_after_add = len(
-                self._list_routers_hosted_by_l3_agent(
-                    hosta_id)['routers'])
-        self.assertEqual(0, num_before_add)
-        self.assertEqual(1, num_after_add)
-
-    def test_router_add_to_l3_agent(self):
-        self._test_router_add_to_l3_agent()
-
-    def test_router_add_to_l3_agent_with_admin_state_down(self):
-        cfg.CONF.set_override(
-            'enable_services_on_agents_with_admin_state_down', True)
-        self._test_router_add_to_l3_agent(admin_state_up=False)
-
-    def test_router_add_to_l3_agent_two_times(self):
-        with self.router() as router1:
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTA)
-            self._add_router_to_l3_agent(hosta_id,
-                                         router1['router']['id'])
-            # scheduling twice on the same agent is fine
-            self._add_router_to_l3_agent(hosta_id,
-                                         router1['router']['id'])
-
-    def test_router_add_to_two_l3_agents(self):
-        with self.router() as router1:
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTA)
-            hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTB)
-            self._add_router_to_l3_agent(hosta_id,
-                                         router1['router']['id'])
-            self._add_router_to_l3_agent(hostb_id,
-                                         router1['router']['id'],
-                                         expected_code=exc.HTTPConflict.code)
-
-    def test_router_policy(self):
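-        # Non-admin requests to the agent scheduler extension must be
-        # rejected with 403 Forbidden.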
-        with self.router() as router1:
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTA)
-            self._list_routers_hosted_by_l3_agent(
-                hosta_id, expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
-            self._add_router_to_l3_agent(
-                hosta_id, router1['router']['id'],
-                expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
-            self._add_router_to_l3_agent(
-                hosta_id, router1['router']['id'])
-            self._remove_router_from_l3_agent(
-                hosta_id, router1['router']['id'],
-                expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
-            self._list_l3_agents_hosting_router(
-                router1['router']['id'],
-                expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
-
-    def _test_sync_routers_from_admin_state_down_agent(self, keep_services):
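-        # With keep_services enabled, routers remain synced to an agent
-        # even after its admin_state_up is set to False.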
-        if keep_services:
-            cfg.CONF.set_override(
-                'enable_services_on_agents_with_admin_state_down', True)
-        l3_rpc_cb = l3_rpc.L3RpcCallback()
-        self._register_agent_states()
-        hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA)
-        with self.router() as router:
-            self._add_router_to_l3_agent(hosta_id,
-                                         router['router']['id'])
-            routers = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            self.assertEqual(1, len(routers))
-            self._set_agent_admin_state_up(L3_HOSTA, False)
-            routers = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
-            if keep_services:
-                self.assertEqual(1, len(routers))
-            else:
-                self.assertEqual(0, len(routers))
-
-    def test_l3_agent_keep_services_off(self):
-        self._test_sync_routers_from_admin_state_down_agent(False)
-
-    def test_l3_agent_keep_services_on(self):
-        self._test_sync_routers_from_admin_state_down_agent(True)
-
-    def test_list_routers_hosted_by_l3_agent_with_invalid_agent(self):
-        invalid_agentid = 'non_existing_agent'
-        self._list_routers_hosted_by_l3_agent(invalid_agentid,
-                                              exc.HTTPNotFound.code)
-
-    def test_list_networks_hosted_by_dhcp_agent_with_invalid_agent(self):
-        invalid_agentid = 'non_existing_agent'
-        self._list_networks_hosted_by_dhcp_agent(invalid_agentid,
-                                                 exc.HTTPNotFound.code)
-
-
-class OvsDhcpAgentNotifierTestCase(test_l3.L3NatTestCaseMixin,
-                                   test_agent.AgentDBTestMixIn,
-                                   AgentSchedulerTestMixIn,
-                                   test_plugin.NeutronDbPluginV2TestCase):
-    plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-
-    def setUp(self):
-        self.useFixture(tools.AttributeMapMemento())
-        super(OvsDhcpAgentNotifierTestCase, self).setUp(self.plugin_str)
-        self.dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
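-        # Stub out the RPC cast so tests can assert on the notifications
-        # that would have been sent to DHCP agents.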
-        self.dhcp_notifier_cast = mock.patch(
-            'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
-            'DhcpAgentNotifyAPI._cast_message').start()
-        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
-        self.adminContext = context.get_admin_context()
-        # Add the resources to the global attribute map. This is done here
-        # because the setup process won't initialize the main API router,
-        # which extends the global attribute map.
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            agent.RESOURCE_ATTRIBUTE_MAP)
-        fake_notifier.reset()
-
-    def test_network_add_to_dhcp_agent_notification(self):
-        with self.network() as net1:
-            network_id = net1['network']['id']
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTA)
-            self._add_network_to_dhcp_agent(hosta_id,
-                                            network_id)
-        self.dhcp_notifier_cast.assert_called_with(
-                mock.ANY, 'network_create_end',
-                {'network': {'id': network_id}}, DHCP_HOSTA)
-        notifications = fake_notifier.NOTIFICATIONS
-        expected_event_type = 'dhcp_agent.network.add'
-        self._assert_notify(notifications, expected_event_type)
-
-    def test_network_remove_from_dhcp_agent_notification(self):
-        with self.network() as net1:
-            network_id = net1['network']['id']
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                          DHCP_HOSTA)
-            self._add_network_to_dhcp_agent(hosta_id,
-                                            network_id)
-
-        self._remove_network_from_dhcp_agent(hosta_id,
-                                             network_id)
-        self.dhcp_notifier_cast.assert_called_with(
-                mock.ANY, 'network_delete_end',
-                {'network_id': network_id}, DHCP_HOSTA)
-        notifications = fake_notifier.NOTIFICATIONS
-        expected_event_type = 'dhcp_agent.network.remove'
-        self._assert_notify(notifications, expected_event_type)
-
-    def test_agent_updated_dhcp_agent_notification(self):
-        self._register_agent_states()
-        hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
-                                      DHCP_HOSTA)
-        self._disable_agent(hosta_id, admin_state_up=False)
-
-        self.dhcp_notifier_cast.assert_called_with(
-                mock.ANY, 'agent_updated',
-                {'admin_state_up': False}, DHCP_HOSTA)
-
-    def _network_port_create(
-            self, hosts, gateway=attributes.ATTR_NOT_SPECIFIED, owner=None):
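-        # Register a DHCP agent per host, then create a network, a subnet
-        # and a port (optionally with the given device owner) and return
-        # all three.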
-        for host in hosts:
-            helpers.register_dhcp_agent(host)
-        with self.network() as net1:
-            with self.subnet(network=net1,
-                             gateway_ip=gateway) as subnet1:
-                if owner:
-                    with self.port(subnet=subnet1,
-                                   device_owner=owner) as port:
-                        return [net1, subnet1, port]
-                else:
-                    with self.port(subnet=subnet1) as port:
-                        return [net1, subnet1, port]
-
-    def _notification_mocks(self, hosts, net, subnet, port):
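-        # Expected _cast_message calls per host, in creation order:
-        # network_create_end, subnet_create_end, then port_create_end.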
-        host_calls = {}
-        for host in hosts:
-            expected_calls = [
-                mock.call(
-                    mock.ANY,
-                    'network_create_end',
-                    {'network': {'id': net['network']['id']}},
-                    host),
-                mock.call(
-                    mock.ANY,
-                    'subnet_create_end',
-                    subnet,
-                    host, 'dhcp_agent'),
-                mock.call(
-                    mock.ANY,
-                    'port_create_end',
-                    {'port': port['port']},
-                    host, 'dhcp_agent')]
-            host_calls[host] = expected_calls
-        return host_calls
-
-    def test_network_port_create_notification(self):
-        hosts = [DHCP_HOSTA]
-        net, subnet, port = self._network_port_create(hosts)
-        expected_calls = self._notification_mocks(hosts, net, subnet, port)
-        self.assertEqual(
-            expected_calls[DHCP_HOSTA], self.dhcp_notifier_cast.call_args_list)
-
-    def test_network_ha_port_create_notification(self):
-        cfg.CONF.set_override('dhcp_agents_per_network', 2)
-        hosts = [DHCP_HOSTA, DHCP_HOSTC]
-        net, subnet, port = self._network_port_create(hosts)
-        expected_calls = self._notification_mocks(hosts, net, subnet, port)
-        for expected in expected_calls[DHCP_HOSTA]:
-            self.assertIn(expected, self.dhcp_notifier_cast.call_args_list)
-        for expected in expected_calls[DHCP_HOSTC]:
-            self.assertIn(expected, self.dhcp_notifier_cast.call_args_list)
-
-    def _is_schedule_network_called(self, device_id):
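-        # Create a port with the given device_id and report whether the
-        # DHCP notifier attempted to schedule the network for it.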
-        plugin = manager.NeutronManager.get_plugin()
-        notifier = plugin.agent_notifiers[constants.AGENT_TYPE_DHCP]
-        with self.subnet() as subnet,\
-                mock.patch.object(plugin,
-                                  'get_dhcp_agents_hosting_networks',
-                                  return_value=[]),\
-                mock.patch.object(notifier,
-                                  '_schedule_network',
-                                  return_value=[]) as mock_sched:
-            with self.port(subnet=subnet, device_id=device_id):
-                return mock_sched.called
-
-    def test_reserved_dhcp_port_creation(self):
-        device_id = constants.DEVICE_ID_RESERVED_DHCP_PORT
-        self.assertFalse(self._is_schedule_network_called(device_id))
-
-    def test_unreserved_dhcp_port_creation(self):
-        device_id = 'not_reserved'
-        self.assertTrue(self._is_schedule_network_called(device_id))
-
-
-class OvsL3AgentNotifierTestCase(test_l3.L3NatTestCaseMixin,
-                                 test_agent.AgentDBTestMixIn,
-                                 AgentSchedulerTestMixIn,
-                                 test_plugin.NeutronDbPluginV2TestCase):
-    plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-    l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
-                 'TestL3NatAgentSchedulingServicePlugin')
-
-    def setUp(self):
-        self.dhcp_notifier_cls_p = mock.patch(
-            'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
-            'DhcpAgentNotifyAPI')
-        self.dhcp_notifier = mock.Mock(name='dhcp_notifier')
-        self.dhcp_notifier_cls = self.dhcp_notifier_cls_p.start()
-        self.dhcp_notifier_cls.return_value = self.dhcp_notifier
-
-        self.useFixture(tools.AttributeMapMemento())
-
-        if self.l3_plugin:
-            service_plugins = {'l3_plugin_name': self.l3_plugin}
-        else:
-            service_plugins = None
-        super(OvsL3AgentNotifierTestCase, self).setUp(
-            self.plugin_str, service_plugins=service_plugins)
-        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
-        self.adminContext = context.get_admin_context()
-        # Add the resources to the global attribute map. This is done here
-        # because the setup process won't initialize the main API router,
-        # which extends the global attribute map.
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            agent.RESOURCE_ATTRIBUTE_MAP)
-        fake_notifier.reset()
-
-    def test_router_add_to_l3_agent_notification(self):
-        l3_plugin = (manager.NeutronManager.get_service_plugins()
-                     [service_constants.L3_ROUTER_NAT])
-        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
-        with mock.patch.object(
-            l3_notifier.client,
-            'prepare',
-            return_value=l3_notifier.client) as mock_prepare,\
-                mock.patch.object(l3_notifier.client, 'call') as mock_call,\
-                self.router() as router1:
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTA)
-            self._add_router_to_l3_agent(hosta_id,
-                                         router1['router']['id'])
-            routers = [router1['router']['id']]
-            mock_prepare.assert_called_with(server='hosta')
-            mock_call.assert_called_with(
-                mock.ANY, 'router_added_to_agent', payload=routers)
-            notifications = fake_notifier.NOTIFICATIONS
-            expected_event_type = 'l3_agent.router.add'
-            self._assert_notify(notifications, expected_event_type)
-
-    def test_router_remove_from_l3_agent_notification(self):
-        l3_plugin = (manager.NeutronManager.get_service_plugins()
-                     [service_constants.L3_ROUTER_NAT])
-        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
-        with mock.patch.object(
-            l3_notifier.client,
-            'prepare',
-            return_value=l3_notifier.client) as mock_prepare,\
-                mock.patch.object(l3_notifier.client, 'cast') as mock_cast,\
-                mock.patch.object(l3_notifier.client, 'call'),\
-                self.router() as router1:
-            self._register_agent_states()
-            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
-                                          L3_HOSTA)
-            self._add_router_to_l3_agent(hosta_id,
-                                         router1['router']['id'])
-            self._remove_router_from_l3_agent(hosta_id,
-                                              router1['router']['id'])
-            mock_prepare.assert_called_with(server='hosta')
-            mock_cast.assert_called_with(
-                    mock.ANY, 'router_removed_from_agent',
-                    payload={'router_id': router1['router']['id']})
-            notifications = fake_notifier.NOTIFICATIONS
-            expected_event_type = 'l3_agent.router.remove'
-            self._assert_notify(notifications, expected_event_type)
-
-    def test_agent_updated_l3_agent_notification(self):
-        l3_plugin = (manager.NeutronManager.get_service_plugins()
-                     [service_constants.L3_ROUTER_NAT])
-        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
-        with mock.patch.object(
-            l3_notifier.client,
-            'prepare',
-            return_value=l3_notifier.client) as mock_prepare,\
-                mock.patch.object(l3_notifier.client, 'cast') as mock_cast:
-            agent_id = helpers.register_l3_agent(L3_HOSTA).id
-            self._disable_agent(agent_id, admin_state_up=False)
-
-            mock_prepare.assert_called_with(server=L3_HOSTA)
-
-            mock_cast.assert_called_with(
-                mock.ANY, 'agent_updated', payload={'admin_state_up': False})
diff --git a/neutron/tests/unit/db/test_allowedaddresspairs_db.py b/neutron/tests/unit/db/test_allowedaddresspairs_db.py
deleted file mode 100644
index 491e773..0000000
--- a/neutron/tests/unit/db/test_allowedaddresspairs_db.py
+++ /dev/null
@@ -1,321 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_config import cfg
-from webob import exc as web_exc
-
-from neutron.api.v2 import attributes as attr
-from neutron.db import allowedaddresspairs_db as addr_pair_db
-from neutron.db import db_base_plugin_v2
-from neutron.db import portsecurity_db
-from neutron.extensions import allowedaddresspairs as addr_pair
-from neutron.extensions import portsecurity as psec
-from neutron.extensions import securitygroup as secgroup
-from neutron import manager
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-
-DB_PLUGIN_KLASS = ('neutron.tests.unit.db.test_allowedaddresspairs_db.'
-                   'AllowedAddressPairTestPlugin')
-
-
-class AllowedAddressPairTestCase(
-        test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-    def setUp(self, plugin=None, ext_mgr=None):
-        super(AllowedAddressPairTestCase, self).setUp(plugin)
-
-        # Check if a plugin supports security groups
-        plugin_obj = manager.NeutronManager.get_plugin()
-        self._skip_port_security = ('port-security' not in
-                                    plugin_obj.supported_extension_aliases)
-
-
-class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin,
-                                   db_base_plugin_v2.NeutronDbPluginV2,
-                                   addr_pair_db.AllowedAddressPairsMixin):
-
-    """Test plugin that implements necessary calls on create/delete port for
-    associating ports with port security and allowed address pairs.
-    """
-
-    supported_extension_aliases = ["allowed-address-pairs"]
-
-    def create_port(self, context, port):
-        p = port['port']
-        with context.session.begin(subtransactions=True):
-            neutron_db = super(AllowedAddressPairTestPlugin, self).create_port(
-                context, port)
-            p.update(neutron_db)
-            if attr.is_attr_set(p.get(addr_pair.ADDRESS_PAIRS)):
-                self._process_create_allowed_address_pairs(
-                    context, p,
-                    p[addr_pair.ADDRESS_PAIRS])
-            else:
-                p[addr_pair.ADDRESS_PAIRS] = None
-
-        return port['port']
-
-    def update_port(self, context, id, port):
-        delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
-            port)
-        has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
-
-        with context.session.begin(subtransactions=True):
-            ret_port = super(AllowedAddressPairTestPlugin, self).update_port(
-                context, id, port)
-            # copy values over - but not fixed_ips
-            port['port'].pop('fixed_ips', None)
-            ret_port.update(port['port'])
-
-            if (delete_addr_pairs or has_addr_pairs):
-                # delete address pairs and re-add them
-                self._delete_allowed_address_pairs(context, id)
-                self._process_create_allowed_address_pairs(
-                    context, ret_port,
-                    ret_port[addr_pair.ADDRESS_PAIRS])
-
-        return ret_port
-
-
-class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase):
-    def setUp(self, plugin=None, ext_mgr=None):
-        plugin = plugin or DB_PLUGIN_KLASS
-        super(AllowedAddressPairDBTestCase,
-              self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-
-
-class TestAllowedAddressPairs(AllowedAddressPairDBTestCase):
-
-    def test_create_port_allowed_address_pairs_bad_format(self):
-        with self.network() as net:
-            bad_values = [False, True, None, 1.1, 1]
-            for value in bad_values:
-                self._create_port(
-                    self.fmt, net['network']['id'],
-                    expected_res_status=web_exc.HTTPBadRequest.code,
-                    arg_list=(addr_pair.ADDRESS_PAIRS,),
-                    allowed_address_pairs=value)
-
-    def test_create_port_allowed_address_pairs(self):
-        with self.network() as net:
-            address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                              'ip_address': '10.0.0.1'}]
-            res = self._create_port(self.fmt, net['network']['id'],
-                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
-                                    allowed_address_pairs=address_pairs)
-            port = self.deserialize(self.fmt, res)
-            self.assertEqual(address_pairs,
-                             port['port'][addr_pair.ADDRESS_PAIRS])
-            self._delete('ports', port['port']['id'])
-
-    def test_create_port_security_true_allowed_address_pairs(self):
-        if self._skip_port_security:
-            self.skipTest("Plugin does not implement port-security extension")
-
-        with self.network() as net:
-            address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                              'ip_address': '10.0.0.1'}]
-            res = self._create_port(self.fmt, net['network']['id'],
-                                    arg_list=('port_security_enabled',
-                                              addr_pair.ADDRESS_PAIRS,),
-                                    port_security_enabled=True,
-                                    allowed_address_pairs=address_pairs)
-            port = self.deserialize(self.fmt, res)
-            self.assertTrue(port['port'][psec.PORTSECURITY])
-            self.assertEqual(address_pairs,
-                             port['port'][addr_pair.ADDRESS_PAIRS])
-            self._delete('ports', port['port']['id'])
-
-    def test_create_port_security_false_allowed_address_pairs(self):
-        if self._skip_port_security:
-            self.skipTest("Plugin does not implement port-security extension")
-
-        with self.network() as net:
-            address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                              'ip_address': '10.0.0.1'}]
-            res = self._create_port(self.fmt, net['network']['id'],
-                                    arg_list=('port_security_enabled',
-                                              addr_pair.ADDRESS_PAIRS,),
-                                    port_security_enabled=False,
-                                    allowed_address_pairs=address_pairs)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(409, res.status_int)
-
-            address_pairs = []
-            res = self._create_port(self.fmt, net['network']['id'],
-                                    arg_list=('port_security_enabled',
-                                              addr_pair.ADDRESS_PAIRS,),
-                                    port_security_enabled=False,
-                                    allowed_address_pairs=address_pairs)
-            port = self.deserialize(self.fmt, res)
-            self.assertFalse(port['port'][psec.PORTSECURITY])
-            self.assertEqual(address_pairs,
-                             port['port'][addr_pair.ADDRESS_PAIRS])
-            self._delete('ports', port['port']['id'])
-
-    def test_create_port_bad_mac(self):
-        address_pairs = [{'mac_address': 'invalid_mac',
-                          'ip_address': '10.0.0.1'}]
-        self._create_port_with_address_pairs(address_pairs, 400)
-
-    def test_create_port_bad_ip(self):
-        address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                          'ip_address': '10.0.0.1222'}]
-        self._create_port_with_address_pairs(address_pairs, 400)
-
-    def test_create_missing_ip_field(self):
-        address_pairs = [{'mac_address': '00:00:00:00:00:01'}]
-        self._create_port_with_address_pairs(address_pairs, 400)
-
-    def test_create_duplicate_mac_ip(self):
-        address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                          'ip_address': '10.0.0.1'},
-                         {'mac_address': '00:00:00:00:00:01',
-                          'ip_address': '10.0.0.1'}]
-        self._create_port_with_address_pairs(address_pairs, 400)
-
-    def test_more_than_max_allowed_address_pair(self):
-        cfg.CONF.set_default('max_allowed_address_pair', 3)
-        address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                          'ip_address': '10.0.0.1'},
-                         {'mac_address': '00:00:00:00:00:02',
-                          'ip_address': '10.0.0.2'},
-                         {'mac_address': '00:00:00:00:00:03',
-                          'ip_address': '10.0.0.3'},
-                         {'mac_address': '00:00:00:00:00:04',
-                          'ip_address': '10.0.0.4'}]
-        self._create_port_with_address_pairs(address_pairs, 400)
-
-    def test_equal_to_max_allowed_address_pair(self):
-        cfg.CONF.set_default('max_allowed_address_pair', 3)
-        address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                          'ip_address': '10.0.0.1'},
-                         {'mac_address': '00:00:00:00:00:02',
-                          'ip_address': '10.0.0.2'},
-                         {'mac_address': '00:00:00:00:00:03',
-                          'ip_address': '10.0.0.3'}]
-        self._create_port_with_address_pairs(address_pairs, 201)
-
-    def test_create_overlap_with_fixed_ip(self):
-        address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                          'ip_address': '10.0.0.2'}]
-        with self.network() as network:
-            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
-                fixed_ips = [{'subnet_id': subnet['subnet']['id'],
-                              'ip_address': '10.0.0.2'}]
-                res = self._create_port(self.fmt, network['network']['id'],
-                                        arg_list=(addr_pair.ADDRESS_PAIRS,
-                                        'fixed_ips'),
-                                        allowed_address_pairs=address_pairs,
-                                        fixed_ips=fixed_ips)
-                self.assertEqual(201, res.status_int)
-                port = self.deserialize(self.fmt, res)
-                self._delete('ports', port['port']['id'])
-
-    def test_create_port_extra_args(self):
-        address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                          'ip_address': '10.0.0.1',
-                          'icbb': 'agreed'}]
-        self._create_port_with_address_pairs(address_pairs, 400)
-
-    def _create_port_with_address_pairs(self, address_pairs, ret_code):
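-        # Create a port with the given address pairs, assert the expected
-        # status code, and clean up the port on success.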
-        with self.network() as net:
-            res = self._create_port(self.fmt, net['network']['id'],
-                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
-                                    allowed_address_pairs=address_pairs)
-            port = self.deserialize(self.fmt, res)
-            self.assertEqual(ret_code, res.status_int)
-            if ret_code == 201:
-                self._delete('ports', port['port']['id'])
-
-    def test_update_add_address_pairs(self):
-        with self.network() as net:
-            res = self._create_port(self.fmt, net['network']['id'])
-            port = self.deserialize(self.fmt, res)
-            address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                              'ip_address': '10.0.0.1'}]
-            update_port = {'port': {addr_pair.ADDRESS_PAIRS:
-                                    address_pairs}}
-            req = self.new_update_request('ports', update_port,
-                                          port['port']['id'])
-            port = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(address_pairs,
-                             port['port'][addr_pair.ADDRESS_PAIRS])
-            self._delete('ports', port['port']['id'])
-
-    def test_create_address_gets_port_mac(self):
-        with self.network() as net:
-            address_pairs = [{'ip_address': '23.23.23.23'}]
-            res = self._create_port(self.fmt, net['network']['id'],
-                                    arg_list=('port_security_enabled',
-                                              addr_pair.ADDRESS_PAIRS,),
-                                    allowed_address_pairs=address_pairs)
-            port = self.deserialize(self.fmt, res)['port']
-            port_addr_mac = port[addr_pair.ADDRESS_PAIRS][0]['mac_address']
-            self.assertEqual(port['mac_address'], port_addr_mac)
-            self._delete('ports', port['id'])
-
-    def test_update_port_security_off_address_pairs(self):
-        if self._skip_port_security:
-            self.skipTest("Plugin does not implement port-security extension")
-        with self.network() as net:
-            with self.subnet(network=net) as subnet:
-                address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                                  'ip_address': '10.0.0.1'}]
-                # The port should have no security groups associated with it
-                with self.port(subnet=subnet,
-                               arg_list=(psec.PORTSECURITY,
-                                         addr_pair.ADDRESS_PAIRS,
-                                         secgroup.SECURITYGROUPS),
-                               port_security_enabled=True,
-                               allowed_address_pairs=address_pairs,
-                               security_groups=[]) as port:
-
-                    update_port = {'port': {psec.PORTSECURITY: False}}
-                    req = self.new_update_request('ports', update_port,
-                                                  port['port']['id'])
-                    res = req.get_response(self.api)
-                    self.assertEqual(409, res.status_int)
-
-    def test_update_with_none_and_own_mac_for_duplicate_ip(self):
-        with self.network() as net:
-            res = self._create_port(self.fmt, net['network']['id'])
-            port = self.deserialize(self.fmt, res)
-            mac_address = port['port']['mac_address']
-            address_pairs = [{'ip_address': '10.0.0.1'},
-                             {'mac_address': mac_address,
-                              'ip_address': '10.0.0.1'}]
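-            # The pair without a MAC defaults to the port's own MAC, so the
-            # two entries collide and the update is rejected with 400.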
-            update_port = {'port': {addr_pair.ADDRESS_PAIRS:
-                                    address_pairs}}
-            req = self.new_update_request('ports', update_port,
-                                          port['port']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(400, res.status_int)
-
-    def test_create_port_remove_allowed_address_pairs(self):
-        with self.network() as net:
-            address_pairs = [{'mac_address': '00:00:00:00:00:01',
-                              'ip_address': '10.0.0.1'}]
-            res = self._create_port(self.fmt, net['network']['id'],
-                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
-                                    allowed_address_pairs=address_pairs)
-            port = self.deserialize(self.fmt, res)
-            update_port = {'port': {addr_pair.ADDRESS_PAIRS: []}}
-            req = self.new_update_request('ports', update_port,
-                                          port['port']['id'])
-            port = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual([], port['port'][addr_pair.ADDRESS_PAIRS])
-            self._delete('ports', port['port']['id'])
diff --git a/neutron/tests/unit/db/test_api.py b/neutron/tests/unit/db/test_api.py
deleted file mode 100644
index 5b576a4..0000000
--- a/neutron/tests/unit/db/test_api.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_db import exception as db_exc
-import testtools
-
-from neutron.db import api as db_api
-from neutron.tests import base
-
-
-class TestExceptionToRetryContextManager(base.BaseTestCase):
-
-    def test_translates_single_exception(self):
-        with testtools.ExpectedException(db_exc.RetryRequest):
-            with db_api.exc_to_retry(ValueError):
-                raise ValueError()
-
-    def test_translates_multiple_exception_types(self):
-        with testtools.ExpectedException(db_exc.RetryRequest):
-            with db_api.exc_to_retry((ValueError, TypeError)):
-                raise TypeError()
-
-    def test_passes_other_exceptions(self):
-        with testtools.ExpectedException(ValueError):
-            with db_api.exc_to_retry(TypeError):
-                raise ValueError()
-
-    def test_inner_exception_preserved_in_retryrequest(self):
-        try:
-            exc = ValueError('test')
-            with db_api.exc_to_retry(ValueError):
-                raise exc
-        except db_exc.RetryRequest as e:
-            self.assertEqual(exc, e.inner_exc)
diff --git a/neutron/tests/unit/db/test_db_base_plugin_common.py b/neutron/tests/unit/db/test_db_base_plugin_common.py
deleted file mode 100644
index 2186652..0000000
--- a/neutron/tests/unit/db/test_db_base_plugin_common.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.db import db_base_plugin_common
-from neutron.tests import base
-
-
-class DummyObject(object):
-    def __init__(self, **kwargs):
-        self.kwargs = kwargs
-
-    def to_dict(self):
-        return self.kwargs
-
-
-class ConvertToDictTestCase(base.BaseTestCase):
-
-    @db_base_plugin_common.convert_result_to_dict
-    def method_dict(self, fields=None):
-        return DummyObject(one=1, two=2, three=3)
-
-    @db_base_plugin_common.convert_result_to_dict
-    def method_list(self):
-        return [DummyObject(one=1, two=2, three=3)] * 3
-
-    def test_simple_object(self):
-        expected = {'one': 1, 'two': 2, 'three': 3}
-        observed = self.method_dict()
-        self.assertEqual(expected, observed)
-
-    def test_list_of_objects(self):
-        expected = [{'one': 1, 'two': 2, 'three': 3}] * 3
-        observed = self.method_list()
-        self.assertEqual(expected, observed)
-
-
-class FilterFieldsTestCase(base.BaseTestCase):
-
-    @db_base_plugin_common.filter_fields
-    def method_dict(self, fields=None):
-        return {'one': 1, 'two': 2, 'three': 3}
-
-    @db_base_plugin_common.filter_fields
-    def method_list(self, fields=None):
-        return [self.method_dict() for _ in range(3)]
-
-    @db_base_plugin_common.filter_fields
-    def method_multiple_arguments(self, not_used, fields=None,
-                                  also_not_used=None):
-        return {'one': 1, 'two': 2, 'three': 3}
-
-    def test_no_fields(self):
-        expected = {'one': 1, 'two': 2, 'three': 3}
-        observed = self.method_dict()
-        self.assertEqual(expected, observed)
-
-    def test_dict(self):
-        expected = {'two': 2}
-        observed = self.method_dict(['two'])
-        self.assertEqual(expected, observed)
-
-    def test_list(self):
-        expected = [{'two': 2}, {'two': 2}, {'two': 2}]
-        observed = self.method_list(['two'])
-        self.assertEqual(expected, observed)
-
-    def test_multiple_arguments_positional(self):
-        expected = {'two': 2}
-        observed = self.method_multiple_arguments(list(), ['two'])
-        self.assertEqual(expected, observed)
-
-    def test_multiple_arguments_positional_and_keywords(self):
-        expected = {'two': 2}
-        observed = self.method_multiple_arguments(fields=['two'],
-                                                  not_used=None)
-        self.assertEqual(expected, observed)
-
-    def test_multiple_arguments_keyword(self):
-        expected = {'two': 2}
-        observed = self.method_multiple_arguments(list(), fields=['two'])
-        self.assertEqual(expected, observed)
diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py
deleted file mode 100644
index 3fea278..0000000
--- a/neutron/tests/unit/db/test_db_base_plugin_v2.py
+++ /dev/null
@@ -1,5945 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import copy
-import itertools
-
-import mock
-import netaddr
-from oslo_config import cfg
-from oslo_utils import importutils
-import six
-from sqlalchemy import event
-from sqlalchemy import orm
-import testtools
-from testtools import matchers
-import webob.exc
-
-import neutron
-from neutron.api import api_common
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.api.v2 import router
-from neutron.callbacks import exceptions
-from neutron.callbacks import registry
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.common import test_lib
-from neutron.common import utils
-from neutron import context
-from neutron.db import api as db_api
-from neutron.db import db_base_plugin_common
-from neutron.db import ipam_non_pluggable_backend as non_ipam
-from neutron.db import l3_db
-from neutron.db import models_v2
-from neutron.db import securitygroups_db as sgdb
-from neutron import manager
-from neutron.tests import base
-from neutron.tests import tools
-from neutron.tests.unit.api import test_extensions
-from neutron.tests.unit import testlib_api
-
-DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
-
-DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-DEVICE_OWNER_NOT_COMPUTE = constants.DEVICE_OWNER_DHCP
-
-
-def optional_ctx(obj, fallback):
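-    """Return fallback() when obj is falsy; otherwise wrap obj in a no-op
-    context manager, so callers can always use a with statement.
-    """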
-    if not obj:
-        return fallback()
-
-    @contextlib.contextmanager
-    def context_wrapper():
-        yield obj
-    return context_wrapper()
-
-
-def _fake_get_pagination_helper(self, request):
-    return api_common.PaginationEmulatedHelper(request, self._primary_key)
-
-
-def _fake_get_sorting_helper(self, request):
-    return api_common.SortingEmulatedHelper(request, self._attr_info)
-
-
-# TODO(banix): Move the following method to ML2 db test module when ML2
-# mechanism driver unit tests are corrected to use Ml2PluginV2TestCase
-# instead of directly using NeutronDbPluginV2TestCase
-def _get_create_db_method(resource):
-    ml2_method = '_create_%s_db' % resource
-    if hasattr(manager.NeutronManager.get_plugin(), ml2_method):
-        return ml2_method
-    else:
-        return 'create_%s' % resource
-
-
-class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
-    fmt = 'json'
-    resource_prefix_map = {}
-
-    def setUp(self, plugin=None, service_plugins=None,
-              ext_mgr=None):
-
-        super(NeutronDbPluginV2TestCase, self).setUp()
-        cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-        # Make sure the extensions for the plugin are reloaded at each test
-        extensions.PluginAwareExtensionManager._instance = None
-        # Save the attributes map in case the plugin alters it when
-        # loading extensions
-        self.useFixture(tools.AttributeMapMemento())
-        self._tenant_id = 'test-tenant'
-
-        if not plugin:
-            plugin = DB_PLUGIN_KLASS
-
-        # Update the plugin
-        self.setup_coreplugin(plugin)
-        cfg.CONF.set_override(
-            'service_plugins',
-            [test_lib.test_config.get(key, default)
-             for key, default in six.iteritems(service_plugins or {})]
-        )
-
-        cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
-        cfg.CONF.set_override('max_dns_nameservers', 2)
-        cfg.CONF.set_override('max_subnet_host_routes', 2)
-        cfg.CONF.set_override('allow_pagination', True)
-        cfg.CONF.set_override('allow_sorting', True)
-        self.api = router.APIRouter()
-        # Set the default status
-        self.net_create_status = 'ACTIVE'
-        self.port_create_status = 'ACTIVE'
-
-        def _is_native_bulk_supported():
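-            # The plugin advertises native bulk support via a class-private
-            # attribute (__native_bulk_support); name mangling means it must
-            # be read back through the "_<ClassName>__native_bulk_support"
-            # form reconstructed below.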
-            plugin_obj = manager.NeutronManager.get_plugin()
-            native_bulk_attr_name = ("_%s__native_bulk_support"
-                                     % plugin_obj.__class__.__name__)
-            return getattr(plugin_obj, native_bulk_attr_name, False)
-
-        self._skip_native_bulk = not _is_native_bulk_supported()
-
-        def _is_native_pagination_support():
-            native_pagination_attr_name = (
-                "_%s__native_pagination_support" %
-                manager.NeutronManager.get_plugin().__class__.__name__)
-            return (cfg.CONF.allow_pagination and
-                    getattr(manager.NeutronManager.get_plugin(),
-                            native_pagination_attr_name, False))
-
-        self._skip_native_pagination = not _is_native_pagination_support()
-
-        def _is_native_sorting_support():
-            native_sorting_attr_name = (
-                "_%s__native_sorting_support" %
-                manager.NeutronManager.get_plugin().__class__.__name__)
-            return (cfg.CONF.allow_sorting and
-                    getattr(manager.NeutronManager.get_plugin(),
-                            native_sorting_attr_name, False))
-
-        self.plugin = manager.NeutronManager.get_plugin()
-        self._skip_native_sorting = not _is_native_sorting_support()
-        if ext_mgr:
-            self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
-
-    def tearDown(self):
-        self.api = None
-        self._deserializers = None
-        self._skip_native_bulk = None
-        self._skip_native_pagination = None
-        self._skip_native_sorting = None
-        self.ext_api = None
-        super(NeutronDbPluginV2TestCase, self).tearDown()
-
-    def setup_config(self):
-        # Create the default configurations
-        args = ['--config-file', base.etcdir('neutron.conf')]
-        # If test_config specifies some config-file, use it, as well
-        for config_file in test_lib.test_config.get('config_files', []):
-            args.extend(['--config-file', config_file])
-        super(NeutronDbPluginV2TestCase, self).setup_config(args=args)
-
-    def _req(self, method, resource, data=None, fmt=None, id=None, params=None,
-             action=None, subresource=None, sub_id=None, context=None):
-        fmt = fmt or self.fmt
-
-        path = '/%s.%s' % (
-            '/'.join(p for p in
-                     (resource, id, subresource, sub_id, action) if p),
-            fmt
-        )
-
-        prefix = self.resource_prefix_map.get(resource)
-        if prefix:
-            path = prefix + path
-
-        content_type = 'application/%s' % fmt
-        body = None
-        if data is not None:  # empty dict is valid
-            body = self.serialize(data)
-        return testlib_api.create_request(path, body, content_type, method,
-                                          query_string=params, context=context)
-
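-    # _req assembles REST-style paths from its positional parts; a rough
-    # sketch of the mapping (ids and action are hypothetical):
-    #
-    #     _req('GET', 'networks', id='net-1')
-    #         -> GET /networks/net-1.json
-    #     _req('PUT', 'routers', data, id='r-1', action='do_something')
-    #         -> PUT /routers/r-1/do_something.json
-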
-    def new_create_request(self, resource, data, fmt=None, id=None,
-                           subresource=None, context=None):
-        return self._req('POST', resource, data, fmt, id=id,
-                         subresource=subresource, context=context)
-
-    def new_list_request(self, resource, fmt=None, params=None,
-                         subresource=None):
-        return self._req(
-            'GET', resource, None, fmt, params=params, subresource=subresource
-        )
-
-    def new_show_request(self, resource, id, fmt=None,
-                         subresource=None, fields=None):
-        if fields:
-            params = "&".join(["fields=%s" % x for x in fields])
-        else:
-            params = None
-        return self._req('GET', resource, None, fmt, id=id,
-                         params=params, subresource=subresource)
-
-    def new_delete_request(self, resource, id, fmt=None, subresource=None,
-                           sub_id=None):
-        return self._req(
-            'DELETE',
-            resource,
-            None,
-            fmt,
-            id=id,
-            subresource=subresource,
-            sub_id=sub_id
-        )
-
-    def new_update_request(self, resource, data, id, fmt=None,
-                           subresource=None, context=None):
-        return self._req(
-            'PUT', resource, data, fmt, id=id, subresource=subresource,
-            context=context
-        )
-
-    def new_action_request(self, resource, data, id, action, fmt=None,
-                           subresource=None):
-        return self._req(
-            'PUT',
-            resource,
-            data,
-            fmt,
-            id=id,
-            action=action,
-            subresource=subresource
-        )
-
-    def deserialize(self, content_type, response):
-        ctype = 'application/%s' % content_type
-        data = self._deserializers[ctype].deserialize(response.body)['body']
-        return data
-
-    def _create_bulk_from_list(self, fmt, resource, objects, **kwargs):
-        """Creates a bulk request from a list of objects."""
-        collection = "%ss" % resource
-        req_data = {collection: objects}
-        req = self.new_create_request(collection, req_data, fmt)
-        if ('set_context' in kwargs and
-                kwargs['set_context'] is True and
-                'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            req.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-        elif 'context' in kwargs:
-            req.environ['neutron.context'] = kwargs['context']
-        return req.get_response(self.api)
-
-    def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs):
-        """Creates a bulk request for any kind of resource."""
-        objects = []
-        collection = "%ss" % resource
-        for i in range(number):
-            obj = copy.deepcopy(data)
-            obj[resource]['name'] = "%s_%s" % (name, i)
-            if 'override' in kwargs and i in kwargs['override']:
-                obj[resource].update(kwargs['override'][i])
-            objects.append(obj)
-        req_data = {collection: objects}
-        req = self.new_create_request(collection, req_data, fmt)
-        if ('set_context' in kwargs and
-                kwargs['set_context'] is True and
-                'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            req.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-        elif 'context' in kwargs:
-            req.environ['neutron.context'] = kwargs['context']
-        return req.get_response(self.api)
-
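-    # The bulk payload built above is keyed by the plural resource name;
-    # for two ports it looks roughly like (values illustrative):
-    #
-    #     {'ports': [{'port': {'name': 'test_0', ...}},
-    #                {'port': {'name': 'test_1', ...}}]}
-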
-    def _create_network(self, fmt, name, admin_state_up,
-                        arg_list=None, **kwargs):
-        data = {'network': {'name': name,
-                            'admin_state_up': admin_state_up,
-                            'tenant_id': self._tenant_id}}
-        for arg in (('admin_state_up', 'tenant_id', 'shared',
-                     'vlan_transparent',
-                     'availability_zone_hints') + (arg_list or ())):
-            # Arg must be present
-            if arg in kwargs:
-                data['network'][arg] = kwargs[arg]
-        network_req = self.new_create_request('networks', data, fmt)
-        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            network_req.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-
-        return network_req.get_response(self.api)
-
-    def _create_network_bulk(self, fmt, number, name,
-                             admin_state_up, **kwargs):
-        base_data = {'network': {'admin_state_up': admin_state_up,
-                                 'tenant_id': self._tenant_id}}
-        return self._create_bulk(fmt, number, 'network', base_data, **kwargs)
-
-    def _create_subnet(self, fmt, net_id, cidr,
-                       expected_res_status=None, **kwargs):
-        data = {'subnet': {'network_id': net_id,
-                           'cidr': cidr,
-                           'ip_version': 4,
-                           'tenant_id': self._tenant_id}}
-        for arg in ('ip_version', 'tenant_id',
-                    'enable_dhcp', 'allocation_pools',
-                    'dns_nameservers', 'host_routes',
-                    'shared', 'ipv6_ra_mode', 'ipv6_address_mode'):
-            # Arg must be present and not null (but can be false)
-            if kwargs.get(arg) is not None:
-                data['subnet'][arg] = kwargs[arg]
-
-        if ('gateway_ip' in kwargs and
-            kwargs['gateway_ip'] is not attributes.ATTR_NOT_SPECIFIED):
-            data['subnet']['gateway_ip'] = kwargs['gateway_ip']
-
-        subnet_req = self.new_create_request('subnets', data, fmt)
-        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            subnet_req.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-
-        subnet_res = subnet_req.get_response(self.api)
-        if expected_res_status:
-            self.assertEqual(subnet_res.status_int, expected_res_status)
-        return subnet_res
-
-    def _create_subnet_bulk(self, fmt, number, net_id, name,
-                            ip_version=4, **kwargs):
-        base_data = {'subnet': {'network_id': net_id,
-                                'ip_version': ip_version,
-                                'tenant_id': self._tenant_id}}
-        # auto-generate CIDRs so the bulk-created subnets do not overlap
-        overrides = dict((num, {'cidr': '10.0.%s.0/24' % num})
-                         for num in range(number))
-        kwargs.update({'override': overrides})
-        return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs)
-
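-    # With number=2 the auto-generated overrides above expand to
-    # {0: {'cidr': '10.0.0.0/24'}, 1: {'cidr': '10.0.1.0/24'}}, so each
-    # bulk-created subnet gets a non-overlapping CIDR.
-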
-    def _create_subnetpool(self, fmt, prefixes,
-                           expected_res_status=None, admin=False, **kwargs):
-        subnetpool = {'subnetpool': {'prefixes': prefixes}}
-        for k, v in kwargs.items():
-            subnetpool['subnetpool'][k] = str(v)
-
-        api = self._api_for_resource('subnetpools')
-        subnetpools_req = self.new_create_request('subnetpools',
-                                                  subnetpool, fmt)
-        if not admin:
-            neutron_context = context.Context('', kwargs['tenant_id'])
-            subnetpools_req.environ['neutron.context'] = neutron_context
-        subnetpool_res = subnetpools_req.get_response(api)
-        if expected_res_status:
-            self.assertEqual(subnetpool_res.status_int, expected_res_status)
-        return subnetpool_res
-
-    def _create_port(self, fmt, net_id, expected_res_status=None,
-                     arg_list=None, **kwargs):
-        data = {'port': {'network_id': net_id,
-                         'tenant_id': self._tenant_id}}
-
-        for arg in (('admin_state_up', 'device_id',
-                    'mac_address', 'name', 'fixed_ips',
-                    'tenant_id', 'device_owner', 'security_groups') +
-                    (arg_list or ())):
-            # Arg must be present
-            if arg in kwargs:
-                data['port'][arg] = kwargs[arg]
-        # create a dhcp port device id if one hasn't been supplied
-        if ('device_owner' in kwargs and
-            kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
-            'host' in kwargs and
-            'device_id' not in kwargs):
-            device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
-            data['port']['device_id'] = device_id
-        port_req = self.new_create_request('ports', data, fmt)
-        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            port_req.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-
-        port_res = port_req.get_response(self.api)
-        if expected_res_status:
-            self.assertEqual(port_res.status_int, expected_res_status)
-        return port_res
-
-    def _list_ports(self, fmt, expected_res_status=None,
-                    net_id=None, **kwargs):
-        query_params = []
-        if net_id:
-            query_params.append("network_id=%s" % net_id)
-        if kwargs.get('device_owner'):
-            query_params.append("device_owner=%s" % kwargs.get('device_owner'))
-        port_req = self.new_list_request('ports', fmt, '&'.join(query_params))
-        if ('set_context' in kwargs and
-                kwargs['set_context'] is True and
-                'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            port_req.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-
-        port_res = port_req.get_response(self.api)
-        if expected_res_status:
-            self.assertEqual(port_res.status_int, expected_res_status)
-        return port_res
-
-    def _create_port_bulk(self, fmt, number, net_id, name,
-                          admin_state_up, **kwargs):
-        base_data = {'port': {'network_id': net_id,
-                              'admin_state_up': admin_state_up,
-                              'tenant_id': self._tenant_id}}
-        return self._create_bulk(fmt, number, 'port', base_data, **kwargs)
-
-    def _make_network(self, fmt, name, admin_state_up, **kwargs):
-        res = self._create_network(fmt, name, admin_state_up, **kwargs)
-        # TODO(salvatore-orlando): do exception handling in this test module
-        # in a uniform way (we do it differently for ports, subnets, and
-        # networks).
-        # Things can go wrong - raise an HTTP exception carrying only the
-        # response code so it can be caught by unit tests
-        if res.status_int >= webob.exc.HTTPClientError.code:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(fmt, res)
-
-    def _make_subnet(self, fmt, network, gateway, cidr,
-                     allocation_pools=None, ip_version=4, enable_dhcp=True,
-                     dns_nameservers=None, host_routes=None, shared=None,
-                     ipv6_ra_mode=None, ipv6_address_mode=None,
-                     tenant_id=None, set_context=False):
-        res = self._create_subnet(fmt,
-                                  net_id=network['network']['id'],
-                                  cidr=cidr,
-                                  gateway_ip=gateway,
-                                  tenant_id=(tenant_id or
-                                             network['network']['tenant_id']),
-                                  allocation_pools=allocation_pools,
-                                  ip_version=ip_version,
-                                  enable_dhcp=enable_dhcp,
-                                  dns_nameservers=dns_nameservers,
-                                  host_routes=host_routes,
-                                  shared=shared,
-                                  ipv6_ra_mode=ipv6_ra_mode,
-                                  ipv6_address_mode=ipv6_address_mode,
-                                  set_context=set_context)
-        # Things can go wrong - raise HTTP exc with res code only
-        # so it can be caught by unit tests
-        if res.status_int >= webob.exc.HTTPClientError.code:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(fmt, res)
-
-    def _make_subnetpool(self, fmt, prefixes, admin=False, **kwargs):
-        res = self._create_subnetpool(fmt,
-                                      prefixes,
-                                      None,
-                                      admin,
-                                      **kwargs)
-        # Things can go wrong - raise HTTP exc with res code only
-        # so it can be caught by unit tests
-        if res.status_int >= webob.exc.HTTPClientError.code:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(fmt, res)
-
-    def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs):
-        res = self._create_port(fmt, net_id, expected_res_status, **kwargs)
-        # Things can go wrong - raise HTTP exc with res code only
-        # so it can be caught by unit tests
-        if res.status_int >= webob.exc.HTTPClientError.code:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(fmt, res)
-
-    def _api_for_resource(self, resource):
-        if resource in ['networks', 'subnets', 'ports', 'subnetpools']:
-            return self.api
-        else:
-            return self.ext_api
-
-    def _delete(self, collection, id,
-                expected_code=webob.exc.HTTPNoContent.code,
-                neutron_context=None):
-        req = self.new_delete_request(collection, id)
-        if neutron_context:
-            # create a specific auth context for this request
-            req.environ['neutron.context'] = neutron_context
-        res = req.get_response(self._api_for_resource(collection))
-        self.assertEqual(res.status_int, expected_code)
-
-    def _show_response(self, resource, id, neutron_context=None):
-        req = self.new_show_request(resource, id)
-        if neutron_context:
-            # create a specific auth context for this request
-            req.environ['neutron.context'] = neutron_context
-        return req.get_response(self._api_for_resource(resource))
-
-    def _show(self, resource, id,
-              expected_code=webob.exc.HTTPOk.code,
-              neutron_context=None):
-        res = self._show_response(resource, id,
-                                  neutron_context=neutron_context)
-        self.assertEqual(expected_code, res.status_int)
-        return self.deserialize(self.fmt, res)
-
-    def _update(self, resource, id, new_data,
-                expected_code=webob.exc.HTTPOk.code,
-                neutron_context=None):
-        req = self.new_update_request(resource, new_data, id)
-        if neutron_context:
-            # create a specific auth context for this request
-            req.environ['neutron.context'] = neutron_context
-        res = req.get_response(self._api_for_resource(resource))
-        self.assertEqual(res.status_int, expected_code)
-        return self.deserialize(self.fmt, res)
-
-    def _list(self, resource, fmt=None, neutron_context=None,
-              query_params=None):
-        fmt = fmt or self.fmt
-        req = self.new_list_request(resource, fmt, query_params)
-        if neutron_context:
-            req.environ['neutron.context'] = neutron_context
-        res = req.get_response(self._api_for_resource(resource))
-        self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-        return self.deserialize(fmt, res)
-
-    def _fail_second_call(self, patched_plugin, orig, *args, **kwargs):
-        """Invoked by test cases for injecting failures in plugin."""
-        def second_call(*args, **kwargs):
-            raise n_exc.NeutronException()
-        patched_plugin.side_effect = second_call
-        return orig(*args, **kwargs)
-
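-    # A sketch of how tests wire up _fail_second_call (patched_plugin and
-    # orig come from mock.patch.object elsewhere in this module): the
-    # first plugin call goes through `orig` and succeeds, and every later
-    # call raises NeutronException.
-    #
-    #     patched_plugin.side_effect = (
-    #         lambda *a, **kw: self._fail_second_call(patched_plugin,
-    #                                                 orig, *a, **kw))
-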
-    def _validate_behavior_on_bulk_failure(
-            self, res, collection,
-            errcode=webob.exc.HTTPClientError.code):
-        self.assertEqual(res.status_int, errcode)
-        req = self.new_list_request(collection)
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-        items = self.deserialize(self.fmt, res)
-        self.assertEqual(len(items[collection]), 0)
-
-    def _validate_behavior_on_bulk_success(self, res, collection,
-                                           names=('test_0', 'test_1')):
-        self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-        items = self.deserialize(self.fmt, res)[collection]
-        self.assertEqual(len(items), len(names))
-        for item, name in zip(items, names):
-            self.assertEqual(item['name'], name)
-
-    def _test_list_resources(self, resource, items, neutron_context=None,
-                             query_params=None):
-        res = self._list('%ss' % resource,
-                         neutron_context=neutron_context,
-                         query_params=query_params)
-        resource = resource.replace('-', '_')
-        self.assertItemsEqual([i['id'] for i in res['%ss' % resource]],
-                              [i[resource]['id'] for i in items])
-
-    @contextlib.contextmanager
-    def network(self, name='net1',
-                admin_state_up=True,
-                fmt=None,
-                **kwargs):
-        network = self._make_network(fmt or self.fmt, name,
-                                     admin_state_up, **kwargs)
-        yield network
-
-    @contextlib.contextmanager
-    def subnet(self, network=None,
-               gateway_ip=attributes.ATTR_NOT_SPECIFIED,
-               cidr='10.0.0.0/24',
-               fmt=None,
-               ip_version=4,
-               allocation_pools=None,
-               enable_dhcp=True,
-               dns_nameservers=None,
-               host_routes=None,
-               shared=None,
-               ipv6_ra_mode=None,
-               ipv6_address_mode=None,
-               tenant_id=None,
-               set_context=False):
-        with optional_ctx(network, self.network) as network_to_use:
-            subnet = self._make_subnet(fmt or self.fmt,
-                                       network_to_use,
-                                       gateway_ip,
-                                       cidr,
-                                       allocation_pools,
-                                       ip_version,
-                                       enable_dhcp,
-                                       dns_nameservers,
-                                       host_routes,
-                                       shared=shared,
-                                       ipv6_ra_mode=ipv6_ra_mode,
-                                       ipv6_address_mode=ipv6_address_mode,
-                                       tenant_id=tenant_id,
-                                       set_context=set_context)
-            yield subnet
-
-    @contextlib.contextmanager
-    def subnetpool(self, prefixes, admin=False, **kwargs):
-        subnetpool = self._make_subnetpool(self.fmt,
-                                           prefixes,
-                                           admin,
-                                           **kwargs)
-        yield subnetpool
-
-    @contextlib.contextmanager
-    def port(self, subnet=None, fmt=None, **kwargs):
-        with optional_ctx(subnet, self.subnet) as subnet_to_use:
-            net_id = subnet_to_use['subnet']['network_id']
-            port = self._make_port(fmt or self.fmt, net_id, **kwargs)
-            yield port
-
-    def _test_list_with_sort(self, resource,
-                             items, sorts, resources=None, query_params=''):
-        query_str = query_params
-        for key, direction in sorts:
-            query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key,
-                                                                  direction)
-        if not resources:
-            resources = '%ss' % resource
-        req = self.new_list_request(resources,
-                                    params=query_str)
-        api = self._api_for_resource(resources)
-        res = self.deserialize(self.fmt, req.get_response(api))
-        resource = resource.replace('-', '_')
-        resources = resources.replace('-', '_')
-        expected_res = [item[resource]['id'] for item in items]
-        self.assertEqual(expected_res, [n['id'] for n in res[resources]])
-
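-    # A sketch of what _test_list_with_sort sends: for
-    # sorts=[('name', 'asc'), ('id', 'desc')] (hypothetical keys) the
-    # list request carries the query string
-    # sort_key=name&sort_dir=asc&sort_key=id&sort_dir=desc and the
-    # response order is asserted to match `items`.
-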
-    def _test_list_with_pagination(self, resource, items, sort,
-                                   limit, expected_page_num,
-                                   resources=None,
-                                   query_params='',
-                                   verify_key='id'):
-        if not resources:
-            resources = '%ss' % resource
-        query_str = query_params + '&' if query_params else ''
-        query_str = query_str + ("limit=%s&sort_key=%s&"
-                                 "sort_dir=%s") % (limit, sort[0], sort[1])
-        req = self.new_list_request(resources, params=query_str)
-        items_res = []
-        page_num = 0
-        api = self._api_for_resource(resources)
-        resource = resource.replace('-', '_')
-        resources = resources.replace('-', '_')
-        while req:
-            page_num = page_num + 1
-            res = self.deserialize(self.fmt, req.get_response(api))
-            self.assertThat(len(res[resources]),
-                            matchers.LessThan(limit + 1))
-            items_res = items_res + res[resources]
-            req = None
-            if '%s_links' % resources in res:
-                for link in res['%s_links' % resources]:
-                    if link['rel'] == 'next':
-                        content_type = 'application/%s' % self.fmt
-                        req = testlib_api.create_request(link['href'],
-                                                         '', content_type)
-                        self.assertEqual(len(res[resources]),
-                                         limit)
-        self.assertEqual(expected_page_num, page_num)
-        self.assertEqual([item[resource][verify_key] for item in items],
-                         [n[verify_key] for n in items_res])
-
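-    # Pagination walk sketch for the helper above: with limit=2 and
-    # sort=('mac_address', 'asc') the first request uses
-    # limit=2&sort_key=mac_address&sort_dir=asc; each response may include
-    # a 'ports_links' list, and the href of the entry with rel == 'next'
-    # is fetched until no next link remains.
-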
-    def _test_list_with_pagination_reverse(self, resource, items, sort,
-                                           limit, expected_page_num,
-                                           resources=None,
-                                           query_params=''):
-        if not resources:
-            resources = '%ss' % resource
-        resource = resource.replace('-', '_')
-        api = self._api_for_resource(resources)
-        marker = items[-1][resource]['id']
-        query_str = query_params + '&' if query_params else ''
-        query_str = query_str + ("limit=%s&page_reverse=True&"
-                                 "sort_key=%s&sort_dir=%s&"
-                                 "marker=%s") % (limit, sort[0], sort[1],
-                                                 marker)
-        req = self.new_list_request(resources, params=query_str)
-        item_res = [items[-1][resource]]
-        page_num = 0
-        resources = resources.replace('-', '_')
-        while req:
-            page_num = page_num + 1
-            res = self.deserialize(self.fmt, req.get_response(api))
-            self.assertThat(len(res[resources]),
-                            matchers.LessThan(limit + 1))
-            res[resources].reverse()
-            item_res = item_res + res[resources]
-            req = None
-            if '%s_links' % resources in res:
-                for link in res['%s_links' % resources]:
-                    if link['rel'] == 'previous':
-                        content_type = 'application/%s' % self.fmt
-                        req = testlib_api.create_request(link['href'],
-                                                         '', content_type)
-                        self.assertEqual(len(res[resources]),
-                                         limit)
-        self.assertEqual(expected_page_num, page_num)
-        expected_res = [item[resource]['id'] for item in items]
-        expected_res.reverse()
-        self.assertEqual(expected_res, [n['id'] for n in item_res])
-
-    def _compare_resource(self, observed_res, expected_res, res_name):
-        """Compare the observed and expected resources (e.g. subnets)."""
-        for k in expected_res:
-            self.assertIn(k, observed_res[res_name])
-            if isinstance(expected_res[k], list):
-                self.assertEqual(sorted(observed_res[res_name][k]),
-                                 sorted(expected_res[k]))
-            else:
-                self.assertEqual(observed_res[res_name][k], expected_res[k])
-
-    def _validate_resource(self, resource, keys, res_name):
-        for k in keys:
-            self.assertIn(k, resource[res_name])
-            if isinstance(keys[k], list):
-                self.assertEqual(
-                     sorted(resource[res_name][k], key=utils.safe_sort_key),
-                     sorted(keys[k], key=utils.safe_sort_key))
-            else:
-                self.assertEqual(resource[res_name][k], keys[k])
-
-
-class TestBasicGet(NeutronDbPluginV2TestCase):
-
-    def test_single_get_admin(self):
-        plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2()
-        with self.network() as network:
-            net_id = network['network']['id']
-            ctx = context.get_admin_context()
-            n = plugin._get_network(ctx, net_id)
-            self.assertEqual(net_id, n.id)
-
-    def test_single_get_tenant(self):
-        plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2()
-        with self.network() as network:
-            net_id = network['network']['id']
-            ctx = context.get_admin_context()
-            n = plugin._get_network(ctx, net_id)
-            self.assertEqual(net_id, n.id)
-
-
-class TestV2HTTPResponse(NeutronDbPluginV2TestCase):
-    def test_create_returns_201(self):
-        res = self._create_network(self.fmt, 'net2', True)
-        self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-
-    def test_list_returns_200(self):
-        req = self.new_list_request('networks')
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-
-    def _check_list_with_fields(self, res, field_name):
-        self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-        body = self.deserialize(self.fmt, res)
-        # further checks: exactly 1 network in the response
-        self.assertEqual(len(body['networks']), 1)
-        # exactly 1 field in the network record
-        self.assertEqual(len(body['networks'][0]), 1)
-        # and that field is the requested one
-        self.assertIn(field_name, body['networks'][0])
-
-    def test_list_with_fields(self):
-        self._create_network(self.fmt, 'some_net', True)
-        req = self.new_list_request('networks', params="fields=name")
-        res = req.get_response(self.api)
-        self._check_list_with_fields(res, 'name')
-
-    def test_list_with_fields_noadmin(self):
-        tenant_id = 'some_tenant'
-        self._create_network(self.fmt,
-                             'some_net',
-                             True,
-                             tenant_id=tenant_id,
-                             set_context=True)
-        req = self.new_list_request('networks', params="fields=name")
-        req.environ['neutron.context'] = context.Context('', tenant_id)
-        res = req.get_response(self.api)
-        self._check_list_with_fields(res, 'name')
-
-    def test_list_with_fields_noadmin_and_policy_field(self):
-        """If a field used by policy is selected, do not duplicate it.
-
-        Verifies that if the field parameter explicitly specifies a field
-        which is used by the policy engine, then it is not duplicated
-        in the response.
-
-        """
-        tenant_id = 'some_tenant'
-        self._create_network(self.fmt,
-                             'some_net',
-                             True,
-                             tenant_id=tenant_id,
-                             set_context=True)
-        req = self.new_list_request('networks', params="fields=tenant_id")
-        req.environ['neutron.context'] = context.Context('', tenant_id)
-        res = req.get_response(self.api)
-        self._check_list_with_fields(res, 'tenant_id')
-
-    def test_show_returns_200(self):
-        with self.network() as net:
-            req = self.new_show_request('networks', net['network']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-
-    def test_delete_returns_204(self):
-        res = self._create_network(self.fmt, 'net1', True)
-        net = self.deserialize(self.fmt, res)
-        req = self.new_delete_request('networks', net['network']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_update_returns_200(self):
-        with self.network() as net:
-            req = self.new_update_request('networks',
-                                          {'network': {'name': 'steve'}},
-                                          net['network']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-
-    def test_update_invalid_json_400(self):
-        with self.network() as net:
-            req = self.new_update_request('networks',
-                                          '{{"name": "aaa"}}',
-                                          net['network']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_bad_route_404(self):
-        req = self.new_list_request('doohickeys')
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
-
-
-class TestPortsV2(NeutronDbPluginV2TestCase):
-    def test_create_port_json(self):
-        keys = [('admin_state_up', True), ('status', self.port_create_status)]
-        with self.port(name='myname') as port:
-            for k, v in keys:
-                self.assertEqual(port['port'][k], v)
-            self.assertIn('mac_address', port['port'])
-            ips = port['port']['fixed_ips']
-            self.assertEqual(len(ips), 1)
-            self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-            self.assertEqual('myname', port['port']['name'])
-
-    def test_create_port_as_admin(self):
-        with self.network() as network:
-            self._create_port(self.fmt,
-                              network['network']['id'],
-                              webob.exc.HTTPCreated.code,
-                              tenant_id='bad_tenant_id',
-                              device_id='fake_device',
-                              device_owner='fake_owner',
-                              fixed_ips=[],
-                              set_context=False)
-
-    def test_create_port_bad_tenant(self):
-        with self.network() as network:
-            self._create_port(self.fmt,
-                              network['network']['id'],
-                              webob.exc.HTTPNotFound.code,
-                              tenant_id='bad_tenant_id',
-                              device_id='fake_device',
-                              device_owner='fake_owner',
-                              fixed_ips=[],
-                              set_context=True)
-
-    def test_create_port_public_network(self):
-        keys = [('admin_state_up', True), ('status', self.port_create_status)]
-        with self.network(shared=True) as network:
-            port_res = self._create_port(self.fmt,
-                                         network['network']['id'],
-                                         webob.exc.HTTPCreated.code,
-                                         tenant_id='another_tenant',
-                                         set_context=True)
-            port = self.deserialize(self.fmt, port_res)
-            for k, v in keys:
-                self.assertEqual(port['port'][k], v)
-            self.assertIn('mac_address', port['port'])
-            self._delete('ports', port['port']['id'])
-
-    def test_create_port_public_network_with_ip(self):
-        with self.network(shared=True) as network:
-            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
-                keys = [('admin_state_up', True),
-                        ('status', self.port_create_status),
-                        ('fixed_ips', [{'subnet_id': subnet['subnet']['id'],
-                                        'ip_address': '10.0.0.2'}])]
-                port_res = self._create_port(self.fmt,
-                                             network['network']['id'],
-                                             webob.exc.HTTPCreated.code,
-                                             tenant_id='another_tenant',
-                                             set_context=True)
-                port = self.deserialize(self.fmt, port_res)
-                for k, v in keys:
-                    self.assertEqual(port['port'][k], v)
-                self.assertIn('mac_address', port['port'])
-                self._delete('ports', port['port']['id'])
-
-    def test_create_port_anticipating_allocation(self):
-        with self.network(shared=True) as network:
-            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
-                fixed_ips = [{'subnet_id': subnet['subnet']['id']},
-                             {'subnet_id': subnet['subnet']['id'],
-                              'ip_address': '10.0.0.2'}]
-                self._create_port(self.fmt, network['network']['id'],
-                                  webob.exc.HTTPCreated.code,
-                                  fixed_ips=fixed_ips)
-
-    def test_create_port_public_network_with_invalid_ip_no_subnet_id(self,
-            expected_error='InvalidIpForNetwork'):
-        with self.network(shared=True) as network:
-            with self.subnet(network=network, cidr='10.0.0.0/24'):
-                ips = [{'ip_address': '1.1.1.1'}]
-                res = self._create_port(self.fmt,
-                                        network['network']['id'],
-                                        webob.exc.HTTPBadRequest.code,
-                                        fixed_ips=ips,
-                                        set_context=True)
-                data = self.deserialize(self.fmt, res)
-                msg = str(n_exc.InvalidIpForNetwork(ip_address='1.1.1.1'))
-                self.assertEqual(expected_error, data['NeutronError']['type'])
-                self.assertEqual(msg, data['NeutronError']['message'])
-
-    def test_create_port_public_network_with_invalid_ip_and_subnet_id(self,
-            expected_error='InvalidIpForSubnet'):
-        with self.network(shared=True) as network:
-            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
-                ips = [{'subnet_id': subnet['subnet']['id'],
-                        'ip_address': '1.1.1.1'}]
-                res = self._create_port(self.fmt,
-                                        network['network']['id'],
-                                        webob.exc.HTTPBadRequest.code,
-                                        fixed_ips=ips,
-                                        set_context=True)
-                data = self.deserialize(self.fmt, res)
-                msg = str(n_exc.InvalidIpForSubnet(ip_address='1.1.1.1'))
-                self.assertEqual(expected_error, data['NeutronError']['type'])
-                self.assertEqual(msg, data['NeutronError']['message'])
-
-    def test_create_port_with_too_many_fixed_ips(self):
-        with self.network() as network:
-            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
-                fixed_ips = [{'subnet_id': subnet['subnet']['id'],
-                              'ip_address': '10.0.0.%s' % id}
-                             for id in range(3,
-                                 cfg.CONF.max_fixed_ips_per_port + 4)]
-                res = self._create_port(self.fmt,
-                                        network['network']['id'],
-                                        webob.exc.HTTPBadRequest.code,
-                                        fixed_ips=fixed_ips,
-                                        set_context=True)
-                data = self.deserialize(self.fmt, res)
-                expected_error = 'InvalidInput'
-                self.assertEqual(expected_error,
-                                 data['NeutronError']['type'])
-
-    def test_create_ports_bulk_native(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk port create")
-        with self.network() as net:
-            res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
-                                         'test', True)
-            self._validate_behavior_on_bulk_success(res, 'ports')
-            for p in self.deserialize(self.fmt, res)['ports']:
-                self._delete('ports', p['id'])
-
-    def test_create_ports_bulk_emulated(self):
-        real_has_attr = hasattr
-
-        # ensure the API chooses the emulation code path
-        def fakehasattr(item, attr):
-            if attr.endswith('__native_bulk_support'):
-                return False
-            return real_has_attr(item, attr)
-
-        with mock.patch('six.moves.builtins.hasattr',
-                        new=fakehasattr):
-            with self.network() as net:
-                res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
-                                             'test', True)
-                self._validate_behavior_on_bulk_success(res, 'ports')
-                for p in self.deserialize(self.fmt, res)['ports']:
-                    self._delete('ports', p['id'])
-
-    def test_create_ports_bulk_wrong_input(self):
-        with self.network() as net:
-            overrides = {1: {'admin_state_up': 'doh'}}
-            res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
-                                         'test', True,
-                                         override=overrides)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-            req = self.new_list_request('ports')
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-            ports = self.deserialize(self.fmt, res)
-            self.assertEqual(len(ports['ports']), 0)
-
-    def test_create_ports_bulk_emulated_plugin_failure(self):
-        real_has_attr = hasattr
-
-        # ensure the API chooses the emulation code path
-        def fakehasattr(item, attr):
-            if attr.endswith('__native_bulk_support'):
-                return False
-            return real_has_attr(item, attr)
-
-        with mock.patch('six.moves.builtins.hasattr',
-                        new=fakehasattr):
-            orig = manager.NeutronManager.get_plugin().create_port
-            method_to_patch = _get_create_db_method('port')
-            with mock.patch.object(manager.NeutronManager.get_plugin(),
-                                   method_to_patch) as patched_plugin:
-
-                def side_effect(*args, **kwargs):
-                    return self._fail_second_call(patched_plugin, orig,
-                                                  *args, **kwargs)
-
-                patched_plugin.side_effect = side_effect
-                with self.network() as net:
-                    res = self._create_port_bulk(self.fmt, 2,
-                                                 net['network']['id'],
-                                                 'test',
-                                                 True)
-                    # We expect a 500 as we injected a fault in the plugin
-                    self._validate_behavior_on_bulk_failure(
-                        res, 'ports', webob.exc.HTTPServerError.code
-                    )
-
-    def test_create_ports_bulk_native_plugin_failure(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk port create")
-        ctx = context.get_admin_context()
-        with self.network() as net:
-            plugin = manager.NeutronManager.get_plugin()
-            orig = plugin.create_port
-            method_to_patch = _get_create_db_method('port')
-            with mock.patch.object(plugin, method_to_patch) as patched_plugin:
-
-                def side_effect(*args, **kwargs):
-                    return self._fail_second_call(patched_plugin, orig,
-                                                  *args, **kwargs)
-
-                patched_plugin.side_effect = side_effect
-                res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
-                                             'test', True, context=ctx)
-                # We expect a 500 as we injected a fault in the plugin
-                self._validate_behavior_on_bulk_failure(
-                    res, 'ports', webob.exc.HTTPServerError.code)
-
-    def test_list_ports(self):
-        # for this test we need to enable overlapping ips
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port() as v1, self.port() as v2, self.port() as v3:
-            ports = (v1, v2, v3)
-            self._test_list_resources('port', ports)
-
-    def test_list_ports_filtered_by_fixed_ip(self):
-        # for this test we need to enable overlapping ips
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port() as port1, self.port():
-            fixed_ips = port1['port']['fixed_ips'][0]
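-            # %%3D survives %-formatting as the URL-encoded '=' (%3D), so
-            # each fixed_ips filter entry reads key=value, e.g.
-            # fixed_ips=ip_address%3D10.0.0.2 (address illustrative).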
-            query_params = """
-fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
-""".strip() % (fixed_ips['ip_address'],
-               '192.168.126.5',
-               fixed_ips['subnet_id'])
-            self._test_list_resources('port', [port1],
-                                      query_params=query_params)
-
-    def test_list_ports_public_network(self):
-        with self.network(shared=True) as network:
-            with self.subnet(network) as subnet:
-                with self.port(subnet, tenant_id='tenant_1') as port1,\
-                        self.port(subnet, tenant_id='tenant_2') as port2:
-                    # Admin request - must return both ports
-                    self._test_list_resources('port', [port1, port2])
-                    # Tenant_1 request - must return single port
-                    n_context = context.Context('', 'tenant_1')
-                    self._test_list_resources('port', [port1],
-                                              neutron_context=n_context)
-                    # Tenant_2 request - must return single port
-                    n_context = context.Context('', 'tenant_2')
-                    self._test_list_resources('port', [port2],
-                                              neutron_context=n_context)
-
-    def test_list_ports_with_sort_native(self):
-        if self._skip_native_sorting:
-            self.skipTest("Skip test for not implemented sorting feature")
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port(admin_state_up='True',
-                       mac_address='00:00:00:00:00:01') as port1,\
-                self.port(admin_state_up='False',
-                          mac_address='00:00:00:00:00:02') as port2,\
-                self.port(admin_state_up='False',
-                          mac_address='00:00:00:00:00:03') as port3:
-            self._test_list_with_sort('port', (port3, port2, port1),
-                                      [('admin_state_up', 'asc'),
-                                       ('mac_address', 'desc')])
-
-    def test_list_ports_with_sort_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_sorting_helper',
-            new=_fake_get_sorting_helper)
-        helper_patcher.start()
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port(admin_state_up='True',
-                       mac_address='00:00:00:00:00:01') as port1,\
-                self.port(admin_state_up='False',
-                          mac_address='00:00:00:00:00:02') as port2,\
-                self.port(admin_state_up='False',
-                          mac_address='00:00:00:00:00:03') as port3:
-            self._test_list_with_sort('port', (port3, port2, port1),
-                                      [('admin_state_up', 'asc'),
-                                       ('mac_address', 'desc')])
-
-    def test_list_ports_with_pagination_native(self):
-        if self._skip_native_pagination:
-            self.skipTest("Skip test for not implemented pagination feature")
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port(mac_address='00:00:00:00:00:01') as port1,\
-                self.port(mac_address='00:00:00:00:00:02') as port2,\
-                self.port(mac_address='00:00:00:00:00:03') as port3:
-            self._test_list_with_pagination('port',
-                                            (port1, port2, port3),
-                                            ('mac_address', 'asc'), 2, 2)
-
-    def test_list_ports_with_pagination_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_pagination_helper',
-            new=_fake_get_pagination_helper)
-        helper_patcher.start()
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port(mac_address='00:00:00:00:00:01') as port1,\
-                self.port(mac_address='00:00:00:00:00:02') as port2,\
-                self.port(mac_address='00:00:00:00:00:03') as port3:
-            self._test_list_with_pagination('port',
-                                            (port1, port2, port3),
-                                            ('mac_address', 'asc'), 2, 2)
-
-    def test_list_ports_with_pagination_reverse_native(self):
-        if self._skip_native_pagination:
-            self.skipTest("Skip test for not implemented pagination feature")
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port(mac_address='00:00:00:00:00:01') as port1,\
-                self.port(mac_address='00:00:00:00:00:02') as port2,\
-                self.port(mac_address='00:00:00:00:00:03') as port3:
-            self._test_list_with_pagination_reverse('port',
-                                                    (port1, port2, port3),
-                                                    ('mac_address', 'asc'),
-                                                    2, 2)
-
-    def test_list_ports_with_pagination_reverse_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_pagination_helper',
-            new=_fake_get_pagination_helper)
-        helper_patcher.start()
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port(mac_address='00:00:00:00:00:01') as port1,\
-                self.port(mac_address='00:00:00:00:00:02') as port2,\
-                self.port(mac_address='00:00:00:00:00:03') as port3:
-            self._test_list_with_pagination_reverse('port',
-                                                    (port1, port2, port3),
-                                                    ('mac_address', 'asc'),
-                                                    2, 2)
-
-    def test_show_port(self):
-        with self.port() as port:
-            req = self.new_show_request('ports', port['port']['id'], self.fmt)
-            sport = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(port['port']['id'], sport['port']['id'])
-
-    def test_delete_port(self):
-        with self.port() as port:
-            self._delete('ports', port['port']['id'])
-            self._show('ports', port['port']['id'],
-                       expected_code=webob.exc.HTTPNotFound.code)
-
-    def test_delete_port_public_network(self):
-        with self.network(shared=True) as network:
-            port_res = self._create_port(self.fmt,
-                                         network['network']['id'],
-                                         webob.exc.HTTPCreated.code,
-                                         tenant_id='another_tenant',
-                                         set_context=True)
-
-            port = self.deserialize(self.fmt, port_res)
-            self._delete('ports', port['port']['id'])
-            self._show('ports', port['port']['id'],
-                       expected_code=webob.exc.HTTPNotFound.code)
-
-    def test_update_port(self):
-        with self.port() as port:
-            data = {'port': {'admin_state_up': False}}
-            req = self.new_update_request('ports', data, port['port']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['port']['admin_state_up'],
-                             data['port']['admin_state_up'])
-
-    def update_port_mac(self, port, updated_fixed_ips=None):
-        orig_mac = port['mac_address']
-        mac = orig_mac.split(':')
-        mac[5] = '01' if mac[5] != '01' else '00'
-        new_mac = ':'.join(mac)
-        data = {'port': {'mac_address': new_mac}}
-        if updated_fixed_ips:
-            data['port']['fixed_ips'] = updated_fixed_ips
-        req = self.new_update_request('ports', data, port['id'])
-        return req.get_response(self.api), new_mac
-
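-    # update_port_mac above flips only the last MAC octet, e.g.
-    # (illustrative values) '12:34:56:78:90:ab' -> '12:34:56:78:90:01'
-    # and '12:34:56:78:90:01' -> '12:34:56:78:90:00', so the new address
-    # always differs from the original.
-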
-    def _check_v6_auto_address_address(self, port, subnet):
-        if ipv6_utils.is_auto_address_subnet(subnet['subnet']):
-            port_mac = port['port']['mac_address']
-            subnet_cidr = subnet['subnet']['cidr']
-            eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
-                                                             port_mac))
-            self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
-                             eui_addr)
-
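-    # For SLAAC/auto-address subnets the expected fixed IP is the EUI-64
-    # address derived from the subnet prefix and the port MAC; e.g.
-    # (illustrative) prefix 2607:f0d0:1002:51::/64 with MAC
-    # fa:16:3e:00:00:01 yields 2607:f0d0:1002:51:f816:3eff:fe00:1.
-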
-    def check_update_port_mac(
-            self, expected_status=webob.exc.HTTPOk.code,
-            expected_error='StateInvalid', subnet=None,
-            device_owner=DEVICE_OWNER_COMPUTE, updated_fixed_ips=None,
-            host_arg=None, arg_list=None):
-        host_arg = host_arg or {}
-        arg_list = arg_list or []
-        with self.port(device_owner=device_owner, subnet=subnet,
-                       arg_list=arg_list, **host_arg) as port:
-            self.assertIn('mac_address', port['port'])
-            res, new_mac = self.update_port_mac(
-                port['port'], updated_fixed_ips=updated_fixed_ips)
-            self.assertEqual(expected_status, res.status_int)
-            if expected_status == webob.exc.HTTPOk.code:
-                result = self.deserialize(self.fmt, res)
-                self.assertIn('port', result)
-                self.assertEqual(new_mac, result['port']['mac_address'])
-                if subnet and subnet['subnet']['ip_version'] == 6:
-                    self._check_v6_auto_address_address(port, subnet)
-            else:
-                error = self.deserialize(self.fmt, res)
-                self.assertEqual(expected_error,
-                                 error['NeutronError']['type'])
-
-    def test_update_port_mac(self):
-        self.check_update_port_mac()
-        # sub-classes for plugins/drivers that support mac address update
-        # override this method
-
-    def test_update_dhcp_port_with_exceeding_fixed_ips(self):
-        """
-        Max fixed ips per port is configured in configuration file
-        by max_fixed_ips_per_port parameter.
-
-        DHCP port is not restricted by this parameter.
-        """
-        with self.subnet() as subnet:
-            updated_fixed_ips = [{'subnet_id': subnet['subnet']['id'],
-                                  'ip_address': '10.0.0.%s' % id}
-                                 for id in range(3,
-                                     cfg.CONF.max_fixed_ips_per_port + 4)]
-            host_arg = {}
-            arg_list = []
-            with self.port(device_owner=constants.DEVICE_OWNER_DHCP,
-                           subnet=subnet, arg_list=arg_list,
-                           **host_arg) as port:
-                data = {'port': {'fixed_ips': updated_fixed_ips}}
-                req = self.new_update_request('ports',
-                                              data, port['port']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-                result = self.deserialize(self.fmt, res)
-                for fixed_ip in updated_fixed_ips:
-                    self.assertIn(fixed_ip, result['port']['fixed_ips'])
-
-    def test_update_port_mac_ip(self):
-        with self.subnet() as subnet:
-            updated_fixed_ips = [{'subnet_id': subnet['subnet']['id'],
-                                  'ip_address': '10.0.0.3'}]
-            self.check_update_port_mac(subnet=subnet,
-                                       updated_fixed_ips=updated_fixed_ips)
-
-    def test_update_port_mac_v6_slaac(self):
-        with self.subnet(gateway_ip='fe80::1',
-                         cidr='2607:f0d0:1002:51::/64',
-                         ip_version=6,
-                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
-            self.assertTrue(
-                ipv6_utils.is_auto_address_subnet(subnet['subnet']))
-            self.check_update_port_mac(subnet=subnet)
-
-    def test_update_port_mac_bad_owner(self):
-        self.check_update_port_mac(
-            device_owner=DEVICE_OWNER_NOT_COMPUTE,
-            expected_status=webob.exc.HTTPConflict.code,
-            expected_error='UnsupportedPortDeviceOwner')
-
-    def check_update_port_mac_used(self, expected_error='MacAddressInUse'):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                with self.port(subnet=subnet) as port2:
-                    self.assertIn('mac_address', port['port'])
-                    new_mac = port2['port']['mac_address']
-                    data = {'port': {'mac_address': new_mac}}
-                    req = self.new_update_request('ports', data,
-                                                  port['port']['id'])
-                    res = req.get_response(self.api)
-                    self.assertEqual(webob.exc.HTTPConflict.code,
-                                     res.status_int)
-                    error = self.deserialize(self.fmt, res)
-                    self.assertEqual(expected_error,
-                                     error['NeutronError']['type'])
-
-    def test_update_port_mac_used(self):
-        self.check_update_port_mac_used()
-
-    def test_update_port_not_admin(self):
-        res = self._create_network(self.fmt, 'net1', True,
-                                   tenant_id='not_admin',
-                                   set_context=True)
-        net1 = self.deserialize(self.fmt, res)
-        res = self._create_port(self.fmt, net1['network']['id'],
-                                tenant_id='not_admin', set_context=True)
-        port = self.deserialize(self.fmt, res)
-        data = {'port': {'admin_state_up': False}}
-        neutron_context = context.Context('', 'not_admin')
-        port = self._update('ports', port['port']['id'], data,
-                            neutron_context=neutron_context)
-        self.assertFalse(port['port']['admin_state_up'])
-
-    def test_update_device_id_unchanged(self):
-        with self.port() as port:
-            data = {'port': {'admin_state_up': True,
-                             'device_id': port['port']['device_id']}}
-            req = self.new_update_request('ports', data, port['port']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertTrue(res['port']['admin_state_up'])
-
-    def test_update_device_id_null(self):
-        with self.port() as port:
-            data = {'port': {'device_id': None}}
-            req = self.new_update_request('ports', data, port['port']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_delete_network_if_port_exists(self):
-        with self.port() as port:
-            req = self.new_delete_request('networks',
-                                          port['port']['network_id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
-
-    def test_delete_network_port_exists_owned_by_network(self):
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        network_id = network['network']['id']
-        self._create_port(self.fmt, network_id,
-                          device_owner=constants.DEVICE_OWNER_DHCP)
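-        # Ports owned by the network itself (such as the DHCP port) are
-        # deleted automatically, so the network delete succeeds.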
-        req = self.new_delete_request('networks', network_id)
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_update_port_delete_ip(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                data = {'port': {'admin_state_up': False,
-                                 'fixed_ips': []}}
-                req = self.new_update_request('ports',
-                                              data, port['port']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                self.assertEqual(res['port']['admin_state_up'],
-                                 data['port']['admin_state_up'])
-                self.assertEqual(res['port']['fixed_ips'],
-                                 data['port']['fixed_ips'])
-
-    def test_no_more_port_exception(self):
-        with self.subnet(cidr='10.0.0.0/31', enable_dhcp=False) as subnet:
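-            # A /31 contains only the network and broadcast addresses, so
-            # the allocation pool is empty and port creation must fail.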
-            id = subnet['subnet']['network_id']
-            res = self._create_port(self.fmt, id)
-            data = self.deserialize(self.fmt, res)
-            msg = str(n_exc.IpAddressGenerationFailure(net_id=id))
-            self.assertEqual(data['NeutronError']['message'], msg)
-            self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
-
-    def test_update_port_update_ip(self):
-        """Test update of port IP.
-
-        Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10.
-        """
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                data = {'port': {'fixed_ips': [{'subnet_id':
-                                                subnet['subnet']['id'],
-                                                'ip_address': "10.0.0.10"}]}}
-                req = self.new_update_request('ports', data,
-                                              port['port']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                ips = res['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.10')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-
-    def test_update_port_update_ip_address_only(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                data = {'port': {'fixed_ips': [{'subnet_id':
-                                                subnet['subnet']['id'],
-                                                'ip_address': "10.0.0.10"},
-                                               {'ip_address': "10.0.0.2"}]}}
-                req = self.new_update_request('ports', data,
-                                              port['port']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                ips = res['port']['fixed_ips']
-                self.assertEqual(len(ips), 2)
-                self.assertIn({'ip_address': '10.0.0.2',
-                               'subnet_id': subnet['subnet']['id']}, ips)
-                self.assertIn({'ip_address': '10.0.0.10',
-                               'subnet_id': subnet['subnet']['id']}, ips)
-
-    def test_update_port_update_ips(self):
-        """Update IP and associate new IP on port.
-
-        Check a port update with the specified subnet_id's. A IP address
-        will be allocated for each subnet_id.
-        """
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                data = {'port': {'admin_state_up': False,
-                                 'fixed_ips': [{'subnet_id':
-                                                subnet['subnet']['id'],
-                                                'ip_address': '10.0.0.3'}]}}
-                req = self.new_update_request('ports', data,
-                                              port['port']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                self.assertEqual(res['port']['admin_state_up'],
-                                 data['port']['admin_state_up'])
-                ips = res['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-
-    def test_update_port_add_additional_ip(self):
-        """Test update of port with additional IP."""
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                data = {'port': {'admin_state_up': False,
-                                 'fixed_ips': [{'subnet_id':
-                                                subnet['subnet']['id']},
-                                               {'subnet_id':
-                                                subnet['subnet']['id']}]}}
-                req = self.new_update_request('ports', data,
-                                              port['port']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                self.assertEqual(res['port']['admin_state_up'],
-                                 data['port']['admin_state_up'])
-                ips = res['port']['fixed_ips']
-                self.assertEqual(len(ips), 2)
-                self.assertIn({'ip_address': '10.0.0.3',
-                               'subnet_id': subnet['subnet']['id']}, ips)
-                self.assertIn({'ip_address': '10.0.0.4',
-                               'subnet_id': subnet['subnet']['id']}, ips)
-
-    def test_update_port_invalid_fixed_ip_address_v6_slaac(self):
-        with self.subnet(
-            cidr='2607:f0d0:1002:51::/64',
-            ip_version=6,
-            ipv6_address_mode=constants.IPV6_SLAAC,
-            gateway_ip=attributes.ATTR_NOT_SPECIFIED) as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                port_mac = port['port']['mac_address']
-                subnet_cidr = subnet['subnet']['cidr']
-                eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
-                                                                 port_mac))
-                self.assertEqual(ips[0]['ip_address'], eui_addr)
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-
-                data = {'port': {'fixed_ips': [{'subnet_id':
-                                                subnet['subnet']['id'],
-                                                'ip_address':
-                                                '2607:f0d0:1002:51::5'}]}}
-                req = self.new_update_request('ports', data,
-                                              port['port']['id'])
-                res = req.get_response(self.api)
-                err = self.deserialize(self.fmt, res)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-                self.assertEqual(err['NeutronError']['type'], 'InvalidInput')
-
-    def test_requested_duplicate_mac(self):
-        with self.port() as port:
-            mac = port['port']['mac_address']
-            # check that MAC address matches base MAC
-            base_mac = cfg.CONF.base_mac[0:2]
-            self.assertTrue(mac.startswith(base_mac))
-            kwargs = {"mac_address": mac}
-            net_id = port['port']['network_id']
-            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-            self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
-
-    def test_mac_generation(self):
-        cfg.CONF.set_override('base_mac', "12:34:56:00:00:00")
-        with self.port() as port:
-            mac = port['port']['mac_address']
-            self.assertTrue(mac.startswith("12:34:56"))
-
-    def test_mac_generation_4octet(self):
-        cfg.CONF.set_override('base_mac', "12:34:56:78:00:00")
-        with self.port() as port:
-            mac = port['port']['mac_address']
-            self.assertTrue(mac.startswith("12:34:56:78"))
-
-    def test_bad_mac_format(self):
-        cfg.CONF.set_override('base_mac', "bad_mac")
-        self.assertRaises(Exception,
-                          self.plugin._check_base_mac_format)
-
-    def test_mac_exhaustion(self):
-        # Rather than actually exhausting every MAC address (which would
-        # take a very long time), simulate allocation of an
-        # already-allocated MAC address.
-        cfg.CONF.set_override('mac_generation_retries', 3)
-
-        res = self._create_network(fmt=self.fmt, name='net1',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        net_id = network['network']['id']
-
-        error = n_exc.MacAddressInUse(net_id=net_id, mac='00:11:22:33:44:55')
-        with mock.patch.object(
-                neutron.db.db_base_plugin_v2.NeutronDbPluginV2,
-                '_create_port_with_mac', side_effect=error) as create_mock:
-            res = self._create_port(self.fmt, net_id=net_id)
-            self.assertEqual(res.status_int,
-                             webob.exc.HTTPServiceUnavailable.code)
-            self.assertEqual(3, create_mock.call_count)
-
-    def test_requested_duplicate_ip(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                # Check that configuring a duplicate IP is rejected
-                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
-                                         'ip_address': ips[0]['ip_address']}]}
-                net_id = port['port']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
-
-    def test_requested_subnet_id(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                # Request an IP from a specific subnet
-                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}]}
-                net_id = port['port']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                port2 = self.deserialize(self.fmt, res)
-                ips = port2['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                self._delete('ports', port2['port']['id'])
-
-    def test_requested_subnet_id_not_on_network(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                # Create new network
-                res = self._create_network(fmt=self.fmt, name='net2',
-                                           admin_state_up=True)
-                network2 = self.deserialize(self.fmt, res)
-                subnet2 = self._make_subnet(self.fmt, network2, "1.1.1.1",
-                                            "1.1.1.0/24", ip_version=4)
-                # Request an IP from a subnet on a different network
-                kwargs = {"fixed_ips": [{'subnet_id':
-                                         subnet2['subnet']['id']}]}
-                net_id = port['port']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_overlapping_subnets(self):
-        with self.subnet() as subnet:
-            tenant_id = subnet['subnet']['tenant_id']
-            net_id = subnet['subnet']['network_id']
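-            # 10.0.0.225/28 overlaps the 10.0.0.0/24 subnet created above,
-            # so the request must be rejected.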
-            res = self._create_subnet(self.fmt,
-                                      tenant_id=tenant_id,
-                                      net_id=net_id,
-                                      cidr='10.0.0.225/28',
-                                      ip_version=4,
-                                      gateway_ip=attributes.ATTR_NOT_SPECIFIED)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_requested_subnet_id_v4_and_v6(self):
-        with self.subnet() as subnet:
-            # Get an IPv4 and an IPv6 address
-            tenant_id = subnet['subnet']['tenant_id']
-            net_id = subnet['subnet']['network_id']
-            res = self._create_subnet(
-                self.fmt,
-                tenant_id=tenant_id,
-                net_id=net_id,
-                cidr='2607:f0d0:1002:51::/124',
-                ip_version=6,
-                gateway_ip=attributes.ATTR_NOT_SPECIFIED)
-            subnet2 = self.deserialize(self.fmt, res)
-            kwargs = {"fixed_ips":
-                      [{'subnet_id': subnet['subnet']['id']},
-                       {'subnet_id': subnet2['subnet']['id']}]}
-            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-            port3 = self.deserialize(self.fmt, res)
-            ips = port3['port']['fixed_ips']
-            self.assertEqual(len(ips), 2)
-            self.assertIn({'ip_address': '10.0.0.2',
-                           'subnet_id': subnet['subnet']['id']}, ips)
-            self.assertIn({'ip_address': '2607:f0d0:1002:51::2',
-                           'subnet_id': subnet2['subnet']['id']}, ips)
-            res = self._create_port(self.fmt, net_id=net_id)
-            port4 = self.deserialize(self.fmt, res)
-            # Check that a v4 and a v6 address are allocated
-            ips = port4['port']['fixed_ips']
-            self.assertEqual(len(ips), 2)
-            self.assertIn({'ip_address': '10.0.0.3',
-                           'subnet_id': subnet['subnet']['id']}, ips)
-            self.assertIn({'ip_address': '2607:f0d0:1002:51::3',
-                           'subnet_id': subnet2['subnet']['id']}, ips)
-            self._delete('ports', port3['port']['id'])
-            self._delete('ports', port4['port']['id'])
-
-    def test_requested_invalid_fixed_ip_address_v6_slaac(self):
-        with self.subnet(gateway_ip='fe80::1',
-                         cidr='2607:f0d0:1002:51::/64',
-                         ip_version=6,
-                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
-            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
-                                     'ip_address': '2607:f0d0:1002:51::5'}]}
-            net_id = subnet['subnet']['network_id']
-            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-            self.assertEqual(res.status_int,
-                             webob.exc.HTTPClientError.code)
-
-    @mock.patch.object(non_ipam.IpamNonPluggableBackend,
-                       '_allocate_specific_ip')
-    def test_requested_fixed_ip_address_v6_slaac_router_iface(
-            self, alloc_specific_ip):
-        with self.subnet(gateway_ip='fe80::1',
-                         cidr='fe80::/64',
-                         ip_version=6,
-                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
-            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
-                                     'ip_address': 'fe80::1'}]}
-            net_id = subnet['subnet']['network_id']
-            device_owner = constants.DEVICE_OWNER_ROUTER_INTF
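-            # The SLAAC subnet's gateway IP may be assigned directly to a
-            # router interface; no specific-IP allocation should occur.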
-            res = self._create_port(self.fmt, net_id=net_id,
-                                    device_owner=device_owner, **kwargs)
-            port = self.deserialize(self.fmt, res)
-            self.assertEqual(len(port['port']['fixed_ips']), 1)
-            self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
-                             'fe80::1')
-            self.assertFalse(alloc_specific_ip.called)
-
-    def test_requested_subnet_id_v6_slaac(self):
-        with self.subnet(gateway_ip='fe80::1',
-                         cidr='2607:f0d0:1002:51::/64',
-                         ip_version=6,
-                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
-            with self.port(subnet,
-                           fixed_ips=[{'subnet_id':
-                                       subnet['subnet']['id']}]) as port:
-                port_mac = port['port']['mac_address']
-                subnet_cidr = subnet['subnet']['cidr']
-                eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
-                                                                 port_mac))
-                self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
-                                 eui_addr)
-
-    def test_requested_subnet_id_v4_and_v6_slaac(self):
-        with self.network() as network:
-            with self.subnet(network) as subnet,\
-                    self.subnet(
-                        network,
-                        cidr='2607:f0d0:1002:51::/64',
-                        ip_version=6,
-                        gateway_ip='fe80::1',
-                        ipv6_address_mode=constants.IPV6_SLAAC) as subnet2:
-                with self.port(
-                    subnet,
-                    fixed_ips=[{'subnet_id': subnet['subnet']['id']},
-                               {'subnet_id': subnet2['subnet']['id']}]
-                ) as port:
-                    ips = port['port']['fixed_ips']
-                    self.assertEqual(len(ips), 2)
-                    self.assertIn({'ip_address': '10.0.0.2',
-                                   'subnet_id': subnet['subnet']['id']}, ips)
-                    port_mac = port['port']['mac_address']
-                    subnet_cidr = subnet2['subnet']['cidr']
-                    eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(
-                            subnet_cidr, port_mac))
-                    self.assertIn({'ip_address': eui_addr,
-                                   'subnet_id': subnet2['subnet']['id']}, ips)
-
-    def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self):
-        with self.network() as network:
-            # Create an IPv4 and an IPv6 SLAAC subnet on the network
-            with self.subnet(network),\
-                    self.subnet(network,
-                                cidr='2607:f0d0:1002:51::/64',
-                                ip_version=6,
-                                gateway_ip='fe80::1',
-                                ipv6_address_mode=constants.IPV6_SLAAC):
-                # Create a router port without specifying fixed_ips
-                port = self._make_port(
-                    self.fmt, network['network']['id'],
-                    device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
-                # Router port should only have an IPv4 address
-                fixed_ips = port['port']['fixed_ips']
-                self.assertEqual(1, len(fixed_ips))
-                self.assertEqual('10.0.0.2', fixed_ips[0]['ip_address'])
-
-    def _make_v6_subnet(self, network, ra_addr_mode, ipv6_pd=False):
-        cidr = 'fe80::/64'
-        gateway = 'fe80::1'
-        if ipv6_pd:
-            cidr = None
-            gateway = None
-            cfg.CONF.set_override('ipv6_pd_enabled', True)
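-            # With prefix delegation enabled the subnet is created without
-            # a CIDR; a provisional prefix is used until one is delegated.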
-        return (self._make_subnet(self.fmt, network, gateway=gateway,
-                                  cidr=cidr, ip_version=6,
-                                  ipv6_ra_mode=ra_addr_mode,
-                                  ipv6_address_mode=ra_addr_mode))
-
-    @staticmethod
-    def _calc_ipv6_addr_by_EUI64(port, subnet):
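-        # Worked example: MAC 00:16:3e:33:44:55 on fe80::/64 yields
-        # fe80::216:3eff:fe33:4455 (U/L bit flipped, ff:fe inserted).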
-        port_mac = port['port']['mac_address']
-        subnet_cidr = subnet['subnet']['cidr']
-        return str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac))
-
-    def test_ip_allocation_for_ipv6_subnet_slaac_address_mode(self):
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_v6_subnet(network, constants.IPV6_SLAAC)
-        port = self._make_port(self.fmt, network['network']['id'])
-        self.assertEqual(1, len(port['port']['fixed_ips']))
-        self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet),
-                         port['port']['fixed_ips'][0]['ip_address'])
-
-    def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode,
-                                                        ipv6_pd=False):
-        """Test port create with an IPv6 subnet incl in fixed IPs."""
-        with self.network(name='net') as network:
-            subnet = self._make_v6_subnet(network, addr_mode, ipv6_pd)
-            subnet_id = subnet['subnet']['id']
-            fixed_ips = [{'subnet_id': subnet_id}]
-            with self.port(subnet=subnet, fixed_ips=fixed_ips) as port:
-                if addr_mode == constants.IPV6_SLAAC:
-                    exp_ip_addr = self._calc_ipv6_addr_by_EUI64(port, subnet)
-                else:
-                    exp_ip_addr = 'fe80::2'
-                port_fixed_ips = port['port']['fixed_ips']
-                self.assertEqual(1, len(port_fixed_ips))
-                self.assertEqual(exp_ip_addr,
-                                 port_fixed_ips[0]['ip_address'])
-
-    def test_create_port_with_ipv6_slaac_subnet_in_fixed_ips(self):
-        self._test_create_port_with_ipv6_subnet_in_fixed_ips(
-            addr_mode=constants.IPV6_SLAAC)
-
-    def test_create_port_with_ipv6_pd_subnet_in_fixed_ips(self):
-        self._test_create_port_with_ipv6_subnet_in_fixed_ips(
-            addr_mode=constants.IPV6_SLAAC, ipv6_pd=True)
-
-    def test_create_port_with_ipv6_dhcp_stateful_subnet_in_fixed_ips(self):
-        self._test_create_port_with_ipv6_subnet_in_fixed_ips(
-            addr_mode=constants.DHCPV6_STATEFUL)
-
-    def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self):
-        """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets."""
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        sub_dicts = [
-            {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
-             'ip_version': 4, 'ra_addr_mode': None},
-            {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24',
-             'ip_version': 4, 'ra_addr_mode': None},
-            {'gateway': 'fe80::1', 'cidr': 'fe80::/64',
-             'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
-            {'gateway': 'fe81::1', 'cidr': 'fe81::/64',
-             'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
-            {'gateway': 'fe82::1', 'cidr': 'fe82::/64',
-             'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL},
-            {'gateway': 'fe83::1', 'cidr': 'fe83::/64',
-             'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}]
-        subnets = {}
-        for sub_dict in sub_dicts:
-            subnet = self._make_subnet(
-                self.fmt, network,
-                gateway=sub_dict['gateway'],
-                cidr=sub_dict['cidr'],
-                ip_version=sub_dict['ip_version'],
-                ipv6_ra_mode=sub_dict['ra_addr_mode'],
-                ipv6_address_mode=sub_dict['ra_addr_mode'])
-            subnets[subnet['subnet']['id']] = sub_dict
-        res = self._create_port(self.fmt, net_id=network['network']['id'])
-        port = self.deserialize(self.fmt, res)
-        # Since the create port request was made without a list of fixed IPs,
-        # the port should be associated with addresses for one of the
-        # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6
-        # SLAAC subnets.
-        self.assertEqual(4, len(port['port']['fixed_ips']))
-        addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0,
-                           constants.IPV6_SLAAC: 0}
-        for fixed_ip in port['port']['fixed_ips']:
-            subnet_id = fixed_ip['subnet_id']
-            if subnet_id in subnets:
-                addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1
-        self.assertEqual(1, addr_mode_count[None])
-        self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL])
-        self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC])
-
-    def test_delete_port_with_ipv6_slaac_address(self):
-        """Test that a port with an IPv6 SLAAC address can be deleted."""
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        # Create a port that has an associated IPv6 SLAAC address
-        self._make_v6_subnet(network, constants.IPV6_SLAAC)
-        res = self._create_port(self.fmt, net_id=network['network']['id'])
-        port = self.deserialize(self.fmt, res)
-        self.assertEqual(1, len(port['port']['fixed_ips']))
-        # Confirm that the port can be deleted
-        self._delete('ports', port['port']['id'])
-        self._show('ports', port['port']['id'],
-                   expected_code=webob.exc.HTTPNotFound.code)
-
-    def test_update_port_with_ipv6_slaac_subnet_in_fixed_ips(self):
-        """Test port update with an IPv6 SLAAC subnet in fixed IPs."""
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet
-        self._make_subnet(self.fmt, network, gateway='10.0.0.1',
-                          cidr='10.0.0.0/24', ip_version=4)
-        subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC)
-        res = self._create_port(self.fmt, net_id=network['network']['id'])
-        port = self.deserialize(self.fmt, res)
-        self.assertEqual(2, len(port['port']['fixed_ips']))
-        # Update port including only the IPv6 SLAAC subnet
-        data = {'port': {'fixed_ips': [{'subnet_id':
-                                        subnet_v6['subnet']['id']}]}}
-        req = self.new_update_request('ports', data,
-                                      port['port']['id'])
-        res = self.deserialize(self.fmt, req.get_response(self.api))
-        # Port should only have an address corresponding to IPv6 SLAAC subnet
-        ips = res['port']['fixed_ips']
-        self.assertEqual(1, len(ips))
-        self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet_v6),
-                         ips[0]['ip_address'])
-
-    def test_update_port_excluding_ipv6_slaac_subnet_from_fixed_ips(self):
-        """Test port update excluding IPv6 SLAAC subnet from fixed ips."""
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet
-        subnet_v4 = self._make_subnet(self.fmt, network, gateway='10.0.0.1',
-                                      cidr='10.0.0.0/24', ip_version=4)
-        subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC)
-        res = self._create_port(self.fmt, net_id=network['network']['id'])
-        port = self.deserialize(self.fmt, res)
-        self.assertEqual(2, len(port['port']['fixed_ips']))
-        # Update port including only the IPv4 subnet
-        data = {'port': {'fixed_ips': [{'subnet_id':
-                                        subnet_v4['subnet']['id'],
-                                        'ip_address': "10.0.0.10"}]}}
-        req = self.new_update_request('ports', data,
-                                      port['port']['id'])
-        res = self.deserialize(self.fmt, req.get_response(self.api))
-        # Port should still have an addr corresponding to IPv6 SLAAC subnet
-        ips = res['port']['fixed_ips']
-        self.assertEqual(2, len(ips))
-        eui_addr = self._calc_ipv6_addr_by_EUI64(port, subnet_v6)
-        expected_v6_ip = {'subnet_id': subnet_v6['subnet']['id'],
-                          'ip_address': eui_addr}
-        self.assertIn(expected_v6_ip, ips)
-
-    def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self):
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        v6_subnet_1 = self._make_subnet(self.fmt, network,
-                                        gateway='2001:100::1',
-                                        cidr='2001:100::0/64',
-                                        ip_version=6,
-                                        ipv6_ra_mode=constants.IPV6_SLAAC)
-        v6_subnet_2 = self._make_subnet(self.fmt, network,
-                                        gateway='2001:200::1',
-                                        cidr='2001:200::0/64',
-                                        ip_version=6,
-                                        ipv6_ra_mode=constants.IPV6_SLAAC)
-        port = self._make_port(self.fmt, network['network']['id'])
-        port_mac = port['port']['mac_address']
-        cidr_1 = v6_subnet_1['subnet']['cidr']
-        cidr_2 = v6_subnet_2['subnet']['cidr']
-        eui_addr_1 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_1,
-                                                           port_mac))
-        eui_addr_2 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_2,
-                                                           port_mac))
-        self.assertEqual({eui_addr_1, eui_addr_2},
-                         {fixed_ip['ip_address'] for fixed_ip in
-                          port['port']['fixed_ips']})
-
-    def test_range_allocation(self):
-        with self.subnet(gateway_ip='10.0.0.3',
-                         cidr='10.0.0.0/29') as subnet:
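-            # 10.0.0.0/29 yields usable addresses .1-.6; the gateway .3 is
-            # excluded, leaving exactly the five addresses requested below.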
-                kwargs = {"fixed_ips":
-                          [{'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']}]}
-                net_id = subnet['subnet']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                port = self.deserialize(self.fmt, res)
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 5)
-                alloc = ['10.0.0.1', '10.0.0.2', '10.0.0.4', '10.0.0.5',
-                         '10.0.0.6']
-                for ip in ips:
-                    self.assertIn(ip['ip_address'], alloc)
-                    self.assertEqual(ip['subnet_id'],
-                                     subnet['subnet']['id'])
-                    alloc.remove(ip['ip_address'])
-                self.assertEqual(len(alloc), 0)
-                self._delete('ports', port['port']['id'])
-
-        with self.subnet(gateway_ip='11.0.0.6',
-                         cidr='11.0.0.0/29') as subnet:
-                kwargs = {"fixed_ips":
-                          [{'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']}]}
-                net_id = subnet['subnet']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                port = self.deserialize(self.fmt, res)
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 5)
-                alloc = ['11.0.0.1', '11.0.0.2', '11.0.0.3', '11.0.0.4',
-                         '11.0.0.5']
-                for ip in ips:
-                    self.assertIn(ip['ip_address'], alloc)
-                    self.assertEqual(ip['subnet_id'],
-                                     subnet['subnet']['id'])
-                    alloc.remove(ip['ip_address'])
-                self.assertEqual(len(alloc), 0)
-                self._delete('ports', port['port']['id'])
-
-    def test_requested_invalid_fixed_ips(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                # Test invalid subnet_id
-                kwargs = {"fixed_ips":
-                          [{'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id':
-                            '00000000-ffff-ffff-ffff-000000000000'}]}
-                net_id = port['port']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
-
-                # Test invalid IP address on specified subnet_id
-                kwargs = {"fixed_ips":
-                          [{'subnet_id': subnet['subnet']['id'],
-                            'ip_address': '1.1.1.1'}]}
-                net_id = port['port']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-                # Test invalid addresses: IPs not on the subnet, plus the
-                # network and broadcast addresses
-                bad_ips = ['1.1.1.1', '10.0.0.0', '10.0.0.255']
-                net_id = port['port']['network_id']
-                for ip in bad_ips:
-                    kwargs = {"fixed_ips": [{'ip_address': ip}]}
-                    res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPClientError.code)
-
-                # The gateway address itself may be explicitly allocated
-                kwargs = {"fixed_ips":
-                          [{'subnet_id': subnet['subnet']['id'],
-                            'ip_address': '10.0.0.1'}]}
-                net_id = port['port']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                port2 = self.deserialize(self.fmt, res)
-                ips = port2['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.1')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                self._delete('ports', port2['port']['id'])
-
-    def test_invalid_ip(self):
-        with self.subnet() as subnet:
-            # Allocate specific IP
-            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
-                                     'ip_address': '1011.0.0.5'}]}
-            net_id = subnet['subnet']['network_id']
-            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_requested_split(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ports_to_delete = []
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                # Allocate specific IP
-                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
-                                         'ip_address': '10.0.0.5'}]}
-                net_id = port['port']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                port2 = self.deserialize(self.fmt, res)
-                ports_to_delete.append(port2)
-                ips = port2['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.5')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                # Allocate specific IPs
-                allocated = ['10.0.0.3', '10.0.0.4', '10.0.0.6']
-
-                for a in allocated:
-                    res = self._create_port(self.fmt, net_id=net_id)
-                    port2 = self.deserialize(self.fmt, res)
-                    ports_to_delete.append(port2)
-                    ips = port2['port']['fixed_ips']
-                    self.assertEqual(len(ips), 1)
-                    self.assertEqual(ips[0]['ip_address'], a)
-                    self.assertEqual(ips[0]['subnet_id'],
-                                     subnet['subnet']['id'])
-
-                for p in ports_to_delete:
-                    self._delete('ports', p['port']['id'])
-
-    def test_duplicate_ips(self):
-        with self.subnet() as subnet:
-            # Allocate specific IP
-            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
-                                     'ip_address': '10.0.0.5'},
-                                    {'subnet_id': subnet['subnet']['id'],
-                                     'ip_address': '10.0.0.5'}]}
-            net_id = subnet['subnet']['network_id']
-            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_fixed_ip_invalid_subnet_id(self):
-        with self.subnet() as subnet:
-            # Allocate specific IP
-            kwargs = {"fixed_ips": [{'subnet_id': 'i am invalid',
-                                     'ip_address': '10.0.0.5'}]}
-            net_id = subnet['subnet']['network_id']
-            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_fixed_ip_invalid_ip(self):
-        with self.subnet() as subnet:
-            # Allocate specific IP
-            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
-                                     'ip_address': '10.0.0.55555'}]}
-            net_id = subnet['subnet']['network_id']
-            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_requested_ips_only(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22', '10.0.0.21',
-                            '10.0.0.3', '10.0.0.17', '10.0.0.19']
-                ports_to_delete = []
-                for i in ips_only:
-                    kwargs = {"fixed_ips": [{'ip_address': i}]}
-                    net_id = port['port']['network_id']
-                    res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                    port = self.deserialize(self.fmt, res)
-                    ports_to_delete.append(port)
-                    ips = port['port']['fixed_ips']
-                    self.assertEqual(len(ips), 1)
-                    self.assertEqual(ips[0]['ip_address'], i)
-                    self.assertEqual(ips[0]['subnet_id'],
-                                     subnet['subnet']['id'])
-                for p in ports_to_delete:
-                    self._delete('ports', p['port']['id'])
-
-    def test_invalid_admin_state(self):
-        with self.network() as network:
-            data = {'port': {'network_id': network['network']['id'],
-                             'tenant_id': network['network']['tenant_id'],
-                             'admin_state_up': 7,
-                             'fixed_ips': []}}
-            port_req = self.new_create_request('ports', data)
-            res = port_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_invalid_mac_address(self):
-        with self.network() as network:
-            data = {'port': {'network_id': network['network']['id'],
-                             'tenant_id': network['network']['tenant_id'],
-                             'admin_state_up': 1,
-                             'mac_address': 'mac',
-                             'fixed_ips': []}}
-            port_req = self.new_create_request('ports', data)
-            res = port_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_max_fixed_ips_exceeded(self):
-        with self.subnet(gateway_ip='10.0.0.3',
-                         cidr='10.0.0.0/24') as subnet:
-                kwargs = {"fixed_ips":
-                          [{'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']},
-                           {'subnet_id': subnet['subnet']['id']}]}
-                net_id = subnet['subnet']['network_id']
-                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_update_max_fixed_ips_exceeded(self):
-        with self.subnet(gateway_ip='10.0.0.3',
-                         cidr='10.0.0.0/24') as subnet:
-            with self.port(subnet) as port:
-                data = {'port': {'fixed_ips':
-                                 [{'subnet_id': subnet['subnet']['id'],
-                                   'ip_address': '10.0.0.2'},
-                                  {'subnet_id': subnet['subnet']['id'],
-                                   'ip_address': '10.0.0.4'},
-                                  {'subnet_id': subnet['subnet']['id']},
-                                  {'subnet_id': subnet['subnet']['id']},
-                                  {'subnet_id': subnet['subnet']['id']},
-                                  {'subnet_id': subnet['subnet']['id']}]}}
-                req = self.new_update_request('ports', data,
-                                              port['port']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_delete_ports_by_device_id(self):
-        plugin = manager.NeutronManager.get_plugin()
-        ctx = context.get_admin_context()
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet, device_id='owner1') as p1,\
-                    self.port(subnet=subnet, device_id='owner1') as p2,\
-                    self.port(subnet=subnet, device_id='owner2') as p3:
-                network_id = subnet['subnet']['network_id']
-                plugin.delete_ports_by_device_id(ctx, 'owner1',
-                                                 network_id)
-                self._show('ports', p1['port']['id'],
-                           expected_code=webob.exc.HTTPNotFound.code)
-                self._show('ports', p2['port']['id'],
-                           expected_code=webob.exc.HTTPNotFound.code)
-                self._show('ports', p3['port']['id'],
-                           expected_code=webob.exc.HTTPOk.code)
-
-    def _test_delete_ports_by_device_id_second_call_failure(self, plugin):
-        ctx = context.get_admin_context()
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet, device_id='owner1') as p1,\
-                    self.port(subnet=subnet, device_id='owner1') as p2,\
-                    self.port(subnet=subnet, device_id='owner2') as p3:
-                orig = plugin.delete_port
-                with mock.patch.object(plugin, 'delete_port') as del_port:
-
-                    def side_effect(*args, **kwargs):
-                        return self._fail_second_call(del_port, orig,
-                                                      *args, **kwargs)
-
-                    del_port.side_effect = side_effect
-                    network_id = subnet['subnet']['network_id']
-                    self.assertRaises(n_exc.NeutronException,
-                                      plugin.delete_ports_by_device_id,
-                                      ctx, 'owner1', network_id)
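-                # delete_port fails on the second call, so exactly one of
-                # p1/p2 was deleted and the other must still exist.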
-                statuses = {
-                    self._show_response('ports', p['port']['id']).status_int
-                    for p in [p1, p2]}
-                expected = {webob.exc.HTTPNotFound.code, webob.exc.HTTPOk.code}
-                self.assertEqual(expected, statuses)
-                self._show('ports', p3['port']['id'],
-                           expected_code=webob.exc.HTTPOk.code)
-
-    def test_delete_ports_by_device_id_second_call_failure(self):
-        plugin = manager.NeutronManager.get_plugin()
-        self._test_delete_ports_by_device_id_second_call_failure(plugin)
-
-    def _test_delete_ports_ignores_port_not_found(self, plugin):
-        ctx = context.get_admin_context()
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet, device_id='owner1') as p,\
-                    mock.patch.object(plugin, 'delete_port') as del_port:
-                del_port.side_effect = n_exc.PortNotFound(
-                    port_id=p['port']['id']
-                )
-                network_id = subnet['subnet']['network_id']
-                try:
-                    plugin.delete_ports_by_device_id(ctx, 'owner1',
-                                                     network_id)
-                except n_exc.PortNotFound:
-                    self.fail("delete_ports_by_device_id unexpectedly raised "
-                              "a PortNotFound exception. It should ignore "
-                              "this exception because it is often called at "
-                              "the same time other concurrent operations are "
-                              "deleting some of the same ports.")
-
-    def test_delete_ports_ignores_port_not_found(self):
-        plugin = manager.NeutronManager.get_plugin()
-        self._test_delete_ports_ignores_port_not_found(plugin)
-
-
-class TestNetworksV2(NeutronDbPluginV2TestCase):
-    # NOTE(cerberus): successful network update and delete are
-    #                 effectively tested above
-    def test_create_network(self):
-        name = 'net1'
-        keys = [('subnets', []), ('name', name), ('admin_state_up', True),
-                ('status', self.net_create_status), ('shared', False)]
-        with self.network(name=name) as net:
-            for k, v in keys:
-                self.assertEqual(net['network'][k], v)
-
-    def test_create_public_network(self):
-        name = 'public_net'
-        keys = [('subnets', []), ('name', name), ('admin_state_up', True),
-                ('status', self.net_create_status), ('shared', True)]
-        with self.network(name=name, shared=True) as net:
-            for k, v in keys:
-                self.assertEqual(net['network'][k], v)
-
-    def test_create_public_network_no_admin_tenant(self):
-        name = 'public_net'
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            with self.network(name=name,
-                              shared=True,
-                              tenant_id="another_tenant",
-                              set_context=True):
-                pass
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPForbidden.code)
-
-    def test_update_network(self):
-        with self.network() as network:
-            data = {'network': {'name': 'a_brand_new_name'}}
-            req = self.new_update_request('networks',
-                                          data,
-                                          network['network']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['network']['name'],
-                             data['network']['name'])
-
-    def test_update_shared_network_noadmin_returns_403(self):
-        with self.network(shared=True) as network:
-            data = {'network': {'name': 'a_brand_new_name'}}
-            req = self.new_update_request('networks',
-                                          data,
-                                          network['network']['id'])
-            req.environ['neutron.context'] = context.Context('', 'somebody')
-            res = req.get_response(self.api)
-            # The API layer always returns 404 on updates in place of 403
-            self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
-
-    def test_update_network_set_shared(self):
-        with self.network(shared=False) as network:
-            data = {'network': {'shared': True}}
-            req = self.new_update_request('networks',
-                                          data,
-                                          network['network']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertTrue(res['network']['shared'])
-
-    def test_update_network_set_shared_owner_returns_403(self):
-        with self.network(shared=False) as network:
-            net_owner = network['network']['tenant_id']
-            data = {'network': {'shared': True}}
-            req = self.new_update_request('networks',
-                                          data,
-                                          network['network']['id'])
-            req.environ['neutron.context'] = context.Context('u', net_owner)
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPForbidden.code)
-
-    def test_update_network_with_subnet_set_shared(self):
-        with self.network(shared=False) as network:
-            with self.subnet(network=network) as subnet:
-                data = {'network': {'shared': True}}
-                req = self.new_update_request('networks',
-                                              data,
-                                              network['network']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                self.assertTrue(res['network']['shared'])
-                # must query the db to see whether the subnet's shared
-                # attribute has been updated or not
-                ctx = context.Context('', '', is_admin=True)
-                subnet_db = manager.NeutronManager.get_plugin().get_subnet(
-                    ctx, subnet['subnet']['id'])
-                self.assertTrue(subnet_db['shared'])
-
-    def test_update_network_set_not_shared_single_tenant(self):
-        with self.network(shared=True) as network:
-            res1 = self._create_port(self.fmt,
-                                     network['network']['id'],
-                                     webob.exc.HTTPCreated.code,
-                                     tenant_id=network['network']['tenant_id'],
-                                     set_context=True)
-            data = {'network': {'shared': False}}
-            req = self.new_update_request('networks',
-                                          data,
-                                          network['network']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertFalse(res['network']['shared'])
-            port1 = self.deserialize(self.fmt, res1)
-            self._delete('ports', port1['port']['id'])
-
-    def test_update_network_set_not_shared_other_tenant_returns_409(self):
-        with self.network(shared=True) as network:
-            res1 = self._create_port(self.fmt,
-                                     network['network']['id'],
-                                     webob.exc.HTTPCreated.code,
-                                     tenant_id='somebody_else',
-                                     set_context=True)
-            data = {'network': {'shared': False}}
-            req = self.new_update_request('networks',
-                                          data,
-                                          network['network']['id'])
-            self.assertEqual(req.get_response(self.api).status_int,
-                             webob.exc.HTTPConflict.code)
-            port1 = self.deserialize(self.fmt, res1)
-            self._delete('ports', port1['port']['id'])
-
-    def test_update_network_set_not_shared_multi_tenants_returns_409(self):
-        with self.network(shared=True) as network:
-            res1 = self._create_port(self.fmt,
-                                     network['network']['id'],
-                                     webob.exc.HTTPCreated.code,
-                                     tenant_id='somebody_else',
-                                     set_context=True)
-            res2 = self._create_port(self.fmt,
-                                     network['network']['id'],
-                                     webob.exc.HTTPCreated.code,
-                                     tenant_id=network['network']['tenant_id'],
-                                     set_context=True)
-            data = {'network': {'shared': False}}
-            req = self.new_update_request('networks',
-                                          data,
-                                          network['network']['id'])
-            self.assertEqual(req.get_response(self.api).status_int,
-                             webob.exc.HTTPConflict.code)
-            port1 = self.deserialize(self.fmt, res1)
-            port2 = self.deserialize(self.fmt, res2)
-            self._delete('ports', port1['port']['id'])
-            self._delete('ports', port2['port']['id'])
-
-    def test_update_network_set_not_shared_multi_tenants2_returns_409(self):
-        with self.network(shared=True) as network:
-            res1 = self._create_port(self.fmt,
-                                     network['network']['id'],
-                                     webob.exc.HTTPCreated.code,
-                                     tenant_id='somebody_else',
-                                     set_context=True)
-            self._create_subnet(self.fmt,
-                                network['network']['id'],
-                                '10.0.0.0/24',
-                                webob.exc.HTTPCreated.code,
-                                tenant_id=network['network']['tenant_id'],
-                                set_context=True)
-            data = {'network': {'shared': False}}
-            req = self.new_update_request('networks',
-                                          data,
-                                          network['network']['id'])
-            self.assertEqual(req.get_response(self.api).status_int,
-                             webob.exc.HTTPConflict.code)
-
-            port1 = self.deserialize(self.fmt, res1)
-            self._delete('ports', port1['port']['id'])
-
-    def test_create_networks_bulk_native(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk network create")
-        res = self._create_network_bulk(self.fmt, 2, 'test', True)
-        self._validate_behavior_on_bulk_success(res, 'networks')
-
-    def test_create_networks_bulk_native_quotas(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk network create")
-        quota = 4
-        cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
-        res = self._create_network_bulk(self.fmt, quota + 1, 'test', True)
-        self._validate_behavior_on_bulk_failure(
-            res, 'networks',
-            errcode=webob.exc.HTTPConflict.code)
-
-    def test_create_networks_bulk_tenants_and_quotas(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk network create")
-        quota = 2
-        cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
-        networks = [{'network': {'name': 'n1',
-                                 'tenant_id': self._tenant_id}},
-                    {'network': {'name': 'n2',
-                                 'tenant_id': self._tenant_id}},
-                    {'network': {'name': 'n1',
-                                 'tenant_id': 't1'}},
-                    {'network': {'name': 'n2',
-                                 'tenant_id': 't1'}}]
-
-        res = self._create_bulk_from_list(self.fmt, 'network', networks)
-        self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-
-    def test_create_networks_bulk_tenants_and_quotas_fail(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk network create")
-        quota = 2
-        cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
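-        # self._tenant_id asks for three networks against a quota of two,
-        # so the whole bulk request must fail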
-        networks = [{'network': {'name': 'n1',
-                                 'tenant_id': self._tenant_id}},
-                    {'network': {'name': 'n2',
-                                 'tenant_id': self._tenant_id}},
-                    {'network': {'name': 'n1',
-                                 'tenant_id': 't1'}},
-                    {'network': {'name': 'n3',
-                                 'tenant_id': self._tenant_id}},
-                    {'network': {'name': 'n2',
-                                 'tenant_id': 't1'}}]
-
-        res = self._create_bulk_from_list(self.fmt, 'network', networks)
-        self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
-
-    def test_create_networks_bulk_emulated(self):
-        real_has_attr = hasattr
-
-        # ensures the API chooses the emulation code path
-        def fakehasattr(item, attr):
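-            # the attribute is name-mangled on the controller class
-            # (e.g. _Controller__native_bulk_support), so match its suffix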
-            if attr.endswith('__native_bulk_support'):
-                return False
-            return real_has_attr(item, attr)
-
-        with mock.patch('six.moves.builtins.hasattr',
-                        new=fakehasattr):
-            res = self._create_network_bulk(self.fmt, 2, 'test', True)
-            self._validate_behavior_on_bulk_success(res, 'networks')
-
-    def test_create_networks_bulk_wrong_input(self):
-        res = self._create_network_bulk(self.fmt, 2, 'test', True,
-                                        override={1:
-                                                  {'admin_state_up': 'doh'}})
-        self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-        req = self.new_list_request('networks')
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-        nets = self.deserialize(self.fmt, res)
-        self.assertEqual(len(nets['networks']), 0)
-
-    def test_create_networks_bulk_emulated_plugin_failure(self):
-        real_has_attr = hasattr
-
-        def fakehasattr(item, attr):
-            if attr.endswith('__native_bulk_support'):
-                return False
-            return real_has_attr(item, attr)
-
-        orig = manager.NeutronManager.get_plugin().create_network
-        # ensures the API chooses the emulation code path
-        with mock.patch('six.moves.builtins.hasattr',
-                        new=fakehasattr):
-            method_to_patch = _get_create_db_method('network')
-            with mock.patch.object(manager.NeutronManager.get_plugin(),
-                                   method_to_patch) as patched_plugin:
-
-                def side_effect(*args, **kwargs):
-                    return self._fail_second_call(patched_plugin, orig,
-                                                  *args, **kwargs)
-
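-                # let the first create succeed and raise on the second so
-                # the bulk request aborts midway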
-                patched_plugin.side_effect = side_effect
-                res = self._create_network_bulk(self.fmt, 2, 'test', True)
-                # We expect a 500 as we injected a fault in the plugin
-                self._validate_behavior_on_bulk_failure(
-                    res, 'networks', webob.exc.HTTPServerError.code
-                )
-
-    def test_create_networks_bulk_native_plugin_failure(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk network create")
-        orig = manager.NeutronManager.get_plugin().create_network
-        method_to_patch = _get_create_db_method('network')
-        with mock.patch.object(manager.NeutronManager.get_plugin(),
-                               method_to_patch) as patched_plugin:
-
-            def side_effect(*args, **kwargs):
-                return self._fail_second_call(patched_plugin, orig,
-                                              *args, **kwargs)
-
-            patched_plugin.side_effect = side_effect
-            res = self._create_network_bulk(self.fmt, 2, 'test', True)
-            # We expect a 500 as we injected a fault in the plugin
-            self._validate_behavior_on_bulk_failure(
-                res, 'networks', webob.exc.HTTPServerError.code
-            )
-
-    def test_list_networks(self):
-        with self.network() as v1, self.network() as v2, self.network() as v3:
-            networks = (v1, v2, v3)
-            self._test_list_resources('network', networks)
-
-    def test_list_networks_with_sort_native(self):
-        if self._skip_native_sorting:
-            self.skipTest("Skip test for not implemented sorting feature")
-        with self.network(admin_state_up=True, name='net1') as net1,\
-                self.network(admin_state_up=False, name='net2') as net2,\
-                self.network(admin_state_up=False, name='net3') as net3:
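-            # admin_state_up asc puts net2/net3 (False) before net1 (True);
-            # name desc then orders net3 before net2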
-            self._test_list_with_sort('network', (net3, net2, net1),
-                                      [('admin_state_up', 'asc'),
-                                       ('name', 'desc')])
-
-    def test_list_networks_with_sort_extended_attr_native_returns_400(self):
-        if self._skip_native_sorting:
-            self.skipTest("Skip test for not implemented sorting feature")
-        with self.network(admin_state_up=True, name='net1'),\
-                self.network(admin_state_up=False, name='net2'),\
-                self.network(admin_state_up=False, name='net3'):
-            req = self.new_list_request(
-                'networks',
-                params='sort_key=provider:segmentation_id&sort_dir=asc')
-            res = req.get_response(self.api)
-            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
-
-    def test_list_networks_with_sort_remote_key_native_returns_400(self):
-        if self._skip_native_sorting:
-            self.skipTest("Skip test for not implemented sorting feature")
-        with self.network(admin_state_up=True, name='net1'),\
-                self.network(admin_state_up=False, name='net2'),\
-                self.network(admin_state_up=False, name='net3'):
-            req = self.new_list_request(
-                'networks', params='sort_key=subnets&sort_dir=asc')
-            res = req.get_response(self.api)
-            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
-
-    def test_list_networks_with_sort_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_sorting_helper',
-            new=_fake_get_sorting_helper)
-        helper_patcher.start()
-        self.addCleanup(helper_patcher.stop)
-        with self.network(admin_state_up=True, name='net1') as net1,\
-                self.network(admin_state_up=False, name='net2') as net2,\
-                self.network(admin_state_up=False, name='net3') as net3:
-            self._test_list_with_sort('network', (net3, net2, net1),
-                                      [('admin_state_up', 'asc'),
-                                       ('name', 'desc')])
-
-    def test_list_networks_with_pagination_native(self):
-        if self._skip_native_pagination:
-            self.skipTest("Skip test for not implemented pagination feature")
-        with self.network(name='net1') as net1,\
-                self.network(name='net2') as net2,\
-                self.network(name='net3') as net3:
-            self._test_list_with_pagination('network',
-                                            (net1, net2, net3),
-                                            ('name', 'asc'), 2, 2)
-
-    def test_list_networks_with_pagination_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_pagination_helper',
-            new=_fake_get_pagination_helper)
-        helper_patcher.start()
-        self.addCleanup(helper_patcher.stop)
-        with self.network(name='net1') as net1,\
-                self.network(name='net2') as net2,\
-                self.network(name='net3') as net3:
-            self._test_list_with_pagination('network',
-                                            (net1, net2, net3),
-                                            ('name', 'asc'), 2, 2)
-
-    def test_list_networks_without_pk_in_fields_pagination_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_pagination_helper',
-            new=_fake_get_pagination_helper)
-        helper_patcher.start()
-        self.addCleanup(helper_patcher.stop)
-        with self.network(name='net1', shared=True) as net1,\
-                self.network(name='net2', shared=False) as net2,\
-                self.network(name='net3', shared=True) as net3:
-            self._test_list_with_pagination('network',
-                                            (net1, net2, net3),
-                                            ('name', 'asc'), 2, 2,
-                                            query_params="fields=name",
-                                            verify_key='name')
-
-    def test_list_networks_without_pk_in_fields_pagination_native(self):
-        if self._skip_native_pagination:
-            self.skipTest("Skip test for not implemented pagination feature")
-        with self.network(name='net1') as net1,\
-                self.network(name='net2') as net2,\
-                self.network(name='net3') as net3:
-            self._test_list_with_pagination('network',
-                                            (net1, net2, net3),
-                                            ('name', 'asc'), 2, 2,
-                                            query_params="fields=shared",
-                                            verify_key='shared')
-
-    def test_list_networks_with_pagination_reverse_native(self):
-        if self._skip_native_pagination:
-            self.skipTest("Skip test for not implemented pagination feature")
-        with self.network(name='net1') as net1,\
-                self.network(name='net2') as net2,\
-                self.network(name='net3') as net3:
-            self._test_list_with_pagination_reverse('network',
-                                                    (net1, net2, net3),
-                                                    ('name', 'asc'), 2, 2)
-
-    def test_list_networks_with_pagination_reverse_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_pagination_helper',
-            new=_fake_get_pagination_helper)
-        helper_patcher.start()
-        self.addCleanup(helper_patcher.stop)
-        with self.network(name='net1') as net1,\
-                self.network(name='net2') as net2,\
-                self.network(name='net3') as net3:
-            self._test_list_with_pagination_reverse('network',
-                                                    (net1, net2, net3),
-                                                    ('name', 'asc'), 2, 2)
-
-    def test_list_networks_with_parameters(self):
-        with self.network(name='net1', admin_state_up=False) as net1,\
-                self.network(name='net2') as net2:
-            query_params = 'admin_state_up=False'
-            self._test_list_resources('network', [net1],
-                                      query_params=query_params)
-            query_params = 'admin_state_up=True'
-            self._test_list_resources('network', [net2],
-                                      query_params=query_params)
-
-    def test_list_networks_with_fields(self):
-        with self.network(name='net1') as net1:
-            req = self.new_list_request('networks',
-                                        params='fields=name')
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(1, len(res['networks']))
-            self.assertEqual(res['networks'][0]['name'],
-                             net1['network']['name'])
-            self.assertIsNone(res['networks'][0].get('id'))
-
-    def test_list_networks_with_parameters_invalid_values(self):
-        with self.network(name='net1', admin_state_up=False),\
-                self.network(name='net2'):
-            req = self.new_list_request('networks',
-                                        params='admin_state_up=fake')
-            res = req.get_response(self.api)
-            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
-
-    def test_list_shared_networks_with_non_admin_user(self):
-        with self.network(shared=False,
-                          name='net1',
-                          tenant_id='tenant1') as net1,\
-                self.network(shared=True,
-                             name='net2',
-                             tenant_id='another_tenant') as net2,\
-                self.network(shared=False,
-                             name='net3',
-                             tenant_id='another_tenant'):
-            ctx = context.Context(user_id='non_admin',
-                                  tenant_id='tenant1',
-                                  is_admin=False)
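-            # net3 is neither shared nor owned by tenant1, so it must be
-            # filtered out of the listing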
-            self._test_list_resources('network', (net1, net2), ctx)
-
-    def test_show_network(self):
-        with self.network(name='net1') as net:
-            req = self.new_show_request('networks', net['network']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['network']['name'],
-                             net['network']['name'])
-
-    def test_show_network_with_subnet(self):
-        with self.network(name='net1') as net:
-            with self.subnet(net) as subnet:
-                req = self.new_show_request('networks', net['network']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                self.assertEqual(res['network']['subnets'][0],
-                                 subnet['subnet']['id'])
-
-    def test_invalid_admin_status(self):
-        # (API input, expected stored value, expected HTTP status)
-        values = [(7, False, webob.exc.HTTPClientError.code),
-                  (True, True, webob.exc.HTTPCreated.code),
-                  ("True", True, webob.exc.HTTPCreated.code),
-                  ("true", True, webob.exc.HTTPCreated.code),
-                  (1, True, webob.exc.HTTPCreated.code),
-                  ("False", False, webob.exc.HTTPCreated.code),
-                  (False, False, webob.exc.HTTPCreated.code),
-                  ("false", False, webob.exc.HTTPCreated.code),
-                  ("7", False, webob.exc.HTTPClientError.code)]
-        for admin_state_up, expected_value, expected_code in values:
-            data = {'network': {'name': 'net',
-                                'admin_state_up': admin_state_up,
-                                'tenant_id': self._tenant_id}}
-            network_req = self.new_create_request('networks', data)
-            res = network_req.get_response(self.api)
-            self.assertEqual(res.status_int, expected_code)
-            if expected_code == webob.exc.HTTPCreated.code:
-                body = self.deserialize(self.fmt, res)
-                self.assertEqual(body['network']['admin_state_up'],
-                                 expected_value)
-
-
-class TestSubnetsV2(NeutronDbPluginV2TestCase):
-
-    def _test_create_subnet(self, network=None, expected=None, **kwargs):
-        keys = kwargs.copy()
-        keys.setdefault('cidr', '10.0.0.0/24')
-        keys.setdefault('ip_version', 4)
-        keys.setdefault('enable_dhcp', True)
-        with self.subnet(network=network, **keys) as subnet:
-            # verify the response has each key with the correct value
-            self._validate_resource(subnet, keys, 'subnet')
-            # verify any caller-supplied expected values match the response
-            if expected:
-                self._compare_resource(subnet, expected, 'subnet')
-        self._delete('subnets', subnet['subnet']['id'])
-        return subnet
-
-    def test_create_subnet(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        subnet = self._test_create_subnet(gateway_ip=gateway_ip,
-                                          cidr=cidr)
-        self.assertEqual(4, subnet['subnet']['ip_version'])
-        self.assertIn('name', subnet['subnet'])
-
-    def test_create_subnet_with_network_different_tenant(self):
-        with self.network(shared=False, tenant_id='tenant1') as network:
-            ctx = context.Context(user_id='non_admin',
-                                  tenant_id='tenant2',
-                                  is_admin=False)
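-            # tenant2 cannot see tenant1's private network, so the create
-            # fails with 404 rather than 403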
-            data = {'subnet': {'network_id': network['network']['id'],
-                    'cidr': '10.0.2.0/24',
-                    'ip_version': '4',
-                    'gateway_ip': '10.0.2.1'}}
-            req = self.new_create_request('subnets', data,
-                                          self.fmt, context=ctx)
-            res = req.get_response(self.api)
-            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
-
-    def test_create_two_subnets(self):
-        gateway_ips = ['10.0.0.1', '10.0.1.1']
-        cidrs = ['10.0.0.0/24', '10.0.1.0/24']
-        with self.network() as network:
-            with self.subnet(network=network,
-                             gateway_ip=gateway_ips[0],
-                             cidr=cidrs[0]):
-                with self.subnet(network=network,
-                                 gateway_ip=gateway_ips[1],
-                                 cidr=cidrs[1]):
-                    net_req = self.new_show_request('networks',
-                                                    network['network']['id'])
-                    raw_res = net_req.get_response(self.api)
-                    net_res = self.deserialize(self.fmt, raw_res)
-                    for subnet_id in net_res['network']['subnets']:
-                        sub_req = self.new_show_request('subnets', subnet_id)
-                        raw_res = sub_req.get_response(self.api)
-                        sub_res = self.deserialize(self.fmt, raw_res)
-                        self.assertIn(sub_res['subnet']['cidr'], cidrs)
-                        self.assertIn(sub_res['subnet']['gateway_ip'],
-                                      gateway_ips)
-
-    def test_create_two_subnets_same_cidr_returns_400(self):
-        gateway_ip_1 = '10.0.0.1'
-        cidr_1 = '10.0.0.0/24'
-        gateway_ip_2 = '10.0.0.10'
-        cidr_2 = '10.0.0.0/24'
-        with self.network() as network:
-            with self.subnet(network=network,
-                             gateway_ip=gateway_ip_1,
-                             cidr=cidr_1):
-                with testlib_api.ExpectedException(
-                        webob.exc.HTTPClientError) as ctx_manager:
-                    with self.subnet(network=network,
-                                     gateway_ip=gateway_ip_2,
-                                     cidr=cidr_2):
-                        pass
-                self.assertEqual(ctx_manager.exception.code,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_bad_V4_cidr(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                    'cidr': '10.0.2.0',
-                    'ip_version': '4',
-                    'tenant_id': network['network']['tenant_id'],
-                    'gateway_ip': '10.0.2.1'}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_no_ip_version(self):
-        with self.network() as network:
-            cfg.CONF.set_override('default_ipv4_subnet_pool', None)
-            cfg.CONF.set_override('default_ipv6_subnet_pool', None)
-            data = {'subnet': {'network_id': network['network']['id'],
-                    'tenant_id': network['network']['tenant_id']}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_only_ip_version_v6_no_pool(self):
-        with self.network() as network:
-            tenant_id = network['network']['tenant_id']
-            cfg.CONF.set_override('ipv6_pd_enabled', False)
-            cfg.CONF.set_override('default_ipv6_subnet_pool', None)
-            data = {'subnet': {'network_id': network['network']['id'],
-                    'ip_version': '6',
-                    'tenant_id': tenant_id}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_only_ip_version_v4(self):
-        with self.network() as network:
-            tenant_id = network['network']['tenant_id']
-            subnetpool_prefix = '10.0.0.0/8'
-            with self.subnetpool(prefixes=[subnetpool_prefix],
-                                 admin=True,
-                                 name="My subnet pool",
-                                 tenant_id=tenant_id,
-                                 min_prefixlen='25',
-                                 is_default=True) as subnetpool:
-                subnetpool_id = subnetpool['subnetpool']['id']
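-                # no cidr is supplied, so the subnet must be carved out of
-                # the default subnet pool with the requested prefixlen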
-                data = {'subnet': {'network_id': network['network']['id'],
-                        'ip_version': '4',
-                        'prefixlen': '27',
-                        'tenant_id': tenant_id}}
-                subnet_req = self.new_create_request('subnets', data)
-                res = subnet_req.get_response(self.api)
-                subnet = self.deserialize(self.fmt, res)['subnet']
-                ip_net = netaddr.IPNetwork(subnet['cidr'])
-                self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix))
-                self.assertEqual(27, ip_net.prefixlen)
-                self.assertEqual(subnetpool_id, subnet['subnetpool_id'])
-
-    def test_create_subnet_only_ip_version_v4_old(self):
-        # TODO(john-davidge): Remove after Mitaka release.
-        with self.network() as network:
-            tenant_id = network['network']['tenant_id']
-            subnetpool_prefix = '10.0.0.0/8'
-            with self.subnetpool(prefixes=[subnetpool_prefix],
-                                 admin=False,
-                                 name="My subnet pool",
-                                 tenant_id=tenant_id,
-                                 min_prefixlen='25') as subnetpool:
-                subnetpool_id = subnetpool['subnetpool']['id']
-                cfg.CONF.set_override('default_ipv4_subnet_pool',
-                                      subnetpool_id)
-                data = {'subnet': {'network_id': network['network']['id'],
-                        'ip_version': '4',
-                        'prefixlen': '27',
-                        'tenant_id': tenant_id}}
-                subnet_req = self.new_create_request('subnets', data)
-                res = subnet_req.get_response(self.api)
-                subnet = self.deserialize(self.fmt, res)['subnet']
-                ip_net = netaddr.IPNetwork(subnet['cidr'])
-                self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix))
-                self.assertEqual(27, ip_net.prefixlen)
-                self.assertEqual(subnetpool_id, subnet['subnetpool_id'])
-
-    def test_create_subnet_only_ip_version_v6(self):
-        with self.network() as network:
-            tenant_id = network['network']['tenant_id']
-            subnetpool_prefix = '2000::/56'
-            with self.subnetpool(prefixes=[subnetpool_prefix],
-                                 admin=True,
-                                 name="My ipv6 subnet pool",
-                                 tenant_id=tenant_id,
-                                 min_prefixlen='64',
-                                 is_default=True) as subnetpool:
-                subnetpool_id = subnetpool['subnetpool']['id']
-                cfg.CONF.set_override('ipv6_pd_enabled', False)
-                data = {'subnet': {'network_id': network['network']['id'],
-                        'ip_version': '6',
-                        'tenant_id': tenant_id}}
-                subnet_req = self.new_create_request('subnets', data)
-                res = subnet_req.get_response(self.api)
-                subnet = self.deserialize(self.fmt, res)['subnet']
-                self.assertEqual(subnetpool_id, subnet['subnetpool_id'])
-                ip_net = netaddr.IPNetwork(subnet['cidr'])
-                self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix))
-                self.assertEqual(64, ip_net.prefixlen)
-
-    def test_create_subnet_only_ip_version_v6_old(self):
-        # TODO(john-davidge): Remove after Mitaka release.
-        with self.network() as network:
-            tenant_id = network['network']['tenant_id']
-            subnetpool_prefix = '2000::/56'
-            with self.subnetpool(prefixes=[subnetpool_prefix],
-                                 admin=False,
-                                 name="My ipv6 subnet pool",
-                                 tenant_id=tenant_id,
-                                 min_prefixlen='64') as subnetpool:
-                subnetpool_id = subnetpool['subnetpool']['id']
-                cfg.CONF.set_override('default_ipv6_subnet_pool',
-                                      subnetpool_id)
-                cfg.CONF.set_override('ipv6_pd_enabled', False)
-                data = {'subnet': {'network_id': network['network']['id'],
-                        'ip_version': '6',
-                        'tenant_id': tenant_id}}
-                subnet_req = self.new_create_request('subnets', data)
-                res = subnet_req.get_response(self.api)
-                subnet = self.deserialize(self.fmt, res)['subnet']
-                self.assertEqual(subnetpool_id, subnet['subnetpool_id'])
-                ip_net = netaddr.IPNetwork(subnet['cidr'])
-                self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix))
-                self.assertEqual(64, ip_net.prefixlen)
-
-    def test_create_subnet_bad_V4_cidr_prefix_len(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                    'cidr': constants.IPv4_ANY,
-                    'ip_version': '4',
-                    'tenant_id': network['network']['tenant_id'],
-                    'gateway_ip': '0.0.0.1'}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_bad_V6_cidr(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                    'cidr': 'fe80::',
-                    'ip_version': '6',
-                    'tenant_id': network['network']['tenant_id'],
-                    'gateway_ip': 'fe80::1'}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_V6_slaac_big_prefix(self):
-        with self.network() as network:
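-            # SLAAC needs a 64-bit interface identifier, so prefixes
-            # longer than /64 must be rejected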
-            data = {'subnet': {'network_id': network['network']['id'],
-                    'cidr': '2014::/65',
-                    'ip_version': '6',
-                    'tenant_id': network['network']['tenant_id'],
-                    'gateway_ip': 'fe80::1',
-                    'ipv6_address_mode': 'slaac'}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
-
-    def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self):
-        cidr_1 = '10.0.0.0/23'
-        cidr_2 = '10.0.0.0/24'
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-
-        with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2):
-            pass
-
-    def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self):
-        cidr_1 = '10.0.0.0/23'
-        cidr_2 = '10.0.0.0/24'
-        cfg.CONF.set_override('allow_overlapping_ips', False)
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2):
-                pass
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPClientError.code)
-
-    def test_create_subnets_bulk_native(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk subnet create")
-        with self.network() as net:
-            res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'],
-                                           'test')
-            self._validate_behavior_on_bulk_success(res, 'subnets')
-
-    def test_create_subnets_bulk_emulated(self):
-        real_has_attr = hasattr
-
-        # ensures the API chooses the emulation code path
-        def fakehasattr(item, attr):
-            if attr.endswith('__native_bulk_support'):
-                return False
-            return real_has_attr(item, attr)
-
-        with mock.patch('six.moves.builtins.hasattr',
-                        new=fakehasattr):
-            with self.network() as net:
-                res = self._create_subnet_bulk(self.fmt, 2,
-                                               net['network']['id'],
-                                               'test')
-                self._validate_behavior_on_bulk_success(res, 'subnets')
-
-    def test_create_subnets_bulk_emulated_plugin_failure(self):
-        real_has_attr = hasattr
-
-        # ensures the API chooses the emulation code path
-        def fakehasattr(item, attr):
-            if attr.endswith('__native_bulk_support'):
-                return False
-            return real_has_attr(item, attr)
-
-        with mock.patch('six.moves.builtins.hasattr',
-                        new=fakehasattr):
-            orig = manager.NeutronManager.get_plugin().create_subnet
-            method_to_patch = _get_create_db_method('subnet')
-            with mock.patch.object(manager.NeutronManager.get_plugin(),
-                                   method_to_patch) as patched_plugin:
-
-                def side_effect(*args, **kwargs):
-                    return self._fail_second_call(patched_plugin, orig,
-                                                  *args, **kwargs)
-
-                patched_plugin.side_effect = side_effect
-                with self.network() as net:
-                    res = self._create_subnet_bulk(self.fmt, 2,
-                                                   net['network']['id'],
-                                                   'test')
-                self._delete('networks', net['network']['id'])
-                # We expect a 500 as we injected a fault in the plugin
-                self._validate_behavior_on_bulk_failure(
-                    res, 'subnets', webob.exc.HTTPServerError.code
-                )
-
-    def test_create_subnets_bulk_native_plugin_failure(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk subnet create")
-        plugin = manager.NeutronManager.get_plugin()
-        orig = plugin.create_subnet
-        method_to_patch = _get_create_db_method('subnet')
-        with mock.patch.object(plugin, method_to_patch) as patched_plugin:
-            def side_effect(*args, **kwargs):
-                return self._fail_second_call(patched_plugin, orig,
-                                              *args, **kwargs)
-
-            patched_plugin.side_effect = side_effect
-            with self.network() as net:
-                res = self._create_subnet_bulk(self.fmt, 2,
-                                               net['network']['id'],
-                                               'test')
-
-                # We expect a 500 as we injected a fault in the plugin
-                self._validate_behavior_on_bulk_failure(
-                    res, 'subnets', webob.exc.HTTPServerError.code
-                )
-
-    def test_delete_subnet(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        # Create new network
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_subnet(self.fmt, network, gateway_ip,
-                                   cidr, ip_version=4)
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_delete_subnet_port_exists_owned_by_network(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        # Create new network
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_subnet(self.fmt, network, gateway_ip,
-                                   cidr, ip_version=4)
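-        # a port owned by the network itself (DHCP) must not block the
-        # subnet delete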
-        self._create_port(self.fmt,
-                          network['network']['id'],
-                          device_owner=constants.DEVICE_OWNER_DHCP)
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_delete_subnet_dhcp_port_associated_with_other_subnets(self):
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet1 = self._make_subnet(self.fmt, network, '10.0.0.1',
-                                    '10.0.0.0/24', ip_version=4)
-        subnet2 = self._make_subnet(self.fmt, network, '10.0.1.1',
-                                    '10.0.1.0/24', ip_version=4)
-        res = self._create_port(self.fmt,
-                                network['network']['id'],
-                                device_owner=constants.DEVICE_OWNER_DHCP,
-                                fixed_ips=[
-                                    {'subnet_id': subnet1['subnet']['id']},
-                                    {'subnet_id': subnet2['subnet']['id']}
-                                ])
-        port = self.deserialize(self.fmt, res)
-        expected_subnets = [subnet1['subnet']['id'], subnet2['subnet']['id']]
-        self.assertEqual(expected_subnets,
-                         [s['subnet_id'] for s in port['port']['fixed_ips']])
-        req = self.new_delete_request('subnets', subnet1['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-        port = self._show('ports', port['port']['id'])
-
-        expected_subnets = [subnet2['subnet']['id']]
-        self.assertEqual(expected_subnets,
-                         [s['subnet_id'] for s in port['port']['fixed_ips']])
-        req = self.new_delete_request('subnets', subnet2['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-        port = self._show('ports', port['port']['id'])
-        self.assertFalse(port['port']['fixed_ips'])
-
-    def test_delete_subnet_port_exists_owned_by_other(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet):
-                subnet_id = subnet['subnet']['id']
-                req = self.new_delete_request('subnets', subnet_id)
-                res = req.get_response(self.api)
-                data = self.deserialize(self.fmt, res)
-                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
-                msg = str(n_exc.SubnetInUse(subnet_id=subnet_id))
-                self.assertEqual(data['NeutronError']['message'], msg)
-
-    def test_delete_subnet_with_other_subnet_on_network_still_in_use(self):
-        with self.network() as network:
-            with self.subnet(network=network) as subnet1,\
-                    self.subnet(network=network,
-                                cidr='10.0.1.0/24') as subnet2:
-                subnet1_id = subnet1['subnet']['id']
-                subnet2_id = subnet2['subnet']['id']
-                with self.port(
-                    subnet=subnet1,
-                    fixed_ips=[{'subnet_id': subnet1_id}]):
-                    req = self.new_delete_request('subnets', subnet2_id)
-                    res = req.get_response(self.api)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPNoContent.code)
-
-    def _create_slaac_subnet_and_port(self, port_owner=None):
-        # Create an IPv6 SLAAC subnet and a port using that subnet
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_subnet(self.fmt, network, gateway='fe80::1',
-                                   cidr='fe80::/64', ip_version=6,
-                                   ipv6_ra_mode=constants.IPV6_SLAAC,
-                                   ipv6_address_mode=constants.IPV6_SLAAC)
-        kwargs = {}
-        if port_owner:
-            kwargs['device_owner'] = port_owner
-            if port_owner in constants.ROUTER_INTERFACE_OWNERS:
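-                # router interface ports are given the gateway address
-                # explicitly rather than a SLAAC-derived one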
-                kwargs['fixed_ips'] = [{'ip_address': 'fe80::1'}]
-        res = self._create_port(self.fmt, net_id=network['network']['id'],
-                                **kwargs)
-
-        port = self.deserialize(self.fmt, res)
-        self.assertEqual(1, len(port['port']['fixed_ips']))
-
-        # The port should have an address from the subnet
-        req = self.new_show_request('ports', port['port']['id'], self.fmt)
-        res = req.get_response(self.api)
-        sport = self.deserialize(self.fmt, res)
-        self.assertEqual(1, len(sport['port']['fixed_ips']))
-
-        return subnet, port
-
-    def test_delete_subnet_ipv6_slaac_port_exists(self):
-        """Test IPv6 SLAAC subnet delete when a port is still using subnet."""
-        subnet, port = self._create_slaac_subnet_and_port()
-        # Delete the subnet
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
-        # The port should no longer have an address from the deleted subnet
-        req = self.new_show_request('ports', port['port']['id'], self.fmt)
-        res = req.get_response(self.api)
-        sport = self.deserialize(self.fmt, res)
-        self.assertEqual(0, len(sport['port']['fixed_ips']))
-
-    def test_delete_subnet_ipv6_slaac_router_port_exists(self):
-        """Test IPv6 SLAAC subnet delete with a router port using the subnet"""
-        subnet, port = self._create_slaac_subnet_and_port(
-                constants.DEVICE_OWNER_ROUTER_INTF)
-        # Delete the subnet and assert that we get a HTTP 409 error
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-        # The subnet should still exist and the port should still have an
-        # address from the subnet
-        req = self.new_show_request('subnets', subnet['subnet']['id'],
-                                    self.fmt)
-        res = req.get_response(self.api)
-        ssubnet = self.deserialize(self.fmt, res)
-        self.assertIsNotNone(ssubnet)
-        req = self.new_show_request('ports', port['port']['id'], self.fmt)
-        res = req.get_response(self.api)
-        sport = self.deserialize(self.fmt, res)
-        self.assertEqual(1, len(sport['port']['fixed_ips']))
-        port_subnet_ids = [fip['subnet_id'] for fip in
-                           sport['port']['fixed_ips']]
-        self.assertIn(subnet['subnet']['id'], port_subnet_ids)
-
-    def test_delete_network(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        # Create new network
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr,
-                                   ip_version=4)
-        req = self.new_delete_request('networks', network['network']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-        req = self.new_show_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
-
-    def test_create_subnet_bad_tenant(self):
-        with self.network() as network:
-            self._create_subnet(self.fmt,
-                                network['network']['id'],
-                                '10.0.2.0/24',
-                                webob.exc.HTTPNotFound.code,
-                                ip_version=4,
-                                tenant_id='bad_tenant_id',
-                                gateway_ip='10.0.2.1',
-                                device_owner='fake_owner',
-                                set_context=True)
-
-    def test_create_subnet_as_admin(self):
-        with self.network() as network:
-            self._create_subnet(self.fmt,
-                                network['network']['id'],
-                                '10.0.2.0/24',
-                                webob.exc.HTTPCreated.code,
-                                ip_version=4,
-                                tenant_id='bad_tenant_id',
-                                gateway_ip='10.0.2.1',
-                                device_owner='fake_owner',
-                                set_context=False)
-
-    def test_create_subnet_nonzero_cidr(self):
-        # Pass None as gateway_ip to prevent auto-allocation of the gateway.
-        # Previously the gateway IP was allocated after validation, so no
-        # error was raised when it fell outside the subnet range.
-        with self.subnet(cidr='10.129.122.5/8') as v1,\
-                self.subnet(cidr='11.129.122.5/15') as v2,\
-                self.subnet(cidr='12.129.122.5/16') as v3,\
-                self.subnet(cidr='13.129.122.5/18') as v4,\
-                self.subnet(cidr='14.129.122.5/22') as v5,\
-                self.subnet(cidr='15.129.122.5/24') as v6,\
-                self.subnet(cidr='16.129.122.5/28') as v7,\
-                self.subnet(cidr='17.129.122.5/32', gateway_ip=None,
-                            enable_dhcp=False) as v8:
-            subs = (v1, v2, v3, v4, v5, v6, v7, v8)
-            # the API should accept and correct these for users
-            self.assertEqual(subs[0]['subnet']['cidr'], '10.0.0.0/8')
-            self.assertEqual(subs[1]['subnet']['cidr'], '11.128.0.0/15')
-            self.assertEqual(subs[2]['subnet']['cidr'], '12.129.0.0/16')
-            self.assertEqual(subs[3]['subnet']['cidr'], '13.129.64.0/18')
-            self.assertEqual(subs[4]['subnet']['cidr'], '14.129.120.0/22')
-            self.assertEqual(subs[5]['subnet']['cidr'], '15.129.122.0/24')
-            self.assertEqual(subs[6]['subnet']['cidr'], '16.129.122.0/28')
-            self.assertEqual(subs[7]['subnet']['cidr'], '17.129.122.5/32')
-
-    def _test_create_subnet_with_invalid_netmask_returns_400(self, *args):
-        with self.network() as network:
-            for cidr in args:
-                ip_version = netaddr.IPNetwork(cidr).version
-                self._create_subnet(self.fmt,
-                                    network['network']['id'],
-                                    cidr,
-                                    webob.exc.HTTPClientError.code,
-                                    ip_version=ip_version)
-
-    def test_create_subnet_with_invalid_netmask_returns_400_ipv4(self):
-        self._test_create_subnet_with_invalid_netmask_returns_400(
-                '10.0.0.0/31', '10.0.0.0/32')
-
-    def test_create_subnet_with_invalid_netmask_returns_400_ipv6(self):
-        self._test_create_subnet_with_invalid_netmask_returns_400(
-                'cafe:cafe::/127', 'cafe:cafe::/128')
-
-    def test_create_subnet_bad_ip_version(self):
-        with self.network() as network:
-            # Check bad IP version
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 'abc',
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': '10.0.2.1'}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_bad_ip_version_null(self):
-        with self.network() as network:
-            # Check bad IP version
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': None,
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': '10.0.2.1'}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_bad_uuid(self):
-        with self.network() as network:
-            # Check invalid UUID
-            data = {'subnet': {'network_id': None,
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': '10.0.2.1'}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_bad_boolean(self):
-        with self.network() as network:
-            # Check invalid boolean
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': '4',
-                               'enable_dhcp': None,
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': '10.0.2.1'}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_bad_pools(self):
-        with self.network() as network:
-            # Check allocation pools
-            allocation_pools = [[{'end': '10.0.0.254'}],
-                                [{'start': '10.0.0.254'}],
-                                [{'start': '1000.0.0.254'}],
-                                [{'start': '10.0.0.2', 'end': '10.0.0.254'},
-                                 {'end': '10.0.0.254'}],
-                                None,
-                                [{'start': '10.0.0.200', 'end': '10.0.3.20'}],
-                                [{'start': '10.0.2.250', 'end': '10.0.3.5'}],
-                                [{'start': '10.0.2.10', 'end': '10.0.2.5'}],
-                                [{'start': '10.0.0.2', 'end': '10.0.0.3'},
-                                 {'start': '10.0.0.2', 'end': '10.0.0.3'}]]
-            tenant_id = network['network']['tenant_id']
-            for pool in allocation_pools:
-                data = {'subnet': {'network_id': network['network']['id'],
-                                   'cidr': '10.0.2.0/24',
-                                   'ip_version': '4',
-                                   'tenant_id': tenant_id,
-                                   'gateway_ip': '10.0.2.1',
-                                   'allocation_pools': pool}}
-                subnet_req = self.new_create_request('subnets', data)
-                res = subnet_req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_bad_nameserver(self):
-        with self.network() as network:
-            # Check nameservers
-            nameserver_pools = [['1100.0.0.2'],
-                                ['1.1.1.2', '1.1000.1.3'],
-                                ['1.1.1.2', '1.1.1.2']]
-            tenant_id = network['network']['tenant_id']
-            for nameservers in nameserver_pools:
-                data = {'subnet': {'network_id': network['network']['id'],
-                                   'cidr': '10.0.2.0/24',
-                                   'ip_version': '4',
-                                   'tenant_id': tenant_id,
-                                   'gateway_ip': '10.0.2.1',
-                                   'dns_nameservers': nameservers}}
-                subnet_req = self.new_create_request('subnets', data)
-                res = subnet_req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_bad_hostroutes(self):
-        with self.network() as network:
-            # Check hostroutes
-            hostroute_pools = [[{'destination': '100.0.0.0/24'}],
-                               [{'nexthop': '10.0.2.20'}],
-                               [{'nexthop': '10.0.2.20',
-                                 'destination': '100.0.0.0/8'},
-                                {'nexthop': '10.0.2.20',
-                                 'destination': '100.0.0.0/8'}]]
-            tenant_id = network['network']['tenant_id']
-            for hostroutes in hostroute_pools:
-                data = {'subnet': {'network_id': network['network']['id'],
-                                   'cidr': '10.0.2.0/24',
-                                   'ip_version': '4',
-                                   'tenant_id': tenant_id,
-                                   'gateway_ip': '10.0.2.1',
-                                   'host_routes': hostroutes}}
-                subnet_req = self.new_create_request('subnets', data)
-                res = subnet_req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_defaults(self):
-        gateway = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.254'}]
-        enable_dhcp = True
-        subnet = self._test_create_subnet()
-        # verify cidr & gw have been correctly generated
-        self.assertEqual(subnet['subnet']['cidr'], cidr)
-        self.assertEqual(subnet['subnet']['gateway_ip'], gateway)
-        self.assertEqual(subnet['subnet']['enable_dhcp'], enable_dhcp)
-        self.assertEqual(subnet['subnet']['allocation_pools'],
-                         allocation_pools)
-
-    def test_create_subnet_gw_values(self):
-        cidr = '10.0.0.0/24'
-        # Gateway is last IP in range
-        gateway = '10.0.0.254'
-        allocation_pools = [{'start': '10.0.0.1',
-                             'end': '10.0.0.253'}]
-        expected = {'gateway_ip': gateway,
-                    'cidr': cidr,
-                    'allocation_pools': allocation_pools}
-        self._test_create_subnet(expected=expected, gateway_ip=gateway)
-        # Gateway is first in subnet
-        gateway = '10.0.0.1'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.254'}]
-        expected = {'gateway_ip': gateway,
-                    'cidr': cidr,
-                    'allocation_pools': allocation_pools}
-        self._test_create_subnet(expected=expected,
-                                 gateway_ip=gateway)
-
-    def test_create_subnet_ipv6_gw_values(self):
-        cidr = '2001::/64'
-        # Gateway is last IP in IPv6 DHCPv6 stateful subnet
-        gateway = '2001::ffff:ffff:ffff:ffff'
-        allocation_pools = [{'start': '2001::1',
-                             'end': '2001::ffff:ffff:ffff:fffe'}]
-        expected = {'gateway_ip': gateway,
-                    'cidr': cidr,
-                    'allocation_pools': allocation_pools}
-        self._test_create_subnet(expected=expected, gateway_ip=gateway,
-                                 cidr=cidr, ip_version=6,
-                                 ipv6_ra_mode=constants.DHCPV6_STATEFUL,
-                                 ipv6_address_mode=constants.DHCPV6_STATEFUL)
-        # Gateway is first IP in IPv6 DHCPv6 stateful subnet
-        gateway = '2001::1'
-        allocation_pools = [{'start': '2001::2',
-                             'end': '2001::ffff:ffff:ffff:ffff'}]
-        expected = {'gateway_ip': gateway,
-                    'cidr': cidr,
-                    'allocation_pools': allocation_pools}
-        self._test_create_subnet(expected=expected, gateway_ip=gateway,
-                                 cidr=cidr, ip_version=6,
-                                 ipv6_ra_mode=constants.DHCPV6_STATEFUL,
-                                 ipv6_address_mode=constants.DHCPV6_STATEFUL)
-        # If gateway_ip is not specified, allocate first IP from the subnet
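-        # (gateway still holds '2001::1', the subnet's first address, from
-        # the previous case)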
-        expected = {'gateway_ip': gateway,
-                    'cidr': cidr}
-        self._test_create_subnet(expected=expected,
-                                 cidr=cidr, ip_version=6,
-                                 ipv6_ra_mode=constants.IPV6_SLAAC,
-                                 ipv6_address_mode=constants.IPV6_SLAAC)
-
-    @testtools.skipIf(tools.is_bsd(), 'bug/1484837')
-    def test_create_subnet_ipv6_pd_gw_values(self):
-        cidr = constants.PROVISIONAL_IPV6_PD_PREFIX
-        # Gateway is last IP in IPv6 DHCPv6 Stateless subnet
-        gateway = '::ffff:ffff:ffff:ffff'
-        allocation_pools = [{'start': '::1',
-                             'end': '::ffff:ffff:ffff:fffe'}]
-        expected = {'gateway_ip': gateway,
-                    'cidr': cidr,
-                    'allocation_pools': allocation_pools}
-        self._test_create_subnet(expected=expected, gateway_ip=gateway,
-                                 cidr=cidr, ip_version=6,
-                                 ipv6_ra_mode=constants.DHCPV6_STATELESS,
-                                 ipv6_address_mode=constants.DHCPV6_STATELESS)
-        # Gateway is first IP in IPv6 DHCPv6 Stateless subnet
-        gateway = '::1'
-        allocation_pools = [{'start': '::2',
-                             'end': '::ffff:ffff:ffff:ffff'}]
-        expected = {'gateway_ip': gateway,
-                    'cidr': cidr,
-                    'allocation_pools': allocation_pools}
-        self._test_create_subnet(expected=expected, gateway_ip=gateway,
-                                 cidr=cidr, ip_version=6,
-                                 ipv6_ra_mode=constants.DHCPV6_STATELESS,
-                                 ipv6_address_mode=constants.DHCPV6_STATELESS)
-        # If gateway_ip is not specified, allocate first IP from the subnet
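-        # (gateway still holds '::1', the subnet's first address, from the
-        # previous case)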
-        expected = {'gateway_ip': gateway,
-                    'cidr': cidr}
-        self._test_create_subnet(expected=expected,
-                                 cidr=cidr, ip_version=6,
-                                 ipv6_ra_mode=constants.IPV6_SLAAC,
-                                 ipv6_address_mode=constants.IPV6_SLAAC)
-
-    def test_create_subnet_gw_outside_cidr_returns_400(self):
-        with self.network() as network:
-            self._create_subnet(self.fmt,
-                                network['network']['id'],
-                                '10.0.0.0/24',
-                                webob.exc.HTTPClientError.code,
-                                gateway_ip='100.0.0.1')
-
-    def test_create_subnet_gw_of_network_returns_400(self):
-        with self.network() as network:
-            self._create_subnet(self.fmt,
-                                network['network']['id'],
-                                '10.0.0.0/24',
-                                webob.exc.HTTPClientError.code,
-                                gateway_ip='10.0.0.0')
-
-    def test_create_subnet_gw_bcast_returns_400(self):
-        with self.network() as network:
-            self._create_subnet(self.fmt,
-                                network['network']['id'],
-                                '10.0.0.0/24',
-                                webob.exc.HTTPClientError.code,
-                                gateway_ip='10.0.0.255')
-
-    def test_create_subnet_with_allocation_pool(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.100'}]
-        self._test_create_subnet(gateway_ip=gateway_ip,
-                                 cidr=cidr,
-                                 allocation_pools=allocation_pools)
-
-    def test_create_subnet_with_none_gateway(self):
-        cidr = '10.0.0.0/24'
-        self._test_create_subnet(gateway_ip=None,
-                                 cidr=cidr)
-
-    def test_create_subnet_with_none_gateway_fully_allocated(self):
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.1',
-                             'end': '10.0.0.254'}]
-        self._test_create_subnet(gateway_ip=None,
-                                 cidr=cidr,
-                                 allocation_pools=allocation_pools)
-
-    def test_subnet_with_allocation_range(self):
-        with self.network() as network:
-            net_id = network['network']['id']
-            data = {'subnet': {'network_id': net_id,
-                               'cidr': '10.0.0.0/24',
-                               'ip_version': 4,
-                               'gateway_ip': '10.0.0.1',
-                               'tenant_id': network['network']['tenant_id'],
-                               'allocation_pools': [{'start': '10.0.0.100',
-                                                     'end': '10.0.0.120'}]}}
-            subnet_req = self.new_create_request('subnets', data)
-            subnet = self.deserialize(self.fmt,
-                                      subnet_req.get_response(self.api))
-            # Check fixed IP not in allocation range
-            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
-                                     'ip_address': '10.0.0.10'}]}
-            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-            port = self.deserialize(self.fmt, res)
-            # delete the port
-            self._delete('ports', port['port']['id'])
-
-            # Check when fixed IP is gateway
-            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
-                                     'ip_address': '10.0.0.1'}]}
-            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
-            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-            port = self.deserialize(self.fmt, res)
-            # delete the port
-            self._delete('ports', port['port']['id'])
-
-    def test_create_subnet_with_none_gateway_allocation_pool(self):
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.100'}]
-        self._test_create_subnet(gateway_ip=None,
-                                 cidr=cidr,
-                                 allocation_pools=allocation_pools)
-
-    def test_create_subnet_with_v6_allocation_pool(self):
-        gateway_ip = 'fe80::1'
-        cidr = 'fe80::/80'
-        allocation_pools = [{'start': 'fe80::2',
-                             'end': 'fe80::ffff:fffa:ffff'}]
-        self._test_create_subnet(gateway_ip=gateway_ip,
-                                 cidr=cidr, ip_version=6,
-                                 allocation_pools=allocation_pools)
-
-    @testtools.skipIf(tools.is_bsd(), 'bug/1484837')
-    def test_create_subnet_with_v6_pd_allocation_pool(self):
-        gateway_ip = '::1'
-        cidr = constants.PROVISIONAL_IPV6_PD_PREFIX
-        allocation_pools = [{'start': '::2',
-                             'end': '::ffff:ffff:ffff:fffe'}]
-        self._test_create_subnet(gateway_ip=gateway_ip,
-                                 cidr=cidr, ip_version=6,
-                                 allocation_pools=allocation_pools)
-
-    def test_create_subnet_with_large_allocation_pool(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/8'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.100'},
-                            {'start': '10.1.0.0',
-                             'end': '10.200.0.100'}]
-        self._test_create_subnet(gateway_ip=gateway_ip,
-                                 cidr=cidr,
-                                 allocation_pools=allocation_pools)
-
-    def test_create_subnet_multiple_allocation_pools(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.100'},
-                            {'start': '10.0.0.110',
-                             'end': '10.0.0.150'}]
-        self._test_create_subnet(gateway_ip=gateway_ip,
-                                 cidr=cidr,
-                                 allocation_pools=allocation_pools)
-
-    def test_create_subnet_with_dhcp_disabled(self):
-        enable_dhcp = False
-        self._test_create_subnet(enable_dhcp=enable_dhcp)
-
-    def test_create_subnet_default_gw_conflict_allocation_pool_returns_409(
-            self):
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.1',
-                             'end': '10.0.0.5'}]
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(cidr=cidr,
-                                     allocation_pools=allocation_pools)
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPConflict.code)
-
-    def test_create_subnet_gateway_in_allocation_pool_returns_409(self):
-        gateway_ip = '10.0.0.50'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.1',
-                             'end': '10.0.0.100'}]
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(gateway_ip=gateway_ip,
-                                     cidr=cidr,
-                                     allocation_pools=allocation_pools)
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPConflict.code)
-
-    def test_create_subnet_overlapping_allocation_pools_returns_409(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.150'},
-                            {'start': '10.0.0.140',
-                             'end': '10.0.0.180'}]
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(gateway_ip=gateway_ip,
-                                     cidr=cidr,
-                                     allocation_pools=allocation_pools)
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPConflict.code)
-
-    def test_create_subnet_invalid_allocation_pool_returns_400(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.256'}]
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(gateway_ip=gateway_ip,
-                                     cidr=cidr,
-                                     allocation_pools=allocation_pools)
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_out_of_range_allocation_pool_returns_400(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.1.6'}]
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(gateway_ip=gateway_ip,
-                                     cidr=cidr,
-                                     allocation_pools=allocation_pools)
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_shared_returns_400(self):
-        cidr = '10.0.0.0/24'
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(cidr=cidr,
-                                     shared=True)
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_inconsistent_ipv6_cidrv4(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 6,
-                               'tenant_id': network['network']['tenant_id']}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_inconsistent_ipv4_cidrv6(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': 'fe80::0/80',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_inconsistent_ipv4_gatewayv6(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 4,
-                               'gateway_ip': 'fe80::1',
-                               'tenant_id': network['network']['tenant_id']}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_inconsistent_ipv6_gatewayv4(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': 'fe80::0/80',
-                               'ip_version': 6,
-                               'gateway_ip': '192.168.0.1',
-                               'tenant_id': network['network']['tenant_id']}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_inconsistent_ipv6_dns_v4(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': 'fe80::0/80',
-                               'ip_version': 6,
-                               'dns_nameservers': ['192.168.0.1'],
-                               'tenant_id': network['network']['tenant_id']}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self):
-        host_routes = [{'destination': 'fe80::0/48',
-                        'nexthop': '10.0.2.20'}]
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 4,
-                               'host_routes': host_routes,
-                               'tenant_id': network['network']['tenant_id']}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self):
-        host_routes = [{'destination': '172.16.0.0/24',
-                        'nexthop': 'fe80::1'}]
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 4,
-                               'host_routes': host_routes,
-                               'tenant_id': network['network']['tenant_id']}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def _test_validate_subnet_ipv6_modes(self, cur_subnet=None,
-                                         expect_success=True, **modes):
-        plugin = manager.NeutronManager.get_plugin()
-        ctx = context.get_admin_context()
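-        # Build a minimal IPv6 subnet dict, apply the requested RA/address
-        # modes, and check whether _validate_subnet accepts or rejects it.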
-        new_subnet = {'ip_version': 6,
-                      'cidr': 'fe80::/64',
-                      'enable_dhcp': True,
-                      'ipv6_address_mode': None,
-                      'ipv6_ra_mode': None}
-        for mode, value in modes.items():
-            new_subnet[mode] = value
-        if expect_success:
-            plugin._validate_subnet(ctx, new_subnet, cur_subnet)
-        else:
-            self.assertRaises(n_exc.InvalidInput, plugin._validate_subnet,
-                              ctx, new_subnet, cur_subnet)
-
-    def _test_validate_subnet_ipv6_pd_modes(self, cur_subnet=None,
-                                            expect_success=True, **modes):
-        plugin = manager.NeutronManager.get_plugin()
-        ctx = context.get_admin_context()
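-        # Same as above, but using the provisional prefix-delegation CIDR.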
-        new_subnet = {'ip_version': 6,
-                      'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX,
-                      'enable_dhcp': True,
-                      'ipv6_address_mode': None,
-                      'ipv6_ra_mode': None}
-        for mode, value in modes.items():
-            new_subnet[mode] = value
-        if expect_success:
-            plugin._validate_subnet(ctx, new_subnet, cur_subnet)
-        else:
-            self.assertRaises(n_exc.InvalidInput, plugin._validate_subnet,
-                              ctx, new_subnet, cur_subnet)
-
-    def test_create_subnet_ipv6_ra_modes(self):
-        # Test all RA modes with no address mode specified
-        for ra_mode in constants.IPV6_MODES:
-            self._test_validate_subnet_ipv6_modes(
-                ipv6_ra_mode=ra_mode)
-            self._test_validate_subnet_ipv6_pd_modes(
-                ipv6_ra_mode=ra_mode)
-
-    def test_create_subnet_ipv6_addr_modes(self):
-        # Test all address modes with no RA mode specified
-        for addr_mode in constants.IPV6_MODES:
-            self._test_validate_subnet_ipv6_modes(
-                ipv6_address_mode=addr_mode)
-            self._test_validate_subnet_ipv6_pd_modes(
-                ipv6_address_mode=addr_mode)
-
-    def test_create_subnet_ipv6_same_ra_and_addr_modes(self):
-        # Test all ipv6 modes with ra_mode==addr_mode
-        for ipv6_mode in constants.IPV6_MODES:
-            self._test_validate_subnet_ipv6_modes(
-                ipv6_ra_mode=ipv6_mode,
-                ipv6_address_mode=ipv6_mode)
-            self._test_validate_subnet_ipv6_pd_modes(
-                ipv6_ra_mode=ipv6_mode,
-                ipv6_address_mode=ipv6_mode)
-
-    def test_create_subnet_ipv6_different_ra_and_addr_modes(self):
-        # Test all ipv6 modes with ra_mode!=addr_mode
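-        # Both modes are always set here, so every differing combination
-        # must be rejected.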
-        for ra_mode, addr_mode in itertools.permutations(
-                constants.IPV6_MODES, 2):
-            self._test_validate_subnet_ipv6_modes(
-                expect_success=not (ra_mode and addr_mode),
-                ipv6_ra_mode=ra_mode,
-                ipv6_address_mode=addr_mode)
-            self._test_validate_subnet_ipv6_pd_modes(
-                expect_success=not (ra_mode and addr_mode),
-                ipv6_ra_mode=ra_mode,
-                ipv6_address_mode=addr_mode)
-
-    def test_create_subnet_ipv6_out_of_cidr_global(self):
-        gateway_ip = '2000::1'
-        cidr = '2001::/64'
-
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(
-                gateway_ip=gateway_ip, cidr=cidr, ip_version=6,
-                ipv6_ra_mode=constants.DHCPV6_STATEFUL,
-                ipv6_address_mode=constants.DHCPV6_STATEFUL)
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_ipv6_out_of_cidr_lla(self):
-        gateway_ip = 'fe80::1'
-        cidr = '2001::/64'
-
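-        # A link-local gateway outside the subnet CIDR is acceptable for a
-        # SLAAC subnet, so this creation should succeed.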
-        self._test_create_subnet(
-            gateway_ip=gateway_ip, cidr=cidr, ip_version=6,
-            ipv6_ra_mode=constants.IPV6_SLAAC,
-            ipv6_address_mode=constants.IPV6_SLAAC)
-
-    def test_create_subnet_ipv6_attributes_no_dhcp_enabled(self):
-        gateway_ip = 'fe80::1'
-        cidr = 'fe80::/64'
-        for mode in constants.IPV6_MODES:
-            with testlib_api.ExpectedException(
-                    webob.exc.HTTPClientError) as ctx_manager:
-                self._test_create_subnet(gateway_ip=gateway_ip,
-                                         cidr=cidr, ip_version=6,
-                                         enable_dhcp=False,
-                                         ipv6_ra_mode=mode,
-                                         ipv6_address_mode=mode)
-            self.assertEqual(ctx_manager.exception.code,
-                             webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_invalid_ipv6_ra_mode(self):
-        gateway_ip = 'fe80::1'
-        cidr = 'fe80::/80'
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(gateway_ip=gateway_ip,
-                                     cidr=cidr, ip_version=6,
-                                     ipv6_ra_mode='foo',
-                                     ipv6_address_mode='slaac')
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_invalid_ipv6_address_mode(self):
-        gateway_ip = 'fe80::1'
-        cidr = 'fe80::/80'
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(gateway_ip=gateway_ip,
-                                     cidr=cidr, ip_version=6,
-                                     ipv6_ra_mode='slaac',
-                                     ipv6_address_mode='baz')
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_ipv6_ra_mode_ip_version_4(self):
-        cidr = '10.0.2.0/24'
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(cidr=cidr, ip_version=4,
-                                     ipv6_ra_mode=constants.DHCPV6_STATEFUL)
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_ipv6_address_mode_ip_version_4(self):
-        cidr = '10.0.2.0/24'
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_subnet(
-                cidr=cidr, ip_version=4,
-                ipv6_address_mode=constants.DHCPV6_STATEFUL)
-        self.assertEqual(ctx_manager.exception.code,
-                         webob.exc.HTTPClientError.code)
-
-    def _test_create_subnet_ipv6_auto_addr_with_port_on_network(
-            self, addr_mode, device_owner=DEVICE_OWNER_COMPUTE,
-            insert_db_reference_error=False):
-        # Create a network with one IPv4 subnet and one port
-        with self.network() as network,\
-            self.subnet(network=network) as v4_subnet,\
-            self.port(subnet=v4_subnet, device_owner=device_owner) as port:
-            if insert_db_reference_error:
-                orig_fn = orm.Session.add
-
-                def db_ref_err_for_ipalloc(s, instance):
-                    if instance.__class__.__name__ == 'IPAllocation':
-                        # tweak port_id to cause a FK violation,
-                        # thus DBReferenceError
-                        instance.port_id = 'nonexistent'
-                    return orig_fn(s, instance)
-
-                mock.patch.object(orm.Session, 'add',
-                                  side_effect=db_ref_err_for_ipalloc,
-                                  autospec=True).start()
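-                # Stub _get_subnet so the error-handling path does not
-                # depend on a real subnet lookup.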
-                mock.patch.object(db_base_plugin_common.DbBasePluginCommon,
-                                  '_get_subnet',
-                                  return_value=mock.Mock()).start()
-            # Add an IPv6 auto-address subnet to the network
-            v6_subnet = self._make_subnet(self.fmt, network, 'fe80::1',
-                                          'fe80::/64', ip_version=6,
-                                          ipv6_ra_mode=addr_mode,
-                                          ipv6_address_mode=addr_mode)
-            if (insert_db_reference_error
-                or device_owner == constants.DEVICE_OWNER_ROUTER_SNAT
-                or device_owner in constants.ROUTER_INTERFACE_OWNERS):
-                # On a DB reference error, and for DVR SNAT and router
-                # interface ports, the port should not have been updated
-                # with an address from the new auto-address subnet
-                self.assertEqual(1, len(port['port']['fixed_ips']))
-            else:
-                # Confirm that the port has been updated with an address
-                # from the new auto-address subnet
-                req = self.new_show_request('ports', port['port']['id'],
-                                            self.fmt)
-                sport = self.deserialize(self.fmt, req.get_response(self.api))
-                fixed_ips = sport['port']['fixed_ips']
-                self.assertEqual(2, len(fixed_ips))
-                self.assertIn(v6_subnet['subnet']['id'],
-                              [fixed_ip['subnet_id'] for fixed_ip
-                              in fixed_ips])
-
-    def test_create_subnet_ipv6_slaac_with_port_on_network(self):
-        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
-            constants.IPV6_SLAAC)
-
-    def test_create_subnet_dhcpv6_stateless_with_port_on_network(self):
-        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
-            constants.DHCPV6_STATELESS)
-
-    def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self):
-        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
-            constants.IPV6_SLAAC,
-            device_owner=constants.DEVICE_OWNER_DHCP)
-
-    def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self):
-        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
-            constants.IPV6_SLAAC,
-            device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
-
-    def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self):
-        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
-            constants.IPV6_SLAAC,
-            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT)
-
-    def test_create_subnet_ipv6_slaac_with_db_reference_error(self):
-        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
-            constants.IPV6_SLAAC, insert_db_reference_error=True)
-
-    def test_update_subnet_no_gateway(self):
-        with self.subnet() as subnet:
-            data = {'subnet': {'gateway_ip': '10.0.0.1'}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['subnet']['gateway_ip'],
-                             data['subnet']['gateway_ip'])
-            data = {'subnet': {'gateway_ip': None}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertIsNone(res['subnet']['gateway_ip'])
-
-    def test_update_subnet(self):
-        with self.subnet() as subnet:
-            data = {'subnet': {'gateway_ip': '10.0.0.1'}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['subnet']['gateway_ip'],
-                             data['subnet']['gateway_ip'])
-
-    def test_update_subnet_adding_additional_host_routes_and_dns(self):
-        host_routes = [{'destination': '172.16.0.0/24',
-                        'nexthop': '10.0.2.2'}]
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 4,
-                               'dns_nameservers': ['192.168.0.1'],
-                               'host_routes': host_routes,
-                               'tenant_id': network['network']['tenant_id']}}
-            subnet_req = self.new_create_request('subnets', data)
-            res = self.deserialize(self.fmt,
-                                   subnet_req.get_response(self.api))
-
-            host_routes = [{'destination': '172.16.0.0/24',
-                            'nexthop': '10.0.2.2'},
-                           {'destination': '192.168.0.0/24',
-                            'nexthop': '10.0.2.3'}]
-
-            dns_nameservers = ['192.168.0.1', '192.168.0.2']
-            data = {'subnet': {'host_routes': host_routes,
-                               'dns_nameservers': dns_nameservers}}
-            req = self.new_update_request('subnets', data,
-                                          res['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(
-                sorted(res['subnet']['host_routes'], key=utils.safe_sort_key),
-                sorted(host_routes, key=utils.safe_sort_key))
-            self.assertEqual(res['subnet']['dns_nameservers'],
-                             dns_nameservers)
-
-    def test_update_subnet_shared_returns_400(self):
-        with self.network(shared=True) as network:
-            with self.subnet(network=network) as subnet:
-                data = {'subnet': {'shared': True}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_gw_outside_cidr_returns_400(self):
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                data = {'subnet': {'gateway_ip': '100.0.0.1'}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_gw_ip_in_use_returns_409(self):
-        with self.network() as network:
-            with self.subnet(
-                network=network,
-                allocation_pools=[{'start': '10.0.0.100',
-                                   'end': '10.0.0.253'}]) as subnet:
-                subnet_data = subnet['subnet']
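-                # The current gateway address is held by a port, so moving
-                # the gateway must be rejected with a 409.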
-                with self.port(
-                    subnet=subnet,
-                    fixed_ips=[{'subnet_id': subnet_data['id'],
-                                'ip_address': subnet_data['gateway_ip']}]):
-                    data = {'subnet': {'gateway_ip': '10.0.0.99'}}
-                    req = self.new_update_request('subnets', data,
-                                                  subnet_data['id'])
-                    res = req.get_response(self.api)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPConflict.code)
-
-    def test_update_subnet_inconsistent_ipv4_gatewayv6(self):
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                data = {'subnet': {'gateway_ip': 'fe80::1'}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
-        with self.network() as network:
-            with self.subnet(network=network,
-                             ip_version=6, cidr='fe80::/48') as subnet:
-                data = {'subnet': {'gateway_ip': '10.1.1.1'}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_inconsistent_ipv4_dns_v6(self):
-        dns_nameservers = ['fe80::1']
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                data = {'subnet': {'dns_nameservers': dns_nameservers}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
-        host_routes = [{'destination': 'fe80::0/48',
-                        'nexthop': '10.0.2.20'}]
-        with self.network() as network:
-            with self.subnet(network=network,
-                             ip_version=6, cidr='fe80::/48') as subnet:
-                data = {'subnet': {'host_routes': host_routes}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
-        host_routes = [{'destination': '172.16.0.0/24',
-                        'nexthop': 'fe80::1'}]
-        with self.network() as network:
-            with self.subnet(network=network,
-                             ip_version=6, cidr='fe80::/48') as subnet:
-                data = {'subnet': {'host_routes': host_routes}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_gateway_in_allocation_pool_returns_409(self):
-        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
-        with self.network() as network:
-            with self.subnet(network=network,
-                             allocation_pools=allocation_pools,
-                             cidr='10.0.0.0/24') as subnet:
-                data = {'subnet': {'gateway_ip': '10.0.0.50'}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPConflict.code)
-
-    def test_update_subnet_ipv6_attributes_fails(self):
-        with self.subnet(ip_version=6, cidr='fe80::/64',
-                         ipv6_ra_mode=constants.IPV6_SLAAC,
-                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
-            data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL,
-                               'ipv6_address_mode': constants.DHCPV6_STATEFUL}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int,
-                             webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_ipv6_ra_mode_fails(self):
-        with self.subnet(ip_version=6, cidr='fe80::/64',
-                         ipv6_ra_mode=constants.IPV6_SLAAC) as subnet:
-            data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int,
-                             webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_ipv6_address_mode_fails(self):
-        with self.subnet(ip_version=6, cidr='fe80::/64',
-                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
-            data = {'subnet': {'ipv6_address_mode': constants.DHCPV6_STATEFUL}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int,
-                             webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_ipv6_cannot_disable_dhcp(self):
-        with self.subnet(ip_version=6, cidr='fe80::/64',
-                         ipv6_ra_mode=constants.IPV6_SLAAC,
-                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
-            data = {'subnet': {'enable_dhcp': False}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int,
-                             webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_ipv6_ra_mode_ip_version_4(self):
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                data = {'subnet': {'ipv6_ra_mode':
-                                   constants.DHCPV6_STATEFUL}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_ipv6_address_mode_ip_version_4(self):
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                data = {'subnet': {'ipv6_address_mode':
-                                   constants.DHCPV6_STATEFUL}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def _verify_updated_subnet_allocation_pools(self, res, with_gateway_ip):
-        res = self.deserialize(self.fmt, res)
-        self.assertEqual(len(res['subnet']['allocation_pools']), 2)
-        res_vals = (
-            list(res['subnet']['allocation_pools'][0].values()) +
-            list(res['subnet']['allocation_pools'][1].values())
-        )
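-        # The boundary addresses of both updated pools must all be present.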
-        for pool_val in ['10', '20', '30', '40']:
-            self.assertIn('192.168.0.%s' % pool_val, res_vals)
-        if with_gateway_ip:
-            self.assertEqual(res['subnet']['gateway_ip'],
-                             '192.168.0.9')
-
-    def _test_update_subnet_allocation_pools(self, with_gateway_ip=False):
-        """Test that we can successfully update with sane params.
-
-        This will create a subnet with specified allocation_pools
-        Then issue an update (PUT) to update these using correct
-        (i.e. non erroneous) params. Finally retrieve the updated
-        subnet and verify.
-        """
-        allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
-        with self.network() as network:
-            with self.subnet(network=network,
-                             allocation_pools=allocation_pools,
-                             cidr='192.168.0.0/24') as subnet:
-                data = {'subnet': {'allocation_pools': [
-                        {'start': '192.168.0.10', 'end': '192.168.0.20'},
-                        {'start': '192.168.0.30', 'end': '192.168.0.40'}]}}
-                if with_gateway_ip:
-                    data['subnet']['gateway_ip'] = '192.168.0.9'
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                # Check response code and contents
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_code, 200)
-                self._verify_updated_subnet_allocation_pools(res,
-                                                             with_gateway_ip)
-                # GET the subnet to verify the DB was updated correctly
-                req = self.new_show_request('subnets', subnet['subnet']['id'],
-                                            self.fmt)
-                res = req.get_response(self.api)
-                self._verify_updated_subnet_allocation_pools(res,
-                                                             with_gateway_ip)
-
-    def test_update_subnet_allocation_pools(self):
-        self._test_update_subnet_allocation_pools()
-
-    def test_update_subnet_allocation_pools_and_gateway_ip(self):
-        self._test_update_subnet_allocation_pools(with_gateway_ip=True)
-
-    def test_update_subnet_allocation_pools_invalid_pool_for_cidr(self):
-        """Test update alloc pool to something outside subnet.cidr.
-
-        This makes sure that an erroneous allocation_pool specified
-        in a subnet update (outside subnet cidr) will result in an error.
-        """
-        allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
-        with self.network() as network:
-            with self.subnet(network=network,
-                             allocation_pools=allocation_pools,
-                             cidr='192.168.0.0/24') as subnet:
-                data = {'subnet': {'allocation_pools': [
-                        {'start': '10.0.0.10', 'end': '10.0.0.20'}]}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    # Update the allocation pool to cover the existing subnet.gateway_ip
-    def test_update_subnet_allocation_pools_over_gateway_ip_returns_409(self):
-        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
-        with self.network() as network:
-            with self.subnet(network=network,
-                             allocation_pools=allocation_pools,
-                             cidr='10.0.0.0/24') as subnet:
-                data = {'subnet': {'allocation_pools': [
-                        {'start': '10.0.0.1', 'end': '10.0.0.254'}]}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPConflict.code)
-
-    def test_update_subnet_allocation_pools_invalid_returns_400(self):
-        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
-        with self.network() as network:
-            with self.subnet(network=network,
-                             allocation_pools=allocation_pools,
-                             cidr='10.0.0.0/24') as subnet:
-                # Check allocation pools
-                invalid_pools = [[{'end': '10.0.0.254'}],
-                                 [{'start': '10.0.0.254'}],
-                                 [{'start': '1000.0.0.254'}],
-                                 [{'start': '10.0.0.2', 'end': '10.0.0.254'},
-                                  {'end': '10.0.0.254'}],
-                                 None,
-                                 [{'start': '10.0.0.200', 'end': '10.0.3.20'}],
-                                 [{'start': '10.0.2.250', 'end': '10.0.3.5'}],
-                                 [{'start': '10.0.0.0', 'end': '10.0.0.50'}],
-                                 [{'start': '10.0.2.10', 'end': '10.0.2.5'}],
-                                 [{'start': 'fe80::2', 'end': 'fe80::ffff'}]]
-                for pool in invalid_pools:
-                    data = {'subnet': {'allocation_pools': pool}}
-                    req = self.new_update_request('subnets', data,
-                                                  subnet['subnet']['id'])
-                    res = req.get_response(self.api)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_allocation_pools_overlapping_returns_409(self):
-        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
-        with self.network() as network:
-            with self.subnet(network=network,
-                             allocation_pools=allocation_pools,
-                             cidr='10.0.0.0/24') as subnet:
-                data = {'subnet': {'allocation_pools': [
-                        {'start': '10.0.0.20', 'end': '10.0.0.40'},
-                        {'start': '10.0.0.30', 'end': '10.0.0.50'}]}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPConflict.code)
-
-    def test_show_subnet(self):
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                req = self.new_show_request('subnets',
-                                            subnet['subnet']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                self.assertEqual(res['subnet']['id'],
-                                 subnet['subnet']['id'])
-                self.assertEqual(res['subnet']['network_id'],
-                                 network['network']['id'])
-
-    def test_list_subnets(self):
-        with self.network() as network:
-            with self.subnet(network=network,
-                             gateway_ip='10.0.0.1',
-                             cidr='10.0.0.0/24') as v1,\
-                    self.subnet(network=network,
-                                gateway_ip='10.0.1.1',
-                                cidr='10.0.1.0/24') as v2,\
-                    self.subnet(network=network,
-                                gateway_ip='10.0.2.1',
-                                cidr='10.0.2.0/24') as v3:
-                subnets = (v1, v2, v3)
-                self._test_list_resources('subnet', subnets)
-
-    def test_list_subnets_shared(self):
-        with self.network(shared=True) as network:
-            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
-                with self.subnet(cidr='10.0.1.0/24') as priv_subnet:
-                    # normal user should see only 1 subnet
-                    req = self.new_list_request('subnets')
-                    req.environ['neutron.context'] = context.Context(
-                        '', 'some_tenant')
-                    res = self.deserialize(self.fmt,
-                                           req.get_response(self.api))
-                    self.assertEqual(len(res['subnets']), 1)
-                    self.assertEqual(res['subnets'][0]['cidr'],
-                                     subnet['subnet']['cidr'])
-                    # admin will see both subnets
-                    admin_req = self.new_list_request('subnets')
-                    admin_res = self.deserialize(
-                        self.fmt, admin_req.get_response(self.api))
-                    self.assertEqual(len(admin_res['subnets']), 2)
-                    cidrs = [sub['cidr'] for sub in admin_res['subnets']]
-                    self.assertIn(subnet['subnet']['cidr'], cidrs)
-                    self.assertIn(priv_subnet['subnet']['cidr'], cidrs)
-
-    def test_list_subnets_with_parameter(self):
-        with self.network() as network:
-            with self.subnet(network=network,
-                             gateway_ip='10.0.0.1',
-                             cidr='10.0.0.0/24') as v1,\
-                    self.subnet(network=network,
-                                gateway_ip='10.0.1.1',
-                                cidr='10.0.1.0/24') as v2:
-                subnets = (v1, v2)
-                query_params = 'ip_version=4&ip_version=6'
-                self._test_list_resources('subnet', subnets,
-                                          query_params=query_params)
-                query_params = 'ip_version=6'
-                self._test_list_resources('subnet', [],
-                                          query_params=query_params)
-
-    def test_list_subnets_with_sort_native(self):
-        if self._skip_native_sorting:
-            self.skipTest("Native sorting is not implemented")
-        with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
-                self.subnet(enable_dhcp=False, cidr='11.0.0.0/24') as subnet2,\
-                self.subnet(enable_dhcp=False, cidr='12.0.0.0/24') as subnet3:
-            self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
-                                      [('enable_dhcp', 'asc'),
-                                       ('cidr', 'desc')])
-
-    def test_list_subnets_with_sort_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_sorting_helper',
-            new=_fake_get_sorting_helper)
-        helper_patcher.start()
-        with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
-                self.subnet(enable_dhcp=False, cidr='11.0.0.0/24') as subnet2,\
-                self.subnet(enable_dhcp=False, cidr='12.0.0.0/24') as subnet3:
-            self._test_list_with_sort('subnet', (subnet3,
-                                                 subnet2,
-                                                 subnet1),
-                                      [('enable_dhcp', 'asc'),
-                                       ('cidr', 'desc')])
-
-    def test_list_subnets_with_pagination_native(self):
-        if self._skip_native_pagination:
-            self.skipTest("Native pagination is not implemented")
-        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
-                self.subnet(cidr='11.0.0.0/24') as subnet2,\
-                self.subnet(cidr='12.0.0.0/24') as subnet3:
-            self._test_list_with_pagination('subnet',
-                                            (subnet1, subnet2, subnet3),
-                                            ('cidr', 'asc'), 2, 2)
-
-    def test_list_subnets_with_pagination_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_pagination_helper',
-            new=_fake_get_pagination_helper)
-        helper_patcher.start()
-        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
-                self.subnet(cidr='11.0.0.0/24') as subnet2,\
-                self.subnet(cidr='12.0.0.0/24') as subnet3:
-            self._test_list_with_pagination('subnet',
-                                            (subnet1, subnet2, subnet3),
-                                            ('cidr', 'asc'), 2, 2)
-
-    def test_list_subnets_with_pagination_reverse_native(self):
-        if self._skip_native_sorting:
-            self.skipTest("Native sorting is not implemented")
-        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
-                self.subnet(cidr='11.0.0.0/24') as subnet2,\
-                self.subnet(cidr='12.0.0.0/24') as subnet3:
-            self._test_list_with_pagination_reverse('subnet',
-                                                    (subnet1, subnet2,
-                                                     subnet3),
-                                                    ('cidr', 'asc'), 2, 2)
-
-    def test_list_subnets_with_pagination_reverse_emulated(self):
-        helper_patcher = mock.patch(
-            'neutron.api.v2.base.Controller._get_pagination_helper',
-            new=_fake_get_pagination_helper)
-        helper_patcher.start()
-        self.addCleanup(helper_patcher.stop)
-        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
-                self.subnet(cidr='11.0.0.0/24') as subnet2,\
-                self.subnet(cidr='12.0.0.0/24') as subnet3:
-            self._test_list_with_pagination_reverse('subnet',
-                                                    (subnet1, subnet2,
-                                                     subnet3),
-                                                    ('cidr', 'asc'), 2, 2)
-
-    def test_invalid_ip_version(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 7,
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': '10.0.2.1'}}
-
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
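-    # The API only accepts ip_version values of 4 or 6, so the bogus value
-    # 7 above is rejected at validation time with a client error.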
-    def test_invalid_subnet(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': 'invalid',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': '10.0.2.1'}}
-
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def _test_unsupported_subnet_cidr(self, subnet_cidr):
-        with self.network() as network:
-            subnet = {'network_id': network['network']['id'],
-                      'cidr': subnet_cidr,
-                      'ip_version': 4,
-                      'enable_dhcp': True,
-                      'tenant_id': network['network']['tenant_id']}
-            plugin = manager.NeutronManager.get_plugin()
-            if hasattr(plugin, '_validate_subnet'):
-                self.assertRaises(n_exc.InvalidInput,
-                                  plugin._validate_subnet,
-                                  context.get_admin_context(),
-                                  subnet)
-
-    def test_unsupported_subnet_cidr_multicast(self):
-        self._test_unsupported_subnet_cidr("224.0.0.1/16")
-
-    def test_unsupported_subnet_cidr_loopback(self):
-        self._test_unsupported_subnet_cidr("127.0.0.1/8")
-
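-    # 224.0.0.0/4 is the IPv4 multicast range and 127.0.0.0/8 the loopback
-    # range; neither is usable for tenant subnets, so plugins exposing
-    # _validate_subnet are expected to reject both with InvalidInput.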
-    def test_invalid_ip_address(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': 'ipaddress'}}
-
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_invalid_uuid(self):
-        with self.network() as network:
-            data = {'subnet': {'network_id': 'invalid-uuid',
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': '10.0.0.1'}}
-
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_with_one_dns(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.100'}]
-        dns_nameservers = ['1.2.3.4']
-        self._test_create_subnet(gateway_ip=gateway_ip,
-                                 cidr=cidr,
-                                 allocation_pools=allocation_pools,
-                                 dns_nameservers=dns_nameservers)
-
-    def test_create_subnet_with_two_dns(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.100'}]
-        dns_nameservers = ['1.2.3.4', '4.3.2.1']
-        self._test_create_subnet(gateway_ip=gateway_ip,
-                                 cidr=cidr,
-                                 allocation_pools=allocation_pools,
-                                 dns_nameservers=dns_nameservers)
-
-    def test_create_subnet_with_too_many_dns(self):
-        with self.network() as network:
-            dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': '10.0.0.1',
-                               'dns_nameservers': dns_list}}
-
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_create_subnet_with_one_host_route(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.100'}]
-        host_routes = [{'destination': '135.207.0.0/16',
-                        'nexthop': '1.2.3.4'}]
-        self._test_create_subnet(gateway_ip=gateway_ip,
-                                 cidr=cidr,
-                                 allocation_pools=allocation_pools,
-                                 host_routes=host_routes)
-
-    def test_create_subnet_with_two_host_routes(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.100'}]
-        host_routes = [{'destination': '135.207.0.0/16',
-                        'nexthop': '1.2.3.4'},
-                       {'destination': '12.0.0.0/8',
-                        'nexthop': '4.3.2.1'}]
-
-        self._test_create_subnet(gateway_ip=gateway_ip,
-                                 cidr=cidr,
-                                 allocation_pools=allocation_pools,
-                                 host_routes=host_routes)
-
-    def test_create_subnet_with_too_many_routes(self):
-        with self.network() as network:
-            host_routes = [{'destination': '135.207.0.0/16',
-                            'nexthop': '1.2.3.4'},
-                           {'destination': '12.0.0.0/8',
-                            'nexthop': '4.3.2.1'},
-                           {'destination': '141.212.0.0/16',
-                            'nexthop': '2.2.2.2'}]
-
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'cidr': '10.0.2.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id'],
-                               'gateway_ip': '10.0.0.1',
-                               'host_routes': host_routes}}
-
-            subnet_req = self.new_create_request('subnets', data)
-            res = subnet_req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_dns(self):
-        with self.subnet() as subnet:
-            data = {'subnet': {'dns_nameservers': ['11.0.0.1']}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['subnet']['dns_nameservers'],
-                             data['subnet']['dns_nameservers'])
-
-    def test_subnet_lifecycle_dns_retains_order(self):
-        cfg.CONF.set_override('max_dns_nameservers', 3)
-        with self.subnet(dns_nameservers=['1.1.1.1', '2.2.2.2',
-                                          '3.3.3.3']) as subnet:
-            subnets = self._show('subnets', subnet['subnet']['id'],
-                                 expected_code=webob.exc.HTTPOk.code)
-            self.assertEqual(['1.1.1.1', '2.2.2.2', '3.3.3.3'],
-                             subnets['subnet']['dns_nameservers'])
-            data = {'subnet': {'dns_nameservers': ['2.2.2.2', '3.3.3.3',
-                                                   '1.1.1.1']}}
-            req = self.new_update_request('subnets',
-                                          data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(data['subnet']['dns_nameservers'],
-                             res['subnet']['dns_nameservers'])
-            subnets = self._show('subnets', subnet['subnet']['id'],
-                                 expected_code=webob.exc.HTTPOk.code)
-            self.assertEqual(data['subnet']['dns_nameservers'],
-                             subnets['subnet']['dns_nameservers'])
-
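-    # Nameserver order matters to resolvers (they are tried in the order
-    # listed), so the lifecycle test above verifies that create, update and
-    # show all return dns_nameservers in the caller-supplied order rather
-    # than in storage order.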
-    def test_update_subnet_dns_to_None(self):
-        with self.subnet(dns_nameservers=['11.0.0.1']) as subnet:
-            data = {'subnet': {'dns_nameservers': None}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual([], res['subnet']['dns_nameservers'])
-            data = {'subnet': {'dns_nameservers': ['11.0.0.3']}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(data['subnet']['dns_nameservers'],
-                             res['subnet']['dns_nameservers'])
-
-    def test_update_subnet_dns_with_too_many_entries(self):
-        with self.subnet() as subnet:
-            dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
-            data = {'subnet': {'dns_nameservers': dns_list}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_update_subnet_route(self):
-        with self.subnet() as subnet:
-            data = {'subnet': {'host_routes':
-                    [{'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'}]}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['subnet']['host_routes'],
-                             data['subnet']['host_routes'])
-
-    def test_update_subnet_route_to_None(self):
-        with self.subnet(host_routes=[{'destination': '12.0.0.0/8',
-                                       'nexthop': '1.2.3.4'}]) as subnet:
-            data = {'subnet': {'host_routes': None}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual([], res['subnet']['host_routes'])
-            data = {'subnet': {'host_routes': [{'destination': '12.0.0.0/8',
-                                                'nexthop': '1.2.3.4'}]}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(data['subnet']['host_routes'],
-                             res['subnet']['host_routes'])
-
-    def _test_update_subnet(self, old_gw=None, new_gw=None,
-                            check_gateway=False):
-        allocation_pools = [{'start': '192.168.0.16', 'end': '192.168.0.254'}]
-        with self.network() as network:
-            with self.subnet(network=network,
-                             gateway_ip=old_gw,
-                             allocation_pools=allocation_pools,
-                             cidr='192.168.0.0/24') as subnet:
-                data = {
-                    'subnet': {
-                        'allocation_pools': [
-                            {'start': '192.168.0.10', 'end': '192.168.0.20'},
-                            {'start': '192.168.0.30', 'end': '192.168.0.40'}],
-                        'gateway_ip': new_gw}}
-                req = self.new_update_request('subnets', data,
-                                              subnet['subnet']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(200, res.status_code)
-                self._verify_updated_subnet_allocation_pools(
-                    res, with_gateway_ip=check_gateway)
-
-    def test_update_subnet_from_no_gw_to_no_gw(self):
-        self._test_update_subnet()
-
-    def test_update_subnet_from_gw_to_no_gw(self):
-        self._test_update_subnet(old_gw='192.168.0.15')
-
-    def test_update_subnet_from_gw_to_new_gw(self):
-        self._test_update_subnet(old_gw='192.168.0.15',
-                                 new_gw='192.168.0.9', check_gateway=True)
-
-    def test_update_subnet_route_with_too_many_entries(self):
-        with self.subnet() as subnet:
-            data = {'subnet': {'host_routes': [
-                    {'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'},
-                    {'destination': '13.0.0.0/8', 'nexthop': '1.2.3.5'},
-                    {'destination': '14.0.0.0/8', 'nexthop': '1.2.3.6'}]}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
-
-    def test_delete_subnet_with_dns(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        dns_nameservers = ['1.2.3.4']
-        # Create new network
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_subnet(self.fmt, network, gateway_ip,
-                                   cidr, ip_version=4,
-                                   dns_nameservers=dns_nameservers)
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_delete_subnet_with_route(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        host_routes = [{'destination': '135.207.0.0/16',
-                        'nexthop': '1.2.3.4'}]
-        # Create new network
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_subnet(self.fmt, network, gateway_ip,
-                                   cidr, ip_version=4,
-                                   host_routes=host_routes)
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_delete_subnet_with_dns_and_route(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        dns_nameservers = ['1.2.3.4']
-        host_routes = [{'destination': '135.207.0.0/16',
-                        'nexthop': '1.2.3.4'}]
-        # Create new network
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_subnet(self.fmt, network, gateway_ip,
-                                   cidr, ip_version=4,
-                                   dns_nameservers=dns_nameservers,
-                                   host_routes=host_routes)
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_delete_subnet_with_callback(self):
-        with self.subnet() as subnet,\
-                mock.patch.object(registry, 'notify') as notify:
-
-            errors = [
-                exceptions.NotificationError(
-                    'fake_id', n_exc.NeutronException()),
-            ]
-            notify.side_effect = [
-                exceptions.CallbackFailure(errors=errors), None
-            ]
-
-            # Make sure the delete request fails
-            delete_request = self.new_delete_request('subnets',
-                                                     subnet['subnet']['id'])
-            delete_response = delete_request.get_response(self.api)
-
-            self.assertIn('NeutronError', delete_response.json)
-            self.assertEqual('SubnetInUse',
-                             delete_response.json['NeutronError']['type'])
-
-            # Make sure the subnet wasn't deleted
-            list_request = self.new_list_request(
-                'subnets', params="id=%s" % subnet['subnet']['id'])
-            list_response = list_request.get_response(self.api)
-            self.assertEqual(subnet['subnet']['id'],
-                             list_response.json['subnets'][0]['id'])
-
-    def _helper_test_validate_subnet(self, option, exception):
-        cfg.CONF.set_override(option, 0)
-        with self.network() as network:
-            subnet = {'network_id': network['network']['id'],
-                      'cidr': '10.0.2.0/24',
-                      'ip_version': 4,
-                      'tenant_id': network['network']['tenant_id'],
-                      'gateway_ip': '10.0.2.1',
-                      'dns_nameservers': ['8.8.8.8'],
-                      'host_routes': [{'destination': '135.207.0.0/16',
-                                       'nexthop': '1.2.3.4'}]}
-            plugin = manager.NeutronManager.get_plugin()
-            e = self.assertRaises(exception,
-                                  plugin._validate_subnet,
-                                  context.get_admin_context(),
-                                  subnet)
-            self.assertThat(
-                str(e),
-                matchers.Not(matchers.Contains('built-in function id')))
-
-    def test_validate_subnet_dns_nameservers_exhausted(self):
-        self._helper_test_validate_subnet(
-            'max_dns_nameservers',
-            n_exc.DNSNameServersExhausted)
-
-    def test_validate_subnet_host_routes_exhausted(self):
-        self._helper_test_validate_subnet(
-            'max_subnet_host_routes',
-            n_exc.HostRoutesExhausted)
-
-    def test_port_prevents_network_deletion(self):
-        with self.port() as p:
-            self._delete('networks', p['port']['network_id'],
-                         expected_code=webob.exc.HTTPConflict.code)
-
-    def test_port_prevents_subnet_deletion(self):
-        with self.port() as p:
-            self._delete('subnets', p['port']['fixed_ips'][0]['subnet_id'],
-                         expected_code=webob.exc.HTTPConflict.code)
-
-
-class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
-
-    _POOL_NAME = 'test-pool'
-
-    def _test_create_subnetpool(self, prefixes, expected=None,
-                                admin=False, **kwargs):
-        keys = kwargs.copy()
-        keys.setdefault('tenant_id', self._tenant_id)
-        with self.subnetpool(prefixes, admin, **keys) as subnetpool:
-            self._validate_resource(subnetpool, keys, 'subnetpool')
-            if expected:
-                self._compare_resource(subnetpool, expected, 'subnetpool')
-        return subnetpool
-
-    def _validate_default_prefix(self, prefix, subnetpool):
-        self.assertEqual(subnetpool['subnetpool']['default_prefixlen'], prefix)
-
-    def _validate_min_prefix(self, prefix, subnetpool):
-        self.assertEqual(subnetpool['subnetpool']['min_prefixlen'], prefix)
-
-    def _validate_max_prefix(self, prefix, subnetpool):
-        self.assertEqual(subnetpool['subnetpool']['max_prefixlen'], prefix)
-
-    def _validate_is_default(self, subnetpool):
-        self.assertTrue(subnetpool['subnetpool']['is_default'])
-
-    def test_create_subnetpool_empty_prefix_list(self):
-        self.assertRaises(webob.exc.HTTPClientError,
-                          self._test_create_subnetpool,
-                          [],
-                          name=self._POOL_NAME,
-                          tenant_id=self._tenant_id,
-                          min_prefixlen='21')
-
-    def test_create_default_subnetpools(self):
-        for cidr, min_prefixlen in (['fe80::/48', '64'],
-                                    ['10.10.10.0/24', '24']):
-            pool = self._test_create_subnetpool([cidr],
-                                                admin=True,
-                                                tenant_id=self._tenant_id,
-                                                name=self._POOL_NAME,
-                                                min_prefixlen=min_prefixlen,
-                                                is_default=True)
-            self._validate_is_default(pool)
-
-    def test_cannot_create_multiple_default_subnetpools(self):
-        for cidr1, cidr2, min_prefixlen in (['fe80::/48', '2001::/48', '64'],
-                                            ['10.10.10.0/24', '10.10.20.0/24',
-                                             '24']):
-
-            pool = self._test_create_subnetpool([cidr1],
-                                                admin=True,
-                                                tenant_id=self._tenant_id,
-                                                name=self._POOL_NAME,
-                                                min_prefixlen=min_prefixlen,
-                                                is_default=True)
-            self._validate_is_default(pool)
-            self.assertRaises(webob.exc.HTTPClientError,
-                              self._test_create_subnetpool,
-                              [cidr2],
-                              admin=True,
-                              tenant_id=self._tenant_id,
-                              name=self._POOL_NAME,
-                              min_prefixlen=min_prefixlen,
-                              is_default=True)
-
-    def test_create_subnetpool_ipv4_24_with_defaults(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/24')
-        subnetpool = self._test_create_subnetpool([subnet.cidr],
-                                                  name=self._POOL_NAME,
-                                                  tenant_id=self._tenant_id,
-                                                  min_prefixlen='21')
-        self._validate_default_prefix('21', subnetpool)
-        self._validate_min_prefix('21', subnetpool)
-
-    def test_create_subnetpool_ipv4_21_with_defaults(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/21')
-        subnetpool = self._test_create_subnetpool([subnet.cidr],
-                                                  name=self._POOL_NAME,
-                                                  tenant_id=self._tenant_id,
-                                                  min_prefixlen='21')
-        self._validate_default_prefix('21', subnetpool)
-        self._validate_min_prefix('21', subnetpool)
-
-    def test_create_subnetpool_ipv4_default_prefix_too_small(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/21')
-        self.assertRaises(webob.exc.HTTPClientError,
-                          self._test_create_subnetpool,
-                          [subnet.cidr],
-                          tenant_id=self._tenant_id,
-                          name=self._POOL_NAME,
-                          min_prefixlen='21',
-                          default_prefixlen='20')
-
-    def test_create_subnetpool_ipv4_default_prefix_too_large(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/21')
-        self.assertRaises(webob.exc.HTTPClientError,
-                          self._test_create_subnetpool,
-                          [subnet.cidr],
-                          tenant_id=self._tenant_id,
-                          name=self._POOL_NAME,
-                          max_prefixlen=24,
-                          default_prefixlen='32')
-
-    def test_create_subnetpool_ipv4_default_prefix_bounds(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/21')
-        subnetpool = self._test_create_subnetpool([subnet.cidr],
-                                                  tenant_id=self._tenant_id,
-                                                  name=self._POOL_NAME)
-        self._validate_min_prefix('8', subnetpool)
-        self._validate_default_prefix('8', subnetpool)
-        self._validate_max_prefix('32', subnetpool)
-
-    def test_create_subnetpool_ipv6_default_prefix_bounds(self):
-        subnet = netaddr.IPNetwork('fe80::/48')
-        subnetpool = self._test_create_subnetpool([subnet.cidr],
-                                                  tenant_id=self._tenant_id,
-                                                  name=self._POOL_NAME)
-        self._validate_min_prefix('64', subnetpool)
-        self._validate_default_prefix('64', subnetpool)
-        self._validate_max_prefix('128', subnetpool)
-
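-    # The two *_default_prefix_bounds tests above document the per-family
-    # fallbacks applied when no prefixlen attributes are supplied: a
-    # min/default/max of 8/8/32 for IPv4 pools and 64/64/128 for IPv6.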
-    def test_create_subnetpool_ipv4_supported_default_prefix(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/21')
-        subnetpool = self._test_create_subnetpool([subnet.cidr],
-                                                  tenant_id=self._tenant_id,
-                                                  name=self._POOL_NAME,
-                                                  min_prefixlen='21',
-                                                  default_prefixlen='26')
-        self._validate_default_prefix('26', subnetpool)
-
-    def test_create_subnetpool_ipv4_supported_min_prefix(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/24')
-        subnetpool = self._test_create_subnetpool([subnet.cidr],
-                                                  tenant_id=self._tenant_id,
-                                                  name=self._POOL_NAME,
-                                                  min_prefixlen='26')
-        self._validate_min_prefix('26', subnetpool)
-        self._validate_default_prefix('26', subnetpool)
-
-    def test_create_subnetpool_ipv4_default_prefix_smaller_than_min(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/21')
-        self.assertRaises(webob.exc.HTTPClientError,
-                          self._test_create_subnetpool,
-                          [subnet.cidr],
-                          tenant_id=self._tenant_id,
-                          name=self._POOL_NAME,
-                          default_prefixlen='22',
-                          min_prefixlen='23')
-
-    def test_create_subnetpool_mixed_ip_version(self):
-        subnet_v4 = netaddr.IPNetwork('10.10.10.0/21')
-        subnet_v6 = netaddr.IPNetwork('fe80::/48')
-        self.assertRaises(webob.exc.HTTPClientError,
-                          self._test_create_subnetpool,
-                          [subnet_v4.cidr, subnet_v6.cidr],
-                          tenant_id=self._tenant_id,
-                          name=self._POOL_NAME,
-                          min_prefixlen='21')
-
-    def test_create_subnetpool_ipv6_with_defaults(self):
-        subnet = netaddr.IPNetwork('fe80::/48')
-        subnetpool = self._test_create_subnetpool([subnet.cidr],
-                                                  tenant_id=self._tenant_id,
-                                                  name=self._POOL_NAME,
-                                                  min_prefixlen='48')
-        self._validate_default_prefix('48', subnetpool)
-        self._validate_min_prefix('48', subnetpool)
-
-    def test_get_subnetpool(self):
-        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                                  tenant_id=self._tenant_id,
-                                                  name=self._POOL_NAME,
-                                                  min_prefixlen='24')
-        req = self.new_show_request('subnetpools',
-                                    subnetpool['subnetpool']['id'])
-        res = self.deserialize(self.fmt, req.get_response(self.api))
-        self.assertEqual(subnetpool['subnetpool']['id'],
-                         res['subnetpool']['id'])
-
-    def test_get_subnetpool_different_tenants_not_shared(self):
-        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                                  shared=False,
-                                                  tenant_id=self._tenant_id,
-                                                  name=self._POOL_NAME,
-                                                  min_prefixlen='24')
-        req = self.new_show_request('subnetpools',
-                                    subnetpool['subnetpool']['id'])
-        neutron_context = context.Context('', 'not-the-owner')
-        req.environ['neutron.context'] = neutron_context
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 404)
-
-    def test_get_subnetpool_different_tenants_shared(self):
-        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                                  None,
-                                                  True,
-                                                  name=self._POOL_NAME,
-                                                  min_prefixlen='24',
-                                                  shared=True)
-        req = self.new_show_request('subnetpools',
-                                    subnetpool['subnetpool']['id'])
-        neutron_context = context.Context('', self._tenant_id)
-        req.environ['neutron.context'] = neutron_context
-        res = self.deserialize(self.fmt, req.get_response(self.api))
-        self.assertEqual(subnetpool['subnetpool']['id'],
-                         res['subnetpool']['id'])
-
-    def test_list_subnetpools_different_tenants_shared(self):
-        self._test_create_subnetpool(['10.10.10.0/24'],
-                                     None,
-                                     True,
-                                     name=self._POOL_NAME,
-                                     min_prefixlen='24',
-                                     shared=True)
-        admin_res = self._list('subnetpools')
-        mortal_res = self._list(
-            'subnetpools',
-            neutron_context=context.Context('', 'not-the-owner'))
-        self.assertEqual(len(admin_res['subnetpools']), 1)
-        self.assertEqual(len(mortal_res['subnetpools']), 1)
-
-    def test_list_subnetpools_different_tenants_not_shared(self):
-        self._test_create_subnetpool(['10.10.10.0/24'],
-                                     None,
-                                     True,
-                                     name=self._POOL_NAME,
-                                     min_prefixlen='24',
-                                     shared=False)
-        admin_res = self._list('subnetpools')
-        mortal_res = self._list(
-            'subnetpools',
-            neutron_context=context.Context('', 'not-the-owner'))
-        self.assertEqual(len(admin_res['subnetpools']), 1)
-        self.assertEqual(len(mortal_res['subnetpools']), 0)
-
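-    # An unshared pool is only visible to its owner (and to admins), so in
-    # the test above the admin listing sees one pool while the foreign
-    # tenant sees none.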
-    def test_delete_subnetpool(self):
-        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                                  tenant_id=self._tenant_id,
-                                                  name=self._POOL_NAME,
-                                                  min_prefixlen='24')
-        req = self.new_delete_request('subnetpools',
-                                      subnetpool['subnetpool']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 204)
-
-    def test_delete_nonexistent_subnetpool(self):
-        req = self.new_delete_request('subnetpools',
-                                      'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
-        res = req.get_response(self._api_for_resource('subnetpools'))
-        self.assertEqual(res.status_int, 404)
-
-    def test_update_subnetpool_prefix_list_append(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.8.0/21'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='24')
-
-        data = {'subnetpool': {'prefixes': ['10.10.8.0/21', '3.3.3.0/24',
-                                            '2.2.2.0/24']}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        api = self._api_for_resource('subnetpools')
-        res = self.deserialize(self.fmt, req.get_response(api))
-        self.assertItemsEqual(res['subnetpool']['prefixes'],
-                              ['10.10.8.0/21', '3.3.3.0/24', '2.2.2.0/24'])
-
-    def test_update_subnetpool_prefix_list_compaction(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='24')
-
-        data = {'subnetpool': {'prefixes': ['10.10.10.0/24',
-                                            '10.10.11.0/24']}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        api = self._api_for_resource('subnetpools')
-        res = self.deserialize(self.fmt, req.get_response(api))
-        self.assertItemsEqual(res['subnetpool']['prefixes'],
-                              ['10.10.10.0/23'])
-
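-    # The compaction above is plain CIDR arithmetic: 10.10.10.0/24 and
-    # 10.10.11.0/24 are exactly the two halves of 10.10.10.0/23, so the
-    # updated prefix list merges into that single /23.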
-    def test_illegal_subnetpool_prefix_list_update(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='24')
-
-        data = {'subnetpool': {'prefixes': ['10.10.11.0/24']}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        api = self._api_for_resource('subnetpools')
-        res = req.get_response(api)
-        self.assertEqual(res.status_int, 400)
-
-    def test_update_subnetpool_default_prefix(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.8.0/21'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='24')
-
-        data = {'subnetpool': {'default_prefixlen': '26'}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        api = self._api_for_resource('subnetpools')
-        res = self.deserialize(self.fmt, req.get_response(api))
-        self.assertEqual(res['subnetpool']['default_prefixlen'], 26)
-
-    def test_update_subnetpool_min_prefix(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='24')
-
-        data = {'subnetpool': {'min_prefixlen': '21'}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        res = self.deserialize(self.fmt, req.get_response(self.api))
-        self.assertEqual(res['subnetpool']['min_prefixlen'], 21)
-
-    def test_update_subnetpool_min_prefix_larger_than_max(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='21',
-                                         max_prefixlen='24')
-
-        data = {'subnetpool': {'min_prefixlen': '28'}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 400)
-
-    def test_update_subnetpool_max_prefix(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='21',
-                                         max_prefixlen='24')
-
-        data = {'subnetpool': {'max_prefixlen': '26'}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        res = self.deserialize(self.fmt, req.get_response(self.api))
-        self.assertEqual(res['subnetpool']['max_prefixlen'], 26)
-
-    def test_update_subnetpool_max_prefix_less_than_min(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='24')
-
-        data = {'subnetpool': {'max_prefixlen': '21'}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 400)
-
-    def test_update_subnetpool_max_prefix_less_than_default(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='21',
-                                         default_prefixlen='24')
-
-        data = {'subnetpool': {'max_prefixlen': '22'}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 400)
-
-    def test_update_subnetpool_default_prefix_less_than_min(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='21')
-
-        data = {'subnetpool': {'default_prefixlen': '20'}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 400)
-
-    def test_update_subnetpool_default_prefix_larger_than_max(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='21',
-                                         max_prefixlen='24')
-
-        data = {'subnetpool': {'default_prefixlen': '28'}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 400)
-
-    def test_update_subnetpool_prefix_list_mixed_ip_version(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='24')
-
-        data = {'subnetpool': {'prefixes': ['fe80::/48']}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 400)
-
-    def test_update_subnetpool_default_quota(self):
-        initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
-                                         tenant_id=self._tenant_id,
-                                         name=self._POOL_NAME,
-                                         min_prefixlen='24',
-                                         default_quota=10)
-
-        self.assertEqual(initial_subnetpool['subnetpool']['default_quota'],
-                         10)
-        data = {'subnetpool': {'default_quota': '1'}}
-        req = self.new_update_request('subnetpools', data,
-                                      initial_subnetpool['subnetpool']['id'])
-        res = self.deserialize(self.fmt, req.get_response(self.api))
-        self.assertEqual(res['subnetpool']['default_quota'], 1)
-
-    def test_allocate_any_subnet_with_prefixlen(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            # Request a subnet allocation (no CIDR)
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'prefixlen': 24,
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-
-            subnet = netaddr.IPNetwork(res['subnet']['cidr'])
-            self.assertEqual(subnet.prefixlen, 24)
-            # Assert the allocated subnet CIDR is a subnet of our pool prefix
-            supernet = netaddr.smallest_matching_cidr(
-                                                 subnet,
-                                                 sp['subnetpool']['prefixes'])
-            self.assertEqual(supernet, netaddr.IPNetwork('10.10.0.0/16'))
-
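-    # netaddr.smallest_matching_cidr() returns the most specific prefix in
-    # the supplied list that contains the subnet; asserting it equals
-    # 10.10.0.0/16 proves the allocation was carved out of the pool.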
-    def test_allocate_any_subnet_with_default_prefixlen(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            # Request an 'any' allocation using the pool's default prefixlen
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-
-            subnet = netaddr.IPNetwork(res['subnet']['cidr'])
-            self.assertEqual(subnet.prefixlen,
-                             int(sp['subnetpool']['default_prefixlen']))
-
-    def test_allocate_specific_subnet_with_mismatch_prefixlen(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '10.10.1.0/24',
-                               'prefixlen': 26,
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 400)
-
-    def test_allocate_specific_subnet_with_matching_prefixlen(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '10.10.1.0/24',
-                               'prefixlen': 24,
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 400)
-
-    def test_allocate_specific_subnet(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            # Request a specific subnet allocation
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '10.10.1.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-
-            # Assert the allocated subnet CIDR is what we expect
-            subnet = netaddr.IPNetwork(res['subnet']['cidr'])
-            self.assertEqual(subnet, netaddr.IPNetwork('10.10.1.0/24'))
-
-    def test_allocate_specific_subnet_non_existent_prefix(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            # Request a specific subnet allocation
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '192.168.1.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 500)
-
-    def test_allocate_specific_subnet_already_allocated(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.10.0/24'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            # Request a specific subnet allocation
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '10.10.10.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            # Allocate the subnet
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 201)
-            # Attempt to allocate it again
-            res = req.get_response(self.api)
-            # Assert error
-            self.assertEqual(res.status_int, 500)
-
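-    # In the test above the second, identical request overlaps the subnet
-    # just carved from the pool; the overlap error is not translated into
-    # a client fault, hence the asserted 500 rather than a 409.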
-    def test_allocate_specific_subnet_prefix_too_small(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            # Request a specific subnet allocation
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '10.10.0.0/20',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 400)
-
-    def test_allocate_specific_subnet_prefix_specific_gw(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            # Request a specific subnet allocation
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '10.10.1.0/24',
-                               'gateway_ip': '10.10.1.254',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['subnet']['gateway_ip'], '10.10.1.254')
-
-    def test_allocate_specific_subnet_prefix_allocation_pools(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            # Request a specific subnet allocation
-            pools = [{'start': '10.10.1.2',
-                      'end': '10.10.1.253'}]
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '10.10.1.0/24',
-                               'gateway_ip': '10.10.1.1',
-                               'ip_version': 4,
-                               'allocation_pools': pools,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['subnet']['allocation_pools'][0]['start'],
-                             pools[0]['start'])
-            self.assertEqual(res['subnet']['allocation_pools'][0]['end'],
-                             pools[0]['end'])
-
-    def test_allocate_any_subnet_prefix_allocation_pools(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.10.0/24'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            # Request an 'any' subnet allocation (the pool picks the CIDR)
-            pools = [{'start': '10.10.10.1',
-                      'end': '10.10.10.254'}]
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'prefixlen': '24',
-                               'ip_version': 4,
-                               'allocation_pools': pools,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 400)
-
-    def test_allocate_specific_subnet_prefix_too_large(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21',
-                                              max_prefixlen='21')
-
-            # Request a specific subnet allocation
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '10.10.0.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 400)
-
-    def test_delete_subnetpool_existing_allocations(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21')
-
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'cidr': '10.10.0.0/24',
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            req.get_response(self.api)
-            req = self.new_delete_request('subnetpools',
-                                          sp['subnetpool']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 400)
-
-    def test_allocate_subnet_over_quota(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['10.10.0.0/16'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME,
-                                              min_prefixlen='21',
-                                              default_quota=2048)
-
-            # Request an 'any' allocation of a /21 (no CIDR specified)
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'ip_version': 4,
-                               'prefixlen': 21,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            # Allocate a subnet to fill the quota
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 201)
-            # Attempt to allocate a /21 again
-            res = req.get_response(self.api)
-            # Assert error
-            self.assertEqual(res.status_int, 409)
-
-    def test_allocate_any_ipv4_subnet_ipv6_pool(self):
-        with self.network() as network:
-            sp = self._test_create_subnetpool(['2001:db8:1:2::/63'],
-                                              tenant_id=self._tenant_id,
-                                              name=self._POOL_NAME)
-
-            # Request an 'any' IPv4 subnet allocation from the IPv6 pool
-            data = {'subnet': {'network_id': network['network']['id'],
-                               'subnetpool_id': sp['subnetpool']['id'],
-                               'ip_version': 4,
-                               'tenant_id': network['network']['tenant_id']}}
-            req = self.new_create_request('subnets', data)
-            res = req.get_response(self.api)
-            self.assertEqual(res.status_int, 400)
-
-
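-
-# A minimal, standalone sketch of the pool-side checks the subnetpool
-# tests above exercise: a requested allocation must match the pool's
-# address family and respect its prefix-length bounds. The names below
-# are illustrative only, not the real subnet-pool driver API.
-def _example_validate_pool_request(pool_cidr, ip_version, min_prefixlen,
-                                   max_prefixlen, requested_prefixlen):
-    import netaddr
-    pool = netaddr.IPNetwork(pool_cidr)
-    if pool.version != ip_version:
-        # e.g. an IPv4 request against an IPv6 pool -> the API returns 400
-        raise ValueError('requested family does not match the pool')
-    if not min_prefixlen <= requested_prefixlen <= max_prefixlen:
-        # e.g. a /24 request against a pool whose max_prefixlen is 21 -> 400
-        raise ValueError('prefixlen outside the pool bounds')
-
-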
-class DbModelTestCase(testlib_api.SqlTestCase):
-    """DB model tests."""
-    def test_repr(self):
-        """testing the string representation of 'model' classes."""
-        network = models_v2.Network(name="net_net", status="OK",
-                                    admin_state_up=True)
-        actual_repr_output = repr(network)
-        exp_start_with = "<neutron.db.models_v2.Network"
-        exp_middle = "[object at %x]" % id(network)
-        exp_end_with = (" {tenant_id=None, id=None, "
-                        "name='net_net', status='OK', "
-                        "admin_state_up=True, mtu=None, "
-                        "vlan_transparent=None, "
-                        "availability_zone_hints=None, "
-                        "standard_attr_id=None}>")
-        final_exp = exp_start_with + exp_middle + exp_end_with
-        self.assertEqual(final_exp, actual_repr_output)
-
-    def _make_network(self, ctx):
-        with ctx.session.begin():
-            network = models_v2.Network(name="net_net", status="OK",
-                                        tenant_id='dbcheck',
-                                        admin_state_up=True)
-            ctx.session.add(network)
-        return network
-
-    def _make_subnet(self, ctx, network_id):
-        with ctx.session.begin():
-            subnet = models_v2.Subnet(name="subsub", ip_version=4,
-                                      tenant_id='dbcheck',
-                                      # cidr is deliberately bogus; it is not
-                                      # validated at the DB model layer
-                                      cidr='turn_down_for_what',
-                                      network_id=network_id)
-            ctx.session.add(subnet)
-        return subnet
-
-    def _make_port(self, ctx, network_id):
-        with ctx.session.begin():
-            port = models_v2.Port(network_id=network_id, mac_address='1',
-                                  tenant_id='dbcheck',
-                                  admin_state_up=True, status="COOL",
-                                  device_id="devid", device_owner="me")
-            ctx.session.add(port)
-        return port
-
-    def _make_subnetpool(self, ctx):
-        with ctx.session.begin():
-            subnetpool = models_v2.SubnetPool(
-                ip_version=4, default_prefixlen=4, min_prefixlen=4,
-                max_prefixlen=4, shared=False, default_quota=4,
-                address_scope_id='f', tenant_id='dbcheck',
-                is_default=False
-            )
-            ctx.session.add(subnetpool)
-        return subnetpool
-
-    def _make_security_group_and_rule(self, ctx):
-        with ctx.session.begin():
-            sg = sgdb.SecurityGroup(name='sg', description='sg')
-            rule = sgdb.SecurityGroupRule(security_group=sg, port_range_min=1,
-                                          port_range_max=2, protocol='TCP',
-                                          ethertype='v4', direction='ingress',
-                                          remote_ip_prefix='0.0.0.0/0')
-            ctx.session.add(sg)
-            ctx.session.add(rule)
-        return sg, rule
-
-    def _make_floating_ip(self, ctx, port_id):
-        with ctx.session.begin():
-            flip = l3_db.FloatingIP(floating_ip_address='1.2.3.4',
-                                    floating_network_id='somenet',
-                                    floating_port_id=port_id)
-            ctx.session.add(flip)
-        return flip
-
-    def _make_router(self, ctx):
-        with ctx.session.begin():
-            router = l3_db.Router()
-            ctx.session.add(router)
-        return router
-
-    def _get_neutron_attr(self, ctx, attr_id):
-        return ctx.session.query(
-            models_v2.model_base.StandardAttribute).filter(
-            models_v2.model_base.StandardAttribute.id == attr_id).one()
-
-    def _test_standardattr_removed_on_obj_delete(self, ctx, obj):
-        attr_id = obj.standard_attr_id
-        self.assertEqual(
-            obj.__table__.name,
-            self._get_neutron_attr(ctx, attr_id).resource_type)
-        with ctx.session.begin():
-            ctx.session.delete(obj)
-        with testtools.ExpectedException(orm.exc.NoResultFound):
-            # we want to make sure that the attr resource was removed
-            self._get_neutron_attr(ctx, attr_id)
-
-    def test_standardattr_removed_on_subnet_delete(self):
-        ctx = context.get_admin_context()
-        network = self._make_network(ctx)
-        subnet = self._make_subnet(ctx, network.id)
-        self._test_standardattr_removed_on_obj_delete(ctx, subnet)
-
-    def test_standardattr_removed_on_network_delete(self):
-        ctx = context.get_admin_context()
-        network = self._make_network(ctx)
-        self._test_standardattr_removed_on_obj_delete(ctx, network)
-
-    def test_standardattr_removed_on_subnetpool_delete(self):
-        ctx = context.get_admin_context()
-        spool = self._make_subnetpool(ctx)
-        self._test_standardattr_removed_on_obj_delete(ctx, spool)
-
-    def test_standardattr_removed_on_port_delete(self):
-        ctx = context.get_admin_context()
-        network = self._make_network(ctx)
-        port = self._make_port(ctx, network.id)
-        self._test_standardattr_removed_on_obj_delete(ctx, port)
-
-    def test_standardattr_removed_on_sg_delete(self):
-        ctx = context.get_admin_context()
-        sg, rule = self._make_security_group_and_rule(ctx)
-        self._test_standardattr_removed_on_obj_delete(ctx, sg)
-        # make sure the attr entry was wiped out for the rule as well
-        with testtools.ExpectedException(orm.exc.NoResultFound):
-            self._get_neutron_attr(ctx, rule.standard_attr_id)
-
-    def test_standardattr_removed_on_floating_ip_delete(self):
-        ctx = context.get_admin_context()
-        network = self._make_network(ctx)
-        port = self._make_port(ctx, network.id)
-        flip = self._make_floating_ip(ctx, port.id)
-        self._test_standardattr_removed_on_obj_delete(ctx, flip)
-
-    def test_standardattr_removed_on_router_delete(self):
-        ctx = context.get_admin_context()
-        router = self._make_router(ctx)
-        self._test_standardattr_removed_on_obj_delete(ctx, router)
-
-    def test_resource_type_fields(self):
-        ctx = context.get_admin_context()
-        network = self._make_network(ctx)
-        port = self._make_port(ctx, network.id)
-        subnet = self._make_subnet(ctx, network.id)
-        spool = self._make_subnetpool(ctx)
-        for disc, obj in (('ports', port), ('networks', network),
-                          ('subnets', subnet), ('subnetpools', spool)):
-            self.assertEqual(
-                disc, obj.standard_attr.resource_type)
-
-
-class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase,
-                                       testlib_api.SqlTestCase):
-    """Tests for NeutronDbPluginV2 as Mixin.
-
-    While NeutronDbPluginV2TestCase checks NeutronDbPlugin and all plugins as
-    a complete plugin, this test case verifies abilities of NeutronDbPlugin
-    which are provided to other plugins (e.g. DB operations). This test case
-    may include tests only for NeutronDbPlugin, so this should not be used in
-    unit tests for other plugins.
-    """
-
-    def setUp(self):
-        super(NeutronDbPluginV2AsMixinTestCase, self).setUp()
-        self.plugin = importutils.import_object(DB_PLUGIN_KLASS)
-        self.context = context.get_admin_context()
-        self.net_data = {'network': {'id': 'fake-id',
-                                     'name': 'net1',
-                                     'admin_state_up': True,
-                                     'tenant_id': 'test-tenant',
-                                     'shared': False}}
-
-    def test_create_network_with_default_status(self):
-        net = self.plugin.create_network(self.context, self.net_data)
-        default_net_create_status = 'ACTIVE'
-        expected = [('id', 'fake-id'), ('name', 'net1'),
-                    ('admin_state_up', True), ('tenant_id', 'test-tenant'),
-                    ('shared', False), ('status', default_net_create_status)]
-        for k, v in expected:
-            self.assertEqual(net[k], v)
-
-    def test_create_network_with_status_BUILD(self):
-        self.net_data['network']['status'] = 'BUILD'
-        net = self.plugin.create_network(self.context, self.net_data)
-        self.assertEqual(net['status'], 'BUILD')
-
-    def test_get_user_allocation_for_dhcp_port_returns_none(self):
-        plugin = manager.NeutronManager.get_plugin()
-        with self.network() as net, self.network() as net1:
-            with self.subnet(network=net, cidr='10.0.0.0/24') as subnet,\
-                    self.subnet(network=net1, cidr='10.0.1.0/24') as subnet1:
-                with self.port(subnet=subnet,
-                               device_owner=constants.DEVICE_OWNER_DHCP),\
-                        self.port(subnet=subnet1):
-                    # check that user allocations on another network do not
-                    # affect the _subnet_get_user_allocation result
-                    res = plugin._subnet_get_user_allocation(
-                        context.get_admin_context(),
-                        subnet['subnet']['id'])
-                    self.assertIsNone(res)
-
-    def test__validate_network_subnetpools(self):
-        network = models_v2.Network()
-        network.subnets = [models_v2.Subnet(subnetpool_id='test_id',
-                                            ip_version=4)]
-        new_subnetpool_id = None
-        self.assertRaises(n_exc.NetworkSubnetPoolAffinityError,
-                          self.plugin.ipam._validate_network_subnetpools,
-                          network, new_subnetpool_id, 4)
-
-
-class TestNetworks(testlib_api.SqlTestCase):
-    def setUp(self):
-        super(TestNetworks, self).setUp()
-        self._tenant_id = 'test-tenant'
-
-        # Update the plugin
-        self.setup_coreplugin(DB_PLUGIN_KLASS)
-
-    def _create_network(self, plugin, ctx, shared=True):
-        network = {'network': {'name': 'net',
-                               'shared': shared,
-                               'admin_state_up': True,
-                               'tenant_id': self._tenant_id}}
-        created_network = plugin.create_network(ctx, network)
-        return (network, created_network['id'])
-
-    def _create_port(self, plugin, ctx, net_id, device_owner, tenant_id):
-        port = {'port': {'name': 'port',
-                         'network_id': net_id,
-                         'mac_address': attributes.ATTR_NOT_SPECIFIED,
-                         'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
-                         'admin_state_up': True,
-                         'device_id': 'device_id',
-                         'device_owner': device_owner,
-                         'tenant_id': tenant_id}}
-        plugin.create_port(ctx, port)
-
-    def _test_update_shared_net_used(self,
-                                     device_owner,
-                                     expected_exception=None):
-        plugin = manager.NeutronManager.get_plugin()
-        ctx = context.get_admin_context()
-        network, net_id = self._create_network(plugin, ctx)
-
-        self._create_port(plugin,
-                          ctx,
-                          net_id,
-                          device_owner,
-                          self._tenant_id + '1')
-
-        network['network']['shared'] = False
-
-        if expected_exception:
-            with testlib_api.ExpectedException(expected_exception):
-                plugin.update_network(ctx, net_id, network)
-        else:
-            plugin.update_network(ctx, net_id, network)
-
-    def test_update_shared_net_used_fails(self):
-        self._test_update_shared_net_used('', n_exc.InvalidSharedSetting)
-
-    def test_update_shared_net_used_as_router_gateway(self):
-        self._test_update_shared_net_used(
-            constants.DEVICE_OWNER_ROUTER_GW)
-
-    def test_update_shared_net_used_by_floating_ip(self):
-        self._test_update_shared_net_used(
-            constants.DEVICE_OWNER_FLOATINGIP)
-
-
-class DbOperationBoundMixin(object):
-    """Mixin to support tests that assert constraints on DB operations."""
-
-    def setUp(self, *args, **kwargs):
-        super(DbOperationBoundMixin, self).setUp(*args, **kwargs)
-        self._db_execute_count = 0
-
-        def _event_incrementer(*args, **kwargs):
-            self._db_execute_count += 1
-
-        engine = db_api.get_engine()
-        event.listen(engine, 'after_execute', _event_incrementer)
-        self.addCleanup(event.remove, engine, 'after_execute',
-                        _event_incrementer)
-
-    def _list_and_count_queries(self, resource):
-        self._db_execute_count = 0
-        self.assertNotEqual([], self._list(resource))
-        query_count = self._db_execute_count
-        # sanity check to make sure queries are being observed
-        self.assertNotEqual(0, query_count)
-        return query_count
-
-    def _assert_object_list_queries_constant(self, obj_creator, plural):
-        obj_creator()
-        before_count = self._list_and_count_queries(plural)
-        # creating one more object shouldn't change the DB query count
-        obj_creator()
-        self.assertEqual(before_count, self._list_and_count_queries(plural))
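-
-
-# A minimal, standalone sketch of the query-counting pattern that
-# DbOperationBoundMixin wires up above, assuming a plain SQLAlchemy
-# engine rather than Neutron's db_api session (illustrative only):
-def _example_count_queries():
-    from sqlalchemy import create_engine, event
-
-    engine = create_engine('sqlite://')
-    counter = {'executed': 0}
-
-    def _bump(*args, **kwargs):
-        # fired once per statement executed on the engine
-        counter['executed'] += 1
-
-    event.listen(engine, 'after_execute', _bump)
-    engine.execute('SELECT 1')
-    event.remove(engine, 'after_execute', _bump)
-    return counter['executed']  # 1 for the single SELECT above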
diff --git a/neutron/tests/unit/db/test_dvr_mac_db.py b/neutron/tests/unit/db/test_dvr_mac_db.py
deleted file mode 100644 (file)
index c827aee..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo_config import cfg
-
-from neutron import context
-from neutron.db import dvr_mac_db
-from neutron.extensions import dvr
-from neutron.tests.unit.plugins.ml2 import test_plugin
-
-
-class DVRDbMixinImpl(dvr_mac_db.DVRDbMixin):
-
-    def __init__(self, notifier):
-        self.notifier = notifier
-
-
-class DvrDbMixinTestCase(test_plugin.Ml2PluginV2TestCase):
-
-    def setUp(self):
-        super(DvrDbMixinTestCase, self).setUp()
-        self.ctx = context.get_admin_context()
-        self.mixin = DVRDbMixinImpl(mock.Mock())
-
-    def _create_dvr_mac_entry(self, host, mac_address):
-        with self.ctx.session.begin(subtransactions=True):
-            entry = dvr_mac_db.DistributedVirtualRouterMacAddress(
-                host=host, mac_address=mac_address)
-            self.ctx.session.add(entry)
-
-    def test__get_dvr_mac_address_by_host(self):
-        with self.ctx.session.begin(subtransactions=True):
-            entry = dvr_mac_db.DistributedVirtualRouterMacAddress(
-                host='foo_host', mac_address='foo_mac_address')
-            self.ctx.session.add(entry)
-        result = self.mixin._get_dvr_mac_address_by_host(self.ctx, 'foo_host')
-        self.assertEqual(entry, result)
-
-    def test__get_dvr_mac_address_by_host_not_found(self):
-        self.assertRaises(dvr.DVRMacAddressNotFound,
-                          self.mixin._get_dvr_mac_address_by_host,
-                          self.ctx, 'foo_host')
-
-    def test__create_dvr_mac_address_success(self):
-        entry = {'host': 'foo_host', 'mac_address': '00:11:22:33:44:55'}
-        with mock.patch.object(dvr_mac_db.utils, 'get_random_mac') as f:
-            f.return_value = entry['mac_address']
-            expected = self.mixin._create_dvr_mac_address(
-                self.ctx, entry['host'])
-        self.assertEqual(expected, entry)
-
-    def test__create_dvr_mac_address_retries_exceeded_retry_logic(self):
-        new_retries = 8
-        cfg.CONF.set_override('mac_generation_retries', new_retries)
-        self._create_dvr_mac_entry('foo_host_1', 'non_unique_mac')
-        with mock.patch.object(dvr_mac_db.utils, 'get_random_mac') as f:
-            f.return_value = 'non_unique_mac'
-            self.assertRaises(dvr.MacAddressGenerationFailure,
-                              self.mixin._create_dvr_mac_address,
-                              self.ctx, "foo_host_2")
-        self.assertEqual(new_retries, f.call_count)
-
-    def test_get_dvr_mac_address_list(self):
-        self._create_dvr_mac_entry('host_1', 'mac_1')
-        self._create_dvr_mac_entry('host_2', 'mac_2')
-        mac_list = self.mixin.get_dvr_mac_address_list(self.ctx)
-        self.assertEqual(2, len(mac_list))
-
-    def test_get_dvr_mac_address_by_host_existing_host(self):
-        self._create_dvr_mac_entry('foo_host', 'foo_mac')
-        with mock.patch.object(self.mixin,
-                               '_get_dvr_mac_address_by_host') as f:
-            self.mixin.get_dvr_mac_address_by_host(self.ctx, 'foo_host')
-            self.assertEqual(1, f.call_count)
-
-    def test_get_dvr_mac_address_by_host_missing_host(self):
-        with mock.patch.object(self.mixin, '_create_dvr_mac_address') as f:
-            self.mixin.get_dvr_mac_address_by_host(self.ctx, 'foo_host')
-            self.assertEqual(1, f.call_count)
-
-    def test_get_subnet_for_dvr_returns_correct_mac(self):
-        with self.subnet() as subnet,\
-                self.port(subnet=subnet),\
-                self.port(subnet=subnet):
-            dvr_subnet = self.mixin.get_subnet_for_dvr(self.ctx,
-                                                       subnet['subnet']['id'])
-            # no gateway port should be found, so no info should be returned
-            self.assertEqual({}, dvr_subnet)
-            with self.port(
-                    subnet=subnet,
-                    fixed_ips=[{'ip_address': subnet['subnet'][
-                        'gateway_ip']}]) as gw_port:
-                dvr_subnet = self.mixin.get_subnet_for_dvr(
-                    self.ctx, subnet['subnet']['id'])
-                self.assertEqual(gw_port['port']['mac_address'],
-                                 dvr_subnet['gateway_mac'])
-
-    def test_get_subnet_for_dvr_returns_correct_mac_fixed_ips_passed(self):
-        with self.subnet() as subnet,\
-                self.port(subnet=subnet,
-                          fixed_ips=[{'ip_address': '10.0.0.2'}]),\
-                self.port(subnet=subnet,
-                          fixed_ips=[{'ip_address': '10.0.0.3'}]):
-            fixed_ips = [{'subnet_id': subnet['subnet']['id'],
-                          'ip_address': '10.0.0.4'}]
-            dvr_subnet = self.mixin.get_subnet_for_dvr(
-                self.ctx, subnet['subnet']['id'], fixed_ips)
-            # no gateway port should be found, so no info should be returned
-            self.assertEqual({}, dvr_subnet)
-            with self.port(
-                    subnet=subnet,
-                    fixed_ips=[{'ip_address': '10.0.0.4'}]) as gw_port:
-                dvr_subnet = self.mixin.get_subnet_for_dvr(
-                    self.ctx, subnet['subnet']['id'], fixed_ips)
-                self.assertEqual(gw_port['port']['mac_address'],
-                                 dvr_subnet['gateway_mac'])
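-
-
-# A minimal, standalone sketch of the bounded-retry allocation that the
-# retry-logic test above asserts, assuming an in-memory set of used
-# addresses instead of the real DB uniqueness constraint:
-def _example_allocate_unique_mac(generate_mac, used_macs, retries):
-    for _ in range(retries):
-        candidate = generate_mac()
-        if candidate not in used_macs:
-            used_macs.add(candidate)
-            return candidate
-    # mirrors dvr.MacAddressGenerationFailure after 'retries' attempts
-    raise RuntimeError('MAC generation retries exceeded')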
diff --git a/neutron/tests/unit/db/test_ipam_backend_mixin.py b/neutron/tests/unit/db/test_ipam_backend_mixin.py
deleted file mode 100644 (file)
index c25045a..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (c) 2015 Infoblox Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.common import constants
-from neutron.db import ipam_backend_mixin
-from neutron.tests import base
-
-
-class TestIpamBackendMixin(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestIpamBackendMixin, self).setUp()
-        self.mixin = ipam_backend_mixin.IpamBackendMixin()
-        self.ctx = mock.Mock()
-        self.default_new_ips = (('id-1', '192.168.1.1'),
-                                ('id-2', '192.168.1.2'))
-        self.default_original_ips = (('id-1', '192.168.1.1'),
-                                     ('id-5', '172.20.16.5'))
-        self.owner_non_router = constants.DEVICE_OWNER_DHCP
-        self.owner_router = constants.DEVICE_OWNER_ROUTER_INTF
-
-    def _prepare_ips(self, ips):
-        return [{'ip_address': ip[1],
-                 'subnet_id': ip[0]} for ip in ips]
-
-    def _mock_slaac_subnet_on(self):
-        slaac_subnet = {'ipv6_address_mode': constants.IPV6_SLAAC,
-                        'ipv6_ra_mode': constants.IPV6_SLAAC}
-        self.mixin._get_subnet = mock.Mock(return_value=slaac_subnet)
-
-    def _mock_slaac_subnet_off(self):
-        non_slaac_subnet = {'ipv6_address_mode': None,
-                            'ipv6_ra_mode': None}
-        self.mixin._get_subnet = mock.Mock(return_value=non_slaac_subnet)
-
-    def _test_get_changed_ips_for_port(self, expected_change, original_ips,
-                                       new_ips, owner):
-        change = self.mixin._get_changed_ips_for_port(self.ctx,
-                                                      original_ips,
-                                                      new_ips,
-                                                      owner)
-        self.assertEqual(expected_change, change)
-
-    def test__get_changed_ips_for_port(self):
-        new_ips = self._prepare_ips(self.default_new_ips)
-        original_ips = self._prepare_ips(self.default_original_ips)
-
-        expected_change = self.mixin.Changes(add=[new_ips[1]],
-                                             original=[original_ips[0]],
-                                             remove=[original_ips[1]])
-        self._test_get_changed_ips_for_port(expected_change, original_ips,
-                                            new_ips, self.owner_router)
-
-    def test__get_changed_ips_for_port_autoaddress(self):
-        new_ips = self._prepare_ips(self.default_new_ips)
-
-        original = (('id-1', '192.168.1.1'),
-                    ('id-5', '2000:1234:5678::12FF:FE34:5678'))
-        original_ips = self._prepare_ips(original)
-
-        self._mock_slaac_subnet_on()
-
-        expected_change = self.mixin.Changes(add=[new_ips[1]],
-                                             original=original_ips,
-                                             remove=[])
-        self._test_get_changed_ips_for_port(expected_change, original_ips,
-                                            new_ips, self.owner_non_router)
-
-    def test__get_changed_ips_for_port_autoaddress_ipv6_pd_enabled(self):
-        owner_not_router = constants.DEVICE_OWNER_DHCP
-        new_ips = self._prepare_ips(self.default_new_ips)
-
-        original = (('id-1', '192.168.1.1'),
-                    ('id-5', '2000:1234:5678::12FF:FE34:5678'))
-        original_ips = self._prepare_ips(original)
-
-        # mock a prefix-delegation subnet to exercise the auto-address path
-        pd_subnet = {'subnetpool_id': constants.IPV6_PD_POOL_ID,
-                     'ipv6_address_mode': constants.IPV6_SLAAC,
-                     'ipv6_ra_mode': constants.IPV6_SLAAC}
-        self.mixin._get_subnet = mock.Mock(return_value=pd_subnet)
-
-        # build expected_change up front, since original_ips is modified
-        # by _get_changed_ips_for_port
-        expected_change = self.mixin.Changes(add=[new_ips[1]],
-                                             original=[original_ips[0]],
-                                             remove=[original_ips[1]])
-
-        self._test_get_changed_ips_for_port(expected_change, original_ips,
-                                            new_ips, owner_not_router)
-
-    def _test_get_changed_ips_for_port_no_ip_address(self):
-        # An IP address should be added if only subnet_id is provided,
-        # regardless of the subnet's auto-address status
-        new_ips = [{'subnet_id': 'id-3'}]
-        original_ips = []
-
-        expected_change = self.mixin.Changes(add=[new_ips[0]],
-                                             original=[],
-                                             remove=[])
-        self._test_get_changed_ips_for_port(expected_change, original_ips,
-                                            new_ips, self.owner_non_router)
-
-    def test__get_changed_ips_for_port_no_ip_address_no_slaac(self):
-        self._mock_slaac_subnet_off()
-        self._test_get_changed_ips_for_port_no_ip_address()
-
-    def test__get_changed_ips_for_port_no_ip_address_slaac(self):
-        self._mock_slaac_subnet_on()
-        self._test_get_changed_ips_for_port_no_ip_address()
-
-    def test__is_ip_required_by_subnet_for_router_port(self):
-        # Owner -> router:
-        # _get_subnet should not be called,
-        # expected True
-        self._mock_slaac_subnet_off()
-
-        result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
-                                                      self.owner_router)
-        self.assertTrue(result)
-        self.assertFalse(self.mixin._get_subnet.called)
-
-    def test__is_ip_required_by_subnet_for_non_router_port(self):
-        # Owner -> not router:
-        # _get_subnet should be called,
-        # expected True, because subnet is not slaac
-        self._mock_slaac_subnet_off()
-
-        result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
-                                                      self.owner_non_router)
-        self.assertTrue(result)
-        self.assertTrue(self.mixin._get_subnet.called)
-
-    def test__is_ip_required_by_subnet_for_non_router_port_and_slaac(self):
-        # Owner -> not router:
-        # _get_subnet should be called,
-        # expected False, because subnet is slaac
-        self._mock_slaac_subnet_on()
-
-        result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
-                                                      self.owner_non_router)
-        self.assertFalse(result)
-        self.assertTrue(self.mixin._get_subnet.called)
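-
-
-# A minimal, standalone sketch of the add/original/remove partition the
-# tests above assert, ignoring the SLAAC special-casing (on auto-address
-# subnets existing IPs are kept rather than removed). The helper name is
-# illustrative, not the mixin's real API:
-def _example_partition_ips(original_ips, new_ips):
-    def _key(ip):
-        return (ip['subnet_id'], ip.get('ip_address'))
-
-    original_keys = {_key(ip) for ip in original_ips}
-    new_keys = {_key(ip) for ip in new_ips}
-    add = [ip for ip in new_ips if _key(ip) not in original_keys]
-    original = [ip for ip in original_ips if _key(ip) in new_keys]
-    remove = [ip for ip in original_ips if _key(ip) not in new_keys]
-    return add, original, remove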
diff --git a/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py b/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py
deleted file mode 100644 (file)
index 3678e79..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.db import db_base_plugin_common
-from neutron.db import db_base_plugin_v2
-from neutron.db import ipam_non_pluggable_backend as non_ipam
-from neutron.db import models_v2
-from neutron.tests import base
-
-
-class TestIpamNonPluggableBackend(base.BaseTestCase):
-    """Unit Tests for non pluggable IPAM Logic."""
-
-    def test_generate_ip(self):
-        with mock.patch.object(non_ipam.IpamNonPluggableBackend,
-                               '_try_generate_ip') as generate:
-            with mock.patch.object(non_ipam.IpamNonPluggableBackend,
-                                   '_rebuild_availability_ranges') as rebuild:
-
-                non_ipam.IpamNonPluggableBackend._generate_ip('c', 's')
-
-        generate.assert_called_once_with('c', 's')
-        self.assertEqual(0, rebuild.call_count)
-
-    def test_generate_ip_exhausted_pool(self):
-        with mock.patch.object(non_ipam.IpamNonPluggableBackend,
-                               '_try_generate_ip') as generate:
-            with mock.patch.object(non_ipam.IpamNonPluggableBackend,
-                                   '_rebuild_availability_ranges') as rebuild:
-
-                exception = n_exc.IpAddressGenerationFailure(net_id='n')
-                # fail first call but not second
-                generate.side_effect = [exception, None]
-                non_ipam.IpamNonPluggableBackend._generate_ip('c', 's')
-
-        self.assertEqual(2, generate.call_count)
-        rebuild.assert_called_once_with('c', 's')
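-
-    # A minimal sketch of the generate/rebuild/retry flow the two tests
-    # above cover: try once, rebuild the availability ranges on
-    # exhaustion, then try again (illustrative only):
-    @staticmethod
-    def _example_generate_ip(context, subnet, try_generate, rebuild):
-        try:
-            return try_generate(context, subnet)
-        except n_exc.IpAddressGenerationFailure:
-            rebuild(context, subnet)
-        return try_generate(context, subnet)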
-
-    def _validate_rebuild_availability_ranges(self, pools, allocations,
-                                              expected):
-        ip_qry = mock.Mock()
-        ip_qry.with_lockmode.return_value = ip_qry
-        ip_qry.filter_by.return_value = allocations
-
-        pool_qry = mock.Mock()
-        pool_qry.options.return_value = pool_qry
-        pool_qry.with_lockmode.return_value = pool_qry
-        pool_qry.filter_by.return_value = pools
-
-        def return_queries_side_effect(*args, **kwargs):
-            if args[0] == models_v2.IPAllocation:
-                return ip_qry
-            if args[0] == models_v2.IPAllocationPool:
-                return pool_qry
-
-        context = mock.Mock()
-        context.session.query.side_effect = return_queries_side_effect
-        subnets = [mock.MagicMock()]
-
-        non_ipam.IpamNonPluggableBackend._rebuild_availability_ranges(
-            context, subnets)
-
-        actual = [[args[0].allocation_pool_id,
-                   args[0].first_ip, args[0].last_ip]
-                  for _name, args, _kwargs in context.session.add.mock_calls]
-        self.assertEqual(expected, actual)
-
-    def test_rebuild_availability_ranges(self):
-        pools = [{'id': 'a',
-                  'first_ip': '192.168.1.3',
-                  'last_ip': '192.168.1.10'},
-                 {'id': 'b',
-                  'first_ip': '192.168.1.100',
-                  'last_ip': '192.168.1.120'}]
-
-        allocations = [{'ip_address': '192.168.1.3'},
-                       {'ip_address': '192.168.1.78'},
-                       {'ip_address': '192.168.1.7'},
-                       {'ip_address': '192.168.1.110'},
-                       {'ip_address': '192.168.1.11'},
-                       {'ip_address': '192.168.1.4'},
-                       {'ip_address': '192.168.1.111'}]
-
-        expected = [['a', '192.168.1.5', '192.168.1.6'],
-                    ['a', '192.168.1.8', '192.168.1.10'],
-                    ['b', '192.168.1.100', '192.168.1.109'],
-                    ['b', '192.168.1.112', '192.168.1.120']]
-
-        self._validate_rebuild_availability_ranges(pools, allocations,
-                                                   expected)
-
-    def test_rebuild_ipv6_availability_ranges(self):
-        pools = [{'id': 'a',
-                  'first_ip': '2001::1',
-                  'last_ip': '2001::50'},
-                 {'id': 'b',
-                  'first_ip': '2001::100',
-                  'last_ip': '2001::ffff:ffff:ffff:fffe'}]
-
-        allocations = [{'ip_address': '2001::10'},
-                       {'ip_address': '2001::45'},
-                       {'ip_address': '2001::60'},
-                       {'ip_address': '2001::111'},
-                       {'ip_address': '2001::200'},
-                       {'ip_address': '2001::ffff:ffff:ffff:ff10'},
-                       {'ip_address': '2001::ffff:ffff:ffff:f2f0'}]
-
-        expected = [['a', '2001::1', '2001::f'],
-                    ['a', '2001::11', '2001::44'],
-                    ['a', '2001::46', '2001::50'],
-                    ['b', '2001::100', '2001::110'],
-                    ['b', '2001::112', '2001::1ff'],
-                    ['b', '2001::201', '2001::ffff:ffff:ffff:f2ef'],
-                    ['b', '2001::ffff:ffff:ffff:f2f1',
-                     '2001::ffff:ffff:ffff:ff0f'],
-                    ['b', '2001::ffff:ffff:ffff:ff11',
-                     '2001::ffff:ffff:ffff:fffe']]
-
-        self._validate_rebuild_availability_ranges(pools, allocations,
-                                                   expected)
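-
-    # A minimal sketch of the range rebuild the two tests above verify,
-    # computed with netaddr sets instead of the backend's SQL queries
-    # (illustrative only):
-    @staticmethod
-    def _example_rebuild_ranges(first_ip, last_ip, allocated_ips):
-        import netaddr
-        pool = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip).cidrs())
-        free = pool - netaddr.IPSet(allocated_ips)
-        return [[str(r[0]), str(r[-1])] for r in free.iter_ipranges()]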
-
-    def _test__allocate_ips_for_port(self, subnets, port, expected):
-        # this test is incompatible with pluggable ipam, because the
-        # subnets are not actually created, so no ipam_subnet exists
-        cfg.CONF.set_override("ipam_driver", None)
-        plugin = db_base_plugin_v2.NeutronDbPluginV2()
-        with mock.patch.object(db_base_plugin_common.DbBasePluginCommon,
-                               '_get_subnets') as get_subnets:
-            with mock.patch.object(non_ipam.IpamNonPluggableBackend,
-                                   '_check_unique_ip') as check_unique:
-                context = mock.Mock()
-                get_subnets.return_value = subnets
-                check_unique.return_value = True
-                actual = plugin.ipam._allocate_ips_for_port(context, port)
-                self.assertEqual(expected, actual)
-
-    def test__allocate_ips_for_port_2_slaac_subnets(self):
-        subnets = [
-            {
-                'cidr': u'2001:100::/64',
-                'enable_dhcp': True,
-                'gateway_ip': u'2001:100::1',
-                'id': u'd1a28edd-bd83-480a-bd40-93d036c89f13',
-                'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
-                'ip_version': 6,
-                'ipv6_address_mode': None,
-                'ipv6_ra_mode': u'slaac'},
-            {
-                'cidr': u'2001:200::/64',
-                'enable_dhcp': True,
-                'gateway_ip': u'2001:200::1',
-                'id': u'dc813d3d-ed66-4184-8570-7325c8195e28',
-                'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
-                'ip_version': 6,
-                'ipv6_address_mode': None,
-                'ipv6_ra_mode': u'slaac'}]
-        port = {'port': {
-            'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
-            'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
-            'mac_address': '12:34:56:78:44:ab',
-            'device_owner': 'compute'}}
-        expected = []
-        for subnet in subnets:
-            addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(
-                            subnet['cidr'], port['port']['mac_address']))
-            expected.append({'ip_address': addr, 'subnet_id': subnet['id']})
-
-        self._test__allocate_ips_for_port(subnets, port, expected)
-
-    def test__allocate_ips_for_port_2_slaac_pd_subnets(self):
-        subnets = [
-            {
-                'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX,
-                'enable_dhcp': True,
-                'gateway_ip': '::1',
-                'id': 'd1a28edd-bd83-480a-bd40-93d036c89f13',
-                'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
-                'ip_version': 6,
-                'ipv6_address_mode': None,
-                'ipv6_ra_mode': 'slaac'},
-            {
-                'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX,
-                'enable_dhcp': True,
-                'gateway_ip': '::1',
-                'id': 'dc813d3d-ed66-4184-8570-7325c8195e28',
-                'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
-                'ip_version': 6,
-                'ipv6_address_mode': None,
-                'ipv6_ra_mode': 'slaac'}]
-        port = {'port': {
-            'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
-            'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
-            'mac_address': '12:34:56:78:44:ab',
-            'device_owner': 'compute'}}
-        expected = []
-        for subnet in subnets:
-            addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(
-                            subnet['cidr'], port['port']['mac_address']))
-            expected.append({'ip_address': addr, 'subnet_id': subnet['id']})
-
-        self._test__allocate_ips_for_port(subnets, port, expected)
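-
-
-# A minimal, standalone sketch of the modified EUI-64 derivation
-# (RFC 4291) that ipv6_utils.get_ipv6_addr_by_EUI64 performs above:
-# flip the universal/local bit of the MAC, insert ff:fe in the middle,
-# and append the result to the /64 prefix (illustrative only):
-def _example_eui64_address(prefix_cidr, mac):
-    import netaddr
-    octets = [int(part, 16) for part in mac.split(':')]
-    octets[0] ^= 0x02  # flip the universal/local bit
-    eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]
-    host_bits = 0
-    for octet in eui64:
-        host_bits = (host_bits << 8) | octet
-    network = netaddr.IPNetwork(prefix_cidr)
-    return str(netaddr.IPAddress(int(network.network) + host_bits))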
diff --git a/neutron/tests/unit/db/test_ipam_pluggable_backend.py b/neutron/tests/unit/db/test_ipam_pluggable_backend.py
deleted file mode 100644 (file)
index 5ca13cc..0000000
+++ /dev/null
@@ -1,544 +0,0 @@
-# Copyright (c) 2015 Infoblox Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import netaddr
-import webob.exc
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.db import ipam_backend_mixin
-from neutron.db import ipam_pluggable_backend
-from neutron.ipam import requests as ipam_req
-from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_base
-
-
-class UseIpamMixin(object):
-
-    def setUp(self):
-        cfg.CONF.set_override("ipam_driver", 'internal')
-        super(UseIpamMixin, self).setUp()
-
-
-class TestIpamHTTPResponse(UseIpamMixin, test_db_base.TestV2HTTPResponse):
-    pass
-
-
-class TestIpamPorts(UseIpamMixin, test_db_base.TestPortsV2):
-    pass
-
-
-class TestIpamNetworks(UseIpamMixin, test_db_base.TestNetworksV2):
-    pass
-
-
-class TestIpamSubnets(UseIpamMixin, test_db_base.TestSubnetsV2):
-    pass
-
-
-class TestIpamSubnetPool(UseIpamMixin, test_db_base.TestSubnetPoolsV2):
-    pass
-
-
-class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
-    def setUp(self):
-        cfg.CONF.set_override("ipam_driver", 'internal')
-        super(TestDbBasePluginIpam, self).setUp()
-        self.tenant_id = uuidutils.generate_uuid()
-        self.subnet_id = uuidutils.generate_uuid()
-
-    def _prepare_mocks(self, address_factory=None):
-        if address_factory is None:
-            address_factory = ipam_req.AddressRequestFactory
-
-        mocks = {
-            'driver': mock.Mock(),
-            'subnet': mock.Mock(),
-            'subnet_request': ipam_req.SpecificSubnetRequest(
-                self.tenant_id,
-                self.subnet_id,
-                '10.0.0.0/24',
-                '10.0.0.1',
-                [netaddr.IPRange('10.0.0.2', '10.0.0.254')]),
-        }
-        mocks['driver'].get_subnet.return_value = mocks['subnet']
-        mocks['driver'].allocate_subnet.return_value = mocks['subnet']
-        mocks['driver'].get_subnet_request_factory.return_value = (
-            ipam_req.SubnetRequestFactory)
-        mocks['driver'].get_address_request_factory.return_value = (
-            address_factory)
-        mocks['subnet'].get_details.return_value = mocks['subnet_request']
-        return mocks
-
-    def _prepare_ipam(self):
-        mocks = self._prepare_mocks()
-        mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
-        return mocks
-
-    def _prepare_mocks_with_pool_mock(self, pool_mock, address_factory=None):
-        mocks = self._prepare_mocks(address_factory=address_factory)
-        pool_mock.get_instance.return_value = mocks['driver']
-        return mocks
-
-    def _get_allocate_mock(self, auto_ip='10.0.0.2',
-                           fail_ip='127.0.0.1',
-                           error_message='SomeError'):
-        def allocate_mock(request):
-            if isinstance(request, ipam_req.SpecificAddressRequest):
-                if request.address == netaddr.IPAddress(fail_ip):
-                    raise n_exc.InvalidInput(error_message=error_message)
-                else:
-                    return str(request.address)
-            else:
-                return auto_ip
-
-        return allocate_mock
-
-    def _validate_allocate_calls(self, expected_calls, mocks):
-        self.assertTrue(mocks['subnet'].allocate.called)
-
-        actual_calls = mocks['subnet'].allocate.call_args_list
-        self.assertEqual(len(expected_calls), len(actual_calls))
-
-        for i, call in enumerate(expected_calls):
-            if call['ip_address']:
-                self.assertIsInstance(actual_calls[i][0][0],
-                                      ipam_req.SpecificAddressRequest)
-                self.assertEqual(netaddr.IPAddress(call['ip_address']),
-                                 actual_calls[i][0][0].address)
-            else:
-                self.assertIsInstance(actual_calls[i][0][0],
-                                      ipam_req.AnyAddressRequest)
-
-    def _convert_to_ips(self, data):
-        ips = [{'ip_address': ip,
-                'subnet_id': data[ip][1],
-                'subnet_cidr': data[ip][0]} for ip in data]
-        return sorted(ips, key=lambda t: t['subnet_cidr'])
-
-    def _gen_subnet_id(self):
-        return uuidutils.generate_uuid()
-
-    def test_deallocate_single_ip(self):
-        mocks = self._prepare_ipam()
-        ip = '192.168.12.45'
-        data = {ip: ['192.168.12.0/24', self._gen_subnet_id()]}
-        ips = self._convert_to_ips(data)
-
-        mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'],
-                                           mock.ANY, ips)
-
-        mocks['driver'].get_subnet.assert_called_once_with(data[ip][1])
-        mocks['subnet'].deallocate.assert_called_once_with(ip)
-
-    def test_deallocate_multiple_ips(self):
-        mocks = self._prepare_ipam()
-        data = {'192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()],
-                '172.23.158.84': ['172.23.128.0/17', self._gen_subnet_id()],
-                '8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]}
-        ips = self._convert_to_ips(data)
-
-        mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'],
-                                           mock.ANY, ips)
-
-        get_calls = [mock.call(data[ip][1]) for ip in data]
-        mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True)
-
-        ip_calls = [mock.call(ip) for ip in data]
-        mocks['subnet'].deallocate.assert_has_calls(ip_calls, any_order=True)
-
-    def _single_ip_allocate_helper(self, mocks, ip, network, subnet):
-        ips = [{'subnet_cidr': network,
-                'subnet_id': subnet}]
-        if ip:
-            ips[0]['ip_address'] = ip
-
-        allocated_ips = mocks['ipam']._ipam_allocate_ips(
-            mock.ANY, mocks['driver'], mock.ANY, ips)
-
-        mocks['driver'].get_subnet.assert_called_once_with(subnet)
-
-        self.assertTrue(mocks['subnet'].allocate.called)
-        request = mocks['subnet'].allocate.call_args[0][0]
-
-        return {'ips': allocated_ips,
-                'request': request}
-
-    def test_allocate_single_fixed_ip(self):
-        mocks = self._prepare_ipam()
-        ip = '192.168.15.123'
-        mocks['subnet'].allocate.return_value = ip
-
-        results = self._single_ip_allocate_helper(mocks,
-                                                  ip,
-                                                  '192.168.15.0/24',
-                                                  self._gen_subnet_id())
-
-        self.assertIsInstance(results['request'],
-                              ipam_req.SpecificAddressRequest)
-        self.assertEqual(netaddr.IPAddress(ip), results['request'].address)
-
-        self.assertEqual(ip, results['ips'][0]['ip_address'],
-                         'Should allocate the same ip as passed')
-
-    def test_allocate_single_any_ip(self):
-        mocks = self._prepare_ipam()
-        network = '192.168.15.0/24'
-        ip = '192.168.15.83'
-        mocks['subnet'].allocate.return_value = ip
-
-        results = self._single_ip_allocate_helper(mocks, '', network,
-                                                  self._gen_subnet_id())
-
-        self.assertIsInstance(results['request'], ipam_req.AnyAddressRequest)
-        self.assertEqual(ip, results['ips'][0]['ip_address'])
-
-    def test_allocate_eui64_ip(self):
-        mocks = self._prepare_ipam()
-        ip = {'subnet_id': self._gen_subnet_id(),
-              'subnet_cidr': '2001:470:abcd::/64',
-              'mac': '6c:62:6d:de:cf:49',
-              'eui64_address': True}
-        eui64_ip = ipv6_utils.get_ipv6_addr_by_EUI64(ip['subnet_cidr'],
-                                                     ip['mac'])
-        mocks['ipam']._ipam_allocate_ips(mock.ANY, mocks['driver'],
-                                         mock.ANY, [ip])
-
-        request = mocks['subnet'].allocate.call_args[0][0]
-        self.assertIsInstance(request, ipam_req.AutomaticAddressRequest)
-        self.assertEqual(eui64_ip, request.address)
-
-    def test_allocate_multiple_ips(self):
-        mocks = self._prepare_ipam()
-        data = {'': ['172.23.128.0/17', self._gen_subnet_id()],
-                '192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()],
-                '8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]}
-        ips = self._convert_to_ips(data)
-        mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
-            auto_ip='172.23.128.94')
-
-        mocks['ipam']._ipam_allocate_ips(
-            mock.ANY, mocks['driver'], mock.ANY, ips)
-        get_calls = [mock.call(data[ip][1]) for ip in data]
-        mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True)
-
-        self._validate_allocate_calls(ips, mocks)
-
-    def test_allocate_multiple_ips_with_exception(self):
-        mocks = self._prepare_ipam()
-
-        auto_ip = '172.23.128.94'
-        fail_ip = '192.168.43.15'
-        data = {'': ['172.23.128.0/17', self._gen_subnet_id()],
-                fail_ip: ['192.168.43.0/24', self._gen_subnet_id()],
-                '8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]}
-        ips = self._convert_to_ips(data)
-        mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
-            auto_ip=auto_ip, fail_ip=fail_ip)
-
-        # An exception should be raised on the attempt to allocate the
-        # second ip. The revert action should then deallocate the ips that
-        # were already allocated; in this test case only one ip should be
-        # deallocated, and the original error should be reraised (see the
-        # rollback sketch after this test).
-        self.assertRaises(n_exc.InvalidInput,
-                          mocks['ipam']._ipam_allocate_ips,
-                          mock.ANY,
-                          mocks['driver'],
-                          mock.ANY,
-                          ips)
-
-        # get_subnet should be called only for the first two networks
-        get_calls = [mock.call(data[ip][1]) for ip in ['', fail_ip]]
-        mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True)
-
-        # Allocate should be called for the first two ips only
-        self._validate_allocate_calls(ips[:-1], mocks)
-        # Deallocate should be called for the first ip only
-        mocks['subnet'].deallocate.assert_called_once_with(auto_ip)
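-
-    # A minimal sketch of the revert behaviour asserted above: allocate
-    # the requested addresses one by one and, if any allocation fails,
-    # deallocate the ones that already succeeded before re-raising the
-    # original error (illustrative only, not the backend's real code):
-    @staticmethod
-    def _example_allocate_with_rollback(ipam_subnet, requests):
-        allocated = []
-        try:
-            for request in requests:
-                allocated.append(ipam_subnet.allocate(request))
-        except Exception:
-            for ip in allocated:
-                ipam_subnet.deallocate(ip)
-            raise
-        return allocated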
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_create_subnet_over_ipam(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        cidr = '192.168.0.0/24'
-        allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
-        with self.subnet(allocation_pools=allocation_pools,
-                         cidr=cidr):
-            pool_mock.get_instance.assert_called_once_with(None, mock.ANY)
-            self.assertTrue(mocks['driver'].allocate_subnet.called)
-            request = mocks['driver'].allocate_subnet.call_args[0][0]
-            self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
-            self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_create_ipv6_pd_subnet_over_ipam(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        cfg.CONF.set_override('ipv6_pd_enabled', True)
-        cidr = constants.PROVISIONAL_IPV6_PD_PREFIX
-        allocation_pools = [netaddr.IPRange('::2', '::ffff:ffff:ffff:ffff')]
-        with self.subnet(cidr=None, ip_version=6,
-                         ipv6_ra_mode=constants.IPV6_SLAAC,
-                         ipv6_address_mode=constants.IPV6_SLAAC):
-            pool_mock.get_instance.assert_called_once_with(None, mock.ANY)
-            self.assertTrue(mocks['driver'].allocate_subnet.called)
-            request = mocks['driver'].allocate_subnet.call_args[0][0]
-            self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
-            self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
-            self.assertEqual(allocation_pools, request.allocation_pools)
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_create_subnet_over_ipam_with_rollback(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        mocks['driver'].allocate_subnet.side_effect = ValueError
-        cidr = '10.0.2.0/24'
-        with self.network() as network:
-            self._create_subnet(self.fmt, network['network']['id'],
-                                cidr, expected_res_status=500)
-
-            pool_mock.get_instance.assert_called_once_with(None, mock.ANY)
-            self.assertTrue(mocks['driver'].allocate_subnet.called)
-            request = mocks['driver'].allocate_subnet.call_args[0][0]
-            self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
-            self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
-            # Verify no subnet was created for network
-            req = self.new_show_request('networks', network['network']['id'])
-            res = req.get_response(self.api)
-            net = self.deserialize(self.fmt, res)
-            self.assertEqual(0, len(net['network']['subnets']))
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_ipam_subnet_deallocated_if_create_fails(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        cidr = '10.0.2.0/24'
-        with mock.patch.object(
-                ipam_backend_mixin.IpamBackendMixin, '_save_subnet',
-                side_effect=ValueError), self.network() as network:
-            self._create_subnet(self.fmt, network['network']['id'],
-                                cidr, expected_res_status=500)
-            pool_mock.get_instance.assert_any_call(None, mock.ANY)
-            self.assertEqual(2, pool_mock.get_instance.call_count)
-            self.assertTrue(mocks['driver'].allocate_subnet.called)
-            request = mocks['driver'].allocate_subnet.call_args[0][0]
-            self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
-            self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
-            # Verify remove ipam subnet was called
-            mocks['driver'].remove_subnet.assert_called_once_with(
-                self.subnet_id)
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_update_subnet_over_ipam(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        cidr = '10.0.0.0/24'
-        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
-        with self.subnet(allocation_pools=allocation_pools,
-                         cidr=cidr) as subnet:
-            data = {'subnet': {'allocation_pools': [
-                    {'start': '10.0.0.10', 'end': '10.0.0.20'},
-                    {'start': '10.0.0.30', 'end': '10.0.0.40'}]}}
-            req = self.new_update_request('subnets', data,
-                                          subnet['subnet']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(200, res.status_code)
-
-            pool_mock.get_instance.assert_any_call(None, mock.ANY)
-            self.assertEqual(2, pool_mock.get_instance.call_count)
-            self.assertTrue(mocks['driver'].update_subnet.called)
-            request = mocks['driver'].update_subnet.call_args[0][0]
-            self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
-            self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
-
-            ip_ranges = [netaddr.IPRange(p['start'], p['end'])
-                         for p in data['subnet']['allocation_pools']]
-            self.assertEqual(ip_ranges, request.allocation_pools)
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_delete_subnet_over_ipam(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_subnet(self.fmt, network, gateway_ip,
-                                   cidr, ip_version=4)
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
-
-        pool_mock.get_instance.assert_any_call(None, mock.ANY)
-        self.assertEqual(2, pool_mock.get_instance.call_count)
-        mocks['driver'].remove_subnet.assert_called_once_with(
-            subnet['subnet']['id'])
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_delete_subnet_over_ipam_with_rollback(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        mocks['driver'].remove_subnet.side_effect = ValueError
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        subnet = self._make_subnet(self.fmt, network, gateway_ip,
-                                   cidr, ip_version=4)
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(webob.exc.HTTPServerError.code, res.status_int)
-
-        pool_mock.get_instance.assert_any_call(None, mock.ANY)
-        self.assertEqual(2, pool_mock.get_instance.call_count)
-        mocks['driver'].remove_subnet.assert_called_once_with(
-            subnet['subnet']['id'])
-        # Verify subnet was recreated after failed ipam call
-        subnet_req = self.new_show_request('subnets',
-                                           subnet['subnet']['id'])
-        raw_res = subnet_req.get_response(self.api)
-        sub_res = self.deserialize(self.fmt, raw_res)
-        self.assertEqual(cidr, sub_res['subnet']['cidr'])
-        self.assertEqual(gateway_ip, sub_res['subnet']['gateway_ip'])
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_create_port_ipam(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        auto_ip = '10.0.0.2'
-        expected_calls = [{'ip_address': ''}]
-        mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
-            auto_ip=auto_ip)
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(1, len(ips))
-                self.assertEqual(ips[0]['ip_address'], auto_ip)
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                self._validate_allocate_calls(expected_calls, mocks)
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_create_port_ipam_with_rollback(self, pool_mock):
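-        # A failing ip allocation must surface as a 500 and must not leave
-        # an orphaned port behind.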
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        mocks['subnet'].allocate.side_effect = ValueError
-        with self.network() as network:
-            with self.subnet(network=network):
-                net_id = network['network']['id']
-                data = {
-                    'port': {'network_id': net_id,
-                             'tenant_id': network['network']['tenant_id']}}
-                port_req = self.new_create_request('ports', data)
-                res = port_req.get_response(self.api)
-                self.assertEqual(webob.exc.HTTPServerError.code,
-                                 res.status_int)
-
-                # verify no port left after failure
-                req = self.new_list_request('ports', self.fmt,
-                                            "network_id=%s" % net_id)
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                self.assertEqual(0, len(res['ports']))
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_update_port_ipam(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        auto_ip = '10.0.0.2'
-        new_ip = '10.0.0.15'
-        expected_calls = [{'ip_address': ip} for ip in ['', new_ip]]
-        mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
-            auto_ip=auto_ip)
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(1, len(ips))
-                self.assertEqual(ips[0]['ip_address'], auto_ip)
-                # Update the port with a new ip
-                data = {"port": {"fixed_ips": [{
-                        'subnet_id': subnet['subnet']['id'],
-                        'ip_address': new_ip}]}}
-                req = self.new_update_request('ports', data,
-                                              port['port']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                ips = res['port']['fixed_ips']
-                self.assertEqual(1, len(ips))
-                self.assertEqual(new_ip, ips[0]['ip_address'])
-
-                # Allocate should be called twice: once for the initial ip
-                # and once for the updated ip
-                self._validate_allocate_calls(expected_calls, mocks)
-                # Deallocate should be called for the first ip only
-                mocks['subnet'].deallocate.assert_called_once_with(auto_ip)
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_delete_port_ipam(self, pool_mock):
-        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
-        auto_ip = '10.0.0.2'
-        mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
-            auto_ip=auto_ip)
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(1, len(ips))
-                self.assertEqual(ips[0]['ip_address'], auto_ip)
-                req = self.new_delete_request('ports', port['port']['id'])
-                res = req.get_response(self.api)
-
-                self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
-                mocks['subnet'].deallocate.assert_called_once_with(auto_ip)
-
-    def test_recreate_port_ipam(self):
-        ip = '10.0.0.2'
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(1, len(ips))
-                self.assertEqual(ips[0]['ip_address'], ip)
-                req = self.new_delete_request('ports', port['port']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
-                with self.port(subnet=subnet, fixed_ips=ips) as port:
-                    ips = port['port']['fixed_ips']
-                    self.assertEqual(1, len(ips))
-                    self.assertEqual(ips[0]['ip_address'], ip)
-
-    @mock.patch('neutron.ipam.driver.Pool')
-    def test_update_ips_for_port_passes_port_dict_to_factory(self, pool_mock):
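-        # Stub the change-detection helpers so that only the interaction
-        # with the address request factory is exercised here.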
-        address_factory = mock.Mock()
-        mocks = self._prepare_mocks_with_pool_mock(
-            pool_mock, address_factory=address_factory)
-        context = mock.Mock()
-        new_ips = mock.Mock()
-        original_ips = mock.Mock()
-        mac = mock.Mock()
-
-        ip_dict = {'ip_address': '192.1.1.10',
-                   'subnet_id': uuidutils.generate_uuid()}
-        changes = ipam_pluggable_backend.IpamPluggableBackend.Changes(
-            add=[ip_dict], original=[], remove=[])
-        changes_mock = mock.Mock(return_value=changes)
-        fixed_ips_mock = mock.Mock(return_value=changes.add)
-        mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
-        mocks['ipam']._get_changed_ips_for_port = changes_mock
-        mocks['ipam']._test_fixed_ips_for_port = fixed_ips_mock
-
-        port_dict = {'device_owner': uuidutils.generate_uuid(),
-                     'network_id': uuidutils.generate_uuid()}
-
-        mocks['ipam']._update_ips_for_port(context, port_dict,
-                                           original_ips, new_ips, mac)
-        mocks['driver'].get_address_request_factory.assert_called_once_with()
-        # Validate port_dict is passed into address_factory
-        address_factory.get_request.assert_called_once_with(context,
-                                                            port_dict,
-                                                            ip_dict)
diff --git a/neutron/tests/unit/db/test_l3_db.py b/neutron/tests/unit/db/test_l3_db.py
deleted file mode 100644 (file)
index 5694e68..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from neutron.db import l3_db
-from neutron import manager
-from neutron.tests import base
-
-
-class TestL3_NAT_dbonly_mixin(base.BaseTestCase):
-    def setUp(self):
-        super(TestL3_NAT_dbonly_mixin, self).setUp()
-        self.db = l3_db.L3_NAT_dbonly_mixin()
-
-    def test__each_port_having_fixed_ips_none(self):
-        """Be sure the method returns an empty list when None is passed"""
-        filtered = l3_db.L3_NAT_dbonly_mixin._each_port_having_fixed_ips(None)
-        self.assertEqual([], list(filtered))
-
-    def test__each_port_having_fixed_ips(self):
-        """Basic test that ports without fixed ips are filtered out"""
-        ports = [{'id': 'a', 'fixed_ips': [mock.sentinel.fixedip]},
-                 {'id': 'b'}]
-        filtered = l3_db.L3_NAT_dbonly_mixin._each_port_having_fixed_ips(ports)
-        ids = [p['id'] for p in filtered]
-        self.assertEqual(['a'], ids)
-
-    def test__get_subnets_by_network_no_query(self):
-        """Basic test that no query is performed if no Ports are passed"""
-        context = mock.Mock()
-        with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p:
-            self.db._get_subnets_by_network_list(context, [])
-        self.assertFalse(context.session.query.called)
-        self.assertFalse(get_p.called)
-
-    def test__get_subnets_by_network(self):
-        """Basic test that the right query is called"""
-        context = mock.MagicMock()
-        query = context.session.query().outerjoin().filter()
-        query.__iter__.return_value = [(mock.sentinel.subnet_db,
-                                        mock.sentinel.address_scope_id)]
-
-        with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p:
-            get_p()._make_subnet_dict.return_value = {
-                'network_id': mock.sentinel.network_id}
-            subnets = self.db._get_subnets_by_network_list(
-                context, [mock.sentinel.network_id])
-        self.assertEqual({
-            mock.sentinel.network_id: [{
-                'address_scope_id': mock.sentinel.address_scope_id,
-                'network_id': mock.sentinel.network_id}]}, subnets)
-
-    def test__populate_ports_for_subnets_none(self):
-        """Basic test that the method runs correctly with no ports"""
-        ports = []
-        self.db._populate_subnets_for_ports(mock.sentinel.context, ports)
-        self.assertEqual([], ports)
-
-    @mock.patch.object(l3_db.L3_NAT_dbonly_mixin,
-                       '_get_subnets_by_network_list')
-    def test__populate_ports_for_subnets(self, get_subnets_by_network):
-        cidr = "2001:db8::/64"
-        subnet = {'id': mock.sentinel.subnet_id,
-                  'cidr': cidr,
-                  'gateway_ip': mock.sentinel.gateway_ip,
-                  'dns_nameservers': mock.sentinel.dns_nameservers,
-                  'ipv6_ra_mode': mock.sentinel.ipv6_ra_mode,
-                  'subnetpool_id': mock.sentinel.subnetpool_id,
-                  'address_scope_id': mock.sentinel.address_scope_id}
-        get_subnets_by_network.return_value = {'net_id': [subnet]}
-
-        ports = [{'network_id': 'net_id',
-                  'id': 'port_id',
-                  'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}]
-        self.db._populate_subnets_for_ports(mock.sentinel.context, ports)
-        keys = ('id', 'cidr', 'gateway_ip', 'ipv6_ra_mode', 'subnetpool_id',
-                'dns_nameservers')
-        address_scopes = {4: None, 6: mock.sentinel.address_scope_id}
-        self.assertEqual([{'extra_subnets': [],
-                           'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id,
-                                          'prefixlen': 64}],
-                           'id': 'port_id',
-                           'network_id': 'net_id',
-                           'subnets': [{k: subnet[k] for k in keys}],
-                           'address_scopes': address_scopes}], ports)
-
-    def test__get_sync_floating_ips_no_query(self):
-        """Basic test that no query is performed if no router ids are passed"""
-        db = l3_db.L3_NAT_dbonly_mixin()
-        context = mock.Mock()
-        db._get_sync_floating_ips(context, [])
-        self.assertFalse(context.session.query.called)
-
-    @mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_floatingip_dict')
-    def test__make_floatingip_dict_with_scope(self, make_fip_dict):
-        db = l3_db.L3_NAT_dbonly_mixin()
-        make_fip_dict.return_value = {'id': mock.sentinel.fip_ip}
-        result = db._make_floatingip_dict_with_scope(
-            mock.sentinel.floating_ip_db, mock.sentinel.address_scope_id)
-        self.assertEqual({
-            'fixed_ip_address_scope': mock.sentinel.address_scope_id,
-            'id': mock.sentinel.fip_ip}, result)
-
-    def test__unique_floatingip_iterator(self):
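-        # Duplicate rows for the same floating ip id must be collapsed
-        # into a single entry by the iterator.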
-        query = mock.MagicMock()
-        query.order_by().__iter__.return_value = [
-            ({'id': 'id1'}, 'scope1'),
-            ({'id': 'id1'}, 'scope1'),
-            ({'id': 'id2'}, 'scope2'),
-            ({'id': 'id2'}, 'scope2'),
-            ({'id': 'id2'}, 'scope2'),
-            ({'id': 'id3'}, 'scope3')]
-        query.reset_mock()
-        result = list(
-            l3_db.L3_NAT_dbonly_mixin._unique_floatingip_iterator(query))
-        query.order_by.assert_called_once_with(l3_db.FloatingIP.id)
-        self.assertEqual([({'id': 'id1'}, 'scope1'),
-                          ({'id': 'id2'}, 'scope2'),
-                          ({'id': 'id3'}, 'scope3')], result)
diff --git a/neutron/tests/unit/db/test_l3_dvr_db.py b/neutron/tests/unit/db/test_l3_dvr_db.py
deleted file mode 100644 (file)
index b6aa3d5..0000000
+++ /dev/null
@@ -1,647 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.common import constants as l3_const
-from neutron.common import exceptions
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import common_db_mixin
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_dvr_db
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.common import constants as plugin_const
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-_uuid = uuidutils.generate_uuid
-
-
-class FakeL3Plugin(common_db_mixin.CommonDbMixin,
-                   l3_dvr_db.L3_NAT_with_dvr_db_mixin,
-                   l3_agentschedulers_db.L3AgentSchedulerDbMixin,
-                   agents_db.AgentDbMixin):
-    pass
-
-
-class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    def setUp(self):
-        core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-        super(L3DvrTestCase, self).setUp(plugin=core_plugin)
-        self.core_plugin = manager.NeutronManager.get_plugin()
-        self.ctx = context.get_admin_context()
-        self.mixin = FakeL3Plugin()
-
-    def _create_router(self, router):
-        with self.ctx.session.begin(subtransactions=True):
-            return self.mixin._create_router_db(self.ctx, router, 'foo_tenant')
-
-    def _test__create_router_db(self, expected=False, distributed=None):
-        router = {'name': 'foo_router', 'admin_state_up': True}
-        if distributed is not None:
-            router['distributed'] = distributed
-        result = self._create_router(router)
-        self.assertEqual(expected, result.extra_attributes['distributed'])
-
-    def test_create_router_db_default(self):
-        self._test__create_router_db(expected=False)
-
-    def test_create_router_db_centralized(self):
-        self._test__create_router_db(expected=False, distributed=False)
-
-    def test_create_router_db_distributed(self):
-        self._test__create_router_db(expected=True, distributed=True)
-
-    def test__validate_router_migration_on_router_update(self):
-        router = {
-            'name': 'foo_router',
-            'admin_state_up': True,
-            'distributed': True
-        }
-        router_db = self._create_router(router)
-        self.assertIsNone(self.mixin._validate_router_migration(
-            self.ctx, router_db, {'name': 'foo_router_2'}))
-
-    def test__validate_router_migration_raise_error(self):
-        router = {
-            'name': 'foo_router',
-            'admin_state_up': True,
-            'distributed': True
-        }
-        router_db = self._create_router(router)
-        self.assertRaises(exceptions.BadRequest,
-                          self.mixin._validate_router_migration,
-                          self.ctx, router_db, {'distributed': False})
-
-    def test_upgrade_active_router_to_distributed_validation_failure(self):
-        router = {'name': 'foo_router', 'admin_state_up': True}
-        router_db = self._create_router(router)
-        update = {'distributed': True}
-        self.assertRaises(exceptions.BadRequest,
-                          self.mixin._validate_router_migration,
-                          self.ctx, router_db, update)
-
-    def test_update_router_db_centralized_to_distributed(self):
-        router = {'name': 'foo_router', 'admin_state_up': True}
-        agent = {'id': _uuid()}
-        distributed = {'distributed': True}
-        router_db = self._create_router(router)
-        router_id = router_db['id']
-        self.assertFalse(router_db.extra_attributes.distributed)
-        self.mixin._get_router = mock.Mock(return_value=router_db)
-        self.mixin._validate_router_migration = mock.Mock()
-        self.mixin._update_distributed_attr = mock.Mock()
-        self.mixin.list_l3_agents_hosting_router = mock.Mock(
-            return_value={'agents': [agent]})
-        self.mixin._unbind_router = mock.Mock()
-        router_db = self.mixin._update_router_db(
-            self.ctx, router_id, distributed)
-        # Assert that the DB value has changed
-        self.assertTrue(router_db.extra_attributes.distributed)
-        self.assertEqual(1,
-                         self.mixin._update_distributed_attr.call_count)
-
-    def _test_get_device_owner(self, is_distributed=False,
-                               expected=l3_const.DEVICE_OWNER_ROUTER_INTF,
-                               pass_router_id=True):
-        router = {
-            'name': 'foo_router',
-            'admin_state_up': True,
-            'distributed': is_distributed
-        }
-        router_db = self._create_router(router)
-        router_pass = router_db['id'] if pass_router_id else router_db
-        with mock.patch.object(self.mixin, '_get_router') as f:
-            f.return_value = router_db
-            result = self.mixin._get_device_owner(self.ctx, router_pass)
-            self.assertEqual(expected, result)
-
-    def test_get_device_owner_by_router_id(self):
-        self._test_get_device_owner()
-
-    def test__get_device_owner_centralized(self):
-        self._test_get_device_owner(pass_router_id=False)
-
-    def test__get_device_owner_distributed(self):
-        self._test_get_device_owner(
-            is_distributed=True,
-            expected=l3_const.DEVICE_OWNER_DVR_INTERFACE,
-            pass_router_id=False)
-
-    def _test__is_distributed_router(self, router, expected):
-        result = l3_dvr_db.is_distributed_router(router)
-        self.assertEqual(expected, result)
-
-    def test__is_distributed_router_by_db_object(self):
-        router = {'name': 'foo_router', 'admin_state_up': True}
-        router_db = self._create_router(router)
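-        # Smoke test: resolving the device owner directly from the db
-        # object (rather than by id) must not raise.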
-        self.mixin._get_device_owner(mock.ANY, router_db)
-
-    def test__is_distributed_router_default(self):
-        router = {'id': 'foo_router_id'}
-        self._test__is_distributed_router(router, False)
-
-    def test__is_distributed_router_centralized(self):
-        router = {'id': 'foo_router_id', 'distributed': False}
-        self._test__is_distributed_router(router, False)
-
-    def test__is_distributed_router_distributed(self):
-        router = {'id': 'foo_router_id', 'distributed': True}
-        self._test__is_distributed_router(router, True)
-
-    def test__get_agent_gw_ports_exist_for_network(self):
-        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
-            plugin = mock.Mock()
-            gp.return_value = plugin
-            plugin.get_ports.return_value = []
-            self.mixin._get_agent_gw_ports_exist_for_network(
-                self.ctx, 'network_id', 'host', 'agent_id')
-        plugin.get_ports.assert_called_with(self.ctx, {
-            'network_id': ['network_id'],
-            'device_id': ['agent_id'],
-            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
-
-    def _test_prepare_direct_delete_dvr_internal_ports(self, port):
-        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
-            plugin = mock.Mock()
-            gp.return_value = plugin
-            plugin.get_port.return_value = port
-            self.assertRaises(exceptions.ServicePortInUse,
-                              self.mixin.prevent_l3_port_deletion,
-                              self.ctx,
-                              port['id'])
-
-    def test_prevent_delete_floatingip_agent_gateway_port(self):
-        port = {
-            'id': 'my_port_id',
-            'fixed_ips': mock.ANY,
-            'device_owner': l3_const.DEVICE_OWNER_AGENT_GW
-        }
-        self._test_prepare_direct_delete_dvr_internal_ports(port)
-
-    def test_prevent_delete_csnat_port(self):
-        port = {
-            'id': 'my_port_id',
-            'fixed_ips': mock.ANY,
-            'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT
-        }
-        self._test_prepare_direct_delete_dvr_internal_ports(port)
-
-    def test__create_gw_port_with_no_gateway(self):
-        router = {
-            'name': 'foo_router',
-            'admin_state_up': True,
-            'distributed': True,
-        }
-        router_db = self._create_router(router)
-        router_id = router_db['id']
-        self.assertTrue(router_db.extra_attributes.distributed)
-        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
-                               '_create_gw_port'),\
-                mock.patch.object(
-                    self.mixin,
-                    '_create_snat_intf_ports_if_not_exists') as cs:
-            self.mixin._create_gw_port(
-                self.ctx, router_id, router_db, mock.ANY,
-                mock.ANY)
-            self.assertFalse(cs.call_count)
-
-    def test_build_routers_list_with_gw_port_mismatch(self):
-        routers = [{'gw_port_id': 'foo_gw_port_id', 'id': 'foo_router_id'}]
-        gw_ports = {}
-        routers = self.mixin._build_routers_list(self.ctx, routers, gw_ports)
-        self.assertIsNone(routers[0].get('gw_port'))
-
-    def setup_port_has_ipv6_address(self, port):
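-        # Patch the parent implementation to record whether the dvr mixin
-        # delegates to it or short-circuits (as it must for SNAT ports).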
-        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
-                               '_port_has_ipv6_address') as pv6:
-            pv6.return_value = True
-            result = self.mixin._port_has_ipv6_address(port)
-            return result, pv6
-
-    def test__port_has_ipv6_address_for_dvr_snat_port(self):
-        port = {
-            'id': 'my_port_id',
-            'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT,
-        }
-        result, pv6 = self.setup_port_has_ipv6_address(port)
-        self.assertFalse(result)
-        self.assertFalse(pv6.called)
-
-    def test__port_has_ipv6_address_for_non_snat_ports(self):
-        port = {
-            'id': 'my_port_id',
-            'device_owner': l3_const.DEVICE_OWNER_DVR_INTERFACE,
-        }
-        result, pv6 = self.setup_port_has_ipv6_address(port)
-        self.assertTrue(result)
-        self.assertTrue(pv6.called)
-
-    def _helper_delete_floatingip_agent_gateway_port(self, port_host):
-        ports = [{
-            'id': 'my_port_id',
-            portbindings.HOST_ID: 'foo_host',
-            'network_id': 'ext_network_id',
-            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
-        }, {
-            'id': 'my_new_port_id',
-            portbindings.HOST_ID: 'my_foo_host',
-            'network_id': 'ext_network_id',
-            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
-        }]
-        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
-            plugin = mock.Mock()
-            gp.return_value = plugin
-            plugin.get_ports.return_value = ports
-            self.mixin.delete_floatingip_agent_gateway_port(
-                self.ctx, port_host, 'ext_network_id')
-        plugin.get_ports.assert_called_with(self.ctx, filters={
-            'network_id': ['ext_network_id'],
-            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
-        if port_host:
-            plugin.ipam.delete_port.assert_called_once_with(
-                self.ctx, 'my_port_id')
-        else:
-            plugin.ipam.delete_port.assert_called_with(
-                self.ctx, 'my_new_port_id')
-
-    def test_delete_floatingip_agent_gateway_port_without_host_id(self):
-        self._helper_delete_floatingip_agent_gateway_port(None)
-
-    def test_delete_floatingip_agent_gateway_port_with_host_id(self):
-        self._helper_delete_floatingip_agent_gateway_port(
-            'foo_host')
-
-    def _setup_delete_current_gw_port_deletes_fip_agent_gw_port(
-        self, port=None, gw_port=True):
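-        # Shared setup returning the router and the mocks that record
-        # which gateway cleanup paths were taken.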
-        router = mock.MagicMock()
-        router.extra_attributes.distributed = True
-        if gw_port:
-            gw_port_db = {
-                'id': 'my_gw_id',
-                'network_id': 'ext_net_id',
-                'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
-            }
-            router.gw_port = gw_port_db
-        else:
-            router.gw_port = None
-
-        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp,\
-            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
-                              '_delete_current_gw_port'),\
-            mock.patch.object(
-                self.mixin,
-                '_get_router') as grtr,\
-            mock.patch.object(
-                self.mixin,
-                'delete_csnat_router_interface_ports') as del_csnat_port,\
-            mock.patch.object(
-                self.mixin,
-                'delete_floatingip_agent_gateway_port') as del_agent_gw_port,\
-            mock.patch.object(
-                self.mixin.l3_rpc_notifier,
-                'delete_fipnamespace_for_ext_net') as del_fip:
-            plugin = mock.Mock()
-            gp.return_value = plugin
-            plugin.get_ports.return_value = port
-            grtr.return_value = router
-            self.mixin._delete_current_gw_port(
-                self.ctx, router['id'], router, 'ext_network_id')
-            return router, plugin, del_csnat_port, del_agent_gw_port, del_fip
-
-    def test_delete_current_gw_port_deletes_fip_agent_gw_port_and_fipnamespace(
-            self):
-        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
-            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port())
-        self.assertTrue(d_csnat_port.called)
-        self.assertTrue(d_agent_gw_port.called)
-        d_csnat_port.assert_called_once_with(
-            mock.ANY, rtr)
-        d_agent_gw_port.assert_called_once_with(mock.ANY, None, 'ext_net_id')
-        del_fip.assert_called_once_with(mock.ANY, 'ext_net_id')
-
-    def test_delete_current_gw_port_never_calls_delete_fip_agent_gw_port(self):
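-        # Other routers still have gateway ports on the same external
-        # network, so the shared fip resources must not be deleted.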
-        port = [{
-            'id': 'my_port_id',
-            'network_id': 'ext_net_id',
-            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
-        }, {
-            'id': 'my_new_port_id',
-            'network_id': 'ext_net_id',
-            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
-        }]
-        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
-            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port(
-                port=port))
-        self.assertTrue(d_csnat_port.called)
-        self.assertFalse(d_agent_gw_port.called)
-        self.assertFalse(del_fip.called)
-        d_csnat_port.assert_called_once_with(
-            mock.ANY, rtr)
-
-    def test_delete_current_gw_port_never_calls_delete_fipnamespace(self):
-        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
-            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port(
-                gw_port=False))
-        self.assertFalse(d_csnat_port.called)
-        self.assertFalse(d_agent_gw_port.called)
-        self.assertFalse(del_fip.called)
-
-    def _floatingip_on_port_test_setup(self, hostid):
-        router = {'id': 'foo_router_id', 'distributed': True}
-        floatingip = {
-            'id': _uuid(),
-            'port_id': _uuid(),
-            'router_id': 'foo_router_id',
-            'host': hostid
-        }
-        if not hostid:
-            hostid = 'not_my_host_id'
-        routers = {
-            'foo_router_id': router
-        }
-        fipagent = {
-            'id': _uuid()
-        }
-
-        # NOTE: mock.patch is not needed here, since self.mixin is created
-        # fresh for each test.  Plain attribute assignment is used instead
-        # because patching does not work for some methods when the mixin is
-        # tested in isolation (e.g. _get_agent_by_type_and_host).
-        self.mixin._get_dvr_service_port_hostid = mock.Mock(
-            return_value=hostid)
-        self.mixin._get_agent_by_type_and_host = mock.Mock(
-            return_value=fipagent)
-        self.mixin._get_fip_sync_interfaces = mock.Mock(
-            return_value='fip_interface')
-        agent = mock.Mock()
-        agent.id = fipagent['id']
-
-        self.mixin._process_floating_ips_dvr(self.ctx, routers, [floatingip],
-                                             hostid, agent)
-        return (router, floatingip)
-
-    def test_floatingip_on_port_not_host(self):
-        router, fip = self._floatingip_on_port_test_setup(None)
-
-        self.assertNotIn(l3_const.FLOATINGIP_KEY, router)
-        self.assertNotIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)
-
-    def test_floatingip_on_port_with_host(self):
-        router, fip = self._floatingip_on_port_test_setup(_uuid())
-
-        self.assertTrue(self.mixin._get_fip_sync_interfaces.called)
-
-        self.assertIn(l3_const.FLOATINGIP_KEY, router)
-        self.assertIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)
-        self.assertIn(fip, router[l3_const.FLOATINGIP_KEY])
-        self.assertIn('fip_interface',
-            router[l3_const.FLOATINGIP_AGENT_INTF_KEY])
-
-    def _setup_test_create_floatingip(
-        self, fip, floatingip_db, router_db):
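-        # Returns the create_fip_agent_gw_port_if_not_exists mock so the
-        # caller can assert whether an agent gateway port would be created.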
-        port = {
-            'id': '1234',
-            portbindings.HOST_ID: 'myhost',
-            'network_id': 'external_net'
-        }
-
-        with mock.patch.object(self.mixin, 'get_router') as grtr,\
-                mock.patch.object(self.mixin,
-                                  '_get_dvr_service_port_hostid') as vmp,\
-                mock.patch.object(
-                    self.mixin,
-                    'create_fip_agent_gw_port_if_not_exists') as c_fip,\
-                mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
-                                  '_update_fip_assoc'):
-            grtr.return_value = router_db
-            vmp.return_value = 'my-host'
-            self.mixin._update_fip_assoc(
-                self.ctx, fip, floatingip_db, port)
-            return c_fip
-
-    def test_create_floatingip_agent_gw_port_with_dvr_router(self):
-        floatingip = {
-            'id': _uuid(),
-            'router_id': 'foo_router_id'
-        }
-        router = {'id': 'foo_router_id', 'distributed': True}
-        fip = {
-            'id': _uuid(),
-            'port_id': _uuid()
-        }
-        create_fip = (
-            self._setup_test_create_floatingip(
-                fip, floatingip, router))
-        self.assertTrue(create_fip.called)
-
-    def test_create_floatingip_agent_gw_port_with_non_dvr_router(self):
-        floatingip = {
-            'id': _uuid(),
-            'router_id': 'foo_router_id'
-        }
-        router = {'id': 'foo_router_id', 'distributed': False}
-        fip = {
-            'id': _uuid(),
-            'port_id': _uuid()
-        }
-        create_fip = (
-            self._setup_test_create_floatingip(
-                fip, floatingip, router))
-        self.assertFalse(create_fip.called)
-
-    def test_remove_router_interface_delete_router_l3agent_binding(self):
-        interface_info = {'subnet_id': '123'}
-        router = mock.MagicMock()
-        router.extra_attributes.distributed = True
-        plugin = mock.MagicMock()
-        plugin.get_l3_agents_hosting_routers = mock.Mock(
-            return_value=[mock.MagicMock()])
-        plugin.get_subnet_ids_on_router = mock.Mock(
-            return_value=interface_info)
-        plugin.check_dvr_serviceable_ports_on_host = mock.Mock(
-            return_value=False)
-        plugin.remove_router_from_l3_agent = mock.Mock(
-            return_value=None)
-        with mock.patch.object(self.mixin, '_get_router') as grtr,\
-                mock.patch.object(self.mixin, '_get_device_owner') as gdev,\
-                mock.patch.object(self.mixin,
-                                  '_remove_interface_by_subnet') as rmintf,\
-                mock.patch.object(
-                    self.mixin,
-                    'delete_csnat_router_interface_ports') as delintf,\
-                mock.patch.object(manager.NeutronManager,
-                                  'get_service_plugins') as gplugin,\
-                mock.patch.object(self.mixin,
-                                  '_make_router_interface_info') as mkintf,\
-                mock.patch.object(self.mixin,
-                                  'notify_router_interface_action') as notify:
-            grtr.return_value = router
-            gdev.return_value = mock.Mock()
-            rmintf.return_value = (mock.MagicMock(), mock.MagicMock())
-            mkintf.return_value = mock.Mock()
-            gplugin.return_value = {plugin_const.L3_ROUTER_NAT: plugin}
-            delintf.return_value = None
-            notify.return_value = None
-
-            self.mixin.manager = manager
-            self.mixin.remove_router_interface(
-                self.ctx, mock.Mock(), interface_info)
-            self.assertTrue(plugin.get_l3_agents_hosting_routers.called)
-            self.assertTrue(plugin.check_dvr_serviceable_ports_on_host.called)
-            self.assertTrue(plugin.remove_router_from_l3_agent.called)
-
-    def test_remove_router_interface_csnat_ports_removal(self):
-        router_dict = {'name': 'test_router', 'admin_state_up': True,
-                       'distributed': True}
-        router = self._create_router(router_dict)
-        plugin = mock.MagicMock()
-        plugin.get_subnet_ids_on_router = mock.Mock()
-        with self.network() as net_ext,\
-                self.subnet() as subnet1,\
-                self.subnet(cidr='20.0.0.0/24') as subnet2:
-            ext_net_id = net_ext['network']['id']
-            self.core_plugin.update_network(
-                self.ctx, ext_net_id,
-                {'network': {'router:external': True}})
-            self.mixin.update_router(
-                self.ctx, router['id'],
-                {'router': {'external_gateway_info':
-                            {'network_id': ext_net_id}}})
-            self.mixin.add_router_interface(self.ctx, router['id'],
-                {'subnet_id': subnet1['subnet']['id']})
-            self.mixin.add_router_interface(self.ctx, router['id'],
-                {'subnet_id': subnet2['subnet']['id']})
-
-            csnat_filters = {'device_owner':
-                             [l3_const.DEVICE_OWNER_ROUTER_SNAT]}
-            csnat_ports = self.core_plugin.get_ports(
-                self.ctx, filters=csnat_filters)
-            self.assertEqual(2, len(csnat_ports))
-
-            dvr_filters = {'device_owner':
-                           [l3_const.DEVICE_OWNER_DVR_INTERFACE]}
-            dvr_ports = self.core_plugin.get_ports(
-                self.ctx, filters=dvr_filters)
-            self.assertEqual(2, len(dvr_ports))
-
-            with mock.patch.object(manager.NeutronManager,
-                                  'get_service_plugins') as get_svc_plugin:
-                get_svc_plugin.return_value = {
-                    plugin_const.L3_ROUTER_NAT: plugin}
-                self.mixin.manager = manager
-                self.mixin.remove_router_interface(
-                    self.ctx, router['id'], {'port_id': dvr_ports[0]['id']})
-
-            csnat_ports = self.core_plugin.get_ports(
-                self.ctx, filters=csnat_filters)
-            self.assertEqual(1, len(csnat_ports))
-            self.assertEqual(dvr_ports[1]['fixed_ips'][0]['subnet_id'],
-                             csnat_ports[0]['fixed_ips'][0]['subnet_id'])
-
-            dvr_ports = self.core_plugin.get_ports(
-                self.ctx, filters=dvr_filters)
-            self.assertEqual(1, len(dvr_ports))
-            self.assertEqual(1, plugin.get_subnet_ids_on_router.call_count)
-
-    def test__validate_router_migration_notify_advanced_services(self):
-        router = {'name': 'foo_router', 'admin_state_up': False}
-        router_db = self._create_router(router)
-        with mock.patch.object(l3_dvr_db.registry, 'notify') as mock_notify:
-            self.mixin._validate_router_migration(
-                self.ctx, router_db, {'distributed': True})
-            kwargs = {'context': self.ctx, 'router': router_db}
-            mock_notify.assert_called_once_with(
-                'router', 'before_update', self.mixin, **kwargs)
-
-    def _test_update_arp_entry_for_dvr_service_port(
-            self, device_owner, action):
-        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp,\
-                mock.patch.object(self.mixin, '_get_router') as grtr:
-            plugin = mock.Mock()
-            dvr_router = mock.Mock()
-            l3_notify = self.mixin.l3_rpc_notifier = mock.Mock()
-            gp.return_value = plugin
-            port = {
-                'id': 'my_port_id',
-                'fixed_ips': [
-                    {'subnet_id': '51edc9e0-24f9-47f2-8e1e-2a41cb691323',
-                     'ip_address': '10.0.0.11'},
-                    {'subnet_id': '2b7c8a07-6f8e-4937-8701-f1d5da1a807c',
-                     'ip_address': '10.0.0.21'},
-                    {'subnet_id': '48534187-f077-4e81-93ff-81ec4cc0ad3b',
-                     'ip_address': 'fd45:1515:7e0:0:f816:3eff:fe1a:1111'}],
-                'mac_address': 'my_mac',
-                'device_owner': device_owner
-            }
-            dvr_port = {
-                'id': 'dvr_port_id',
-                'fixed_ips': mock.ANY,
-                'device_owner': l3_const.DEVICE_OWNER_DVR_INTERFACE,
-                'device_id': 'dvr_router_id'
-            }
-            plugin.get_ports.return_value = [port, dvr_port]
-            grtr.return_value = dvr_router
-            dvr_router.extra_attributes.distributed = True
-            self.mixin.update_arp_entry_for_dvr_service_port(
-                self.ctx, port, action)
-            if action == 'add':
-                self.assertEqual(3, l3_notify.add_arp_entry.call_count)
-            elif action == 'del':
-                self.assertEqual(3, l3_notify.del_arp_entry.call_count)
-
-    def test_update_arp_entry_for_dvr_service_port_added(self):
-        action = 'add'
-        device_owner = l3_const.DEVICE_OWNER_LOADBALANCER
-        self._test_update_arp_entry_for_dvr_service_port(device_owner, action)
-
-    def test_update_arp_entry_for_dvr_service_port_deleted(self):
-        action = 'del'
-        device_owner = l3_const.DEVICE_OWNER_LOADBALANCER
-        self._test_update_arp_entry_for_dvr_service_port(device_owner, action)
-
-    def test_add_router_interface_csnat_ports_failure(self):
-        router_dict = {'name': 'test_router', 'admin_state_up': True,
-                       'distributed': True}
-        router = self._create_router(router_dict)
-        with self.network() as net_ext,\
-                self.subnet() as subnet:
-            ext_net_id = net_ext['network']['id']
-            self.core_plugin.update_network(
-                self.ctx, ext_net_id,
-                {'network': {'router:external': True}})
-            self.mixin.update_router(
-                self.ctx, router['id'],
-                {'router': {'external_gateway_info':
-                            {'network_id': ext_net_id}}})
-            with mock.patch.object(
-                self.mixin, '_add_csnat_router_interface_port') as f:
-                f.side_effect = RuntimeError()
-                self.assertRaises(
-                    RuntimeError,
-                    self.mixin.add_router_interface,
-                    self.ctx, router['id'],
-                    {'subnet_id': subnet['subnet']['id']})
-                filters = {
-                    'device_id': [router['id']],
-                }
-                router_ports = self.core_plugin.get_ports(self.ctx, filters)
-                self.assertEqual(1, len(router_ports))
-                self.assertEqual(l3_const.DEVICE_OWNER_ROUTER_GW,
-                                 router_ports[0]['device_owner'])
diff --git a/neutron/tests/unit/db/test_l3_hamode_db.py b/neutron/tests/unit/db/test_l3_hamode_db.py
deleted file mode 100644 (file)
index a416c31..0000000
+++ /dev/null
@@ -1,835 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo_config import cfg
-from oslo_utils import uuidutils
-import sqlalchemy as sa
-from sqlalchemy import orm
-
-from neutron.api.rpc.handlers import l3_rpc
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import common_db_mixin
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_hamode_db
-from neutron.extensions import l3
-from neutron.extensions import l3_ext_ha_mode
-from neutron.extensions import portbindings
-from neutron.extensions import providernet
-from neutron import manager
-from neutron.scheduler import l3_agent_scheduler
-from neutron.tests.common import helpers
-from neutron.tests.unit import testlib_api
-
-_uuid = uuidutils.generate_uuid
-
-
-class FakeL3PluginWithAgents(common_db_mixin.CommonDbMixin,
-                             l3_hamode_db.L3_HA_NAT_db_mixin,
-                             l3_agentschedulers_db.L3AgentSchedulerDbMixin,
-                             agents_db.AgentDbMixin):
-    pass
-
-
-class L3HATestFramework(testlib_api.SqlTestCase):
-    def setUp(self):
-        super(L3HATestFramework, self).setUp()
-
-        self.admin_ctx = context.get_admin_context()
-        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
-        self.core_plugin = manager.NeutronManager.get_plugin()
-        notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
-                                    '_notify_ha_interfaces_updated')
-        self.notif_m = notif_p.start()
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-
-        self.plugin = FakeL3PluginWithAgents()
-        self.plugin.router_scheduler = l3_agent_scheduler.ChanceScheduler()
-        self.agent1 = helpers.register_l3_agent()
-        self.agent2 = helpers.register_l3_agent(
-            'host_2', constants.L3_AGENT_MODE_DVR_SNAT)
-
-    def _create_router(self, ha=True, tenant_id='tenant1', distributed=None,
-                       ctx=None):
-        if ctx is None:
-            ctx = self.admin_ctx
-        ctx.tenant_id = tenant_id
-        router = {'name': 'router1',
-                  'admin_state_up': True,
-                  'tenant_id': tenant_id}
-        if ha is not None:
-            router['ha'] = ha
-        if distributed is not None:
-            router['distributed'] = distributed
-        return self.plugin.create_router(ctx, {'router': router})
-
-    def _migrate_router(self, router_id, ha):
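-        # ha can only be changed while the router is administratively down,
-        # so toggle admin_state_up around the migration.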
-        self._update_router(router_id, admin_state=False)
-        self._update_router(router_id, ha=ha)
-        return self._update_router(router_id, admin_state=True)
-
-    def _update_router(self, router_id, ha=None, distributed=None, ctx=None,
-                       admin_state=None):
-        if ctx is None:
-            ctx = self.admin_ctx
-        data = {'ha': ha} if ha is not None else {}
-        if distributed is not None:
-            data['distributed'] = distributed
-        if admin_state is not None:
-            data['admin_state_up'] = admin_state
-        return self.plugin._update_router_db(ctx, router_id, data)
-
-    def _bind_router(self, router_id):
-        with self.admin_ctx.session.begin(subtransactions=True):
-            agents_db = self.plugin.get_agents_db(self.admin_ctx)
-            self.plugin.router_scheduler._bind_ha_router_to_agents(
-                self.plugin,
-                self.admin_ctx,
-                router_id,
-                agents_db)
-
-
-class L3HATestCase(L3HATestFramework):
-    def test_verify_configuration_succeed(self):
-        # Default configuration should pass
-        self.plugin._verify_configuration()
-
-    def test_verify_configuration_l3_ha_net_cidr_is_not_a_cidr(self):
-        cfg.CONF.set_override('l3_ha_net_cidr', 'not a cidr')
-        self.assertRaises(
-            l3_ext_ha_mode.HANetworkCIDRNotValid,
-            self.plugin._verify_configuration)
-
-    def test_verify_configuration_l3_ha_net_cidr_is_not_a_subnet(self):
-        cfg.CONF.set_override('l3_ha_net_cidr', '10.0.0.1/8')
-        self.assertRaises(
-            l3_ext_ha_mode.HANetworkCIDRNotValid,
-            self.plugin._verify_configuration)
-
-    def test_verify_configuration_min_l3_agents_per_router_below_minimum(self):
-        cfg.CONF.set_override('min_l3_agents_per_router', 0)
-        self.assertRaises(
-            l3_ext_ha_mode.HAMinimumAgentsNumberNotValid,
-            self.plugin._check_num_agents_per_router)
-
-    def test_verify_configuration_max_l3_agents_below_min_l3_agents(self):
-        cfg.CONF.set_override('max_l3_agents_per_router', 3)
-        cfg.CONF.set_override('min_l3_agents_per_router', 4)
-        self.assertRaises(
-            l3_ext_ha_mode.HAMaximumAgentsNumberNotValid,
-            self.plugin._check_num_agents_per_router)
-
-    def test_verify_configuration_max_l3_agents_unlimited(self):
-        cfg.CONF.set_override('max_l3_agents_per_router',
-                              l3_hamode_db.UNLIMITED_AGENTS_PER_ROUTER)
-        self.plugin._check_num_agents_per_router()
-
-    def test_get_ha_router_port_bindings(self):
-        router = self._create_router()
-        self._bind_router(router['id'])
-        bindings = self.plugin.get_ha_router_port_bindings(
-            self.admin_ctx, [router['id']])
-        binding_dicts = [{'router_id': binding['router_id'],
-                          'l3_agent_id': binding['l3_agent_id']}
-                         for binding in bindings]
-        self.assertIn({'router_id': router['id'],
-                       'l3_agent_id': self.agent1['id']}, binding_dicts)
-        self.assertIn({'router_id': router['id'],
-                       'l3_agent_id': self.agent2['id']}, binding_dicts)
-
-    def test_get_l3_bindings_hosting_router_with_ha_states_ha_router(self):
-        router = self._create_router()
-        self._bind_router(router['id'])
-        self.plugin.update_routers_states(
-            self.admin_ctx, {router['id']: 'active'}, self.agent1['host'])
-        bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
-            self.admin_ctx, router['id'])
-        agent_ids = [(agent[0]['id'], agent[1]) for agent in bindings]
-        self.assertIn((self.agent1['id'], 'active'), agent_ids)
-        self.assertIn((self.agent2['id'], 'standby'), agent_ids)
-
-    def test_get_l3_bindings_hosting_router_with_ha_states_agent_none(self):
-        router = self._create_router()
-        # Do not bind router to leave agents as None
-        res = self.admin_ctx.session.query(
-            l3_hamode_db.L3HARouterAgentPortBinding).filter(
-            l3_hamode_db.L3HARouterAgentPortBinding.router_id == router['id']
-        ).all()
-        # Check that agents are None
-        self.assertEqual([None, None], [r.agent for r in res])
-        bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
-            self.admin_ctx, router['id'])
-        self.assertEqual([], bindings)
-
-    def test_get_l3_bindings_hosting_router_with_ha_states_not_scheduled(self):
-        router = self._create_router(ha=False)
-        # Check that there are no L3 agents scheduled for this router
-        res = self.admin_ctx.session.query(
-            l3_hamode_db.L3HARouterAgentPortBinding).filter(
-            l3_hamode_db.L3HARouterAgentPortBinding.router_id == router['id']
-        ).all()
-        self.assertEqual([], [r.agent for r in res])
-        bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
-            self.admin_ctx, router['id'])
-        self.assertEqual([], bindings)
-
-    def test_ha_router_create(self):
-        router = self._create_router()
-        self.assertTrue(router['ha'])
-
-    def test_ha_router_create_with_distributed(self):
-        self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
-                          self._create_router,
-                          distributed=True)
-
-    def test_no_ha_router_create(self):
-        router = self._create_router(ha=False)
-        self.assertFalse(router['ha'])
-
-    def test_add_ha_network_settings(self):
-        cfg.CONF.set_override('l3_ha_network_type', 'abc')
-        cfg.CONF.set_override('l3_ha_network_physical_name', 'def')
-
-        network = {}
-        self.plugin._add_ha_network_settings(network)
-
-        self.assertEqual('abc', network[providernet.NETWORK_TYPE])
-        self.assertEqual('def', network[providernet.PHYSICAL_NETWORK])
-
-    def test_router_create_with_ha_conf_enabled(self):
-        cfg.CONF.set_override('l3_ha', True)
-
-        router = self._create_router(ha=None)
-        self.assertTrue(router['ha'])
-
-    def test_migration_from_ha(self):
-        router = self._create_router()
-        self.assertTrue(router['ha'])
-
-        router = self._migrate_router(router['id'], False)
-        self.assertFalse(router.extra_attributes['ha'])
-        self.assertIsNone(router.extra_attributes['ha_vr_id'])
-
-    def test_migration_to_ha(self):
-        router = self._create_router(ha=False)
-        self.assertFalse(router['ha'])
-
-        router = self._migrate_router(router['id'], True)
-        self.assertTrue(router.extra_attributes['ha'])
-        self.assertIsNotNone(router.extra_attributes['ha_vr_id'])
-
-    def test_migration_requires_admin_state_down(self):
-        router = self._create_router(ha=False)
-        self.assertRaises(n_exc.BadRequest,
-                          self._update_router,
-                          router['id'],
-                          ha=True)
-
-    def test_migrate_ha_router_to_distributed(self):
-        router = self._create_router()
-        self.assertTrue(router['ha'])
-
-        self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
-                          self._update_router,
-                          router['id'],
-                          distributed=True)
-
-    def test_migrate_distributed_router_to_ha(self):
-        router = self._create_router(ha=False, distributed=True)
-        self.assertFalse(router['ha'])
-        self.assertTrue(router['distributed'])
-
-        self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
-                          self._update_router,
-                          router['id'],
-                          ha=True)
-
-    def test_migrate_legacy_router_to_distributed_and_ha(self):
-        router = self._create_router(ha=False, distributed=False)
-        self.assertFalse(router['ha'])
-        self.assertFalse(router['distributed'])
-
-        self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
-                          self._update_router,
-                          router['id'],
-                          ha=True,
-                          distributed=True)
-
-    def test_unbind_ha_router(self):
-        router = self._create_router()
-        self._bind_router(router['id'])
-
-        bound_agents = self.plugin.get_l3_agents_hosting_routers(
-            self.admin_ctx, [router['id']])
-        self.assertEqual(2, len(bound_agents))
-
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins') as mock_manager:
-            self.plugin._unbind_ha_router(self.admin_ctx, router['id'])
-
-        bound_agents = self.plugin.get_l3_agents_hosting_routers(
-            self.admin_ctx, [router['id']])
-        self.assertEqual(0, len(bound_agents))
-        self.assertEqual(2, mock_manager.call_count)
-
-    def test_get_ha_sync_data_for_host_with_non_dvr_agent(self):
-        with mock.patch.object(self.plugin,
-                               '_get_dvr_sync_data') as mock_get_sync:
-            self.plugin.supported_extension_aliases = ['dvr', 'l3-ha']
-            self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
-                                                  self.agent1['host'],
-                                                  self.agent1)
-            self.assertFalse(mock_get_sync.called)
-
-    def test_get_ha_sync_data_for_host_with_dvr_agent(self):
-        with mock.patch.object(self.plugin,
-                               '_get_dvr_sync_data') as mock_get_sync:
-            self.plugin.supported_extension_aliases = ['dvr', 'l3-ha']
-            self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
-                                                  self.agent2['host'],
-                                                  self.agent2)
-            self.assertTrue(mock_get_sync.called)
-
-    def test_l3_agent_routers_query_interface(self):
-        router = self._create_router()
-        self._bind_router(router['id'])
-        routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
-                                                        self.agent1['host'],
-                                                        self.agent1)
-        self.assertEqual(1, len(routers))
-        router = routers[0]
-
-        self.assertIsNotNone(router.get('ha'))
-
-        interface = router.get(constants.HA_INTERFACE_KEY)
-        self.assertIsNotNone(interface)
-
-        self.assertEqual(constants.DEVICE_OWNER_ROUTER_HA_INTF,
-                         interface['device_owner'])
-
-        subnets = interface['subnets']
-        self.assertEqual(1, len(subnets))
-        self.assertEqual(cfg.CONF.l3_ha_net_cidr, subnets[0]['cidr'])
-
-    def test_unique_ha_network_per_tenant(self):
-        tenant1 = _uuid()
-        tenant2 = _uuid()
-        self._create_router(tenant_id=tenant1)
-        self._create_router(tenant_id=tenant2)
-        ha_network1 = self.plugin.get_ha_network(self.admin_ctx, tenant1)
-        ha_network2 = self.plugin.get_ha_network(self.admin_ctx, tenant2)
-        self.assertNotEqual(
-            ha_network1['network_id'], ha_network2['network_id'])
-
-    def _deployed_router_change_ha_flag(self, to_ha):
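-        # Flip the ha flag on an already-scheduled router and verify that
-        # the HA interface appears (or disappears) in the sync data.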
-        router1 = self._create_router(ha=not to_ha)
-        self._bind_router(router1['id'])
-        routers = self.plugin.get_ha_sync_data_for_host(
-            self.admin_ctx, self.agent1['host'], self.agent1)
-        router = routers[0]
-        interface = router.get(constants.HA_INTERFACE_KEY)
-        if to_ha:
-            self.assertIsNone(interface)
-        else:
-            self.assertIsNotNone(interface)
-
-        self._migrate_router(router['id'], to_ha)
-        self.plugin.schedule_router(self.admin_ctx, router1['id'])
-        routers = self.plugin.get_ha_sync_data_for_host(
-            self.admin_ctx, self.agent1['host'], self.agent1)
-        router = routers[0]
-        interface = router.get(constants.HA_INTERFACE_KEY)
-        if to_ha:
-            self.assertIsNotNone(interface)
-        else:
-            self.assertIsNone(interface)
-
-    def test_deployed_router_can_have_ha_enabled(self):
-        self._deployed_router_change_ha_flag(to_ha=True)
-
-    def test_deployed_router_can_have_ha_disabled(self):
-        self._deployed_router_change_ha_flag(to_ha=False)
-
-    def test_create_ha_router_notifies_agent(self):
-        self._create_router()
-        self.assertTrue(self.notif_m.called)
-
-    def test_update_router_to_ha_notifies_agent(self):
-        router = self._create_router(ha=False)
-        self.notif_m.reset_mock()
-        self._migrate_router(router['id'], True)
-        self.assertTrue(self.notif_m.called)
-
-    def test_unique_vr_id_between_routers(self):
-        router1 = self._create_router()
-        router2 = self._create_router()
-        self._bind_router(router1['id'])
-        self._bind_router(router2['id'])
-        routers = self.plugin.get_ha_sync_data_for_host(
-            self.admin_ctx, self.agent1['host'], self.agent1)
-        self.assertEqual(2, len(routers))
-        self.assertNotEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])
-
-    @mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 1)))
-    def test_vr_id_depleted(self):
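-        # VR_ID_RANGE is patched to an empty set above, so the very first
-        # allocation attempt must fail with NoVRIDAvailable.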
-        self.assertRaises(l3_ext_ha_mode.NoVRIDAvailable, self._create_router)
-
-    @mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 2)))
-    def test_vr_id_unique_range_per_tenant(self):
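-        # Only one VR ID is available, yet routers in two different tenants
-        # must both get it: VR IDs are allocated per tenant HA network.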
-        router1 = self._create_router()
-        router2 = self._create_router(tenant_id=_uuid())
-        self._bind_router(router1['id'])
-        self._bind_router(router2['id'])
-        routers = self.plugin.get_ha_sync_data_for_host(
-            self.admin_ctx, self.agent1['host'], self.agent1)
-        self.assertEqual(2, len(routers))
-        self.assertEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])
-
-    @mock.patch('neutron.db.l3_hamode_db.MAX_ALLOCATION_TRIES', new=2)
-    def test_vr_id_allocation_constraint_conflict(self):
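-        # _get_allocated_vr_id is mocked to report no allocations, so each
-        # attempt presumably re-picks a VR ID that is already taken and
-        # conflicts, until MAX_ALLOCATION_TRIES (patched to 2) is exhausted.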
-        router = self._create_router()
-        network = self.plugin.get_ha_network(self.admin_ctx,
-                                             router['tenant_id'])
-
-        with mock.patch.object(self.plugin, '_get_allocated_vr_id',
-                               return_value=set()) as alloc:
-            self.assertRaises(l3_ext_ha_mode.MaxVRIDAllocationTriesReached,
-                              self.plugin._allocate_vr_id, self.admin_ctx,
-                              network.network_id, router['id'])
-            self.assertEqual(2, len(alloc.mock_calls))
-
-    def test_vr_id_allocation_delete_router(self):
-        router = self._create_router()
-        network = self.plugin.get_ha_network(self.admin_ctx,
-                                             router['tenant_id'])
-
-        allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
-                                                         network.network_id)
-        router = self._create_router()
-        allocs_current = self.plugin._get_allocated_vr_id(self.admin_ctx,
-                                                          network.network_id)
-        self.assertNotEqual(allocs_before, allocs_current)
-
-        self.plugin.delete_router(self.admin_ctx, router['id'])
-        allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
-                                                        network.network_id)
-        self.assertEqual(allocs_before, allocs_after)
-
-    def test_vr_id_allocation_router_migration(self):
-        router = self._create_router()
-        network = self.plugin.get_ha_network(self.admin_ctx,
-                                             router['tenant_id'])
-
-        allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
-                                                         network.network_id)
-        router = self._create_router()
-        self._migrate_router(router['id'], False)
-        allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
-                                                        network.network_id)
-        self.assertEqual(allocs_before, allocs_after)
-
-    def test_one_ha_router_one_not(self):
-        router1 = self._create_router(ha=False)
-        router2 = self._create_router()
-        self._bind_router(router1['id'])
-        self._bind_router(router2['id'])
-        routers = self.plugin.get_ha_sync_data_for_host(
-            self.admin_ctx, self.agent1['host'], self.agent1)
-
-        ha0 = routers[0]['ha']
-        ha1 = routers[1]['ha']
-
-        self.assertNotEqual(ha0, ha1)
-
-    def test_add_ha_port_subtransactions_blocked(self):
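-        # add_ha_port is expected to refuse to run inside an already open
-        # transaction, raising RuntimeError.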
-        with self.admin_ctx.session.begin():
-            self.assertRaises(RuntimeError, self.plugin.add_ha_port,
-                              self.admin_ctx, 'id', 'id', 'id')
-
-    def test_add_ha_port_binding_failure_rolls_back_port(self):
-        router = self._create_router()
-        device_filter = {'device_id': [router['id']]}
-        ports_before = self.core_plugin.get_ports(
-            self.admin_ctx, filters=device_filter)
-        network = self.plugin.get_ha_network(self.admin_ctx,
-                                             router['tenant_id'])
-
-        with mock.patch.object(self.plugin, '_create_ha_port_binding',
-                               side_effect=ValueError):
-            self.assertRaises(ValueError, self.plugin.add_ha_port,
-                              self.admin_ctx, router['id'], network.network_id,
-                              router['tenant_id'])
-
-        ports_after = self.core_plugin.get_ports(
-            self.admin_ctx, filters=device_filter)
-
-        self.assertEqual(ports_before, ports_after)
-
-    def test_create_ha_network_binding_failure_rolls_back_network(self):
-        networks_before = self.core_plugin.get_networks(self.admin_ctx)
-
-        with mock.patch.object(self.plugin,
-                               '_create_ha_network_tenant_binding',
-                               side_effect=ValueError):
-            self.assertRaises(ValueError, self.plugin._create_ha_network,
-                              self.admin_ctx, _uuid())
-
-        networks_after = self.core_plugin.get_networks(self.admin_ctx)
-        self.assertEqual(networks_before, networks_after)
-
-    def test_create_ha_network_subnet_failure_rolls_back_network(self):
-        networks_before = self.core_plugin.get_networks(self.admin_ctx)
-
-        with mock.patch.object(self.plugin, '_create_ha_subnet',
-                               side_effect=ValueError):
-            self.assertRaises(ValueError, self.plugin._create_ha_network,
-                              self.admin_ctx, _uuid())
-
-        networks_after = self.core_plugin.get_networks(self.admin_ctx)
-        self.assertEqual(networks_before, networks_after)
-
-    def test_create_ha_interfaces_binding_failure_rolls_back_ports(self):
-        router = self._create_router()
-        network = self.plugin.get_ha_network(self.admin_ctx,
-                                             router['tenant_id'])
-        device_filter = {'device_id': [router['id']]}
-        ports_before = self.core_plugin.get_ports(
-            self.admin_ctx, filters=device_filter)
-
-        router_db = self.plugin._get_router(self.admin_ctx, router['id'])
-        with mock.patch.object(self.plugin, '_create_ha_port_binding',
-                               side_effect=ValueError):
-            self.assertRaises(ValueError, self.plugin._create_ha_interfaces,
-                              self.admin_ctx, router_db, network)
-
-        ports_after = self.core_plugin.get_ports(
-            self.admin_ctx, filters=device_filter)
-        self.assertEqual(ports_before, ports_after)
-
-    def test_create_router_db_ha_attribute_failure_rolls_back_router(self):
-        routers_before = self.plugin.get_routers(self.admin_ctx)
-
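-        # Each of the failure points below must roll back the entire router
-        # creation, leaving the router table unchanged.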
-        for method in ('_set_vr_id',
-                       '_create_ha_interfaces',
-                       '_notify_ha_interfaces_updated'):
-            with mock.patch.object(self.plugin, method,
-                                   side_effect=ValueError):
-                self.assertRaises(ValueError, self._create_router)
-
-        routers_after = self.plugin.get_routers(self.admin_ctx)
-        self.assertEqual(routers_before, routers_after)
-
-    def test_get_active_host_for_ha_router(self):
-        router = self._create_router()
-        self._bind_router(router['id'])
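-        # No agent has reported an 'active' state yet, so there is no
-        # known active host.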
-        self.assertIsNone(
-            self.plugin.get_active_host_for_ha_router(
-                self.admin_ctx, router['id']))
-        self.plugin.update_routers_states(
-            self.admin_ctx, {router['id']: 'active'}, self.agent2['host'])
-        self.assertEqual(
-            self.agent2['host'],
-            self.plugin.get_active_host_for_ha_router(
-                self.admin_ctx, router['id']))
-
-    def test_update_routers_states(self):
-        router1 = self._create_router()
-        self._bind_router(router1['id'])
-        router2 = self._create_router()
-        self._bind_router(router2['id'])
-
-        routers = self.plugin.get_ha_sync_data_for_host(
-            self.admin_ctx, self.agent1['host'], self.agent1)
-        for router in routers:
-            self.assertEqual('standby', router[constants.HA_ROUTER_STATE_KEY])
-
-        states = {router1['id']: 'active',
-                  router2['id']: 'standby'}
-        self.plugin.update_routers_states(
-            self.admin_ctx, states, self.agent1['host'])
-
-        routers = self.plugin.get_ha_sync_data_for_host(
-            self.admin_ctx, self.agent1['host'], self.agent1)
-        for router in routers:
-            self.assertEqual(states[router['id']],
-                             router[constants.HA_ROUTER_STATE_KEY])
-
-    def test_set_router_states_handles_concurrently_deleted_router(self):
-        router1 = self._create_router()
-        self._bind_router(router1['id'])
-        router2 = self._create_router()
-        self._bind_router(router2['id'])
-        bindings = self.plugin.get_ha_router_port_bindings(
-            self.admin_ctx, [router1['id'], router2['id']])
-        self.plugin.delete_router(self.admin_ctx, router1['id'])
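-        # router1 was deleted after its bindings were fetched; the state
-        # update must skip it without raising.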
-        self.plugin._set_router_states(
-            self.admin_ctx, bindings, {router1['id']: 'active',
-                                       router2['id']: 'active'})
-        routers = self.plugin.get_ha_sync_data_for_host(
-            self.admin_ctx, self.agent1['host'], self.agent1)
-        self.assertEqual('active', routers[0][constants.HA_ROUTER_STATE_KEY])
-
-    def test_exclude_dvr_agents_for_ha_candidates(self):
-        """Test dvr agents are not counted in the ha candidates.
-
-        This test case tests that when get_number_of_agents_for_scheduling
-        is called, it doesn't count dvr agents.
-        """
-        # Test setup registers two l3 agents.
-        # Register another l3 agent with dvr mode and assert that
-        # get_number_of_ha_agent_candidates return 2.
-        helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR)
-        num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling(
-            self.admin_ctx)
-        self.assertEqual(2, num_ha_candidates)
-
-    def test_get_number_of_agents_for_scheduling_not_enough_agents(self):
-        cfg.CONF.set_override('min_l3_agents_per_router', 3)
-        helpers.kill_agent(helpers.register_l3_agent(host='l3host_3')['id'])
-        self.assertRaises(l3_ext_ha_mode.HANotEnoughAvailableAgents,
-                          self.plugin.get_number_of_agents_for_scheduling,
-                          self.admin_ctx)
-
-    def test_ha_network_deleted_if_no_ha_router_present_two_tenants(self):
-        # Create two routers in different tenants.
-        router1 = self._create_router()
-        router2 = self._create_router(tenant_id='tenant2')
-        nets_before = [net['name'] for net in
-                       self.core_plugin.get_networks(self.admin_ctx)]
-        # Check that an HA network was created for each tenant.
-        self.assertIn('HA network tenant %s' % router1['tenant_id'],
-                      nets_before)
-        self.assertIn('HA network tenant %s' % router2['tenant_id'],
-                      nets_before)
-        # Delete router1
-        self.plugin.delete_router(self.admin_ctx, router1['id'])
-        nets_after = [net['name'] for net in
-                      self.core_plugin.get_networks(self.admin_ctx)]
-        # Check that tenant1's HA network is deleted and tenant2's is not.
-        self.assertNotIn('HA network tenant %s' % router1['tenant_id'],
-                         nets_after)
-        self.assertIn('HA network tenant %s' % router2['tenant_id'],
-                      nets_after)
-
-    def test_ha_network_is_not_deleted_if_ha_router_is_present(self):
-        # Create two routers in one tenant and check that the HA network
-        # still exists after one of them is deleted.
-        router1 = self._create_router()
-        router2 = self._create_router()
-        nets_before = [net['name'] for net in
-                       self.core_plugin.get_networks(self.admin_ctx)]
-        self.assertIn('HA network tenant %s' % router1['tenant_id'],
-                      nets_before)
-        self.plugin.delete_router(self.admin_ctx, router2['id'])
-        nets_after = [net['name'] for net in
-                      self.core_plugin.get_networks(self.admin_ctx)]
-        self.assertIn('HA network tenant %s' % router1['tenant_id'],
-                      nets_after)
-
-    def test_ha_network_delete_ha_and_non_ha_router(self):
-        # Create an HA and a non-HA router. Check that once the HA router
-        # is deleted, the HA network is deleted as well.
-        router1 = self._create_router(ha=False)
-        router2 = self._create_router()
-        nets_before = [net['name'] for net in
-                       self.core_plugin.get_networks(self.admin_ctx)]
-        self.assertIn('HA network tenant %s' % router1['tenant_id'],
-                      nets_before)
-        self.plugin.delete_router(self.admin_ctx, router2['id'])
-        nets_after = [net['name'] for net in
-                      self.core_plugin.get_networks(self.admin_ctx)]
-        self.assertNotIn('HA network tenant %s' % router1['tenant_id'],
-                         nets_after)
-
-    def _test_ha_network_is_not_deleted_raise_exception(self, exception):
-        router1 = self._create_router()
-        nets_before = [net['name'] for net in
-                       self.core_plugin.get_networks(self.admin_ctx)]
-        self.assertIn('HA network tenant %s' % router1['tenant_id'],
-                      nets_before)
-        with mock.patch.object(self.plugin, '_delete_ha_network',
-                               side_effect=exception):
-            self.plugin.delete_router(self.admin_ctx, router1['id'])
-            nets_after = [net['name'] for net in
-                          self.core_plugin.get_networks(self.admin_ctx)]
-            self.assertIn('HA network tenant %s' % router1['tenant_id'],
-                          nets_after)
-
-    def test_ha_network_is_not_deleted_if_another_ha_router_is_created(self):
-        # If another router is created while the current router is being
-        # deleted, _delete_ha_network fails with InvalidRequestError. Check
-        # that the HA network is not deleted in that case.
-        self._test_ha_network_is_not_deleted_raise_exception(
-            sa.exc.InvalidRequestError)
-
-    def test_ha_network_is_not_deleted_if_network_in_use(self):
-        self._test_ha_network_is_not_deleted_raise_exception(
-            n_exc.NetworkInUse(net_id="foo_net_id"))
-
-    def test_ha_network_is_not_deleted_if_db_deleted_error(self):
-        self._test_ha_network_is_not_deleted_raise_exception(
-            orm.exc.ObjectDeletedError(None))
-
-    def test_ha_router_create_failed_no_ha_network_delete(self):
-        tenant_id = "foo_tenant_id"
-        nets_before = [net['name'] for net in
-                       self.core_plugin.get_networks(self.admin_ctx)]
-        self.assertNotIn('HA network tenant %s' % tenant_id, nets_before)
-
-        # Unable to create HA network
-        with mock.patch.object(self.core_plugin, 'create_network',
-                               side_effect=n_exc.NoNetworkAvailable):
-            self.assertRaises(n_exc.NoNetworkAvailable,
-                              self._create_router,
-                              True,
-                              tenant_id)
-            nets_after = [net['name'] for net in
-                          self.core_plugin.get_networks(self.admin_ctx)]
-            self.assertEqual(nets_before, nets_after)
-            self.assertNotIn('HA network tenant %s' % tenant_id,
-                             nets_after)
-
-
-class L3HAModeDbTestCase(L3HATestFramework):
-
-    def _create_network(self, plugin, ctx, name='net',
-                        tenant_id='tenant1'):
-        network = {'network': {'name': name,
-                               'shared': False,
-                               'admin_state_up': True,
-                               'tenant_id': tenant_id}}
-        return plugin.create_network(ctx, network)['id']
-
-    def _create_subnet(self, plugin, ctx, network_id, cidr='10.0.0.0/8',
-                       name='subnet', tenant_id='tenant1'):
-        subnet = {'subnet': {'name': name,
-                  'ip_version': 4,
-                  'network_id': network_id,
-                  'cidr': cidr,
-                  'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
-                  'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
-                  'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
-                  'host_routes': attributes.ATTR_NOT_SPECIFIED,
-                  'tenant_id': tenant_id,
-                  'enable_dhcp': True,
-                  'ipv6_ra_mode': attributes.ATTR_NOT_SPECIFIED}}
-        created_subnet = plugin.create_subnet(ctx, subnet)
-        return created_subnet
-
-    def test_remove_ha_in_use(self):
-        router = self._create_router(ctx=self.admin_ctx)
-        network_id = self._create_network(self.core_plugin, self.admin_ctx)
-        subnet = self._create_subnet(self.core_plugin, self.admin_ctx,
-                                     network_id)
-        interface_info = {'subnet_id': subnet['id']}
-        self.plugin.add_router_interface(self.admin_ctx,
-                                         router['id'],
-                                         interface_info)
-        self.assertRaises(l3.RouterInUse, self.plugin.delete_router,
-                          self.admin_ctx, router['id'])
-        bindings = self.plugin.get_ha_router_port_bindings(
-            self.admin_ctx, [router['id']])
-        self.assertEqual(2, len(bindings))
-
-    def test_update_router_port_bindings_no_ports(self):
-        self.plugin._update_router_port_bindings(
-            self.admin_ctx, {}, self.agent1['host'])
-
-    def _get_first_interface(self, router_id):
-        device_filter = {'device_id': [router_id],
-                         'device_owner':
-                         [constants.DEVICE_OWNER_ROUTER_INTF]}
-        return self.core_plugin.get_ports(
-            self.admin_ctx,
-            filters=device_filter)[0]
-
-    def test_update_router_port_bindings_updates_host(self):
-        network_id = self._create_network(self.core_plugin, self.admin_ctx)
-        subnet = self._create_subnet(self.core_plugin, self.admin_ctx,
-                                     network_id)
-        interface_info = {'subnet_id': subnet['id']}
-
-        router = self._create_router()
-        self._bind_router(router['id'])
-        self.plugin.add_router_interface(self.admin_ctx,
-                                         router['id'],
-                                         interface_info)
-        self.plugin._update_router_port_bindings(
-            self.admin_ctx, {router['id']: 'active'}, self.agent1['host'])
-
-        port = self._get_first_interface(router['id'])
-        self.assertEqual(self.agent1['host'], port[portbindings.HOST_ID])
-
-        self.plugin._update_router_port_bindings(
-            self.admin_ctx, {router['id']: 'active'}, self.agent2['host'])
-        port = self._get_first_interface(router['id'])
-        self.assertEqual(self.agent2['host'], port[portbindings.HOST_ID])
-
-    def test_ensure_host_set_on_ports_binds_correctly(self):
-        network_id = self._create_network(self.core_plugin, self.admin_ctx)
-        subnet = self._create_subnet(self.core_plugin, self.admin_ctx,
-                                     network_id)
-        interface_info = {'subnet_id': subnet['id']}
-
-        router = self._create_router()
-        self._bind_router(router['id'])
-        self.plugin.add_router_interface(self.admin_ctx,
-                                         router['id'],
-                                         interface_info)
-        port = self._get_first_interface(router['id'])
-        self.assertEqual('', port[portbindings.HOST_ID])
-
-        # Update the router object to include the first interface
-        router = (
-            self.plugin.list_active_sync_routers_on_active_l3_agent(
-                self.admin_ctx, self.agent1['host'], [router['id']]))[0]
-
-        # ensure_host_set_on_ports binds an unbound port
-        callback = l3_rpc.L3RpcCallback()
-        callback._l3plugin = self.plugin
-        callback._ensure_host_set_on_ports(
-            self.admin_ctx, self.agent1['host'], [router])
-        port = self._get_first_interface(router['id'])
-        self.assertEqual(self.agent1['host'], port[portbindings.HOST_ID])
-
-        # ensure_host_set_on_ports does not rebind a bound port
-        router = (
-            self.plugin.list_active_sync_routers_on_active_l3_agent(
-                self.admin_ctx, self.agent1['host'], [router['id']]))[0]
-        callback._ensure_host_set_on_ports(
-            self.admin_ctx, self.agent2['host'], [router])
-        port = self._get_first_interface(router['id'])
-        self.assertEqual(self.agent1['host'], port[portbindings.HOST_ID])
-
-
-class L3HAUserTestCase(L3HATestFramework):
-
-    def setUp(self):
-        super(L3HAUserTestCase, self).setUp()
-        self.user_ctx = context.Context('', _uuid())
-
-    def test_create_ha_router(self):
-        self._create_router(ctx=self.user_ctx)
-
-    def test_update_router(self):
-        router = self._create_router(ctx=self.user_ctx)
-        self._update_router(router['id'], ctx=self.user_ctx)
-
-    def test_delete_router(self):
-        router = self._create_router(ctx=self.user_ctx)
-        self.plugin.delete_router(self.user_ctx, router['id'])
diff --git a/neutron/tests/unit/db/test_migration.py b/neutron/tests/unit/db/test_migration.py
deleted file mode 100644 (file)
index 20f25c7..0000000
+++ /dev/null
@@ -1,781 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-import os
-import sys
-import textwrap
-
-from alembic.autogenerate import api as alembic_ag_api
-from alembic import config as alembic_config
-from alembic.operations import ops as alembic_ops
-from alembic import script as alembic_script
-import fixtures
-import mock
-from oslo_utils import fileutils
-import pkg_resources
-import sqlalchemy as sa
-
-from neutron.db import migration
-from neutron.db.migration import autogen
-from neutron.db.migration import cli
-from neutron.tests import base
-from neutron.tests import tools
-from neutron.tests.unit import testlib_api
-
-
-class FakeConfig(object):
-    service = ''
-
-
-class FakeRevision(object):
-    path = 'fakepath'
-
-    def __init__(self, labels=None, down_revision=None, is_branch_point=False):
-        if not labels:
-            labels = set()
-        self.branch_labels = labels
-        self.down_revision = down_revision
-        self.is_branch_point = is_branch_point
-        self.revision = tools.get_random_string()
-        self.module = mock.MagicMock()
-
-
-class MigrationEntrypointsMemento(fixtures.Fixture):
-    '''Create a copy of the migration entrypoints map so it can be restored
-       during test cleanup.
-    '''
-
-    def _setUp(self):
-        self.ep_backup = {}
-        for proj, ep in cli.migration_entrypoints.items():
-            self.ep_backup[proj] = copy.copy(ep)
-        self.addCleanup(self.restore)
-
-    def restore(self):
-        cli.migration_entrypoints = self.ep_backup
-
-
-class TestDbMigration(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestDbMigration, self).setUp()
-        mock.patch('alembic.op.get_bind').start()
-        self.mock_alembic_is_offline = mock.patch(
-            'alembic.context.is_offline_mode', return_value=False).start()
-        self.mock_sa_inspector = mock.patch(
-            'sqlalchemy.engine.reflection.Inspector').start()
-
-    def _prepare_mocked_sqlalchemy_inspector(self):
-        mock_inspector = mock.MagicMock()
-        mock_inspector.get_table_names.return_value = ['foo', 'bar']
-        mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
-                                                   {'name': 'bar_column'}]
-        self.mock_sa_inspector.from_engine.return_value = mock_inspector
-
-    def test_schema_has_table(self):
-        self._prepare_mocked_sqlalchemy_inspector()
-        self.assertTrue(migration.schema_has_table('foo'))
-
-    def test_schema_has_table_raises_if_offline(self):
-        self.mock_alembic_is_offline.return_value = True
-        self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')
-
-    def test_schema_has_column_missing_table(self):
-        self._prepare_mocked_sqlalchemy_inspector()
-        self.assertFalse(migration.schema_has_column('meh', 'meh'))
-
-    def test_schema_has_column(self):
-        self._prepare_mocked_sqlalchemy_inspector()
-        self.assertTrue(migration.schema_has_column('foo', 'foo_column'))
-
-    def test_schema_has_column_raises_if_offline(self):
-        self.mock_alembic_is_offline.return_value = True
-        self.assertRaises(RuntimeError, migration.schema_has_column,
-                          'foo', 'foo_col')
-
-    def test_schema_has_column_missing_column(self):
-        self._prepare_mocked_sqlalchemy_inspector()
-        self.assertFalse(migration.schema_has_column(
-            'foo', column_name='meh'))
-
-
-class TestCli(base.BaseTestCase):
-    def setUp(self):
-        super(TestCli, self).setUp()
-        self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
-        self.do_alembic_cmd = self.do_alembic_cmd_p.start()
-        self.mock_alembic_err = mock.patch('alembic.util.err').start()
-        self.mock_alembic_warn = mock.patch('alembic.util.warn').start()
-        self.mock_alembic_err.side_effect = SystemExit
-
-        def mocked_root_dir(cfg):
-            return os.path.join('/fake/dir', cli._get_project_base(cfg))
-        mock_root = mock.patch.object(cli, '_get_package_root_dir').start()
-        mock_root.side_effect = mocked_root_dir
-        # Avoid creating fake directories
-        mock.patch('neutron.common.utils.ensure_dir').start()
-
-        # Set up some configs and entrypoints for tests to chew on
-        self.configs = []
-        self.projects = ('neutron', 'networking-foo', 'neutron-fwaas')
-        ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini')
-        self.useFixture(MigrationEntrypointsMemento())
-        cli.migration_entrypoints = {}
-        for project in self.projects:
-            config = alembic_config.Config(ini)
-            config.set_main_option('neutron_project', project)
-            module_name = project.replace('-', '_') + '.db.migration'
-            attrs = ('alembic_migrations',)
-            script_location = ':'.join([module_name, attrs[0]])
-            config.set_main_option('script_location', script_location)
-            self.configs.append(config)
-            entrypoint = pkg_resources.EntryPoint(project,
-                                                  module_name,
-                                                  attrs=attrs)
-            cli.migration_entrypoints[project] = entrypoint
-
-    def _main_test_helper(self, argv, func_name, exp_kwargs=({},)):
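-        # Run cli.main() with the given argv (sanity checks and revision
-        # validation patched out) and assert do_alembic_command was called
-        # with func_name once per expected kwargs dict.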
-        with mock.patch.object(sys, 'argv', argv),\
-            mock.patch.object(cli, 'run_sanity_checks'),\
-            mock.patch.object(cli, 'validate_revisions'),\
-            mock.patch.object(cli, '_use_separate_migration_branches'):
-
-            cli.main()
-            self.do_alembic_cmd.assert_has_calls(
-                [mock.call(mock.ANY, func_name, **kwargs)
-                 for kwargs in exp_kwargs]
-            )
-
-    def test_stamp(self):
-        self._main_test_helper(
-            ['prog', 'stamp', 'foo'],
-            'stamp',
-            [{'revision': 'foo', 'sql': False}]
-        )
-
-        self._main_test_helper(
-            ['prog', 'stamp', 'foo', '--sql'],
-            'stamp',
-            [{'revision': 'foo', 'sql': True}]
-        )
-
-    def _validate_cmd(self, cmd):
-        self._main_test_helper(
-            ['prog', cmd],
-            cmd,
-            [{'verbose': False}])
-
-        self._main_test_helper(
-            ['prog', cmd, '--verbose'],
-            cmd,
-            [{'verbose': True}])
-
-    def test_branches(self):
-        self._validate_cmd('branches')
-
-    def test_current(self):
-        self._validate_cmd('current')
-
-    def test_history(self):
-        self._validate_cmd('history')
-
-    def test_heads(self):
-        self._validate_cmd('heads')
-
-    def test_check_migration(self):
-        with mock.patch.object(cli, 'validate_head_file') as validate:
-            self._main_test_helper(['prog', 'check_migration'], 'branches')
-            self.assertEqual(len(self.projects), validate.call_count)
-
-    def _test_database_sync_revision(self, separate_branches=True):
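-        # Exercise the 'revision' command with --autogenerate, --sql,
-        # --expand and --contract, checking that head files are updated
-        # for every configured project each time.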
-        with mock.patch.object(cli, 'update_head_files') as update,\
-                mock.patch.object(cli, '_use_separate_migration_branches',
-                                  return_value=separate_branches):
-            if separate_branches:
-                mock.patch('os.path.exists').start()
-            expected_kwargs = [{
-                'message': 'message', 'sql': False, 'autogenerate': True,
-            }]
-            self._main_test_helper(
-                ['prog', 'revision', '--autogenerate', '-m', 'message'],
-                'revision',
-                expected_kwargs
-            )
-            self.assertEqual(len(self.projects), update.call_count)
-            update.reset_mock()
-
-            for kwarg in expected_kwargs:
-                kwarg['autogenerate'] = False
-                kwarg['sql'] = True
-
-            self._main_test_helper(
-                ['prog', 'revision', '--sql', '-m', 'message'],
-                'revision',
-                expected_kwargs
-            )
-            self.assertEqual(len(self.projects), update.call_count)
-            update.reset_mock()
-
-            for kwarg in expected_kwargs:
-                kwarg['sql'] = False
-                kwarg['head'] = 'expand@head'
-
-            self._main_test_helper(
-                ['prog', 'revision', '-m', 'message', '--expand'],
-                'revision',
-                expected_kwargs
-            )
-            self.assertEqual(len(self.projects), update.call_count)
-            update.reset_mock()
-
-            for kwarg in expected_kwargs:
-                kwarg['head'] = 'contract@head'
-
-            self._main_test_helper(
-                ['prog', 'revision', '-m', 'message', '--contract'],
-                'revision',
-                expected_kwargs
-            )
-            self.assertEqual(len(self.projects), update.call_count)
-
-    def test_database_sync_revision(self):
-        self._test_database_sync_revision()
-
-    def test_database_sync_revision_no_branches(self):
-        # Test that old branchless approach is still supported
-        self._test_database_sync_revision(separate_branches=False)
-
-    def test_upgrade_revision(self):
-        self._main_test_helper(
-            ['prog', 'upgrade', '--sql', 'head'],
-            'upgrade',
-            [{'desc': None, 'revision': 'heads', 'sql': True}]
-        )
-
-    def test_upgrade_delta(self):
-        self._main_test_helper(
-            ['prog', 'upgrade', '--delta', '3'],
-            'upgrade',
-            [{'desc': None, 'revision': '+3', 'sql': False}]
-        )
-
-    def test_upgrade_revision_delta(self):
-        self._main_test_helper(
-            ['prog', 'upgrade', 'kilo', '--delta', '3'],
-            'upgrade',
-            [{'desc': None, 'revision': 'kilo+3', 'sql': False}]
-        )
-
-    def test_upgrade_expand(self):
-        self._main_test_helper(
-            ['prog', 'upgrade', '--expand'],
-            'upgrade',
-            [{'desc': cli.EXPAND_BRANCH,
-              'revision': 'expand@head',
-              'sql': False}]
-        )
-
-    def test_upgrade_expand_contract_are_mutually_exclusive(self):
-        with testlib_api.ExpectedException(SystemExit):
-            self._main_test_helper(
-                ['prog', 'upgrade', '--expand --contract'], 'upgrade')
-
-    def _test_upgrade_conflicts_with_revision(self, mode):
-        with testlib_api.ExpectedException(SystemExit):
-            self._main_test_helper(
-                ['prog', 'upgrade', '--%s revision1' % mode], 'upgrade')
-
-    def _test_upgrade_conflicts_with_delta(self, mode):
-        with testlib_api.ExpectedException(SystemExit):
-            self._main_test_helper(
-                ['prog', 'upgrade', '--%s +3' % mode], 'upgrade')
-
-    def test_upgrade_expand_conflicts_with_revision(self):
-        self._test_upgrade_conflicts_with_revision('expand')
-
-    def test_upgrade_contract_conflicts_with_revision(self):
-        self._test_upgrade_conflicts_with_revision('contract')
-
-    def test_upgrade_expand_conflicts_with_delta(self):
-        self._test_upgrade_conflicts_with_delta('expand')
-
-    def test_upgrade_contract_conflicts_with_delta(self):
-        self._test_upgrade_conflicts_with_delta('contract')
-
-    def test_upgrade_contract(self):
-        self._main_test_helper(
-            ['prog', 'upgrade', '--contract'],
-            'upgrade',
-            [{'desc': cli.CONTRACT_BRANCH,
-              'revision': 'contract@head',
-              'sql': False}]
-        )
-
-    def assert_command_fails(self, command):
-        # Avoid cluttering stdout with argparse error messages
-        mock.patch('argparse.ArgumentParser._print_message').start()
-        with mock.patch.object(sys, 'argv', command), mock.patch.object(
-                cli, 'run_sanity_checks'):
-            self.assertRaises(SystemExit, cli.main)
-
-    def test_downgrade_fails(self):
-        self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno'])
-
-    @mock.patch.object(cli, '_use_separate_migration_branches')
-    def test_upgrade_negative_relative_revision_fails(self, use_mock):
-        self.assert_command_fails(['prog', 'upgrade', '-2'])
-
-    @mock.patch.object(cli, '_use_separate_migration_branches')
-    def test_upgrade_negative_delta_fails(self, use_mock):
-        self.assert_command_fails(['prog', 'upgrade', '--delta', '-2'])
-
-    @mock.patch.object(cli, '_use_separate_migration_branches')
-    def test_upgrade_rejects_delta_with_relative_revision(self, use_mock):
-        self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3'])
-
-    def _test_validate_head_file_helper(self, heads, file_heads=None):
-        if file_heads is None:
-            file_heads = []
-        fake_config = self.configs[0]
-        mock_open = self.useFixture(
-                    tools.OpenFixture(cli._get_head_file_path(fake_config),
-                                      '\n'.join(file_heads))).mock_open
-        with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\
-                mock.patch.object(cli, '_use_separate_migration_branches',
-                                  return_value=False):
-            fc.return_value.get_heads.return_value = heads
-            if all(head in file_heads for head in heads):
-                cli.validate_head_file(fake_config)
-            else:
-                self.assertRaises(
-                    SystemExit,
-                    cli.validate_head_file,
-                    fake_config
-                )
-                self.assertTrue(self.mock_alembic_err.called)
-            mock_open.assert_called_with(
-                    cli._get_head_file_path(fake_config))
-
-            fc.assert_called_once_with(fake_config)
-
-    def _test_validate_head_files_helper(self, heads, contract_head='',
-                                         expand_head=''):
-        fake_config = self.configs[0]
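-        # Empty contract and expand heads simulate head files that do not
-        # exist on disk.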
-        head_files_not_exist = (contract_head == expand_head == '')
-        with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\
-                mock.patch('os.path.exists') as os_mock,\
-                mock.patch.object(cli, '_use_separate_migration_branches',
-                                  return_value=True):
-            if head_files_not_exist:
-                os_mock.return_value = False
-            else:
-                os_mock.return_value = True
-
-            fc.return_value.get_heads.return_value = heads
-
-            revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
-                    heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
-            fc.return_value.get_revision.side_effect = revs.__getitem__
-            mock_open_con = self.useFixture(
-                tools.OpenFixture(cli._get_contract_head_file_path(
-                    fake_config), contract_head + '\n')).mock_open
-            mock_open_ex = self.useFixture(
-                tools.OpenFixture(cli._get_expand_head_file_path(
-                    fake_config), expand_head + '\n')).mock_open
-
-            if contract_head in heads and expand_head in heads:
-                cli.validate_head_file(fake_config)
-            elif head_files_not_exist:
-                cli.validate_head_file(fake_config)
-                self.assertTrue(self.mock_alembic_warn.called)
-            else:
-                self.assertRaises(
-                    SystemExit,
-                    cli.validate_head_file,
-                    fake_config
-                )
-                self.assertTrue(self.mock_alembic_err.called)
-
-            if contract_head in heads and expand_head in heads:
-                mock_open_ex.assert_called_with(
-                    cli._get_expand_head_file_path(fake_config))
-                mock_open_con.assert_called_with(
-                    cli._get_contract_head_file_path(fake_config))
-
-            if not head_files_not_exist:
-                fc.assert_called_once_with(fake_config)
-
-    def test_validate_head_files_success(self):
-        self._test_validate_head_files_helper(['a', 'b'], contract_head='a',
-                                              expand_head='b')
-
-    def test_validate_head_files_missing_file(self):
-        self._test_validate_head_files_helper(['a', 'b'])
-
-    def test_validate_head_files_wrong_contents(self):
-        self._test_validate_head_files_helper(['a', 'b'], contract_head='c',
-                                              expand_head='d')
-
-    def test_validate_head_file_branchless_wrong_contents(self):
-        self._test_validate_head_file_helper(['a'], ['b'])
-
-    def test_validate_head_file_branchless_success(self):
-        self._test_validate_head_file_helper(['a'], ['a'])
-
-    def test_validate_head_file_branchless_missing_file(self):
-        self._test_validate_head_file_helper(['a'])
-
-    def test_update_head_file_success(self):
-        head = ['b']
-        mock_open = self.useFixture(
-                    tools.OpenFixture(cli._get_head_file_path(
-                        self.configs[0]))).mock_open
-        with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
-            fc.return_value.get_heads.return_value = head
-            cli.update_head_file(self.configs[0])
-            mock_open.return_value.write.assert_called_with(
-                '\n'.join(head))
-
-    @mock.patch.object(cli, '_use_separate_migration_branches',
-                       return_value=True)
-    @mock.patch.object(fileutils, 'delete_if_exists')
-    def test_update_head_files_success(self, *mocks):
-        heads = ['a', 'b']
-        mock_open_con = self.useFixture(
-                    tools.OpenFixture(cli._get_contract_head_file_path(
-                        self.configs[0]))).mock_open
-        mock_open_ex = self.useFixture(
-            tools.OpenFixture(cli._get_expand_head_file_path(
-                self.configs[0]))).mock_open
-        with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
-            fc.return_value.get_heads.return_value = heads
-            revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
-                    heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
-            fc.return_value.get_revision.side_effect = revs.__getitem__
-            cli.update_head_files(self.configs[0])
-            mock_open_con.return_value.write.assert_called_with(
-                heads[0] + '\n')
-            mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n')
-
-            old_head_file = cli._get_head_file_path(
-                self.configs[0])
-            old_heads_file = cli._get_heads_file_path(
-                self.configs[0])
-            delete_if_exists = mocks[0]
-            self.assertIn(mock.call(old_head_file),
-                          delete_if_exists.call_args_list)
-            self.assertIn(mock.call(old_heads_file),
-                          delete_if_exists.call_args_list)
-
-    def test_get_project_base(self):
-        config = alembic_config.Config()
-        config.set_main_option('script_location', 'a.b.c:d')
-        proj_base = cli._get_project_base(config)
-        self.assertEqual('a', proj_base)
-
-    def test_get_root_versions_dir(self):
-        config = alembic_config.Config()
-        config.set_main_option('script_location', 'a.b.c:d')
-        versions_dir = cli._get_root_versions_dir(config)
-        self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir)
-
-    def test_get_subproject_script_location(self):
-        foo_ep = cli._get_subproject_script_location('networking-foo')
-        expected = 'networking_foo.db.migration:alembic_migrations'
-        self.assertEqual(expected, foo_ep)
-
-    def test_get_subproject_script_location_not_installed(self):
-        self.assertRaises(
-            SystemExit, cli._get_subproject_script_location, 'not-installed')
-
-    def test_get_service_script_location(self):
-        fwaas_ep = cli._get_service_script_location('fwaas')
-        expected = 'neutron_fwaas.db.migration:alembic_migrations'
-        self.assertEqual(expected, fwaas_ep)
-
-    def test_get_service_script_location_not_installed(self):
-        self.assertRaises(
-            SystemExit, cli._get_service_script_location, 'myaas')
-
-    def test_get_subproject_base_not_installed(self):
-        self.assertRaises(
-            SystemExit, cli._get_subproject_base, 'not-installed')
-
-    def test__compare_labels_ok(self):
-        labels = {'label1', 'label2'}
-        fake_revision = FakeRevision(labels)
-        cli._compare_labels(fake_revision, {'label1', 'label2'})
-
-    def test__compare_labels_fail_unexpected_labels(self):
-        labels = {'label1', 'label2', 'label3'}
-        fake_revision = FakeRevision(labels)
-        self.assertRaises(
-            SystemExit,
-            cli._compare_labels, fake_revision, {'label1', 'label2'})
-
-    @mock.patch.object(cli, '_compare_labels')
-    def test__validate_single_revision_labels_branchless_fail_different_labels(
-        self, compare_mock):
-
-        fake_down_revision = FakeRevision()
-        fake_revision = FakeRevision(down_revision=fake_down_revision)
-
-        script_dir = mock.Mock()
-        script_dir.get_revision.return_value = fake_down_revision
-        cli._validate_single_revision_labels(script_dir, fake_revision,
-                                             label=None)
-
-        expected_labels = set()
-        compare_mock.assert_has_calls(
-            [mock.call(revision, expected_labels)
-             for revision in (fake_revision, fake_down_revision)]
-        )
-
-    @mock.patch.object(cli, '_compare_labels')
-    def test__validate_single_revision_labels_branches_fail_different_labels(
-        self, compare_mock):
-
-        fake_down_revision = FakeRevision()
-        fake_revision = FakeRevision(down_revision=fake_down_revision)
-
-        script_dir = mock.Mock()
-        script_dir.get_revision.return_value = fake_down_revision
-        cli._validate_single_revision_labels(
-            script_dir, fake_revision, label='fakebranch')
-
-        expected_labels = {'fakebranch'}
-        compare_mock.assert_has_calls(
-            [mock.call(revision, expected_labels)
-             for revision in (fake_revision, fake_down_revision)]
-        )
-
-    @mock.patch.object(cli, '_validate_single_revision_labels')
-    def test__validate_revision_validates_branches(self, validate_mock):
-        script_dir = mock.Mock()
-        fake_revision = FakeRevision()
-        branch = cli.MIGRATION_BRANCHES[0]
-        fake_revision.path = os.path.join('/fake/path', branch)
-        cli._validate_revision(script_dir, fake_revision)
-        validate_mock.assert_called_with(
-            script_dir, fake_revision, label=branch)
-
-    @mock.patch.object(cli, '_validate_single_revision_labels')
-    def test__validate_revision_validates_branchless_migrations(
-        self, validate_mock):
-
-        script_dir = mock.Mock()
-        fake_revision = FakeRevision()
-        cli._validate_revision(script_dir, fake_revision)
-        validate_mock.assert_called_with(script_dir, fake_revision)
-
-    @mock.patch.object(cli, '_validate_revision')
-    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
-    def test_validate_revisions_walks_thru_all_revisions(
-        self, walk_mock, validate_mock):
-
-        revisions = [FakeRevision() for i in range(10)]
-        walk_mock.return_value = revisions
-        cli.validate_revisions(self.configs[0])
-        validate_mock.assert_has_calls(
-            [mock.call(mock.ANY, revision) for revision in revisions]
-        )
-
-    @mock.patch.object(cli, '_validate_revision')
-    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
-    def test_validate_revisions_fails_on_multiple_branch_points(
-        self, walk_mock, validate_mock):
-
-        revisions = [FakeRevision(is_branch_point=True) for i in range(2)]
-        walk_mock.return_value = revisions
-        self.assertRaises(
-            SystemExit, cli.validate_revisions, self.configs[0])
-
-    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
-    def test__get_branch_points(self, walk_mock):
-        revisions = [FakeRevision(is_branch_point=tools.get_random_boolean())
-                     for i in range(50)]
-        walk_mock.return_value = revisions
-        script_dir = alembic_script.ScriptDirectory.from_config(
-            self.configs[0])
-        self.assertEqual(set(rev for rev in revisions if rev.is_branch_point),
-                         set(cli._get_branch_points(script_dir)))
-
-    @mock.patch.object(cli, '_use_separate_migration_branches')
-    @mock.patch.object(cli, '_get_version_branch_path')
-    def test_autogen_process_directives(
-            self,
-            get_version_branch_path,
-            use_separate_migration_branches):
-
-        use_separate_migration_branches.return_value = True
-        get_version_branch_path.side_effect = lambda cfg, release, branch: (
-            "/foo/expand" if branch == 'expand' else "/foo/contract")
-
-        migration_script = alembic_ops.MigrationScript(
-            'eced083f5df',
-            # these directives will be split into separate
-            # expand/contract scripts
-            alembic_ops.UpgradeOps(
-                ops=[
-                    alembic_ops.CreateTableOp(
-                        'organization',
-                        [
-                            sa.Column('id', sa.Integer(), primary_key=True),
-                            sa.Column('name', sa.String(50), nullable=False)
-                        ]
-                    ),
-                    alembic_ops.ModifyTableOps(
-                        'user',
-                        ops=[
-                            alembic_ops.AddColumnOp(
-                                'user',
-                                sa.Column('organization_id', sa.Integer())
-                            ),
-                            alembic_ops.CreateForeignKeyOp(
-                                'org_fk', 'user', 'organization',
-                                ['organization_id'], ['id']
-                            ),
-                            alembic_ops.DropConstraintOp(
-                                'user', 'uq_user_org'
-                            ),
-                            alembic_ops.DropColumnOp(
-                                'user', 'organization_name'
-                            )
-                        ]
-                    )
-                ]
-            ),
-            # these will be discarded
-            alembic_ops.DowngradeOps(
-                ops=[
-                    alembic_ops.AddColumnOp(
-                        'user', sa.Column(
-                            'organization_name', sa.String(50), nullable=True)
-                    ),
-                    alembic_ops.CreateUniqueConstraintOp(
-                        'uq_user_org', 'user',
-                        ['user_name', 'organization_name']
-                    ),
-                    alembic_ops.ModifyTableOps(
-                        'user',
-                        ops=[
-                            alembic_ops.DropConstraintOp('org_fk', 'user'),
-                            alembic_ops.DropColumnOp('user', 'organization_id')
-                        ]
-                    ),
-                    alembic_ops.DropTableOp('organization')
-                ]
-            ),
-            message='create the organization table and '
-            'replace user.organization_name'
-        )
-
-        directives = [migration_script]
-        autogen.process_revision_directives(
-            mock.Mock(), mock.Mock(), directives
-        )
-
-        expand = directives[0]
-        contract = directives[1]
-        self.assertEqual("/foo/expand", expand.version_path)
-        self.assertEqual("/foo/contract", contract.version_path)
-        self.assertTrue(expand.downgrade_ops.is_empty())
-        self.assertTrue(contract.downgrade_ops.is_empty())
-
-        self.assertEqual(
-            textwrap.dedent("""\
-            ### commands auto generated by Alembic - please adjust! ###
-                op.create_table('organization',
-                sa.Column('id', sa.Integer(), nullable=False),
-                sa.Column('name', sa.String(length=50), nullable=False),
-                sa.PrimaryKeyConstraint('id')
-                )
-                op.add_column('user', """
-                """sa.Column('organization_id', sa.Integer(), nullable=True))
-                op.create_foreign_key('org_fk', 'user', """
-                """'organization', ['organization_id'], ['id'])
-                ### end Alembic commands ###"""),
-            alembic_ag_api.render_python_code(expand.upgrade_ops)
-        )
-        self.assertEqual(
-            textwrap.dedent("""\
-            ### commands auto generated by Alembic - please adjust! ###
-                op.drop_constraint('user', 'uq_user_org', type_=None)
-                op.drop_column('user', 'organization_name')
-                ### end Alembic commands ###"""),
-            alembic_ag_api.render_python_code(contract.upgrade_ops)
-        )
-
-    @mock.patch.object(cli, '_get_branch_points', return_value=[])
-    @mock.patch.object(cli.CONF, 'split_branches',
-                       new_callable=mock.PropertyMock,
-                       return_value=True, create=True)
-    def test__use_separate_migration_branches_enforced(self, *mocks):
-        self.assertTrue(cli._use_separate_migration_branches(self.configs[0]))
-
-    @mock.patch.object(cli, '_get_branch_points', return_value=[])
-    def test__use_separate_migration_branches_no_branch_points(self, *mocks):
-        self.assertFalse(cli._use_separate_migration_branches(self.configs[0]))
-
-    @mock.patch.object(cli, '_get_branch_points', return_value=['fake1'])
-    def test__use_separate_migration_branches_with_branch_points(self, *mocks):
-        self.assertTrue(cli._use_separate_migration_branches(self.configs[0]))
-
-    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
-    def test__find_milestone_revisions_one_branch(self, walk_mock):
-        c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
-        c_revs[1].module.neutron_milestone = [migration.LIBERTY]
-
-        walk_mock.return_value = c_revs
-        m = cli._find_milestone_revisions(self.configs[0], 'liberty',
-                                          cli.CONTRACT_BRANCH)
-        self.assertEqual(1, len(m))
-        m = cli._find_milestone_revisions(self.configs[0], 'liberty',
-                                          cli.EXPAND_BRANCH)
-        self.assertEqual(0, len(m))
-
-    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
-    def test__find_milestone_revisions_two_branches(self, walk_mock):
-        c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
-        c_revs[1].module.neutron_milestone = [migration.LIBERTY]
-        e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
-        e_revs[3].module.neutron_milestone = [migration.LIBERTY]
-
-        walk_mock.return_value = c_revs + e_revs
-        m = cli._find_milestone_revisions(self.configs[0], 'liberty')
-        self.assertEqual(2, len(m))
-
-        m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
-        self.assertEqual(0, len(m))
-
-    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
-    def test__find_milestone_revisions_branchless(self, walk_mock):
-        revisions = [FakeRevision() for r in range(5)]
-        revisions[2].module.neutron_milestone = [migration.LIBERTY]
-
-        walk_mock.return_value = revisions
-        m = cli._find_milestone_revisions(self.configs[0], 'liberty')
-        self.assertEqual(1, len(m))
-
-        m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
-        self.assertEqual(0, len(m))
-
-
-class TestSafetyChecks(base.BaseTestCase):
-
-    def test_validate_revisions(self):
-        cli.validate_revisions(cli.get_neutron_config())
diff --git a/neutron/tests/unit/db/test_securitygroups_db.py b/neutron/tests/unit/db/test_securitygroups_db.py
deleted file mode 100644 (file)
index db98f46..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-import testtools
-
-from neutron.callbacks import exceptions
-from neutron.callbacks import registry
-from neutron import context
-from neutron.db import common_db_mixin
-from neutron.db import securitygroups_db
-from neutron.extensions import securitygroup
-from neutron.tests.unit import testlib_api
-
-
-class SecurityGroupDbMixinImpl(securitygroups_db.SecurityGroupDbMixin,
-                               common_db_mixin.CommonDbMixin):
-    pass
-
-
-class SecurityGroupDbMixinTestCase(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(SecurityGroupDbMixinTestCase, self).setUp()
-        self.ctx = context.get_admin_context()
-        self.mixin = SecurityGroupDbMixinImpl()
-
-    def test_create_security_group_conflict(self):
-        with mock.patch.object(registry, "notify") as mock_notify:
-            mock_notify.side_effect = exceptions.CallbackFailure(Exception())
-            secgroup = {'security_group': mock.ANY}
-            with testtools.ExpectedException(
-                securitygroup.SecurityGroupConflict):
-                self.mixin.create_security_group(self.ctx, secgroup)
-
-    def test_delete_security_group_in_use(self):
-        with mock.patch.object(self.mixin,
-                               '_get_port_security_group_bindings'),\
-                mock.patch.object(self.mixin, '_get_security_group'),\
-                mock.patch.object(registry, "notify") as mock_notify:
-            mock_notify.side_effect = exceptions.CallbackFailure(Exception())
-            with testtools.ExpectedException(
-                securitygroup.SecurityGroupInUse):
-                self.mixin.delete_security_group(self.ctx, mock.ANY)
-
-    def test_update_security_group_conflict(self):
-        with mock.patch.object(registry, "notify") as mock_notify:
-            mock_notify.side_effect = exceptions.CallbackFailure(Exception())
-            secgroup = {'security_group': mock.ANY}
-            with testtools.ExpectedException(
-                securitygroup.SecurityGroupConflict):
-                self.mixin.update_security_group(self.ctx, 'foo_id', secgroup)
-
-    def test_create_security_group_rule_conflict(self):
-        with mock.patch.object(self.mixin, '_validate_security_group_rule'),\
-                mock.patch.object(self.mixin,
-                                  '_check_for_duplicate_rules_in_db'),\
-                mock.patch.object(registry, "notify") as mock_notify:
-            mock_notify.side_effect = exceptions.CallbackFailure(Exception())
-            with testtools.ExpectedException(
-                securitygroup.SecurityGroupConflict):
-                self.mixin.create_security_group_rule(
-                    self.ctx, mock.MagicMock())
-
-    def test_delete_security_group_rule_in_use(self):
-        with mock.patch.object(registry, "notify") as mock_notify:
-            mock_notify.side_effect = exceptions.CallbackFailure(Exception())
-            with testtools.ExpectedException(
-                securitygroup.SecurityGroupRuleInUse):
-                self.mixin.delete_security_group_rule(self.ctx, mock.ANY)
-
-    def test_delete_security_group_rule_raise_error_on_not_found(self):
-        with testtools.ExpectedException(
-            securitygroup.SecurityGroupRuleNotFound):
-            self.mixin.delete_security_group_rule(self.ctx, 'foo_rule')
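
Every test in the deleted file above follows one pattern: patch registry.notify to raise CallbackFailure and assert the mixin converts that into a user-facing API exception. A minimal self-contained sketch of the pattern, with stand-in classes in place of neutron's registry and exception types:

import unittest
from unittest import mock

class CallbackFailure(Exception):
    """Stand-in for neutron.callbacks.exceptions.CallbackFailure."""

class SecurityGroupConflict(Exception):
    """Stand-in for the user-facing API exception."""

class Registry(object):
    """Stand-in for neutron.callbacks.registry."""
    def notify(self, resource, event, trigger, **kwargs):
        pass

registry = Registry()

class SecurityGroupMixin(object):
    def create_security_group(self, context, secgroup):
        try:
            # A failing pre-create callback is surfaced as a conflict.
            registry.notify('security_group', 'before_create', self,
                            security_group=secgroup)
        except CallbackFailure:
            raise SecurityGroupConflict()
        return secgroup

class PatternTest(unittest.TestCase):
    def test_create_security_group_conflict(self):
        with mock.patch.object(registry, 'notify',
                               side_effect=CallbackFailure()):
            self.assertRaises(SecurityGroupConflict,
                              SecurityGroupMixin().create_security_group,
                              None, {'security_group': {}})

if __name__ == '__main__':
    unittest.main()
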
diff --git a/neutron/tests/unit/debug/__init__.py b/neutron/tests/unit/debug/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/debug/test_commands.py b/neutron/tests/unit/debug/test_commands.py
deleted file mode 100644 (file)
index 60c6ede..0000000
+++ /dev/null
@@ -1,333 +0,0 @@
-# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import socket
-
-import mock
-from oslo_config import cfg
-
-from neutron.agent.common import config
-from neutron.agent.linux import interface
-from neutron.common import config as common_config
-from neutron.debug import commands
-from neutron.debug import debug_agent
-from neutron.extensions import portbindings
-from neutron.tests import base
-
-
-class MyApp(object):
-    def __init__(self, _stdout):
-        self.stdout = _stdout
-
-
-class TestDebugCommands(base.BaseTestCase):
-    def setUp(self):
-        super(TestDebugCommands, self).setUp()
-        cfg.CONF.register_opts(interface.OPTS)
-        cfg.CONF.register_opts(config.EXT_NET_BRIDGE_OPTS)
-        common_config.init([])
-        config.register_interface_driver_opts_helper(cfg.CONF)
-
-        device_exists_p = mock.patch(
-            'neutron.agent.linux.ip_lib.device_exists', return_value=False)
-        device_exists_p.start()
-        namespace_p = mock.patch(
-            'neutron.agent.linux.ip_lib.IpNetnsCommand')
-        namespace_p.start()
-        ensure_namespace_p = mock.patch(
-            'neutron.agent.linux.ip_lib.IPWrapper.ensure_namespace')
-        ensure_namespace_p.start()
-        dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
-        driver_cls = dvr_cls_p.start()
-        mock_driver = mock.MagicMock()
-        mock_driver.DEV_NAME_LEN = (
-            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
-        mock_driver.get_device_name.return_value = 'tap12345678-12'
-        driver_cls.return_value = mock_driver
-        self.driver = mock_driver
-
-        client_cls_p = mock.patch('neutronclient.v2_0.client.Client')
-        client_cls = client_cls_p.start()
-        client_inst = mock.Mock()
-        client_cls.return_value = client_inst
-
-        fake_network = {'network': {'id': 'fake_net',
-                                    'tenant_id': 'fake_tenant',
-                                    'subnets': ['fake_subnet']}}
-        fake_port = {'port':
-                    {'id': 'fake_port',
-                     'device_owner': 'fake_device',
-                     'mac_address': 'aa:bb:cc:dd:ee:ff',
-                     'network_id': 'fake_net',
-                     'fixed_ips':
-                     [{'subnet_id': 'fake_subnet', 'ip_address': '10.0.0.3'}]
-                     }}
-        fake_ports = {'ports': [fake_port['port']]}
-        self.fake_ports = fake_ports
-        allocation_pools = [{'start': '10.0.0.2',
-                             'end': '10.0.0.254'}]
-        fake_subnet_v4 = {'subnet': {'name': 'fake_subnet_v4',
-                          'id': 'fake_subnet',
-                          'network_id': 'fake_net',
-                          'gateway_ip': '10.0.0.1',
-                          'dns_nameservers': ['10.0.0.2'],
-                          'host_routes': [],
-                          'cidr': '10.0.0.0/24',
-                          'allocation_pools': allocation_pools,
-                          'enable_dhcp': True,
-                          'ip_version': 4}}
-
-        client_inst.list_ports.return_value = fake_ports
-        client_inst.create_port.return_value = fake_port
-        client_inst.show_port.return_value = fake_port
-        client_inst.show_network.return_value = fake_network
-        client_inst.show_subnet.return_value = fake_subnet_v4
-        self.client = client_inst
-        mock_std = mock.Mock()
-        self.app = MyApp(mock_std)
-        self.app.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF,
-                                                             client_inst,
-                                                             mock_driver)
-
-    def _test_create_probe(self, device_owner):
-        cmd = commands.CreateProbe(self.app, None)
-        cmd_parser = cmd.get_parser('create_probe')
-        if device_owner == debug_agent.DEVICE_OWNER_COMPUTE_PROBE:
-            args = ['fake_net', '--device-owner', 'compute']
-        else:
-            args = ['fake_net']
-        parsed_args = cmd_parser.parse_args(args)
-        cmd.run(parsed_args)
-        fake_port = {'port':
-                    {'device_owner': device_owner,
-                     'admin_state_up': True,
-                     'network_id': 'fake_net',
-                     'tenant_id': 'fake_tenant',
-                     portbindings.HOST_ID: cfg.CONF.host,
-                     'fixed_ips': [{'subnet_id': 'fake_subnet'}],
-                     'device_id': socket.gethostname()}}
-        namespace = 'qprobe-fake_port'
-        self.client.assert_has_calls([mock.call.show_network('fake_net'),
-                                      mock.call.show_subnet('fake_subnet'),
-                                      mock.call.create_port(fake_port),
-                                      mock.call.show_subnet('fake_subnet')])
-        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
-                                      mock.call.plug('fake_net',
-                                                     'fake_port',
-                                                     'tap12345678-12',
-                                                     'aa:bb:cc:dd:ee:ff',
-                                                     bridge=None,
-                                                     namespace=namespace),
-                                      mock.call.init_l3('tap12345678-12',
-                                                        ['10.0.0.3/24'],
-                                                        namespace=namespace
-                                                        )])
-
-    def test_create_network_probe(self):
-        self._test_create_probe(debug_agent.DEVICE_OWNER_NETWORK_PROBE)
-
-    def test_create_nova_probe(self):
-        self._test_create_probe(debug_agent.DEVICE_OWNER_COMPUTE_PROBE)
-
-    def _test_create_probe_external(self, device_owner):
-        fake_network = {'network': {'id': 'fake_net',
-                                    'tenant_id': 'fake_tenant',
-                                    'router:external': True,
-                                    'subnets': ['fake_subnet']}}
-        self.client.show_network.return_value = fake_network
-        cmd = commands.CreateProbe(self.app, None)
-        cmd_parser = cmd.get_parser('create_probe')
-        if device_owner == debug_agent.DEVICE_OWNER_COMPUTE_PROBE:
-            args = ['fake_net', '--device-owner', 'compute']
-        else:
-            args = ['fake_net']
-        parsed_args = cmd_parser.parse_args(args)
-        cmd.run(parsed_args)
-        fake_port = {'port':
-                    {'device_owner': device_owner,
-                     'admin_state_up': True,
-                     'network_id': 'fake_net',
-                     'tenant_id': 'fake_tenant',
-                     portbindings.HOST_ID: cfg.CONF.host,
-                     'fixed_ips': [{'subnet_id': 'fake_subnet'}],
-                     'device_id': socket.gethostname()}}
-        namespace = 'qprobe-fake_port'
-        self.client.assert_has_calls([mock.call.show_network('fake_net'),
-                                      mock.call.show_subnet('fake_subnet'),
-                                      mock.call.create_port(fake_port),
-                                      mock.call.show_subnet('fake_subnet')])
-        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
-                                      mock.call.plug('fake_net',
-                                                     'fake_port',
-                                                     'tap12345678-12',
-                                                     'aa:bb:cc:dd:ee:ff',
-                                                     bridge='br-ex',
-                                                     namespace=namespace),
-                                      mock.call.init_l3('tap12345678-12',
-                                                        ['10.0.0.3/24'],
-                                                        namespace=namespace
-                                                        )])
-
-    def test_create_network_probe_external(self):
-        self._test_create_probe_external(
-            debug_agent.DEVICE_OWNER_NETWORK_PROBE)
-
-    def test_create_nova_probe_external(self):
-        self._test_create_probe_external(
-            debug_agent.DEVICE_OWNER_COMPUTE_PROBE)
-
-    def test_delete_probe(self):
-        cmd = commands.DeleteProbe(self.app, None)
-        cmd_parser = cmd.get_parser('delete_probe')
-        args = ['fake_port']
-        parsed_args = cmd_parser.parse_args(args)
-        cmd.run(parsed_args)
-        namespace = 'qprobe-fake_port'
-        self.client.assert_has_calls([mock.call.show_port('fake_port'),
-                                      mock.call.show_network('fake_net'),
-                                      mock.call.show_subnet('fake_subnet'),
-                                      mock.call.delete_port('fake_port')])
-        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
-                                      mock.call.unplug('tap12345678-12',
-                                                       namespace=namespace,
-                                                       bridge=None)])
-
-    def test_delete_probe_external(self):
-        fake_network = {'network': {'id': 'fake_net',
-                                    'tenant_id': 'fake_tenant',
-                                    'router:external': True,
-                                    'subnets': ['fake_subnet']}}
-        self.client.show_network.return_value = fake_network
-        cmd = commands.DeleteProbe(self.app, None)
-        cmd_parser = cmd.get_parser('delete_probe')
-        args = ['fake_port']
-        parsed_args = cmd_parser.parse_args(args)
-        cmd.run(parsed_args)
-        namespace = 'qprobe-fake_port'
-        self.client.assert_has_calls([mock.call.show_port('fake_port'),
-                                      mock.call.show_network('fake_net'),
-                                      mock.call.show_subnet('fake_subnet'),
-                                      mock.call.delete_port('fake_port')])
-        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
-                                      mock.call.unplug('tap12345678-12',
-                                                       namespace=namespace,
-                                                       bridge='br-ex')])
-
-    def test_list_probe(self):
-        cmd = commands.ListProbe(self.app, None)
-        cmd_parser = cmd.get_parser('list_probe')
-        args = []
-        parsed_args = cmd_parser.parse_args(args)
-        cmd.run(parsed_args)
-        self.client.assert_has_calls(
-            [mock.call.list_ports(
-                device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE,
-                              debug_agent.DEVICE_OWNER_COMPUTE_PROBE])])
-
-    def test_exec_command(self):
-        cmd = commands.ExecProbe(self.app, None)
-        cmd_parser = cmd.get_parser('exec_command')
-        args = ['fake_port', 'fake_command']
-        parsed_args = cmd_parser.parse_args(args)
-        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
-            cmd.run(parsed_args)
-            ns.assert_has_calls([mock.call.execute(mock.ANY)])
-        self.client.assert_has_calls([mock.call.show_port('fake_port')])
-
-    def test_clear_probe(self):
-        cmd = commands.ClearProbe(self.app, None)
-        cmd_parser = cmd.get_parser('clear_probe')
-        args = []
-        parsed_args = cmd_parser.parse_args(args)
-        cmd.run(parsed_args)
-        namespace = 'qprobe-fake_port'
-        self.client.assert_has_calls(
-            [mock.call.list_ports(
-                device_id=socket.gethostname(),
-                device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE,
-                              debug_agent.DEVICE_OWNER_COMPUTE_PROBE]),
-             mock.call.show_port('fake_port'),
-             mock.call.show_network('fake_net'),
-             mock.call.show_subnet('fake_subnet'),
-             mock.call.delete_port('fake_port')])
-        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
-                                      mock.call.unplug('tap12345678-12',
-                                                       namespace=namespace,
-                                                       bridge=None)])
-
-    def test_ping_all_with_ensure_port(self):
-        fake_ports = self.fake_ports
-
-        def fake_port_list(network_id=None, device_owner=None, device_id=None):
-            if network_id:
-                # In order to test ensure_port, return []
-                return {'ports': []}
-            return fake_ports
-        self.client.list_ports.side_effect = fake_port_list
-        cmd = commands.PingAll(self.app, None)
-        cmd_parser = cmd.get_parser('ping_all')
-        args = []
-        parsed_args = cmd_parser.parse_args(args)
-        namespace = 'qprobe-fake_port'
-        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
-            cmd.run(parsed_args)
-            ns.assert_has_calls([mock.call.execute(mock.ANY)])
-        fake_port = {'port':
-                    {'device_owner': debug_agent.DEVICE_OWNER_NETWORK_PROBE,
-                     'admin_state_up': True,
-                     'network_id': 'fake_net',
-                     'tenant_id': 'fake_tenant',
-                     portbindings.HOST_ID: cfg.CONF.host,
-                     'fixed_ips': [{'subnet_id': 'fake_subnet'}],
-                     'device_id': socket.gethostname()}}
-        expected = [mock.call.show_network('fake_net'),
-                    mock.call.show_subnet('fake_subnet'),
-                    mock.call.create_port(fake_port),
-                    mock.call.show_subnet('fake_subnet')]
-        self.client.assert_has_calls(expected)
-        self.driver.assert_has_calls([mock.call.init_l3('tap12345678-12',
-                                                        ['10.0.0.3/24'],
-                                                        namespace=namespace
-                                                        )])
-
-    def test_ping_all(self):
-        cmd = commands.PingAll(self.app, None)
-        cmd_parser = cmd.get_parser('ping_all')
-        args = []
-        parsed_args = cmd_parser.parse_args(args)
-        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
-            cmd.run(parsed_args)
-            ns.assert_has_calls([mock.call.execute(mock.ANY)])
-        expected = [mock.call.list_ports(),
-                    mock.call.list_ports(
-                        network_id='fake_net',
-                        device_owner=debug_agent.DEVICE_OWNER_NETWORK_PROBE,
-                        device_id=socket.gethostname()),
-                    mock.call.show_subnet('fake_subnet'),
-                    mock.call.show_port('fake_port')]
-        self.client.assert_has_calls(expected)
-
-    def test_ping_all_v6(self):
-        fake_subnet_v6 = {'subnet': {'name': 'fake_v6',
-                          'ip_version': 6}}
-        self.client.show_subnet.return_value = fake_subnet_v6
-        cmd = commands.PingAll(self.app, None)
-        cmd_parser = cmd.get_parser('ping_all')
-        args = []
-        parsed_args = cmd_parser.parse_args(args)
-        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
-            cmd.run(parsed_args)
-            ns.assert_has_calls([mock.call.execute(mock.ANY)])
-        self.client.assert_has_calls([mock.call.list_ports()])
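
All of the probe tests above drive their commands the same way: instantiate the command with a fake app, obtain its argparse parser via get_parser(), parse a token list, and call run(). A stripped-down stand-in showing just that loop (CreateProbeSketch is illustrative; the real commands live in neutron.debug.commands and carry cliff plumbing):

import argparse

class CreateProbeSketch(object):
    """Illustrative stand-in for the cliff-style commands tested above."""

    def __init__(self, app, app_args):
        self.app = app

    def get_parser(self, prog_name):
        parser = argparse.ArgumentParser(prog=prog_name)
        parser.add_argument('network_id')
        parser.add_argument('--device-owner', default='network',
                            choices=['network', 'compute'])
        return parser

    def run(self, parsed_args):
        return (parsed_args.network_id, parsed_args.device_owner)

cmd = CreateProbeSketch(app=None, app_args=None)
parsed = cmd.get_parser('create_probe').parse_args(
    ['fake_net', '--device-owner', 'compute'])
assert cmd.run(parsed) == ('fake_net', 'compute')
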
diff --git a/neutron/tests/unit/dummy_plugin.py b/neutron/tests/unit/dummy_plugin.py
deleted file mode 100644 (file)
index f050a81..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_utils import uuidutils
-
-from neutron.api import extensions
-from neutron.api.v2 import base
-from neutron.common import exceptions
-from neutron.db import servicetype_db
-from neutron.extensions import servicetype
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.services import service_base
-
-
-RESOURCE_NAME = "dummy"
-COLLECTION_NAME = "%ss" % RESOURCE_NAME
-
-# Attribute Map for dummy resource
-RESOURCE_ATTRIBUTE_MAP = {
-    COLLECTION_NAME: {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True},
-        'name': {'allow_post': True, 'allow_put': True,
-                 'validate': {'type:string': None},
-                 'is_visible': True, 'default': ''},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'is_visible': True},
-        'service_type': {'allow_post': True,
-                         'allow_put': False,
-                         'validate': {'type:servicetype_ref': None},
-                         'is_visible': True,
-                         'default': None}
-    }
-}
-
-
-class Dummy(object):
-
-    @classmethod
-    def get_name(cls):
-        return "dummy"
-
-    @classmethod
-    def get_alias(cls):
-        return "dummy"
-
-    @classmethod
-    def get_description(cls):
-        return "Dummy stuff"
-
-    @classmethod
-    def get_updated(cls):
-        return "2012-11-20T10:00:00-00:00"
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Extended Resource for dummy management."""
-        n_mgr = manager.NeutronManager.get_instance()
-        dummy_inst = n_mgr.get_service_plugins()['DUMMY']
-        controller = base.create_resource(
-            COLLECTION_NAME, RESOURCE_NAME, dummy_inst,
-            RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME])
-        return [extensions.ResourceExtension(COLLECTION_NAME,
-                                             controller)]
-
-
-class DummyServicePlugin(service_base.ServicePluginBase):
-    """This is a simple plugin for managing instances of a fictional 'dummy'
-        service. This plugin is provided as a proof-of-concept of how an
-        advanced service might leverage the service type extension.
-        Ideally, instances of real advanced services, such as load balancing
-        or VPN, will adopt a similar solution.
-    """
-
-    supported_extension_aliases = ['dummy', servicetype.EXT_ALIAS]
-    path_prefix = "/dummy_svc"
-    agent_notifiers = {'dummy': 'dummy_agent_notifier'}
-
-    def __init__(self):
-        self.svctype_mgr = servicetype_db.ServiceTypeManager.get_instance()
-        self.dummys = {}
-
-    def get_plugin_type(self):
-        return constants.DUMMY
-
-    def get_plugin_description(self):
-        return "Neutron Dummy Service Plugin"
-
-    def get_dummys(self, context, filters, fields):
-        return self.dummys.values()
-
-    def get_dummy(self, context, id, fields):
-        try:
-            return self.dummys[id]
-        except KeyError:
-            raise exceptions.NotFound()
-
-    def create_dummy(self, context, dummy):
-        d = dummy['dummy']
-        d['id'] = uuidutils.generate_uuid()
-        self.dummys[d['id']] = d
-        self.svctype_mgr.increase_service_type_refcount(context,
-                                                        d['service_type'])
-        return d
-
-    def update_dummy(self, context, id, dummy):
-        pass
-
-    def delete_dummy(self, context, id):
-        try:
-            svc_type_id = self.dummys[id]['service_type']
-            del self.dummys[id]
-            self.svctype_mgr.decrease_service_type_refcount(context,
-                                                            svc_type_id)
-        except KeyError:
-            raise exceptions.NotFound()
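
Worth noting in the deleted plugin above: create_dummy and delete_dummy keep the service-type reference count in step with the in-memory dummys dict, so a service type cannot be retired while instances still reference it. A toy illustration of that pairing, with a plain dict standing in for ServiceTypeManager:

import uuid

class ServiceTypeRefCounts(object):
    """Toy stand-in for servicetype_db.ServiceTypeManager."""
    def __init__(self):
        self.counts = {}

    def increase(self, service_type):
        self.counts[service_type] = self.counts.get(service_type, 0) + 1

    def decrease(self, service_type):
        self.counts[service_type] -= 1

refs = ServiceTypeRefCounts()
dummys = {}

def create_dummy(dummy):
    dummy['id'] = str(uuid.uuid4())
    dummys[dummy['id']] = dummy
    refs.increase(dummy['service_type'])   # every create pairs with a +1
    return dummy

def delete_dummy(dummy_id):
    service_type = dummys.pop(dummy_id)['service_type']
    refs.decrease(service_type)            # every delete pairs with a -1

d = create_dummy({'service_type': 'DUMMY'})
delete_dummy(d['id'])
assert refs.counts['DUMMY'] == 0
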
diff --git a/neutron/tests/unit/extension_stubs.py b/neutron/tests/unit/extension_stubs.py
deleted file mode 100644 (file)
index 358769e..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from neutron.api import extensions
-from neutron import wsgi
-
-
-class StubExtension(extensions.ExtensionDescriptor):
-
-    def __init__(self, alias="stub_extension"):
-        self.alias = alias
-
-    def get_name(self):
-        return "Stub Extension"
-
-    def get_alias(self):
-        return self.alias
-
-    def get_description(self):
-        return ""
-
-    def get_updated(self):
-        return ""
-
-
-class StubPlugin(object):
-
-    def __init__(self, supported_extensions=None):
-        supported_extensions = supported_extensions or []
-        self.supported_extension_aliases = supported_extensions
-
-
-class ExtensionExpectingPluginInterface(StubExtension):
-    """Expect plugin to implement all methods in StubPluginInterface.
-
-    This extension expects plugin to implement all the methods defined
-    in StubPluginInterface.
-    """
-
-    def get_plugin_interface(self):
-        return StubPluginInterface
-
-
-class StubPluginInterface(extensions.PluginInterface):
-
-    @abc.abstractmethod
-    def get_foo(self, bar=None):
-        pass
-
-
-class StubBaseAppController(wsgi.Controller):
-
-    def index(self, request):
-        return "base app index"
-
-    def show(self, request, id):
-        return {'fort': 'knox'}
-
-    def update(self, request, id):
-        return {'uneditable': 'original_value'}
diff --git a/neutron/tests/unit/extensions/__init__.py b/neutron/tests/unit/extensions/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/extensions/base.py b/neutron/tests/unit/extensions/base.py
deleted file mode 100644 (file)
index 75ba95a..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2014 Intel Corporation.
-# Copyright 2014 Isaku Yamahata <isaku.yamahata at intel com>
-#                               <isaku.yamahata at gmail com>
-# All Rights Reserved.
-#
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import uuid
-
-import mock
-from oslo_config import cfg
-from webob import exc
-import webtest
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron import quota
-from neutron.tests import tools
-from neutron.tests.unit.api import test_extensions
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit import testlib_api
-
-
-class ExtensionTestCase(testlib_api.WebTestCase):
-
-    def _setUpExtension(self, plugin, service_type,
-                        resource_attribute_map, extension_class,
-                        resource_prefix, plural_mappings=None,
-                        translate_resource_name=False,
-                        allow_pagination=False, allow_sorting=False,
-                        supported_extension_aliases=None,
-                        use_quota=False,
-                        ):
-
-        self._resource_prefix = resource_prefix
-        self._plural_mappings = plural_mappings or {}
-        self._translate_resource_name = translate_resource_name
-
-        # Ensure existing ExtensionManager is not used
-        extensions.PluginAwareExtensionManager._instance = None
-
-        self.useFixture(tools.AttributeMapMemento())
-
-        # Create the default configurations
-        self.config_parse()
-
-        # Just stubbing the core plugin with the plugin under test.
-        self.setup_coreplugin(plugin)
-        cfg.CONF.set_override('core_plugin', plugin)
-        if service_type:
-            cfg.CONF.set_override('service_plugins', [plugin])
-
-        self._plugin_patcher = mock.patch(plugin, autospec=True)
-        self.plugin = self._plugin_patcher.start()
-        instance = self.plugin.return_value
-        if service_type:
-            instance.get_plugin_type.return_value = service_type
-        if supported_extension_aliases is not None:
-            instance.supported_extension_aliases = supported_extension_aliases
-        if allow_pagination:
-            cfg.CONF.set_override('allow_pagination', True)
-            # instance.__native_pagination_support = True
-            native_pagination_attr_name = ("_%s__native_pagination_support"
-                                           % instance.__class__.__name__)
-            setattr(instance, native_pagination_attr_name, True)
-        if allow_sorting:
-            cfg.CONF.set_override('allow_sorting', True)
-            # instance.__native_sorting_support = True
-            native_sorting_attr_name = ("_%s__native_sorting_support"
-                                        % instance.__class__.__name__)
-            setattr(instance, native_sorting_attr_name, True)
-        if use_quota:
-            quota.QUOTAS._driver = None
-            cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
-                                  group='QUOTAS')
-        setattr(instance, 'path_prefix', resource_prefix)
-
-        class ExtensionTestExtensionManager(object):
-            def get_resources(self):
-                # Add the resources to the global attribute map
-                # This is done here as the setup process won't
-                # initialize the main API router which extends
-                # the global attribute map
-                attributes.RESOURCE_ATTRIBUTE_MAP.update(
-                    resource_attribute_map)
-                return extension_class.get_resources()
-
-            def get_actions(self):
-                return []
-
-            def get_request_extensions(self):
-                return []
-
-        ext_mgr = ExtensionTestExtensionManager()
-        self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
-        self.api = webtest.TestApp(self.ext_mdw)
-
-    def _test_entity_delete(self, entity):
-        """Does the entity deletion based on naming convention."""
-        entity_id = str(uuid.uuid4())
-        path = self._resource_prefix + '/' if self._resource_prefix else ''
-        path += self._plural_mappings.get(entity, entity + 's')
-        if self._translate_resource_name:
-            path = path.replace('_', '-')
-        res = self.api.delete(
-            test_base._get_path(path, id=entity_id, fmt=self.fmt))
-        delete_entity = getattr(self.plugin.return_value, "delete_" + entity)
-        delete_entity.assert_called_with(mock.ANY, entity_id)
-        self.assertEqual(res.status_int, exc.HTTPNoContent.code)
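
_test_entity_delete above builds its REST path purely by convention: optional resource prefix, then the plural form (defaulting to entity + 's'), with underscores optionally translated to dashes. That convention in isolation, as a small sketch:

def entity_path(entity, resource_prefix='', plural_mappings=None,
                translate_resource_name=False):
    """Mirror the path-building convention used by _test_entity_delete."""
    path = resource_prefix + '/' if resource_prefix else ''
    path += (plural_mappings or {}).get(entity, entity + 's')
    return path.replace('_', '-') if translate_resource_name else path

assert entity_path('dummy') == 'dummys'
assert entity_path('policy', plural_mappings={'policy': 'policies'}) == 'policies'
assert entity_path('address_scope', translate_resource_name=True) == 'address-scopes'
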
diff --git a/neutron/tests/unit/extensions/extendedattribute.py b/neutron/tests/unit/extensions/extendedattribute.py
deleted file mode 100644 (file)
index 2f2f2de..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api import extensions
-
-EXTENDED_ATTRIBUTE = 'extended_attribute'
-EXTENDED_ATTRIBUTES_2_0 = {
-    'ext_test_resources': {
-        EXTENDED_ATTRIBUTE: {'allow_post': True, 'allow_put': False,
-                             'validate': {'type:uuid_or_none': None},
-                             'default': None, 'is_visible': True},
-    }
-}
-
-
-class Extendedattribute(extensions.ExtensionDescriptor):
-    """Extension class supporting extended attribute for router."""
-
-    @classmethod
-    def get_name(cls):
-        return "Extended Extension Attributes"
-
-    @classmethod
-    def get_alias(cls):
-        return "extended-ext-attr"
-
-    @classmethod
-    def get_description(cls):
-        return "Provides extended_attr attribute to router"
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-02-05T00:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/tests/unit/extensions/extensionattribute.py b/neutron/tests/unit/extensions/extensionattribute.py
deleted file mode 100644 (file)
index dcf2c8c..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from neutron.api import extensions
-from neutron.api.v2 import base
-from neutron import manager
-from neutron.quota import resource_registry
-
-
-# Attribute Map
-RESOURCE_ATTRIBUTE_MAP = {
-    'ext_test_resources': {
-        'id': {'allow_post': False, 'allow_put': False,
-               'validate': {'type:uuid': None},
-               'is_visible': True},
-        'name': {'allow_post': True, 'allow_put': True,
-                 'validate': {'type:string': None},
-                 'is_visible': True, 'default': ''},
-        'tenant_id': {'allow_post': True, 'allow_put': False,
-                      'required_by_policy': True,
-                      'validate': {'type:string': None},
-                      'is_visible': True},
-    }
-}
-
-
-class Extensionattribute(extensions.ExtensionDescriptor):
-
-    @classmethod
-    def get_name(cls):
-        return "Extension Test Resource"
-
-    @classmethod
-    def get_alias(cls):
-        return "ext-obj-test"
-
-    @classmethod
-    def get_description(cls):
-        return "Extension Test Resource"
-
-    @classmethod
-    def get_updated(cls):
-        return "2013-02-05T10:00:00-00:00"
-
-    def update_attributes_map(self, attributes):
-        super(Extensionattribute, self).update_attributes_map(
-            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
-
-    @classmethod
-    def get_resources(cls):
-        """Returns Ext Resources."""
-        exts = []
-        plugin = manager.NeutronManager.get_plugin()
-        resource_name = 'ext_test_resource'
-        collection_name = resource_name + "s"
-        params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
-
-        resource_registry.register_resource_by_name(resource_name)
-
-        controller = base.create_resource(collection_name,
-                                          resource_name,
-                                          plugin, params,
-                                          member_actions={})
-
-        ex = extensions.ResourceExtension(collection_name,
-                                          controller,
-                                          member_actions={})
-        exts.append(ex)
-
-        return exts
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return RESOURCE_ATTRIBUTE_MAP
-        else:
-            return {}
-
-
-class ExtensionObjectTestPluginBase(object):
-
-    @abc.abstractmethod
-    def create_ext_test_resource(self, context, router):
-        pass
-
-    @abc.abstractmethod
-    def get_ext_test_resource(self, context, id, fields=None):
-        pass
diff --git a/neutron/tests/unit/extensions/foxinsocks.py b/neutron/tests/unit/extensions/foxinsocks.py
deleted file mode 100644 (file)
index d42f2b4..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from oslo_serialization import jsonutils
-
-from neutron.api import extensions
-from neutron import wsgi
-
-
-class FoxInSocksController(wsgi.Controller):
-
-    def index(self, request):
-        return "Try to say this Mr. Knox, sir..."
-
-
-class FoxInSocksPluginInterface(extensions.PluginInterface):
-
-    @abc.abstractmethod
-    def method_to_support_foxnsox_extension(self):
-        pass
-
-
-class Foxinsocks(extensions.ExtensionDescriptor):
-
-    def __init__(self):
-        pass
-
-    def get_plugin_interface(self):
-        return FoxInSocksPluginInterface
-
-    def get_name(self):
-        return "Fox In Socks"
-
-    def get_alias(self):
-        return "FOXNSOX"
-
-    def get_description(self):
-        return "The Fox In Socks Extension"
-
-    def get_updated(self):
-        return "2011-01-22T13:25:27-06:00"
-
-    def get_resources(self):
-        resources = []
-        resource = extensions.ResourceExtension('foxnsocks',
-                                                FoxInSocksController())
-        resources.append(resource)
-        return resources
-
-    def get_actions(self):
-        return [extensions.ActionExtension('dummy_resources',
-                                           'FOXNSOX:add_tweedle',
-                                           self._add_tweedle_handler),
-                extensions.ActionExtension('dummy_resources',
-                                           'FOXNSOX:delete_tweedle',
-                                           self._delete_tweedle_handler)]
-
-    def get_request_extensions(self):
-        request_exts = []
-
-        def _goose_handler(req, res):
-            # NOTE: This only handles JSON responses.
-            # You can use the content type header to test for XML.
-            data = jsonutils.loads(res.body)
-            data['FOXNSOX:googoose'] = req.GET.get('chewing')
-            res.body = jsonutils.dumps(data).encode('utf-8')
-            return res
-
-        req_ext1 = extensions.RequestExtension('GET', '/dummy_resources/:(id)',
-                                               _goose_handler)
-        request_exts.append(req_ext1)
-
-        def _bands_handler(req, res):
-            # NOTE: This only handles JSON responses.
-            # You can use the content type header to test for XML.
-            data = jsonutils.loads(res.body)
-            data['FOXNSOX:big_bands'] = 'Pig Bands!'
-            res.body = jsonutils.dumps(data).encode('utf-8')
-            return res
-
-        req_ext2 = extensions.RequestExtension('GET', '/dummy_resources/:(id)',
-                                               _bands_handler)
-        request_exts.append(req_ext2)
-        return request_exts
-
-    def _add_tweedle_handler(self, input_dict, req, id):
-        return "Tweedle {0} Added.".format(
-            input_dict['FOXNSOX:add_tweedle']['name'])
-
-    def _delete_tweedle_handler(self, input_dict, req, id):
-        return "Tweedle {0} Deleted.".format(
-            input_dict['FOXNSOX:delete_tweedle']['name'])
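
The two request-extension handlers above share one mechanic: decode the JSON response body, graft an extension-namespaced key onto it, and re-encode. Stripped of the WSGI plumbing, and with the standard json module standing in for oslo_serialization.jsonutils, the transform is:

import json  # stand-in for oslo_serialization.jsonutils

def add_extension_key(body, key, value):
    """Decode a JSON response body, graft one namespaced key, re-encode."""
    data = json.loads(body)
    data[key] = value
    return json.dumps(data).encode('utf-8')

new_body = add_extension_key(b'{"id": "42"}', 'FOXNSOX:big_bands', 'Pig Bands!')
assert json.loads(new_body)['FOXNSOX:big_bands'] == 'Pig Bands!'
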
diff --git a/neutron/tests/unit/extensions/test_address_scope.py b/neutron/tests/unit/extensions/test_address_scope.py
deleted file mode 100644 (file)
index be3f816..0000000
+++ /dev/null
@@ -1,416 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import netaddr
-
-import webob.exc
-
-from neutron.api.v2 import attributes as attr
-from neutron.common import constants
-from neutron import context
-from neutron.db import address_scope_db
-from neutron.db import db_base_plugin_v2
-from neutron.extensions import address_scope as ext_address_scope
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_address_scope.'
-                   'AddressScopeTestPlugin')
-
-
-class AddressScopeTestExtensionManager(object):
-
-    def get_resources(self):
-        # Add the resources to the global attribute map
-        # This is done here as the setup process won't
-        # initialize the main API router which extends
-        # the global attribute map
-        attr.RESOURCE_ATTRIBUTE_MAP.update(
-            ext_address_scope.RESOURCE_ATTRIBUTE_MAP)
-        return ext_address_scope.Address_scope.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    def _create_address_scope(self, fmt, ip_version=constants.IP_VERSION_4,
-                              expected_res_status=None, admin=False, **kwargs):
-        address_scope = {'address_scope': {}}
-        address_scope['address_scope']['ip_version'] = ip_version
-        for k, v in kwargs.items():
-            address_scope['address_scope'][k] = str(v)
-
-        address_scope_req = self.new_create_request('address-scopes',
-                                                    address_scope, fmt)
-
-        if not admin:
-            neutron_context = context.Context('', kwargs.get('tenant_id',
-                                                             self._tenant_id))
-            address_scope_req.environ['neutron.context'] = neutron_context
-
-        address_scope_res = address_scope_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(address_scope_res.status_int, expected_res_status)
-        return address_scope_res
-
-    def _make_address_scope(self, fmt, ip_version, admin=False, **kwargs):
-        res = self._create_address_scope(fmt, ip_version,
-                                         admin=admin, **kwargs)
-        if res.status_int >= webob.exc.HTTPClientError.code:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(fmt, res)
-
-    @contextlib.contextmanager
-    def address_scope(self, ip_version=constants.IP_VERSION_4,
-                      admin=False, **kwargs):
-        addr_scope = self._make_address_scope(self.fmt, ip_version,
-                                              admin, **kwargs)
-        yield addr_scope
-
-    def _test_create_address_scope(self, ip_version=constants.IP_VERSION_4,
-                                   admin=False, expected=None, **kwargs):
-        keys = kwargs.copy()
-        keys.setdefault('tenant_id', self._tenant_id)
-        with self.address_scope(ip_version,
-                                admin=admin, **keys) as addr_scope:
-            keys['ip_version'] = ip_version
-            self._validate_resource(addr_scope, keys, 'address_scope')
-            if expected:
-                self._compare_resource(addr_scope, expected, 'address_scope')
-        return addr_scope
-
-    def _test_update_address_scope(self, addr_scope_id, data, admin=False,
-                                   expected=None, tenant_id=None):
-        update_req = self.new_update_request(
-            'address-scopes', data, addr_scope_id)
-        if not admin:
-            neutron_context = context.Context('', tenant_id or self._tenant_id)
-            update_req.environ['neutron.context'] = neutron_context
-
-        update_res = update_req.get_response(self.ext_api)
-        if expected:
-            addr_scope = self.deserialize(self.fmt, update_res)
-            self._compare_resource(addr_scope, expected, 'address_scope')
-            return addr_scope
-
-        return update_res
-
-
-class AddressScopeTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                             address_scope_db.AddressScopeDbMixin):
-    __native_pagination_support = True
-    __native_sorting_support = True
-
-    supported_extension_aliases = ["address-scope"]
-
-
-class TestAddressScope(AddressScopeTestCase):
-
-    def setUp(self):
-        plugin = DB_PLUGIN_KLASS
-        ext_mgr = AddressScopeTestExtensionManager()
-        super(TestAddressScope, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-
-    def test_create_address_scope_ipv4(self):
-        expected_addr_scope = {'name': 'foo-address-scope',
-                               'tenant_id': self._tenant_id,
-                               'shared': False,
-                               'ip_version': constants.IP_VERSION_4}
-        self._test_create_address_scope(name='foo-address-scope',
-                                        expected=expected_addr_scope)
-
-    def test_create_address_scope_ipv6(self):
-        expected_addr_scope = {'name': 'foo-address-scope',
-                               'tenant_id': self._tenant_id,
-                               'shared': False,
-                               'ip_version': constants.IP_VERSION_6}
-        self._test_create_address_scope(constants.IP_VERSION_6,
-                                        name='foo-address-scope',
-                                        expected=expected_addr_scope)
-
-    def test_create_address_scope_empty_name(self):
-        expected_addr_scope = {'name': '',
-                               'tenant_id': self._tenant_id,
-                               'shared': False}
-        self._test_create_address_scope(name='', expected=expected_addr_scope)
-
-        # no name specified
-        self._test_create_address_scope(expected=expected_addr_scope)
-
-    def test_create_address_scope_shared_admin(self):
-        expected_addr_scope = {'name': 'foo-address-scope', 'shared': True}
-        self._test_create_address_scope(name='foo-address-scope', admin=True,
-                                        shared=True,
-                                        expected=expected_addr_scope)
-
-    def test_created_address_scope_shared_non_admin(self):
-        res = self._create_address_scope(self.fmt, name='foo-address-scope',
-                                         tenant_id=self._tenant_id,
-                                         admin=False, shared=True)
-        self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int)
-
-    def test_created_address_scope_specify_id(self):
-        res = self._create_address_scope(self.fmt, name='foo-address-scope',
-                                         id='foo-id')
-        self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
-
-    def test_delete_address_scope(self):
-        with self.address_scope(name='foo-address-scope') as addr_scope:
-            self._delete('address-scopes', addr_scope['address_scope']['id'])
-            self._show('address-scopes', addr_scope['address_scope']['id'],
-                       expected_code=webob.exc.HTTPNotFound.code)
-
-    def test_update_address_scope(self):
-        addr_scope = self._test_create_address_scope(name='foo-address-scope')
-        data = {'address_scope': {'name': 'bar-address-scope'}}
-        self._test_update_address_scope(addr_scope['address_scope']['id'],
-                                        data, expected=data['address_scope'])
-
-    def test_update_address_scope_shared_true_admin(self):
-        addr_scope = self._test_create_address_scope(name='foo-address-scope')
-        data = {'address_scope': {'shared': True}}
-        self._test_update_address_scope(addr_scope['address_scope']['id'],
-                                        data, admin=True,
-                                        expected=data['address_scope'])
-
-    def test_update_address_scope_shared_true_non_admin(self):
-        addr_scope = self._test_create_address_scope(name='foo-address-scope')
-        data = {'address_scope': {'shared': True}}
-        res = self._test_update_address_scope(
-            addr_scope['address_scope']['id'], data, admin=False)
-        self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int)
-
-    def test_update_address_scope_shared_false_admin(self):
-        addr_scope = self._test_create_address_scope(name='foo-address-scope',
-                                                     admin=True, shared=True)
-        data = {'address_scope': {'shared': False}}
-        res = self._test_update_address_scope(
-            addr_scope['address_scope']['id'], data, admin=True)
-        self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
-
-    def test_get_address_scope(self):
-        addr_scope = self._test_create_address_scope(name='foo-address-scope')
-        req = self.new_show_request('address-scopes',
-                                    addr_scope['address_scope']['id'])
-        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-        self.assertEqual(addr_scope['address_scope']['id'],
-                         res['address_scope']['id'])
-
-    def test_get_address_scope_different_tenants_not_shared(self):
-        addr_scope = self._test_create_address_scope(name='foo-address-scope')
-        req = self.new_show_request('address-scopes',
-                                    addr_scope['address_scope']['id'])
-        neutron_context = context.Context('', 'not-the-owner')
-        req.environ['neutron.context'] = neutron_context
-        res = req.get_response(self.ext_api)
-        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
-
-    def test_get_address_scope_different_tenants_shared(self):
-        addr_scope = self._test_create_address_scope(name='foo-address-scope',
-                                                     shared=True, admin=True)
-        req = self.new_show_request('address-scopes',
-                                    addr_scope['address_scope']['id'])
-        neutron_context = context.Context('', 'test-tenant-2')
-        req.environ['neutron.context'] = neutron_context
-        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-        self.assertEqual(addr_scope['address_scope']['id'],
-                         res['address_scope']['id'])
-
-    def test_list_address_scopes(self):
-        self._test_create_address_scope(name='foo-address-scope')
-        self._test_create_address_scope(constants.IP_VERSION_6,
-                                        name='bar-address-scope')
-        res = self._list('address-scopes')
-        self.assertEqual(2, len(res['address_scopes']))
-
-    def test_list_address_scopes_different_tenants_shared(self):
-        self._test_create_address_scope(name='foo-address-scope', shared=True,
-                                        admin=True)
-        admin_res = self._list('address-scopes')
-        mortal_res = self._list(
-            'address-scopes',
-            neutron_context=context.Context('', 'not-the-owner'))
-        self.assertEqual(1, len(admin_res['address_scopes']))
-        self.assertEqual(1, len(mortal_res['address_scopes']))
-
-    def test_list_address_scopes_different_tenants_not_shared(self):
-        self._test_create_address_scope(constants.IP_VERSION_6,
-                                        name='foo-address-scope')
-        admin_res = self._list('address-scopes')
-        mortal_res = self._list(
-            'address-scopes',
-            neutron_context=context.Context('', 'not-the-owner'))
-        self.assertEqual(1, len(admin_res['address_scopes']))
-        self.assertEqual(0, len(mortal_res['address_scopes']))
-
-
-class TestSubnetPoolsWithAddressScopes(AddressScopeTestCase):
-    def setUp(self):
-        plugin = DB_PLUGIN_KLASS
-        ext_mgr = AddressScopeTestExtensionManager()
-        super(TestSubnetPoolsWithAddressScopes, self).setUp(plugin=plugin,
-                                                            ext_mgr=ext_mgr)
-
-    def _test_create_subnetpool(self, prefixes, expected=None,
-                                admin=False, **kwargs):
-        keys = kwargs.copy()
-        keys.setdefault('tenant_id', self._tenant_id)
-        with self.subnetpool(prefixes, admin, **keys) as subnetpool:
-            self._validate_resource(subnetpool, keys, 'subnetpool')
-            if expected:
-                self._compare_resource(subnetpool, expected, 'subnetpool')
-        return subnetpool
-
-    def test_create_subnetpool_associate_address_scope(self):
-        with self.address_scope(name='foo-address-scope') as addr_scope:
-            address_scope_id = addr_scope['address_scope']['id']
-            subnet = netaddr.IPNetwork('10.10.10.0/24')
-            expected = {'address_scope_id': address_scope_id}
-            self._test_create_subnetpool([subnet.cidr], expected=expected,
-                                         name='foo-subnetpool',
-                                         min_prefixlen='21',
-                                         address_scope_id=address_scope_id)
-
-    def test_create_subnetpool_associate_invalid_address_scope(self):
-        self.assertRaises(
-            webob.exc.HTTPClientError, self._test_create_subnetpool, [],
-            min_prefixlen='21', address_scope_id='foo-addr-scope-id')
-
-    def test_create_subnetpool_assoc_address_scope_with_prefix_intersect(self):
-        with self.address_scope(name='foo-address-scope') as addr_scope:
-            address_scope_id = addr_scope['address_scope']['id']
-            subnet = netaddr.IPNetwork('10.10.10.0/24')
-            expected = {'address_scope_id': address_scope_id}
-            self._test_create_subnetpool([subnet.cidr], expected=expected,
-                                         name='foo-subnetpool',
-                                         min_prefixlen='21',
-                                         address_scope_id=address_scope_id)
-            overlap_subnet = netaddr.IPNetwork('10.10.10.10/24')
-            self.assertRaises(
-                webob.exc.HTTPClientError, self._test_create_subnetpool,
-                [overlap_subnet.cidr], min_prefixlen='21',
-                address_scope_id=address_scope_id)
-
-    def test_update_subnetpool_associate_address_scope(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/24')
-        initial_subnetpool = self._test_create_subnetpool([subnet.cidr],
-                                                          name='foo-sp',
-                                                          min_prefixlen='21')
-        with self.address_scope(name='foo-address-scope') as addr_scope:
-            address_scope_id = addr_scope['address_scope']['id']
-            data = {'subnetpool': {'address_scope_id': address_scope_id}}
-            req = self.new_update_request(
-                'subnetpools', data, initial_subnetpool['subnetpool']['id'])
-            api = self._api_for_resource('subnetpools')
-            res = self.deserialize(self.fmt, req.get_response(api))
-            self._compare_resource(res, data['subnetpool'], 'subnetpool')
-
-    def test_update_subnetpool_associate_invalid_address_scope(self):
-        subnet = netaddr.IPNetwork('10.10.10.0/24')
-        initial_subnetpool = self._test_create_subnetpool([subnet.cidr],
-                                                          name='foo-sp',
-                                                          min_prefixlen='21')
-        data = {'subnetpool': {'address_scope_id': 'foo-addr-scope-id'}}
-        req = self.new_update_request(
-            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
-        api = self._api_for_resource('subnetpools')
-        res = req.get_response(api)
-        self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
-
-    def test_update_subnetpool_disassociate_address_scope(self):
-        with self.address_scope(name='foo-address-scope') as addr_scope:
-            address_scope_id = addr_scope['address_scope']['id']
-            subnet = netaddr.IPNetwork('10.10.10.0/24')
-            expected = {'address_scope_id': address_scope_id}
-            initial_subnetpool = self._test_create_subnetpool(
-                [subnet.cidr], expected=expected, name='foo-sp',
-                min_prefixlen='21', address_scope_id=address_scope_id)
-
-            data = {'subnetpool': {'address_scope_id': None}}
-            req = self.new_update_request(
-                'subnetpools', data, initial_subnetpool['subnetpool']['id'])
-            api = self._api_for_resource('subnetpools')
-            res = self.deserialize(self.fmt, req.get_response(api))
-            self._compare_resource(res, data['subnetpool'], 'subnetpool')
-
-    def test_update_subnetpool_associate_another_address_scope(self):
-        with self.address_scope(name='foo-address-scope') as addr_scope:
-            address_scope_id = addr_scope['address_scope']['id']
-            subnet = netaddr.IPNetwork('10.10.10.0/24')
-            expected = {'address_scope_id': address_scope_id}
-            initial_subnetpool = self._test_create_subnetpool(
-                [subnet.cidr], expected=expected, name='foo-sp',
-                min_prefixlen='21', address_scope_id=address_scope_id)
-
-            with self.address_scope(name='foo-address-scope') as other_a_s:
-                other_a_s_id = other_a_s['address_scope']['id']
-                update_data = {'subnetpool': {'address_scope_id':
-                                              other_a_s_id}}
-                req = self.new_update_request(
-                    'subnetpools', update_data,
-                    initial_subnetpool['subnetpool']['id'])
-                api = self._api_for_resource('subnetpools')
-                res = self.deserialize(self.fmt, req.get_response(api))
-                self._compare_resource(res, update_data['subnetpool'],
-                                       'subnetpool')
-
-    def test_delete_address_scope_in_use(self):
-        with self.address_scope(name='foo-address-scope') as addr_scope:
-            address_scope_id = addr_scope['address_scope']['id']
-            subnet = netaddr.IPNetwork('10.10.10.0/24')
-            expected = {'address_scope_id': address_scope_id}
-            self._test_create_subnetpool([subnet.cidr], expected=expected,
-                                         name='foo-subnetpool',
-                                         min_prefixlen='21',
-                                         address_scope_id=address_scope_id)
-            self._delete('address-scopes', address_scope_id,
-                         expected_code=webob.exc.HTTPConflict.code)
-
-    def test_add_subnetpool_address_scope_wrong_address_family(self):
-        with self.address_scope(constants.IP_VERSION_6,
-                                name='foo-address-scope') as addr_scope:
-            address_scope_id = addr_scope['address_scope']['id']
-            subnet = netaddr.IPNetwork('10.10.10.0/24')
-            self.assertRaises(webob.exc.HTTPClientError,
-                              self._test_create_subnetpool,
-                              [subnet.cidr], name='foo-subnetpool',
-                              min_prefixlen='21',
-                              address_scope_id=address_scope_id)
-
-    def test_update_subnetpool_associate_address_scope_wrong_family(self):
-        with self.address_scope(constants.IP_VERSION_6,
-                                name='foo-address-scope') as addr_scope:
-            address_scope_id = addr_scope['address_scope']['id']
-            subnet = netaddr.IPNetwork('2001:db8::/64')
-            expected = {'address_scope_id': address_scope_id}
-            initial_subnetpool = self._test_create_subnetpool(
-                [subnet.cidr], expected=expected, name='foo-sp',
-                min_prefixlen='64', address_scope_id=address_scope_id)
-
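-            # The pool holds IPv6 prefixes while the second scope is created
-            # with the default (IPv4) family, so re-associating the pool
-            # must fail with a 400.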
-            with self.address_scope(name='foo-address-scope') as other_a_s:
-                other_a_s_id = other_a_s['address_scope']['id']
-                update_data = {'subnetpool': {'address_scope_id':
-                                              other_a_s_id}}
-                req = self.new_update_request(
-                    'subnetpools', update_data,
-                    initial_subnetpool['subnetpool']['id'])
-                api = self._api_for_resource('subnetpools')
-                res = req.get_response(api)
-                self.assertEqual(webob.exc.HTTPBadRequest.code,
-                                 res.status_int)
diff --git a/neutron/tests/unit/extensions/test_agent.py b/neutron/tests/unit/extensions/test_agent.py
deleted file mode 100644 (file)
index 65ef6de..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-from datetime import datetime
-import time
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-from webob import exc
-
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import db_base_plugin_v2
-from neutron.extensions import agent
-from neutron.tests.common import helpers
-from neutron.tests import tools
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-
-_uuid = uuidutils.generate_uuid
-_get_path = test_base._get_path
-L3_HOSTA = 'hosta'
-DHCP_HOSTA = 'hosta'
-L3_HOSTB = 'hostb'
-DHCP_HOSTC = 'hostc'
-LBAAS_HOSTA = 'hosta'
-LBAAS_HOSTB = 'hostb'
-
-
-class AgentTestExtensionManager(object):
-
-    def get_resources(self):
-        # Add the resources to the global attribute map. This is done here
-        # because the setup process does not initialize the main API router,
-        # which extends the global attribute map.
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            agent.RESOURCE_ATTRIBUTE_MAP)
-        return agent.Agent.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-# This plugin class is just for testing
-class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                      agents_db.AgentDbMixin):
-    supported_extension_aliases = ["agent"]
-
-
-class AgentDBTestMixIn(object):
-
-    def _list_agents(self, expected_res_status=None,
-                     neutron_context=None,
-                     query_string=None):
-        agent_res = self._list('agents',
-                               neutron_context=neutron_context,
-                               query_params=query_string)
-        if expected_res_status:
-            self.assertEqual(agent_res.status_int, expected_res_status)
-        return agent_res
-
-    def _register_agent_states(self, lbaas_agents=False):
-        """Register two L3, two DHCP and optionally two LBaaS agents."""
-        l3_hosta = helpers._get_l3_agent_dict(
-            L3_HOSTA, constants.L3_AGENT_MODE_LEGACY)
-        l3_hostb = helpers._get_l3_agent_dict(
-            L3_HOSTB, constants.L3_AGENT_MODE_LEGACY)
-        dhcp_hosta = helpers._get_dhcp_agent_dict(DHCP_HOSTA)
-        dhcp_hostc = helpers._get_dhcp_agent_dict(DHCP_HOSTC)
-        helpers.register_l3_agent(host=L3_HOSTA)
-        helpers.register_l3_agent(host=L3_HOSTB)
-        helpers.register_dhcp_agent(host=DHCP_HOSTA)
-        helpers.register_dhcp_agent(host=DHCP_HOSTC)
-
-        res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc]
-        if lbaas_agents:
-            lbaas_hosta = {
-                'binary': 'neutron-loadbalancer-agent',
-                'host': LBAAS_HOSTA,
-                'topic': 'LOADBALANCER_AGENT',
-                'configurations': {'device_drivers': ['haproxy_ns']},
-                'agent_type': constants.AGENT_TYPE_LOADBALANCER}
-            lbaas_hostb = copy.deepcopy(lbaas_hosta)
-            lbaas_hostb['host'] = LBAAS_HOSTB
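-            # Report the LBaaS agents through the RPC callback, the same
-            # path a live agent uses to report its state.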
-            callback = agents_db.AgentExtRpcCallback()
-            callback.report_state(
-                self.adminContext,
-                agent_state={'agent_state': lbaas_hosta},
-                time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT))
-            callback.report_state(
-                self.adminContext,
-                agent_state={'agent_state': lbaas_hostb},
-                time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT))
-            res += [lbaas_hosta, lbaas_hostb]
-
-        return res
-
-    def _register_dvr_agents(self):
-        dvr_snat_agent = helpers.register_l3_agent(
-            host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
-        dvr_agent = helpers.register_l3_agent(
-            host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR)
-        return [dvr_snat_agent, dvr_agent]
-
-
-class AgentDBTestCase(AgentDBTestMixIn,
-                      test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        plugin = 'neutron.tests.unit.extensions.test_agent.TestAgentPlugin'
-        # for these tests we need to enable overlapping ips
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        self.useFixture(tools.AttributeMapMemento())
-        ext_mgr = AgentTestExtensionManager()
-        super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-        self.adminContext = context.get_admin_context()
-
-    def test_create_agent(self):
-        data = {'agent': {}}
-        _req = self.new_create_request('agents', data, self.fmt)
-        _req.environ['neutron.context'] = context.Context(
-            '', 'tenant_id')
-        res = _req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_list_agent(self):
-        agents = self._register_agent_states()
-        res = self._list('agents')
-        self.assertEqual(len(agents), len(res['agents']))
-
-    def test_show_agent(self):
-        self._register_agent_states()
-        agents = self._list_agents(
-            query_string='binary=neutron-l3-agent')
-        self.assertEqual(2, len(agents['agents']))
-        agent = self._show('agents', agents['agents'][0]['id'])
-        self.assertEqual('neutron-l3-agent', agent['agent']['binary'])
-
-    def test_update_agent(self):
-        self._register_agent_states()
-        agents = self._list_agents(
-            query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
-        self.assertEqual(1, len(agents['agents']))
-        com_id = agents['agents'][0]['id']
-        agent = self._show('agents', com_id)
-        new_agent = {'agent': {'admin_state_up': False,
-                               'description': 'description'}}
-        self._update('agents', com_id, new_agent)
-        agent = self._show('agents', com_id)
-        self.assertFalse(agent['agent']['admin_state_up'])
-        self.assertEqual('description', agent['agent']['description'])
-
-    def test_dead_agent(self):
-        cfg.CONF.set_override('agent_down_time', 1)
-        self._register_agent_states()
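-        # agent_down_time is 1 second, so sleeping 1.5 seconds makes the
-        # last heartbeat stale and the agents are reported as dead.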
-        time.sleep(1.5)
-        agents = self._list_agents(
-            query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
-        self.assertFalse(agents['agents'][0]['alive'])
diff --git a/neutron/tests/unit/extensions/test_availability_zone.py b/neutron/tests/unit/extensions/test_availability_zone.py
deleted file mode 100644 (file)
index 553441c..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import db_base_plugin_v2
-from neutron.extensions import agent
-from neutron.extensions import availability_zone as az_ext
-from neutron.tests.common import helpers
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-
-class AZExtensionManager(object):
-
-    def get_resources(self):
-        agent.RESOURCE_ATTRIBUTE_MAP['agents'].update(
-            az_ext.EXTENDED_ATTRIBUTES_2_0['agents'])
-        return (az_ext.Availability_zone.get_resources() +
-                agent.Agent.get_resources())
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class AZTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                   agents_db.AgentDbMixin):
-    supported_extension_aliases = ["agent", "availability_zone"]
-
-
-class AZTestCommon(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-    def _register_azs(self):
-        self.agent1 = helpers.register_dhcp_agent(host='host1', az='nova1')
-        self.agent2 = helpers.register_dhcp_agent(host='host2', az='nova2')
-        self.agent3 = helpers.register_l3_agent(host='host2', az='nova2')
-        self.agent4 = helpers.register_l3_agent(host='host3', az='nova3')
-        self.agent5 = helpers.register_l3_agent(host='host4', az='nova2')
-
-
-class TestAZAgentCase(AZTestCommon):
-    def setUp(self):
-        plugin = ('neutron.tests.unit.extensions.'
-                  'test_availability_zone.AZTestPlugin')
-        ext_mgr = AZExtensionManager()
-        super(TestAZAgentCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-
-    def test_list_availability_zones(self):
-        self._register_azs()
-        helpers.set_agent_admin_state(self.agent3['id'], admin_state_up=False)
-        helpers.set_agent_admin_state(self.agent4['id'], admin_state_up=False)
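-        # agent3 and agent4 are now down; nova2 still has a live l3 agent
-        # (agent5), but nova3 does not, so its router resource is
-        # unavailable.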
-        expected = [
-            {'name': 'nova1', 'resource': 'network', 'state': 'available'},
-            {'name': 'nova2', 'resource': 'network', 'state': 'available'},
-            {'name': 'nova2', 'resource': 'router', 'state': 'available'},
-            {'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}]
-        res = self._list('availability_zones')
-        azs = res['availability_zones']
-        self.assertItemsEqual(expected, azs)
-        # not admin case
-        ctx = context.Context('', 'noadmin')
-        res = self._list('availability_zones', neutron_context=ctx)
-        azs = res['availability_zones']
-        self.assertItemsEqual(expected, azs)
-
-    def test_list_agent_with_az(self):
-        helpers.register_dhcp_agent(host='host1', az='nova1')
-        res = self._list('agents')
-        self.assertEqual('nova1',
-                         res['agents'][0]['availability_zone'])
-
-    def test_validate_availability_zones(self):
-        self._register_azs()
-        ctx = context.Context('', 'tenant_id')
-        self.plugin.validate_availability_zones(ctx, 'network',
-                                                ['nova1', 'nova2'])
-        self.plugin.validate_availability_zones(ctx, 'router',
-                                                ['nova2', 'nova3'])
-        self.assertRaises(az_ext.AvailabilityZoneNotFound,
-                          self.plugin.validate_availability_zones,
-                          ctx, 'router', ['nova1'])
-
-
-class TestAZNetworkCase(AZTestCommon):
-    def setUp(self):
-        plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-        ext_mgr = AZExtensionManager()
-        super(TestAZNetworkCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-
-    def test_create_network_with_az(self):
-        self._register_azs()
-        az_hints = ['nova1']
-        with self.network(availability_zone_hints=az_hints) as net:
-            res = self._show('networks', net['network']['id'])
-            self.assertItemsEqual(az_hints,
-                                  res['network']['availability_zone_hints'])
-
-    def test_create_network_with_azs(self):
-        self._register_azs()
-        az_hints = ['nova1', 'nova2']
-        with self.network(availability_zone_hints=az_hints) as net:
-            res = self._show('networks', net['network']['id'])
-            self.assertItemsEqual(az_hints,
-                                  res['network']['availability_zone_hints'])
-
-    def test_create_network_without_az(self):
-        with self.network() as net:
-            res = self._show('networks', net['network']['id'])
-            self.assertEqual([], res['network']['availability_zone_hints'])
-
-    def test_create_network_with_empty_az(self):
-        with self.network(availability_zone_hints=[]) as net:
-            res = self._show('networks', net['network']['id'])
-            self.assertEqual([], res['network']['availability_zone_hints'])
-
-    def test_create_network_with_not_exist_az(self):
-        res = self._create_network(self.fmt, 'net', True,
-                                   availability_zone_hints=['nova3'])
-        self.assertEqual(404, res.status_int)
diff --git a/neutron/tests/unit/extensions/test_dns.py b/neutron/tests/unit/extensions/test_dns.py
deleted file mode 100644 (file)
index 48f7990..0000000
+++ /dev/null
@@ -1,474 +0,0 @@
-# Copyright 2015 Rackspace
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import math
-
-import netaddr
-
-from oslo_config import cfg
-
-from neutron.common import constants
-from neutron.common import utils
-from neutron import context
-from neutron.db import db_base_plugin_v2
-from neutron.extensions import dns
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-
-class DnsExtensionManager(object):
-
-    def get_resources(self):
-        return []
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-    def get_extended_resources(self, version):
-        return dns.get_extended_resources(version)
-
-
-class DnsExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2):
-    """Test plugin that mixes in the DNS integration extension."""
-
-    supported_extension_aliases = ["dns-integration"]
-
-
-class DnsExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2):
-    """Test the dns attributes exposed through the API extension."""
-
-    def setUp(self):
-        plugin = ('neutron.tests.unit.extensions.test_dns.' +
-                  'DnsExtensionTestPlugin')
-        ext_mgr = DnsExtensionManager()
-        super(DnsExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-
-    def _create_port(self, fmt, net_id, expected_res_status=None,
-                     arg_list=None, **kwargs):
-        data = {'port': {'network_id': net_id,
-                         'tenant_id': self._tenant_id}}
-
-        for arg in (('admin_state_up', 'device_id',
-                    'mac_address', 'name', 'fixed_ips',
-                    'tenant_id', 'device_owner', 'security_groups',
-                    'dns_name') + (arg_list or ())):
-            # Arg must be present
-            if arg in kwargs:
-                data['port'][arg] = kwargs[arg]
-        # create a dhcp port device id if one hasn't been supplied
-        if ('device_owner' in kwargs and
-            kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
-            'host' in kwargs and
-            'device_id' not in kwargs):
-            device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
-            data['port']['device_id'] = device_id
-        port_req = self.new_create_request('ports', data, fmt)
-        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            port_req.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-
-        port_res = port_req.get_response(self.api)
-        if expected_res_status:
-            self.assertEqual(port_res.status_int, expected_res_status)
-        return port_res
-
-    def _test_list_resources(self, resource, items, neutron_context=None,
-                             query_params=None):
-        res = self._list('%ss' % resource,
-                         neutron_context=neutron_context,
-                         query_params=query_params)
-        resource = resource.replace('-', '_')
-        self.assertItemsEqual([i['id'] for i in res['%ss' % resource]],
-                              [i[resource]['id'] for i in items])
-        return res
-
-    def test_create_port_json(self):
-        keys = [('admin_state_up', True), ('status', self.port_create_status)]
-        with self.port(name='myname') as port:
-            for k, v in keys:
-                self.assertEqual(port['port'][k], v)
-            self.assertIn('mac_address', port['port'])
-            ips = port['port']['fixed_ips']
-            self.assertEqual(len(ips), 1)
-            self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-            self.assertEqual('myname', port['port']['name'])
-            self._verify_dns_assignment(port['port'],
-                                        ips_list=['10.0.0.2'])
-
-    def test_list_ports(self):
-        # for this test we need to enable overlapping ips
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with self.port() as v1, self.port() as v2, self.port() as v3:
-            ports = (v1, v2, v3)
-            res = self._test_list_resources('port', ports)
-            for port in res['ports']:
-                self._verify_dns_assignment(
-                    port, ips_list=[port['fixed_ips'][0]['ip_address']])
-
-    def test_show_port(self):
-        with self.port() as port:
-            req = self.new_show_request('ports', port['port']['id'], self.fmt)
-            sport = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(port['port']['id'], sport['port']['id'])
-            self._verify_dns_assignment(
-                sport['port'],
-                ips_list=[sport['port']['fixed_ips'][0]['ip_address']])
-
-    def test_update_port_non_default_dns_domain_with_dns_name(self):
-        with self.port() as port:
-            cfg.CONF.set_override('dns_domain', 'example.com')
-            data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}}
-            req = self.new_update_request('ports', data, port['port']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['port']['admin_state_up'],
-                             data['port']['admin_state_up'])
-            self._verify_dns_assignment(res['port'],
-                                        ips_list=['10.0.0.2'],
-                                        dns_name='vm1')
-
-    def test_update_port_default_dns_domain_with_dns_name(self):
-        with self.port() as port:
-            data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}}
-            req = self.new_update_request('ports', data, port['port']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['port']['admin_state_up'],
-                             data['port']['admin_state_up'])
-            self._verify_dns_assignment(res['port'],
-                                        ips_list=['10.0.0.2'])
-
-    def _verify_dns_assignment(self, port, ips_list=None, exp_ips_ipv4=0,
-                               exp_ips_ipv6=0, ipv4_cidrs=None,
-                               ipv6_cidrs=None, dns_name=''):
-        ips_list = ips_list or []
-        ipv4_cidrs = ipv4_cidrs or []
-        ipv6_cidrs = ipv6_cidrs or []
-        self.assertEqual(port['dns_name'], dns_name)
-        dns_assignment = port['dns_assignment']
-        if ips_list:
-            self.assertEqual(len(dns_assignment), len(ips_list))
-            ips_set = set(ips_list)
-        else:
-            self.assertEqual(len(dns_assignment), exp_ips_ipv4 + exp_ips_ipv6)
-            ipv4_count = 0
-            ipv6_count = 0
-            subnets_v4 = [netaddr.IPNetwork(cidr) for cidr in ipv4_cidrs]
-            subnets_v6 = [netaddr.IPNetwork(cidr) for cidr in ipv6_cidrs]
-
-        request_dns_name, request_fqdn = self._get_request_hostname_and_fqdn(
-            dns_name)
-        for assignment in dns_assignment:
-            if ips_list:
-                self.assertIn(assignment['ip_address'], ips_set)
-                ips_set.remove(assignment['ip_address'])
-            else:
-                ip = netaddr.IPAddress(assignment['ip_address'])
-                if ip.version == 4:
-                    self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v4))
-                    ipv4_count += 1
-                else:
-                    self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v6))
-                    ipv6_count += 1
-            hostname, fqdn = self._get_hostname_and_fqdn(request_dns_name,
-                                                         request_fqdn,
-                                                         assignment)
-            self.assertEqual(assignment['hostname'], hostname)
-            self.assertEqual(assignment['fqdn'], fqdn)
-        if ips_list:
-            self.assertFalse(ips_set)
-        else:
-            self.assertEqual(ipv4_count, exp_ips_ipv4)
-            self.assertEqual(ipv6_count, exp_ips_ipv6)
-
-    def _get_dns_domain(self):
-        if not cfg.CONF.dns_domain:
-            return ''
-        if cfg.CONF.dns_domain.endswith('.'):
-            return cfg.CONF.dns_domain
-        return '%s.' % cfg.CONF.dns_domain
-
-    def _get_request_hostname_and_fqdn(self, dns_name):
-        request_dns_name = ''
-        request_fqdn = ''
-        dns_domain = self._get_dns_domain()
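-        # A requested dns_name only yields a request-level hostname/FQDN
-        # when a non-default dns_domain is configured; with the default
-        # 'openstacklocal.' domain the names are derived from the IP.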
-        if dns_name and dns_domain and dns_domain != 'openstacklocal.':
-            request_dns_name = dns_name
-            request_fqdn = request_dns_name
-            if not request_dns_name.endswith('.'):
-                request_fqdn = '%s.%s' % (dns_name, dns_domain)
-        return request_dns_name, request_fqdn
-
-    def _get_hostname_and_fqdn(self, request_dns_name, request_fqdn,
-                               assignment):
-        dns_domain = self._get_dns_domain()
-        if request_dns_name:
-            hostname = request_dns_name
-            fqdn = request_fqdn
-        else:
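-            # No dns_name was requested: derive the hostname from the IP
-            # address, e.g. 10.0.0.2 becomes host-10-0-0-2.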
-            hostname = 'host-%s' % assignment['ip_address'].replace(
-                '.', '-').replace(':', '-')
-            fqdn = hostname
-            if dns_domain:
-                fqdn = '%s.%s' % (hostname, dns_domain)
-        return hostname, fqdn
-
-    def _verify_ip_in_subnet(self, ip, subnets_list):
-        for subnet in subnets_list:
-            if ip in subnet:
-                return True
-        return False
-
-    def test_update_port_update_ip(self):
-        """Test update of port IP.
-
-        Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10.
-        """
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                data = {'port': {'fixed_ips': [{'subnet_id':
-                                                subnet['subnet']['id'],
-                                                'ip_address': "10.0.0.10"}]}}
-                req = self.new_update_request('ports', data,
-                                              port['port']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                ips = res['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.10')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                self._verify_dns_assignment(res['port'],
-                                            ips_list=['10.0.0.10'])
-
-    def test_update_port_update_ip_address_only(self):
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                ips = port['port']['fixed_ips']
-                self.assertEqual(len(ips), 1)
-                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
-                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
-                data = {'port': {'fixed_ips': [{'subnet_id':
-                                                subnet['subnet']['id'],
-                                                'ip_address': "10.0.0.10"},
-                                               {'ip_address': "10.0.0.2"}]}}
-                req = self.new_update_request('ports', data,
-                                              port['port']['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                ips = res['port']['fixed_ips']
-                self.assertEqual(len(ips), 2)
-                self.assertIn({'ip_address': '10.0.0.2',
-                               'subnet_id': subnet['subnet']['id']}, ips)
-                self.assertIn({'ip_address': '10.0.0.10',
-                               'subnet_id': subnet['subnet']['id']}, ips)
-            self._verify_dns_assignment(res['port'],
-                                        ips_list=['10.0.0.10',
-                                                  '10.0.0.2'])
-
-    def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self):
-        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
-        self.assertEqual(res.status_code, 201)
-
-    def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_no_period(
-        self):
-        cfg.CONF.set_override('dns_domain', 'example.com')
-        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
-            dns_name='vm1')
-        self.assertEqual(res.status_code, 201)
-
-    def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_period(
-        self):
-        cfg.CONF.set_override('dns_domain', 'example.com.')
-        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
-            dns_name='vm1')
-        self.assertEqual(res.status_code, 201)
-
-    def test_create_port_multiple_v4_v6_subnets_pqdn_and_no_dns_domain(
-        self):
-        cfg.CONF.set_override('dns_domain', '')
-        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
-        self.assertEqual(res.status_code, 201)
-
-    def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_no_period(
-        self):
-        cfg.CONF.set_override('dns_domain', 'example.com')
-        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
-            dns_name='vm1.example.com.')
-        self.assertEqual(res.status_code, 201)
-
-    def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_period(
-        self):
-        cfg.CONF.set_override('dns_domain', 'example.com.')
-        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
-            dns_name='vm1.example.com.')
-        self.assertEqual(res.status_code, 201)
-
-    def test_create_port_multiple_v4_v6_subnets_fqdn_default_domain_period(
-        self):
-        cfg.CONF.set_override('dns_domain', 'openstacklocal.')
-        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
-        self.assertEqual(res.status_code, 201)
-
-    def test_create_port_multiple_v4_v6_subnets_bad_fqdn_and_dns_domain(
-        self):
-        cfg.CONF.set_override('dns_domain', 'example.com')
-        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
-            dns_name='vm1.bad-domain.com.')
-        self.assertEqual(res.status_code, 400)
-        expected_error = ('The dns_name passed is a FQDN. Its higher level '
-                          'labels must be equal to the dns_domain option in '
-                          'neutron.conf')
-        self.assertIn(expected_error, res.text)
-
-    def test_create_port_multiple_v4_v6_subnets_bad_pqdn_and_dns_domain(
-        self):
-        cfg.CONF.set_override('dns_domain', 'example.com')
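-        # Build a PQDN that already fills FQDN_MAX_LEN, so appending the
-        # dns_domain pushes the resulting FQDN past the maximum length.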
-        num_labels = int(
-            math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN))
-        filler_len = int(
-            math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN))
-        dns_name = (('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') *
-                    num_labels + 'a' * filler_len)
-        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
-            dns_name=dns_name)
-        self.assertEqual(res.status_code, 400)
-        expected_error = ("When the two are concatenated to form a FQDN "
-                          "(with a '.' at the end), the resulting length "
-                          "exceeds the maximum size")
-        self.assertIn(expected_error, res.text)
-
-    def _test_create_port_with_multiple_ipv4_and_ipv6_subnets(self,
-                                                              dns_name=''):
-        """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets."""
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        sub_dicts = [
-            {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
-             'ip_version': 4, 'ra_addr_mode': None},
-            {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24',
-             'ip_version': 4, 'ra_addr_mode': None},
-            {'gateway': 'fe80::1', 'cidr': 'fe80::/64',
-             'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
-            {'gateway': 'fe81::1', 'cidr': 'fe81::/64',
-             'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
-            {'gateway': 'fe82::1', 'cidr': 'fe82::/64',
-             'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL},
-            {'gateway': 'fe83::1', 'cidr': 'fe83::/64',
-             'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}]
-        subnets = {}
-        for sub_dict in sub_dicts:
-            subnet = self._make_subnet(
-                self.fmt, network,
-                gateway=sub_dict['gateway'],
-                cidr=sub_dict['cidr'],
-                ip_version=sub_dict['ip_version'],
-                ipv6_ra_mode=sub_dict['ra_addr_mode'],
-                ipv6_address_mode=sub_dict['ra_addr_mode'])
-            subnets[subnet['subnet']['id']] = sub_dict
-        res = self._create_port(self.fmt, net_id=network['network']['id'],
-                                dns_name=dns_name)
-        if res.status_code != 201:
-            return res
-        port = self.deserialize(self.fmt, res)
-        # Since the create port request was made without a list of fixed IPs,
-        # the port should be associated with addresses for one of the
-        # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6
-        # SLAAC subnets.
-        self.assertEqual(4, len(port['port']['fixed_ips']))
-        addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0,
-                           constants.IPV6_SLAAC: 0}
-        for fixed_ip in port['port']['fixed_ips']:
-            subnet_id = fixed_ip['subnet_id']
-            if subnet_id in subnets:
-                addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1
-        self.assertEqual(1, addr_mode_count[None])
-        self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL])
-        self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC])
-        self._verify_dns_assignment(port['port'], exp_ips_ipv4=1,
-                                    exp_ips_ipv6=3,
-                                    ipv4_cidrs=[sub_dicts[0]['cidr'],
-                                                sub_dicts[1]['cidr']],
-                                    ipv6_cidrs=[sub_dicts[2]['cidr'],
-                                                sub_dicts[3]['cidr'],
-                                                sub_dicts[4]['cidr'],
-                                                sub_dicts[5]['cidr']],
-                                    dns_name=dns_name)
-        return res
-
-    def test_api_extension_validation_with_bad_dns_names(self):
-        num_labels = int(
-            math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN))
-        filler_len = int(
-            math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN))
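-        # Invalid names: non-strings, illegal characters, labels that start
-        # or end with '-' or '_', an over-long label, and a name one
-        # character past FQDN_MAX_LEN.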
-        dns_names = [555, '\f\n\r', '.', '-vm01', '_vm01', 'vm01-',
-                    '-vm01.test1', 'vm01.-test1', 'vm01._test1',
-                    'vm01.test1-', 'vm01.te$t1', 'vm0#1.test1.',
-                    'vm01.123.', '-' + 'a' * dns.DNS_LABEL_MAX_LEN,
-                    'a' * (dns.DNS_LABEL_MAX_LEN + 1),
-                    ('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') *
-                    num_labels + 'a' * (filler_len + 1)]
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
-                    'ip_version': 4, 'ra_addr_mode': None}
-        self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'],
-                          cidr=sub_dict['cidr'],
-                          ip_version=sub_dict['ip_version'],
-                          ipv6_ra_mode=sub_dict['ra_addr_mode'],
-                          ipv6_address_mode=sub_dict['ra_addr_mode'])
-        for dns_name in dns_names:
-            res = self._create_port(self.fmt, net_id=network['network']['id'],
-                                    dns_name=dns_name)
-            self.assertEqual(res.status_code, 400)
-            is_expected_message = (
-                'cannot be converted to lowercase string' in res.text or
-                'not a valid PQDN or FQDN. Reason:' in res.text)
-            self.assertTrue(is_expected_message)
-
-    def test_api_extension_validation_with_good_dns_names(self):
-        cfg.CONF.set_override('dns_domain', 'example.com')
-        higher_labels_len = len('example.com.')
-        num_labels = int(
-            math.floor((dns.FQDN_MAX_LEN - higher_labels_len) /
-                       dns.DNS_LABEL_MAX_LEN))
-        filler_len = int(
-            math.floor((dns.FQDN_MAX_LEN - higher_labels_len) %
-                       dns.DNS_LABEL_MAX_LEN))
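-        # Valid names: the empty default, PQDNs, FQDNs matching the
-        # configured dns_domain, a maximum-length label, and a name that
-        # fits FQDN_MAX_LEN exactly once the domain is appended.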
-        dns_names = ['', 'www.1000.com', 'vM01', 'vm01.example.com.',
-                     '8vm01', 'vm-01.example.com.', 'vm01.test',
-                     'vm01.test.example.com.', 'vm01.test-100',
-                     'vm01.test-100.example.com.',
-                     'a' * dns.DNS_LABEL_MAX_LEN,
-                     ('a' * dns.DNS_LABEL_MAX_LEN) + '.example.com.',
-                     ('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') *
-                     num_labels + 'a' * (filler_len - 1)]
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
-                    'ip_version': 4, 'ra_addr_mode': None}
-        self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'],
-                          cidr=sub_dict['cidr'],
-                          ip_version=sub_dict['ip_version'],
-                          ipv6_ra_mode=sub_dict['ra_addr_mode'],
-                          ipv6_address_mode=sub_dict['ra_addr_mode'])
-        for dns_name in dns_names:
-            res = self._create_port(self.fmt, net_id=network['network']['id'],
-                                    dns_name=dns_name)
-            self.assertEqual(res.status_code, 201)
diff --git a/neutron/tests/unit/extensions/test_external_net.py b/neutron/tests/unit/extensions/test_external_net.py
deleted file mode 100644 (file)
index 131fefd..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-from webob import exc
-
-from neutron import context
-from neutron.db import models_v2
-from neutron.extensions import external_net
-from neutron import manager
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-
-_uuid = uuidutils.generate_uuid
-_get_path = test_base._get_path
-
-
-class ExtNetTestExtensionManager(object):
-
-    def get_resources(self):
-        return []
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    def _create_network(self, fmt, name, admin_state_up, **kwargs):
-        """Override the routine to allow the router:external attribute."""
-        # attributes containing a colon should be passed with
-        # a double underscore
-        new_args = dict(zip(map(lambda x: x.replace('__', ':'), kwargs),
-                            kwargs.values()))
-        arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,)
-        return super(ExtNetDBTestCase, self)._create_network(
-            fmt, name, admin_state_up, arg_list=arg_list, **new_args)
-
-    def setUp(self):
-        plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
-        ext_mgr = ExtNetTestExtensionManager()
-        super(ExtNetDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-
-    def _set_net_external(self, net_id):
-        self._update('networks', net_id,
-                     {'network': {external_net.EXTERNAL: True}})
-
-    def test_list_nets_external(self):
-        with self.network() as n1:
-            self._set_net_external(n1['network']['id'])
-            with self.network():
-                body = self._list('networks')
-                self.assertEqual(len(body['networks']), 2)
-
-                body = self._list('networks',
-                                  query_params="%s=True" %
-                                               external_net.EXTERNAL)
-                self.assertEqual(len(body['networks']), 1)
-
-                body = self._list('networks',
-                                  query_params="%s=False" %
-                                               external_net.EXTERNAL)
-                self.assertEqual(len(body['networks']), 1)
-
-    def test_list_nets_external_pagination(self):
-        if self._skip_native_pagination:
-            self.skipTest("Native pagination is not implemented")
-        with self.network(name='net1') as n1, self.network(name='net3') as n3:
-            self._set_net_external(n1['network']['id'])
-            self._set_net_external(n3['network']['id'])
-            with self.network(name='net2') as n2:
-                self._test_list_with_pagination(
-                    'network', (n1, n3), ('name', 'asc'), 1, 3,
-                    query_params='router:external=True')
-                self._test_list_with_pagination(
-                    'network', (n2, ), ('name', 'asc'), 1, 2,
-                    query_params='router:external=False')
-
-    def test_get_network_succeeds_without_filter(self):
-        plugin = manager.NeutronManager.get_plugin()
-        ctx = context.Context(None, None, is_admin=True)
-        result = plugin.get_networks(ctx, filters=None)
-        self.assertEqual([], result)
-
-    def test_update_network_set_external_non_admin_fails(self):
-        # Assert that a non-admin user cannot update the
-        # router:external attribute
-        with self.network(tenant_id='noadmin') as network:
-            data = {'network': {'router:external': True}}
-            req = self.new_update_request('networks',
-                                          data,
-                                          network['network']['id'])
-            req.environ['neutron.context'] = context.Context('', 'noadmin')
-            res = req.get_response(self.api)
-            self.assertEqual(exc.HTTPForbidden.code, res.status_int)
-
-    def test_network_filter_hook_admin_context(self):
-        plugin = manager.NeutronManager.get_plugin()
-        ctx = context.Context(None, None, is_admin=True)
-        model = models_v2.Network
-        conditions = plugin._network_filter_hook(ctx, model, [])
-        self.assertEqual([], conditions)
-
-    def test_network_filter_hook_nonadmin_context(self):
-        plugin = manager.NeutronManager.get_plugin()
-        ctx = context.Context('edinson', 'cavani')
-        model = models_v2.Network
-        txt = "externalnetworks.network_id IS NOT NULL"
-        conditions = plugin._network_filter_hook(ctx, model, [])
-        self.assertEqual(conditions.__str__(), txt)
-        # Try to concatenate conditions
-        conditions = plugin._network_filter_hook(ctx, model, conditions)
-        self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt))
-
-    def test_create_port_external_network_non_admin_fails(self):
-        with self.network(router__external=True) as ext_net:
-            with self.subnet(network=ext_net) as ext_subnet:
-                try:
-                    with self.port(subnet=ext_subnet,
-                                   set_context='True',
-                                   tenant_id='noadmin'):
-                        pass
-                    self.fail('port creation should have failed')
-                except exc.HTTPClientError as ctx_exc:
-                    self.assertEqual(403, ctx_exc.code)
-
-    def test_create_port_external_network_admin_succeeds(self):
-        with self.network(router__external=True) as ext_net:
-            with self.subnet(network=ext_net) as ext_subnet:
-                with self.port(subnet=ext_subnet) as port:
-                    self.assertEqual(port['port']['network_id'],
-                                     ext_net['network']['id'])
-
-    def test_create_external_network_non_admin_fails(self):
-        try:
-            with self.network(router__external=True,
-                              set_context='True',
-                              tenant_id='noadmin'):
-                pass
-            self.fail('network creation should have failed')
-        except exc.HTTPClientError as ctx_exc:
-            self.assertEqual(403, ctx_exc.code)
-
-    def test_create_external_network_admin_succeeds(self):
-        with self.network(router__external=True) as ext_net:
-            self.assertTrue(ext_net['network'][external_net.EXTERNAL])
-
-    def test_delete_network_check_disassociated_floatingips(self):
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins') as srv_plugins:
-            l3_mock = mock.Mock()
-            srv_plugins.return_value = {'L3_ROUTER_NAT': l3_mock}
-            with self.network() as net:
-                req = self.new_delete_request('networks', net['network']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int, exc.HTTPNoContent.code)
-                (l3_mock.delete_disassociated_floatingips
-                 .assert_called_once_with(mock.ANY, net['network']['id']))
diff --git a/neutron/tests/unit/extensions/test_extra_dhcp_opt.py b/neutron/tests/unit/extensions/test_extra_dhcp_opt.py
deleted file mode 100644 (file)
index e1ef765..0000000
+++ /dev/null
@@ -1,335 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-
-import webob.exc
-
-from neutron.db import db_base_plugin_v2
-from neutron.db import extradhcpopt_db as edo_db
-from neutron.extensions import extra_dhcp_opt as edo_ext
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-
-DB_PLUGIN_KLASS = (
-    'neutron.tests.unit.extensions.test_extra_dhcp_opt.ExtraDhcpOptTestPlugin')
-
-
-class ExtraDhcpOptTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                             edo_db.ExtraDhcpOptMixin):
-    """Test plugin that implements the calls needed on port create/update
-    to associate ports with extra DHCP options.
-    """
-
-    supported_extension_aliases = ["extra_dhcp_opt"]
-
-    def create_port(self, context, port):
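-        # Create the port and its extra DHCP options in one subtransaction
-        # so a failure rolls both back together.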
-        with context.session.begin(subtransactions=True):
-            edos = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
-            new_port = super(ExtraDhcpOptTestPlugin, self).create_port(
-                context, port)
-            self._process_port_create_extra_dhcp_opts(context, new_port, edos)
-        return new_port
-
-    def update_port(self, context, id, port):
-        with context.session.begin(subtransactions=True):
-            rtn_port = super(ExtraDhcpOptTestPlugin, self).update_port(
-                context, id, port)
-            self._update_extra_dhcp_opts_on_port(context, id, port, rtn_port)
-        return rtn_port
-
-
-class ExtraDhcpOptDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    def setUp(self, plugin=DB_PLUGIN_KLASS):
-        super(ExtraDhcpOptDBTestCase, self).setUp(plugin=plugin)
-
-
-class TestExtraDhcpOpt(ExtraDhcpOptDBTestCase):
-    def _check_opts(self, expected, returned):
-        self.assertEqual(len(expected), len(returned))
-        for opt in returned:
-            name = opt['opt_name']
-            for exp in expected:
-                if (name == exp['opt_name'] and
-                        opt['ip_version'] == exp.get('ip_version', 4)):
-                    val = exp['opt_value']
-                    break
-            else:
-                self.fail('returned option %s was not expected' % name)
-            self.assertEqual(val, opt['opt_value'])
-
-    def test_create_port_with_extradhcpopts(self):
-        opt_list = [{'opt_name': 'bootfile-name',
-                     'opt_value': 'pxelinux.0'},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'}]
-
-        params = {edo_ext.EXTRADHCPOPTS: opt_list,
-                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
-
-        with self.port(**params) as port:
-            self._check_opts(opt_list,
-                             port['port'][edo_ext.EXTRADHCPOPTS])
-
-    def test_create_port_with_none_extradhcpopts(self):
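-        # An option whose value is None is not stored, so bootfile-name
-        # must be absent from the returned extra DHCP options.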
-        opt_list = [{'opt_name': 'bootfile-name',
-                     'opt_value': None},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'}]
-        expected = [{'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'}]
-
-        params = {edo_ext.EXTRADHCPOPTS: opt_list,
-                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
-
-        with self.port(**params) as port:
-            self._check_opts(expected,
-                             port['port'][edo_ext.EXTRADHCPOPTS])
-
-    def test_create_port_with_empty_router_extradhcpopts(self):
-        opt_list = [{'opt_name': 'router',
-                     'opt_value': ''},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'}]
-
-        params = {edo_ext.EXTRADHCPOPTS: opt_list,
-                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
-
-        with self.port(**params) as port:
-            self._check_opts(opt_list,
-                             port['port'][edo_ext.EXTRADHCPOPTS])
-
-    def test_create_port_with_extradhcpopts_ipv4_opt_version(self):
-        opt_list = [{'opt_name': 'bootfile-name',
-                     'opt_value': 'pxelinux.0',
-                     'ip_version': 4},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456',
-                     'ip_version': 4},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123',
-                     'ip_version': 4}]
-
-        params = {edo_ext.EXTRADHCPOPTS: opt_list,
-                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
-
-        with self.port(**params) as port:
-            self._check_opts(opt_list,
-                             port['port'][edo_ext.EXTRADHCPOPTS])
-
-    def test_create_port_with_extradhcpopts_ipv6_opt_version(self):
-        opt_list = [{'opt_name': 'bootfile-name',
-                     'opt_value': 'pxelinux.0',
-                     'ip_version': 6},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '2001:192:168::1',
-                     'ip_version': 6}]
-
-        params = {edo_ext.EXTRADHCPOPTS: opt_list,
-                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
-
-        with self.port(**params) as port:
-            self._check_opts(opt_list,
-                             port['port'][edo_ext.EXTRADHCPOPTS])
-
-    def _test_update_port_with_extradhcpopts(self, opt_list, upd_opts,
-                                             expected_opts):
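-        # Create a port with opt_list, apply upd_opts through the API
-        # and verify the resulting options match expected_opts.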
-        params = {edo_ext.EXTRADHCPOPTS: opt_list,
-                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
-
-        with self.port(**params) as port:
-            update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}}
-
-            req = self.new_update_request('ports', update_port,
-                                          port['port']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
-            port = self.deserialize('json', res)
-            self._check_opts(expected_opts,
-                             port['port'][edo_ext.EXTRADHCPOPTS])
-
-    def test_update_port_with_extradhcpopts_with_same(self):
-        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'}]
-        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}]
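-        # NOTE: opt_list[:] is a shallow copy, so updating these dicts
-        # also changes opt_list; the port is created with the new value
-        # already set and the update re-applies that same value.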
-        expected_opts = opt_list[:]
-        for i in expected_opts:
-            if i['opt_name'] == upd_opts[0]['opt_name']:
-                i['opt_value'] = upd_opts[0]['opt_value']
-                break
-        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
-                                                  expected_opts)
-
-    def test_update_port_with_additional_extradhcpopt(self):
-        opt_list = [{'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'}]
-        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}]
-        expected_opts = copy.deepcopy(opt_list)
-        expected_opts.append(upd_opts[0])
-        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
-                                                  expected_opts)
-
-    def test_update_port_with_extradhcpopts(self):
-        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'}]
-        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}]
-        expected_opts = copy.deepcopy(opt_list)
-        for i in expected_opts:
-            if i['opt_name'] == upd_opts[0]['opt_name']:
-                i['opt_value'] = upd_opts[0]['opt_value']
-                break
-        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
-                                                  expected_opts)
-
-    def test_update_port_with_extradhcpopt_delete(self):
-        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'}]
-        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}]
-        expected_opts = [opt for opt in opt_list
-                         if opt['opt_name'] != 'bootfile-name']
-        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
-                                                  expected_opts)
-
-    def test_update_port_without_extradhcpopt_delete(self):
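-        # Deleting an option that was never set should be a no-op.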
-        opt_list = []
-        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}]
-        expected_opts = []
-        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
-                                                  expected_opts)
-
-    def test_update_port_adding_extradhcpopts(self):
-        opt_list = []
-        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'}]
-        expected_opts = copy.deepcopy(upd_opts)
-        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
-                                                  expected_opts)
-
-    def test_update_port_with_blank_string_extradhcpopt(self):
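-        # A whitespace-only opt_value must be rejected with a 400.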
-        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'}]
-        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': '    '}]
-
-        params = {edo_ext.EXTRADHCPOPTS: opt_list,
-                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
-
-        with self.port(**params) as port:
-            update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}}
-
-            req = self.new_update_request('ports', update_port,
-                                          port['port']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_update_port_with_blank_name_extradhcpopt(self):
-        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123'},
-                    {'opt_name': 'server-ip-address',
-                     'opt_value': '123.123.123.456'}]
-        upd_opts = [{'opt_name': '     ', 'opt_value': 'pxelinux.0'}]
-
-        params = {edo_ext.EXTRADHCPOPTS: opt_list,
-                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
-
-        with self.port(**params) as port:
-            update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}}
-
-            req = self.new_update_request('ports', update_port,
-                                          port['port']['id'])
-            res = req.get_response(self.api)
-            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_update_port_with_blank_router_extradhcpopt(self):
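-        # Blanking the 'router' option updates its value to '' rather
-        # than removing the option (only None removes it).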
-        opt_list = [{'opt_name': 'bootfile-name',
-                     'opt_value': 'pxelinux.0',
-                     'ip_version': 4},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123',
-                     'ip_version': 4},
-                    {'opt_name': 'router',
-                     'opt_value': '123.123.123.1',
-                     'ip_version': 4}]
-        upd_opts = [{'opt_name': 'router',
-                     'opt_value': '',
-                     'ip_version': 4}]
-        expected_opts = copy.deepcopy(opt_list)
-        for i in expected_opts:
-            if i['opt_name'] == upd_opts[0]['opt_name']:
-                i['opt_value'] = upd_opts[0]['opt_value']
-                break
-
-        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
-                                                  expected_opts)
-
-    def test_update_port_with_extradhcpopts_ipv6_change_value(self):
-        opt_list = [{'opt_name': 'bootfile-name',
-                     'opt_value': 'pxelinux.0',
-                     'ip_version': 6},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '2001:192:168::1',
-                     'ip_version': 6}]
-        upd_opts = [{'opt_name': 'tftp-server',
-                     'opt_value': '2001:192:168::2',
-                     'ip_version': 6}]
-        expected_opts = copy.deepcopy(opt_list)
-        for i in expected_opts:
-            if i['opt_name'] == upd_opts[0]['opt_name']:
-                i['opt_value'] = upd_opts[0]['opt_value']
-                break
-        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
-                                                  expected_opts)
-
-    def test_update_port_with_extradhcpopts_add_another_ver_opt(self):
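-        # The same option name may exist once per IP version: adding an
-        # IPv4 tftp-server must not replace the IPv6 one.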
-        opt_list = [{'opt_name': 'bootfile-name',
-                     'opt_value': 'pxelinux.0',
-                     'ip_version': 6},
-                    {'opt_name': 'tftp-server',
-                     'opt_value': '2001:192:168::1',
-                     'ip_version': 6}]
-        upd_opts = [{'opt_name': 'tftp-server',
-                     'opt_value': '123.123.123.123',
-                     'ip_version': 4}]
-        expected_opts = copy.deepcopy(opt_list)
-        expected_opts.extend(upd_opts)
-        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
-                                                  expected_opts)
diff --git a/neutron/tests/unit/extensions/test_extraroute.py b/neutron/tests/unit/extensions/test_extraroute.py
deleted file mode 100644 (file)
index 371056d..0000000
+++ /dev/null
@@ -1,487 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-from webob import exc
-
-from neutron.common import constants
-from neutron.common import utils
-from neutron.db import extraroute_db
-from neutron.extensions import extraroute
-from neutron.extensions import l3
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit.extensions import test_l3
-
-
-_uuid = uuidutils.generate_uuid
-_get_path = test_base._get_path
-
-
-class ExtraRouteTestExtensionManager(object):
-
-    def get_resources(self):
-        l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(
-            extraroute.EXTENDED_ATTRIBUTES_2_0['routers'])
-        return l3.L3.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-# This plugin class is for tests with a plugin that integrates L3.
-class TestExtraRouteIntPlugin(test_l3.TestL3NatIntPlugin,
-                              extraroute_db.ExtraRoute_db_mixin):
-    supported_extension_aliases = ["external-net", "router", "extraroute"]
-
-
-# A fake L3 service plugin class with extra route capability, for
-# plugins that delegate away L3 routing functionality.
-class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin,
-                                       extraroute_db.ExtraRoute_db_mixin):
-    supported_extension_aliases = ["router", "extraroute"]
-
-
-class ExtraRouteDBTestCaseBase(object):
-    def _routes_update_prepare(self, router_id, subnet_id,
-                               port_id, routes, skip_add=False):
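-        # Attach the subnet/port to the router (unless skip_add), push
-        # the routes and return the resulting router dict.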
-        if not skip_add:
-            self._router_interface_action('add', router_id, subnet_id, port_id)
-        self._update('routers', router_id, {'router': {'routes': routes}})
-        return self._show('routers', router_id)
-
-    def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes):
-        self._update('routers', router_id, {'router': {'routes': routes}})
-        self._router_interface_action('remove', router_id, subnet_id, port_id)
-
-    def test_route_update_with_one_route(self):
-        routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}]
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    body = self._routes_update_prepare(r['router']['id'],
-                                                       None, p['port']['id'],
-                                                       routes)
-                    self.assertEqual(routes, body['router']['routes'])
-                    self._routes_update_cleanup(p['port']['id'],
-                                                None, r['router']['id'], [])
-
-    def test_route_clear_routes_with_None(self):
-        routes = [{'destination': '135.207.0.0/16',
-                   'nexthop': '10.0.1.3'},
-                  {'destination': '12.0.0.0/8',
-                   'nexthop': '10.0.1.4'},
-                  {'destination': '141.212.0.0/16',
-                   'nexthop': '10.0.1.5'}]
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    self._routes_update_prepare(r['router']['id'],
-                                                None, p['port']['id'], routes)
-                    body = self._update('routers', r['router']['id'],
-                                        {'router': {'routes': None}})
-                    self.assertEqual([], body['router']['routes'])
-                    self._routes_update_cleanup(p['port']['id'],
-                                                None, r['router']['id'], [])
-
-    def test_router_interface_in_use_by_route(self):
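-        # The interface hosting a route's nexthop cannot be removed
-        # while the route exists; expect a 409 Conflict.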
-        routes = [{'destination': '135.207.0.0/16',
-                   'nexthop': '10.0.1.3'}]
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    body = self._routes_update_prepare(r['router']['id'],
-                                                       None, p['port']['id'],
-                                                       routes)
-                    self.assertEqual(routes, body['router']['routes'])
-                    self._router_interface_action(
-                        'remove',
-                        r['router']['id'],
-                        None,
-                        p['port']['id'],
-                        expected_code=exc.HTTPConflict.code)
-
-                    self._routes_update_cleanup(p['port']['id'],
-                                                None, r['router']['id'], [])
-
-    def test_route_update_with_multi_routes(self):
-        routes = [{'destination': '135.207.0.0/16',
-                   'nexthop': '10.0.1.3'},
-                  {'destination': '12.0.0.0/8',
-                   'nexthop': '10.0.1.4'},
-                  {'destination': '141.212.0.0/16',
-                   'nexthop': '10.0.1.5'}]
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    body = self._routes_update_prepare(r['router']['id'],
-                                                       None, p['port']['id'],
-                                                       routes)
-                    self.assertEqual(
-                        sorted(routes, key=utils.safe_sort_key),
-                        sorted(body['router']['routes'],
-                               key=utils.safe_sort_key))
-                    self._routes_update_cleanup(p['port']['id'],
-                                                None, r['router']['id'], [])
-
-    def test_routes_update_for_multiple_routers(self):
-        routes1 = [{'destination': '135.207.0.0/16',
-                    'nexthop': '10.0.0.3'}]
-        routes2 = [{'destination': '12.0.0.0/8',
-                    'nexthop': '10.0.0.4'}]
-        with self.router() as r1,\
-                self.router() as r2,\
-                self.subnet(cidr='10.0.0.0/24') as s:
-            with self.port(subnet=s) as p1, self.port(subnet=s) as p2:
-                body = self._routes_update_prepare(r1['router']['id'],
-                                                   None, p1['port']['id'],
-                                                   routes1)
-                self.assertEqual(routes1, body['router']['routes'])
-
-                body = self._routes_update_prepare(r2['router']['id'],
-                                                   None, p2['port']['id'],
-                                                   routes2)
-                self.assertEqual(routes2, body['router']['routes'])
-
-                self._routes_update_cleanup(p1['port']['id'],
-                                            None, r1['router']['id'], [])
-                self._routes_update_cleanup(p2['port']['id'],
-                                            None, r2['router']['id'], [])
-
-    def test_router_update_delete_routes(self):
-        routes_orig = [{'destination': '135.207.0.0/16',
-                        'nexthop': '10.0.1.3'},
-                       {'destination': '12.0.0.0/8',
-                        'nexthop': '10.0.1.4'},
-                       {'destination': '141.212.0.0/16',
-                        'nexthop': '10.0.1.5'}]
-        routes_left = [{'destination': '135.207.0.0/16',
-                        'nexthop': '10.0.1.3'},
-                       {'destination': '141.212.0.0/16',
-                        'nexthop': '10.0.1.5'}]
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    body = self._routes_update_prepare(r['router']['id'],
-                                                       None, p['port']['id'],
-                                                       routes_orig)
-                    self.assertEqual(
-                        sorted(routes_orig, key=utils.safe_sort_key),
-                        sorted(body['router']['routes'],
-                               key=utils.safe_sort_key))
-                    body = self._routes_update_prepare(r['router']['id'],
-                                                       None, p['port']['id'],
-                                                       routes_left,
-                                                       skip_add=True)
-                    self.assertEqual(
-                        sorted(routes_left, key=utils.safe_sort_key),
-                        sorted(body['router']['routes'],
-                               key=utils.safe_sort_key))
-                    self._routes_update_cleanup(p['port']['id'],
-                                                None, r['router']['id'], [])
-
-    def _test_malformed_route(self, routes):
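-        # Any malformed routes payload should be rejected with a 400
-        # and leave the router interface intact for cleanup.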
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-                    self._update('routers', r['router']['id'],
-                                 {'router': {'routes': routes}},
-                                 expected_code=exc.HTTPBadRequest.code)
-                    # clean-up
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-    def test_no_destination_route(self):
-        self._test_malformed_route([{'nexthop': '10.0.1.6'}])
-
-    def test_no_nexthop_route(self):
-        self._test_malformed_route([{'destination': '135.207.0.0/16'}])
-
-    def test_none_destination(self):
-        self._test_malformed_route([{'destination': None,
-                                     'nexthop': '10.0.1.3'}])
-
-    def test_none_nexthop(self):
-        self._test_malformed_route([{'destination': '135.207.0.0/16',
-                                     'nexthop': None}])
-
-    def test_nexthop_is_port_ip(self):
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-                    port_ip = p['port']['fixed_ips'][0]['ip_address']
-                    routes = [{'destination': '135.207.0.0/16',
-                               'nexthop': port_ip}]
-
-                    self._update('routers', r['router']['id'],
-                                 {'router': {'routes':
-                                             routes}},
-                                 expected_code=exc.HTTPBadRequest.code)
-                    # clean-up
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-    def test_router_update_with_too_many_routes(self):
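-        # max_routes is set to 3 in setUp, so a fourth route must
-        # trigger a 400.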
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-                    routes = [{'destination': '135.207.0.0/16',
-                               'nexthop': '10.0.1.3'},
-                              {'destination': '12.0.0.0/8',
-                               'nexthop': '10.0.1.4'},
-                              {'destination': '141.212.0.0/16',
-                               'nexthop': '10.0.1.5'},
-                              {'destination': '192.168.0.0/16',
-                               'nexthop': '10.0.1.6'}]
-
-                    self._update('routers', r['router']['id'],
-                                 {'router': {'routes':
-                                             routes}},
-                                 expected_code=exc.HTTPBadRequest.code)
-
-                    # clean-up
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-    def test_router_update_with_dup_address(self):
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-                    routes = [{'destination': '135.207.0.0/16',
-                               'nexthop': '10.0.1.3'},
-                              {'destination': '135.207.0.0/16',
-                               'nexthop': '10.0.1.3'}]
-
-                    self._update('routers', r['router']['id'],
-                                 {'router': {'routes':
-                                             routes}},
-                                 expected_code=exc.HTTPBadRequest.code)
-
-                    # clean-up
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-    def test_router_update_with_invalid_ip_address(self):
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-                    routes = [{'destination': '512.207.0.0/16',
-                               'nexthop': '10.0.1.3'}]
-
-                    self._update('routers', r['router']['id'],
-                                 {'router': {'routes':
-                                             routes}},
-                                 expected_code=exc.HTTPBadRequest.code)
-
-                    routes = [{'destination': '127.207.0.0/48',
-                               'nexthop': '10.0.1.3'}]
-
-                    self._update('routers', r['router']['id'],
-                                 {'router': {'routes':
-                                             routes}},
-                                 expected_code=exc.HTTPBadRequest.code)
-
-                    routes = [{'destination': 'invalid_ip_address',
-                               'nexthop': '10.0.1.3'}]
-
-                    self._update('routers', r['router']['id'],
-                                 {'router': {'routes':
-                                             routes}},
-                                 expected_code=exc.HTTPBadRequest.code)
-
-                    # clean-up
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-    def test_router_update_with_invalid_nexthop_ip(self):
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-                    routes = [{'destination': '127.207.0.0/16',
-                               'nexthop': '300.10.10.4'}]
-
-                    self._update('routers', r['router']['id'],
-                                 {'router': {'routes':
-                                             routes}},
-                                 expected_code=exc.HTTPBadRequest.code)
-
-                    # clean-up
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-    def test_router_update_with_nexthop_is_outside_port_subnet(self):
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-                    routes = [{'destination': '127.207.0.0/16',
-                               'nexthop': '20.10.10.4'}]
-
-                    self._update('routers', r['router']['id'],
-                                 {'router': {'routes':
-                                             routes}},
-                                 expected_code=exc.HTTPBadRequest.code)
-
-                    # clean-up
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-    def test_router_update_on_external_port(self):
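-        # Routes may still be set while the router's only port is the
-        # external gateway port.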
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s:
-                self._set_net_external(s['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-                body = self._show('routers', r['router']['id'])
-                net_id = body['router']['external_gateway_info']['network_id']
-                self.assertEqual(s['subnet']['network_id'], net_id)
-                port_res = self._list_ports(
-                    'json',
-                    200,
-                    s['subnet']['network_id'],
-                    tenant_id=r['router']['tenant_id'],
-                    device_owner=constants.DEVICE_OWNER_ROUTER_GW)
-                port_list = self.deserialize('json', port_res)
-                self.assertEqual(1, len(port_list['ports']))
-
-                routes = [{'destination': '135.207.0.0/16',
-                           'nexthop': '10.0.1.3'}]
-
-                body = self._update('routers', r['router']['id'],
-                                    {'router': {'routes':
-                                                routes}})
-
-                body = self._show('routers', r['router']['id'])
-                self.assertEqual(routes, body['router']['routes'])
-
-                self._remove_external_gateway_from_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-                body = self._show('routers', r['router']['id'])
-                gw_info = body['router']['external_gateway_info']
-                self.assertIsNone(gw_info)
-
-    def test_router_list_with_sort(self):
-        with self.router(name='router1') as router1,\
-                self.router(name='router2') as router2,\
-                self.router(name='router3') as router3:
-            self._test_list_with_sort('router', (router3, router2, router1),
-                                      [('name', 'desc')])
-
-    def test_router_list_with_pagination(self):
-        with self.router(name='router1') as router1,\
-                self.router(name='router2') as router2,\
-                self.router(name='router3') as router3:
-            self._test_list_with_pagination('router',
-                                            (router1, router2, router3),
-                                            ('name', 'asc'), 2, 2)
-
-    def test_router_list_with_pagination_reverse(self):
-        with self.router(name='router1') as router1,\
-                self.router(name='router2') as router2,\
-                self.router(name='router3') as router3:
-            self._test_list_with_pagination_reverse('router',
-                                                    (router1, router2,
-                                                     router3),
-                                                    ('name', 'asc'), 2, 2)
-
-
-class ExtraRouteDBIntTestCase(test_l3.L3NatDBIntTestCase,
-                              ExtraRouteDBTestCaseBase):
-
-    def setUp(self, plugin=None, ext_mgr=None):
-        if not plugin:
-            plugin = ('neutron.tests.unit.extensions.test_extraroute.'
-                      'TestExtraRouteIntPlugin')
-        # for these tests we need to enable overlapping ips
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        cfg.CONF.set_default('max_routes', 3)
-        ext_mgr = ExtraRouteTestExtensionManager()
-        super(test_l3.L3BaseForIntTests, self).setUp(plugin=plugin,
-                                                     ext_mgr=ext_mgr)
-        self.setup_notification_driver()
-
-
-class ExtraRouteDBSepTestCase(test_l3.L3NatDBSepTestCase,
-                              ExtraRouteDBTestCaseBase):
-    def setUp(self):
-        # the plugin without L3 support
-        plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
-        # the L3 service plugin
-        l3_plugin = ('neutron.tests.unit.extensions.test_extraroute.'
-                     'TestExtraRouteL3NatServicePlugin')
-        service_plugins = {'l3_plugin_name': l3_plugin}
-
-        # for these tests we need to enable overlapping ips
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        cfg.CONF.set_default('max_routes', 3)
-        ext_mgr = ExtraRouteTestExtensionManager()
-        super(test_l3.L3BaseForSepTests, self).setUp(
-            plugin=plugin, ext_mgr=ext_mgr,
-            service_plugins=service_plugins)
-
-        self.setup_notification_driver()
diff --git a/neutron/tests/unit/extensions/test_flavors.py b/neutron/tests/unit/extensions/test_flavors.py
deleted file mode 100644 (file)
index bcc1eec..0000000
+++ /dev/null
@@ -1,719 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-
-import copy
-
-import fixtures
-import mock
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-from webob import exc
-
-from neutron.api.v2 import attributes as attr
-from neutron import context
-from neutron.db import api as dbapi
-from neutron.db import flavors_db
-from neutron.db import servicetype_db
-from neutron.extensions import flavors
-from neutron.plugins.common import constants
-from neutron.services.flavors import flavors_plugin
-from neutron.services import provider_configuration as provconf
-from neutron.tests import base
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit.extensions import base as extension
-
-_uuid = uuidutils.generate_uuid
-_get_path = test_base._get_path
-
-_driver = ('neutron.tests.unit.extensions.test_flavors.'
-           'DummyServiceDriver')
-_provider = 'dummy'
-_long_name = 'x' * (attr.NAME_MAX_LEN + 1)
-_long_description = 'x' * (attr.LONG_DESCRIPTION_MAX_LEN + 1)
-
-
-class FlavorExtensionTestCase(extension.ExtensionTestCase):
-
-    def setUp(self):
-        super(FlavorExtensionTestCase, self).setUp()
-        self._setUpExtension(
-            'neutron.services.flavors.flavors_plugin.FlavorsPlugin',
-            constants.FLAVORS, flavors.RESOURCE_ATTRIBUTE_MAP,
-            flavors.Flavors, '', supported_extension_aliases='flavors')
-
-    def test_create_flavor(self):
-        tenant_id = uuidutils.generate_uuid()
-        # Use service_type FLAVORS, since the plugin must be loaded to
-        # validate the service type.
-        data = {'flavor': {'name': 'GOLD',
-                           'service_type': constants.FLAVORS,
-                           'description': 'the best flavor',
-                           'tenant_id': tenant_id,
-                           'enabled': True}}
-
-        expected = copy.deepcopy(data)
-        expected['flavor']['service_profiles'] = []
-
-        instance = self.plugin.return_value
-        instance.create_flavor.return_value = expected['flavor']
-        res = self.api.post(_get_path('flavors', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-
-        instance.create_flavor.assert_called_with(mock.ANY,
-                                                  flavor=expected)
-        res = self.deserialize(res)
-        self.assertIn('flavor', res)
-        self.assertEqual(expected, res)
-
-    def test_create_flavor_invalid_service_type(self):
-        tenant_id = uuidutils.generate_uuid()
-        data = {'flavor': {'name': 'GOLD',
-                           'service_type': 'BROKEN',
-                           'description': 'the best flavor',
-                           'tenant_id': tenant_id,
-                           'enabled': True}}
-        self.api.post(_get_path('flavors', fmt=self.fmt),
-                      self.serialize(data),
-                      content_type='application/%s' % self.fmt,
-                      status=exc.HTTPBadRequest.code)
-
-    def test_create_flavor_too_long_name(self):
-        tenant_id = uuidutils.generate_uuid()
-        data = {'flavor': {'name': _long_name,
-                           'service_type': constants.FLAVORS,
-                           'description': 'the best flavor',
-                           'tenant_id': tenant_id,
-                           'enabled': True}}
-        self.api.post(_get_path('flavors', fmt=self.fmt),
-                      self.serialize(data),
-                      content_type='application/%s' % self.fmt,
-                      status=exc.HTTPBadRequest.code)
-
-    def test_create_flavor_too_long_description(self):
-        tenant_id = uuidutils.generate_uuid()
-        data = {'flavor': {'name': 'GOLD',
-                           'service_type': constants.FLAVORS,
-                           'description': _long_description,
-                           'tenant_id': tenant_id,
-                           'enabled': True}}
-        self.api.post(_get_path('flavors', fmt=self.fmt),
-                      self.serialize(data),
-                      content_type='application/%s' % self.fmt,
-                      status=exc.HTTPBadRequest.code)
-
-    def test_create_flavor_invalid_enabled(self):
-        tenant_id = uuidutils.generate_uuid()
-        data = {'flavor': {'name': 'GOLD',
-                           'service_type': constants.FLAVORS,
-                           'description': 'the best flavor',
-                           'tenant_id': tenant_id,
-                           'enabled': 'BROKEN'}}
-        self.api.post(_get_path('flavors', fmt=self.fmt),
-                      self.serialize(data),
-                      content_type='application/%s' % self.fmt,
-                      status=exc.HTTPBadRequest.code)
-
-    def test_update_flavor(self):
-        flavor_id = 'fake_id'
-        data = {'flavor': {'name': 'GOLD',
-                           'description': 'the best flavor',
-                           'enabled': True}}
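-        # copy.copy() shares the inner 'flavor' dict, so adding
-        # 'service_profiles' below also places it in the request body
-        # serialized from data.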
-        expected = copy.copy(data)
-        expected['flavor']['service_profiles'] = []
-
-        instance = self.plugin.return_value
-        instance.update_flavor.return_value = expected['flavor']
-        res = self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
-                           self.serialize(data),
-                           content_type='application/%s' % self.fmt)
-
-        instance.update_flavor.assert_called_with(mock.ANY,
-                                                  flavor_id,
-                                                  flavor=expected)
-        res = self.deserialize(res)
-        self.assertIn('flavor', res)
-        self.assertEqual(expected, res)
-
-    def test_update_flavor_too_long_name(self):
-        flavor_id = 'fake_id'
-        data = {'flavor': {'name': _long_name,
-                           'description': 'the best flavor',
-                           'enabled': True}}
-        self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
-                     self.serialize(data),
-                     content_type='application/%s' % self.fmt,
-                     status=exc.HTTPBadRequest.code)
-
-    def test_update_flavor_too_long_description(self):
-        flavor_id = 'fake_id'
-        data = {'flavor': {'name': 'GOLD',
-                           'description': _long_description,
-                           'enabled': True}}
-        self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
-                     self.serialize(data),
-                     content_type='application/%s' % self.fmt,
-                     status=exc.HTTPBadRequest.code)
-
-    def test_update_flavor_invalid_enabled(self):
-        flavor_id = 'fake_id'
-        data = {'flavor': {'name': 'GOLD',
-                           'description': 'the best flavor',
-                           'enabled': 'BROKEN'}}
-        self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
-                     self.serialize(data),
-                     content_type='application/%s' % self.fmt,
-                     status=exc.HTTPBadRequest.code)
-
-    def test_delete_flavor(self):
-        flavor_id = 'fake_id'
-        instance = self.plugin.return_value
-        self.api.delete(_get_path('flavors', id=flavor_id, fmt=self.fmt),
-                        content_type='application/%s' % self.fmt)
-
-        instance.delete_flavor.assert_called_with(mock.ANY,
-                                                  flavor_id)
-
-    def test_show_flavor(self):
-        flavor_id = 'fake_id'
-        expected = {'flavor': {'id': flavor_id,
-                               'name': 'GOLD',
-                               'description': 'the best flavor',
-                               'enabled': True,
-                               'service_profiles': ['profile-1']}}
-        instance = self.plugin.return_value
-        instance.get_flavor.return_value = expected['flavor']
-        res = self.api.get(_get_path('flavors', id=flavor_id, fmt=self.fmt))
-        instance.get_flavor.assert_called_with(mock.ANY,
-                                               flavor_id,
-                                               fields=mock.ANY)
-        res = self.deserialize(res)
-        self.assertEqual(expected, res)
-
-    def test_get_flavors(self):
-        data = {'flavors': [{'id': 'id1',
-                             'name': 'GOLD',
-                             'description': 'the best flavor',
-                             'enabled': True,
-                             'service_profiles': ['profile-1']},
-                            {'id': 'id2',
-                             'name': 'GOLD',
-                             'description': 'the best flavor',
-                             'enabled': True,
-                             'service_profiles': ['profile-2', 'profile-1']}]}
-        instance = self.plugin.return_value
-        instance.get_flavors.return_value = data['flavors']
-        res = self.api.get(_get_path('flavors', fmt=self.fmt))
-        instance.get_flavors.assert_called_with(mock.ANY,
-                                                fields=mock.ANY,
-                                                filters=mock.ANY)
-        res = self.deserialize(res)
-        self.assertEqual(data, res)
-
-    def test_create_service_profile(self):
-        tenant_id = uuidutils.generate_uuid()
-        expected = {'service_profile': {'description': 'the best sp',
-                                        'driver': '',
-                                        'tenant_id': tenant_id,
-                                        'enabled': True,
-                                        'metainfo': '{"data": "value"}'}}
-
-        instance = self.plugin.return_value
-        instance.create_service_profile.return_value = (
-            expected['service_profile'])
-        res = self.api.post(_get_path('service_profiles', fmt=self.fmt),
-                            self.serialize(expected),
-                            content_type='application/%s' % self.fmt)
-        instance.create_service_profile.assert_called_with(
-            mock.ANY,
-            service_profile=expected)
-        res = self.deserialize(res)
-        self.assertIn('service_profile', res)
-        self.assertEqual(expected, res)
-
-    def test_create_service_profile_too_long_description(self):
-        tenant_id = uuidutils.generate_uuid()
-        expected = {'service_profile': {'description': _long_description,
-                                        'driver': '',
-                                        'tenant_id': tenant_id,
-                                        'enabled': True,
-                                        'metainfo': '{"data": "value"}'}}
-        self.api.post(_get_path('service_profiles', fmt=self.fmt),
-                      self.serialize(expected),
-                      content_type='application/%s' % self.fmt,
-                      status=exc.HTTPBadRequest.code)
-
-    def test_create_service_profile_too_long_driver(self):
-        tenant_id = uuidutils.generate_uuid()
-        expected = {'service_profile': {'description': 'the best sp',
-                                        'driver': _long_description,
-                                        'tenant_id': tenant_id,
-                                        'enabled': True,
-                                        'metainfo': '{"data": "value"}'}}
-        self.api.post(_get_path('service_profiles', fmt=self.fmt),
-                      self.serialize(expected),
-                      content_type='application/%s' % self.fmt,
-                      status=exc.HTTPBadRequest.code)
-
-    def test_create_service_profile_invalid_enabled(self):
-        tenant_id = uuidutils.generate_uuid()
-        expected = {'service_profile': {'description': 'the best sp',
-                                        'driver': '',
-                                        'tenant_id': tenant_id,
-                                        'enabled': 'BROKEN',
-                                        'metainfo': '{"data": "value"}'}}
-        self.api.post(_get_path('service_profiles', fmt=self.fmt),
-                      self.serialize(expected),
-                      content_type='application/%s' % self.fmt,
-                      status=exc.HTTPBadRequest.code)
-
-    def test_update_service_profile(self):
-        sp_id = "fake_id"
-        expected = {'service_profile': {'description': 'the best sp',
-                                        'enabled': False,
-                                        'metainfo': '{"data1": "value3"}'}}
-
-        instance = self.plugin.return_value
-        instance.update_service_profile.return_value = (
-            expected['service_profile'])
-        res = self.api.put(_get_path('service_profiles',
-                                     id=sp_id, fmt=self.fmt),
-                           self.serialize(expected),
-                           content_type='application/%s' % self.fmt)
-
-        instance.update_service_profile.assert_called_with(
-            mock.ANY,
-            sp_id,
-            service_profile=expected)
-        res = self.deserialize(res)
-        self.assertIn('service_profile', res)
-        self.assertEqual(expected, res)
-
-    def test_update_service_profile_too_long_description(self):
-        sp_id = "fake_id"
-        expected = {'service_profile': {'description': _long_description,
-                                        'enabled': True,
-                                        'metainfo': '{"data1": "value3"}'}}
-        self.api.put(_get_path('service_profiles',
-                               id=sp_id, fmt=self.fmt),
-                     self.serialize(expected),
-                     content_type='application/%s' % self.fmt,
-                     status=exc.HTTPBadRequest.code)
-
-    def test_update_service_profile_invalid_enabled(self):
-        sp_id = "fake_id"
-        expected = {'service_profile': {'description': 'the best sp',
-                                        'enabled': 'BROKEN',
-                                        'metainfo': '{"data1": "value3"}'}}
-        self.api.put(_get_path('service_profiles',
-                               id=sp_id, fmt=self.fmt),
-                     self.serialize(expected),
-                     content_type='application/%s' % self.fmt,
-                     status=exc.HTTPBadRequest.code)
-
-    def test_delete_service_profile(self):
-        sp_id = 'fake_id'
-        instance = self.plugin.return_value
-        self.api.delete(_get_path('service_profiles', id=sp_id, fmt=self.fmt),
-                        content_type='application/%s' % self.fmt)
-        instance.delete_service_profile.assert_called_with(mock.ANY,
-                                                           sp_id)
-
-    def test_show_service_profile(self):
-        sp_id = 'fake_id'
-        expected = {'service_profile': {'id': 'id1',
-                                        'driver': _driver,
-                                        'description': 'desc',
-                                        'metainfo': '{}',
-                                        'enabled': True}}
-        instance = self.plugin.return_value
-        instance.get_service_profile.return_value = (
-            expected['service_profile'])
-        res = self.api.get(_get_path('service_profiles',
-                                     id=sp_id, fmt=self.fmt))
-        instance.get_service_profile.assert_called_with(mock.ANY,
-                                                        sp_id,
-                                                        fields=mock.ANY)
-        res = self.deserialize(res)
-        self.assertEqual(expected, res)
-
-    def test_get_service_profiles(self):
-        expected = {'service_profiles': [{'id': 'id1',
-                                          'driver': _driver,
-                                          'description': 'desc',
-                                          'metainfo': '{}',
-                                          'enabled': True},
-                                         {'id': 'id2',
-                                          'driver': _driver,
-                                          'description': 'desc',
-                                          'metainfo': '{}',
-                                          'enabled': True}]}
-        instance = self.plugin.return_value
-        instance.get_service_profiles.return_value = (
-            expected['service_profiles'])
-        res = self.api.get(_get_path('service_profiles', fmt=self.fmt))
-        instance.get_service_profiles.assert_called_with(mock.ANY,
-                                                         fields=mock.ANY,
-                                                         filters=mock.ANY)
-        res = self.deserialize(res)
-        self.assertEqual(expected, res)
-
-    def test_associate_service_profile_with_flavor(self):
-        tenant_id = uuidutils.generate_uuid()
-        expected = {'service_profile': {'id': _uuid(),
-                                        'tenant_id': tenant_id}}
-        instance = self.plugin.return_value
-        instance.create_flavor_service_profile.return_value = (
-            expected['service_profile'])
-        res = self.api.post('/flavors/fl_id/service_profiles',
-                            self.serialize(expected),
-                            content_type='application/%s' % self.fmt)
-        instance.create_flavor_service_profile.assert_called_with(
-            mock.ANY, service_profile=expected, flavor_id='fl_id')
-        res = self.deserialize(res)
-        self.assertEqual(expected, res)
-
-    def test_disassociate_service_profile_with_flavor(self):
-        instance = self.plugin.return_value
-        instance.delete_flavor_service_profile.return_value = None
-        self.api.delete('/flavors/fl_id/service_profiles/%s' % 'fake_spid',
-                        content_type='application/%s' % self.fmt)
-        instance.delete_flavor_service_profile.assert_called_with(
-            mock.ANY,
-            'fake_spid',
-            flavor_id='fl_id')
-
-    def test_update_association_error(self):
-        """Confirm that update is not permitted with user error."""
-        new_id = uuidutils.generate_uuid()
-        data = {'service_profile': {'id': new_id}}
-        self.api.put('/flavors/fl_id/service_profiles/%s' % 'fake_spid',
-                     self.serialize(data),
-                     content_type='application/%s' % self.fmt,
-                     status=exc.HTTPBadRequest.code)
-
-
-class DummyCorePlugin(object):
-    pass
-
-
-class DummyServicePlugin(object):
-
-    def driver_loaded(self, driver, service_profile):
-        pass
-
-    def get_plugin_type(self):
-        return constants.DUMMY
-
-    def get_plugin_description(self):
-        return "Dummy service plugin, aware of flavors"
-
-
-class DummyServiceDriver(object):
-
-    @staticmethod
-    def get_service_type():
-        return constants.DUMMY
-
-    def __init__(self, plugin):
-        pass
-
-
-class FlavorPluginTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
-                           base.PluginFixture):
-    def setUp(self):
-        super(FlavorPluginTestCase, self).setUp()
-
-        self.config_parse()
-        cfg.CONF.set_override(
-            'core_plugin',
-            'neutron.tests.unit.extensions.test_flavors.DummyCorePlugin')
-        cfg.CONF.set_override(
-            'service_plugins',
-            ['neutron.tests.unit.extensions.test_flavors.DummyServicePlugin'])
-
-        self.useFixture(
-            fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance'))
-
-        self.plugin = flavors_plugin.FlavorsPlugin()
-        self.ctx = context.get_admin_context()
-
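-        # Provider entries use the '<service_type>:<provider>:<driver>'
-        # format parsed by the service type manager below.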
-        providers = [DummyServiceDriver.get_service_type() +
-                     ":" + _provider + ":" + _driver]
-        self.service_manager = servicetype_db.ServiceTypeManager.get_instance()
-        self.service_providers = mock.patch.object(
-            provconf.NeutronModule, 'service_providers').start()
-        self.service_providers.return_value = providers
-        for provider in providers:
-            self.service_manager.add_provider_configuration(
-                provider.split(':')[0], provconf.ProviderConfiguration())
-
-        dbapi.get_engine()
-
-    def _create_flavor(self, description=None):
-        flavor = {'flavor': {'name': 'GOLD',
-                             'service_type': constants.DUMMY,
-                             'description': description or 'the best flavor',
-                             'enabled': True}}
-        return self.plugin.create_flavor(self.ctx, flavor), flavor
-
-    def test_create_flavor(self):
-        self._create_flavor()
-        res = self.ctx.session.query(flavors_db.Flavor).all()
-        self.assertEqual(1, len(res))
-        self.assertEqual('GOLD', res[0]['name'])
-        self.assertEqual(constants.DUMMY, res[0]['service_type'])
-
-    def test_update_flavor(self):
-        fl, flavor = self._create_flavor()
-        flavor = {'flavor': {'name': 'Silver',
-                             'enabled': False}}
-        self.plugin.update_flavor(self.ctx, fl['id'], flavor)
-        res = (self.ctx.session.query(flavors_db.Flavor).
-               filter_by(id=fl['id']).one())
-        self.assertEqual('Silver', res['name'])
-        self.assertFalse(res['enabled'])
-
-    def test_delete_flavor(self):
-        fl, data = self._create_flavor()
-        self.plugin.delete_flavor(self.ctx, fl['id'])
-        res = (self.ctx.session.query(flavors_db.Flavor).all())
-        self.assertFalse(res)
-
-    def test_show_flavor(self):
-        fl, data = self._create_flavor()
-        show_fl = self.plugin.get_flavor(self.ctx, fl['id'])
-        self.assertEqual(fl, show_fl)
-
-    def test_get_flavors(self):
-        fl, flavor = self._create_flavor()
-        flavor['flavor']['name'] = 'SILVER'
-        self.plugin.create_flavor(self.ctx, flavor)
-        show_fl = self.plugin.get_flavors(self.ctx)
-        self.assertEqual(2, len(show_fl))
-
-    def _create_service_profile(self, description=None):
-        data = {'service_profile':
-                {'description': description or 'the best sp',
-                 'driver': _driver,
-                 'enabled': True,
-                 'metainfo': '{"data": "value"}'}}
-        sp = self.plugin.create_service_profile(self.ctx,
-                                                data)
-        return sp, data
-
-    def test_create_service_profile(self):
-        sp, data = self._create_service_profile()
-        res = (self.ctx.session.query(flavors_db.ServiceProfile).
-               filter_by(id=sp['id']).one())
-        self.assertEqual(data['service_profile']['driver'], res['driver'])
-        self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
-
-    def test_create_service_profile_empty_driver(self):
-        data = {'service_profile':
-                {'description': 'the best sp',
-                 'driver': '',
-                 'enabled': True,
-                 'metainfo': '{"data": "value"}'}}
-        sp = self.plugin.create_service_profile(self.ctx,
-                                                data)
-        res = (self.ctx.session.query(flavors_db.ServiceProfile).
-               filter_by(id=sp['id']).one())
-        self.assertEqual(data['service_profile']['driver'], res['driver'])
-        self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
-
-    def test_create_service_profile_invalid_driver(self):
-        data = {'service_profile':
-                {'description': 'the best sp',
-                 'driver': "Broken",
-                 'enabled': True,
-                 'metainfo': '{"data": "value"}'}}
-        self.assertRaises(flavors.ServiceProfileDriverNotFound,
-                          self.plugin.create_service_profile,
-                          self.ctx,
-                          data)
-
-    def test_create_service_profile_invalid_empty(self):
-        data = {'service_profile':
-                {'description': '',
-                 'driver': '',
-                 'enabled': True,
-                 'metainfo': ''}}
-        self.assertRaises(flavors.ServiceProfileEmpty,
-                          self.plugin.create_service_profile,
-                          self.ctx,
-                          data)
-
-    def test_update_service_profile(self):
-        sp, data = self._create_service_profile()
-        data['service_profile']['metainfo'] = '{"data": "value1"}'
-        sp = self.plugin.update_service_profile(self.ctx, sp['id'],
-                                                data)
-        res = (self.ctx.session.query(flavors_db.ServiceProfile).
-               filter_by(id=sp['id']).one())
-        self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
-
-    def test_delete_service_profile(self):
-        sp, data = self._create_service_profile()
-        self.plugin.delete_service_profile(self.ctx, sp['id'])
-        res = self.ctx.session.query(flavors_db.ServiceProfile).all()
-        self.assertFalse(res)
-
-    def test_show_service_profile(self):
-        sp, data = self._create_service_profile()
-        sp_show = self.plugin.get_service_profile(self.ctx, sp['id'])
-        self.assertEqual(sp, sp_show)
-
-    def test_get_service_profiles(self):
-        self._create_service_profile()
-        self._create_service_profile(description='another sp')
-        self.assertEqual(2, len(self.plugin.get_service_profiles(self.ctx)))
-
-    def test_associate_service_profile_with_flavor(self):
-        sp, data = self._create_service_profile()
-        fl, data = self._create_flavor()
-        self.plugin.create_flavor_service_profile(
-            self.ctx,
-            {'service_profile': {'id': sp['id']}},
-            fl['id'])
-        binding = (
-            self.ctx.session.query(flavors_db.FlavorServiceProfileBinding).
-            first())
-        self.assertEqual(fl['id'], binding['flavor_id'])
-        self.assertEqual(sp['id'], binding['service_profile_id'])
-
-        res = self.plugin.get_flavor(self.ctx, fl['id'])
-        self.assertEqual(1, len(res['service_profiles']))
-        self.assertEqual(sp['id'], res['service_profiles'][0])
-
-        res = self.plugin.get_service_profile(self.ctx, sp['id'])
-        self.assertEqual(1, len(res['flavors']))
-        self.assertEqual(fl['id'], res['flavors'][0])
-
-    def test_autodelete_flavor_associations(self):
-        sp, data = self._create_service_profile()
-        fl, data = self._create_flavor()
-        self.plugin.create_flavor_service_profile(
-            self.ctx,
-            {'service_profile': {'id': sp['id']}},
-            fl['id'])
-        self.plugin.delete_flavor(self.ctx, fl['id'])
-        binding = (
-            self.ctx.session.query(flavors_db.FlavorServiceProfileBinding).
-            first())
-        self.assertIsNone(binding)
-
-    def test_associate_service_profile_with_flavor_exists(self):
-        sp, data = self._create_service_profile()
-        fl, data = self._create_flavor()
-        self.plugin.create_flavor_service_profile(
-            self.ctx,
-            {'service_profile': {'id': sp['id']}},
-            fl['id'])
-        self.assertRaises(flavors.FlavorServiceProfileBindingExists,
-                          self.plugin.create_flavor_service_profile,
-                          self.ctx,
-                          {'service_profile': {'id': sp['id']}},
-                          fl['id'])
-
-    def test_disassociate_service_profile_with_flavor(self):
-        sp, data = self._create_service_profile()
-        fl, data = self._create_flavor()
-        self.plugin.create_flavor_service_profile(
-            self.ctx,
-            {'service_profile': {'id': sp['id']}},
-            fl['id'])
-        self.plugin.delete_flavor_service_profile(
-            self.ctx, sp['id'], fl['id'])
-        binding = (
-            self.ctx.session.query(flavors_db.FlavorServiceProfileBinding).
-            first())
-        self.assertIsNone(binding)
-
-        self.assertRaises(
-            flavors.FlavorServiceProfileBindingNotFound,
-            self.plugin.delete_flavor_service_profile,
-            self.ctx, sp['id'], fl['id'])
-
-    def test_delete_service_profile_in_use(self):
-        sp, data = self._create_service_profile()
-        fl, data = self._create_flavor()
-        self.plugin.create_flavor_service_profile(
-            self.ctx,
-            {'service_profile': {'id': sp['id']}},
-            fl['id'])
-        self.assertRaises(
-            flavors.ServiceProfileInUse,
-            self.plugin.delete_service_profile,
-            self.ctx,
-            sp['id'])
-
-    def test_get_flavor_next_provider_no_binding(self):
-        fl, data = self._create_flavor()
-        self.assertRaises(
-            flavors.FlavorServiceProfileBindingNotFound,
-            self.plugin.get_flavor_next_provider,
-            self.ctx,
-            fl['id'])
-
-    def test_get_flavor_next_provider_disabled(self):
-        data = {'service_profile':
-                {'description': 'the best sp',
-                 'driver': _driver,
-                 'enabled': False,
-                 'metainfo': '{"data": "value"}'}}
-        sp = self.plugin.create_service_profile(self.ctx,
-                                                data)
-        fl, data = self._create_flavor()
-        self.plugin.create_flavor_service_profile(
-            self.ctx,
-            {'service_profile': {'id': sp['id']}},
-            fl['id'])
-        self.assertRaises(
-            flavors.ServiceProfileDisabled,
-            self.plugin.get_flavor_next_provider,
-            self.ctx,
-            fl['id'])
-
-    def test_get_flavor_next_provider_no_driver(self):
-        data = {'service_profile':
-                {'description': 'the best sp',
-                 'driver': '',
-                 'enabled': True,
-                 'metainfo': '{"data": "value"}'}}
-        sp = self.plugin.create_service_profile(self.ctx,
-                                                data)
-        fl, data = self._create_flavor()
-        self.plugin.create_flavor_service_profile(
-            self.ctx,
-            {'service_profile': {'id': sp['id']}},
-            fl['id'])
-        self.assertRaises(
-            flavors.ServiceProfileDriverNotFound,
-            self.plugin.get_flavor_next_provider,
-            self.ctx,
-            fl['id'])
-
-    def test_get_flavor_next_provider(self):
-        sp, data = self._create_service_profile()
-        fl, data = self._create_flavor()
-        self.plugin.create_flavor_service_profile(
-            self.ctx,
-            {'service_profile': {'id': sp['id']}},
-            fl['id'])
-        providers = self.plugin.get_flavor_next_provider(
-            self.ctx,
-            fl['id'])
-        self.assertEqual(_provider, providers[0].get('provider', None))
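-
-    # A minimal sketch (inferred only from setUp above, not from the
-    # plugin's documented contract) of the provider entry format this suite
-    # registers: "<service_type>:<provider>:<driver>". The helper below is
-    # hypothetical and simply restates the split used when adding provider
-    # configurations.
-    def _split_provider_entry(self, entry):
-        # setUp builds each entry as
-        # DummyServiceDriver.get_service_type() + ":" + _provider
-        # + ":" + _driver, so splitting on the first two colons
-        # recovers the three components.
-        service_type, provider, driver = entry.split(':', 2)
-        return service_type, provider, driver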
diff --git a/neutron/tests/unit/extensions/test_l3.py b/neutron/tests/unit/extensions/test_l3.py
deleted file mode 100644 (file)
index 9bdaacc..0000000
+++ /dev/null
@@ -1,3042 +0,0 @@
-# Copyright 2012 VMware, Inc.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import contextlib
-import copy
-
-import mock
-import netaddr
-from oslo_config import cfg
-from oslo_utils import importutils
-from oslo_utils import uuidutils
-from webob import exc
-
-from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
-from neutron.api.rpc.handlers import l3_rpc
-from neutron.api.v2 import attributes
-from neutron.callbacks import events
-from neutron.callbacks import exceptions
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-from neutron.common import constants as l3_constants
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.db import common_db_mixin
-from neutron.db import db_base_plugin_v2
-from neutron.db import external_net_db
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_attrs_db
-from neutron.db import l3_db
-from neutron.db import l3_dvr_db
-from neutron.db import l3_dvrscheduler_db
-from neutron.extensions import external_net
-from neutron.extensions import l3
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.common import constants as service_constants
-from neutron.tests import base
-from neutron.tests.common import helpers
-from neutron.tests import fake_notifier
-from neutron.tests.unit.api import test_extensions
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit.extensions import base as test_extensions_base
-from neutron.tests.unit.extensions import test_agent
-from neutron.tests.unit.plugins.ml2 import base as ml2_base
-
-
-_uuid = uuidutils.generate_uuid
-_get_path = test_base._get_path
-
-
-DEVICE_OWNER_COMPUTE = l3_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-
-
-class L3TestExtensionManager(object):
-
-    def get_resources(self):
-        # Add the resources to the global attribute map.
-        # This is done here because the setup process won't
-        # initialize the main API router, which extends
-        # the global attribute map.
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            l3.RESOURCE_ATTRIBUTE_MAP)
-        return l3.L3.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class L3NatExtensionTestCase(test_extensions_base.ExtensionTestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        super(L3NatExtensionTestCase, self).setUp()
-        self._setUpExtension(
-            'neutron.extensions.l3.RouterPluginBase', None,
-            l3.RESOURCE_ATTRIBUTE_MAP, l3.L3, '',
-            allow_pagination=True, allow_sorting=True,
-            supported_extension_aliases=['router'],
-            use_quota=True)
-
-    def test_router_create(self):
-        router_id = _uuid()
-        data = {'router': {'name': 'router1', 'admin_state_up': True,
-                           'tenant_id': _uuid(),
-                           'external_gateway_info': None}}
-        return_value = copy.deepcopy(data['router'])
-        return_value.update({'status': "ACTIVE", 'id': router_id})
-
-        instance = self.plugin.return_value
-        instance.create_router.return_value = return_value
-        instance.get_routers_count.return_value = 0
-        res = self.api.post(_get_path('routers', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        instance.create_router.assert_called_with(mock.ANY,
-                                                  router=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('router', res)
-        router = res['router']
-        self.assertEqual(router['id'], router_id)
-        self.assertEqual(router['status'], "ACTIVE")
-        self.assertTrue(router['admin_state_up'])
-
-    def test_router_list(self):
-        router_id = _uuid()
-        return_value = [{'name': 'router1', 'admin_state_up': True,
-                         'tenant_id': _uuid(), 'id': router_id}]
-
-        instance = self.plugin.return_value
-        instance.get_routers.return_value = return_value
-
-        res = self.api.get(_get_path('routers', fmt=self.fmt))
-
-        instance.get_routers.assert_called_with(mock.ANY, fields=mock.ANY,
-                                                filters=mock.ANY,
-                                                sorts=mock.ANY,
-                                                limit=mock.ANY,
-                                                marker=mock.ANY,
-                                                page_reverse=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('routers', res)
-        self.assertEqual(1, len(res['routers']))
-        self.assertEqual(router_id, res['routers'][0]['id'])
-
-    def test_router_update(self):
-        router_id = _uuid()
-        update_data = {'router': {'admin_state_up': False}}
-        return_value = {'name': 'router1', 'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE", 'id': router_id}
-
-        instance = self.plugin.return_value
-        instance.update_router.return_value = return_value
-
-        res = self.api.put(_get_path('routers', id=router_id,
-                                     fmt=self.fmt),
-                           self.serialize(update_data))
-
-        instance.update_router.assert_called_with(mock.ANY, router_id,
-                                                  router=update_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('router', res)
-        router = res['router']
-        self.assertEqual(router['id'], router_id)
-        self.assertEqual(router['status'], "ACTIVE")
-        self.assertFalse(router['admin_state_up'])
-
-    def test_router_get(self):
-        router_id = _uuid()
-        return_value = {'name': 'router1', 'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE", 'id': router_id}
-
-        instance = self.plugin.return_value
-        instance.get_router.return_value = return_value
-
-        res = self.api.get(_get_path('routers', id=router_id,
-                                     fmt=self.fmt))
-
-        instance.get_router.assert_called_with(mock.ANY, router_id,
-                                               fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('router', res)
-        router = res['router']
-        self.assertEqual(router['id'], router_id)
-        self.assertEqual(router['status'], "ACTIVE")
-        self.assertFalse(router['admin_state_up'])
-
-    def test_router_delete(self):
-        router_id = _uuid()
-
-        res = self.api.delete(_get_path('routers', id=router_id))
-
-        instance = self.plugin.return_value
-        instance.delete_router.assert_called_with(mock.ANY, router_id)
-        self.assertEqual(res.status_int, exc.HTTPNoContent.code)
-
-    def test_router_add_interface(self):
-        router_id = _uuid()
-        subnet_id = _uuid()
-        port_id = _uuid()
-
-        interface_data = {'subnet_id': subnet_id}
-        return_value = copy.deepcopy(interface_data)
-        return_value['port_id'] = port_id
-
-        instance = self.plugin.return_value
-        instance.add_router_interface.return_value = return_value
-
-        path = _get_path('routers', id=router_id,
-                         action="add_router_interface",
-                         fmt=self.fmt)
-        res = self.api.put(path, self.serialize(interface_data))
-
-        instance.add_router_interface.assert_called_with(mock.ANY, router_id,
-                                                         interface_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('port_id', res)
-        self.assertEqual(res['port_id'], port_id)
-        self.assertEqual(res['subnet_id'], subnet_id)
-
-
-# This base plugin class is for tests.
-class TestL3NatBasePlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                          external_net_db.External_net_db_mixin):
-
-    __native_pagination_support = True
-    __native_sorting_support = True
-
-    def create_network(self, context, network):
-        session = context.session
-        with session.begin(subtransactions=True):
-            net = super(TestL3NatBasePlugin, self).create_network(context,
-                                                                  network)
-            self._process_l3_create(context, net, network['network'])
-        return net
-
-    def update_network(self, context, id, network):
-
-        session = context.session
-        with session.begin(subtransactions=True):
-            net = super(TestL3NatBasePlugin, self).update_network(context, id,
-                                                                  network)
-            self._process_l3_update(context, net, network['network'])
-        return net
-
-    def delete_network(self, context, id):
-        with context.session.begin(subtransactions=True):
-            self._process_l3_delete(context, id)
-            super(TestL3NatBasePlugin, self).delete_network(context, id)
-
-    def delete_port(self, context, id, l3_port_check=True):
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        if plugin:
-            if l3_port_check:
-                plugin.prevent_l3_port_deletion(context, id)
-            plugin.disassociate_floatingips(context, id)
-        return super(TestL3NatBasePlugin, self).delete_port(context, id)
-
-
-# This plugin class is for tests with a plugin that integrates L3.
-class TestL3NatIntPlugin(TestL3NatBasePlugin,
-                         l3_db.L3_NAT_db_mixin):
-
-    supported_extension_aliases = ["external-net", "router"]
-
-
-# This plugin class is for tests with a plugin that integrates L3 and L3
-# agent scheduling.
-class TestL3NatIntAgentSchedulingPlugin(TestL3NatIntPlugin,
-                                        l3_agentschedulers_db.
-                                        L3AgentSchedulerDbMixin):
-
-    supported_extension_aliases = ["external-net", "router",
-                                   "l3_agent_scheduler"]
-    router_scheduler = importutils.import_object(
-        cfg.CONF.router_scheduler_driver)
-
-
-# This plugin class is for tests with a plugin that does not support L3.
-class TestNoL3NatPlugin(TestL3NatBasePlugin):
-
-    __native_pagination_support = True
-    __native_sorting_support = True
-
-    supported_extension_aliases = ["external-net"]
-
-
-# An L3 routing service plugin class for tests with plugins that
-# delegate away L3 routing functionality
-class TestL3NatServicePlugin(common_db_mixin.CommonDbMixin,
-                             l3_dvr_db.L3_NAT_with_dvr_db_mixin,
-                             l3_db.L3_NAT_db_mixin):
-
-    supported_extension_aliases = ["router"]
-
-    def get_plugin_type(self):
-        return service_constants.L3_ROUTER_NAT
-
-    def get_plugin_description(self):
-        return "L3 Routing Service Plugin for testing"
-
-
-# An L3 routing service plugin class with L3 agent scheduling, for tests
-# with plugins that delegate away L3 routing functionality
-class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin,
-                                            l3_dvrscheduler_db.
-                                            L3_DVRsch_db_mixin):
-
-    supported_extension_aliases = ["router", "l3_agent_scheduler"]
-
-    def __init__(self):
-        super(TestL3NatAgentSchedulingServicePlugin, self).__init__()
-        self.router_scheduler = importutils.import_object(
-            cfg.CONF.router_scheduler_driver)
-        self.agent_notifiers.update(
-            {l3_constants.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
-
-
-class L3NatTestCaseMixin(object):
-
-    def _create_router(self, fmt, tenant_id, name=None,
-                       admin_state_up=None, set_context=False,
-                       arg_list=None, **kwargs):
-        data = {'router': {'tenant_id': tenant_id}}
-        if name:
-            data['router']['name'] = name
-        if admin_state_up:
-            data['router']['admin_state_up'] = admin_state_up
-        for arg in (('admin_state_up', 'tenant_id', 'availability_zone_hints')
-                    + (arg_list or ())):
-            # Only send args that were explicitly passed in
-            if arg in kwargs:
-                data['router'][arg] = kwargs[arg]
-        router_req = self.new_create_request('routers', data, fmt)
-        if set_context and tenant_id:
-            # create a specific auth context for this request
-            router_req.environ['neutron.context'] = context.Context(
-                '', tenant_id)
-
-        return router_req.get_response(self.ext_api)
-
-    def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None,
-                     external_gateway_info=None, set_context=False,
-                     arg_list=None, **kwargs):
-        if external_gateway_info:
-            arg_list = ('external_gateway_info', ) + (arg_list or ())
-        res = self._create_router(fmt, tenant_id, name,
-                                  admin_state_up, set_context,
-                                  arg_list=arg_list,
-                                  external_gateway_info=external_gateway_info,
-                                  **kwargs)
-        return self.deserialize(fmt, res)
-
-    def _add_external_gateway_to_router(self, router_id, network_id,
-                                        expected_code=exc.HTTPOk.code,
-                                        neutron_context=None, ext_ips=None):
-        ext_ips = ext_ips or []
-        body = {'router':
-                {'external_gateway_info': {'network_id': network_id}}}
-        if ext_ips:
-            body['router']['external_gateway_info'][
-                'external_fixed_ips'] = ext_ips
-        return self._update('routers', router_id, body,
-                            expected_code=expected_code,
-                            neutron_context=neutron_context)
-
-    def _remove_external_gateway_from_router(self, router_id, network_id,
-                                             expected_code=exc.HTTPOk.code,
-                                             external_gw_info=None):
-        return self._update('routers', router_id,
-                            {'router': {'external_gateway_info':
-                                        external_gw_info}},
-                            expected_code=expected_code)
-
-    def _router_interface_action(self, action, router_id, subnet_id, port_id,
-                                 expected_code=exc.HTTPOk.code,
-                                 expected_body=None,
-                                 tenant_id=None,
-                                 msg=None):
-        interface_data = {}
-        if subnet_id:
-            interface_data.update({'subnet_id': subnet_id})
-        if port_id:
-            interface_data.update({'port_id': port_id})
-
-        req = self.new_action_request('routers', interface_data, router_id,
-                                      "%s_router_interface" % action)
-        # if tenant_id was specified, create a tenant context for this request
-        if tenant_id:
-            req.environ['neutron.context'] = context.Context(
-                '', tenant_id)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, expected_code, msg)
-        response = self.deserialize(self.fmt, res)
-        if expected_body:
-            self.assertEqual(response, expected_body, msg)
-        return response
-
-    @contextlib.contextmanager
-    def router(self, name='router1', admin_state_up=True,
-               fmt=None, tenant_id=_uuid(),
-               external_gateway_info=None, set_context=False,
-               **kwargs):
-        router = self._make_router(fmt or self.fmt, tenant_id, name,
-                                   admin_state_up, external_gateway_info,
-                                   set_context, **kwargs)
-        yield router
-
-    def _set_net_external(self, net_id):
-        self._update('networks', net_id,
-                     {'network': {external_net.EXTERNAL: True}})
-
-    def _create_floatingip(self, fmt, network_id, port_id=None,
-                           fixed_ip=None, set_context=False,
-                           floating_ip=None, subnet_id=False):
-        data = {'floatingip': {'floating_network_id': network_id,
-                               'tenant_id': self._tenant_id}}
-        if port_id:
-            data['floatingip']['port_id'] = port_id
-            if fixed_ip:
-                data['floatingip']['fixed_ip_address'] = fixed_ip
-
-        if floating_ip:
-            data['floatingip']['floating_ip_address'] = floating_ip
-
-        if subnet_id:
-            data['floatingip']['subnet_id'] = subnet_id
-        floatingip_req = self.new_create_request('floatingips', data, fmt)
-        if set_context and self._tenant_id:
-            # create a specific auth context for this request
-            floatingip_req.environ['neutron.context'] = context.Context(
-                '', self._tenant_id)
-        return floatingip_req.get_response(self.ext_api)
-
-    def _make_floatingip(self, fmt, network_id, port_id=None,
-                         fixed_ip=None, set_context=False, floating_ip=None,
-                         http_status=exc.HTTPCreated.code):
-        res = self._create_floatingip(fmt, network_id, port_id,
-                                      fixed_ip, set_context, floating_ip)
-        self.assertEqual(res.status_int, http_status)
-        return self.deserialize(fmt, res)
-
-    def _validate_floating_ip(self, fip):
-        body = self._list('floatingips')
-        self.assertEqual(len(body['floatingips']), 1)
-        self.assertEqual(body['floatingips'][0]['id'],
-                         fip['floatingip']['id'])
-
-        body = self._show('floatingips', fip['floatingip']['id'])
-        self.assertEqual(body['floatingip']['id'],
-                         fip['floatingip']['id'])
-
-    @contextlib.contextmanager
-    def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None,
-                              set_context=False):
-        with self.subnet(cidr='11.0.0.0/24') as public_sub:
-            self._set_net_external(public_sub['subnet']['network_id'])
-            private_port = None
-            if port_id:
-                private_port = self._show('ports', port_id)
-            with test_db_base_plugin_v2.optional_ctx(private_port,
-                                             self.port) as private_port:
-                with self.router() as r:
-                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
-                    private_sub = {'subnet': {'id': sid}}
-                    floatingip = None
-
-                    self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        public_sub['subnet']['network_id'])
-                    self._router_interface_action(
-                        'add', r['router']['id'],
-                        private_sub['subnet']['id'], None)
-
-                    floatingip = self._make_floatingip(
-                        fmt or self.fmt,
-                        public_sub['subnet']['network_id'],
-                        port_id=private_port['port']['id'],
-                        fixed_ip=fixed_ip,
-                        set_context=set_context)
-                    yield floatingip
-
-                    if floatingip:
-                        self._delete('floatingips',
-                                     floatingip['floatingip']['id'])
-
-    @contextlib.contextmanager
-    def floatingip_no_assoc_with_public_sub(
-        self, private_sub, fmt=None, set_context=False, public_sub=None):
-        self._set_net_external(public_sub['subnet']['network_id'])
-        with self.router() as r:
-            floatingip = None
-
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                public_sub['subnet']['network_id'])
-            self._router_interface_action('add', r['router']['id'],
-                                          private_sub['subnet']['id'],
-                                          None)
-
-            floatingip = self._make_floatingip(
-                fmt or self.fmt,
-                public_sub['subnet']['network_id'],
-                set_context=set_context)
-            yield floatingip, r
-
-            if floatingip:
-                self._delete('floatingips',
-                             floatingip['floatingip']['id'])
-
-    @contextlib.contextmanager
-    def floatingip_no_assoc(self, private_sub, fmt=None, set_context=False):
-        with self.subnet(cidr='12.0.0.0/24') as public_sub:
-            with self.floatingip_no_assoc_with_public_sub(
-                private_sub, fmt, set_context, public_sub) as (f, r):
-                # Yield only the floating ip object
-                yield f
-
-
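-# A condensed sketch of how the helpers in L3NatTestCaseMixin compose inside
-# a test body. The plugin and ext_api wiring is assumed to come from the
-# concrete test case; the calls mirror the helpers defined above:
-#
-#     with self.subnet() as s, self.router() as r:
-#         self._set_net_external(s['subnet']['network_id'])
-#         self._add_external_gateway_to_router(
-#             r['router']['id'], s['subnet']['network_id'])
-#         self._router_interface_action(
-#             'add', r['router']['id'], s['subnet']['id'], None)
-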
-class ExtraAttributesMixinTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(ExtraAttributesMixinTestCase, self).setUp()
-        self.mixin = l3_attrs_db.ExtraAttributesMixin()
-
-    def _test__extend_extra_router_dict(
-        self, extra_attributes, attributes, expected_attributes):
-        self.mixin._extend_extra_router_dict(
-            attributes, {'extra_attributes': extra_attributes})
-        self.assertEqual(expected_attributes, attributes)
-
-    def test__extend_extra_router_dict_string_default(self):
-        self.mixin.extra_attributes = [{
-            'name': "foo_key",
-            'default': 'foo_default'
-        }]
-        extension_attributes = {'foo_key': 'my_fancy_value'}
-        self._test__extend_extra_router_dict(
-            extension_attributes, {}, extension_attributes)
-
-    def test__extend_extra_router_dict_booleans_false_default(self):
-        self.mixin.extra_attributes = [{
-            'name': "foo_key",
-            'default': False
-        }]
-        extension_attributes = {'foo_key': True}
-        self._test__extend_extra_router_dict(
-            extension_attributes, {}, extension_attributes)
-
-    def test__extend_extra_router_dict_booleans_true_default(self):
-        self.mixin.extra_attributes = [{
-            'name': "foo_key",
-            'default': True
-        }]
-        # Test that the default is overridden
-        extension_attributes = {'foo_key': False}
-        self._test__extend_extra_router_dict(
-            extension_attributes, {}, extension_attributes)
-
-    def test__extend_extra_router_dict_no_extension_attributes(self):
-        self.mixin.extra_attributes = [{
-            'name': "foo_key",
-            'default': 'foo_value'
-        }]
-        self._test__extend_extra_router_dict({}, {}, {'foo_key': 'foo_value'})
-
-    def test__extend_extra_router_dict_none_extension_attributes(self):
-        self._test__extend_extra_router_dict(None, {}, {})
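-
-    # Behaviour inferred only from the cases above (a sketch, not the
-    # mixin's documented contract): a None extra_attributes row leaves the
-    # result dict untouched, while any dict fills every declared attribute,
-    # falling back to its declared default when the key is absent. The
-    # helper below is hypothetical and only restates that expectation.
-    def _expected_merge(self, extra_attributes):
-        if extra_attributes is None:
-            return {}
-        return {attr['name']: extra_attributes.get(attr['name'],
-                                                   attr['default'])
-                for attr in self.mixin.extra_attributes}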
-
-
-class L3NatTestCaseBase(L3NatTestCaseMixin):
-
-    def test_router_create(self):
-        name = 'router1'
-        tenant_id = _uuid()
-        expected_value = [('name', name), ('tenant_id', tenant_id),
-                          ('admin_state_up', True), ('status', 'ACTIVE'),
-                          ('external_gateway_info', None)]
-        with self.router(name='router1', admin_state_up=True,
-                         tenant_id=tenant_id) as router:
-            for k, v in expected_value:
-                self.assertEqual(router['router'][k], v)
-
-    def test_router_create_call_extensions(self):
-        self.extension_called = False
-
-        def _extend_router_dict_test_attr(*args, **kwargs):
-            self.extension_called = True
-
-        db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-            l3.ROUTERS, [_extend_router_dict_test_attr])
-        self.assertFalse(self.extension_called)
-        with self.router():
-            self.assertTrue(self.extension_called)
-
-    def test_router_create_with_gwinfo(self):
-        with self.subnet() as s:
-            self._set_net_external(s['subnet']['network_id'])
-            data = {'router': {'tenant_id': _uuid()}}
-            data['router']['name'] = 'router1'
-            data['router']['external_gateway_info'] = {
-                'network_id': s['subnet']['network_id']}
-            router_req = self.new_create_request('routers', data, self.fmt)
-            res = router_req.get_response(self.ext_api)
-            router = self.deserialize(self.fmt, res)
-            self.assertEqual(
-                s['subnet']['network_id'],
-                router['router']['external_gateway_info']['network_id'])
-
-    def test_router_create_with_gwinfo_ext_ip(self):
-        with self.subnet() as s:
-            self._set_net_external(s['subnet']['network_id'])
-            ext_info = {
-                'network_id': s['subnet']['network_id'],
-                'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
-            }
-            res = self._create_router(
-                self.fmt, _uuid(), arg_list=('external_gateway_info',),
-                external_gateway_info=ext_info
-            )
-            router = self.deserialize(self.fmt, res)
-            self.assertEqual(
-                [{'ip_address': '10.0.0.99', 'subnet_id': s['subnet']['id']}],
-                router['router']['external_gateway_info'][
-                    'external_fixed_ips'])
-
-    def test_router_create_with_gwinfo_ext_ip_subnet(self):
-        with self.network() as n:
-            with self.subnet(network=n) as v1,\
-                    self.subnet(network=n, cidr='1.0.0.0/24') as v2,\
-                    self.subnet(network=n, cidr='2.0.0.0/24') as v3:
-                subnets = (v1, v2, v3)
-                self._set_net_external(n['network']['id'])
-                for s in subnets:
-                    ext_info = {
-                        'network_id': n['network']['id'],
-                        'external_fixed_ips': [
-                            {'subnet_id': s['subnet']['id']}]
-                    }
-                    res = self._create_router(
-                        self.fmt, _uuid(), arg_list=('external_gateway_info',),
-                        external_gateway_info=ext_info
-                    )
-                    router = self.deserialize(self.fmt, res)
-                    ext_ips = router['router']['external_gateway_info'][
-                        'external_fixed_ips']
-
-                    self.assertEqual(
-                        [{'subnet_id': s['subnet']['id'],
-                          'ip_address': mock.ANY}], ext_ips)
-
-    def test_router_create_with_gwinfo_ext_ip_non_admin(self):
-        with self.subnet() as s:
-            self._set_net_external(s['subnet']['network_id'])
-            ext_info = {
-                'network_id': s['subnet']['network_id'],
-                'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
-            }
-            res = self._create_router(
-                self.fmt, _uuid(), arg_list=('external_gateway_info',),
-                set_context=True, external_gateway_info=ext_info
-            )
-            self.assertEqual(res.status_int, exc.HTTPForbidden.code)
-
-    def test_router_list(self):
-        with self.router() as v1, self.router() as v2, self.router() as v3:
-            routers = (v1, v2, v3)
-            self._test_list_resources('router', routers)
-
-    def test_router_list_with_parameters(self):
-        with self.router(name='router1') as router1,\
-                self.router(name='router2') as router2:
-            query_params = 'name=router1'
-            self._test_list_resources('router', [router1],
-                                      query_params=query_params)
-            query_params = 'name=router2'
-            self._test_list_resources('router', [router2],
-                                      query_params=query_params)
-            query_params = 'name=router3'
-            self._test_list_resources('router', [],
-                                      query_params=query_params)
-
-    def test_router_list_with_sort(self):
-        with self.router(name='router1') as router1,\
-                self.router(name='router2') as router2,\
-                self.router(name='router3') as router3:
-            self._test_list_with_sort('router', (router3, router2, router1),
-                                      [('name', 'desc')])
-
-    def test_router_list_with_pagination(self):
-        with self.router(name='router1') as router1,\
-                self.router(name='router2') as router2,\
-                self.router(name='router3') as router3:
-            self._test_list_with_pagination('router',
-                                            (router1, router2, router3),
-                                            ('name', 'asc'), 2, 2)
-
-    def test_router_list_with_pagination_reverse(self):
-        with self.router(name='router1') as router1,\
-                self.router(name='router2') as router2,\
-                self.router(name='router3') as router3:
-            self._test_list_with_pagination_reverse('router',
-                                                    (router1, router2,
-                                                     router3),
-                                                    ('name', 'asc'), 2, 2)
-
-    def test_router_update(self):
-        rname1 = "yourrouter"
-        rname2 = "nachorouter"
-        with self.router(name=rname1) as r:
-            body = self._show('routers', r['router']['id'])
-            self.assertEqual(body['router']['name'], rname1)
-
-            body = self._update('routers', r['router']['id'],
-                                {'router': {'name': rname2}})
-
-            body = self._show('routers', r['router']['id'])
-            self.assertEqual(body['router']['name'], rname2)
-
-    def test_router_update_gateway(self):
-        with self.router() as r:
-            with self.subnet() as s1:
-                with self.subnet() as s2:
-                    self._set_net_external(s1['subnet']['network_id'])
-                    self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        s1['subnet']['network_id'])
-                    body = self._show('routers', r['router']['id'])
-                    net_id = (body['router']
-                              ['external_gateway_info']['network_id'])
-                    self.assertEqual(net_id, s1['subnet']['network_id'])
-                    self._set_net_external(s2['subnet']['network_id'])
-                    self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        s2['subnet']['network_id'])
-                    body = self._show('routers', r['router']['id'])
-                    net_id = (body['router']
-                              ['external_gateway_info']['network_id'])
-                    self.assertEqual(net_id, s2['subnet']['network_id'])
-                    # Validate that we can clear the gateway with an
-                    # empty dict; in any other case we fall back on
-                    # None as the default value
-                    self._remove_external_gateway_from_router(
-                        r['router']['id'],
-                        s2['subnet']['network_id'],
-                        external_gw_info={})
-
-    def test_router_update_gateway_with_external_ip_used_by_gw(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._set_net_external(s['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'],
-                    ext_ips=[{'ip_address': s['subnet']['gateway_ip']}],
-                    expected_code=exc.HTTPBadRequest.code)
-
-    def test_router_update_gateway_with_invalid_external_ip(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._set_net_external(s['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'],
-                    ext_ips=[{'ip_address': '99.99.99.99'}],
-                    expected_code=exc.HTTPBadRequest.code)
-
-    def test_router_update_gateway_with_invalid_external_subnet(self):
-        with self.subnet() as s1,\
-                self.subnet(cidr='1.0.0.0/24') as s2,\
-                self.router() as r:
-            self._set_net_external(s1['subnet']['network_id'])
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                s1['subnet']['network_id'],
-                # this subnet is not on the same network so this should fail
-                ext_ips=[{'subnet_id': s2['subnet']['id']}],
-                expected_code=exc.HTTPBadRequest.code)
-
-    def test_router_update_gateway_with_different_external_subnet(self):
-        with self.network() as n:
-            with self.subnet(network=n) as s1,\
-                    self.subnet(network=n, cidr='1.0.0.0/24') as s2,\
-                    self.router() as r:
-                self._set_net_external(n['network']['id'])
-                res1 = self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    n['network']['id'],
-                    ext_ips=[{'subnet_id': s1['subnet']['id']}])
-                res2 = self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    n['network']['id'],
-                    ext_ips=[{'subnet_id': s2['subnet']['id']}])
-        fip1 = res1['router']['external_gateway_info']['external_fixed_ips'][0]
-        fip2 = res2['router']['external_gateway_info']['external_fixed_ips'][0]
-        self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
-        self.assertEqual(s2['subnet']['id'], fip2['subnet_id'])
-        self.assertNotEqual(fip1['subnet_id'], fip2['subnet_id'])
-        self.assertNotEqual(fip1['ip_address'], fip2['ip_address'])
-
-    def test_router_update_gateway_with_existed_floatingip(self):
-        with self.subnet() as subnet:
-            self._set_net_external(subnet['subnet']['network_id'])
-            with self.floatingip_with_assoc() as fip:
-                self._add_external_gateway_to_router(
-                    fip['floatingip']['router_id'],
-                    subnet['subnet']['network_id'],
-                    expected_code=exc.HTTPConflict.code)
-
-    def test_router_update_gateway_to_empty_with_existed_floatingip(self):
-        with self.floatingip_with_assoc() as fip:
-            self._remove_external_gateway_from_router(
-                fip['floatingip']['router_id'], None,
-                expected_code=exc.HTTPConflict.code)
-
-    def test_router_update_gateway_add_multiple_prefixes_ipv6(self):
-        with self.network() as n:
-            with self.subnet(network=n) as s1, \
-                self.subnet(network=n, ip_version=6, cidr='2001:db8::/32') \
-                as s2, (self.router()) as r:
-                self._set_net_external(n['network']['id'])
-                res1 = self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        n['network']['id'],
-                        ext_ips=[{'subnet_id': s1['subnet']['id']}])
-                fip1 = (res1['router']['external_gateway_info']
-                        ['external_fixed_ips'][0])
-                self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
-                res2 = self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        n['network']['id'],
-                        ext_ips=[{'ip_address': fip1['ip_address'],
-                                  'subnet_id': s1['subnet']['id']},
-                                 {'subnet_id': s2['subnet']['id']}])
-                self.assertEqual(fip1, res2['router']['external_gateway_info']
-                                           ['external_fixed_ips'][0])
-                fip2 = (res2['router']['external_gateway_info']
-                        ['external_fixed_ips'][1])
-                self.assertEqual(s2['subnet']['id'], fip2['subnet_id'])
-                self.assertNotEqual(fip1['subnet_id'],
-                                    fip2['subnet_id'])
-                self.assertNotEqual(fip1['ip_address'],
-                                    fip2['ip_address'])
-
-    def test_router_update_gateway_upon_subnet_create_ipv6(self):
-        with self.network() as n:
-            with self.subnet(network=n) as s1, self.router() as r:
-                self._set_net_external(n['network']['id'])
-                res1 = self._add_external_gateway_to_router(
-                          r['router']['id'],
-                          n['network']['id'],
-                          ext_ips=[{'subnet_id': s1['subnet']['id']}])
-                fip1 = (res1['router']['external_gateway_info']
-                        ['external_fixed_ips'][0])
-                sres = self._create_subnet(self.fmt, net_id=n['network']['id'],
-                                         ip_version=6, cidr='2001:db8::/32',
-                                         expected_res_status=(
-                                             exc.HTTPCreated.code))
-                s2 = self.deserialize(self.fmt, sres)
-                res2 = self._show('routers', r['router']['id'])
-                self.assertEqual(fip1, res2['router']['external_gateway_info']
-                                           ['external_fixed_ips'][0])
-                fip2 = (res2['router']['external_gateway_info']
-                        ['external_fixed_ips'][1])
-                self.assertEqual(s2['subnet']['id'], fip2['subnet_id'])
-                self.assertNotEqual(fip1['subnet_id'], fip2['subnet_id'])
-                self.assertNotEqual(fip1['ip_address'], fip2['ip_address'])
-
-    def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self):
-        """Create subnet should not cause excess fixed IPs on router gw
-
-        If a router gateway port has the maximum of one IPv4 and one IPv6
-        fixed, create subnet should not add any more IP addresses to the port
-        (unless this is the subnet is a SLAAC/DHCPv6-stateless subnet in which
-        case the addresses are added automatically)
-
-        """
-        with self.router() as r, self.network() as n:
-            with self.subnet(cidr='10.0.0.0/24', network=n) as s1, (
-                    self.subnet(ip_version=6, cidr='2001:db8::/64',
-                        network=n)) as s2:
-                self._set_net_external(n['network']['id'])
-                self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        n['network']['id'],
-                        ext_ips=[{'subnet_id': s1['subnet']['id']},
-                                 {'subnet_id': s2['subnet']['id']}],
-                        expected_code=exc.HTTPOk.code)
-                res1 = self._show('routers', r['router']['id'])
-                original_fips = (res1['router']['external_gateway_info']
-                                 ['external_fixed_ips'])
-                # Add another IPv4 subnet - a fip SHOULD NOT be added
-                # to the external gateway port as it already has a v4 address
-                self._create_subnet(self.fmt, net_id=n['network']['id'],
-                                    cidr='10.0.1.0/24')
-                res2 = self._show('routers', r['router']['id'])
-                self.assertEqual(original_fips,
-                                 res2['router']['external_gateway_info']
-                                 ['external_fixed_ips'])
-                # Add a SLAAC subnet - a fip from this subnet SHOULD be added
-                # to the external gateway port
-                s3 = self.deserialize(self.fmt,
-                        self._create_subnet(self.fmt,
-                            net_id=n['network']['id'],
-                            ip_version=6, cidr='2001:db8:1::/64',
-                            ipv6_ra_mode=l3_constants.IPV6_SLAAC,
-                            ipv6_address_mode=l3_constants.IPV6_SLAAC))
-                res3 = self._show('routers', r['router']['id'])
-                fips = (res3['router']['external_gateway_info']
-                        ['external_fixed_ips'])
-                fip_subnet_ids = [fip['subnet_id'] for fip in fips]
-                self.assertIn(s1['subnet']['id'], fip_subnet_ids)
-                self.assertIn(s2['subnet']['id'], fip_subnet_ids)
-                self.assertIn(s3['subnet']['id'], fip_subnet_ids)
-                self._remove_external_gateway_from_router(
-                    r['router']['id'],
-                    n['network']['id'])
-
-    def _test_router_add_interface_subnet(self, router, subnet, msg=None):
-        exp_notifications = ['router.create.start',
-                             'router.create.end',
-                             'network.create.start',
-                             'network.create.end',
-                             'subnet.create.start',
-                             'subnet.create.end',
-                             'router.interface.create',
-                             'router.interface.delete']
-        body = self._router_interface_action('add',
-                                             router['router']['id'],
-                                             subnet['subnet']['id'],
-                                             None)
-        self.assertIn('port_id', body, msg)
-
-        # fetch port and confirm device_id
-        r_port_id = body['port_id']
-        port = self._show('ports', r_port_id)
-        self.assertEqual(port['port']['device_id'],
-                         router['router']['id'], msg)
-
-        self._router_interface_action('remove',
-                                      router['router']['id'],
-                                      subnet['subnet']['id'],
-                                      None)
-        self._show('ports', r_port_id,
-                   expected_code=exc.HTTPNotFound.code)
-
-        self.assertEqual(
-            set(exp_notifications),
-            set(n['event_type'] for n in fake_notifier.NOTIFICATIONS), msg)
-
-        for n in fake_notifier.NOTIFICATIONS:
-            if n['event_type'].startswith('router.interface.'):
-                payload = n['payload']['router_interface']
-                self.assertIn('id', payload)
-                self.assertEqual(payload['id'], router['router']['id'])
-                self.assertIn('tenant_id', payload)
-                stid = subnet['subnet']['tenant_id']
-                # tolerate subnet tenant deliberately set to '' in the
-                # nsx metadata access case
-                self.assertIn(payload['tenant_id'], [stid, ''], msg)
-
-    def test_router_add_interface_subnet(self):
-        fake_notifier.reset()
-        with self.router() as r:
-            with self.network() as n:
-                with self.subnet(network=n) as s:
-                    self._test_router_add_interface_subnet(r, s)
-
-    def test_router_add_interface_ipv6_subnet(self):
-        """Test router-interface-add for valid ipv6 subnets.
-
-        Verify the valid use-cases of an IPv6 subnet where we
-        are allowed to associate to the Neutron Router are successful.
-        """
-        slaac = l3_constants.IPV6_SLAAC
-        stateful = l3_constants.DHCPV6_STATEFUL
-        stateless = l3_constants.DHCPV6_STATELESS
-        use_cases = [{'msg': 'IPv6 Subnet Modes (slaac, none)',
-                      'ra_mode': slaac, 'address_mode': None},
-                     {'msg': 'IPv6 Subnet Modes (none, none)',
-                      'ra_mode': None, 'address_mode': None},
-                     {'msg': 'IPv6 Subnet Modes (dhcpv6-stateful, none)',
-                      'ra_mode': stateful, 'address_mode': None},
-                     {'msg': 'IPv6 Subnet Modes (dhcpv6-stateless, none)',
-                      'ra_mode': stateless, 'address_mode': None},
-                     {'msg': 'IPv6 Subnet Modes (slaac, slaac)',
-                      'ra_mode': slaac, 'address_mode': slaac},
-                     {'msg': 'IPv6 Subnet Modes (dhcpv6-stateful,'
-                      'dhcpv6-stateful)', 'ra_mode': stateful,
-                      'address_mode': stateful},
-                     {'msg': 'IPv6 Subnet Modes (dhcpv6-stateless,'
-                      'dhcpv6-stateless)', 'ra_mode': stateless,
-                      'address_mode': stateless}]
-        for uc in use_cases:
-            fake_notifier.reset()
-            with self.router() as r, self.network() as n:
-                with self.subnet(network=n, cidr='fd00::1/64',
-                                 gateway_ip='fd00::1', ip_version=6,
-                                 ipv6_ra_mode=uc['ra_mode'],
-                                 ipv6_address_mode=uc['address_mode']) as s:
-                    self._test_router_add_interface_subnet(r, s, uc['msg'])
-
-    def test_router_add_interface_multiple_ipv4_subnets(self):
-        """Test router-interface-add for multiple ipv4 subnets.
-
-        Verify that adding multiple ipv4 subnets from the same network
-        to a router places them all on different router interfaces.
-        """
-        with self.router() as r, self.network() as n:
-            with self.subnet(network=n, cidr='10.0.0.0/24') as s1, (
-                 self.subnet(network=n, cidr='10.0.1.0/24')) as s2:
-                    body = self._router_interface_action('add',
-                                                         r['router']['id'],
-                                                         s1['subnet']['id'],
-                                                         None)
-                    pid1 = body['port_id']
-                    body = self._router_interface_action('add',
-                                                         r['router']['id'],
-                                                         s2['subnet']['id'],
-                                                         None)
-                    pid2 = body['port_id']
-                    self.assertNotEqual(pid1, pid2)
-                    self._router_interface_action('remove', r['router']['id'],
-                                                  s1['subnet']['id'], None)
-                    self._router_interface_action('remove', r['router']['id'],
-                                                  s2['subnet']['id'], None)
-
-    def test_router_add_interface_multiple_ipv6_subnets_same_net(self):
-        """Test router-interface-add for multiple ipv6 subnets on a network.
-
-        Verify that adding multiple ipv6 subnets from the same network
-        to a router places them all on the same router interface.
-        """
-        with self.router() as r, self.network() as n:
-            with (self.subnet(network=n, cidr='fd00::1/64', ip_version=6)
-                  ) as s1, self.subnet(network=n, cidr='fd01::1/64',
-                                       ip_version=6) as s2:
-                    body = self._router_interface_action('add',
-                                                         r['router']['id'],
-                                                         s1['subnet']['id'],
-                                                         None)
-                    pid1 = body['port_id']
-                    body = self._router_interface_action('add',
-                                                         r['router']['id'],
-                                                         s2['subnet']['id'],
-                                                         None)
-                    pid2 = body['port_id']
-                    self.assertEqual(pid1, pid2)
-                    port = self._show('ports', pid1)
-                    self.assertEqual(2, len(port['port']['fixed_ips']))
-                    port_subnet_ids = [fip['subnet_id'] for fip in
-                                       port['port']['fixed_ips']]
-                    self.assertIn(s1['subnet']['id'], port_subnet_ids)
-                    self.assertIn(s2['subnet']['id'], port_subnet_ids)
-                    self._router_interface_action('remove', r['router']['id'],
-                                                  s1['subnet']['id'], None)
-                    self._router_interface_action('remove', r['router']['id'],
-                                                  s2['subnet']['id'], None)
-
-    def test_router_add_interface_multiple_ipv6_subnets_different_net(self):
-        """Test router-interface-add for ipv6 subnets on different networks.
-
-        Verify that adding multiple ipv6 subnets from different networks
-        to a router places them on different router interfaces.
-        """
-        with self.router() as r, self.network() as n1, self.network() as n2:
-            with (self.subnet(network=n1, cidr='fd00::1/64', ip_version=6)
-                  ) as s1, self.subnet(network=n2, cidr='fd01::1/64',
-                                       ip_version=6) as s2:
-                    body = self._router_interface_action('add',
-                                                         r['router']['id'],
-                                                         s1['subnet']['id'],
-                                                         None)
-                    pid1 = body['port_id']
-                    body = self._router_interface_action('add',
-                                                         r['router']['id'],
-                                                         s2['subnet']['id'],
-                                                         None)
-                    pid2 = body['port_id']
-                    self.assertNotEqual(pid1, pid2)
-                    self._router_interface_action('remove', r['router']['id'],
-                                                  s1['subnet']['id'], None)
-                    self._router_interface_action('remove', r['router']['id'],
-                                                  s2['subnet']['id'], None)
-
-    def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self):
-        """Test router-interface-add for in-valid ipv6 subnets.
-
-        Verify that an appropriate error message is displayed when
-        an IPv6 subnet configured to use an external_router for Router
-        Advertisements (i.e., ipv6_ra_mode is None and ipv6_address_mode
-        is not None) is attempted to associate with a Neutron Router.
-        """
-        use_cases = [{'msg': 'IPv6 Subnet Modes (none, slaac)',
-                      'ra_mode': None,
-                      'address_mode': l3_constants.IPV6_SLAAC},
-                     {'msg': 'IPv6 Subnet Modes (none, dhcpv6-stateful)',
-                      'ra_mode': None,
-                      'address_mode': l3_constants.DHCPV6_STATEFUL},
-                     {'msg': 'IPv6 Subnet Modes (none, dhcpv6-stateless)',
-                      'ra_mode': None,
-                      'address_mode': l3_constants.DHCPV6_STATELESS}]
-        for uc in use_cases:
-            with self.router() as r, self.network() as n:
-                with self.subnet(network=n, cidr='fd00::1/64',
-                                 gateway_ip='fd00::1', ip_version=6,
-                                 ipv6_ra_mode=uc['ra_mode'],
-                                 ipv6_address_mode=uc['address_mode']) as s:
-                    exp_code = exc.HTTPBadRequest.code
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  s['subnet']['id'],
-                                                  None,
-                                                  expected_code=exp_code,
-                                                  msg=uc['msg'])
-
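-    # When ipv6_ra_mode is None but ipv6_address_mode is set, Router
-    # Advertisements are expected to come from a router outside Neutron,
-    # so attaching the subnet to a Neutron router is refused rather than
-    # having two routers advertise on the same subnet.
-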
-    def test_router_add_interface_ipv6_subnet_without_gateway_ip(self):
-        with self.router() as r:
-            with self.subnet(ip_version=6, cidr='fe80::/64',
-                             gateway_ip=None) as s:
-                error_code = exc.HTTPBadRequest.code
-                self._router_interface_action('add',
-                                              r['router']['id'],
-                                              s['subnet']['id'],
-                                              None,
-                                              expected_code=error_code)
-
-    def test_router_add_interface_subnet_with_bad_tenant_returns_404(self):
-        tenant_id = _uuid()
-        with self.router(tenant_id=tenant_id, set_context=True) as r:
-            with self.network(tenant_id=tenant_id, set_context=True) as n:
-                with self.subnet(network=n, set_context=True) as s:
-                    err_code = exc.HTTPNotFound.code
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  s['subnet']['id'],
-                                                  None,
-                                                  expected_code=err_code,
-                                                  tenant_id='bad_tenant')
-                    body = self._router_interface_action('add',
-                                                         r['router']['id'],
-                                                         s['subnet']['id'],
-                                                         None)
-                    self.assertIn('port_id', body)
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  s['subnet']['id'],
-                                                  None,
-                                                  expected_code=err_code,
-                                                  tenant_id='bad_tenant')
-
-    def test_router_add_interface_subnet_with_port_from_other_tenant(self):
-        tenant_id = _uuid()
-        other_tenant_id = _uuid()
-        with self.router(tenant_id=tenant_id) as r,\
-                self.network(tenant_id=tenant_id) as n1,\
-                self.network(tenant_id=other_tenant_id) as n2:
-            with self.subnet(network=n1, cidr='10.0.0.0/24') as s1,\
-                    self.subnet(network=n2, cidr='10.1.0.0/24') as s2:
-                body = self._router_interface_action(
-                    'add',
-                    r['router']['id'],
-                    s2['subnet']['id'],
-                    None)
-                self.assertIn('port_id', body)
-                self._router_interface_action(
-                    'add',
-                    r['router']['id'],
-                    s1['subnet']['id'],
-                    None,
-                    tenant_id=tenant_id)
-                self.assertIn('port_id', body)
-
-    def test_router_add_interface_port(self):
-        orig_update_port = self.plugin.update_port
-        with self.router() as r, (
-            self.port()) as p, (
-                mock.patch.object(self.plugin, 'update_port')) as update_port:
-            update_port.side_effect = orig_update_port
-            body = self._router_interface_action('add',
-                                                 r['router']['id'],
-                                                 None,
-                                                 p['port']['id'])
-            self.assertIn('port_id', body)
-            self.assertEqual(p['port']['id'], body['port_id'])
-            expected_port_update = {
-                'device_owner': l3_constants.DEVICE_OWNER_ROUTER_INTF,
-                'device_id': r['router']['id']}
-            update_port.assert_called_with(
-                mock.ANY, p['port']['id'], {'port': expected_port_update})
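-            # mock.ANY above stands in for the request context that the
-            # plugin passes as the first argument to update_port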
-            # fetch port and confirm device_id
-            body = self._show('ports', p['port']['id'])
-            self.assertEqual(r['router']['id'], body['port']['device_id'])
-
-            # clean-up
-            self._router_interface_action('remove',
-                                          r['router']['id'],
-                                          None,
-                                          p['port']['id'])
-
-    def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self):
-        """Test adding router port with multiple IPv4 subnets fails.
-
-        Multiple IPv4 subnets are not allowed on a single router port.
-        Ensure that adding a port with multiple IPv4 subnets to a router fails.
-        """
-        with self.network() as n, self.router() as r:
-            with self.subnet(network=n, cidr='10.0.0.0/24') as s1, (
-                 self.subnet(network=n, cidr='10.0.1.0/24')) as s2:
-                fixed_ips = [{'subnet_id': s1['subnet']['id']},
-                             {'subnet_id': s2['subnet']['id']}]
-                with self.port(subnet=s1, fixed_ips=fixed_ips) as p:
-                    exp_code = exc.HTTPBadRequest.code
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'],
-                                                  expected_code=exp_code)
-
-    def test_router_add_interface_ipv6_port_existing_network_returns_400(self):
-        """Ensure unique IPv6 router ports per network id.
-
-        Adding a router port containing one or more IPv6 subnets with the same
-        network id as an existing router port should fail. This is so
-        there is no ambiguity regarding on which port to add an IPv6 subnet
-        when executing router-interface-add with a subnet and no port.
-        """
-        with self.network() as n, self.router() as r:
-            with self.subnet(network=n, cidr='fd00::/64',
-                             ip_version=6) as s1, (
-                 self.subnet(network=n, cidr='fd01::/64',
-                             ip_version=6)) as s2:
-                with self.port(subnet=s1) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  s2['subnet']['id'],
-                                                  None)
-                    exp_code = exc.HTTPBadRequest.code
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'],
-                                                  expected_code=exp_code)
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  s2['subnet']['id'],
-                                                  None)
-
-    def test_router_add_interface_multiple_ipv6_subnet_port(self):
-        """A port with multiple IPv6 subnets can be added to a router
-
-        Create a port with multiple associated IPv6 subnets and attach
-        it to a router. The action should succeed.
-        """
-        with self.network() as n, self.router() as r:
-            with self.subnet(network=n, cidr='fd00::/64',
-                             ip_version=6) as s1, (
-                 self.subnet(network=n, cidr='fd01::/64',
-                             ip_version=6)) as s2:
-                fixed_ips = [{'subnet_id': s1['subnet']['id']},
-                             {'subnet_id': s2['subnet']['id']}]
-                with self.port(subnet=s1, fixed_ips=fixed_ips) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-    def test_router_add_interface_empty_port_and_subnet_ids(self):
-        with self.router() as r:
-            self._router_interface_action('add', r['router']['id'],
-                                          None, None,
-                                          expected_code=exc.
-                                          HTTPBadRequest.code)
-
-    def test_router_add_interface_port_bad_tenant_returns_404(self):
-        tenant_id = _uuid()
-        with self.router(tenant_id=tenant_id, set_context=True) as r:
-            with self.network(tenant_id=tenant_id, set_context=True) as n:
-                with self.subnet(tenant_id=tenant_id, network=n,
-                                 set_context=True) as s:
-                    with self.port(tenant_id=tenant_id, subnet=s,
-                                   set_context=True) as p:
-                        err_code = exc.HTTPNotFound.code
-                        self._router_interface_action('add',
-                                                    r['router']['id'],
-                                                    None,
-                                                    p['port']['id'],
-                                                    expected_code=err_code,
-                                                    tenant_id='bad_tenant')
-                        self._router_interface_action('add',
-                                                    r['router']['id'],
-                                                    None,
-                                                    p['port']['id'],
-                                                    tenant_id=tenant_id)
-
-                        # clean-up should fail as well
-                        self._router_interface_action('remove',
-                                                    r['router']['id'],
-                                                    None,
-                                                    p['port']['id'],
-                                                    expected_code=err_code,
-                                                    tenant_id='bad_tenant')
-
-    def test_router_add_interface_port_without_ips(self):
-        with self.network() as network, self.router() as r:
-            # Create a router port without IPs
-            p = self._make_port(self.fmt, network['network']['id'],
-                device_owner=l3_constants.DEVICE_OWNER_ROUTER_INTF)
-            err_code = exc.HTTPBadRequest.code
-            self._router_interface_action('add',
-                                          r['router']['id'],
-                                          None,
-                                          p['port']['id'],
-                                          expected_code=err_code)
-
-    def test_router_add_interface_dup_subnet1_returns_400(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._router_interface_action('add',
-                                              r['router']['id'],
-                                              s['subnet']['id'],
-                                              None)
-                self._router_interface_action('add',
-                                              r['router']['id'],
-                                              s['subnet']['id'],
-                                              None,
-                                              expected_code=exc.
-                                              HTTPBadRequest.code)
-
-    def test_router_add_interface_dup_subnet2_returns_400(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                with self.port(subnet=s) as p1:
-                    with self.port(subnet=s) as p2:
-                        self._router_interface_action('add',
-                                                      r['router']['id'],
-                                                      None,
-                                                      p1['port']['id'])
-                        self._router_interface_action('add',
-                                                      r['router']['id'],
-                                                      None,
-                                                      p2['port']['id'],
-                                                      expected_code=exc.
-                                                      HTTPBadRequest.code)
-
-    def test_router_add_interface_overlapped_cidr_returns_400(self):
-        with self.router() as r:
-            with self.subnet(cidr='10.0.1.0/24') as s1:
-                self._router_interface_action('add',
-                                              r['router']['id'],
-                                              s1['subnet']['id'],
-                                              None)
-
-                def try_overlapped_cidr(cidr):
-                    with self.subnet(cidr=cidr) as s2:
-                        self._router_interface_action('add',
-                                                      r['router']['id'],
-                                                      s2['subnet']['id'],
-                                                      None,
-                                                      expected_code=exc.
-                                                      HTTPBadRequest.code)
-                # another subnet with the same CIDR
-                try_overlapped_cidr('10.0.1.0/24')
-                # another subnet whose CIDR contains s1
-                try_overlapped_cidr('10.0.0.0/16')
-
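-    # 10.0.0.0/16 spans 10.0.0.0-10.0.255.255 and therefore contains
-    # 10.0.1.0/24; netaddr (already imported by this module) can confirm:
-    #
-    #   netaddr.IPNetwork('10.0.1.0/24') in netaddr.IPNetwork('10.0.0.0/16')
-    #   # -> True
-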
-    def test_router_add_interface_no_data_returns_400(self):
-        with self.router() as r:
-            self._router_interface_action('add',
-                                          r['router']['id'],
-                                          None,
-                                          None,
-                                          expected_code=exc.
-                                          HTTPBadRequest.code)
-
-    def test_router_add_interface_with_both_ids_returns_400(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  s['subnet']['id'],
-                                                  p['port']['id'],
-                                                  expected_code=exc.
-                                                  HTTPBadRequest.code)
-
-    def test_router_add_gateway_dup_subnet1_returns_400(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._router_interface_action('add',
-                                              r['router']['id'],
-                                              s['subnet']['id'],
-                                              None)
-                self._set_net_external(s['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'],
-                    expected_code=exc.HTTPBadRequest.code)
-
-    def test_router_add_gateway_dup_subnet2_returns_400(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._set_net_external(s['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-                self._router_interface_action('add',
-                                              r['router']['id'],
-                                              s['subnet']['id'],
-                                              None,
-                                              expected_code=exc.
-                                              HTTPBadRequest.code)
-
-    def test_router_add_gateway_multiple_subnets_ipv6(self):
-        """Ensure external gateway set doesn't add excess IPs on router gw
-
-        Setting the gateway of a router to an external network with more than
-        one IPv4 and one IPv6 subnet should only add an address from the first
-        IPv4 subnet, an address from the first IPv6-stateful subnet, and an
-        address from each IPv6-stateless (SLAAC and DHCPv6-stateless) subnet
-
-        """
-        with self.router() as r, self.network() as n:
-            with self.subnet(
-                    cidr='10.0.0.0/24', network=n) as s1, (
-                 self.subnet(
-                    cidr='10.0.1.0/24', network=n)) as s2, (
-                 self.subnet(
-                    cidr='2001:db8::/64', network=n,
-                    ip_version=6,
-                    ipv6_ra_mode=l3_constants.IPV6_SLAAC,
-                    ipv6_address_mode=l3_constants.IPV6_SLAAC)) as s3, (
-                 self.subnet(
-                    cidr='2001:db8:1::/64', network=n,
-                    ip_version=6,
-                    ipv6_ra_mode=l3_constants.DHCPV6_STATEFUL,
-                    ipv6_address_mode=l3_constants.DHCPV6_STATEFUL)) as s4, (
-                 self.subnet(
-                    cidr='2001:db8:2::/64', network=n,
-                    ip_version=6,
-                    ipv6_ra_mode=l3_constants.DHCPV6_STATELESS,
-                    ipv6_address_mode=l3_constants.DHCPV6_STATELESS)) as s5:
-                self._set_net_external(n['network']['id'])
-                self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        n['network']['id'])
-                res = self._show('routers', r['router']['id'])
-                fips = (res['router']['external_gateway_info']
-                        ['external_fixed_ips'])
-                fip_subnet_ids = {fip['subnet_id'] for fip in fips}
-                # exactly one of s1 or s2 should be in the list.
-                if s1['subnet']['id'] in fip_subnet_ids:
-                    self.assertEqual({s1['subnet']['id'],
-                                      s3['subnet']['id'],
-                                      s4['subnet']['id'],
-                                      s5['subnet']['id']},
-                                     fip_subnet_ids)
-                else:
-                    self.assertEqual({s2['subnet']['id'],
-                                      s3['subnet']['id'],
-                                      s4['subnet']['id'],
-                                      s5['subnet']['id']},
-                                     fip_subnet_ids)
-                self._remove_external_gateway_from_router(
-                    r['router']['id'],
-                    n['network']['id'])
-
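-    # A likely rationale for the behavior above: SLAAC and DHCPv6-stateless
-    # addresses are derived automatically from each subnet's prefix, so the
-    # gateway port needs an address on every such subnet, whereas IPv4 and
-    # DHCPv6-stateful addressing only requires one subnet's address each.
-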
-    def test_router_add_and_remove_gateway(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._set_net_external(s['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-                body = self._show('routers', r['router']['id'])
-                net_id = body['router']['external_gateway_info']['network_id']
-                self.assertEqual(net_id, s['subnet']['network_id'])
-                self._remove_external_gateway_from_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-                body = self._show('routers', r['router']['id'])
-                gw_info = body['router']['external_gateway_info']
-                self.assertIsNone(gw_info)
-
-    def test_router_add_and_remove_gateway_tenant_ctx(self):
-        with self.router(tenant_id='noadmin',
-                         set_context=True) as r:
-            with self.subnet() as s:
-                self._set_net_external(s['subnet']['network_id'])
-                ctx = context.Context('', 'noadmin')
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'],
-                    neutron_context=ctx)
-                body = self._show('routers', r['router']['id'])
-                net_id = body['router']['external_gateway_info']['network_id']
-                self.assertEqual(net_id, s['subnet']['network_id'])
-                self._remove_external_gateway_from_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-                body = self._show('routers', r['router']['id'])
-                gw_info = body['router']['external_gateway_info']
-                self.assertIsNone(gw_info)
-
-    def test_create_router_port_with_device_id_of_other_tenants_router(self):
-        with self.router() as admin_router:
-            with self.network(tenant_id='tenant_a',
-                              set_context=True) as n:
-                with self.subnet(network=n):
-                    for device_owner in l3_constants.ROUTER_INTERFACE_OWNERS:
-                        self._create_port(
-                            self.fmt, n['network']['id'],
-                            tenant_id='tenant_a',
-                            device_id=admin_router['router']['id'],
-                            device_owner=device_owner,
-                            set_context=True,
-                            expected_res_status=exc.HTTPConflict.code)
-
-    def test_create_non_router_port_device_id_of_other_tenants_router_update(
-        self):
-        # This tests that HTTPConflict is raised if we create a non-router
-        # port that matches the device_id of another tenant's router and
-        # then change the device_owner to network:router_interface.
-        with self.router() as admin_router:
-            with self.network(tenant_id='tenant_a',
-                              set_context=True) as n:
-                with self.subnet(network=n):
-                    for device_owner in l3_constants.ROUTER_INTERFACE_OWNERS:
-                        port_res = self._create_port(
-                            self.fmt, n['network']['id'],
-                            tenant_id='tenant_a',
-                            device_id=admin_router['router']['id'],
-                            set_context=True)
-                        port = self.deserialize(self.fmt, port_res)
-                        neutron_context = context.Context('', 'tenant_a')
-                        data = {'port': {'device_owner': device_owner}}
-                        self._update('ports', port['port']['id'], data,
-                                     neutron_context=neutron_context,
-                                     expected_code=exc.HTTPConflict.code)
-
-    def test_update_port_device_id_to_different_tenants_router(self):
-        with self.router() as admin_router:
-            with self.router(tenant_id='tenant_a',
-                             set_context=True) as tenant_router:
-                with self.network(tenant_id='tenant_a',
-                                  set_context=True) as n:
-                    with self.subnet(network=n) as s:
-                        port = self._router_interface_action(
-                            'add', tenant_router['router']['id'],
-                            s['subnet']['id'], None, tenant_id='tenant_a')
-                        neutron_context = context.Context('', 'tenant_a')
-                        data = {'port':
-                                {'device_id': admin_router['router']['id']}}
-                        self._update('ports', port['port_id'], data,
-                                     neutron_context=neutron_context,
-                                     expected_code=exc.HTTPConflict.code)
-
-    def test_router_add_gateway_invalid_network_returns_400(self):
-        with self.router() as r:
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                "foobar", expected_code=exc.HTTPBadRequest.code)
-
-    def test_router_add_gateway_non_existent_network_returns_404(self):
-        with self.router() as r:
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                _uuid(), expected_code=exc.HTTPNotFound.code)
-
-    def test_router_add_gateway_net_not_external_returns_400(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                # intentionally do not set net as external
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'],
-                    expected_code=exc.HTTPBadRequest.code)
-
-    def test_router_add_gateway_no_subnet(self):
-        with self.router() as r:
-            with self.network() as n:
-                self._set_net_external(n['network']['id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    n['network']['id'])
-                body = self._show('routers', r['router']['id'])
-                net_id = body['router']['external_gateway_info']['network_id']
-                self.assertEqual(net_id, n['network']['id'])
-                self._remove_external_gateway_from_router(
-                    r['router']['id'],
-                    n['network']['id'])
-                body = self._show('routers', r['router']['id'])
-                gw_info = body['router']['external_gateway_info']
-                self.assertIsNone(gw_info)
-
-    def test_router_remove_interface_inuse_returns_409(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._router_interface_action('add',
-                                              r['router']['id'],
-                                              s['subnet']['id'],
-                                              None)
-                self._delete('routers', r['router']['id'],
-                             expected_code=exc.HTTPConflict.code)
-
-    def test_router_remove_interface_callback_failure_returns_409(self):
-        with self.router() as r,\
-                self.subnet() as s,\
-                mock.patch.object(registry, 'notify') as notify:
-            errors = [
-                exceptions.NotificationError(
-                    'foo_callback_id', n_exc.InUse()),
-            ]
-            # we fail the first time, but not the second, when
-            # the clean-up takes place
-            notify.side_effect = [
-                exceptions.CallbackFailure(errors=errors), None
-            ]
-            self._router_interface_action('add',
-                                          r['router']['id'],
-                                          s['subnet']['id'],
-                                          None)
-            self._router_interface_action(
-                'remove',
-                r['router']['id'],
-                s['subnet']['id'],
-                None,
-                exc.HTTPConflict.code)
-
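-    # The CallbackFailure raised by the first notify() simulates a
-    # subscriber vetoing the interface removal; the wrapped InUse error
-    # surfaces as a 409, and the second notify() succeeds so that the
-    # test fixtures can still clean up the interface afterwards.
-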
-    def test_router_clear_gateway_callback_failure_returns_409(self):
-        with self.router() as r,\
-                self.subnet() as s,\
-                mock.patch.object(registry, 'notify') as notify:
-            errors = [
-                exceptions.NotificationError(
-                    'foo_callback_id', n_exc.InUse()),
-            ]
-            notify.side_effect = exceptions.CallbackFailure(errors=errors)
-            self._set_net_external(s['subnet']['network_id'])
-            self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-            self._remove_external_gateway_from_router(
-                r['router']['id'],
-                s['subnet']['network_id'],
-                external_gw_info={},
-                expected_code=exc.HTTPConflict.code)
-
-    def test_router_remove_interface_wrong_subnet_returns_400(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                with self.port() as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  s['subnet']['id'],
-                                                  p['port']['id'],
-                                                  exc.HTTPBadRequest.code)
-
-    def test_router_remove_interface_nothing_returns_400(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  None,
-                                                  exc.HTTPBadRequest.code)
-                    # remove properly to clean up
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-
-    def test_router_remove_interface_returns_200(self):
-        with self.router() as r:
-            with self.port() as p:
-                body = self._router_interface_action('add',
-                                                     r['router']['id'],
-                                                     None,
-                                                     p['port']['id'])
-                self._router_interface_action('remove',
-                                              r['router']['id'],
-                                              None,
-                                              p['port']['id'],
-                                              expected_body=body)
-
-    def test_router_remove_interface_with_both_ids_returns_200(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                with self.port(subnet=s) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  s['subnet']['id'],
-                                                  p['port']['id'])
-
-    def test_router_remove_interface_wrong_port_returns_404(self):
-        with self.router() as r:
-            with self.subnet():
-                with self.port() as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-                    # create another port for testing failure case
-                    res = self._create_port(self.fmt, p['port']['network_id'])
-                    p2 = self.deserialize(self.fmt, res)
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p2['port']['id'],
-                                                  exc.HTTPNotFound.code)
-
-    def test_router_remove_ipv6_subnet_from_interface(self):
-        """Delete a subnet from a router interface
-
-        Verify that deleting a subnet with router-interface-delete removes
-        that subnet when there are multiple subnets on the interface and
-        removes the interface when it is the last subnet on the interface.
-        """
-        with self.router() as r, self.network() as n:
-            with (self.subnet(network=n, cidr='fd00::1/64', ip_version=6)
-                  ) as s1, self.subnet(network=n, cidr='fd01::1/64',
-                                       ip_version=6) as s2:
-                body = self._router_interface_action('add', r['router']['id'],
-                                                     s1['subnet']['id'],
-                                                     None)
-                self._router_interface_action('add', r['router']['id'],
-                                              s2['subnet']['id'], None)
-                port = self._show('ports', body['port_id'])
-                self.assertEqual(2, len(port['port']['fixed_ips']))
-                self._router_interface_action('remove', r['router']['id'],
-                                              s1['subnet']['id'], None)
-                port = self._show('ports', body['port_id'])
-                self.assertEqual(1, len(port['port']['fixed_ips']))
-                self._router_interface_action('remove', r['router']['id'],
-                                              s2['subnet']['id'], None)
-                exp_code = exc.HTTPNotFound.code
-                port = self._show('ports', body['port_id'],
-                                  expected_code=exp_code)
-
-    def test_router_delete(self):
-        with self.router() as router:
-            router_id = router['router']['id']
-        req = self.new_show_request('router', router_id)
-        res = req.get_response(self._api_for_resource('router'))
-        self.assertEqual(res.status_int, 404)
-
-    def test_router_delete_with_port_existed_returns_409(self):
-        with self.subnet() as subnet:
-            res = self._create_router(self.fmt, _uuid())
-            router = self.deserialize(self.fmt, res)
-            self._router_interface_action('add',
-                                          router['router']['id'],
-                                          subnet['subnet']['id'],
-                                          None)
-            self._delete('routers', router['router']['id'],
-                         exc.HTTPConflict.code)
-
-    def test_router_delete_with_floatingip_existed_returns_409(self):
-        with self.port() as p:
-            private_sub = {'subnet': {'id':
-                                      p['port']['fixed_ips'][0]['subnet_id']}}
-            with self.subnet(cidr='12.0.0.0/24') as public_sub:
-                self._set_net_external(public_sub['subnet']['network_id'])
-                res = self._create_router(self.fmt, _uuid())
-                r = self.deserialize(self.fmt, res)
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    public_sub['subnet']['network_id'])
-                self._router_interface_action('add', r['router']['id'],
-                                              private_sub['subnet']['id'],
-                                              None)
-                res = self._create_floatingip(
-                    self.fmt, public_sub['subnet']['network_id'],
-                    port_id=p['port']['id'])
-                self.assertEqual(res.status_int, exc.HTTPCreated.code)
-                self._delete('routers', r['router']['id'],
-                             expected_code=exc.HTTPConflict.code)
-
-    def test_router_show(self):
-        name = 'router1'
-        tenant_id = _uuid()
-        expected_value = [('name', name), ('tenant_id', tenant_id),
-                          ('admin_state_up', True), ('status', 'ACTIVE'),
-                          ('external_gateway_info', None)]
-        with self.router(name=name, admin_state_up=True,
-                         tenant_id=tenant_id) as router:
-            res = self._show('routers', router['router']['id'])
-            for k, v in expected_value:
-                self.assertEqual(res['router'][k], v)
-
-    def test_network_update_external_failure(self):
-        with self.router() as r:
-            with self.subnet() as s1:
-                self._set_net_external(s1['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s1['subnet']['network_id'])
-                self._update('networks', s1['subnet']['network_id'],
-                             {'network': {external_net.EXTERNAL: False}},
-                             expected_code=exc.HTTPConflict.code)
-
-    def test_network_update_external(self):
-        with self.router() as r:
-            with self.network('test_net') as testnet:
-                self._set_net_external(testnet['network']['id'])
-                with self.subnet() as s1:
-                    self._set_net_external(s1['subnet']['network_id'])
-                    self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        s1['subnet']['network_id'])
-                    self._update('networks', testnet['network']['id'],
-                                 {'network': {external_net.EXTERNAL: False}})
-
-    def test_floatingip_crd_ops(self):
-        with self.floatingip_with_assoc() as fip:
-            self._validate_floating_ip(fip)
-
-        # post-delete, check that it is really gone
-        body = self._list('floatingips')
-        self.assertEqual(len(body['floatingips']), 0)
-
-        self._show('floatingips', fip['floatingip']['id'],
-                   expected_code=exc.HTTPNotFound.code)
-
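-    # floatingip_with_assoc() deletes the floating IP when its context
-    # exits, so the list/show checks above run after that implicit delete.
-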
-    def _test_floatingip_with_assoc_fails(self, plugin_method):
-        with self.subnet(cidr='200.0.0.0/24') as public_sub:
-            self._set_net_external(public_sub['subnet']['network_id'])
-            with self.port() as private_port:
-                with self.router() as r:
-                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
-                    private_sub = {'subnet': {'id': sid}}
-                    self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        public_sub['subnet']['network_id'])
-                    self._router_interface_action('add', r['router']['id'],
-                                                  private_sub['subnet']['id'],
-                                                  None)
-                    with mock.patch(plugin_method) as pl:
-                        pl.side_effect = n_exc.BadRequest(
-                            resource='floatingip',
-                            msg='fake_error')
-                        res = self._create_floatingip(
-                            self.fmt,
-                            public_sub['subnet']['network_id'],
-                            port_id=private_port['port']['id'])
-                        self.assertEqual(res.status_int, 400)
-                    for p in self._list('ports')['ports']:
-                        if (p['device_owner'] ==
-                            l3_constants.DEVICE_OWNER_FLOATINGIP):
-                            self.fail('garbage port is not deleted')
-
-    def test_floatingip_with_assoc_fails(self):
-        self._test_floatingip_with_assoc_fails(
-            'neutron.db.l3_db.L3_NAT_db_mixin._check_and_get_fip_assoc')
-
-    def test_create_floatingip_with_assoc(
-        self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
-        with self.floatingip_with_assoc() as fip:
-            body = self._show('floatingips', fip['floatingip']['id'])
-            self.assertEqual(body['floatingip']['id'],
-                             fip['floatingip']['id'])
-            self.assertEqual(body['floatingip']['port_id'],
-                             fip['floatingip']['port_id'])
-            self.assertEqual(expected_status, body['floatingip']['status'])
-            self.assertIsNotNone(body['floatingip']['fixed_ip_address'])
-            self.assertIsNotNone(body['floatingip']['router_id'])
-
-    def test_create_floatingip_non_admin_context_agent_notification(self):
-        plugin = manager.NeutronManager.get_service_plugins()[
-            service_constants.L3_ROUTER_NAT]
-        if not hasattr(plugin, 'l3_rpc_notifier'):
-            self.skipTest("Plugin does not support l3_rpc_notifier")
-
-        with self.subnet(cidr='11.0.0.0/24') as public_sub,\
-                self.port() as private_port,\
-                self.router() as r:
-            self._set_net_external(public_sub['subnet']['network_id'])
-            subnet_id = private_port['port']['fixed_ips'][0]['subnet_id']
-            private_sub = {'subnet': {'id': subnet_id}}
-
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                public_sub['subnet']['network_id'])
-            self._router_interface_action(
-                'add', r['router']['id'],
-                private_sub['subnet']['id'], None)
-
-            with mock.patch.object(plugin.l3_rpc_notifier,
-                                   'routers_updated') as agent_notification:
-                self._make_floatingip(
-                    self.fmt,
-                    public_sub['subnet']['network_id'],
-                    port_id=private_port['port']['id'],
-                    set_context=True)
-                self.assertTrue(agent_notification.called)
-
-    def test_floating_port_status_not_applicable(self):
-        with self.floatingip_with_assoc():
-            port_body = self._list('ports',
-               query_params='device_owner=network:floatingip')['ports'][0]
-            self.assertEqual(l3_constants.PORT_STATUS_NOTAPPLICABLE,
-                             port_body['status'])
-
-    def test_floatingip_update(
-        self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
-        with self.port() as p:
-            private_sub = {'subnet': {'id':
-                                      p['port']['fixed_ips'][0]['subnet_id']}}
-            with self.floatingip_no_assoc(private_sub) as fip:
-                body = self._show('floatingips', fip['floatingip']['id'])
-                self.assertIsNone(body['floatingip']['port_id'])
-                self.assertIsNone(body['floatingip']['fixed_ip_address'])
-                self.assertEqual(body['floatingip']['status'], expected_status)
-
-                port_id = p['port']['id']
-                ip_address = p['port']['fixed_ips'][0]['ip_address']
-                body = self._update('floatingips', fip['floatingip']['id'],
-                                    {'floatingip': {'port_id': port_id}})
-                self.assertEqual(body['floatingip']['port_id'], port_id)
-                self.assertEqual(body['floatingip']['fixed_ip_address'],
-                                 ip_address)
-
-    def test_floatingip_create_different_fixed_ip_same_port(self):
-        """This tests that it is possible to delete a port that has
-        multiple floating IP addresses associated with it (each floating
-        IP associated with a unique fixed address).
-        """
-
-        with self.router() as r:
-            with self.subnet(cidr='11.0.0.0/24') as public_sub:
-                self._set_net_external(public_sub['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    public_sub['subnet']['network_id'])
-
-                with self.subnet() as private_sub:
-                    ip_range = list(netaddr.IPNetwork(
-                        private_sub['subnet']['cidr']))
-                    fixed_ips = [{'ip_address': str(ip_range[-3])},
-                                 {'ip_address': str(ip_range[-2])}]
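-                    # ip_range[-1] is the subnet's broadcast address, so
-                    # [-2] and [-3] are the two highest assignable IPs
-                    # (.254 and .253 in a /24)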
-
-                    self._router_interface_action(
-                        'add', r['router']['id'],
-                        private_sub['subnet']['id'], None)
-
-                    with self.port(subnet=private_sub,
-                                   fixed_ips=fixed_ips) as p:
-
-                        fip1 = self._make_floatingip(
-                            self.fmt,
-                            public_sub['subnet']['network_id'],
-                            p['port']['id'],
-                            fixed_ip=str(ip_range[-2]))
-                        fip2 = self._make_floatingip(
-                            self.fmt,
-                            public_sub['subnet']['network_id'],
-                            p['port']['id'],
-                            fixed_ip=str(ip_range[-3]))
-
-                        # Test that floating ips are assigned successfully.
-                        body = self._show('floatingips',
-                                          fip1['floatingip']['id'])
-                        self.assertEqual(
-                            body['floatingip']['port_id'],
-                            fip1['floatingip']['port_id'])
-
-                        body = self._show('floatingips',
-                                          fip2['floatingip']['id'])
-                        self.assertEqual(
-                            body['floatingip']['port_id'],
-                            fip2['floatingip']['port_id'])
-                    self._delete('ports', p['port']['id'])
-                    # Test that port has been successfully deleted.
-                    body = self._show('ports', p['port']['id'],
-                                      expected_code=exc.HTTPNotFound.code)
-
-    def test_floatingip_update_different_fixed_ip_same_port(self):
-        with self.subnet() as s:
-            ip_range = list(netaddr.IPNetwork(s['subnet']['cidr']))
-            fixed_ips = [{'ip_address': str(ip_range[-3])},
-                         {'ip_address': str(ip_range[-2])}]
-            with self.port(subnet=s, fixed_ips=fixed_ips) as p:
-                with self.floatingip_with_assoc(
-                    port_id=p['port']['id'],
-                    fixed_ip=str(ip_range[-3])) as fip:
-                    body = self._show('floatingips', fip['floatingip']['id'])
-                    self.assertEqual(fip['floatingip']['id'],
-                                     body['floatingip']['id'])
-                    self.assertEqual(fip['floatingip']['port_id'],
-                                     body['floatingip']['port_id'])
-                    self.assertEqual(str(ip_range[-3]),
-                                     body['floatingip']['fixed_ip_address'])
-                    self.assertIsNotNone(body['floatingip']['router_id'])
-                    body_2 = self._update(
-                        'floatingips', fip['floatingip']['id'],
-                        {'floatingip': {'port_id': p['port']['id'],
-                                        'fixed_ip_address': str(ip_range[-2])}
-                         })
-                    self.assertEqual(fip['floatingip']['port_id'],
-                                     body_2['floatingip']['port_id'])
-                    self.assertEqual(str(ip_range[-2]),
-                                     body_2['floatingip']['fixed_ip_address'])
-
-    def test_floatingip_update_different_router(self):
-        # Create subnets with different CIDRs to account for plugins which
-        # do not support overlapping IPs
-        with self.subnet(cidr='10.0.0.0/24') as s1,\
-                self.subnet(cidr='10.0.1.0/24') as s2:
-            with self.port(subnet=s1) as p1, self.port(subnet=s2) as p2:
-                private_sub1 = {'subnet':
-                                {'id':
-                                 p1['port']['fixed_ips'][0]['subnet_id']}}
-                private_sub2 = {'subnet':
-                                {'id':
-                                 p2['port']['fixed_ips'][0]['subnet_id']}}
-                with self.subnet(cidr='12.0.0.0/24') as public_sub:
-                    with self.floatingip_no_assoc_with_public_sub(
-                        private_sub1,
-                        public_sub=public_sub) as (fip1, r1),\
-                            self.floatingip_no_assoc_with_public_sub(
-                                private_sub2,
-                                public_sub=public_sub) as (fip2, r2):
-
-                        def assert_no_assoc(fip):
-                            body = self._show('floatingips',
-                                              fip['floatingip']['id'])
-                            self.assertIsNone(body['floatingip']['port_id'])
-                            self.assertIsNone(
-                                body['floatingip']['fixed_ip_address'])
-
-                        assert_no_assoc(fip1)
-                        assert_no_assoc(fip2)
-
-                        def associate_and_assert(fip, port):
-                            port_id = port['port']['id']
-                            ip_address = (port['port']['fixed_ips']
-                                          [0]['ip_address'])
-                            body = self._update(
-                                'floatingips', fip['floatingip']['id'],
-                                {'floatingip': {'port_id': port_id}})
-                            self.assertEqual(body['floatingip']['port_id'],
-                                             port_id)
-                            self.assertEqual(
-                                body['floatingip']['fixed_ip_address'],
-                                ip_address)
-                            return body['floatingip']['router_id']
-
-                        fip1_r1_res = associate_and_assert(fip1, p1)
-                        self.assertEqual(fip1_r1_res, r1['router']['id'])
-                        # The following operation will associate the floating
-                        # ip to a different router
-                        fip1_r2_res = associate_and_assert(fip1, p2)
-                        self.assertEqual(fip1_r2_res, r2['router']['id'])
-                        fip2_r1_res = associate_and_assert(fip2, p1)
-                        self.assertEqual(fip2_r1_res, r1['router']['id'])
-                        # disassociate fip1
-                        self._update(
-                            'floatingips', fip1['floatingip']['id'],
-                            {'floatingip': {'port_id': None}})
-                        fip2_r2_res = associate_and_assert(fip2, p2)
-                        self.assertEqual(fip2_r2_res, r2['router']['id'])
-
-    def test_floatingip_port_delete(self):
-        with self.subnet() as private_sub:
-            with self.floatingip_no_assoc(private_sub) as fip:
-                with self.port(subnet=private_sub) as p:
-                    body = self._update('floatingips', fip['floatingip']['id'],
-                                        {'floatingip':
-                                         {'port_id': p['port']['id']}})
-                # note: the port is deleted explicitly below, which is what
-                # we want to test; confirm that the floating IP's port_id,
-                # fixed_ip_address and router_id are set back to None
-                self._delete('ports', p['port']['id'])
-                body = self._show('floatingips', fip['floatingip']['id'])
-                self.assertEqual(body['floatingip']['id'],
-                                 fip['floatingip']['id'])
-                self.assertIsNone(body['floatingip']['port_id'])
-                self.assertIsNone(body['floatingip']['fixed_ip_address'])
-                self.assertIsNone(body['floatingip']['router_id'])
-
-    def test_two_fips_one_port_invalid_return_409(self):
-        with self.floatingip_with_assoc() as fip1:
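-            # A port may carry at most one floating IP; creating a
-            # second one for the same port must return a conflict.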
-            res = self._create_floatingip(
-                self.fmt,
-                fip1['floatingip']['floating_network_id'],
-                fip1['floatingip']['port_id'])
-            self.assertEqual(res.status_int, exc.HTTPConflict.code)
-
-    def test_floating_ip_direct_port_delete_returns_409(self):
-        found = False
-        with self.floatingip_with_assoc():
-            for p in self._list('ports')['ports']:
-                if p['device_owner'] == l3_constants.DEVICE_OWNER_FLOATINGIP:
-                    self._delete('ports', p['id'],
-                                 expected_code=exc.HTTPConflict.code)
-                    found = True
-        self.assertTrue(found)
-
-    def _test_floatingip_with_invalid_create_port(self, plugin_class):
-        with self.port() as p:
-            private_sub = {'subnet': {'id':
-                                      p['port']['fixed_ips'][0]['subnet_id']}}
-            with self.subnet(cidr='12.0.0.0/24') as public_sub:
-                self._set_net_external(public_sub['subnet']['network_id'])
-                res = self._create_router(self.fmt, _uuid())
-                r = self.deserialize(self.fmt, res)
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    public_sub['subnet']['network_id'])
-                self._router_interface_action(
-                    'add', r['router']['id'],
-                    private_sub['subnet']['id'],
-                    None)
-
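-                # Make create_port return a port without fixed IPs so
-                # floating IP creation cannot obtain an address and must
-                # fail with 400.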
-                with mock.patch(plugin_class + '.create_port') as createport:
-                    createport.return_value = {'fixed_ips': []}
-                    res = self._create_floatingip(
-                        self.fmt, public_sub['subnet']['network_id'],
-                        port_id=p['port']['id'])
-                    self.assertEqual(res.status_int,
-                                     exc.HTTPBadRequest.code)
-
-    def test_floatingip_with_invalid_create_port(self):
-        self._test_floatingip_with_invalid_create_port(
-            'neutron.db.db_base_plugin_v2.NeutronDbPluginV2')
-
-    def test_create_floatingip_with_subnet_id_non_admin(self):
-        with self.subnet() as public_sub:
-            self._set_net_external(public_sub['subnet']['network_id'])
-            with self.router():
-                res = self._create_floatingip(
-                    self.fmt,
-                    public_sub['subnet']['network_id'],
-                    subnet_id=public_sub['subnet']['id'],
-                    set_context=True)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-
-    def test_create_floatingip_with_multisubnet_id(self):
-        with self.network() as network:
-            self._set_net_external(network['network']['id'])
-            with self.subnet(network, cidr='10.0.12.0/24') as subnet1:
-                with self.subnet(network, cidr='10.0.13.0/24') as subnet2:
-                    with self.router():
-                        res = self._create_floatingip(
-                            self.fmt,
-                            subnet1['subnet']['network_id'],
-                            subnet_id=subnet1['subnet']['id'])
-                        fip1 = self.deserialize(self.fmt, res)
-                        res = self._create_floatingip(
-                            self.fmt,
-                            subnet1['subnet']['network_id'],
-                            subnet_id=subnet2['subnet']['id'])
-                        fip2 = self.deserialize(self.fmt, res)
-        self.assertTrue(
-            fip1['floatingip']['floating_ip_address'].startswith('10.0.12'))
-        self.assertTrue(
-            fip2['floatingip']['floating_ip_address'].startswith('10.0.13'))
-
-    def test_create_floatingip_with_wrong_subnet_id(self):
-        with self.network() as network1:
-            self._set_net_external(network1['network']['id'])
-            with self.subnet(network1, cidr='10.0.12.0/24') as subnet1:
-                with self.network() as network2:
-                    self._set_net_external(network2['network']['id'])
-                    with self.subnet(network2, cidr='10.0.13.0/24') as subnet2:
-                        with self.router():
-                            res = self._create_floatingip(
-                                self.fmt,
-                                subnet1['subnet']['network_id'],
-                                subnet_id=subnet2['subnet']['id'])
-        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_create_floatingip_no_ext_gateway_return_404(self):
-        with self.subnet() as public_sub:
-            self._set_net_external(public_sub['subnet']['network_id'])
-            with self.port() as private_port:
-                with self.router():
-                    res = self._create_floatingip(
-                        self.fmt,
-                        public_sub['subnet']['network_id'],
-                        port_id=private_port['port']['id'])
-                    # the router has no external gateway, so the
-                    # association fails with a 404
-                    self.assertEqual(res.status_int, exc.HTTPNotFound.code)
-
-    def test_create_floating_non_ext_network_returns_400(self):
-        with self.subnet() as public_sub:
-            # normally we would set the network of public_sub to be
-            # external, but the point of this test is to handle when
-            # that is not the case
-            with self.router():
-                res = self._create_floatingip(
-                    self.fmt,
-                    public_sub['subnet']['network_id'])
-                self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_create_floatingip_no_public_subnet_returns_400(self):
-        with self.network() as public_network:
-            with self.port() as private_port:
-                with self.router() as r:
-                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
-                    private_sub = {'subnet': {'id': sid}}
-                    self._router_interface_action('add', r['router']['id'],
-                                                  private_sub['subnet']['id'],
-                                                  None)
-
-                    res = self._create_floatingip(
-                        self.fmt,
-                        public_network['network']['id'],
-                        port_id=private_port['port']['id'])
-                    self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_create_floatingip_invalid_floating_network_id_returns_400(self):
-        # API-level test - no need to create all objects for l3 plugin
-        res = self._create_floatingip(self.fmt, 'iamnotanuuid',
-                                      uuidutils.generate_uuid(), '192.168.0.1')
-        self.assertEqual(res.status_int, 400)
-
-    def test_create_floatingip_invalid_floating_port_id_returns_400(self):
-        # API-level test - no need to create all objects for l3 plugin
-        res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(),
-                                      'iamnotanuuid', '192.168.0.1')
-        self.assertEqual(res.status_int, 400)
-
-    def test_create_floatingip_invalid_fixed_ip_address_returns_400(self):
-        # API-level test - no need to create all objects for l3 plugin
-        res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(),
-                                      uuidutils.generate_uuid(), 'iamnotanip')
-        self.assertEqual(res.status_int, 400)
-
-    def test_floatingip_list_with_sort(self):
-        with self.subnet(cidr="10.0.0.0/24") as s1,\
-                self.subnet(cidr="11.0.0.0/24") as s2,\
-                self.subnet(cidr="12.0.0.0/24") as s3:
-            network_id1 = s1['subnet']['network_id']
-            network_id2 = s2['subnet']['network_id']
-            network_id3 = s3['subnet']['network_id']
-            self._set_net_external(network_id1)
-            self._set_net_external(network_id2)
-            self._set_net_external(network_id3)
-            fp1 = self._make_floatingip(self.fmt, network_id1)
-            fp2 = self._make_floatingip(self.fmt, network_id2)
-            fp3 = self._make_floatingip(self.fmt, network_id3)
-            self._test_list_with_sort('floatingip', (fp3, fp2, fp1),
-                                      [('floating_ip_address', 'desc')])
-
-    def test_floatingip_list_with_port_id(self):
-        with self.floatingip_with_assoc() as fip:
-            port_id = fip['floatingip']['port_id']
-            res = self._list('floatingips',
-                             query_params="port_id=%s" % port_id)
-            self.assertEqual(len(res['floatingips']), 1)
-            res = self._list('floatingips', query_params="port_id=aaa")
-            self.assertEqual(len(res['floatingips']), 0)
-
-    def test_floatingip_list_with_pagination(self):
-        with self.subnet(cidr="10.0.0.0/24") as s1,\
-                self.subnet(cidr="11.0.0.0/24") as s2,\
-                self.subnet(cidr="12.0.0.0/24") as s3:
-            network_id1 = s1['subnet']['network_id']
-            network_id2 = s2['subnet']['network_id']
-            network_id3 = s3['subnet']['network_id']
-            self._set_net_external(network_id1)
-            self._set_net_external(network_id2)
-            self._set_net_external(network_id3)
-            fp1 = self._make_floatingip(self.fmt, network_id1)
-            fp2 = self._make_floatingip(self.fmt, network_id2)
-            fp3 = self._make_floatingip(self.fmt, network_id3)
-            self._test_list_with_pagination(
-                'floatingip', (fp1, fp2, fp3),
-                ('floating_ip_address', 'asc'), 2, 2)
-
-    def test_floatingip_list_with_pagination_reverse(self):
-        with self.subnet(cidr="10.0.0.0/24") as s1,\
-                self.subnet(cidr="11.0.0.0/24") as s2,\
-                self.subnet(cidr="12.0.0.0/24") as s3:
-            network_id1 = s1['subnet']['network_id']
-            network_id2 = s2['subnet']['network_id']
-            network_id3 = s3['subnet']['network_id']
-            self._set_net_external(network_id1)
-            self._set_net_external(network_id2)
-            self._set_net_external(network_id3)
-            fp1 = self._make_floatingip(self.fmt, network_id1)
-            fp2 = self._make_floatingip(self.fmt, network_id2)
-            fp3 = self._make_floatingip(self.fmt, network_id3)
-            self._test_list_with_pagination_reverse(
-                'floatingip', (fp1, fp2, fp3),
-                ('floating_ip_address', 'asc'), 2, 2)
-
-    def test_floatingip_multi_external_one_internal(self):
-        with self.subnet(cidr="10.0.0.0/24") as exs1,\
-                self.subnet(cidr="11.0.0.0/24") as exs2,\
-                self.subnet(cidr="12.0.0.0/24") as ins1:
-            network_ex_id1 = exs1['subnet']['network_id']
-            network_ex_id2 = exs2['subnet']['network_id']
-            self._set_net_external(network_ex_id1)
-            self._set_net_external(network_ex_id2)
-
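-            # Two routers share the internal subnet but gateway to
-            # different external networks; each floating IP must be
-            # routed through the router on its own external network.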
-            r2i_fixed_ips = [{'ip_address': '12.0.0.2'}]
-            with self.router() as r1,\
-                    self.router() as r2,\
-                    self.port(subnet=ins1,
-                              fixed_ips=r2i_fixed_ips) as r2i_port:
-                self._add_external_gateway_to_router(
-                    r1['router']['id'],
-                    network_ex_id1)
-                self._router_interface_action('add', r1['router']['id'],
-                                              ins1['subnet']['id'],
-                                              None)
-                self._add_external_gateway_to_router(
-                    r2['router']['id'],
-                    network_ex_id2)
-                self._router_interface_action('add', r2['router']['id'],
-                                              None,
-                                              r2i_port['port']['id'])
-
-                with self.port(subnet=ins1,
-                               fixed_ips=[{'ip_address': '12.0.0.3'}]
-                               ) as private_port:
-
-                    fp1 = self._make_floatingip(
-                        self.fmt, network_ex_id1, private_port['port']['id'],
-                        floating_ip='10.0.0.3')
-                    fp2 = self._make_floatingip(
-                        self.fmt, network_ex_id2, private_port['port']['id'],
-                        floating_ip='11.0.0.3')
-                    self.assertEqual(fp1['floatingip']['router_id'],
-                                     r1['router']['id'])
-                    self.assertEqual(fp2['floatingip']['router_id'],
-                                     r2['router']['id'])
-
-    def test_floatingip_same_external_and_internal(self):
-        # When several routers connect the same subnet to the same
-        # external network, the floating IP should select the router
-        # holding the subnet's gateway_ip.
-        with self.subnet(cidr="10.0.0.0/24") as exs,\
-                self.subnet(cidr="12.0.0.0/24", gateway_ip="12.0.0.50") as ins:
-            network_ex_id = exs['subnet']['network_id']
-            self._set_net_external(network_ex_id)
-
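-            # r1 attaches to the internal subnet by subnet_id, so its
-            # interface takes the gateway IP 12.0.0.50; r2 attaches via
-            # an explicit port at 12.0.0.2.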
-            r2i_fixed_ips = [{'ip_address': '12.0.0.2'}]
-            with self.router() as r1,\
-                    self.router() as r2,\
-                    self.port(subnet=ins,
-                              fixed_ips=r2i_fixed_ips) as r2i_port:
-                self._add_external_gateway_to_router(
-                    r1['router']['id'],
-                    network_ex_id)
-                self._router_interface_action('add', r2['router']['id'],
-                                              None,
-                                              r2i_port['port']['id'])
-                self._router_interface_action('add', r1['router']['id'],
-                                              ins['subnet']['id'],
-                                              None)
-                self._add_external_gateway_to_router(
-                    r2['router']['id'],
-                    network_ex_id)
-
-                with self.port(subnet=ins,
-                               fixed_ips=[{'ip_address': '12.0.0.8'}]
-                               ) as private_port:
-
-                    fp = self._make_floatingip(
-                        self.fmt, network_ex_id, private_port['port']['id'],
-                        floating_ip='10.0.0.8')
-                    self.assertEqual(r1['router']['id'],
-                                     fp['floatingip']['router_id'])
-
-    def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
-        found = False
-        with self.floatingip_with_assoc():
-            for p in self._list('ports')['ports']:
-                if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
-                    subnet_id = p['fixed_ips'][0]['subnet_id']
-                    router_id = p['device_id']
-                    self._router_interface_action(
-                        'remove', router_id, subnet_id, None,
-                        expected_code=exc.HTTPConflict.code)
-                    found = True
-                    break
-        self.assertTrue(found)
-
-    def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
-        found = False
-        with self.floatingip_with_assoc():
-            for p in self._list('ports')['ports']:
-                if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
-                    router_id = p['device_id']
-                    self._router_interface_action(
-                        'remove', router_id, None, p['id'],
-                        expected_code=exc.HTTPConflict.code)
-                    found = True
-                    break
-        self.assertTrue(found)
-
-    def _test_router_delete_subnet_inuse_returns_409(self, router, subnet):
-        r, s = router, subnet
-        self._router_interface_action('add',
-                                      r['router']['id'],
-                                      s['subnet']['id'],
-                                      None)
-        # subnet cannot be deleted as it's attached to a router
-        self._delete('subnets', s['subnet']['id'],
-                     expected_code=exc.HTTPConflict.code)
-
-    def _ipv6_subnet(self, mode):
-        return self.subnet(cidr='fd00::1/64', gateway_ip='fd00::1',
-                           ip_version=6,
-                           ipv6_ra_mode=mode,
-                           ipv6_address_mode=mode)
-
-    def test_router_delete_subnet_inuse_returns_409(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._test_router_delete_subnet_inuse_returns_409(r, s)
-
-    def test_router_delete_ipv6_slaac_subnet_inuse_returns_409(self):
-        with self.router() as r:
-            with self._ipv6_subnet(l3_constants.IPV6_SLAAC) as s:
-                self._test_router_delete_subnet_inuse_returns_409(r, s)
-
-    def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self):
-        with self.router() as r:
-            with self._ipv6_subnet(l3_constants.DHCPV6_STATELESS) as s:
-                self._test_router_delete_subnet_inuse_returns_409(r, s)
-
-    def test_delete_ext_net_with_disassociated_floating_ips(self):
-        with self.network() as net:
-            net_id = net['network']['id']
-            self._set_net_external(net_id)
-            with self.subnet(network=net):
-                self._make_floatingip(self.fmt, net_id)
-
-    def test_create_floatingip_with_specific_ip(self):
-        with self.subnet(cidr='10.0.0.0/24') as s:
-            network_id = s['subnet']['network_id']
-            self._set_net_external(network_id)
-            fp = self._make_floatingip(self.fmt, network_id,
-                                       floating_ip='10.0.0.10')
-            self.assertEqual(fp['floatingip']['floating_ip_address'],
-                             '10.0.0.10')
-
-    def test_create_floatingip_with_specific_ip_out_of_allocation(self):
-        with self.subnet(cidr='10.0.0.0/24',
-                         allocation_pools=[
-                             {'start': '10.0.0.10', 'end': '10.0.0.20'}]
-                         ) as s:
-            network_id = s['subnet']['network_id']
-            self._set_net_external(network_id)
-            fp = self._make_floatingip(self.fmt, network_id,
-                                       floating_ip='10.0.0.30')
-            self.assertEqual(fp['floatingip']['floating_ip_address'],
-                             '10.0.0.30')
-
-    def test_create_floatingip_with_specific_ip_non_admin(self):
-        ctx = context.Context('user_id', 'tenant_id')
-
-        with self.subnet(cidr='10.0.0.0/24') as s:
-            network_id = s['subnet']['network_id']
-            self._set_net_external(network_id)
-            self._make_floatingip(self.fmt, network_id,
-                                  set_context=ctx,
-                                  floating_ip='10.0.0.10',
-                                  http_status=exc.HTTPForbidden.code)
-
-    def test_create_floatingip_with_specific_ip_out_of_subnet(self):
-
-        with self.subnet(cidr='10.0.0.0/24') as s:
-            network_id = s['subnet']['network_id']
-            self._set_net_external(network_id)
-            self._make_floatingip(self.fmt, network_id,
-                                  floating_ip='10.0.1.10',
-                                  http_status=exc.HTTPBadRequest.code)
-
-    def test_create_floatingip_with_duplicated_specific_ip(self):
-
-        with self.subnet(cidr='10.0.0.0/24') as s:
-            network_id = s['subnet']['network_id']
-            self._set_net_external(network_id)
-            self._make_floatingip(self.fmt, network_id,
-                                  floating_ip='10.0.0.10')
-
-            self._make_floatingip(self.fmt, network_id,
-                                  floating_ip='10.0.0.10',
-                                  http_status=exc.HTTPConflict.code)
-
-    def test_router_specify_id_backend(self):
-        plugin = manager.NeutronManager.get_service_plugins()[
-                    service_constants.L3_ROUTER_NAT]
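-        # The backend should honor a caller-supplied router id.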
-        router_req = {'router': {'id': _uuid(), 'name': 'router',
-                                 'tenant_id': 'foo',
-                                 'admin_state_up': True}}
-        result = plugin.create_router(context.Context('', 'foo'), router_req)
-        self.assertEqual(result['id'], router_req['router']['id'])
-
-    def test_create_floatingip_ipv6_only_network_returns_400(self):
-        with self.subnet(cidr="2001:db8::/48", ip_version=6) as public_sub:
-            self._set_net_external(public_sub['subnet']['network_id'])
-            res = self._create_floatingip(
-                self.fmt,
-                public_sub['subnet']['network_id'])
-            self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_create_floatingip_ipv6_and_ipv4_network_creates_ipv4(self):
-        with self.network() as n,\
-                self.subnet(cidr="2001:db8::/48", ip_version=6, network=n),\
-                self.subnet(cidr="192.168.1.0/24", ip_version=4, network=n):
-            self._set_net_external(n['network']['id'])
-            fip = self._make_floatingip(self.fmt, n['network']['id'])
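-            # 192.168.1.1 is the IPv4 subnet's gateway, so the first
-            # allocatable floating IP address is 192.168.1.2.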
-            self.assertEqual(fip['floatingip']['floating_ip_address'],
-                             '192.168.1.2')
-
-    def test_create_floatingip_with_assoc_to_ipv6_subnet(self):
-        with self.subnet() as public_sub:
-            self._set_net_external(public_sub['subnet']['network_id'])
-            with self.subnet(cidr="2001:db8::/48",
-                             ip_version=6) as private_sub:
-                with self.port(subnet=private_sub) as private_port:
-                    res = self._create_floatingip(
-                        self.fmt,
-                        public_sub['subnet']['network_id'],
-                        port_id=private_port['port']['id'])
-                    self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
-
-    def test_create_floatingip_with_assoc_to_ipv4_and_ipv6_port(self):
-        with self.network() as n,\
-                self.subnet(cidr='10.0.0.0/24', network=n) as s4,\
-                self.subnet(cidr='2001:db8::/64', ip_version=6, network=n),\
-                self.port(subnet=s4) as p:
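-            # The port receives one fixed IP on each of the network's
-            # subnets: one IPv4 and one IPv6.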
-            self.assertEqual(len(p['port']['fixed_ips']), 2)
-            ipv4_address = next(
-                i['ip_address'] for i in p['port']['fixed_ips']
-                if netaddr.IPAddress(i['ip_address']).version == 4)
-            with self.floatingip_with_assoc(port_id=p['port']['id']) as fip:
-                self.assertEqual(fip['floatingip']['fixed_ip_address'],
-                                 ipv4_address)
-                floating_ip = netaddr.IPAddress(
-                        fip['floatingip']['floating_ip_address'])
-                self.assertEqual(floating_ip.version, 4)
-
-    def test_update_subnet_gateway_for_external_net(self):
-        """Test to make sure notification to routers occurs when the gateway
-            ip address of a subnet of the external network is changed.
-        """
-        plugin = manager.NeutronManager.get_service_plugins()[
-            service_constants.L3_ROUTER_NAT]
-        if not hasattr(plugin, 'l3_rpc_notifier'):
-            self.skipTest("Plugin does not support l3_rpc_notifier")
-        # make sure the callback is registered.
-        registry.subscribe(
-            l3_db._notify_subnet_gateway_ip_update, resources.SUBNET_GATEWAY,
-            events.AFTER_UPDATE)
-        with mock.patch.object(plugin.l3_rpc_notifier,
-                               'routers_updated') as chk_method:
-            with self.network() as network:
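-                # Keep 120.0.0.1 and 120.0.0.2 out of the allocation
-                # pool so the gateway can later be moved to 120.0.0.2.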
-                allocation_pools = [{'start': '120.0.0.3',
-                                     'end': '120.0.0.254'}]
-                with self.subnet(network=network,
-                                 gateway_ip='120.0.0.1',
-                                 allocation_pools=allocation_pools,
-                                 cidr='120.0.0.0/24') as subnet:
-                    kwargs = {
-                        'device_owner': l3_constants.DEVICE_OWNER_ROUTER_GW,
-                        'device_id': 'fake_device'}
-                    with self.port(subnet=subnet, **kwargs):
-                        data = {'subnet': {'gateway_ip': '120.0.0.2'}}
-                        req = self.new_update_request('subnets', data,
-                                                      subnet['subnet']['id'])
-                        res = self.deserialize(self.fmt,
-                                               req.get_response(self.api))
-                        self.assertEqual(res['subnet']['gateway_ip'],
-                                         data['subnet']['gateway_ip'])
-                        chk_method.assert_called_with(mock.ANY,
-                                                      ['fake_device'], None)
-
-
-class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
-
-    """Unit tests for methods called by the L3 agent."""
-
-    def test_l3_agent_routers_query_interfaces(self):
-        with self.router() as r:
-            with self.port() as p:
-                self._router_interface_action('add',
-                                              r['router']['id'],
-                                              None,
-                                              p['port']['id'])
-
-                routers = self.plugin.get_sync_data(
-                    context.get_admin_context(), None)
-                self.assertEqual(1, len(routers))
-                interfaces = routers[0][l3_constants.INTERFACE_KEY]
-                self.assertEqual(1, len(interfaces))
-                subnets = interfaces[0]['subnets']
-                self.assertEqual(1, len(subnets))
-                subnet_id = subnets[0]['id']
-                wanted_subnetid = p['port']['fixed_ips'][0]['subnet_id']
-                self.assertEqual(wanted_subnetid, subnet_id)
-
-    def test_l3_agent_sync_interfaces(self):
-        """Test L3 interfaces query return valid result"""
-        with self.router() as router1, self.router() as router2:
-            with self.port() as port1, self.port() as port2:
-                self._router_interface_action('add',
-                                              router1['router']['id'],
-                                              None,
-                                              port1['port']['id'])
-                self._router_interface_action('add',
-                                              router2['router']['id'],
-                                              None,
-                                              port2['port']['id'])
-                admin_ctx = context.get_admin_context()
-                router1_id = router1['router']['id']
-                router2_id = router2['router']['id']
-
-                # Verify that passing in router1 returns only its interface
-                ifaces = self.plugin._get_sync_interfaces(admin_ctx,
-                                                          [router1_id])
-                self.assertEqual(1, len(ifaces))
-                self.assertEqual(router1_id,
-                                 ifaces[0]['device_id'])
-
-                # Verify that passing in both routers returns both interfaces
-                ifaces = self.plugin._get_sync_interfaces(admin_ctx,
-                                                          [router1_id,
-                                                           router2_id])
-                self.assertEqual(2, len(ifaces))
-                device_list = [i['device_id'] for i in ifaces]
-                self.assertIn(router1_id, device_list)
-                self.assertIn(router2_id, device_list)
-
-                # Verify that passing in no routers returns an empty list
-                ifaces = self.plugin._get_sync_interfaces(admin_ctx, None)
-                self.assertEqual(0, len(ifaces))
-
-    def test_l3_agent_routers_query_ignore_interfaces_with_moreThanOneIp(self):
-        with self.router() as r:
-            with self.subnet(cidr='9.0.1.0/24') as subnet:
-                with self.port(subnet=subnet,
-                               fixed_ips=[{'ip_address': '9.0.1.3'}]) as p:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'])
-                    port = {'port': {'fixed_ips':
-                                     [{'ip_address': '9.0.1.4',
-                                       'subnet_id': subnet['subnet']['id']},
-                                      {'ip_address': '9.0.1.5',
-                                       'subnet_id': subnet['subnet']['id']}]}}
-                    ctx = context.get_admin_context()
-                    self.core_plugin.update_port(ctx, p['port']['id'], port)
-                    routers = self.plugin.get_sync_data(ctx, None)
-                    self.assertEqual(1, len(routers))
-                    interfaces = routers[0].get(l3_constants.INTERFACE_KEY, [])
-                    self.assertEqual(1, len(interfaces))
-
-    def test_l3_agent_routers_query_gateway(self):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._set_net_external(s['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-                routers = self.plugin.get_sync_data(
-                    context.get_admin_context(), [r['router']['id']])
-                self.assertEqual(1, len(routers))
-                gw_port = routers[0]['gw_port']
-                subnets = gw_port.get('subnets')
-                self.assertEqual(1, len(subnets))
-                self.assertEqual(s['subnet']['id'], subnets[0]['id'])
-                self._remove_external_gateway_from_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-
-    def test_l3_agent_routers_query_floatingips(self):
-        with self.floatingip_with_assoc() as fip:
-            routers = self.plugin.get_sync_data(
-                context.get_admin_context(), [fip['floatingip']['router_id']])
-            self.assertEqual(1, len(routers))
-            floatingips = routers[0][l3_constants.FLOATINGIP_KEY]
-            self.assertEqual(1, len(floatingips))
-            self.assertEqual(floatingips[0]['id'],
-                             fip['floatingip']['id'])
-            self.assertEqual(floatingips[0]['port_id'],
-                             fip['floatingip']['port_id'])
-            self.assertIsNotNone(floatingips[0]['fixed_ip_address'])
-            self.assertIsNotNone(floatingips[0]['router_id'])
-
-    def _test_notify_op_agent(self, target_func, *args):
-        l3_rpc_agent_api_str = (
-            'neutron.api.rpc.agentnotifiers.l3_rpc_agent_api.L3AgentNotifyAPI')
-        with mock.patch(l3_rpc_agent_api_str):
-            plugin = manager.NeutronManager.get_service_plugins()[
-                service_constants.L3_ROUTER_NAT]
-            notifyApi = plugin.l3_rpc_notifier
-            kargs = list(args) + [notifyApi]
-            target_func(*kargs)
-
-    def _test_router_gateway_op_agent(self, notifyApi):
-        with self.router() as r:
-            with self.subnet() as s:
-                self._set_net_external(s['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-                self._remove_external_gateway_from_router(
-                    r['router']['id'],
-                    s['subnet']['network_id'])
-                self.assertEqual(
-                    2, notifyApi.routers_updated.call_count)
-
-    def test_router_gateway_op_agent(self):
-        self._test_notify_op_agent(self._test_router_gateway_op_agent)
-
-    def _test_interfaces_op_agent(self, r, notifyApi):
-        with self.port() as p:
-            self._router_interface_action('add',
-                                          r['router']['id'],
-                                          None,
-                                          p['port']['id'])
-            # clean-up
-            self._router_interface_action('remove',
-                                          r['router']['id'],
-                                          None,
-                                          p['port']['id'])
-        self.assertEqual(2, notifyApi.routers_updated.call_count)
-
-    def test_interfaces_op_agent(self):
-        with self.router() as r:
-            self._test_notify_op_agent(
-                self._test_interfaces_op_agent, r)
-
-    def _test_floatingips_op_agent(self, notifyApi):
-        with self.floatingip_with_assoc():
-            pass
-        # add gateway, add interface, associate, and delete the floating
-        # IP: four routers_updated notifications in total
-        self.assertEqual(4, notifyApi.routers_updated.call_count)
-
-    def test_floatingips_op_agent(self):
-        self._test_notify_op_agent(self._test_floatingips_op_agent)
-
-
-class L3BaseForIntTests(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    mock_rescheduling = True
-
-    def setUp(self, plugin=None, ext_mgr=None, service_plugins=None):
-        if not plugin:
-            plugin = 'neutron.tests.unit.extensions.test_l3.TestL3NatIntPlugin'
-        # for these tests we need to enable overlapping ips
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        ext_mgr = ext_mgr or L3TestExtensionManager()
-
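-        # Stub out the rescheduling check so router gateway updates in
-        # these tests do not trigger L3 agent rescheduling.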
-        if self.mock_rescheduling:
-            mock.patch('%s._check_router_needs_rescheduling' % plugin,
-                       new=lambda *a: False).start()
-
-        super(L3BaseForIntTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
-                                             service_plugins=service_plugins)
-
-        self.setup_notification_driver()
-
-
-class L3BaseForSepTests(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    def setUp(self, plugin=None, ext_mgr=None):
-        # the plugin without L3 support
-        if not plugin:
-            plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
-        # the L3 service plugin
-        l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
-                     'TestL3NatServicePlugin')
-        service_plugins = {'l3_plugin_name': l3_plugin}
-
-        # for these tests we need to enable overlapping ips
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        if not ext_mgr:
-            ext_mgr = L3TestExtensionManager()
-        super(L3BaseForSepTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
-                                             service_plugins=service_plugins)
-
-        self.setup_notification_driver()
-
-
-class L3NatDBIntAgentSchedulingTestCase(L3BaseForIntTests,
-                                        L3NatTestCaseMixin,
-                                        test_agent.AgentDBTestMixIn):
-
-    """Unit tests for core plugin with L3 routing and scheduling integrated."""
-
-    def setUp(self, plugin='neutron.tests.unit.extensions.test_l3.'
-                           'TestL3NatIntAgentSchedulingPlugin',
-              ext_mgr=None, service_plugins=None):
-        self.mock_rescheduling = False
-        super(L3NatDBIntAgentSchedulingTestCase, self).setUp(
-            plugin, ext_mgr, service_plugins)
-        self.adminContext = context.get_admin_context()
-
-    def _assert_router_on_agent(self, router_id, agent_host):
-        plugin = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        agents = plugin.list_l3_agents_hosting_router(
-            self.adminContext, router_id)['agents']
-        self.assertEqual(len(agents), 1)
-        self.assertEqual(agents[0]['host'], agent_host)
-
-    def test_update_gateway_agent_exists_supporting_network(self):
-        with self.router() as r, self.subnet() as s1, self.subnet() as s2:
-            self._set_net_external(s1['subnet']['network_id'])
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
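-            # host1's agent serves s1's external network and host2's
-            # serves s2's; the router should follow its gateway network.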
-            helpers.register_l3_agent(
-                host='host1',
-                ext_net_id=s1['subnet']['network_id'])
-            helpers.register_l3_agent(
-                host='host2', internal_only=False,
-                ext_net_id=s2['subnet']['network_id'])
-            l3_rpc_cb.sync_routers(self.adminContext,
-                                   host='host1')
-            self._assert_router_on_agent(r['router']['id'], 'host1')
-
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                s1['subnet']['network_id'])
-            self._assert_router_on_agent(r['router']['id'], 'host1')
-
-            self._set_net_external(s2['subnet']['network_id'])
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                s2['subnet']['network_id'])
-            self._assert_router_on_agent(r['router']['id'], 'host2')
-
-    def test_update_gateway_agent_exists_supporting_multiple_network(self):
-        with self.router() as r, self.subnet() as s1, self.subnet() as s2:
-            self._set_net_external(s1['subnet']['network_id'])
-            l3_rpc_cb = l3_rpc.L3RpcCallback()
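-            # Registering host2 with empty ext_net_id and ext_bridge
-            # marks its agent as able to serve any external network.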
-            helpers.register_l3_agent(
-                host='host1',
-                ext_net_id=s1['subnet']['network_id'])
-            helpers.register_l3_agent(
-                host='host2', internal_only=False,
-                ext_net_id='', ext_bridge='')
-            l3_rpc_cb.sync_routers(self.adminContext,
-                                   host='host1')
-            self._assert_router_on_agent(r['router']['id'], 'host1')
-
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                s1['subnet']['network_id'])
-            self._assert_router_on_agent(r['router']['id'], 'host1')
-
-            self._set_net_external(s2['subnet']['network_id'])
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                s2['subnet']['network_id'])
-            self._assert_router_on_agent(r['router']['id'], 'host2')
-
-    def test_router_update_gateway_no_eligible_l3_agent(self):
-        with self.router() as r:
-            with self.subnet() as s1:
-                with self.subnet() as s2:
-                    self._set_net_external(s1['subnet']['network_id'])
-                    self._set_net_external(s2['subnet']['network_id'])
-                    self._add_external_gateway_to_router(
-                        r['router']['id'],
-                        s1['subnet']['network_id'],
-                        expected_code=exc.HTTPBadRequest.code)
-
-
-class L3RpcCallbackTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(L3RpcCallbackTestCase, self).setUp()
-        self.mock_plugin = mock.patch.object(
-            l3_rpc.L3RpcCallback,
-            'plugin', new_callable=mock.PropertyMock).start()
-        self.mock_l3plugin = mock.patch.object(
-            l3_rpc.L3RpcCallback,
-            'l3plugin', new_callable=mock.PropertyMock).start()
-        self.l3_rpc_cb = l3_rpc.L3RpcCallback()
-
-    def test__ensure_host_set_on_port_host_id_none(self):
-        port = {'id': 'id', portbindings.HOST_ID: 'somehost'}
-        self.l3_rpc_cb._ensure_host_set_on_port(None, None, port)
-        self.assertFalse(self.l3_rpc_cb.plugin.update_port.called)
-
-    def test__ensure_host_set_on_port_update_on_concurrent_delete(self):
-        port_id = 'foo_port_id'
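-        # Simulate a compute port with a failed binding and no host
-        # set; the callback will attempt to update the host binding.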
-        port = {
-            'id': port_id,
-            'device_owner': DEVICE_OWNER_COMPUTE,
-            portbindings.HOST_ID: '',
-            portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED
-        }
-        router_id = 'foo_router_id'
-        self.l3_rpc_cb.plugin.update_port.side_effect = n_exc.PortNotFound(
-            port_id=port_id)
-        with mock.patch.object(l3_rpc.LOG, 'debug') as mock_log:
-            self.l3_rpc_cb._ensure_host_set_on_port(
-                mock.ANY, mock.ANY, port, router_id)
-        self.l3_rpc_cb.plugin.update_port.assert_called_once_with(
-            mock.ANY, port_id, {'port': {portbindings.HOST_ID: mock.ANY}})
-        self.assertTrue(mock_log.call_count)
-        expected_message = ('Port foo_port_id not found while updating '
-                            'agent binding for router foo_router_id.')
-        actual_message = mock_log.call_args[0][0] % mock_log.call_args[0][1]
-        self.assertEqual(expected_message, actual_message)
-
-
-class L3AgentDbIntTestCase(L3BaseForIntTests, L3AgentDbTestCaseBase):
-
-    """Unit tests for methods called by the L3 agent for
-    the case where core plugin implements L3 routing.
-    """
-
-    def setUp(self):
-        super(L3AgentDbIntTestCase, self).setUp()
-        self.core_plugin = TestL3NatIntPlugin()
-        self.plugin = self.core_plugin
-
-
-class L3AgentDbSepTestCase(L3BaseForSepTests, L3AgentDbTestCaseBase):
-
-    """Unit tests for methods called by the L3 agent for the
-    case where separate service plugin implements L3 routing.
-    """
-
-    def setUp(self):
-        super(L3AgentDbSepTestCase, self).setUp()
-        self.core_plugin = TestNoL3NatPlugin()
-        self.plugin = TestL3NatServicePlugin()
-
-
-class TestL3DbOperationBounds(test_db_base_plugin_v2.DbOperationBoundMixin,
-                              L3NatTestCaseMixin,
-                              ml2_base.ML2TestFramework):
-    def setUp(self):
-        super(TestL3DbOperationBounds, self).setUp()
-        ext_mgr = L3TestExtensionManager()
-        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
-
-    def test_router_list_queries_constant(self):
-        with self.subnet() as s:
-            self._set_net_external(s['subnet']['network_id'])
-
-            def router_maker():
-                ext_info = {'network_id': s['subnet']['network_id']}
-                self._create_router(self.fmt, _uuid(),
-                                    arg_list=('external_gateway_info',),
-                                    external_gateway_info=ext_info)
-
-            self._assert_object_list_queries_constant(router_maker, 'routers')
-
-    def test_floatingip_list_queries_constant(self):
-        with self.floatingip_with_assoc() as flip:
-            internal_port = self._show('ports', flip['floatingip']['port_id'])
-            internal_net_id = internal_port['port']['network_id']
-
-            def float_maker():
-                port = self._make_port(self.fmt, internal_net_id)
-                self._make_floatingip(
-                    self.fmt, flip['floatingip']['floating_network_id'],
-                    port_id=port['port']['id'])
-
-            self._assert_object_list_queries_constant(float_maker,
-                                                      'floatingips')
-
-
-class L3NatDBTestCaseMixin(object):
-    """L3_NAT_dbonly_mixin specific test cases."""
-
-    def setUp(self):
-        super(L3NatDBTestCaseMixin, self).setUp()
-        plugin = manager.NeutronManager.get_service_plugins()[
-            service_constants.L3_ROUTER_NAT]
-        if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin):
-            self.skipTest("Plugin is not L3_NAT_dbonly_mixin")
-
-    def test_create_router_gateway_fails(self):
-        """Force _update_router_gw_info failure and see
-        the exception is propagated.
-        """
-
-        plugin = manager.NeutronManager.get_service_plugins()[
-            service_constants.L3_ROUTER_NAT]
-        ctx = context.Context('', 'foo')
-
-        class MyException(Exception):
-            pass
-
-        mock.patch.object(plugin, '_update_router_gw_info',
-                          side_effect=MyException).start()
-        with self.network() as n:
-            data = {'router': {
-                'name': 'router1', 'admin_state_up': True,
-                'tenant_id': ctx.tenant_id,
-                'external_gateway_info': {'network_id': n['network']['id']}}}
-
-            self.assertRaises(MyException, plugin.create_router, ctx, data)
-            # Verify router doesn't persist on failure
-            routers = plugin.get_routers(ctx)
-            self.assertEqual(0, len(routers))
-
-
-class L3NatDBIntTestCase(L3BaseForIntTests, L3NatTestCaseBase,
-                         L3NatDBTestCaseMixin):
-
-    """Unit tests for core plugin with L3 routing integrated."""
-    pass
-
-
-class L3NatDBSepTestCase(L3BaseForSepTests, L3NatTestCaseBase,
-                         L3NatDBTestCaseMixin):
-
-    """Unit tests for a separate L3 routing service plugin."""
-
-    def test_port_deletion_prevention_handles_missing_port(self):
-        pl = manager.NeutronManager.get_service_plugins().get(
-            service_constants.L3_ROUTER_NAT)
-        self.assertIsNone(
-            pl.prevent_l3_port_deletion(context.get_admin_context(), 'fakeid')
-        )
diff --git a/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py b/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py
deleted file mode 100644 (file)
index 28577f5..0000000
+++ /dev/null
@@ -1,492 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import mock
-from oslo_config import cfg
-from oslo_utils import uuidutils
-import testscenarios
-from webob import exc
-
-from neutron.common import constants
-from neutron.db import api as db_api
-from neutron.db import external_net_db
-from neutron.db import l3_db
-from neutron.db import l3_gwmode_db
-from neutron.db import models_v2
-from neutron.extensions import l3
-from neutron.extensions import l3_ext_gw_mode
-from neutron.tests import base
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit.extensions import test_l3
-from neutron.tests.unit import testlib_api
-
-_uuid = uuidutils.generate_uuid
-FAKE_GW_PORT_ID = _uuid()
-FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff'
-FAKE_FIP_EXT_PORT_ID = _uuid()
-FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66'
-FAKE_FIP_INT_PORT_ID = _uuid()
-FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa'
-FAKE_ROUTER_PORT_ID = _uuid()
-FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb'
-
-
-class TestExtensionManager(object):
-
-    def get_resources(self):
-        # Simulate extension of L3 attribute map
-        for key in l3.RESOURCE_ATTRIBUTE_MAP:
-            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
-                l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
-        return l3.L3.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-# A simple class for making a concrete class out of the mixin
-# for the case of a plugin that integrates l3 routing.
-class TestDbIntPlugin(test_l3.TestL3NatIntPlugin,
-                      l3_gwmode_db.L3_NAT_db_mixin):
-
-    supported_extension_aliases = ["external-net", "router", "ext-gw-mode"]
-
-
-# A simple class for making a concrete class out of the mixin
-# for the case of a l3 router service plugin
-class TestDbSepPlugin(test_l3.TestL3NatServicePlugin,
-                      l3_gwmode_db.L3_NAT_db_mixin):
-
-    supported_extension_aliases = ["router", "ext-gw-mode"]
-
-
-class TestGetEnableSnat(testscenarios.WithScenarios, base.BaseTestCase):
-    scenarios = [
-        ('enabled', {'enable_snat_by_default': True}),
-        ('disabled', {'enable_snat_by_default': False})]
-
-    def setUp(self):
-        super(TestGetEnableSnat, self).setUp()
-        self.config(enable_snat_by_default=self.enable_snat_by_default)
-
-    def _test_get_enable_snat(self, expected, info):
-        observed = l3_gwmode_db.L3_NAT_dbonly_mixin._get_enable_snat(info)
-        self.assertEqual(expected, observed)
-
-    def test_get_enable_snat_without_gw_info(self):
-        self._test_get_enable_snat(self.enable_snat_by_default, {})
-
-    def test_get_enable_snat_without_enable_snat(self):
-        info = {'network_id': _uuid()}
-        self._test_get_enable_snat(self.enable_snat_by_default, info)
-
-    def test_get_enable_snat_with_snat_enabled(self):
-        self._test_get_enable_snat(True, {'enable_snat': True})
-
-    def test_get_enable_snat_with_snat_disabled(self):
-        self._test_get_enable_snat(False, {'enable_snat': False})
-
-
-class TestL3GwModeMixin(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(TestL3GwModeMixin, self).setUp()
-        plugin = __name__ + '.' + TestDbIntPlugin.__name__
-        self.setup_coreplugin(plugin)
-        self.target_object = TestDbIntPlugin()
-        # Patch the context
-        ctx_patcher = mock.patch('neutron.context', autospec=True)
-        mock_context = ctx_patcher.start()
-        self.context = mock_context.get_admin_context()
-        # This ensures that calls to elevated() also work in unit tests
-        self.context.elevated.return_value = self.context
-        self.context.session = db_api.get_session()
-        # Create sample data for tests
-        self.ext_net_id = _uuid()
-        self.int_net_id = _uuid()
-        self.int_sub_id = _uuid()
-        self.tenant_id = 'the_tenant'
-        self.network = models_v2.Network(
-            id=self.ext_net_id,
-            tenant_id=self.tenant_id,
-            admin_state_up=True,
-            status=constants.NET_STATUS_ACTIVE)
-        self.net_ext = external_net_db.ExternalNetwork(
-            network_id=self.ext_net_id)
-        self.context.session.add(self.network)
-        # The following is to avoid complaints from SQLite on
-        # foreign key violations
-        self.context.session.flush()
-        self.context.session.add(self.net_ext)
-        self.router = l3_db.Router(
-            id=_uuid(),
-            name=None,
-            tenant_id=self.tenant_id,
-            admin_state_up=True,
-            status=constants.NET_STATUS_ACTIVE,
-            enable_snat=True,
-            gw_port_id=None)
-        self.context.session.add(self.router)
-        self.context.session.flush()
-        self.router_gw_port = models_v2.Port(
-            id=FAKE_GW_PORT_ID,
-            tenant_id=self.tenant_id,
-            device_id=self.router.id,
-            device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
-            admin_state_up=True,
-            status=constants.PORT_STATUS_ACTIVE,
-            mac_address=FAKE_GW_PORT_MAC,
-            network_id=self.ext_net_id)
-        self.router.gw_port_id = self.router_gw_port.id
-        self.context.session.add(self.router)
-        self.context.session.add(self.router_gw_port)
-        self.context.session.flush()
-        self.fip_ext_port = models_v2.Port(
-            id=FAKE_FIP_EXT_PORT_ID,
-            tenant_id=self.tenant_id,
-            admin_state_up=True,
-            device_id=self.router.id,
-            device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
-            status=constants.PORT_STATUS_ACTIVE,
-            mac_address=FAKE_FIP_EXT_PORT_MAC,
-            network_id=self.ext_net_id)
-        self.context.session.add(self.fip_ext_port)
-        self.context.session.flush()
-        self.int_net = models_v2.Network(
-            id=self.int_net_id,
-            tenant_id=self.tenant_id,
-            admin_state_up=True,
-            status=constants.NET_STATUS_ACTIVE)
-        self.int_sub = models_v2.Subnet(
-            id=self.int_sub_id,
-            tenant_id=self.tenant_id,
-            ip_version=4,
-            cidr='3.3.3.0/24',
-            gateway_ip='3.3.3.1',
-            network_id=self.int_net_id)
-        self.router_port = models_v2.Port(
-            id=FAKE_ROUTER_PORT_ID,
-            tenant_id=self.tenant_id,
-            admin_state_up=True,
-            device_id=self.router.id,
-            device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
-            status=constants.PORT_STATUS_ACTIVE,
-            mac_address=FAKE_ROUTER_PORT_MAC,
-            network_id=self.int_net_id)
-        self.router_port_ip_info = models_v2.IPAllocation(
-            port_id=self.router_port.id,
-            network_id=self.int_net.id,
-            subnet_id=self.int_sub_id,
-            ip_address='3.3.3.1')
-        self.context.session.add(self.int_net)
-        self.context.session.add(self.int_sub)
-        self.context.session.add(self.router_port)
-        self.context.session.add(self.router_port_ip_info)
-        self.context.session.flush()
-        self.fip_int_port = models_v2.Port(
-            id=FAKE_FIP_INT_PORT_ID,
-            tenant_id=self.tenant_id,
-            admin_state_up=True,
-            device_id='something',
-            device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova',
-            status=constants.PORT_STATUS_ACTIVE,
-            mac_address=FAKE_FIP_INT_PORT_MAC,
-            network_id=self.int_net_id)
-        self.fip_int_ip_info = models_v2.IPAllocation(
-            port_id=self.fip_int_port.id,
-            network_id=self.int_net.id,
-            subnet_id=self.int_sub_id,
-            ip_address='3.3.3.3')
-        self.fip = l3_db.FloatingIP(
-            id=_uuid(),
-            floating_ip_address='1.1.1.2',
-            floating_network_id=self.ext_net_id,
-            floating_port_id=FAKE_FIP_EXT_PORT_ID,
-            fixed_port_id=None,
-            fixed_ip_address=None,
-            router_id=None)
-        self.context.session.add(self.fip_int_port)
-        self.context.session.add(self.fip_int_ip_info)
-        self.context.session.add(self.fip)
-        self.context.session.flush()
-        self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
-                            'tenant_id': self.tenant_id}
-
-    def _get_gwports_dict(self, gw_ports):
-        return {gw_port['id']: gw_port for gw_port in gw_ports}
-
-    def _reset_ext_gw(self):
-        # Reset external gateway
-        self.router.gw_port_id = None
-        self.context.session.add(self.router)
-        self.context.session.flush()
-
-    def _test_update_router_gw(self, current_enable_snat, gw_info=None,
-                               expected_enable_snat=True):
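-        # When simulating a gateway that previously had SNAT disabled,
-        # set up that state first so the update below starts from it.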
-        if not current_enable_snat:
-            previous_gw_info = {'network_id': self.ext_net_id,
-                                'enable_snat': current_enable_snat}
-            self.target_object._update_router_gw_info(
-                self.context, self.router.id, previous_gw_info)
-
-        self.target_object._update_router_gw_info(
-            self.context, self.router.id, gw_info)
-        router = self.target_object._get_router(
-            self.context, self.router.id)
-        try:
-            self.assertEqual(FAKE_GW_PORT_ID,
-                             router.gw_port.id)
-            self.assertEqual(FAKE_GW_PORT_MAC,
-                             router.gw_port.mac_address)
-        except AttributeError:
-            self.assertIsNone(router.gw_port)
-        self.assertEqual(expected_enable_snat, router.enable_snat)
-
-    def test_update_router_gw_with_gw_info_none(self):
-        self._test_update_router_gw(current_enable_snat=True)
-
-    def test_update_router_gw_without_info_and_snat_disabled_previously(self):
-        self._test_update_router_gw(current_enable_snat=False)
-
-    def test_update_router_gw_with_network_only(self):
-        info = {'network_id': self.ext_net_id}
-        self._test_update_router_gw(current_enable_snat=True, gw_info=info)
-
-    def test_update_router_gw_with_network_and_snat_disabled_previously(self):
-        info = {'network_id': self.ext_net_id}
-        self._test_update_router_gw(current_enable_snat=False, gw_info=info)
-
-    def test_update_router_gw_with_snat_disabled(self):
-        info = {'network_id': self.ext_net_id,
-                'enable_snat': False}
-        self._test_update_router_gw(
-            current_enable_snat=True, gw_info=info, expected_enable_snat=False)
-
-    def test_update_router_gw_with_snat_enabled(self):
-        info = {'network_id': self.ext_net_id,
-                'enable_snat': True}
-        self._test_update_router_gw(current_enable_snat=False, gw_info=info)
-
-    def test_make_router_dict_no_ext_gw(self):
-        self._reset_ext_gw()
-        router_dict = self.target_object._make_router_dict(self.router)
-        self.assertIsNone(router_dict[l3.EXTERNAL_GW_INFO])
-
-    def test_make_router_dict_with_ext_gw(self):
-        router_dict = self.target_object._make_router_dict(self.router)
-        self.assertEqual({'network_id': self.ext_net_id,
-                          'enable_snat': True,
-                          'external_fixed_ips': []},
-                         router_dict[l3.EXTERNAL_GW_INFO])
-
-    def test_make_router_dict_with_ext_gw_snat_disabled(self):
-        self.router.enable_snat = False
-        router_dict = self.target_object._make_router_dict(self.router)
-        self.assertEqual({'network_id': self.ext_net_id,
-                          'enable_snat': False,
-                          'external_fixed_ips': []},
-                         router_dict[l3.EXTERNAL_GW_INFO])
-
-    def test_build_routers_list_no_ext_gw(self):
-        self._reset_ext_gw()
-        router_dict = self.target_object._make_router_dict(self.router)
-        routers = self.target_object._build_routers_list(self.context,
-                                                         [router_dict],
-                                                         [])
-        self.assertEqual(1, len(routers))
-        router = routers[0]
-        self.assertIsNone(router.get('gw_port'))
-        self.assertIsNone(router.get('enable_snat'))
-
-    def test_build_routers_list_with_ext_gw(self):
-        router_dict = self.target_object._make_router_dict(self.router)
-        routers = self.target_object._build_routers_list(
-            self.context, [router_dict],
-            self._get_gwports_dict([self.router.gw_port]))
-        self.assertEqual(1, len(routers))
-        router = routers[0]
-        self.assertIsNotNone(router.get('gw_port'))
-        self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
-        self.assertTrue(router.get('enable_snat'))
-
-    def test_build_routers_list_with_ext_gw_snat_disabled(self):
-        self.router.enable_snat = False
-        router_dict = self.target_object._make_router_dict(self.router)
-        routers = self.target_object._build_routers_list(
-            self.context, [router_dict],
-            self._get_gwports_dict([self.router.gw_port]))
-        self.assertEqual(1, len(routers))
-        router = routers[0]
-        self.assertIsNotNone(router.get('gw_port'))
-        self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
-        self.assertFalse(router.get('enable_snat'))
-
-    def test_build_routers_list_with_gw_port_mismatch(self):
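-        # An empty gw_ports dict simulates a gateway port that was not
-        # retrieved; the router should then expose no gateway info.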
-        router_dict = self.target_object._make_router_dict(self.router)
-        routers = self.target_object._build_routers_list(
-            self.context, [router_dict], {})
-        self.assertEqual(1, len(routers))
-        router = routers[0]
-        self.assertIsNone(router.get('gw_port'))
-        self.assertIsNone(router.get('enable_snat'))
-
-
-class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
-                           test_l3.L3NatTestCaseMixin):
-
-    def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None):
-        # Store l3 resource attribute map as it will be updated
-        self._l3_attribute_map_bk = {}
-        for item in l3.RESOURCE_ATTRIBUTE_MAP:
-            self._l3_attribute_map_bk[item] = (
-                l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
-        plugin = plugin or (
-            'neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
-            'TestDbIntPlugin')
-        # For these tests we need to enable overlapping IPs
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        ext_mgr = ext_mgr or TestExtensionManager()
-        super(ExtGwModeIntTestCase, self).setUp(plugin=plugin,
-                                                ext_mgr=ext_mgr,
-                                                service_plugins=svc_plugins)
-        self.addCleanup(self.restore_l3_attribute_map)
-
-    def restore_l3_attribute_map(self):
-        l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk
-
-    def tearDown(self):
-        super(ExtGwModeIntTestCase, self).tearDown()
-
-    def _set_router_external_gateway(self, router_id, network_id,
-                                     snat_enabled=None,
-                                     expected_code=exc.HTTPOk.code,
-                                     neutron_context=None):
-        ext_gw_info = {'network_id': network_id}
-        # enable_snat must be sent explicitly even when snat_enabled is False
-        if snat_enabled is not None:
-            ext_gw_info['enable_snat'] = snat_enabled
-        return self._update('routers', router_id,
-                            {'router': {'external_gateway_info':
-                                        ext_gw_info}},
-                            expected_code=expected_code,
-                            neutron_context=neutron_context)
-
-    def test_router_create_show_no_ext_gwinfo(self):
-        name = 'router1'
-        tenant_id = _uuid()
-        expected_value = [('name', name), ('tenant_id', tenant_id),
-                          ('admin_state_up', True), ('status', 'ACTIVE'),
-                          ('external_gateway_info', None)]
-        with self.router(name=name, admin_state_up=True,
-                         tenant_id=tenant_id) as router:
-            res = self._show('routers', router['router']['id'])
-            for k, v in expected_value:
-                self.assertEqual(res['router'][k], v)
-
-    def _test_router_create_show_ext_gwinfo(self, snat_input_value,
-                                            snat_expected_value):
-        name = 'router1'
-        tenant_id = _uuid()
-        with self.subnet() as s:
-            ext_net_id = s['subnet']['network_id']
-            self._set_net_external(ext_net_id)
-            input_value = {'network_id': ext_net_id}
-            if snat_input_value in (True, False):
-                input_value['enable_snat'] = snat_input_value
-            expected_value = [('name', name), ('tenant_id', tenant_id),
-                              ('admin_state_up', True), ('status', 'ACTIVE'),
-                              ('external_gateway_info',
-                               {'network_id': ext_net_id,
-                                'enable_snat': snat_expected_value,
-                                'external_fixed_ips': [{
-                                    'ip_address': mock.ANY,
-                                    'subnet_id': s['subnet']['id']}]})]
-            with self.router(
-                name=name, admin_state_up=True, tenant_id=tenant_id,
-                external_gateway_info=input_value) as router:
-                res = self._show('routers', router['router']['id'])
-                for k, v in expected_value:
-                    self.assertEqual(res['router'][k], v)
-
-    def test_router_create_show_ext_gwinfo_default(self):
-        self._test_router_create_show_ext_gwinfo(None, True)
-
-    def test_router_create_show_ext_gwinfo_with_snat_enabled(self):
-        self._test_router_create_show_ext_gwinfo(True, True)
-
-    def test_router_create_show_ext_gwinfo_with_snat_disabled(self):
-        self._test_router_create_show_ext_gwinfo(False, False)
-
-    def _test_router_update_ext_gwinfo(self, snat_input_value,
-                                       snat_expected_value=False,
-                                       expected_http_code=exc.HTTPOk.code):
-        with self.router() as r:
-            with self.subnet() as s:
-                try:
-                    ext_net_id = s['subnet']['network_id']
-                    self._set_net_external(ext_net_id)
-                    self._set_router_external_gateway(
-                        r['router']['id'], ext_net_id,
-                        snat_enabled=snat_input_value,
-                        expected_code=expected_http_code)
-                    if expected_http_code != exc.HTTPOk.code:
-                        return
-                    body = self._show('routers', r['router']['id'])
-                    res_gw_info = body['router']['external_gateway_info']
-                    self.assertEqual(res_gw_info['network_id'], ext_net_id)
-                    self.assertEqual(res_gw_info['enable_snat'],
-                                     snat_expected_value)
-                finally:
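-                    # Detach the gateway so the subnet and router fixtures
-                    # can be torn down cleanly.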
-                    self._remove_external_gateway_from_router(
-                        r['router']['id'], ext_net_id)
-
-    def test_router_update_ext_gwinfo_default(self):
-        self._test_router_update_ext_gwinfo(None, True)
-
-    def test_router_update_ext_gwinfo_with_snat_enabled(self):
-        self._test_router_update_ext_gwinfo(True, True)
-
-    def test_router_update_ext_gwinfo_with_snat_disabled(self):
-        self._test_router_update_ext_gwinfo(False, False)
-
-    def test_router_update_ext_gwinfo_with_invalid_snat_setting(self):
-        self._test_router_update_ext_gwinfo(
-            'xxx', None, expected_http_code=exc.HTTPBadRequest.code)
-
-
-class ExtGwModeSepTestCase(ExtGwModeIntTestCase):
-
-    def setUp(self, plugin=None):
-        # Store l3 resource attribute map as it will be updated
-        self._l3_attribute_map_bk = {}
-        for item in l3.RESOURCE_ATTRIBUTE_MAP:
-            self._l3_attribute_map_bk[item] = (
-                l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
-        plugin = plugin or (
-            'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin')
-        # the L3 service plugin
-        l3_plugin = ('neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
-                     'TestDbSepPlugin')
-        svc_plugins = {'l3_plugin_name': l3_plugin}
-        # For these tests we need to enable overlapping IPs
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        super(ExtGwModeSepTestCase, self).setUp(plugin=plugin,
-                                                svc_plugins=svc_plugins)
-        self.addCleanup(self.restore_l3_attribute_map)
diff --git a/neutron/tests/unit/extensions/test_netmtu.py b/neutron/tests/unit/extensions/test_netmtu.py
deleted file mode 100644 (file)
index 957adae..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2015 OpenStack Foundation.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import constants
-from neutron.db import db_base_plugin_v2
-from neutron.db import netmtu_db
-from neutron.extensions import netmtu
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-
-class NetmtuExtensionManager(object):
-
-    def get_resources(self):
-        return []
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-    def get_extended_resources(self, version):
-        return netmtu.get_extended_resources(version)
-
-
-class NetmtuExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                                netmtu_db.Netmtu_db_mixin):
-    """Test plugin to mixin the network MTU extensions.
-    """
-
-    supported_extension_aliases = ["net-mtu"]
-
-
-class NetmtuExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2):
-    """Test API extension net-mtu attributes.
-    """
-
-    def setUp(self):
-        plugin = ('neutron.tests.unit.extensions.test_netmtu.' +
-                  'NetmtuExtensionTestPlugin')
-        ext_mgr = NetmtuExtensionManager()
-        super(NetmtuExtensionTestCase, self).setUp(plugin=plugin,
-                                                   ext_mgr=ext_mgr)
-
-    def test_list_networks_with_fields_mtu(self):
-        with self.network(name='net1') as net1:
-            req = self.new_list_request('networks',
-                                        params='fields=name&fields=mtu')
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(1, len(res['networks']))
-            self.assertEqual(res['networks'][0]['name'],
-                             net1['network']['name'])
-            self.assertEqual(res['networks'][0].get('mtu'),
-                             constants.DEFAULT_NETWORK_MTU)
-
-    def test_show_network_mtu(self):
-        with self.network(name='net1') as net:
-            req = self.new_show_request('networks', net['network']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['network']['name'],
-                             net['network']['name'])
-            self.assertEqual(res['network']['mtu'],
-                             constants.DEFAULT_NETWORK_MTU)
diff --git a/neutron/tests/unit/extensions/test_portsecurity.py b/neutron/tests/unit/extensions/test_portsecurity.py
deleted file mode 100644 (file)
index 0179ca5..0000000
+++ /dev/null
@@ -1,401 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from webob import exc
-
-from neutron.api.v2 import attributes as attr
-from neutron import context
-from neutron.db import db_base_plugin_v2
-from neutron.db import portsecurity_db
-from neutron.db import securitygroups_db
-from neutron.extensions import portsecurity as psec
-from neutron.extensions import securitygroup as ext_sg
-from neutron import manager
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit.extensions import test_securitygroup
-
-DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_portsecurity.'
-                   'PortSecurityTestPlugin')
-
-
-class PortSecurityTestCase(
-    test_securitygroup.SecurityGroupsTestCase,
-    test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    def setUp(self, plugin=None):
-        ext_mgr = (
-            test_securitygroup.SecurityGroupTestExtensionManager())
-        super(PortSecurityTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-
-        # Check if a plugin supports security groups
-        plugin_obj = manager.NeutronManager.get_plugin()
-        self._skip_security_group = ('security-group' not in
-                                     plugin_obj.supported_extension_aliases)
-
-    def tearDown(self):
-        super(PortSecurityTestCase, self).tearDown()
-        self._skip_security_group = None
-
-
-class PortSecurityTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                             securitygroups_db.SecurityGroupDbMixin,
-                             portsecurity_db.PortSecurityDbMixin):
-    """Test plugin implementing the calls needed on port create, update and
-    delete to associate ports with security groups and port security.
-    """
-
-    supported_extension_aliases = ["security-group", "port-security"]
-
-    def create_network(self, context, network):
-        tenant_id = network['network'].get('tenant_id')
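-        # Make sure the tenant's default security group exists before
-        # creating the network.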
-        self._ensure_default_security_group(context, tenant_id)
-        with context.session.begin(subtransactions=True):
-            neutron_db = super(PortSecurityTestPlugin, self).create_network(
-                context, network)
-            neutron_db.update(network['network'])
-            self._process_network_port_security_create(
-                context, network['network'], neutron_db)
-        return neutron_db
-
-    def update_network(self, context, id, network):
-        with context.session.begin(subtransactions=True):
-            neutron_db = super(PortSecurityTestPlugin, self).update_network(
-                context, id, network)
-            if psec.PORTSECURITY in network['network']:
-                self._process_network_port_security_update(
-                    context, network['network'], neutron_db)
-        return neutron_db
-
-    def get_network(self, context, id, fields=None):
-        with context.session.begin(subtransactions=True):
-            net = super(PortSecurityTestPlugin, self).get_network(
-                context, id)
-        return self._fields(net, fields)
-
-    def create_port(self, context, port):
-        p = port['port']
-        with context.session.begin(subtransactions=True):
-            p[ext_sg.SECURITYGROUPS] = self._get_security_groups_on_port(
-                context, port)
-            neutron_db = super(PortSecurityTestPlugin, self).create_port(
-                context, port)
-            p.update(neutron_db)
-
-            (port_security, has_ip) = self._determine_port_security_and_has_ip(
-                context, p)
-            p[psec.PORTSECURITY] = port_security
-            self._process_port_port_security_create(context, p, neutron_db)
-
-            if (attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and
-                not (port_security and has_ip)):
-                raise psec.PortSecurityAndIPRequiredForSecurityGroups()
-
-            # The port needs an IP and port_security enabled for security groups
-            if has_ip and port_security:
-                self._ensure_default_security_group_on_port(context, port)
-
-            if (p.get(ext_sg.SECURITYGROUPS) and p[psec.PORTSECURITY]):
-                self._process_port_create_security_group(
-                    context, p, p[ext_sg.SECURITYGROUPS])
-
-        return port['port']
-
-    def update_port(self, context, id, port):
-        delete_security_groups = self._check_update_deletes_security_groups(
-            port)
-        has_security_groups = self._check_update_has_security_groups(port)
-        with context.session.begin(subtransactions=True):
-            ret_port = super(PortSecurityTestPlugin, self).update_port(
-                context, id, port)
-            # copy values over - but not fixed_ips
-            port['port'].pop('fixed_ips', None)
-            ret_port.update(port['port'])
-
-            # populate port_security setting
-            if psec.PORTSECURITY not in ret_port:
-                ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
-                    context, id)
-            has_ip = self._ip_on_port(ret_port)
-            # If the update adds or modifies security groups, port security
-            # must be enabled and the port must have an IP
-            if (has_security_groups and (not ret_port[psec.PORTSECURITY]
-                                         or not has_ip)):
-                raise psec.PortSecurityAndIPRequiredForSecurityGroups()
-
-            # Port security or the IP was turned off. Check that no security
-            # groups remain on the port.
-            if ret_port[psec.PORTSECURITY] is not True or not has_ip:
-                if has_security_groups:
-                    raise psec.PortSecurityAndIPRequiredForSecurityGroups()
-
-                # get security groups on port
-                filters = {'port_id': [id]}
-                security_groups = (super(PortSecurityTestPlugin, self).
-                                   _get_port_security_group_bindings(
-                                       context, filters))
-                if security_groups and not delete_security_groups:
-                    raise psec.PortSecurityPortHasSecurityGroup()
-
-            if (delete_security_groups or has_security_groups):
-                # delete the security group bindings and recreate them with
-                # the new set of security groups
-                self._delete_port_security_group_bindings(context, id)
-                sgids = self._get_security_groups_on_port(context, port)
-                # _process_port_create_security_group() needs the port id
-                port['id'] = id
-                self._process_port_create_security_group(context,
-                                                         ret_port, sgids)
-
-            if psec.PORTSECURITY in port['port']:
-                self._process_port_port_security_update(
-                    context, port['port'], ret_port)
-
-        return ret_port
-
-
-class PortSecurityDBTestCase(PortSecurityTestCase):
-    def setUp(self, plugin=None, service_plugins=None):
-        plugin = plugin or DB_PLUGIN_KLASS
-        super(PortSecurityDBTestCase, self).setUp(plugin)
-
-
-class TestPortSecurity(PortSecurityDBTestCase):
-    def test_create_network_with_portsecurity_mac(self):
-        res = self._create_network('json', 'net1', True)
-        net = self.deserialize('json', res)
-        self.assertTrue(net['network'][psec.PORTSECURITY])
-
-    def test_create_network_with_portsecurity_false(self):
-        res = self._create_network('json', 'net1', True,
-                                   arg_list=('port_security_enabled',),
-                                   port_security_enabled=False)
-        net = self.deserialize('json', res)
-        self.assertFalse(net['network'][psec.PORTSECURITY])
-
-    def test_updating_network_port_security(self):
-        res = self._create_network('json', 'net1', True,
-                                   port_security_enabled='True')
-        net = self.deserialize('json', res)
-        self.assertTrue(net['network'][psec.PORTSECURITY])
-        update_net = {'network': {psec.PORTSECURITY: False}}
-        req = self.new_update_request('networks', update_net,
-                                      net['network']['id'])
-        net = self.deserialize('json', req.get_response(self.api))
-        self.assertFalse(net['network'][psec.PORTSECURITY])
-        req = self.new_show_request('networks', net['network']['id'])
-        net = self.deserialize('json', req.get_response(self.api))
-        self.assertFalse(net['network'][psec.PORTSECURITY])
-
-    def test_create_port_default_true(self):
-        with self.network() as net:
-            res = self._create_port('json', net['network']['id'])
-            port = self.deserialize('json', res)
-            self.assertTrue(port['port'][psec.PORTSECURITY])
-            self._delete('ports', port['port']['id'])
-
-    def test_create_port_passing_true(self):
-        res = self._create_network('json', 'net1', True,
-                                   arg_list=('port_security_enabled',),
-                                   port_security_enabled=True)
-        net = self.deserialize('json', res)
-        res = self._create_port('json', net['network']['id'])
-        port = self.deserialize('json', res)
-        self.assertTrue(port['port'][psec.PORTSECURITY])
-        self._delete('ports', port['port']['id'])
-
-    def test_create_port_on_port_security_false_network(self):
-        res = self._create_network('json', 'net1', True,
-                                   arg_list=('port_security_enabled',),
-                                   port_security_enabled=False)
-        net = self.deserialize('json', res)
-        res = self._create_port('json', net['network']['id'])
-        port = self.deserialize('json', res)
-        self.assertFalse(port['port'][psec.PORTSECURITY])
-        self._delete('ports', port['port']['id'])
-
-    def test_create_port_security_overrides_network_value(self):
-        res = self._create_network('json', 'net1', True,
-                                   arg_list=('port_security_enabled',),
-                                   port_security_enabled=False)
-        net = self.deserialize('json', res)
-        res = self._create_port('json', net['network']['id'],
-                                arg_list=('port_security_enabled',),
-                                port_security_enabled=True)
-        port = self.deserialize('json', res)
-        self.assertTrue(port['port'][psec.PORTSECURITY])
-        self._delete('ports', port['port']['id'])
-
-    def test_create_port_fails_with_secgroup_and_port_security_false(self):
-        if self._skip_security_group:
-            self.skipTest("Plugin does not support security groups")
-        with self.network() as net:
-            with self.subnet(network=net):
-                security_group = self.deserialize(
-                    'json',
-                    self._create_security_group(self.fmt, 'asdf', 'asdf'))
-                security_group_id = security_group['security_group']['id']
-                res = self._create_port('json', net['network']['id'],
-                                        arg_list=('security_groups',
-                                                  'port_security_enabled'),
-                                        security_groups=[security_group_id],
-                                        port_security_enabled=False)
-                self.assertEqual(res.status_int, 400)
-
-    def test_create_port_with_default_security_group(self):
-        if self._skip_security_group:
-            self.skipTest("Plugin does not support security groups")
-        with self.network() as net:
-            with self.subnet(network=net):
-                res = self._create_port('json', net['network']['id'])
-                port = self.deserialize('json', res)
-                self.assertTrue(port['port'][psec.PORTSECURITY])
-                self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
-                self._delete('ports', port['port']['id'])
-
-    def test_create_port_with_security_group_and_net_sec_false(self):
-        # This tests that port_security_enabled is True when creating a port
-        # on a network that is marked as port_security_enabled=False, has a
-        # subnet, and security_groups are passed in.
-        if self._skip_security_group:
-            self.skipTest("Plugin does not support security groups")
-        res = self._create_network('json', 'net1', True,
-                                   arg_list=('port_security_enabled',),
-                                   port_security_enabled=False)
-        net = self.deserialize('json', res)
-        self._create_subnet('json', net['network']['id'], '10.0.0.0/24')
-        security_group = self.deserialize(
-            'json', self._create_security_group(self.fmt, 'asdf', 'asdf'))
-        security_group_id = security_group['security_group']['id']
-        res = self._create_port('json', net['network']['id'],
-                                arg_list=('security_groups',
-                                          'port_security_enabled'),
-                                port_security_enabled=True,
-                                security_groups=[security_group_id])
-        port = self.deserialize('json', res)
-        self.assertTrue(port['port'][psec.PORTSECURITY])
-        self.assertEqual(port['port']['security_groups'], [security_group_id])
-        self._delete('ports', port['port']['id'])
-
-    def test_create_port_without_security_group_and_net_sec_false(self):
-        res = self._create_network('json', 'net1', True,
-                                   arg_list=('port_security_enabled',),
-                                   port_security_enabled=False)
-        net = self.deserialize('json', res)
-        self._create_subnet('json', net['network']['id'], '10.0.0.0/24')
-        res = self._create_port('json', net['network']['id'])
-        port = self.deserialize('json', res)
-        self.assertFalse(port['port'][psec.PORTSECURITY])
-        self._delete('ports', port['port']['id'])
-
-    def test_update_port_security_off_with_security_group(self):
-        if self._skip_security_group:
-            self.skipTest("Plugin does not support security groups")
-        with self.network() as net:
-            with self.subnet(network=net):
-                res = self._create_port('json', net['network']['id'])
-                port = self.deserialize('json', res)
-                self.assertTrue(port['port'][psec.PORTSECURITY])
-
-                update_port = {'port': {psec.PORTSECURITY: False}}
-                req = self.new_update_request('ports', update_port,
-                                              port['port']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int, 409)
-                # remove security group on port
-                update_port = {'port': {ext_sg.SECURITYGROUPS: None}}
-                req = self.new_update_request('ports', update_port,
-                                              port['port']['id'])
-
-                self.deserialize('json', req.get_response(self.api))
-                self._delete('ports', port['port']['id'])
-
-    def test_update_port_remove_port_security_security_group(self):
-        if self._skip_security_group:
-            self.skipTest("Plugin does not support security groups")
-        with self.network() as net:
-            with self.subnet(network=net):
-                res = self._create_port('json', net['network']['id'],
-                                        arg_list=('port_security_enabled',),
-                                        port_security_enabled=True)
-                port = self.deserialize('json', res)
-                self.assertTrue(port['port'][psec.PORTSECURITY])
-
-                # remove security group on port
-                update_port = {'port': {ext_sg.SECURITYGROUPS: None,
-                                        psec.PORTSECURITY: False}}
-                req = self.new_update_request('ports', update_port,
-                                              port['port']['id'])
-                port = self.deserialize('json', req.get_response(self.api))
-                self.assertFalse(port['port'][psec.PORTSECURITY])
-                self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 0)
-                self._delete('ports', port['port']['id'])
-
-    def test_update_port_remove_port_security_security_group_read(self):
-        if self._skip_security_group:
-            self.skipTest("Plugin does not support security groups")
-        with self.network() as net:
-            with self.subnet(network=net):
-                res = self._create_port('json', net['network']['id'],
-                                        arg_list=('port_security_enabled',),
-                                        port_security_enabled=True)
-                port = self.deserialize('json', res)
-                self.assertTrue(port['port'][psec.PORTSECURITY])
-
-                # remove security group on port
-                update_port = {'port': {ext_sg.SECURITYGROUPS: None,
-                                        psec.PORTSECURITY: False}}
-                req = self.new_update_request('ports', update_port,
-                                              port['port']['id'])
-                self.deserialize('json', req.get_response(self.api))
-
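-                # Re-enable port security and restore the original security
-                # group; the binding should be readable again.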
-                sg_id = port['port'][ext_sg.SECURITYGROUPS]
-                update_port = {'port': {ext_sg.SECURITYGROUPS: [sg_id[0]],
-                                        psec.PORTSECURITY: True}}
-
-                req = self.new_update_request('ports', update_port,
-                                              port['port']['id'])
-
-                port = self.deserialize('json', req.get_response(self.api))
-                self.assertTrue(port['port'][psec.PORTSECURITY])
-                self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
-                self._delete('ports', port['port']['id'])
-
-    def test_create_port_security_off_shared_network(self):
-        with self.network(shared=True) as net:
-            with self.subnet(network=net):
-                res = self._create_port('json', net['network']['id'],
-                                        arg_list=('port_security_enabled',),
-                                        port_security_enabled=False,
-                                        tenant_id='not_network_owner',
-                                        set_context=True)
-                self.deserialize('json', res)
-                self.assertEqual(res.status_int, 403)
-
-    def test_update_port_security_off_shared_network(self):
-        with self.network(shared=True) as net:
-            with self.subnet(network=net):
-                res = self._create_port('json', net['network']['id'],
-                                        tenant_id='not_network_owner',
-                                        set_context=True)
-                port = self.deserialize('json', res)
-                # remove security group on port
-                update_port = {'port': {ext_sg.SECURITYGROUPS: None,
-                                        psec.PORTSECURITY: False}}
-                req = self.new_update_request('ports', update_port,
-                                              port['port']['id'])
-                req.environ['neutron.context'] = context.Context(
-                    '', 'not_network_owner')
-                res = req.get_response(self.api)
-                self.assertEqual(res.status_int, exc.HTTPForbidden.code)
diff --git a/neutron/tests/unit/extensions/test_providernet.py b/neutron/tests/unit/extensions/test_providernet.py
deleted file mode 100644 (file)
index d4b5cea..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright 2013 VMware
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-from oslo_utils import uuidutils
-from webob import exc as web_exc
-import webtest
-
-from neutron.api import extensions
-from neutron.api.v2 import router
-from neutron import context
-from neutron.extensions import providernet as pnet
-from neutron import manager
-from neutron import quota
-from neutron.tests import tools
-from neutron.tests.unit.api import test_extensions
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit import testlib_api
-
-
-class ProviderExtensionManager(object):
-
-    def get_resources(self):
-        return []
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-    def get_extended_resources(self, version):
-        return pnet.get_extended_resources(version)
-
-
-class ProvidernetExtensionTestCase(testlib_api.WebTestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        super(ProvidernetExtensionTestCase, self).setUp()
-
-        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
-
-        # Ensure existing ExtensionManager is not used
-        extensions.PluginAwareExtensionManager._instance = None
-
-        self.useFixture(tools.AttributeMapMemento())
-
-        # Update the plugin and extensions path
-        self.setup_coreplugin(plugin)
-        cfg.CONF.set_override('allow_pagination', True)
-        cfg.CONF.set_override('allow_sorting', True)
-        self._plugin_patcher = mock.patch(plugin, autospec=True)
-        self.plugin = self._plugin_patcher.start()
-        # Ensure Quota checks never fail because of mock
-        instance = self.plugin.return_value
-        instance.get_networks_count.return_value = 1
-        # Instantiate mock plugin and enable the 'provider' extension
-        manager.NeutronManager.get_plugin().supported_extension_aliases = (
-            ["provider"])
-        ext_mgr = ProviderExtensionManager()
-        self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
-        self.addCleanup(self._plugin_patcher.stop)
-        self.api = webtest.TestApp(router.APIRouter())
-
-        quota.QUOTAS._driver = None
-        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
-                              group='QUOTAS')
-
-    def _prepare_net_data(self):
-        return {'name': 'net1',
-                pnet.NETWORK_TYPE: 'sometype',
-                pnet.PHYSICAL_NETWORK: 'physnet',
-                pnet.SEGMENTATION_ID: 666}
-
-    def _put_network_with_provider_attrs(self, ctx, expect_errors=False):
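-        # Issue a PUT carrying provider attributes; get_network is stubbed so
-        # the policy check sees a non-shared network owned by the request's
-        # tenant.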
-        data = self._prepare_net_data()
-        env = {'neutron.context': ctx}
-        instance = self.plugin.return_value
-        instance.get_network.return_value = {'tenant_id': ctx.tenant_id,
-                                             'shared': False}
-        net_id = uuidutils.generate_uuid()
-        res = self.api.put(test_base._get_path('networks',
-                                               id=net_id,
-                                               fmt=self.fmt),
-                           self.serialize({'network': data}),
-                           extra_environ=env,
-                           expect_errors=expect_errors)
-        return res, data, net_id
-
-    def _post_network_with_provider_attrs(self, ctx, expect_errors=False):
-        data = self._prepare_net_data()
-        env = {'neutron.context': ctx}
-        res = self.api.post(test_base._get_path('networks', fmt=self.fmt),
-                            self.serialize({'network': data}),
-                            content_type='application/' + self.fmt,
-                            extra_environ=env,
-                            expect_errors=expect_errors)
-        return res, data
-
-    def _post_network_with_bad_provider_attrs(self, ctx, bad_data,
-                                              expect_errors=False):
-        data = self._prepare_net_data()
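-        # Overlay the valid provider attributes with the bad values under test.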
-        data.update(bad_data)
-        env = {'neutron.context': ctx}
-        res = self.api.post(test_base._get_path('networks', fmt=self.fmt),
-                            self.serialize({'network': data}),
-                            content_type='application/' + self.fmt,
-                            extra_environ=env,
-                            expect_errors=expect_errors)
-        return res, data
-
-    def test_network_create_with_provider_attrs(self):
-        ctx = context.get_admin_context()
-        ctx.tenant_id = 'an_admin'
-        res, data = self._post_network_with_provider_attrs(ctx)
-        instance = self.plugin.return_value
-        exp_input = {'network': data}
-        exp_input['network'].update({'admin_state_up': True,
-                                     'tenant_id': 'an_admin',
-                                     'shared': False})
-        instance.create_network.assert_called_with(mock.ANY,
-                                                   network=exp_input)
-        self.assertEqual(res.status_int, web_exc.HTTPCreated.code)
-
-    def test_network_create_with_bad_provider_attrs_400(self):
-        ctx = context.get_admin_context()
-        ctx.tenant_id = 'an_admin'
-        bad_data = {pnet.SEGMENTATION_ID: "abc"}
-        res, _1 = self._post_network_with_bad_provider_attrs(ctx, bad_data,
-                                                             True)
-        self.assertEqual(web_exc.HTTPBadRequest.code, res.status_int)
-
-    def test_network_update_with_provider_attrs(self):
-        ctx = context.get_admin_context()
-        ctx.tenant_id = 'an_admin'
-        res, data, net_id = self._put_network_with_provider_attrs(ctx)
-        instance = self.plugin.return_value
-        exp_input = {'network': data}
-        instance.update_network.assert_called_with(mock.ANY,
-                                                   net_id,
-                                                   network=exp_input)
-        self.assertEqual(res.status_int, web_exc.HTTPOk.code)
-
-    def test_network_create_with_provider_attrs_noadmin_returns_403(self):
-        tenant_id = 'no_admin'
-        ctx = context.Context('', tenant_id, is_admin=False)
-        res, _1 = self._post_network_with_provider_attrs(ctx, True)
-        self.assertEqual(res.status_int, web_exc.HTTPForbidden.code)
-
-    def test_network_update_with_provider_attrs_noadmin_returns_403(self):
-        tenant_id = 'no_admin'
-        ctx = context.Context('', tenant_id, is_admin=False)
-        res, _1, _2 = self._put_network_with_provider_attrs(ctx, True)
-        self.assertEqual(res.status_int, web_exc.HTTPForbidden.code)
diff --git a/neutron/tests/unit/extensions/test_quotasv2.py b/neutron/tests/unit/extensions/test_quotasv2.py
deleted file mode 100644 (file)
index e547920..0000000
+++ /dev/null
@@ -1,478 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-import mock
-from oslo_config import cfg
-import testtools
-from webob import exc
-import webtest
-
-from neutron.api import extensions
-from neutron.api.v2 import router
-from neutron.common import config
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron import context
-from neutron.db.quota import driver
-from neutron import quota
-from neutron.quota import resource_registry
-from neutron.tests import base
-from neutron.tests import tools
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit import testlib_api
-
-TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-
-_get_path = test_base._get_path
-
-
-class QuotaExtensionTestCase(testlib_api.WebTestCase):
-
-    def setUp(self):
-        super(QuotaExtensionTestCase, self).setUp()
-        # Ensure existing ExtensionManager is not used
-        extensions.PluginAwareExtensionManager._instance = None
-
-        self.useFixture(tools.AttributeMapMemento())
-
-        # Create the default configurations
-        self.config_parse()
-
-        # Update the plugin and extensions path
-        self.setup_coreplugin(TARGET_PLUGIN)
-        cfg.CONF.set_override(
-            'quota_items',
-            ['network', 'subnet', 'port', 'extra1'],
-            group='QUOTAS')
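-        # Rebuild the quota engine so it picks up the resources named in the
-        # overridden quota_items option.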
-        quota.QUOTAS = quota.QuotaEngine()
-        quota.register_resources_from_config()
-        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
-        self.plugin = self._plugin_patcher.start()
-        self.plugin.return_value.supported_extension_aliases = ['quotas']
-        # QUOTAS registers the items from the config at startup;
-        # 'extra1' is added later, so it has to be registered manually
-        resource_registry.register_resource_by_name('extra1')
-        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-        app = config.load_paste_app('extensions_test_app')
-        ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
-        self.api = webtest.TestApp(ext_middleware)
-        # Initialize the router for the core API in order to ensure core quota
-        # resources are registered
-        router.APIRouter()
-
-    def tearDown(self):
-        self.api = None
-        self.plugin = None
-        super(QuotaExtensionTestCase, self).tearDown()
-
-    def _test_quota_default_values(self, expected_values):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env)
-        quota = self.deserialize(res)
-        for resource, expected_value in expected_values.items():
-            self.assertEqual(expected_value,
-                             quota['quota'][resource])
-
-
-class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        cfg.CONF.set_override(
-            'quota_driver',
-            'neutron.db.quota.driver.DbQuotaDriver',
-            group='QUOTAS')
-        super(QuotaExtensionDbTestCase, self).setUp()
-
-    def test_quotas_loaded_right(self):
-        res = self.api.get(_get_path('quotas', fmt=self.fmt))
-        quota = self.deserialize(res)
-        self.assertEqual([], quota['quotas'])
-        self.assertEqual(200, res.status_int)
-
-    def test_quotas_default_values(self):
-        self._test_quota_default_values(
-            {'network': 10,
-             'subnet': 10,
-             'port': 50,
-             'extra1': -1})
-
-    def test_quotas_negative_default_value(self):
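-        # Negative quota defaults in the configuration are reported as -1
-        # (unlimited).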
-        cfg.CONF.set_override(
-            'quota_port', -666, group='QUOTAS')
-        cfg.CONF.set_override(
-            'quota_network', -10, group='QUOTAS')
-        cfg.CONF.set_override(
-            'quota_subnet', -50, group='QUOTAS')
-        self._test_quota_default_values(
-            {'network': -1,
-             'subnet': -1,
-             'port': -1,
-             'extra1': -1})
-
-    def test_show_quotas_with_admin(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id + '2',
-                                                  is_admin=True)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        quota = self.deserialize(res)
-        self.assertEqual(10, quota['quota']['network'])
-        self.assertEqual(10, quota['quota']['subnet'])
-        self.assertEqual(50, quota['quota']['port'])
-
-    def test_show_quotas_without_admin_forbidden_returns_403(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id + '2',
-                                                  is_admin=False)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env, expect_errors=True)
-        self.assertEqual(403, res.status_int)
-
-    def test_show_quotas_with_owner_tenant(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=False)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        quota = self.deserialize(res)
-        self.assertEqual(10, quota['quota']['network'])
-        self.assertEqual(10, quota['quota']['subnet'])
-        self.assertEqual(50, quota['quota']['port'])
-
-    def test_list_quotas_with_admin(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        res = self.api.get(_get_path('quotas', fmt=self.fmt),
-                           extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        quota = self.deserialize(res)
-        self.assertEqual([], quota['quotas'])
-
-    def test_list_quotas_without_admin_forbidden_returns_403(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=False)}
-        res = self.api.get(_get_path('quotas', fmt=self.fmt),
-                           extra_environ=env, expect_errors=True)
-        self.assertEqual(403, res.status_int)
-
-    def test_update_quotas_without_admin_forbidden_returns_403(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=False)}
-        quotas = {'quota': {'network': 100}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env,
-                           expect_errors=True)
-        self.assertEqual(403, res.status_int)
-
-    def test_update_quotas_with_non_integer_returns_400(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        quotas = {'quota': {'network': 'abc'}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env,
-                           expect_errors=True)
-        self.assertEqual(400, res.status_int)
-
-    def test_update_quotas_with_negative_integer_returns_400(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        quotas = {'quota': {'network': -2}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env,
-                           expect_errors=True)
-        self.assertEqual(400, res.status_int)
-
-    def test_update_quotas_with_out_of_range_integer_returns_400(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env,
-                           expect_errors=True)
-        self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
-
-    def test_update_quotas_to_unlimited(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        quotas = {'quota': {'network': -1}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env,
-                           expect_errors=False)
-        self.assertEqual(200, res.status_int)
-
-    def test_update_quotas_exceeding_current_limit(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        quotas = {'quota': {'network': 120}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env,
-                           expect_errors=False)
-        self.assertEqual(200, res.status_int)
-
-    def test_update_quotas_with_non_support_resource_returns_400(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        quotas = {'quota': {'abc': 100}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env,
-                           expect_errors=True)
-        self.assertEqual(400, res.status_int)
-
-    def test_update_quotas_with_admin(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id + '2',
-                                                  is_admin=True)}
-        quotas = {'quota': {'network': 100}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        env2 = {'neutron.context': context.Context('', tenant_id)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env2)
-        quota = self.deserialize(res)
-        self.assertEqual(100, quota['quota']['network'])
-        self.assertEqual(10, quota['quota']['subnet'])
-        self.assertEqual(50, quota['quota']['port'])
-
-    def test_update_attributes(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id + '2',
-                                                  is_admin=True)}
-        quotas = {'quota': {'extra1': 100}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        env2 = {'neutron.context': context.Context('', tenant_id)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env2)
-        quota = self.deserialize(res)
-        self.assertEqual(100, quota['quota']['extra1'])
-
-    def test_delete_quotas_with_admin(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id + '2',
-                                                  is_admin=True)}
-        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                              extra_environ=env)
-        self.assertEqual(204, res.status_int)
-
-    def test_delete_quotas_without_admin_forbidden_returns_403(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=False)}
-        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                              extra_environ=env, expect_errors=True)
-        self.assertEqual(403, res.status_int)
-
-    def test_quotas_loaded_bad_returns_404(self):
-        try:
-            res = self.api.get(_get_path('quotas'), expect_errors=True)
-            self.assertEqual(404, res.status_int)
-        except Exception:
-            # Any other failure is swallowed; this check is best-effort only.
-            pass
-
-    def test_quotas_limit_check(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        quotas = {'quota': {'network': 5}}
-        res = self.api.put(_get_path('quotas', id=tenant_id,
-                                     fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        quota.QUOTAS.limit_check(context.Context('', tenant_id),
-                                 tenant_id,
-                                 network=4)
-
-    def test_quotas_limit_check_with_invalid_quota_value(self):
-        tenant_id = 'tenant_id1'
-        with testtools.ExpectedException(exceptions.InvalidQuotaValue):
-            quota.QUOTAS.limit_check(context.Context('', tenant_id),
-                                     tenant_id,
-                                     network=-2)
-
-    def test_quotas_limit_check_with_not_registered_resource_fails(self):
-        tenant_id = 'tenant_id1'
-        self.assertRaises(exceptions.QuotaResourceUnknown,
-                          quota.QUOTAS.limit_check,
-                          context.get_admin_context(),
-                          tenant_id,
-                          foobar=1)
-
-    def test_quotas_get_tenant_from_request_context(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
-                           extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        quota = self.deserialize(res)
-        self.assertEqual(quota['tenant']['tenant_id'], tenant_id)
-
-    def test_quotas_get_tenant_from_empty_request_context_returns_400(self):
-        env = {'neutron.context': context.Context('', '',
-                                                  is_admin=True)}
-        res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
-                           extra_environ=env, expect_errors=True)
-        self.assertEqual(400, res.status_int)
-
-    def test_make_reservation_resource_unknown_raises(self):
-        tenant_id = 'tenant_id1'
-        self.assertRaises(exceptions.QuotaResourceUnknown,
-                          quota.QUOTAS.make_reservation,
-                          context.get_admin_context(),
-                          tenant_id,
-                          {'foobar': 1},
-                          plugin=None)
-
-    def test_make_reservation_negative_delta_raises(self):
-        tenant_id = 'tenant_id1'
-        self.assertRaises(exceptions.InvalidQuotaValue,
-                          quota.QUOTAS.make_reservation,
-                          context.get_admin_context(),
-                          tenant_id,
-                          {'network': -1},
-                          plugin=None)
-
-
-class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        cfg.CONF.set_override(
-            'quota_driver',
-            'neutron.quota.ConfDriver',
-            group='QUOTAS')
-        super(QuotaExtensionCfgTestCase, self).setUp()
-
-    def test_quotas_default_values(self):
-        self._test_quota_default_values(
-            {'network': 10,
-             'subnet': 10,
-             'port': 50,
-             'extra1': -1})
-
-    def test_quotas_negative_default_value(self):
-        cfg.CONF.set_override(
-            'quota_port', -666, group='QUOTAS')
-        self._test_quota_default_values(
-            {'network': 10,
-             'subnet': 10,
-             'port': -1,
-             'extra1': -1})
-
-    def test_show_quotas_with_admin(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id + '2',
-                                                  is_admin=True)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env)
-        self.assertEqual(200, res.status_int)
-
-    def test_show_quotas_without_admin_forbidden(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id + '2',
-                                                  is_admin=False)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env, expect_errors=True)
-        self.assertEqual(403, res.status_int)
-
-    def test_update_quotas_forbidden(self):
-        tenant_id = 'tenant_id1'
-        quotas = {'quota': {'network': 100}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas),
-                           expect_errors=True)
-        self.assertEqual(403, res.status_int)
-
-    def test_delete_quotas_forbidden(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=False)}
-        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                              extra_environ=env, expect_errors=True)
-        self.assertEqual(403, res.status_int)
-
-
-class TestDbQuotaDriver(base.BaseTestCase):
-    """Test for neutron.db.quota.driver.DbQuotaDriver."""
-
-    def test_get_tenant_quotas_arg(self):
-        """Call neutron.db.quota.driver.DbQuotaDriver._get_quotas."""
-
-        quota_driver = driver.DbQuotaDriver()
-        ctx = context.Context('', 'bar')
-
-        foo_quotas = {'network': 5}
-        default_quotas = {'network': 10}
-        target_tenant = 'foo'
-
-        with mock.patch.object(driver.DbQuotaDriver,
-                               'get_tenant_quotas',
-                               return_value=foo_quotas) as get_tenant_quotas:
-
-            quotas = quota_driver._get_quotas(ctx,
-                                              target_tenant,
-                                              default_quotas)
-
-            self.assertEqual(quotas, foo_quotas)
-            get_tenant_quotas.assert_called_once_with(ctx,
-                                                      default_quotas,
-                                                      target_tenant)
-
-
-class TestQuotaDriverLoad(base.BaseTestCase):
-    def setUp(self):
-        super(TestQuotaDriverLoad, self).setUp()
-        # Make sure QuotaEngine is reinitialized in each test.
-        quota.QUOTAS._driver = None
-
-    def _test_quota_driver(self, cfg_driver, loaded_driver,
-                           with_quota_db_module=True):
-        cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS')
-        with mock.patch.dict(sys.modules, {}):
-            if (not with_quota_db_module and
-                    'neutron.db.quota.driver' in sys.modules):
-                del sys.modules['neutron.db.quota.driver']
-            driver = quota.QUOTAS.get_driver()
-            self.assertEqual(loaded_driver, driver.__class__.__name__)
-
-    def test_quota_db_driver_with_quotas_table(self):
-        self._test_quota_driver('neutron.db.quota.driver.DbQuotaDriver',
-                                'DbQuotaDriver', True)
-
-    def test_quota_db_driver_fallback_conf_driver(self):
-        self._test_quota_driver('neutron.db.quota.driver.DbQuotaDriver',
-                                'ConfDriver', False)
-
-    def test_quota_conf_driver(self):
-        self._test_quota_driver('neutron.quota.ConfDriver',
-                                'ConfDriver', True)
diff --git a/neutron/tests/unit/extensions/test_router_availability_zone.py b/neutron/tests/unit/extensions/test_router_availability_zone.py
deleted file mode 100644
index 12e6a86..0000000
--- a/neutron/tests/unit/extensions/test_router_availability_zone.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import six
-
-from neutron.db.availability_zone import router as router_az_db
-from neutron.db import common_db_mixin
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_db
-from neutron.extensions import l3
-from neutron.extensions import router_availability_zone as router_az
-from neutron.plugins.common import constants as service_constants
-from neutron.tests.unit.extensions import test_availability_zone as test_az
-from neutron.tests.unit.extensions import test_l3
-
-
-class AZL3ExtensionManager(test_az.AZExtensionManager):
-
-    def get_resources(self):
-        return (super(AZL3ExtensionManager, self).get_resources() +
-                l3.L3.get_resources())
-
-
-class AZRouterTestPlugin(common_db_mixin.CommonDbMixin,
-                         l3_db.L3_NAT_db_mixin,
-                         router_az_db.RouterAvailabilityZoneMixin,
-                         l3_agentschedulers_db.AZL3AgentSchedulerDbMixin):
-    supported_extension_aliases = ["router", "l3_agent_scheduler",
-                                   "router_availability_zone"]
-
-    def get_plugin_type(self):
-        return service_constants.L3_ROUTER_NAT
-
-    def get_plugin_description(self):
-        return "L3 Routing Service Plugin for testing"
-
-    def _create_router_db(self, context, router, tenant_id):
-        # An l3-plugin that uses router extra attributes must call
-        # _process_extra_attr_router_create.
-        with context.session.begin(subtransactions=True):
-            router_db = super(AZRouterTestPlugin, self)._create_router_db(
-                context, router, tenant_id)
-            self._process_extra_attr_router_create(context, router_db, router)
-            return router_db
-
-
-class TestAZRouterCase(test_az.AZTestCommon, test_l3.L3NatTestCaseMixin):
-    def setUp(self):
-        plugin = ('neutron.tests.unit.extensions.'
-                  'test_availability_zone.AZTestPlugin')
-        l3_plugin = ('neutron.tests.unit.extensions.'
-                     'test_router_availability_zone.AZRouterTestPlugin')
-        service_plugins = {'l3_plugin_name': l3_plugin}
-
-        self._backup()
-        l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(
-            router_az.EXTENDED_ATTRIBUTES_2_0['routers'])
-        ext_mgr = AZL3ExtensionManager()
-        super(TestAZRouterCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
-                                            service_plugins=service_plugins)
-
-    def _backup(self):
-        self.contents_backup = {}
-        for res, attrs in six.iteritems(l3.RESOURCE_ATTRIBUTE_MAP):
-            self.contents_backup[res] = attrs.copy()
-        self.addCleanup(self._restore)
-
-    def _restore(self):
-        l3.RESOURCE_ATTRIBUTE_MAP = self.contents_backup
-
-    def test_create_router_with_az(self):
-        self._register_azs()
-        az_hints = ['nova2']
-        with self.router(availability_zone_hints=az_hints) as router:
-            res = self._show('routers', router['router']['id'])
-            self.assertItemsEqual(az_hints,
-                                  res['router']['availability_zone_hints'])
-
-    def test_create_router_with_azs(self):
-        self._register_azs()
-        az_hints = ['nova2', 'nova3']
-        with self.router(availability_zone_hints=az_hints) as router:
-            res = self._show('routers', router['router']['id'])
-            self.assertItemsEqual(az_hints,
-                                  res['router']['availability_zone_hints'])
-
-    def test_create_router_without_az(self):
-        with self.router() as router:
-            res = self._show('routers', router['router']['id'])
-            self.assertEqual([], res['router']['availability_zone_hints'])
-
-    def test_create_router_with_empty_az(self):
-        with self.router(availability_zone_hints=[]) as router:
-            res = self._show('routers', router['router']['id'])
-            self.assertEqual([], res['router']['availability_zone_hints'])
-
-    def test_create_router_with_none_existing_az(self):
-        res = self._create_router(self.fmt, 'tenant_id',
-                                  availability_zone_hints=['nova4'])
-        self.assertEqual(404, res.status_int)
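The availability-zone tests above depend on temporarily extending the global l3.RESOURCE_ATTRIBUTE_MAP. A minimal sketch of that backup/restore pattern, assuming the same l3, router_az, and six imports used in the module above (illustrative only, not part of the original file):

    # Snapshot the global map, extend it for the tests, restore afterwards;
    # mirrors the _backup/_restore helpers above.
    contents_backup = {}
    for res, attrs in six.iteritems(l3.RESOURCE_ATTRIBUTE_MAP):
        contents_backup[res] = attrs.copy()
    l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(
        router_az.EXTENDED_ATTRIBUTES_2_0['routers'])
    try:
        pass  # exercise the extended 'routers' resource here
    finally:
        l3.RESOURCE_ATTRIBUTE_MAP = contents_backup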
diff --git a/neutron/tests/unit/extensions/test_securitygroup.py b/neutron/tests/unit/extensions/test_securitygroup.py
deleted file mode 100644
index b94c4fd..0000000
--- a/neutron/tests/unit/extensions/test_securitygroup.py
+++ /dev/null
@@ -1,1586 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import contextlib
-
-import mock
-import oslo_db.exception as exc
-import six
-import testtools
-import webob.exc
-
-from neutron.api.v2 import attributes as attr
-from neutron.common import constants as const
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.db import db_base_plugin_v2
-from neutron.db import securitygroups_db
-from neutron.extensions import securitygroup as ext_sg
-from neutron import manager
-from neutron.tests import base
-from neutron.tests.unit.db import test_db_base_plugin_v2
-
-DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_securitygroup.'
-                   'SecurityGroupTestPlugin')
-
-
-class SecurityGroupTestExtensionManager(object):
-
-    def get_resources(self):
-        # Add the resources to the global attribute map.
-        # This is done here because the setup process won't
-        # initialize the main API router, which is what extends
-        # the global attribute map.
-        attr.RESOURCE_ATTRIBUTE_MAP.update(
-            ext_sg.RESOURCE_ATTRIBUTE_MAP)
-        return ext_sg.Securitygroup.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
-
-    def _create_security_group(self, fmt, name, description, **kwargs):
-
-        data = {'security_group': {'name': name,
-                                   'tenant_id': kwargs.get('tenant_id',
-                                                           'test-tenant'),
-                                   'description': description}}
-        security_group_req = self.new_create_request('security-groups', data,
-                                                     fmt)
-        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            security_group_req.environ['neutron.context'] = (
-                context.Context('', kwargs['tenant_id']))
-        return security_group_req.get_response(self.ext_api)
-
-    def _build_security_group_rule(self, security_group_id, direction, proto,
-                                   port_range_min=None, port_range_max=None,
-                                   remote_ip_prefix=None, remote_group_id=None,
-                                   tenant_id='test-tenant',
-                                   ethertype=const.IPv4):
-
-        data = {'security_group_rule': {'security_group_id': security_group_id,
-                                        'direction': direction,
-                                        'protocol': proto,
-                                        'ethertype': ethertype,
-                                        'tenant_id': tenant_id}}
-        if port_range_min:
-            data['security_group_rule']['port_range_min'] = port_range_min
-
-        if port_range_max:
-            data['security_group_rule']['port_range_max'] = port_range_max
-
-        if remote_ip_prefix:
-            data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
-
-        if remote_group_id:
-            data['security_group_rule']['remote_group_id'] = remote_group_id
-
-        return data
-
-    def _create_security_group_rule(self, fmt, rules, **kwargs):
-
-        security_group_rule_req = self.new_create_request(
-            'security-group-rules', rules, fmt)
-
-        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            security_group_rule_req.environ['neutron.context'] = (
-                context.Context('', kwargs['tenant_id']))
-        return security_group_rule_req.get_response(self.ext_api)
-
-    def _make_security_group(self, fmt, name, description, **kwargs):
-        res = self._create_security_group(fmt, name, description, **kwargs)
-        if res.status_int >= webob.exc.HTTPBadRequest.code:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(fmt, res)
-
-    def _make_security_group_rule(self, fmt, rules, **kwargs):
-        res = self._create_security_group_rule(fmt, rules, **kwargs)
-        if res.status_int >= webob.exc.HTTPBadRequest.code:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(fmt, res)
-
-    @contextlib.contextmanager
-    def security_group(self, name='webservers', description='webservers',
-                       fmt=None):
-        if not fmt:
-            fmt = self.fmt
-        security_group = self._make_security_group(fmt, name, description)
-        yield security_group
-
-    @contextlib.contextmanager
-    def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7'
-                                                    'd1db38eb087',
-                            direction='ingress', protocol=const.PROTO_NAME_TCP,
-                            port_range_min='22', port_range_max='22',
-                            remote_ip_prefix=None, remote_group_id=None,
-                            fmt=None, ethertype=const.IPv4):
-        if not fmt:
-            fmt = self.fmt
-        rule = self._build_security_group_rule(security_group_id,
-                                               direction,
-                                               protocol, port_range_min,
-                                               port_range_max,
-                                               remote_ip_prefix,
-                                               remote_group_id,
-                                               ethertype=ethertype)
-        security_group_rule = self._make_security_group_rule(fmt, rule)
-        yield security_group_rule
-
-    def _delete_default_security_group_egress_rules(self, security_group_id):
-        """Deletes default egress rules given a security group ID."""
-        res = self._list(
-            'security-group-rules',
-            query_params='security_group_id=%s' % security_group_id)
-
-        for r in res['security_group_rules']:
-            if (r['direction'] == 'egress' and not r['port_range_max'] and
-                    not r['port_range_min'] and not r['protocol']
-                    and not r['remote_ip_prefix']):
-                self._delete('security-group-rules', r['id'])
-
-    def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs):
-        """Asserts that the sg rule has expected key/value pairs passed
-           in as expected_kvs dictionary
-        """
-        for k, v in six.iteritems(expected_kvs):
-            self.assertEqual(security_group_rule[k], v)
-
-
-class SecurityGroupTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                              securitygroups_db.SecurityGroupDbMixin):
-    """Test plugin that implements necessary calls on create/delete port for
-    associating ports with security groups.
-    """
-
-    __native_pagination_support = True
-    __native_sorting_support = True
-
-    supported_extension_aliases = ["security-group"]
-
-    def create_port(self, context, port):
-        tenant_id = port['port']['tenant_id']
-        default_sg = self._ensure_default_security_group(context, tenant_id)
-        if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
-            port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
-        session = context.session
-        with session.begin(subtransactions=True):
-            sgids = self._get_security_groups_on_port(context, port)
-            port = super(SecurityGroupTestPlugin, self).create_port(context,
-                                                                    port)
-            self._process_port_create_security_group(context, port,
-                                                     sgids)
-        return port
-
-    def update_port(self, context, id, port):
-        session = context.session
-        with session.begin(subtransactions=True):
-            if ext_sg.SECURITYGROUPS in port['port']:
-                port['port'][ext_sg.SECURITYGROUPS] = (
-                    self._get_security_groups_on_port(context, port))
-                # delete the port's security group bindings and recreate
-                # them with the updated group list
-                self._delete_port_security_group_bindings(context, id)
-                port['port']['id'] = id
-                self._process_port_create_security_group(
-                    context, port['port'],
-                    port['port'].get(ext_sg.SECURITYGROUPS))
-            port = super(SecurityGroupTestPlugin, self).update_port(
-                context, id, port)
-        return port
-
-    def create_network(self, context, network):
-        self._ensure_default_security_group(context,
-                                            network['network']['tenant_id'])
-        return super(SecurityGroupTestPlugin, self).create_network(context,
-                                                                   network)
-
-    def get_ports(self, context, filters=None, fields=None,
-                  sorts=None, limit=None, marker=None,
-                  page_reverse=False):
-        sorts = sorts or []
-        neutron_lports = super(SecurityGroupTestPlugin, self).get_ports(
-            context, filters, sorts=sorts, limit=limit, marker=marker,
-            page_reverse=page_reverse)
-        return neutron_lports
-
-
-class SecurityGroupDBTestCase(SecurityGroupsTestCase):
-    def setUp(self, plugin=None, ext_mgr=None):
-        plugin = plugin or DB_PLUGIN_KLASS
-        ext_mgr = ext_mgr or SecurityGroupTestExtensionManager()
-        super(SecurityGroupDBTestCase,
-              self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-
-
-class TestSecurityGroups(SecurityGroupDBTestCase):
-    def test_create_security_group(self):
-        name = 'webservers'
-        description = 'my webservers'
-        keys = [('name', name,), ('description', description)]
-        with self.security_group(name, description) as security_group:
-            for k, v, in keys:
-                self.assertEqual(security_group['security_group'][k], v)
-
-        # Verify that default egress rules have been created
-
-        sg_rules = security_group['security_group']['security_group_rules']
-        self.assertEqual(2, len(sg_rules))
-
-        v4_rules = [r for r in sg_rules if r['ethertype'] == const.IPv4]
-        self.assertEqual(1, len(v4_rules))
-        v4_rule = v4_rules[0]
-        expected = {'direction': 'egress',
-                    'ethertype': const.IPv4,
-                    'remote_group_id': None,
-                    'remote_ip_prefix': None,
-                    'protocol': None,
-                    'port_range_max': None,
-                    'port_range_min': None}
-        self._assert_sg_rule_has_kvs(v4_rule, expected)
-
-        v6_rules = [r for r in sg_rules if r['ethertype'] == const.IPv6]
-        self.assertEqual(1, len(v6_rules))
-        v6_rule = v6_rules[0]
-        expected = {'direction': 'egress',
-                    'ethertype': const.IPv6,
-                    'remote_group_id': None,
-                    'remote_ip_prefix': None,
-                    'protocol': None,
-                    'port_range_max': None,
-                    'port_range_min': None}
-        self._assert_sg_rule_has_kvs(v6_rule, expected)
-
-    def test_skip_duplicate_default_sg_error(self):
-        num_called = [0]
-        original_func = self.plugin.create_security_group
-
-        def side_effect(context, security_group, default_sg):
-            # can't always raise, or create_security_group will hang
-            self.assertTrue(default_sg)
-            self.assertTrue(num_called[0] < 2)
-            num_called[0] += 1
-            ret = original_func(context, security_group, default_sg)
-            if num_called[0] == 1:
-                return ret
-            # make another call to cause an exception.
-            # NOTE(yamamoto): raising the exception by ourselves
-            # doesn't update the session state appropriately.
-            self.assertRaises(exc.DBDuplicateEntry,
-                              original_func, context, security_group,
-                              default_sg)
-
-        with mock.patch.object(SecurityGroupTestPlugin,
-                               'create_security_group',
-                               side_effect=side_effect):
-            self.plugin.create_network(
-                context.get_admin_context(),
-                {'network': {'name': 'foo',
-                             'admin_state_up': True,
-                             'shared': False,
-                             'tenant_id': 'bar'}})
-
-    def test_update_security_group(self):
-        with self.security_group() as sg:
-            data = {'security_group': {'name': 'new_name',
-                                       'description': 'new_desc'}}
-            req = self.new_update_request('security-groups',
-                                          data,
-                                          sg['security_group']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self.assertEqual(data['security_group']['name'],
-                             res['security_group']['name'])
-            self.assertEqual(data['security_group']['description'],
-                             res['security_group']['description'])
-
-    def test_update_security_group_name_to_default_fail(self):
-        with self.security_group() as sg:
-            data = {'security_group': {'name': 'default',
-                                       'description': 'new_desc'}}
-            req = self.new_update_request('security-groups',
-                                          data,
-                                          sg['security_group']['id'])
-            req.environ['neutron.context'] = context.Context('', 'somebody')
-            res = req.get_response(self.ext_api)
-            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_update_default_security_group_name_fail(self):
-        with self.network():
-            res = self.new_list_request('security-groups')
-            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
-            data = {'security_group': {'name': 'new_name',
-                                       'description': 'new_desc'}}
-            req = self.new_update_request('security-groups',
-                                          data,
-                                          sg['security_groups'][0]['id'])
-            req.environ['neutron.context'] = context.Context('', 'somebody')
-            res = req.get_response(self.ext_api)
-            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
-
-    def test_update_default_security_group_with_description(self):
-        with self.network():
-            res = self.new_list_request('security-groups')
-            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
-            data = {'security_group': {'description': 'new_desc'}}
-            req = self.new_update_request('security-groups',
-                                          data,
-                                          sg['security_groups'][0]['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self.assertEqual(data['security_group']['description'],
-                             res['security_group']['description'])
-
-    def test_check_default_security_group_description(self):
-        with self.network():
-            res = self.new_list_request('security-groups')
-            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
-            self.assertEqual('Default security group',
-                             sg['security_groups'][0]['description'])
-
-    def test_default_security_group(self):
-        with self.network():
-            res = self.new_list_request('security-groups')
-            groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
-            self.assertEqual(1, len(groups['security_groups']))
-
-    def test_create_default_security_group_fail(self):
-        name = 'default'
-        description = 'my webservers'
-        res = self._create_security_group(self.fmt, name, description)
-        self.deserialize(self.fmt, res)
-        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_create_default_security_group_check_case_insensitive(self):
-        name = 'DEFAULT'
-        description = 'my webservers'
-        res = self._create_security_group(self.fmt, name, description)
-        self.deserialize(self.fmt, res)
-        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_list_security_groups(self):
-        with self.security_group(name='sg1', description='sg') as v1,\
-                self.security_group(name='sg2', description='sg') as v2,\
-                self.security_group(name='sg3', description='sg') as v3:
-            security_groups = (v1, v2, v3)
-            self._test_list_resources('security-group',
-                                      security_groups,
-                                      query_params='description=sg')
-
-    def test_list_security_groups_with_sort(self):
-        with self.security_group(name='sg1', description='sg') as sg1,\
-                self.security_group(name='sg2', description='sg') as sg2,\
-                self.security_group(name='sg3', description='sg') as sg3:
-            self._test_list_with_sort('security-group',
-                                      (sg3, sg2, sg1),
-                                      [('name', 'desc')],
-                                      query_params='description=sg')
-
-    def test_list_security_groups_with_pagination(self):
-        with self.security_group(name='sg1', description='sg') as sg1,\
-                self.security_group(name='sg2', description='sg') as sg2,\
-                self.security_group(name='sg3', description='sg') as sg3:
-            self._test_list_with_pagination('security-group',
-                                            (sg1, sg2, sg3),
-                                            ('name', 'asc'), 2, 2,
-                                            query_params='description=sg')
-
-    def test_list_security_groups_with_pagination_reverse(self):
-        with self.security_group(name='sg1', description='sg') as sg1,\
-                self.security_group(name='sg2', description='sg') as sg2,\
-                self.security_group(name='sg3', description='sg') as sg3:
-            self._test_list_with_pagination_reverse(
-                'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2,
-                query_params='description=sg')
-
-    def test_create_security_group_rule_ethertype_invalid_as_number(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            ethertype = 2
-            rule = self._build_security_group_rule(
-                security_group_id, 'ingress', const.PROTO_NAME_TCP, '22',
-                '22', None, None, ethertype=ethertype)
-            res = self._create_security_group_rule(self.fmt, rule)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_create_security_group_rule_ethertype_invalid_for_protocol(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            rule = self._build_security_group_rule(
-                security_group_id, 'ingress', const.PROTO_NAME_ICMP_V6)
-            res = self._create_security_group_rule(self.fmt, rule)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_create_security_group_rule_invalid_ip_prefix(self):
-        name = 'webservers'
-        description = 'my webservers'
-        for bad_prefix in ['bad_ip', 256, "2001:db8:a::123/129", '172.30./24']:
-            with self.security_group(name, description) as sg:
-                sg_id = sg['security_group']['id']
-                remote_ip_prefix = bad_prefix
-                rule = self._build_security_group_rule(
-                    sg_id,
-                    'ingress',
-                    const.PROTO_NAME_TCP,
-                    '22', '22',
-                    remote_ip_prefix)
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_create_security_group_rule_invalid_ethertype_for_prefix(self):
-        name = 'webservers'
-        description = 'my webservers'
-        test_addr = {'192.168.1.1/24': 'IPv6',
-                     '2001:db8:1234::/48': 'IPv4',
-                     '192.168.2.1/24': 'BadEthertype'}
-        for remote_ip_prefix, ethertype in six.iteritems(test_addr):
-            with self.security_group(name, description) as sg:
-                sg_id = sg['security_group']['id']
-                rule = self._build_security_group_rule(
-                    sg_id,
-                    'ingress',
-                    const.PROTO_NAME_TCP,
-                    '22', '22',
-                    remote_ip_prefix,
-                    None,
-                    ethertype=ethertype)
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_create_security_group_rule_with_unmasked_prefix(self):
-        name = 'webservers'
-        description = 'my webservers'
-        addr = {'10.1.2.3': {'mask': '32', 'ethertype': 'IPv4'},
-                'fe80::2677:3ff:fe7d:4c': {'mask': '128', 'ethertype': 'IPv6'}}
-        for ip in addr:
-            with self.security_group(name, description) as sg:
-                sg_id = sg['security_group']['id']
-                ethertype = addr[ip]['ethertype']
-                remote_ip_prefix = ip
-                rule = self._build_security_group_rule(
-                    sg_id,
-                    'ingress',
-                    const.PROTO_NAME_TCP,
-                    '22', '22',
-                    remote_ip_prefix,
-                    None,
-                    ethertype=ethertype)
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.assertEqual(res.status_int, 201)
-                res_sg = self.deserialize(self.fmt, res)
-                prefix = res_sg['security_group_rule']['remote_ip_prefix']
-                self.assertEqual(prefix, '%s/%s' % (ip, addr[ip]['mask']))
-
-    def test_create_security_group_rule_tcp_protocol_as_number(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            protocol = const.PROTO_NUM_TCP  # TCP
-            rule = self._build_security_group_rule(
-                security_group_id, 'ingress', protocol, '22', '22')
-            res = self._create_security_group_rule(self.fmt, rule)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
-
-    def test_create_security_group_rule_protocol_as_number(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            protocol = 2
-            rule = self._build_security_group_rule(
-                security_group_id, 'ingress', protocol)
-            res = self._create_security_group_rule(self.fmt, rule)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
-
-    def test_create_security_group_rule_case_insensitive(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            direction = "ingress"
-            remote_ip_prefix = "10.0.0.0/24"
-            protocol = 'TCP'
-            port_range_min = 22
-            port_range_max = 22
-            ethertype = 'ipV4'
-            with self.security_group_rule(security_group_id, direction,
-                                          protocol, port_range_min,
-                                          port_range_max,
-                                          remote_ip_prefix,
-                                          ethertype=ethertype) as rule:
-
-                # the lower-case value will be returned
-                self.assertEqual(rule['security_group_rule']['protocol'],
-                                 protocol.lower())
-                self.assertEqual(rule['security_group_rule']['ethertype'],
-                                 const.IPv4)
-
-    def test_get_security_group(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            remote_group_id = sg['security_group']['id']
-            res = self.new_show_request('security-groups', remote_group_id)
-            security_group_id = sg['security_group']['id']
-            direction = "ingress"
-            remote_ip_prefix = "10.0.0.0/24"
-            protocol = const.PROTO_NAME_TCP
-            port_range_min = 22
-            port_range_max = 22
-            keys = [('remote_ip_prefix', remote_ip_prefix),
-                    ('security_group_id', security_group_id),
-                    ('direction', direction),
-                    ('protocol', protocol),
-                    ('port_range_min', port_range_min),
-                    ('port_range_max', port_range_max)]
-            with self.security_group_rule(security_group_id, direction,
-                                          protocol, port_range_min,
-                                          port_range_max,
-                                          remote_ip_prefix):
-
-                group = self.deserialize(
-                    self.fmt, res.get_response(self.ext_api))
-                sg_rule = group['security_group']['security_group_rules']
-                self.assertEqual(group['security_group']['id'],
-                                 remote_group_id)
-                self.assertEqual(3, len(sg_rule))
-                sg_rule = [r for r in sg_rule if r['direction'] == 'ingress']
-                for k, v, in keys:
-                    self.assertEqual(sg_rule[0][k], v)
-
-    def test_get_security_group_on_port_from_wrong_tenant(self):
-        plugin = manager.NeutronManager.get_plugin()
-        if not hasattr(plugin, '_get_security_groups_on_port'):
-            self.skipTest("plugin doesn't use the mixin with this method")
-        neutron_context = context.get_admin_context()
-        res = self._create_security_group(self.fmt, 'webservers', 'webservers',
-                                          tenant_id='bad_tenant')
-        sg1 = self.deserialize(self.fmt, res)
-        with testtools.ExpectedException(ext_sg.SecurityGroupNotFound):
-            plugin._get_security_groups_on_port(
-                neutron_context,
-                {'port': {'security_groups': [sg1['security_group']['id']],
-                          'tenant_id': 'tenant'}}
-            )
-
-    def test_delete_security_group(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            remote_group_id = sg['security_group']['id']
-            self._delete('security-groups', remote_group_id,
-                         webob.exc.HTTPNoContent.code)
-
-    def test_delete_default_security_group_admin(self):
-        with self.network():
-            res = self.new_list_request('security-groups')
-            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
-            self._delete('security-groups', sg['security_groups'][0]['id'],
-                         webob.exc.HTTPNoContent.code)
-
-    def test_delete_default_security_group_nonadmin(self):
-        with self.network():
-            res = self.new_list_request('security-groups')
-            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
-            neutron_context = context.Context('', 'test-tenant')
-            self._delete('security-groups', sg['security_groups'][0]['id'],
-                         webob.exc.HTTPConflict.code,
-                         neutron_context=neutron_context)
-
-    def test_security_group_list_creates_default_security_group(self):
-        neutron_context = context.Context('', 'test-tenant')
-        sg = self._list('security-groups',
-                        neutron_context=neutron_context).get('security_groups')
-        self.assertEqual(1, len(sg))
-
-    def test_security_group_port_create_creates_default_security_group(self):
-        res = self._create_network(self.fmt, 'net1', True,
-                                   tenant_id='not_admin',
-                                   set_context=True)
-        net1 = self.deserialize(self.fmt, res)
-        res = self._create_port(self.fmt, net1['network']['id'],
-                                tenant_id='not_admin', set_context=True)
-        sg = self._list('security-groups').get('security_groups')
-        self.assertEqual(1, len(sg))
-
-    def test_default_security_group_rules(self):
-        with self.network():
-            res = self.new_list_request('security-groups')
-            groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
-            self.assertEqual(len(groups['security_groups']), 1)
-            security_group_id = groups['security_groups'][0]['id']
-            res = self.new_list_request('security-group-rules')
-            rules = self.deserialize(self.fmt, res.get_response(self.ext_api))
-            self.assertEqual(len(rules['security_group_rules']), 4)
-
-            # Verify default rule for v4 egress
-            sg_rules = rules['security_group_rules']
-            rules = [
-                r for r in sg_rules
-                if r['direction'] == 'egress' and r['ethertype'] == const.IPv4
-            ]
-            self.assertEqual(1, len(rules))
-            v4_egress = rules[0]
-
-            expected = {'direction': 'egress',
-                        'ethertype': const.IPv4,
-                        'remote_group_id': None,
-                        'remote_ip_prefix': None,
-                        'protocol': None,
-                        'port_range_max': None,
-                        'port_range_min': None}
-            self._assert_sg_rule_has_kvs(v4_egress, expected)
-
-            # Verify default rule for v6 egress
-            rules = [
-                r for r in sg_rules
-                if r['direction'] == 'egress' and r['ethertype'] == const.IPv6
-            ]
-            self.assertEqual(1, len(rules))
-            v6_egress = rules[0]
-
-            expected = {'direction': 'egress',
-                        'ethertype': const.IPv6,
-                        'remote_group_id': None,
-                        'remote_ip_prefix': None,
-                        'protocol': None,
-                        'port_range_max': None,
-                        'port_range_min': None}
-            self._assert_sg_rule_has_kvs(v6_egress, expected)
-
-            # Verify default rule for v4 ingress
-            rules = [
-                r for r in sg_rules
-                if r['direction'] == 'ingress' and r['ethertype'] == const.IPv4
-            ]
-            self.assertEqual(1, len(rules))
-            v4_ingress = rules[0]
-
-            expected = {'direction': 'ingress',
-                        'ethertype': const.IPv4,
-                        'remote_group_id': security_group_id,
-                        'remote_ip_prefix': None,
-                        'protocol': None,
-                        'port_range_max': None,
-                        'port_range_min': None}
-            self._assert_sg_rule_has_kvs(v4_ingress, expected)
-
-            # Verify default rule for v6 ingress
-            rules = [
-                r for r in sg_rules
-                if r['direction'] == 'ingress' and r['ethertype'] == const.IPv6
-            ]
-            self.assertEqual(1, len(rules))
-            v6_ingress = rules[0]
-
-            expected = {'direction': 'ingress',
-                        'ethertype': const.IPv6,
-                        'remote_group_id': security_group_id,
-                        'remote_ip_prefix': None,
-                        'protocol': None,
-                        'port_range_max': None,
-                        'port_range_min': None}
-            self._assert_sg_rule_has_kvs(v6_ingress, expected)
-
-    def test_create_security_group_rule_remote_ip_prefix(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            direction = "ingress"
-            remote_ip_prefix = "10.0.0.0/24"
-            protocol = const.PROTO_NAME_TCP
-            port_range_min = 22
-            port_range_max = 22
-            keys = [('remote_ip_prefix', remote_ip_prefix),
-                    ('security_group_id', security_group_id),
-                    ('direction', direction),
-                    ('protocol', protocol),
-                    ('port_range_min', port_range_min),
-                    ('port_range_max', port_range_max)]
-            with self.security_group_rule(security_group_id, direction,
-                                          protocol, port_range_min,
-                                          port_range_max,
-                                          remote_ip_prefix) as rule:
-                for k, v, in keys:
-                    self.assertEqual(rule['security_group_rule'][k], v)
-
-    def test_create_security_group_rule_group_id(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            with self.security_group(name, description) as sg2:
-                security_group_id = sg['security_group']['id']
-                direction = "ingress"
-                remote_group_id = sg2['security_group']['id']
-                protocol = const.PROTO_NAME_TCP
-                port_range_min = 22
-                port_range_max = 22
-                keys = [('remote_group_id', remote_group_id),
-                        ('security_group_id', security_group_id),
-                        ('direction', direction),
-                        ('protocol', protocol),
-                        ('port_range_min', port_range_min),
-                        ('port_range_max', port_range_max)]
-                with self.security_group_rule(security_group_id, direction,
-                                              protocol, port_range_min,
-                                              port_range_max,
-                                              remote_group_id=remote_group_id
-                                              ) as rule:
-                    for k, v, in keys:
-                        self.assertEqual(rule['security_group_rule'][k], v)
-
-    def test_create_security_group_rule_icmp_with_type_and_code(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            direction = "ingress"
-            remote_ip_prefix = "10.0.0.0/24"
-            protocol = const.PROTO_NAME_ICMP
-            # port_range_min (ICMP type) is greater than port_range_max
-            # (ICMP code) in order to confirm that the min <= max port
-            # check is not applied to ICMP.
-            port_range_min = 8
-            port_range_max = 5
-            keys = [('remote_ip_prefix', remote_ip_prefix),
-                    ('security_group_id', security_group_id),
-                    ('direction', direction),
-                    ('protocol', protocol),
-                    ('port_range_min', port_range_min),
-                    ('port_range_max', port_range_max)]
-            with self.security_group_rule(security_group_id, direction,
-                                          protocol, port_range_min,
-                                          port_range_max,
-                                          remote_ip_prefix) as rule:
-                for k, v, in keys:
-                    self.assertEqual(rule['security_group_rule'][k], v)
-
-    def test_create_security_group_rule_icmp_with_type_only(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            direction = "ingress"
-            remote_ip_prefix = "10.0.0.0/24"
-            protocol = const.PROTO_NAME_ICMP
-            # ICMP type
-            port_range_min = 8
-            # ICMP code
-            port_range_max = None
-            keys = [('remote_ip_prefix', remote_ip_prefix),
-                    ('security_group_id', security_group_id),
-                    ('direction', direction),
-                    ('protocol', protocol),
-                    ('port_range_min', port_range_min),
-                    ('port_range_max', port_range_max)]
-            with self.security_group_rule(security_group_id, direction,
-                                          protocol, port_range_min,
-                                          port_range_max,
-                                          remote_ip_prefix) as rule:
-                for k, v, in keys:
-                    self.assertEqual(rule['security_group_rule'][k], v)
-
-    def test_create_security_group_rule_icmpv6_with_type_only(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            direction = "ingress"
-            ethertype = const.IPv6
-            remote_ip_prefix = "2001::f401:56ff:fefe:d3dc/128"
-            protocol = const.PROTO_NAME_ICMP_V6
-            # ICMPV6 type
-            port_range_min = const.ICMPV6_TYPE_RA
-            # ICMPV6 code
-            port_range_max = None
-            keys = [('remote_ip_prefix', remote_ip_prefix),
-                    ('security_group_id', security_group_id),
-                    ('direction', direction),
-                    ('ethertype', ethertype),
-                    ('protocol', protocol),
-                    ('port_range_min', port_range_min),
-                    ('port_range_max', port_range_max)]
-            with self.security_group_rule(security_group_id, direction,
-                                          protocol, port_range_min,
-                                          port_range_max,
-                                          remote_ip_prefix,
-                                          None, None,
-                                          ethertype) as rule:
-                for k, v, in keys:
-                    self.assertEqual(rule['security_group_rule'][k], v)
-
-    def test_create_security_group_source_group_ip_and_ip_prefix(self):
-        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
-        direction = "ingress"
-        remote_ip_prefix = "10.0.0.0/24"
-        protocol = const.PROTO_NAME_TCP
-        port_range_min = 22
-        port_range_max = 22
-        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
-        rule = self._build_security_group_rule(security_group_id, direction,
-                                               protocol, port_range_min,
-                                               port_range_max,
-                                               remote_ip_prefix,
-                                               remote_group_id)
-        res = self._create_security_group_rule(self.fmt, rule)
-        self.deserialize(self.fmt, res)
-        self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
-
-    def test_create_security_group_rule_bad_security_group_id(self):
-        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
-        direction = "ingress"
-        remote_ip_prefix = "10.0.0.0/24"
-        protocol = const.PROTO_NAME_TCP
-        port_range_min = 22
-        port_range_max = 22
-        rule = self._build_security_group_rule(security_group_id, direction,
-                                               protocol, port_range_min,
-                                               port_range_max,
-                                               remote_ip_prefix)
-        res = self._create_security_group_rule(self.fmt, rule)
-        self.deserialize(self.fmt, res)
-        self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
-
-    def test_create_security_group_rule_bad_tenant(self):
-        with self.security_group() as sg:
-            rule = {'security_group_rule':
-                    {'security_group_id': sg['security_group']['id'],
-                     'direction': 'ingress',
-                     'protocol': const.PROTO_NAME_TCP,
-                     'port_range_min': '22',
-                     'port_range_max': '22',
-                     'tenant_id': "bad_tenant"}}
-
-            res = self._create_security_group_rule(self.fmt, rule,
-                                                   tenant_id='bad_tenant',
-                                                   set_context=True)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
-
-    def test_create_security_group_rule_bad_tenant_remote_group_id(self):
-        with self.security_group() as sg:
-            res = self._create_security_group(self.fmt, 'webservers',
-                                              'webservers',
-                                              tenant_id='bad_tenant')
-            sg2 = self.deserialize(self.fmt, res)
-            rule = {'security_group_rule':
-                    {'security_group_id': sg2['security_group']['id'],
-                     'direction': 'ingress',
-                     'protocol': const.PROTO_NAME_TCP,
-                     'port_range_min': '22',
-                     'port_range_max': '22',
-                     'tenant_id': 'bad_tenant',
-                     'remote_group_id': sg['security_group']['id']}}
-
-            res = self._create_security_group_rule(self.fmt, rule,
-                                                   tenant_id='bad_tenant',
-                                                   set_context=True)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
-
-    def test_create_security_group_rule_bad_tenant_security_group_rule(self):
-        with self.security_group() as sg:
-            res = self._create_security_group(self.fmt, 'webservers',
-                                              'webservers',
-                                              tenant_id='bad_tenant')
-            self.deserialize(self.fmt, res)
-            rule = {'security_group_rule':
-                    {'security_group_id': sg['security_group']['id'],
-                     'direction': 'ingress',
-                     'protocol': const.PROTO_NAME_TCP,
-                     'port_range_min': '22',
-                     'port_range_max': '22',
-                     'tenant_id': 'bad_tenant'}}
-
-            res = self._create_security_group_rule(self.fmt, rule,
-                                                   tenant_id='bad_tenant',
-                                                   set_context=True)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
-
-    def test_create_security_group_rule_bad_remote_group_id(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
-            direction = "ingress"
-            protocol = const.PROTO_NAME_TCP
-            port_range_min = 22
-            port_range_max = 22
-        rule = self._build_security_group_rule(security_group_id, direction,
-                                               protocol, port_range_min,
-                                               port_range_max,
-                                               remote_group_id=remote_group_id)
-        res = self._create_security_group_rule(self.fmt, rule)
-        self.deserialize(self.fmt, res)
-        self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
-
-    def test_create_security_group_rule_duplicate_rules(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id):
-                rule = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_TCP, '22', '22')
-                self._create_security_group_rule(self.fmt, rule)
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.deserialize(self.fmt, res)
-                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
-
-    def test_create_security_group_rule_min_port_greater_max(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id):
-                for protocol in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP,
-                                 const.PROTO_NUM_TCP, const.PROTO_NUM_UDP]:
-                    rule = self._build_security_group_rule(
-                        sg['security_group']['id'],
-                        'ingress', protocol, '50', '22')
-                    res = self._create_security_group_rule(self.fmt, rule)
-                    self.deserialize(self.fmt, res)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPBadRequest.code)
-
-    def test_create_security_group_rule_ports_but_no_protocol(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id):
-                rule = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress', None, '22', '22')
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.deserialize(self.fmt, res)
-                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
-
-    def test_create_security_group_rule_port_range_min_only(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id):
-                rule = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_TCP, '22', None)
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.deserialize(self.fmt, res)
-                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
-
-    def test_create_security_group_rule_port_range_max_only(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id):
-                rule = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_TCP, None, '22')
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.deserialize(self.fmt, res)
-                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
-
-    def test_create_security_group_rule_icmp_type_too_big(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id):
-                rule = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_ICMP, '256', None)
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.deserialize(self.fmt, res)
-                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
-
-    def test_create_security_group_rule_icmp_code_too_big(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id):
-                rule = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_ICMP, '8', '256')
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.deserialize(self.fmt, res)
-                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
-
-    def test_create_security_group_rule_icmp_with_code_only(self):
-        name = 'webservers'
-        description = 'my webservers'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id):
-                for code in ['2', '0']:
-                    rule = self._build_security_group_rule(
-                        sg['security_group']['id'], 'ingress',
-                        const.PROTO_NAME_ICMP, None, code)
-                    res = self._create_security_group_rule(self.fmt, rule)
-                    self.deserialize(self.fmt, res)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPBadRequest.code)
-
-    def test_list_ports_security_group(self):
-        with self.network() as n:
-            with self.subnet(n):
-                self._create_port(self.fmt, n['network']['id'])
-                req = self.new_list_request('ports')
-                res = req.get_response(self.api)
-                ports = self.deserialize(self.fmt, res)
-                port = ports['ports'][0]
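-                # A port created without explicit security groups is expected
-                # to land in the tenant's default group, hence exactly one
-                # entry here.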
-                self.assertEqual(len(port[ext_sg.SECURITYGROUPS]), 1)
-                self._delete('ports', port['id'])
-
-    def test_list_security_group_rules(self):
-        with self.security_group(name='sg') as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id,
-                                          direction='egress',
-                                          port_range_min=22,
-                                          port_range_max=22) as sgr1,\
-                    self.security_group_rule(security_group_id,
-                                             direction='egress',
-                                             port_range_min=23,
-                                             port_range_max=23) as sgr2,\
-                    self.security_group_rule(security_group_id,
-                                             direction='egress',
-                                             port_range_min=24,
-                                             port_range_max=24) as sgr3:
-
-                # Delete the default egress rules first; otherwise the
-                # listing below would return more than the three rules
-                # created above and the assertion would fail.
-                self._delete_default_security_group_egress_rules(
-                    security_group_id)
-
-                q = 'direction=egress&security_group_id=' + security_group_id
-                self._test_list_resources('security-group-rule',
-                                          [sgr1, sgr2, sgr3],
-                                          query_params=q)
-
-    def test_list_security_group_rules_with_sort(self):
-        with self.security_group(name='sg') as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id,
-                                          direction='egress',
-                                          port_range_min=22,
-                                          port_range_max=22) as sgr1,\
-                    self.security_group_rule(security_group_id,
-                                             direction='egress',
-                                             port_range_min=23,
-                                             port_range_max=23) as sgr2,\
-                    self.security_group_rule(security_group_id,
-                                             direction='egress',
-                                             port_range_min=24,
-                                             port_range_max=24) as sgr3:
-
-                # Delete the default egress rules first; otherwise the
-                # listing below would return more than the three rules
-                # created above and the assertion would fail.
-                self._delete_default_security_group_egress_rules(
-                    security_group_id)
-
-                q = 'direction=egress&security_group_id=' + security_group_id
-                self._test_list_with_sort('security-group-rule',
-                                          (sgr3, sgr2, sgr1),
-                                          [('port_range_max', 'desc')],
-                                          query_params=q)
-
-    def test_list_security_group_rules_with_pagination(self):
-        with self.security_group(name='sg') as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id,
-                                          direction='egress',
-                                          port_range_min=22,
-                                          port_range_max=22) as sgr1,\
-                    self.security_group_rule(security_group_id,
-                                             direction='egress',
-                                             port_range_min=23,
-                                             port_range_max=23) as sgr2,\
-                    self.security_group_rule(security_group_id,
-                                             direction='egress',
-                                             port_range_min=24,
-                                             port_range_max=24) as sgr3:
-
-                # Delete the default egress rules first; otherwise the
-                # listing below would return more than the three rules
-                # created above and the assertion would fail.
-                self._delete_default_security_group_egress_rules(
-                    security_group_id)
-
-                q = 'direction=egress&security_group_id=' + security_group_id
-                self._test_list_with_pagination(
-                    'security-group-rule', (sgr3, sgr2, sgr1),
-                    ('port_range_max', 'desc'), 2, 2,
-                    query_params=q)
-
-    def test_list_security_group_rules_with_pagination_reverse(self):
-        with self.security_group(name='sg') as sg:
-            security_group_id = sg['security_group']['id']
-            with self.security_group_rule(security_group_id,
-                                          direction='egress',
-                                          port_range_min=22,
-                                          port_range_max=22) as sgr1,\
-                    self.security_group_rule(security_group_id,
-                                             direction='egress',
-                                             port_range_min=23,
-                                             port_range_max=23) as sgr2,\
-                    self.security_group_rule(security_group_id,
-                                             direction='egress',
-                                             port_range_min=24,
-                                             port_range_max=24) as sgr3:
-                self._test_list_with_pagination_reverse(
-                    'security-group-rule', (sgr3, sgr2, sgr1),
-                    ('port_range_max', 'desc'), 2, 2,
-                    query_params='direction=egress')
-
-    def test_create_port_with_multiple_security_groups(self):
-        with self.network() as n:
-            with self.subnet(n):
-                with self.security_group() as sg1:
-                    with self.security_group() as sg2:
-                        res = self._create_port(
-                            self.fmt, n['network']['id'],
-                            security_groups=[sg1['security_group']['id'],
-                                             sg2['security_group']['id']])
-                        port = self.deserialize(self.fmt, res)
-                        self.assertEqual(2, len(
-                            port['port'][ext_sg.SECURITYGROUPS]))
-                        self._delete('ports', port['port']['id'])
-
-    def test_create_port_with_no_security_groups(self):
-        with self.network() as n:
-            with self.subnet(n):
-                res = self._create_port(self.fmt, n['network']['id'],
-                                        security_groups=[])
-                port = self.deserialize(self.fmt, res)
-                self.assertEqual([], port['port'][ext_sg.SECURITYGROUPS])
-
-    def test_update_port_with_security_group(self):
-        with self.network() as n:
-            with self.subnet(n):
-                with self.security_group() as sg:
-                    res = self._create_port(self.fmt, n['network']['id'])
-                    port = self.deserialize(self.fmt, res)
-
-                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
-                                     'name': port['port']['name'],
-                                     ext_sg.SECURITYGROUPS:
-                                     [sg['security_group']['id']]}}
-
-                    req = self.new_update_request('ports', data,
-                                                  port['port']['id'])
-                    res = self.deserialize(self.fmt,
-                                           req.get_response(self.api))
-                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
-                                     sg['security_group']['id'])
-
-                    # Test update port without security group
-                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
-                                     'name': port['port']['name']}}
-
-                    req = self.new_update_request('ports', data,
-                                                  port['port']['id'])
-                    res = self.deserialize(self.fmt,
-                                           req.get_response(self.api))
-                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
-                                     sg['security_group']['id'])
-
-                    self._delete('ports', port['port']['id'])
-
-    def test_update_port_with_multiple_security_groups(self):
-        with self.network() as n:
-            with self.subnet(n) as s:
-                with self.port(s) as port:
-                    with self.security_group() as sg1:
-                        with self.security_group() as sg2:
-                            data = {'port': {ext_sg.SECURITYGROUPS:
-                                             [sg1['security_group']['id'],
-                                              sg2['security_group']['id']]}}
-                            req = self.new_update_request(
-                                'ports', data, port['port']['id'])
-                            port = self.deserialize(
-                                self.fmt, req.get_response(self.api))
-                            self.assertEqual(
-                                2, len(port['port'][ext_sg.SECURITYGROUPS]))
-
-    def test_update_port_remove_security_group_empty_list(self):
-        with self.network() as n:
-            with self.subnet(n):
-                with self.security_group() as sg:
-                    res = self._create_port(self.fmt, n['network']['id'],
-                                            security_groups=(
-                                                [sg['security_group']['id']]))
-                    port = self.deserialize(self.fmt, res)
-
-                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
-                                     'name': port['port']['name'],
-                                     'security_groups': []}}
-
-                    req = self.new_update_request('ports', data,
-                                                  port['port']['id'])
-                    res = self.deserialize(self.fmt,
-                                           req.get_response(self.api))
-                    self.assertEqual([],
-                                     res['port'].get(ext_sg.SECURITYGROUPS))
-                    self._delete('ports', port['port']['id'])
-
-    def test_update_port_remove_security_group_none(self):
-        with self.network() as n:
-            with self.subnet(n):
-                with self.security_group() as sg:
-                    res = self._create_port(self.fmt, n['network']['id'],
-                                            security_groups=(
-                                                [sg['security_group']['id']]))
-                    port = self.deserialize(self.fmt, res)
-
-                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
-                                     'name': port['port']['name'],
-                                     'security_groups': None}}
-
-                    req = self.new_update_request('ports', data,
-                                                  port['port']['id'])
-                    res = self.deserialize(self.fmt,
-                                           req.get_response(self.api))
-                    self.assertEqual([],
-                                     res['port'].get(ext_sg.SECURITYGROUPS))
-                    self._delete('ports', port['port']['id'])
-
-    def test_create_port_with_bad_security_group(self):
-        with self.network() as n:
-            with self.subnet(n):
-                res = self._create_port(self.fmt, n['network']['id'],
-                                        security_groups=['bad_id'])
-
-                self.deserialize(self.fmt, res)
-                self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_create_delete_security_group_port_in_use(self):
-        with self.network() as n:
-            with self.subnet(n):
-                with self.security_group() as sg:
-                    res = self._create_port(self.fmt, n['network']['id'],
-                                            security_groups=(
-                                                [sg['security_group']['id']]))
-                    port = self.deserialize(self.fmt, res)
-                    self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0],
-                                     sg['security_group']['id'])
-                    # try to delete a security group that is still in use
-                    res = self._delete('security-groups',
-                                       sg['security_group']['id'],
-                                       webob.exc.HTTPConflict.code)
-                    # delete the blocking port
-                    self._delete('ports', port['port']['id'])
-
-    def test_create_security_group_rule_bulk_native(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk "
-                          "security_group_rule create")
-        with self.security_group() as sg:
-            rule1 = self._build_security_group_rule(sg['security_group']['id'],
-                                                    'ingress',
-                                                    const.PROTO_NAME_TCP, '22',
-                                                    '22', '10.0.0.1/24')
-            rule2 = self._build_security_group_rule(sg['security_group']['id'],
-                                                    'ingress',
-                                                    const.PROTO_NAME_TCP, '23',
-                                                    '23', '10.0.0.1/24')
-            rules = {'security_group_rules': [rule1['security_group_rule'],
-                                              rule2['security_group_rule']]}
-            res = self._create_security_group_rule(self.fmt, rules)
-            ret = self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
-            self.assertEqual(2, len(ret['security_group_rules']))
-
-    def test_create_security_group_rule_bulk_emulated(self):
-        real_has_attr = hasattr
-
-        # Ensure the API chooses the emulated (non-native) bulk code path.
-        def fakehasattr(item, attr):
-            if attr.endswith('__native_bulk_support'):
-                return False
-            return real_has_attr(item, attr)
-
-        with mock.patch('six.moves.builtins.hasattr',
-                        new=fakehasattr):
-            with self.security_group() as sg:
-                rule1 = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
-                rule2 = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
-                rules = {'security_group_rules': [rule1['security_group_rule'],
-                                                  rule2['security_group_rule']]
-                         }
-                res = self._create_security_group_rule(self.fmt, rules)
-                self.deserialize(self.fmt, res)
-                self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
-
-    def test_create_security_group_rule_allow_all_ipv4(self):
-        with self.security_group() as sg:
-            rule = {'security_group_id': sg['security_group']['id'],
-                    'direction': 'ingress',
-                    'ethertype': 'IPv4',
-                    'tenant_id': 'test-tenant'}
-
-            res = self._create_security_group_rule(
-                self.fmt, {'security_group_rule': rule})
-            rule = self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
-
-    def test_create_security_group_rule_allow_all_ipv4_v6_bulk(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk "
-                          "security_group_rule create")
-        with self.security_group() as sg:
-            rule_v4 = {'security_group_id': sg['security_group']['id'],
-                       'direction': 'ingress',
-                       'ethertype': 'IPv4',
-                       'tenant_id': 'test-tenant'}
-            rule_v6 = {'security_group_id': sg['security_group']['id'],
-                       'direction': 'ingress',
-                       'ethertype': 'IPv6',
-                       'tenant_id': 'test-tenant'}
-
-            rules = {'security_group_rules': [rule_v4, rule_v6]}
-            res = self._create_security_group_rule(self.fmt, rules)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
-
-    def test_create_security_group_rule_duplicate_rule_in_post(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk "
-                          "security_group_rule create")
-        with self.security_group() as sg:
-            rule = self._build_security_group_rule(sg['security_group']['id'],
-                                                   'ingress',
-                                                   const.PROTO_NAME_TCP, '22',
-                                                   '22', '10.0.0.1/24')
-            rules = {'security_group_rules': [rule['security_group_rule'],
-                                              rule['security_group_rule']]}
-            res = self._create_security_group_rule(self.fmt, rules)
-            rule = self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
-        real_has_attr = hasattr
-
-        # Ensure the API chooses the emulated (non-native) bulk code path.
-        def fakehasattr(item, attr):
-            if attr.endswith('__native_bulk_support'):
-                return False
-            return real_has_attr(item, attr)
-
-        with mock.patch('six.moves.builtins.hasattr',
-                        new=fakehasattr):
-
-            with self.security_group() as sg:
-                rule = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
-                rules = {'security_group_rules': [rule['security_group_rule'],
-                                                  rule['security_group_rule']]}
-                res = self._create_security_group_rule(self.fmt, rules)
-                rule = self.deserialize(self.fmt, res)
-                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_create_security_group_rule_duplicate_rule_db(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk "
-                          "security_group_rule create")
-        with self.security_group() as sg:
-            rule = self._build_security_group_rule(sg['security_group']['id'],
-                                                   'ingress',
-                                                   const.PROTO_NAME_TCP, '22',
-                                                   '22', '10.0.0.1/24')
-            rules = {'security_group_rules': [rule]}
-            self._create_security_group_rule(self.fmt, rules)
-            res = self._create_security_group_rule(self.fmt, rules)
-            rule = self.deserialize(self.fmt, res)
-            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_create_security_group_rule_duplicate_rule_db_emulated(self):
-        real_has_attr = hasattr
-
-        # Ensure the API chooses the emulated (non-native) bulk code path.
-        def fakehasattr(item, attr):
-            if attr.endswith('__native_bulk_support'):
-                return False
-            return real_has_attr(item, attr)
-
-        with mock.patch('six.moves.builtins.hasattr',
-                        new=fakehasattr):
-            with self.security_group() as sg:
-                rule = self._build_security_group_rule(
-                    sg['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
-                rules = {'security_group_rules': [rule]}
-                self._create_security_group_rule(self.fmt, rules)
-                res = self._create_security_group_rule(self.fmt, rule)
-                self.deserialize(self.fmt, res)
-                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_create_security_group_rule_different_security_group_ids(self):
-        if self._skip_native_bulk:
-            self.skipTest("Plugin does not support native bulk "
-                          "security_group_rule create")
-        with self.security_group() as sg1:
-            with self.security_group() as sg2:
-                rule1 = self._build_security_group_rule(
-                    sg1['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
-                rule2 = self._build_security_group_rule(
-                    sg2['security_group']['id'], 'ingress',
-                    const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
-
-                rules = {'security_group_rules': [rule1['security_group_rule'],
-                                                  rule2['security_group_rule']]
-                         }
-                res = self._create_security_group_rule(self.fmt, rules)
-                self.deserialize(self.fmt, res)
-                self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_create_security_group_rule_with_invalid_ethertype(self):
-        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
-        direction = "ingress"
-        remote_ip_prefix = "10.0.0.0/24"
-        protocol = const.PROTO_NAME_TCP
-        port_range_min = 22
-        port_range_max = 22
-        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
-        rule = self._build_security_group_rule(security_group_id, direction,
-                                               protocol, port_range_min,
-                                               port_range_max,
-                                               remote_ip_prefix,
-                                               remote_group_id,
-                                               ethertype='IPv5')
-        res = self._create_security_group_rule(self.fmt, rule)
-        self.deserialize(self.fmt, res)
-        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_create_security_group_rule_with_invalid_protocol(self):
-        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
-        direction = "ingress"
-        remote_ip_prefix = "10.0.0.0/24"
-        protocol = 'tcp/ip'
-        port_range_min = 22
-        port_range_max = 22
-        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
-        rule = self._build_security_group_rule(security_group_id, direction,
-                                               protocol, port_range_min,
-                                               port_range_max,
-                                               remote_ip_prefix,
-                                               remote_group_id)
-        res = self._create_security_group_rule(self.fmt, rule)
-        self.deserialize(self.fmt, res)
-        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_create_security_group_rule_with_invalid_tcp_or_udp_protocol(self):
-        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
-        direction = "ingress"
-        remote_ip_prefix = "10.0.0.0/24"
-        protocol = 'tcp'
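-        # Port 0 is outside the valid 1-65535 range for TCP/UDP rules, so
-        # the request below is expected to be rejected.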
-        port_range_min = 0
-        port_range_max = 80
-        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
-        rule = self._build_security_group_rule(security_group_id, direction,
-                                               protocol, port_range_min,
-                                               port_range_max,
-                                               remote_ip_prefix,
-                                               remote_group_id)
-        res = self._create_security_group_rule(self.fmt, rule)
-        self.deserialize(self.fmt, res)
-        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_create_port_with_non_uuid(self):
-        with self.network() as n:
-            with self.subnet(n):
-                res = self._create_port(self.fmt, n['network']['id'],
-                                        security_groups=['not_valid'])
-
-                self.deserialize(self.fmt, res)
-                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
-
-    def test_create_security_group_rule_with_specific_id(self):
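-        # Call the plugin directly: the REST layer does not accept a
-        # caller-supplied id, but the plugin API should honour one.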
-        neutron_context = context.Context('', 'test-tenant')
-        specified_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
-        with self.security_group() as sg:
-            rule = self._build_security_group_rule(
-                sg['security_group']['id'], 'ingress', const.PROTO_NUM_TCP)
-            rule['security_group_rule'].update({'id': specified_id,
-                                                'port_range_min': None,
-                                                'port_range_max': None,
-                                                'remote_ip_prefix': None,
-                                                'remote_group_id': None})
-            result = self.plugin.create_security_group_rule(
-                neutron_context, rule)
-            self.assertEqual(specified_id, result['id'])
-
-
-class TestConvertIPPrefixToCIDR(base.BaseTestCase):
-
-    def test_convert_bad_ip_prefix_to_cidr(self):
-        for val in ['bad_ip', 256, "2001:db8:a::123/129"]:
-            self.assertRaises(n_exc.InvalidCIDR,
-                              ext_sg.convert_ip_prefix_to_cidr, val)
-        self.assertIsNone(ext_sg.convert_ip_prefix_to_cidr(None))
-
-    def test_convert_ip_prefix_no_netmask_to_cidr(self):
-        addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'}
-        for k, v in six.iteritems(addr):
-            self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(k),
-                             '%s/%s' % (k, v))
-
-    def test_convert_ip_prefix_with_netmask_to_cidr(self):
-        addresses = ['10.1.0.0/16', '10.1.2.3/32', '2001:db8:1234::/48']
-        for addr in addresses:
-            self.assertEqual(addr, ext_sg.convert_ip_prefix_to_cidr(addr))
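-
-    # For reference, a minimal sketch of the converter exercised above,
-    # assuming netaddr semantics (not necessarily the extension's exact
-    # implementation; n_exc is the exceptions module imported above):
-    #
-    #     def convert_ip_prefix_to_cidr(ip_prefix):
-    #         if not ip_prefix:
-    #             return None
-    #         try:
-    #             # bare addresses default to /32 (IPv4) or /128 (IPv6)
-    #             return str(netaddr.IPNetwork(ip_prefix))
-    #         except (TypeError, ValueError, netaddr.AddrFormatError):
-    #             raise n_exc.InvalidCIDR(input=ip_prefix)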
-
-
-class TestConvertProtocol(base.BaseTestCase):
-    def test_convert_numeric_protocol(self):
-        self.assertIsInstance(ext_sg.convert_protocol('2'), str)
-
-    def test_convert_bad_protocol(self):
-        for val in ['bad', '256', '-1']:
-            self.assertRaises(ext_sg.SecurityGroupRuleInvalidProtocol,
-                              ext_sg.convert_protocol, val)
-
-    def test_convert_numeric_protocol_to_string(self):
-        self.assertIsInstance(ext_sg.convert_protocol(2), str)
-
-
-class TestConvertEtherType(base.BaseTestCase):
-    def test_convert_unsupported_ethertype(self):
-        for val in ['ip', 'ip4', 'ip6', '']:
-            self.assertRaises(ext_sg.SecurityGroupRuleInvalidEtherType,
-                              ext_sg.convert_ethertype_to_case_insensitive,
-                              val)
diff --git a/neutron/tests/unit/extensions/test_servicetype.py b/neutron/tests/unit/extensions/test_servicetype.py
deleted file mode 100644 (file)
index 56b89bd..0000000
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-import webob.exc as webexc
-import webtest
-
-from neutron.api import extensions
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.db import servicetype_db as st_db
-from neutron.extensions import servicetype
-from neutron.plugins.common import constants
-from neutron.services import provider_configuration as provconf
-from neutron.tests.unit.api import test_extensions
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit import dummy_plugin as dp
-from neutron.tests.unit import testlib_api
-
-
-_uuid = test_base._uuid
-_get_path = test_base._get_path
-
-
-class ServiceTypeManagerTestCase(testlib_api.SqlTestCase):
-    def setUp(self):
-        self.service_providers = mock.patch.object(
-            provconf.NeutronModule, 'service_providers').start()
-        super(ServiceTypeManagerTestCase, self).setUp()
-        self.ctx = context.get_admin_context()
-
-    def _set_override(self, service_providers):
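-        # Providers are strings of the form
-        # '<service_type>:<name>:<driver>[:default]'; the singleton is reset
-        # so get_instance() rebuilds it with the overridden list.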
-        self.service_providers.return_value = service_providers
-        st_db.ServiceTypeManager._instance = None
-        self.manager = st_db.ServiceTypeManager.get_instance()
-        for provider in service_providers:
-            self.manager.add_provider_configuration(
-                provider.split(':')[0], provconf.ProviderConfiguration())
-
-    def test_service_provider_driver_not_unique(self):
-        self._set_override([constants.LOADBALANCER + ':lbaas:driver'])
-        prov = {'service_type': constants.LOADBALANCER,
-                'name': 'name2',
-                'driver': 'driver',
-                'default': False}
-        self.assertRaises(
-            n_exc.Invalid,
-            self.manager.config['LOADBALANCER'].add_provider, prov)
-
-    def test_get_service_providers(self):
-        """Test that get_service_providers filters correctly."""
-        self._set_override(
-            [constants.LOADBALANCER +
-             ':lbaas:driver_path1',
-             constants.FIREWALL +
-             ':fwaas:driver_path2'])
-        ctx = context.get_admin_context()
-        res = self.manager.get_service_providers(
-            ctx,
-            filters=dict(service_type=[constants.LOADBALANCER])
-        )
-        self.assertEqual(1, len(res))
-
-        res = self.manager.get_service_providers(
-            ctx,
-            filters=dict(service_type=[constants.FIREWALL])
-        )
-        self.assertEqual(1, len(res))
-
-    def test_multiple_default_providers_specified_for_service(self):
-        self.assertRaises(
-            n_exc.Invalid,
-            self._set_override,
-            [constants.LOADBALANCER +
-             ':lbaas1:driver_path:default',
-             constants.LOADBALANCER +
-             ':lbaas2:driver_path:default'])
-
-    def test_get_default_provider(self):
-        self._set_override([constants.LOADBALANCER +
-                            ':lbaas1:driver_path:default',
-                            constants.DUMMY +
-                            ':lbaas2:driver_path2'])
-        # can pass None as a context
-        p = self.manager.get_default_service_provider(None,
-                                                      constants.LOADBALANCER)
-        self.assertEqual({'service_type': constants.LOADBALANCER,
-                          'name': 'lbaas1',
-                          'driver': 'driver_path',
-                          'default': True}, p)
-
-        self.assertRaises(
-            provconf.DefaultServiceProviderNotFound,
-            self.manager.get_default_service_provider,
-            None, constants.DUMMY
-        )
-
-    def test_add_resource_association(self):
-        self._set_override([constants.LOADBALANCER +
-                            ':lbaas1:driver_path:default',
-                            constants.DUMMY +
-                            ':lbaas2:driver_path2'])
-        ctx = context.get_admin_context()
-        self.manager.add_resource_association(ctx,
-                                              constants.LOADBALANCER,
-                                              'lbaas1', '123-123')
-        self.assertEqual(
-            1,
-            ctx.session.query(st_db.ProviderResourceAssociation).count())
-        assoc = ctx.session.query(st_db.ProviderResourceAssociation).one()
-        ctx.session.delete(assoc)
-
-    def test_invalid_resource_association(self):
-        self._set_override([constants.LOADBALANCER +
-                            ':lbaas1:driver_path:default',
-                            constants.DUMMY +
-                            ':lbaas2:driver_path2'])
-        ctx = context.get_admin_context()
-        self.assertRaises(provconf.ServiceProviderNotFound,
-                          self.manager.add_resource_association,
-                          ctx, 'BLABLA_svc', 'name', '123-123')
-
-
-class TestServiceTypeExtensionManager(object):
-    """Mock extensions manager."""
-    def get_resources(self):
-        return (servicetype.Servicetype.get_resources() +
-                dp.Dummy.get_resources())
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class ServiceTypeExtensionTestCaseBase(testlib_api.WebTestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        # Needed because setUp would otherwise fail due to a nonexistent
-        # core plugin.
-        self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS)
-
-        cfg.CONF.set_override('service_plugins',
-                              ["%s.%s" % (dp.__name__,
-                                          dp.DummyServicePlugin.__name__)])
-        # Ensure existing ExtensionManager is not used
-        extensions.PluginAwareExtensionManager._instance = None
-        ext_mgr = TestServiceTypeExtensionManager()
-        self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
-        self.api = webtest.TestApp(self.ext_mdw)
-        self.resource_name = servicetype.RESOURCE_NAME.replace('-', '_')
-        super(ServiceTypeExtensionTestCaseBase, self).setUp()
-
-
-class ServiceTypeExtensionTestCase(ServiceTypeExtensionTestCaseBase):
-
-    def setUp(self):
-        self._patcher = mock.patch(
-            "neutron.db.servicetype_db.ServiceTypeManager",
-            autospec=True)
-        self.mock_mgr = self._patcher.start()
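-        # Wire get_instance() to the mocked manager so the extension code
-        # under test talks to the mock rather than a real manager.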
-        self.mock_mgr.get_instance.return_value = self.mock_mgr.return_value
-        super(ServiceTypeExtensionTestCase, self).setUp()
-
-    def test_service_provider_list(self):
-        instance = self.mock_mgr.return_value
-
-        res = self.api.get(_get_path('service-providers', fmt=self.fmt))
-
-        instance.get_service_providers.assert_called_with(mock.ANY,
-                                                          filters={},
-                                                          fields=[])
-        self.assertEqual(webexc.HTTPOk.code, res.status_int)
-
-
-class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase):
-    """Tests ServiceTypemanager as a public API."""
-    def setUp(self):
-        self.service_providers = mock.patch.object(
-            provconf.NeutronModule, 'service_providers').start()
-        service_providers = [
-            constants.LOADBALANCER + ':lbaas:driver_path',
-            constants.DUMMY + ':dummy:dummy_dr'
-        ]
-        self.service_providers.return_value = service_providers
-        # Blank out service type manager instance
-        st_db.ServiceTypeManager._instance = None
-        self.manager = st_db.ServiceTypeManager.get_instance()
-        for provider in service_providers:
-            self.manager.add_provider_configuration(
-                provider.split(':')[0], provconf.ProviderConfiguration())
-        super(ServiceTypeManagerExtTestCase, self).setUp()
-
-    def _list_service_providers(self):
-        return self.api.get(_get_path('service-providers', fmt=self.fmt))
-
-    def test_list_service_providers(self):
-        res = self._list_service_providers()
-        self.assertEqual(webexc.HTTPOk.code, res.status_int)
-        data = self.deserialize(res)
-        self.assertIn('service_providers', data)
-        self.assertGreaterEqual(len(data['service_providers']), 2)
diff --git a/neutron/tests/unit/extensions/test_vlantransparent.py b/neutron/tests/unit/extensions/test_vlantransparent.py
deleted file mode 100644 (file)
index 1b9eeac..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright (c) 2015 Cisco Systems Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-import six
-from webob import exc as web_exc
-
-from neutron.api.v2 import attributes
-from neutron.db import db_base_plugin_v2
-from neutron.db import vlantransparent_db as vlt_db
-from neutron.extensions import vlantransparent as vlt
-from neutron import quota
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit import testlib_api
-
-
-class VlanTransparentExtensionManager(object):
-
-    def get_resources(self):
-        return []
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-    def get_extended_resources(self, version):
-        return vlt.get_extended_resources(version)
-
-
-class VlanTransparentExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                                         vlt_db.Vlantransparent_db_mixin):
-    """Test plugin to mixin the VLAN transparent extensions."""
-
-    supported_extension_aliases = ["vlan-transparent"]
-
-    def create_network(self, context, network):
-        with context.session.begin(subtransactions=True):
-            new_net = super(VlanTransparentExtensionTestPlugin,
-                            self).create_network(context, network)
-            # Update the vlan_transparent attribute in the database
-            n = network['network']
-            vlan_transparent = vlt.get_vlan_transparent(n)
-            network = self._get_network(context, new_net['id'])
-            n['vlan_transparent'] = vlan_transparent
-            network.update(n)
-        return new_net
-
-
-class VlanTransparentExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2):
-    fmt = 'json'
-
-    def setUp(self):
-        plugin = ('neutron.tests.unit.extensions.test_vlantransparent.'
-                  'VlanTransparentExtensionTestPlugin')
-
-        # Save the global RESOURCE_ATTRIBUTE_MAP
-        self.saved_attr_map = {}
-        for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
-            self.saved_attr_map[res] = attrs.copy()
-
-        # Enable pagination and sorting, and set up the extension manager
-        cfg.CONF.set_override('allow_pagination', True)
-        cfg.CONF.set_override('allow_sorting', True)
-        ext_mgr = VlanTransparentExtensionManager()
-        self.addCleanup(self._restore_attribute_map)
-        super(VlanTransparentExtensionTestCase, self).setUp(plugin=plugin,
-                                                            ext_mgr=ext_mgr)
-
-        quota.QUOTAS._driver = None
-        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
-                              group='QUOTAS')
-
-    def _restore_attribute_map(self):
-        # Restore the global RESOURCE_ATTRIBUTE_MAP
-        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
-
-    def test_network_create_with_vlan_transparent_attr(self):
-        vlantrans = {'vlan_transparent': True}
-        with self.network(name='net1', **vlantrans) as net:
-            req = self.new_show_request('networks', net['network']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(net['network']['name'],
-                             res['network']['name'])
-            self.assertTrue(res['network'][vlt.VLANTRANSPARENT])
-
-    def test_network_create_with_bad_vlan_transparent_attr(self):
-        vlantrans = {'vlan_transparent': "abc"}
-        with testlib_api.ExpectedException(
-                web_exc.HTTPClientError) as ctx_manager:
-            with self.network(name='net1', **vlantrans):
-                pass
-        self.assertEqual(web_exc.HTTPClientError.code,
-                         ctx_manager.exception.code)
-
-    def test_network_update_with_vlan_transparent_exception(self):
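-        # vlan_transparent may only be set at network creation; the update
-        # is expected to be rejected with a 400 and the value left unset.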
-        with self.network(name='net1') as net:
-            self._update('networks', net['network']['id'],
-                         {'network': {vlt.VLANTRANSPARENT: False}},
-                         web_exc.HTTPBadRequest.code)
-            req = self.new_show_request('networks', net['network']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(net['network']['name'],
-                             res['network']['name'])
-            self.assertFalse(res['network'][vlt.VLANTRANSPARENT])
diff --git a/neutron/tests/unit/extensions/v2attributes.py b/neutron/tests/unit/extensions/v2attributes.py
deleted file mode 100644 (file)
index 2adfcc1..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from neutron.api import extensions
-
-
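-# Two example attributes extending the 'networks' resource: allow_post and
-# allow_put control whether an attribute may be written on create or update,
-# while is_visible controls whether it is returned in API responses.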
-EXTENDED_ATTRIBUTES_2_0 = {
-    'networks': {
-        'v2attrs:something': {'allow_post': False,
-                              'allow_put': False,
-                              'is_visible': True},
-        'v2attrs:something_else': {'allow_post': True,
-                                   'allow_put': False,
-                                   'is_visible': False},
-    }
-}
-
-
-class V2attributes(extensions.ExtensionDescriptor):
-    def get_name(self):
-        return "V2 Extended Attributes Example"
-
-    def get_alias(self):
-        return "v2attrs"
-
-    def get_description(self):
-        return "Demonstrates extended attributes on V2 core resources"
-
-    def get_updated(self):
-        return "2012-07-18T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/tests/unit/hacking/__init__.py b/neutron/tests/unit/hacking/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/hacking/test_checks.py b/neutron/tests/unit/hacking/test_checks.py
deleted file mode 100644 (file)
index 91d9af3..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import testtools
-
-from neutron.hacking import checks
-from neutron.tests import base
-
-
-class HackingTestCase(base.BaseTestCase):
-
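-    # Each hacking check is a generator yielding an (offset, message) tuple
-    # per violation, so an immediate StopIteration from next() means the
-    # inspected line passes the check.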
-    def assertLinePasses(self, func, line):
-        with testtools.ExpectedException(StopIteration):
-            next(func(line))
-
-    def assertLineFails(self, func, line):
-        self.assertIsInstance(next(func(line)), tuple)
-
-    def test_log_translations(self):
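-        # Each log level must wrap its message in the matching translation
-        # hint (_LE for error/exception, _LI for info, _LW for warnings,
-        # _LC for critical); LOG.debug messages are never translated.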
-        expected_marks = {
-            'error': '_LE',
-            'info': '_LI',
-            'warn': '_LW',
-            'warning': '_LW',
-            'critical': '_LC',
-            'exception': '_LE',
-        }
-        logs = expected_marks.keys()
-        debug = "LOG.debug('OK')"
-        self.assertEqual(
-            0, len(list(checks.validate_log_translations(debug, debug, 'f'))))
-        for log in logs:
-            bad = 'LOG.%s(_("Bad"))' % log
-            self.assertEqual(
-                1, len(list(checks.validate_log_translations(bad, bad, 'f'))))
-            bad = 'LOG.%s("Bad")' % log
-            self.assertEqual(
-                1, len(list(checks.validate_log_translations(bad, bad, 'f'))))
-            ok = "LOG.%s('OK')    # noqa" % log
-            self.assertEqual(
-                0, len(list(checks.validate_log_translations(ok, ok, 'f'))))
-            ok = "LOG.%s(variable)" % log
-            self.assertEqual(
-                0, len(list(checks.validate_log_translations(ok, ok, 'f'))))
-
-            for mark in checks._all_hints:
-                stmt = "LOG.%s(%s('test'))" % (log, mark)
-                self.assertEqual(
-                    0 if expected_marks[log] == mark else 1,
-                    len(list(checks.validate_log_translations(stmt, stmt,
-                                                              'f'))))
-
-    def test_no_translate_debug_logs(self):
-        for hint in checks._all_hints:
-            bad = "LOG.debug(%s('bad'))" % hint
-            self.assertEqual(
-                1, len(list(checks.no_translate_debug_logs(bad, 'f'))))
-
-    def test_use_jsonutils(self):
-        def __get_msg(fun):
-            msg = ("N321: jsonutils.%(fun)s must be used instead of "
-                   "json.%(fun)s" % {'fun': fun})
-            return [(0, msg)]
-
-        for method in ('dump', 'dumps', 'load', 'loads'):
-            self.assertEqual(
-                __get_msg(method),
-                list(checks.use_jsonutils("json.%s(" % method,
-                                          "./neutron/common/rpc.py")))
-
-            self.assertEqual(0,
-                len(list(checks.use_jsonutils("jsonx.%s(" % method,
-                                              "./neutron/common/rpc.py"))))
-
-            self.assertEqual(0,
-                len(list(checks.use_jsonutils("json.%sx(" % method,
-                                              "./neutron/common/rpc.py"))))
-
-            self.assertEqual(0,
-                len(list(checks.use_jsonutils(
-                    "json.%s" % method,
-                    "./neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/"
-                    "etc/xapi.d/plugins/netwrap"))))
-
-    def test_assert_called_once_with(self):
-        fail_code1 = """
-               mock = Mock()
-               mock.method(1, 2, 3, test='wow')
-               mock.method.assert_called_once()
-               """
-        fail_code2 = """
-               mock = Mock()
-               mock.method(1, 2, 3, test='wow')
-               mock.method.assertCalledOnceWith()
-               """
-        fail_code3 = """
-               mock = Mock()
-               mock.method(1, 2, 3, test='wow')
-               mock.method.assert_has_called()
-               """
-        pass_code = """
-               mock = Mock()
-               mock.method(1, 2, 3, test='wow')
-               mock.method.assert_called_once_with()
-               """
-        pass_code2 = """
-               mock = Mock()
-               mock.method(1, 2, 3, test='wow')
-               mock.method.assert_has_calls()
-               """
-        self.assertEqual(
-            1, len(list(checks.check_assert_called_once_with(fail_code1,
-                                            "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            1, len(list(checks.check_assert_called_once_with(fail_code2,
-                                            "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            0, len(list(checks.check_assert_called_once_with(pass_code,
-                                            "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            1, len(list(checks.check_assert_called_once_with(fail_code3,
-                                            "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            0, len(list(checks.check_assert_called_once_with(pass_code2,
-                                            "neutron/tests/test_assert.py"))))
-
-    def test_check_oslo_namespace_imports(self):
-        f = checks.check_oslo_namespace_imports
-        self.assertLinePasses(f, 'from oslo_utils import importutils')
-        self.assertLinePasses(f, 'import oslo_messaging')
-        self.assertLineFails(f, 'from oslo.utils import importutils')
-        self.assertLineFails(f, 'from oslo import messaging')
-        self.assertLineFails(f, 'import oslo.messaging')
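-
-    # The deprecated 'oslo' namespace package is being replaced by
-    # 'oslo_<lib>' module names; a sketch of the check implied by the
-    # assertions above (hypothetical helper, not the shipped code):
-    @staticmethod
-    def _check_oslo_namespace_sketch(logical_line):
-        deprecated = ('import oslo.', 'from oslo.', 'from oslo import ')
-        if logical_line.startswith(deprecated):
-            yield (0, "N323: avoid the deprecated 'oslo' namespace package")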
-
-    def test_check_python3_xrange(self):
-        f = checks.check_python3_xrange
-        self.assertLineFails(f, 'a = xrange(1000)')
-        self.assertLineFails(f, 'b =xrange   (   42 )')
-        self.assertLineFails(f, 'c = xrange(1, 10, 2)')
-        self.assertLinePasses(f, 'd = range(1000)')
-        self.assertLinePasses(f, 'e = six.moves.range(1337)')
-
-    def test_no_basestring(self):
-        self.assertEqual(1,
-            len(list(checks.check_no_basestring("isinstance(x, basestring)"))))
-
-    def test_check_python3_iteritems(self):
-        f = checks.check_python3_no_iteritems
-        self.assertLineFails(f, "d.iteritems()")
-        self.assertLinePasses(f, "six.iteritems(d)")
-
-    def test_asserttrue(self):
-        fail_code1 = """
-               test_bool = True
-               self.assertEqual(True, test_bool)
-               """
-        fail_code2 = """
-               test_bool = True
-               self.assertEqual(test_bool, True)
-               """
-        pass_code = """
-               test_bool = True
-               self.assertTrue(test_bool)
-               """
-        self.assertEqual(
-            1, len(list(checks.check_asserttrue(fail_code1,
-                                            "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            1, len(list(checks.check_asserttrue(fail_code2,
-                                            "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            0, len(list(checks.check_asserttrue(pass_code,
-                                            "neutron/tests/test_assert.py"))))
-
-    def test_no_mutable_default_args(self):
-        self.assertEqual(1, len(list(checks.no_mutable_default_args(
-            " def fake_suds_context(calls={}):"))))
-
-        self.assertEqual(1, len(list(checks.no_mutable_default_args(
-            "def get_info_from_bdm(virt_type, bdm, mapping=[])"))))
-
-        self.assertEqual(0, len(list(checks.no_mutable_default_args(
-            "defined = []"))))
-
-        self.assertEqual(0, len(list(checks.no_mutable_default_args(
-            "defined, undefined = [], {}"))))
-
-    def test_assertfalse(self):
-        fail_code1 = """
-               test_bool = False
-               self.assertEqual(False, test_bool)
-               """
-        fail_code2 = """
-               test_bool = False
-               self.assertEqual(test_bool, False)
-               """
-        pass_code = """
-               test_bool = False
-               self.assertFalse(test_bool)
-               """
-        self.assertEqual(
-            1, len(list(checks.check_assertfalse(fail_code1,
-                                            "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            1, len(list(checks.check_assertfalse(fail_code2,
-                                            "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            0, len(list(checks.check_assertfalse(pass_code,
-                                            "neutron/tests/test_assert.py"))))
-
-    def test_assertempty(self):
-        fail_code = """
-                test_empty = %s
-                self.assertEqual(test_empty, %s)
-                """
-        pass_code1 = """
-                test_empty = %s
-                self.assertEqual(%s, test_empty)
-                """
-        pass_code2 = """
-                self.assertEqual(123, foo(abc, %s))
-                """
-        empty_cases = ['{}', '[]', '""', "''", '()', 'set()']
-        for ec in empty_cases:
-            self.assertEqual(
-                1, len(list(checks.check_assertempty(fail_code % (ec, ec),
-                                            "neutron/tests/test_assert.py"))))
-            self.assertEqual(
-                0, len(list(checks.check_assertempty(pass_code1 % (ec, ec),
-                                            "neutron/tests/test_assert.py"))))
-            self.assertEqual(
-                0, len(list(checks.check_assertempty(pass_code2 % ec,
-                                            "neutron/tests/test_assert.py"))))
-
-    def test_assertisinstance(self):
-        fail_code = """
-               self.assertTrue(isinstance(observed, ANY_TYPE))
-               """
-        pass_code1 = """
-               self.assertEqual(ANY_TYPE, type(observed))
-               """
-        pass_code2 = """
-               self.assertIsInstance(observed, ANY_TYPE)
-               """
-        self.assertEqual(
-            1, len(list(checks.check_assertisinstance(fail_code,
-                                        "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            0, len(list(checks.check_assertisinstance(pass_code1,
-                                            "neutron/tests/test_assert.py"))))
-        self.assertEqual(
-            0, len(list(checks.check_assertisinstance(pass_code2,
-                                            "neutron/tests/test_assert.py"))))
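-
-
-# For reference: local checks like the ones tested above are wired into
-# flake8 through a registration factory (a sketch assuming the `hacking`
-# local-check-factory mechanism, which tox.ini would point at this
-# function via "[hacking] local-check-factory"):
-def _factory_sketch(register):
-    register(checks.validate_log_translations)
-    register(checks.no_translate_debug_logs)
-    register(checks.use_jsonutils)
-    register(checks.check_assert_called_once_with)
-    register(checks.no_mutable_default_args)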
diff --git a/neutron/tests/unit/ipam/__init__.py b/neutron/tests/unit/ipam/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/ipam/drivers/__init__.py b/neutron/tests/unit/ipam/drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py b/neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py b/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py
deleted file mode 100644 (file)
index 32b4a6a..0000000
+++ /dev/null
@@ -1,214 +0,0 @@
-# Copyright 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from oslo_db import exception as db_exc
-from oslo_utils import uuidutils
-from sqlalchemy.orm import exc as orm_exc
-
-from neutron import context
-from neutron.ipam.drivers.neutrondb_ipam import db_api
-from neutron.ipam.drivers.neutrondb_ipam import db_models
-from neutron.ipam import exceptions as ipam_exc
-from neutron.tests.unit import testlib_api
-
-
-class TestIpamSubnetManager(testlib_api.SqlTestCase):
-    """Test case for SubnetManager DB helper class"""
-
-    def setUp(self):
-        super(TestIpamSubnetManager, self).setUp()
-        self.ctx = context.get_admin_context()
-        self.neutron_subnet_id = uuidutils.generate_uuid()
-        self.ipam_subnet_id = uuidutils.generate_uuid()
-        self.subnet_ip = '1.2.3.4'
-        self.single_pool = ('1.2.3.4', '1.2.3.10')
-        self.multi_pool = (('1.2.3.2', '1.2.3.12'), ('1.2.3.15', '1.2.3.24'))
-        self.subnet_manager = db_api.IpamSubnetManager(self.ipam_subnet_id,
-                                                       self.neutron_subnet_id)
-        self.subnet_manager_id = self.subnet_manager.create(self.ctx.session)
-        self.ctx.session.flush()
-
-    def test_create(self):
-        self.assertEqual(self.ipam_subnet_id, self.subnet_manager_id)
-        subnets = self.ctx.session.query(db_models.IpamSubnet).filter_by(
-            id=self.ipam_subnet_id).all()
-        self.assertEqual(1, len(subnets))
-
-    def test_remove(self):
-        count = db_api.IpamSubnetManager.delete(self.ctx.session,
-                                                self.neutron_subnet_id)
-        self.assertEqual(1, count)
-        subnets = self.ctx.session.query(db_models.IpamSubnet).filter_by(
-            id=self.ipam_subnet_id).all()
-        self.assertEqual(0, len(subnets))
-
-    def test_remove_non_existent_subnet(self):
-        count = db_api.IpamSubnetManager.delete(self.ctx.session,
-                                                'non-existent')
-        self.assertEqual(0, count)
-
-    def _create_pools(self, pools):
-        db_pools = []
-        for pool in pools:
-            db_pool = self.subnet_manager.create_pool(self.ctx.session,
-                                                      pool[0],
-                                                      pool[1])
-            db_pools.append(db_pool)
-        return db_pools
-
-    def _validate_ips(self, pools, db_pool):
-        self.assertTrue(
-            any(pool == (db_pool.first_ip, db_pool.last_ip) for pool in pools))
-
-    def test_create_pool(self):
-        db_pools = self._create_pools([self.single_pool])
-
-        ipam_pool = self.ctx.session.query(db_models.IpamAllocationPool).\
-            filter_by(ipam_subnet_id=self.ipam_subnet_id).first()
-        self._validate_ips([self.single_pool], ipam_pool)
-
-        db_range = self.ctx.session.query(db_models.IpamAvailabilityRange).\
-            filter_by(allocation_pool_id=db_pools[0].id).first()
-        self._validate_ips([self.single_pool], db_range)
-
-    def test_get_first_range(self):
-        self._create_pools(self.multi_pool)
-        db_range = self.subnet_manager.get_first_range(self.ctx.session)
-        self._validate_ips(self.multi_pool, db_range)
-
-    def test_list_ranges_by_subnet_id(self):
-        self._create_pools(self.multi_pool)
-
-        db_ranges = self.subnet_manager.list_ranges_by_subnet_id(
-            self.ctx.session).all()
-        self.assertEqual(2, len(db_ranges))
-        self.assertEqual(db_models.IpamAvailabilityRange, type(db_ranges[0]))
-
-    def test_list_ranges_by_allocation_pool(self):
-        db_pools = self._create_pools([self.single_pool])
-        # generate ids for allocation pools on flush
-        self.ctx.session.flush()
-        db_ranges = self.subnet_manager.list_ranges_by_allocation_pool(
-            self.ctx.session,
-            db_pools[0].id).all()
-        self.assertEqual(1, len(db_ranges))
-        self.assertEqual(db_models.IpamAvailabilityRange, type(db_ranges[0]))
-        self._validate_ips([self.single_pool], db_ranges[0])
-
-    def test_create_range(self):
-        self._create_pools([self.single_pool])
-        pool = self.ctx.session.query(db_models.IpamAllocationPool).\
-            filter_by(ipam_subnet_id=self.ipam_subnet_id).first()
-        self._validate_ips([self.single_pool], pool)
-        allocation_pool_id = pool.id
-
-        # delete the range
-        db_range = self.subnet_manager.list_ranges_by_allocation_pool(
-            self.ctx.session,
-            pool.id).first()
-        self._validate_ips([self.single_pool], db_range)
-        self.ctx.session.delete(db_range)
-
-        # create a new range
-        range_start = '1.2.3.5'
-        range_end = '1.2.3.9'
-        new_range = self.subnet_manager.create_range(self.ctx.session,
-                                                     allocation_pool_id,
-                                                     range_start,
-                                                     range_end)
-        self.assertEqual(range_start, new_range.first_ip)
-        self.assertEqual(range_end, new_range.last_ip)
-
-    def test_update_range(self):
-        self._create_pools([self.single_pool])
-        db_range = self.subnet_manager.get_first_range(self.ctx.session)
-        updated_count = self.subnet_manager.update_range(self.ctx.session,
-                                                         db_range,
-                                                         first_ip='1.2.3.6',
-                                                         last_ip='1.2.3.8')
-        self.assertEqual(1, updated_count)
-
-    def test_update_range_no_new_values(self):
-        self._create_pools([self.single_pool])
-        db_range = self.subnet_manager.get_first_range(self.ctx.session)
-        self.assertRaises(ipam_exc.IpamAvailabilityRangeNoChanges,
-                          self.subnet_manager.update_range,
-                          self.ctx.session, db_range)
-
-    def test_update_range_reraise_error(self):
-        session = mock.Mock()
-        session.query.side_effect = orm_exc.ObjectDeletedError(None, None)
-        self.assertRaises(db_exc.RetryRequest,
-                          self.subnet_manager.update_range,
-                          session,
-                          mock.Mock(),
-                          first_ip='1.2.3.5')
-
-    def test_delete_range(self):
-        self._create_pools([self.single_pool])
-        db_range = self.subnet_manager.get_first_range(self.ctx.session)
-        deleted_count = self.subnet_manager.delete_range(self.ctx.session,
-                                                         db_range)
-        self.assertEqual(1, deleted_count)
-
-    def test_delete_range_reraise_error(self):
-        session = mock.Mock()
-        session.query.side_effect = orm_exc.ObjectDeletedError(None, None)
-        self.assertRaises(db_exc.RetryRequest,
-                          self.subnet_manager.delete_range,
-                          session,
-                          mock.Mock())
-
-    def test_check_unique_allocation(self):
-        self.assertTrue(self.subnet_manager.check_unique_allocation(
-            self.ctx.session, self.subnet_ip))
-
-    def test_check_unique_allocation_negative(self):
-        self.subnet_manager.create_allocation(self.ctx.session,
-                                              self.subnet_ip)
-        self.assertFalse(self.subnet_manager.check_unique_allocation(
-            self.ctx.session, self.subnet_ip))
-
-    def test_list_allocations(self):
-        ips = ['1.2.3.4', '1.2.3.6', '1.2.3.7']
-        for ip in ips:
-            self.subnet_manager.create_allocation(self.ctx.session, ip)
-        allocs = self.subnet_manager.list_allocations(self.ctx.session).all()
-        self.assertEqual(len(ips), len(allocs))
-        for allocation in allocs:
-            self.assertIn(allocation.ip_address, ips)
-
-    def _test_create_allocation(self):
-        self.subnet_manager.create_allocation(self.ctx.session,
-                                              self.subnet_ip)
-        alloc = self.ctx.session.query(db_models.IpamAllocation).filter_by(
-            ipam_subnet_id=self.ipam_subnet_id).all()
-        self.assertEqual(1, len(alloc))
-        self.assertEqual(self.subnet_ip, alloc[0].ip_address)
-        return alloc
-
-    def test_create_allocation(self):
-        self._test_create_allocation()
-
-    def test_delete_allocation(self):
-        allocs = self._test_create_allocation()
-        self.subnet_manager.delete_allocation(self.ctx.session,
-                                              allocs[0].ip_address)
-
-        allocs = self.ctx.session.query(db_models.IpamAllocation).filter_by(
-            ipam_subnet_id=self.ipam_subnet_id).all()
-        self.assertEqual(0, len(allocs))
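-
-
-# Summarized from the tests above: the typical lifecycle a driver runs
-# through with IpamSubnetManager (sketch only -- the ids are placeholder
-# strings and the real driver wraps these calls in proper transactions):
-def _subnet_manager_usage_sketch(session):
-    manager = db_api.IpamSubnetManager('ipam-subnet-id', 'neutron-subnet-id')
-    manager.create(session)
-    # create_pool also seeds the pool's initial availability range
-    pool = manager.create_pool(session, '10.0.0.2', '10.0.0.254')
-    session.flush()  # assigns pool.id so ranges can be queried by pool
-    manager.create_allocation(session, '10.0.0.5')
-    return manager.list_allocations(session).all()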
diff --git a/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py b/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py
deleted file mode 100644 (file)
index 7baab1a..0000000
+++ /dev/null
@@ -1,462 +0,0 @@
-# Copyright 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import netaddr
-
-from oslo_db import exception as db_exc
-
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.ipam.drivers.neutrondb_ipam import driver
-from neutron.ipam import exceptions as ipam_exc
-from neutron.ipam import requests as ipam_req
-from neutron import manager
-
-from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin
-from neutron.tests.unit import testlib_api
-
-
-def convert_firstip_to_ipaddress(range_item):
-    return netaddr.IPAddress(range_item['first_ip'])
-
-
-class TestNeutronDbIpamMixin(object):
-
-    def _create_network(self, plugin, ctx, shared=False):
-        network = {'network': {'name': 'net',
-                               'shared': shared,
-                               'admin_state_up': True,
-                               'tenant_id': self._tenant_id}}
-        created_network = plugin.create_network(ctx, network)
-        return (created_network, created_network['id'])
-
-    def _create_subnet(self, plugin, ctx, network_id, cidr, ip_version=4,
-                       v6_address_mode=attributes.ATTR_NOT_SPECIFIED,
-                       allocation_pools=attributes.ATTR_NOT_SPECIFIED):
-        subnet = {'subnet': {'name': 'sub',
-                             'cidr': cidr,
-                             'ip_version': ip_version,
-                             'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
-                             'allocation_pools': allocation_pools,
-                             'enable_dhcp': True,
-                             'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
-                             'host_routes': attributes.ATTR_NOT_SPECIFIED,
-                             'ipv6_address_mode': v6_address_mode,
-                             'ipv6_ra_mode': attributes.ATTR_NOT_SPECIFIED,
-                             'network_id': network_id,
-                             'tenant_id': self._tenant_id}}
-        return plugin.create_subnet(ctx, subnet)
-
-
-class TestNeutronDbIpamPool(testlib_api.SqlTestCase,
-                            TestNeutronDbIpamMixin):
-    """Test case for Neutron's DB IPAM driver subnet pool interface."""
-
-    def setUp(self):
-        super(TestNeutronDbIpamPool, self).setUp()
-        self._tenant_id = 'test-tenant'
-
-        # Configure plugin for tests
-        self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS)
-
-        # Prepare environment for tests
-        self.plugin = manager.NeutronManager.get_plugin()
-        self.ctx = context.get_admin_context()
-        self.network, self.net_id = self._create_network(self.plugin,
-                                                         self.ctx)
-
-        # Allocate IPAM driver
-        self.ipam_pool = driver.NeutronDbPool(None, self.ctx)
-
-    def _verify_ipam_subnet_details(self, ipam_subnet,
-                                    cidr=None,
-                                    tenant_id=None,
-                                    gateway_ip=None,
-                                    allocation_pools=None):
-        ipam_subnet_details = ipam_subnet.get_details()
-        gateway_ip_address = None
-        cidr_ip_network = None
-        if gateway_ip:
-            gateway_ip_address = netaddr.IPAddress(gateway_ip)
-        if cidr:
-            cidr_ip_network = netaddr.IPNetwork(cidr)
-        self.assertEqual(tenant_id, ipam_subnet_details.tenant_id)
-        self.assertEqual(gateway_ip_address, ipam_subnet_details.gateway_ip)
-        self.assertEqual(cidr_ip_network, ipam_subnet_details.subnet_cidr)
-        self.assertEqual(allocation_pools,
-                         ipam_subnet_details.allocation_pools)
-
-    def test_allocate_ipam_subnet_no_neutron_subnet_id(self):
-        cidr = '10.0.0.0/24'
-        allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'),
-                            netaddr.IPRange('10.0.0.200', '10.0.0.250')]
-        subnet_req = ipam_req.SpecificSubnetRequest(
-            self._tenant_id,
-            None,
-            cidr,
-            allocation_pools=allocation_pools,
-            gateway_ip='10.0.0.101')
-        ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req)
-        self._verify_ipam_subnet_details(ipam_subnet,
-                                         cidr,
-                                         self._tenant_id,
-                                         '10.0.0.101',
-                                         allocation_pools)
-
-    def _prepare_specific_subnet_request(self, cidr):
-        subnet = self._create_subnet(
-            self.plugin, self.ctx, self.net_id, cidr)
-        subnet_req = ipam_req.SpecificSubnetRequest(
-            self._tenant_id,
-            subnet['id'],
-            cidr,
-            gateway_ip=subnet['gateway_ip'])
-        return subnet, subnet_req
-
-    def test_allocate_ipam_subnet_with_neutron_subnet_id(self):
-        cidr = '10.0.0.0/24'
-        subnet, subnet_req = self._prepare_specific_subnet_request(cidr)
-        ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req)
-        self._verify_ipam_subnet_details(
-            ipam_subnet,
-            cidr, self._tenant_id, subnet['gateway_ip'],
-            [netaddr.IPRange('10.0.0.2', '10.0.0.254')])
-
-    def test_allocate_any_subnet_fails(self):
-        self.assertRaises(
-            ipam_exc.InvalidSubnetRequestType,
-            self.ipam_pool.allocate_subnet,
-            ipam_req.AnySubnetRequest(self._tenant_id, 'meh',
-                                      constants.IPv4, 24))
-
-    def test_update_subnet_pools(self):
-        cidr = '10.0.0.0/24'
-        subnet, subnet_req = self._prepare_specific_subnet_request(cidr)
-        self.ipam_pool.allocate_subnet(subnet_req)
-        allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'),
-                            netaddr.IPRange('10.0.0.200', '10.0.0.250')]
-        update_subnet_req = ipam_req.SpecificSubnetRequest(
-            self._tenant_id,
-            subnet['id'],
-            cidr,
-            gateway_ip=subnet['gateway_ip'],
-            allocation_pools=allocation_pools)
-        ipam_subnet = self.ipam_pool.update_subnet(update_subnet_req)
-        self._verify_ipam_subnet_details(
-            ipam_subnet,
-            cidr, self._tenant_id, subnet['gateway_ip'], allocation_pools)
-
-    def test_get_subnet(self):
-        cidr = '10.0.0.0/24'
-        subnet, subnet_req = self._prepare_specific_subnet_request(cidr)
-        self.ipam_pool.allocate_subnet(subnet_req)
-        # Retrieve the subnet
-        ipam_subnet = self.ipam_pool.get_subnet(subnet['id'])
-        self._verify_ipam_subnet_details(
-            ipam_subnet,
-            cidr, self._tenant_id, subnet['gateway_ip'],
-            [netaddr.IPRange('10.0.0.2', '10.0.0.254')])
-
-    def test_get_non_existing_subnet_fails(self):
-        self.assertRaises(n_exc.SubnetNotFound,
-                          self.ipam_pool.get_subnet,
-                          'boo')
-
-    def test_remove_ipam_subnet(self):
-        cidr = '10.0.0.0/24'
-        subnet, subnet_req = self._prepare_specific_subnet_request(cidr)
-        self.ipam_pool.allocate_subnet(subnet_req)
-        # Remove ipam subnet by neutron subnet id
-        self.ipam_pool.remove_subnet(subnet['id'])
-
-    def test_remove_non_existent_subnet_fails(self):
-        self.assertRaises(n_exc.SubnetNotFound,
-                          self.ipam_pool.remove_subnet,
-                          'non-existent-id')
-
-    def test_get_details_for_invalid_subnet_id_fails(self):
-        cidr = '10.0.0.0/24'
-        subnet_req = ipam_req.SpecificSubnetRequest(
-            self._tenant_id,
-            'non-existent-id',
-            cidr)
-        self.ipam_pool.allocate_subnet(subnet_req)
-        # Neutron subnet does not exist, so get_subnet should fail
-        self.assertRaises(n_exc.SubnetNotFound,
-                          self.ipam_pool.get_subnet,
-                          'non-existent-id')
-
-
-class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
-                              TestNeutronDbIpamMixin):
-    """Test case for the Subnet interface of Neutron's DB IPAM driver.
-
-    This test case exercises the reference IPAM driver.
-    Even if it loads a plugin, the unit tests in this class do not exercise
-    it at all; they simply perform white box testing on the IPAM driver.
-    The plugin is exclusively used to create the neutron objects on which
-    the IPAM driver will operate.
-    """
-
-    def _create_and_allocate_ipam_subnet(
-        self, cidr, allocation_pools=attributes.ATTR_NOT_SPECIFIED,
-        ip_version=4, v6_auto_address=False, tenant_id=None):
-        v6_address_mode = attributes.ATTR_NOT_SPECIFIED
-        if v6_auto_address:
-            # set ip version to 6 regardless of what's been passed to the
-            # method
-            ip_version = 6
-            v6_address_mode = constants.IPV6_SLAAC
-        subnet = self._create_subnet(
-            self.plugin, self.ctx, self.net_id, cidr,
-            ip_version=ip_version,
-            allocation_pools=allocation_pools,
-            v6_address_mode=v6_address_mode)
-        # Build netaddr.IPRanges from allocation pools since IPAM SubnetRequest
-        # objects are strongly typed
-        allocation_pool_ranges = [netaddr.IPRange(
-            pool['start'], pool['end']) for pool in
-            subnet['allocation_pools']]
-        subnet_req = ipam_req.SpecificSubnetRequest(
-            tenant_id,
-            subnet['id'],
-            cidr,
-            gateway_ip=subnet['gateway_ip'],
-            allocation_pools=allocation_pool_ranges)
-        ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req)
-        return ipam_subnet, subnet
-
-    def setUp(self):
-        super(TestNeutronDbIpamSubnet, self).setUp()
-        self._tenant_id = 'test-tenant'
-
-        # Configure plugin for tests
-        self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS)
-
-        # Prepare environment for tests
-        self.plugin = manager.NeutronManager.get_plugin()
-        self.ctx = context.get_admin_context()
-        self.network, self.net_id = self._create_network(self.plugin,
-                                                         self.ctx)
-
-        # Allocate IPAM driver
-        self.ipam_pool = driver.NeutronDbPool(None, self.ctx)
-
-    def test__verify_ip_succeeds(self):
-        cidr = '10.0.0.0/24'
-        ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0]
-        ipam_subnet._verify_ip(self.ctx.session, '10.0.0.2')
-
-    def test__verify_ip_not_in_subnet_fails(self):
-        cidr = '10.0.0.0/24'
-        ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0]
-        self.assertRaises(ipam_exc.InvalidIpForSubnet,
-                          ipam_subnet._verify_ip,
-                          self.ctx.session,
-                          '192.168.0.2')
-
-    def test__verify_ip_bcast_and_network_fail(self):
-        cidr = '10.0.0.0/24'
-        ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0]
-        self.assertRaises(ipam_exc.InvalidIpForSubnet,
-                          ipam_subnet._verify_ip,
-                          self.ctx.session,
-                          '10.0.0.255')
-        self.assertRaises(ipam_exc.InvalidIpForSubnet,
-                          ipam_subnet._verify_ip,
-                          self.ctx.session,
-                          '10.0.0.0')
-
-    def test__allocate_specific_ip(self):
-        cidr = '10.0.0.0/24'
-        ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0]
-        with self.ctx.session.begin():
-            ranges = ipam_subnet._allocate_specific_ip(
-                self.ctx.session, '10.0.0.33')
-        self.assertEqual(2, len(ranges))
-        # 10.0.0.1 should be allocated for gateway ip
-        ranges.sort(key=convert_firstip_to_ipaddress)
-        self.assertEqual('10.0.0.2', ranges[0]['first_ip'])
-        self.assertEqual('10.0.0.32', ranges[0]['last_ip'])
-        self.assertEqual('10.0.0.34', ranges[1]['first_ip'])
-        self.assertEqual('10.0.0.254', ranges[1]['last_ip'])
-        # Limit test - first address in range
-        ranges = ipam_subnet._allocate_specific_ip(
-            self.ctx.session, '10.0.0.2')
-        self.assertEqual(2, len(ranges))
-        ranges.sort(key=convert_firstip_to_ipaddress)
-        self.assertEqual('10.0.0.3', ranges[0]['first_ip'])
-        self.assertEqual('10.0.0.32', ranges[0]['last_ip'])
-        self.assertEqual('10.0.0.34', ranges[1]['first_ip'])
-        self.assertEqual('10.0.0.254', ranges[1]['last_ip'])
-        # Limit test - last address in range
-        ranges = ipam_subnet._allocate_specific_ip(
-            self.ctx.session, '10.0.0.254')
-        self.assertEqual(2, len(ranges))
-        ranges.sort(key=convert_firstip_to_ipaddress)
-        self.assertEqual('10.0.0.3', ranges[0]['first_ip'])
-        self.assertEqual('10.0.0.32', ranges[0]['last_ip'])
-        self.assertEqual('10.0.0.34', ranges[1]['first_ip'])
-        self.assertEqual('10.0.0.253', ranges[1]['last_ip'])
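-
-    # An in-memory model of the availability-range recalculation asserted
-    # above: allocating `ip` splits the containing [first, last] range into
-    # the pieces on either side, dropping empty pieces (sketch; the driver
-    # does this against IpamAvailabilityRange rows rather than tuples).
-    @staticmethod
-    def _split_range_sketch(first_ip, last_ip, ip):
-        first = netaddr.IPAddress(first_ip)
-        last = netaddr.IPAddress(last_ip)
-        alloc = netaddr.IPAddress(ip)
-        pieces = []
-        if first < alloc:
-            pieces.append((str(first), str(alloc - 1)))
-        if alloc < last:
-            pieces.append((str(alloc + 1), str(last)))
-        return pieces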
-
-    def test__allocate_specific_ips_multiple_ranges(self):
-        cidr = '10.0.0.0/24'
-        ipam_subnet = self._create_and_allocate_ipam_subnet(
-            cidr,
-            allocation_pools=[{'start': '10.0.0.10', 'end': '10.0.0.19'},
-                              {'start': '10.0.0.30', 'end': '10.0.0.39'}])[0]
-        with self.ctx.session.begin():
-            ranges = ipam_subnet._allocate_specific_ip(
-                self.ctx.session, '10.0.0.33')
-        self.assertEqual(3, len(ranges))
-        # the default gateway (10.0.0.1) lies outside both pools here, so
-        # only the pool containing 10.0.0.33 is split
-        ranges.sort(key=convert_firstip_to_ipaddress)
-        self.assertEqual('10.0.0.10', ranges[0]['first_ip'])
-        self.assertEqual('10.0.0.19', ranges[0]['last_ip'])
-        self.assertEqual('10.0.0.30', ranges[1]['first_ip'])
-        self.assertEqual('10.0.0.32', ranges[1]['last_ip'])
-        self.assertEqual('10.0.0.34', ranges[2]['first_ip'])
-        self.assertEqual('10.0.0.39', ranges[2]['last_ip'])
-
-    def test__allocate_specific_ip_out_of_range(self):
-        cidr = '10.0.0.0/24'
-        subnet = self._create_subnet(
-            self.plugin, self.ctx, self.net_id, cidr)
-        subnet_req = ipam_req.SpecificSubnetRequest(
-            'tenant_id', subnet['id'], cidr, gateway_ip=subnet['gateway_ip'])
-        ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req)
-        with self.ctx.session.begin():
-            ranges = ipam_subnet._allocate_specific_ip(
-                self.ctx.session, '192.168.0.1')
-        # In this case _allocate_specific_ip does not fail, but simply
-        # does not update the availability ranges at all
-        self.assertEqual(1, len(ranges))
-        # 10.0.0.1 should be allocated for gateway ip
-        ranges.sort(key=convert_firstip_to_ipaddress)
-        self.assertEqual('10.0.0.2', ranges[0]['first_ip'])
-        self.assertEqual('10.0.0.254', ranges[0]['last_ip'])
-
-    def _allocate_address(self, cidr, ip_version, address_request):
-        ipam_subnet = self._create_and_allocate_ipam_subnet(
-            cidr, ip_version=ip_version)[0]
-        return ipam_subnet.allocate(address_request)
-
-    def test_allocate_any_v4_address_succeeds(self):
-        ip_address = self._allocate_address(
-            '10.0.0.0/24', 4, ipam_req.AnyAddressRequest)
-        # As the DB IPAM driver allocation logic is strictly sequential, we can
-        # expect this test to allocate the .2 address as .1 is used by default
-        # as subnet gateway
-        self.assertEqual('10.0.0.2', ip_address)
-
-    def test_allocate_any_v6_address_succeeds(self):
-        ip_address = self._allocate_address(
-            'fde3:abcd:4321:1::/64', 6, ipam_req.AnyAddressRequest)
-        # As the DB IPAM driver allocation logic is strictly sequential, we
-        # can expect this test to allocate the ::2 address, as ::1 is used
-        # by default as the subnet gateway
-        self.assertEqual('fde3:abcd:4321:1::2', ip_address)
-
-    def test_allocate_specific_v4_address_succeeds(self):
-        ip_address = self._allocate_address(
-            '10.0.0.0/24', 4, ipam_req.SpecificAddressRequest('10.0.0.33'))
-        self.assertEqual('10.0.0.33', ip_address)
-
-    def test_allocate_specific_v6_address_succeeds(self):
-        ip_address = self._allocate_address(
-            'fde3:abcd:4321:1::/64', 6,
-            ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33'))
-        self.assertEqual('fde3:abcd:4321:1::33', ip_address)
-
-    def test_allocate_specific_v4_address_out_of_range_fails(self):
-        self.assertRaises(ipam_exc.InvalidIpForSubnet,
-                          self._allocate_address,
-                          '10.0.0.0/24', 4,
-                          ipam_req.SpecificAddressRequest('192.168.0.1'))
-
-    def test_allocate_specific_v6_address_out_of_range_fails(self):
-        self.assertRaises(ipam_exc.InvalidIpForSubnet,
-                          self._allocate_address,
-                          'fde3:abcd:4321:1::/64', 6,
-                          ipam_req.SpecificAddressRequest(
-                              'fde3:abcd:eeee:1::33'))
-
-    def test_allocate_specific_address_in_use_fails(self):
-        ipam_subnet = self._create_and_allocate_ipam_subnet(
-            'fde3:abcd:4321:1::/64', ip_version=6)[0]
-        addr_req = ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33')
-        ipam_subnet.allocate(addr_req)
-        self.assertRaises(ipam_exc.IpAddressAlreadyAllocated,
-                          ipam_subnet.allocate,
-                          addr_req)
-
-    def test_allocate_any_address_exhausted_pools_fails(self):
-        # As in the tests above, availability ranges are always recalculated
-        ipam_subnet = self._create_and_allocate_ipam_subnet(
-            '192.168.0.0/30', ip_version=4)[0]
-        ipam_subnet.allocate(ipam_req.AnyAddressRequest)
-        # The second address generation request on a /30 for v4 net must fail
-        self.assertRaises(ipam_exc.IpAddressGenerationFailure,
-                          ipam_subnet.allocate,
-                          ipam_req.AnyAddressRequest)
-
-    def _test_deallocate_address(self, cidr, ip_version):
-        ipam_subnet = self._create_and_allocate_ipam_subnet(
-            cidr, ip_version=ip_version)[0]
-        ip_address = ipam_subnet.allocate(ipam_req.AnyAddressRequest)
-        ipam_subnet.deallocate(ip_address)
-
-    def test_deallocate_v4_address(self):
-        self._test_deallocate_address('10.0.0.0/24', 4)
-
-    def test_deallocate_v6_address(self):
-        # This test does not really exercise any code path different from
-        # test_deallocate_v4_address. It is provided for completeness and
-        # for future-proofing in case v6-specific logic is ever added.
-        self._test_deallocate_address('fde3:abcd:4321:1::/64', 6)
-
-    def test_deallocate_unallocated_address_fails(self):
-        ipam_subnet = self._create_and_allocate_ipam_subnet(
-            '10.0.0.0/24', ip_version=4)[0]
-        self.assertRaises(ipam_exc.IpAddressAllocationNotFound,
-                          ipam_subnet.deallocate, '10.0.0.2')
-
-    def test_allocate_all_pool_addresses_triggers_range_recalculation(self):
-        # Intentionally left empty: it could be made to pass today, but it
-        # would pass for the wrong reasons.
-        pass
-
-    def test_allocate_subnet_for_non_existent_subnet_pass(self):
-        # This test should pass because the ipam subnet no longer has a
-        # foreign key relationship with the neutron subnet.  Creating the
-        # ipam subnet before the neutron subnet is a valid case.
-        subnet_req = ipam_req.SpecificSubnetRequest(
-            'tenant_id', 'meh', '192.168.0.0/24')
-        self.ipam_pool.allocate_subnet(subnet_req)
-
-    def test__allocate_specific_ip_raises_exception(self):
-        cidr = '10.0.0.0/24'
-        ip = '10.0.0.15'
-        ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0]
-        ipam_subnet.subnet_manager = mock.Mock()
-        ipam_subnet.subnet_manager.list_ranges_by_subnet_id.return_value = [{
-            'first_ip': '10.0.0.15', 'last_ip': '10.0.0.15'}]
-        ipam_subnet.subnet_manager.delete_range.return_value = 0
-
-        self.assertRaises(db_exc.RetryRequest,
-                          ipam_subnet._allocate_specific_ip,
-                          self.ctx.session, ip)
diff --git a/neutron/tests/unit/ipam/fake_driver.py b/neutron/tests/unit/ipam/fake_driver.py
deleted file mode 100644 (file)
index 3236a4c..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2015 Infoblox Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.ipam import driver
-
-
-class FakeDriver(driver.Pool):
-    """Fake IPAM driver for tests only.
-
-    It implements the IPAM driver interface without providing any real
-    functionality.
-    """
-
-    def allocate_subnet(self, subnet):
-        return driver.Subnet()
-
-    def get_subnet(self, cidr):
-        return driver.Subnet()
-
-    def update_subnet(self, request):
-        return driver.Subnet()
-
-    def remove_subnet(self, cidr):
-        pass
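-
-
-# The unit tests load this driver both by full class path and by the short
-# alias 'fake'.  Alias lookup goes through stevedore, which relies on an
-# entry-point registration along these lines (sketch of the assumed
-# setup.cfg wiring):
-#
-#     [entry_points]
-#     neutron.ipam_drivers =
-#         fake = neutron.tests.unit.ipam.fake_driver:FakeDriver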
diff --git a/neutron/tests/unit/ipam/test_requests.py b/neutron/tests/unit/ipam/test_requests.py
deleted file mode 100644 (file)
index 20efc44..0000000
+++ /dev/null
@@ -1,386 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import netaddr
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from neutron.common import constants
-from neutron.common import ipv6_utils
-from neutron import context
-from neutron.ipam import driver
-from neutron.ipam import exceptions as ipam_exc
-from neutron.ipam import requests as ipam_req
-from neutron import manager
-from neutron.tests import base
-from neutron.tests.unit.ipam import fake_driver
-
-FAKE_IPAM_CLASS = 'neutron.tests.unit.ipam.fake_driver.FakeDriver'
-
-
-class IpamSubnetRequestTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(IpamSubnetRequestTestCase, self).setUp()
-        self.tenant_id = uuidutils.generate_uuid()
-        self.subnet_id = uuidutils.generate_uuid()
-
-
-class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
-
-    def test_subnet_request(self):
-        pool = ipam_req.SubnetRequest(self.tenant_id,
-                                      self.subnet_id)
-        self.assertEqual(self.tenant_id, pool.tenant_id)
-        self.assertEqual(self.subnet_id, pool.subnet_id)
-        self.assertIsNone(pool.gateway_ip)
-        self.assertIsNone(pool.allocation_pools)
-
-    def test_subnet_request_gateway(self):
-        request = ipam_req.SubnetRequest(self.tenant_id,
-                                         self.subnet_id,
-                                         gateway_ip='1.2.3.1')
-        self.assertEqual('1.2.3.1', str(request.gateway_ip))
-
-    def test_subnet_request_bad_gateway(self):
-        self.assertRaises(netaddr.core.AddrFormatError,
-                          ipam_req.SubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          gateway_ip='1.2.3.')
-
-    def test_subnet_request_with_range(self):
-        allocation_pools = [netaddr.IPRange('1.2.3.4', '1.2.3.5'),
-                            netaddr.IPRange('1.2.3.7', '1.2.3.9')]
-        request = ipam_req.SubnetRequest(self.tenant_id,
-                                         self.subnet_id,
-                                         allocation_pools=allocation_pools)
-        self.assertEqual(allocation_pools, request.allocation_pools)
-
-    def test_subnet_request_range_not_list(self):
-        self.assertRaises(TypeError,
-                          ipam_req.SubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          allocation_pools=1)
-
-    def test_subnet_request_bad_range(self):
-        self.assertRaises(TypeError,
-                          ipam_req.SubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          allocation_pools=['1.2.3.4'])
-
-    def test_subnet_request_different_versions(self):
-        pools = [netaddr.IPRange('0.0.0.1', '0.0.0.2'),
-                 netaddr.IPRange('::1', '::2')]
-        self.assertRaises(ValueError,
-                          ipam_req.SubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          allocation_pools=pools)
-
-    def test_subnet_request_overlap(self):
-        pools = [netaddr.IPRange('0.0.0.10', '0.0.0.20'),
-                 netaddr.IPRange('0.0.0.8', '0.0.0.10')]
-        self.assertRaises(ValueError,
-                          ipam_req.SubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          allocation_pools=pools)
-
-
-class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
-
-    def test_subnet_request(self):
-        request = ipam_req.AnySubnetRequest(self.tenant_id,
-                                            self.subnet_id,
-                                            constants.IPv4,
-                                            24,
-                                            gateway_ip='0.0.0.1')
-        self.assertEqual(24, request.prefixlen)
-
-    def test_subnet_request_bad_prefix_type(self):
-        self.assertRaises(netaddr.core.AddrFormatError,
-                          ipam_req.AnySubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          constants.IPv4,
-                          'A')
-
-    def test_subnet_request_bad_prefix(self):
-        self.assertRaises(netaddr.core.AddrFormatError,
-                          ipam_req.AnySubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          constants.IPv4,
-                          33)
-        self.assertRaises(netaddr.core.AddrFormatError,
-                          ipam_req.AnySubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          constants.IPv6,
-                          129)
-
-    def test_subnet_request_bad_gateway(self):
-        self.assertRaises(ValueError,
-                          ipam_req.AnySubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          constants.IPv6,
-                          64,
-                          gateway_ip='2000::1')
-
-    def test_subnet_request_allocation_pool_wrong_version(self):
-        pools = [netaddr.IPRange('0.0.0.4', '0.0.0.5')]
-        self.assertRaises(ValueError,
-                          ipam_req.AnySubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          constants.IPv6,
-                          64,
-                          allocation_pools=pools)
-
-    def test_subnet_request_allocation_pool_not_in_net(self):
-        pools = [netaddr.IPRange('0.0.0.64', '0.0.0.128')]
-        self.assertRaises(ValueError,
-                          ipam_req.AnySubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          constants.IPv4,
-                          25,
-                          allocation_pools=pools)
-
-
-class TestIpamSpecificSubnetRequest(IpamSubnetRequestTestCase):
-
-    def test_subnet_request(self):
-        request = ipam_req.SpecificSubnetRequest(self.tenant_id,
-                                                 self.subnet_id,
-                                                 '1.2.3.0/24',
-                                                 gateway_ip='1.2.3.1')
-        self.assertEqual(24, request.prefixlen)
-        self.assertEqual(netaddr.IPAddress('1.2.3.1'), request.gateway_ip)
-        self.assertEqual(netaddr.IPNetwork('1.2.3.0/24'), request.subnet_cidr)
-
-    def test_subnet_request_bad_gateway(self):
-        self.assertRaises(ValueError,
-                          ipam_req.SpecificSubnetRequest,
-                          self.tenant_id,
-                          self.subnet_id,
-                          '2001::1',
-                          gateway_ip='2000::1')
-
-
-class TestAddressRequest(base.BaseTestCase):
-
-    # This class doesn't test much.  At least running through all of the
-    # constructors may shake out some trivial bugs.
-
-    EUI64 = ipam_req.AutomaticAddressRequest.EUI64
-
-    def setUp(self):
-        super(TestAddressRequest, self).setUp()
-
-    def test_specific_address_ipv6(self):
-        request = ipam_req.SpecificAddressRequest('2000::45')
-        self.assertEqual(netaddr.IPAddress('2000::45'), request.address)
-
-    def test_specific_address_ipv4(self):
-        request = ipam_req.SpecificAddressRequest('1.2.3.32')
-        self.assertEqual(netaddr.IPAddress('1.2.3.32'), request.address)
-
-    def test_any_address(self):
-        ipam_req.AnyAddressRequest()
-
-    def test_automatic_address_request_eui64(self):
-        subnet_cidr = '2607:f0d0:1002:51::/64'
-        port_mac = 'aa:bb:cc:dd:ee:ff'
-        eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
-                                                         port_mac))
-        request = ipam_req.AutomaticAddressRequest(
-            address_type=self.EUI64,
-            prefix=subnet_cidr,
-            mac=port_mac)
-        self.assertEqual(request.address, netaddr.IPAddress(eui_addr))
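-
-    # How the EUI-64 address is derived (a sketch of the standard RFC 4291
-    # computation; ipv6_utils.get_ipv6_addr_by_EUI64 is the helper actually
-    # used above):
-    @staticmethod
-    def _eui64_address_sketch(prefix, mac):
-        # netaddr inserts the ff:fe filler into the middle of the MAC
-        eui64 = int(netaddr.EUI(mac).eui64())
-        # flip the universal/local bit (0x02 of the top interface-id octet)
-        interface_id = eui64 ^ (1 << 57)
-        return netaddr.IPAddress(netaddr.IPNetwork(prefix).first +
-                                 interface_id)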
-
-    def test_automatic_address_request_invalid_address_type_raises(self):
-        self.assertRaises(ipam_exc.InvalidAddressType,
-                          ipam_req.AutomaticAddressRequest,
-                          address_type='kaboom')
-
-    def test_automatic_address_request_eui64_no_mac_raises(self):
-        self.assertRaises(ipam_exc.AddressCalculationFailure,
-                          ipam_req.AutomaticAddressRequest,
-                          address_type=self.EUI64,
-                          prefix='meh')
-
-    def test_automatic_address_request_eui64_alien_param_raises(self):
-        self.assertRaises(ipam_exc.AddressCalculationFailure,
-                          ipam_req.AutomaticAddressRequest,
-                          address_type=self.EUI64,
-                          mac='meh',
-                          alien='et',
-                          prefix='meh')
-
-
-class TestIpamDriverLoader(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestIpamDriverLoader, self).setUp()
-        self.ctx = context.get_admin_context()
-
-    def _verify_fake_ipam_driver_is_loaded(self, driver_name):
-        mgr = manager.NeutronManager
-        ipam_driver = mgr.load_class_for_provider('neutron.ipam_drivers',
-                                                  driver_name)
-
-        self.assertEqual(
-            fake_driver.FakeDriver, ipam_driver,
-            "loaded ipam driver should be FakeDriver")
-
-    def _verify_import_error_is_generated(self, driver_name):
-        mgr = manager.NeutronManager
-        self.assertRaises(ImportError, mgr.load_class_for_provider,
-                          'neutron.ipam_drivers',
-                          driver_name)
-
-    def test_ipam_driver_is_loaded_by_class(self):
-        self._verify_fake_ipam_driver_is_loaded(FAKE_IPAM_CLASS)
-
-    def test_ipam_driver_is_loaded_by_name(self):
-        self._verify_fake_ipam_driver_is_loaded('fake')
-
-    def test_ipam_driver_raises_import_error(self):
-        self._verify_import_error_is_generated(
-            'neutron.tests.unit.ipam_req.SomeNonExistentClass')
-
-    def test_ipam_driver_raises_import_error_for_none(self):
-        self._verify_import_error_is_generated(None)
-
-    def _load_ipam_driver(self, driver_name, subnet_pool_id):
-        cfg.CONF.set_override("ipam_driver", driver_name)
-        return driver.Pool.get_instance(subnet_pool_id, self.ctx)
-
-    def test_ipam_driver_is_loaded_from_ipam_driver_config_value(self):
-        ipam_driver = self._load_ipam_driver('fake', None)
-        self.assertIsInstance(
-            ipam_driver, fake_driver.FakeDriver,
-            "loaded ipam driver should be of type FakeDriver")
-
-    @mock.patch(FAKE_IPAM_CLASS)
-    def test_ipam_driver_is_loaded_with_subnet_pool_id(self, ipam_mock):
-        subnet_pool_id = 'SomePoolID'
-        self._load_ipam_driver('fake', subnet_pool_id)
-        ipam_mock.assert_called_once_with(subnet_pool_id, self.ctx)
-
-
-class TestAddressRequestFactory(base.BaseTestCase):
-
-    def test_specific_address_request_is_loaded(self):
-        for address in ('10.12.0.15', 'fffe::1'):
-            ip = {'ip_address': address}
-            self.assertIsInstance(
-                ipam_req.AddressRequestFactory.get_request(None, None, ip),
-                ipam_req.SpecificAddressRequest)
-
-    def test_any_address_request_is_loaded(self):
-        for addr in [None, '']:
-            ip = {'ip_address': addr}
-            self.assertIsInstance(
-                ipam_req.AddressRequestFactory.get_request(None, None, ip),
-                ipam_req.AnyAddressRequest)
-
-    def test_automatic_address_request_is_loaded(self):
-        ip = {'mac': '6c:62:6d:de:cf:49',
-              'subnet_cidr': '2001:470:abcd::/64',
-              'eui64_address': True}
-        self.assertIsInstance(
-            ipam_req.AddressRequestFactory.get_request(None, None, ip),
-            ipam_req.AutomaticAddressRequest)
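-
-    # The dispatch these tests pin down, restated (illustrative, not the
-    # factory's literal code): an EUI-64 request wins, then a concrete
-    # ip_address, and anything else falls back to 'any address'.
-    @staticmethod
-    def _address_dispatch_sketch(ip):
-        if ip.get('eui64_address'):
-            return ipam_req.AutomaticAddressRequest(
-                address_type=ipam_req.AutomaticAddressRequest.EUI64,
-                prefix=ip['subnet_cidr'], mac=ip['mac'])
-        if ip.get('ip_address'):
-            return ipam_req.SpecificAddressRequest(ip['ip_address'])
-        return ipam_req.AnyAddressRequest()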
-
-
-class TestSubnetRequestFactory(IpamSubnetRequestTestCase):
-
-    def _build_subnet_dict(self, id=None, cidr='192.168.1.0/24',
-                           prefixlen=8, ip_version=4):
-        subnet = {'cidr': cidr,
-                  'prefixlen': prefixlen,
-                  'ip_version': ip_version,
-                  'tenant_id': self.tenant_id,
-                  'gateway_ip': None,
-                  'allocation_pools': None,
-                  'id': id or self.subnet_id}
-        subnetpool = {'ip_version': ip_version,
-                      'default_prefixlen': prefixlen}
-        return subnet, subnetpool
-
-    def test_specific_subnet_request_is_loaded(self):
-        addresses = [
-            '10.12.0.15/24',
-            '10.12.0.0/24',
-            'fffe::1/64',
-            'fffe::/64']
-        for address in addresses:
-            subnet, subnetpool = self._build_subnet_dict(cidr=address)
-            self.assertIsInstance(
-                ipam_req.SubnetRequestFactory.get_request(None,
-                                                          subnet,
-                                                          subnetpool),
-                ipam_req.SpecificSubnetRequest)
-
-    def test_any_address_request_is_loaded_for_ipv4(self):
-        subnet, subnetpool = self._build_subnet_dict(cidr=None, ip_version=4)
-        self.assertIsInstance(
-            ipam_req.SubnetRequestFactory.get_request(None,
-                                                      subnet,
-                                                      subnetpool),
-            ipam_req.AnySubnetRequest)
-
-    def test_any_address_request_is_loaded_for_ipv6(self):
-        subnet, subnetpool = self._build_subnet_dict(cidr=None, ip_version=6)
-        self.assertIsInstance(
-            ipam_req.SubnetRequestFactory.get_request(None,
-                                                      subnet,
-                                                      subnetpool),
-            ipam_req.AnySubnetRequest)
-
-    def test_args_are_passed_to_specific_request(self):
-        subnet, subnetpool = self._build_subnet_dict()
-        request = ipam_req.SubnetRequestFactory.get_request(None,
-                                                            subnet,
-                                                            subnetpool)
-        self.assertIsInstance(request,
-                              ipam_req.SpecificSubnetRequest)
-        self.assertEqual(self.tenant_id, request.tenant_id)
-        self.assertEqual(self.subnet_id, request.subnet_id)
-        self.assertIsNone(request.gateway_ip)
-        self.assertIsNone(request.allocation_pools)
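-
-    # The rule encoded by this class, restated (sketch, not the factory's
-    # literal code): a concrete cidr selects SpecificSubnetRequest, while a
-    # missing cidr falls back to an AnySubnetRequest sized from the pool
-    # defaults.
-    @staticmethod
-    def _subnet_dispatch_sketch(subnet, subnetpool):
-        if subnet.get('cidr'):
-            return ipam_req.SpecificSubnetRequest(
-                subnet['tenant_id'], subnet['id'], subnet['cidr'],
-                subnet['gateway_ip'], subnet['allocation_pools'])
-        return ipam_req.AnySubnetRequest(
-            subnet['tenant_id'], subnet['id'],
-            subnetpool['ip_version'], subnetpool['default_prefixlen'])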
-
-
-class TestGetRequestFactory(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestGetRequestFactory, self).setUp()
-        cfg.CONF.set_override('ipam_driver', 'fake')
-        self.driver = driver.Pool.get_instance(None, None)
-
-    def test_get_subnet_request_factory(self):
-        self.assertEqual(
-            self.driver.get_subnet_request_factory(),
-            ipam_req.SubnetRequestFactory)
-
-    def test_get_address_request_factory(self):
-        self.assertEqual(
-            self.driver.get_address_request_factory(),
-            ipam_req.AddressRequestFactory)
diff --git a/neutron/tests/unit/ipam/test_subnet_alloc.py b/neutron/tests/unit/ipam/test_subnet_alloc.py
deleted file mode 100644 (file)
index bea86c8..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import netaddr
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_utils import uuidutils
-
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.ipam import requests as ipam_req
-from neutron.ipam import subnet_alloc
-from neutron import manager
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit import testlib_api
-
-
-class TestSubnetAllocation(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(TestSubnetAllocation, self).setUp()
-        self._tenant_id = 'test-tenant'
-        self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS)
-        self.plugin = manager.NeutronManager.get_plugin()
-        self.ctx = context.get_admin_context()
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-
-    def _create_subnet_pool(self, plugin, ctx, name, prefix_list,
-                            min_prefixlen, ip_version,
-                            max_prefixlen=attributes.ATTR_NOT_SPECIFIED,
-                            default_prefixlen=attributes.ATTR_NOT_SPECIFIED,
-                            default_quota=attributes.ATTR_NOT_SPECIFIED,
-                            shared=False, is_default=False):
-        subnetpool = {'subnetpool': {'name': name,
-                                     'tenant_id': self._tenant_id,
-                                     'prefixes': prefix_list,
-                                     'min_prefixlen': min_prefixlen,
-                                     'max_prefixlen': max_prefixlen,
-                                     'default_prefixlen': default_prefixlen,
-                                     'shared': shared,
-                                     'is_default': is_default,
-                                     'default_quota': default_quota}}
-        return plugin.create_subnetpool(ctx, subnetpool)
-
-    def _get_subnetpool(self, ctx, plugin, id):
-        return plugin.get_subnetpool(ctx, id)
-
-    def test_allocate_any_subnet(self):
-        prefix_list = ['10.1.0.0/16', '192.168.1.0/24']
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      prefix_list, 21, 4)
-        sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
-        with self.ctx.session.begin(subtransactions=True):
-            sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-            req = ipam_req.AnySubnetRequest(self._tenant_id,
-                                            uuidutils.generate_uuid(),
-                                            constants.IPv4, 21)
-            res = sa.allocate_subnet(req)
-            detail = res.get_details()
-            prefix_set = netaddr.IPSet(iterable=prefix_list)
-            allocated_set = netaddr.IPSet(iterable=[detail.subnet_cidr])
-            self.assertTrue(allocated_set.issubset(prefix_set))
-            self.assertEqual(detail.prefixlen, 21)
-
-    def test_allocate_specific_subnet(self):
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      ['10.1.0.0/16', '192.168.1.0/24'],
-                                      21, 4)
-        with self.ctx.session.begin(subtransactions=True):
-            sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
-            sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-            req = ipam_req.SpecificSubnetRequest(self._tenant_id,
-                                                 uuidutils.generate_uuid(),
-                                                 '10.1.2.0/24')
-            res = sa.allocate_subnet(req)
-            detail = res.get_details()
-            sp = self._get_subnetpool(self.ctx, self.plugin, sp['id'])
-            self.assertEqual(str(detail.subnet_cidr), '10.1.2.0/24')
-            self.assertEqual(detail.prefixlen, 24)
-
-    def test_insufficient_prefix_space_for_any_allocation(self):
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      ['10.1.1.0/24', '192.168.1.0/24'],
-                                      21, 4)
-        sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
-        sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-        req = ipam_req.AnySubnetRequest(self._tenant_id,
-                                        uuidutils.generate_uuid(),
-                                        constants.IPv4,
-                                        21)
-        self.assertRaises(n_exc.SubnetAllocationError,
-                          sa.allocate_subnet, req)
-
-    def test_insufficient_prefix_space_for_specific_allocation(self):
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      ['10.1.0.0/24'],
-                                      21, 4)
-        sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
-        sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-        req = ipam_req.SpecificSubnetRequest(self._tenant_id,
-                                             uuidutils.generate_uuid(),
-                                             '10.1.0.0/21')
-        self.assertRaises(n_exc.SubnetAllocationError,
-                          sa.allocate_subnet, req)
-
-    def test_allocate_any_subnet_gateway(self):
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      ['10.1.0.0/16', '192.168.1.0/24'],
-                                      21, 4)
-        sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
-        with self.ctx.session.begin(subtransactions=True):
-            sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-            req = ipam_req.AnySubnetRequest(self._tenant_id,
-                                            uuidutils.generate_uuid(),
-                                            constants.IPv4, 21)
-            res = sa.allocate_subnet(req)
-            detail = res.get_details()
-            self.assertEqual(detail.gateway_ip,
-                             detail.subnet_cidr.network + 1)
-
-    def test_allocate_specific_subnet_specific_gateway(self):
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      ['10.1.0.0/16', '192.168.1.0/24'],
-                                      21, 4)
-        sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
-        with self.ctx.session.begin(subtransactions=True):
-            sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-            req = ipam_req.SpecificSubnetRequest(self._tenant_id,
-                                                 uuidutils.generate_uuid(),
-                                                 '10.1.2.0/24',
-                                                 gateway_ip='10.1.2.254')
-            res = sa.allocate_subnet(req)
-            detail = res.get_details()
-            self.assertEqual(detail.gateway_ip,
-                             netaddr.IPAddress('10.1.2.254'))
-
-    def test_allocate_specific_ipv6_subnet_specific_gateway(self):
-        # Same scenario as described in bug #1466322
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      ['2210::/64'],
-                                      64, 6)
-        sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
-        with self.ctx.session.begin(subtransactions=True):
-            sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-            req = ipam_req.SpecificSubnetRequest(self._tenant_id,
-                                                 uuidutils.generate_uuid(),
-                                                 '2210::/64',
-                                                 '2210::ffff:ffff:ffff:ffff')
-            res = sa.allocate_subnet(req)
-            detail = res.get_details()
-            self.assertEqual(detail.gateway_ip,
-                             netaddr.IPAddress('2210::ffff:ffff:ffff:ffff'))
-
-    def test__allocation_value_for_tenant_no_allocations(self):
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      ['10.1.0.0/16', '192.168.1.0/24'],
-                                      21, 4)
-        sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-        value = sa._allocations_used_by_tenant(32)
-        self.assertEqual(value, 0)
-
-    def test_subnetpool_default_quota_exceeded(self):
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      ['fe80::/48'],
-                                      48, 6, default_quota=1)
-        sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
-        sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-        req = ipam_req.SpecificSubnetRequest(self._tenant_id,
-                                             uuidutils.generate_uuid(),
-                                             'fe80::/63')
-        self.assertRaises(n_exc.SubnetPoolQuotaExceeded,
-                          sa.allocate_subnet,
-                          req)
-
-    def test_subnetpool_concurrent_allocation_exception(self):
-        sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
-                                      ['fe80::/48'],
-                                      48, 6, default_quota=1)
-        sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
-        sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
-        req = ipam_req.SpecificSubnetRequest(self._tenant_id,
-                                             uuidutils.generate_uuid(),
-                                             'fe80::/63')
-        with mock.patch("sqlalchemy.orm.query.Query.update", return_value=0):
-            self.assertRaises(db_exc.RetryRequest, sa.allocate_subnet, req)
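For orientation, the allocation behaviour exercised above can be reproduced without the plugin or a database. A minimal sketch assuming only netaddr (a hypothetical helper, not the SubnetAllocator implementation): subtract what is already allocated from the pool and carve out the first subnet of the requested size.

    import netaddr


    def allocate_any_subnet(pool_prefixes, allocated_cidrs, prefixlen):
        """Return the first free /prefixlen subnet in the pool, or None."""
        free = netaddr.IPSet(pool_prefixes) - netaddr.IPSet(allocated_cidrs)
        for cidr in free.iter_cidrs():
            if cidr.prefixlen <= prefixlen:
                return next(cidr.subnet(prefixlen))  # first one that fits
        return None


    # 10.1.0.0/21: a subset of the pool with the requested prefix length
    print(allocate_any_subnet(['10.1.0.0/16', '192.168.1.0/24'], [], 21))
    # None: no pool prefix is wide enough, the "insufficient space" case
    print(allocate_any_subnet(['10.1.1.0/24', '192.168.1.0/24'], [], 21))
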
diff --git a/neutron/tests/unit/ipam/test_utils.py b/neutron/tests/unit/ipam/test_utils.py
deleted file mode 100644 (file)
index db2ee9c..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-
-from neutron.ipam import utils
-from neutron.tests import base
-
-
-class TestIpamUtils(base.BaseTestCase):
-
-    def test_check_subnet_ip_v4_network(self):
-        self.assertFalse(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.0'))
-
-    def test_check_subnet_ip_v4_broadcast(self):
-        self.assertFalse(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.255'))
-
-    def test_check_subnet_ip_v4_valid(self):
-        self.assertTrue(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.1'))
-        self.assertTrue(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.254'))
-
-    def test_check_subnet_ip_v6_network(self):
-        self.assertFalse(utils.check_subnet_ip('F111::0/64', 'F111::0'))
-
-    def test_check_subnet_ip_v6_valid(self):
-        self.assertTrue(utils.check_subnet_ip('F111::0/64', 'F111::1'))
-        self.assertTrue(utils.check_subnet_ip('F111::0/64',
-                                              'F111::FFFF:FFFF:FFFF:FFFF'))
-
-    def test_generate_pools_v4_nogateway(self):
-        cidr = '192.168.0.0/24'
-        expected = [netaddr.IPRange('192.168.0.1', '192.168.0.254')]
-        self.assertEqual(expected, utils.generate_pools(cidr, None))
-
-    def test_generate_pools_v4_gateway_first(self):
-        cidr = '192.168.0.0/24'
-        gateway = '192.168.0.1'
-        expected = [netaddr.IPRange('192.168.0.2', '192.168.0.254')]
-        self.assertEqual(expected, utils.generate_pools(cidr, gateway))
-
-    def test_generate_pools_v4_gateway_last(self):
-        cidr = '192.168.0.0/24'
-        gateway = '192.168.0.254'
-        expected = [netaddr.IPRange('192.168.0.1', '192.168.0.253')]
-        self.assertEqual(expected, utils.generate_pools(cidr, gateway))
-
-    def test_generate_pools_v4_32(self):
-        # 32 is special because it should have 1 usable address
-        cidr = '192.168.0.0/32'
-        expected = [netaddr.IPRange('192.168.0.0', '192.168.0.0')]
-        self.assertEqual(expected, utils.generate_pools(cidr, None))
-
-    def test_generate_pools_v4_31(self):
-        cidr = '192.168.0.0/31'
-        expected = []
-        self.assertEqual(expected, utils.generate_pools(cidr, None))
-
-    def test_generate_pools_v4_gateway_middle(self):
-        cidr = '192.168.0.0/24'
-        gateway = '192.168.0.128'
-        expected = [netaddr.IPRange('192.168.0.1', '192.168.0.127'),
-                    netaddr.IPRange('192.168.0.129', '192.168.0.254')]
-        self.assertEqual(expected, utils.generate_pools(cidr, gateway))
-
-    def test_generate_pools_v6_nogateway(self):
-        # other than the difference in the last address, the rest of the
-        # logic is the same as v4 so we only need one test
-        cidr = 'F111::0/64'
-        expected = [netaddr.IPRange('F111::1', 'F111::FFFF:FFFF:FFFF:FFFF')]
-        self.assertEqual(expected, utils.generate_pools(cidr, None))
-
-    def test_generate_pools_v6_empty(self):
-        # We want to be sure the range will begin and end with an IPv6
-        # address, even if an ambiguous ::/64 cidr is given.
-        cidr = '::/64'
-        expected = [netaddr.IPRange('::1', '::FFFF:FFFF:FFFF:FFFF')]
-        self.assertEqual(expected, utils.generate_pools(cidr, None))
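Taken together, the generate_pools expectations above form a compact specification: IPv4 pools skip the network and broadcast addresses, a /32 keeps its single address, a /31 has none, IPv6 skips only the zero address, and a gateway splits the range around itself. A rough standalone equivalent of that behaviour, assuming only netaddr (a sketch, not the deleted neutron.ipam.utils code):

    import netaddr


    def generate_pools(cidr, gateway_ip=None):
        net = netaddr.IPNetwork(cidr)
        if net.version == 4 and net.prefixlen == 32:
            first, last = net.first, net.last      # /32: one usable address
        elif net.version == 4 and net.prefixlen == 31:
            return []                              # /31: no usable addresses
        elif net.version == 4:
            first, last = net.first + 1, net.last - 1  # drop net/broadcast
        else:
            first, last = net.first + 1, net.last  # v6: drop only the zero
        pools = netaddr.IPSet(netaddr.IPRange(
            netaddr.IPAddress(first, net.version),
            netaddr.IPAddress(last, net.version)))
        if gateway_ip:
            pools.remove(netaddr.IPAddress(gateway_ip))  # splits the range
        return list(pools.iter_ipranges())
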
diff --git a/neutron/tests/unit/notifiers/__init__.py b/neutron/tests/unit/notifiers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/notifiers/test_batch_notifier.py b/neutron/tests/unit/notifiers/test_batch_notifier.py
deleted file mode 100644 (file)
index 23bede8..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.notifiers import batch_notifier
-from neutron.tests import base
-
-
-class TestBatchNotifier(base.BaseTestCase):
-    def setUp(self):
-        super(TestBatchNotifier, self).setUp()
-        self.notifier = batch_notifier.BatchNotifier(0.1, lambda x: x)
-        self.spawn_n = mock.patch('eventlet.spawn_n').start()
-
-    def test_queue_event_no_event(self):
-        self.notifier.queue_event(None)
-        self.assertEqual(0, len(self.notifier.pending_events))
-        self.assertEqual(0, self.spawn_n.call_count)
-
-    def test_queue_event_first_event(self):
-        self.notifier.queue_event(mock.Mock())
-        self.assertEqual(1, len(self.notifier.pending_events))
-        self.assertEqual(1, self.spawn_n.call_count)
-
-    def test_queue_event_multiple_events(self):
-        events = 6
-        for i in range(0, events):
-            self.notifier.queue_event(mock.Mock())
-        self.assertEqual(events, len(self.notifier.pending_events))
-        self.assertEqual(1, self.spawn_n.call_count)
-
-    def test_queue_event_call_send_events(self):
-        with mock.patch.object(self.notifier,
-                               'callback') as send_events:
-            self.spawn_n.side_effect = lambda func: func()
-            self.notifier.queue_event(mock.Mock())
-            self.assertFalse(self.notifier._waiting_to_send)
-            self.assertTrue(send_events.called)
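The four cases above pin down the batching contract rather than any implementation detail: a falsy event is dropped, the first real event spawns exactly one sender greenthread, and events queued while that sender waits ride along in the same batch. A from-scratch sketch of that contract, assuming eventlet (not the deleted BatchNotifier module):

    import eventlet


    class MiniBatchNotifier(object):
        def __init__(self, batch_interval, callback):
            self.pending_events = []
            self.callback = callback
            self.batch_interval = batch_interval
            self._waiting_to_send = False

        def queue_event(self, event):
            if not event:
                return
            self.pending_events.append(event)
            if self._waiting_to_send:
                return            # an earlier event already has a sender
            self._waiting_to_send = True

            def send_events():
                eventlet.sleep(self.batch_interval)  # let events accumulate
                self._waiting_to_send = False
                batch, self.pending_events = self.pending_events, []
                self.callback(batch)

            eventlet.spawn_n(send_events)
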
diff --git a/neutron/tests/unit/notifiers/test_nova.py b/neutron/tests/unit/notifiers/test_nova.py
deleted file mode 100644 (file)
index 2481a59..0000000
+++ /dev/null
@@ -1,331 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import mock
-from novaclient import exceptions as nova_exceptions
-from oslo_utils import uuidutils
-from sqlalchemy.orm import attributes as sql_attr
-
-from oslo_config import cfg
-
-from neutron.common import constants as n_const
-from neutron.db import models_v2
-from neutron.notifiers import nova
-from neutron.tests import base
-
-DEVICE_OWNER_COMPUTE = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-
-
-class TestNovaNotify(base.BaseTestCase):
-    def setUp(self, plugin=None):
-        super(TestNovaNotify, self).setUp()
-
-        class FakePlugin(object):
-            def get_port(self, context, port_id):
-                device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-                return {'device_id': device_id,
-                        'device_owner': DEVICE_OWNER_COMPUTE}
-
-        self.nova_notifier = nova.Notifier()
-        self.nova_notifier._plugin_ref = FakePlugin()
-
-    def test_notify_port_status_all_values(self):
-        states = [n_const.PORT_STATUS_ACTIVE, n_const.PORT_STATUS_DOWN,
-                  n_const.PORT_STATUS_ERROR, n_const.PORT_STATUS_BUILD,
-                  sql_attr.NO_VALUE]
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        # test all combinations
-        for previous_port_status in states:
-            for current_port_status in states:
-
-                port = models_v2.Port(id='port-uuid', device_id=device_id,
-                                      device_owner=DEVICE_OWNER_COMPUTE,
-                                      status=current_port_status)
-                self._record_port_status_changed_helper(current_port_status,
-                                                        previous_port_status,
-                                                        port)
-
-    def test_port_without_uuid_device_id_no_notify(self):
-        port = models_v2.Port(id='port-uuid', device_id='compute_probe:',
-                              device_owner=DEVICE_OWNER_COMPUTE,
-                              status=n_const.PORT_STATUS_ACTIVE)
-        self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE,
-                                                sql_attr.NO_VALUE,
-                                                port)
-
-    def test_port_without_device_owner_no_notify(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        port = models_v2.Port(id='port-uuid', device_id=device_id,
-                              status=n_const.PORT_STATUS_ACTIVE)
-        self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE,
-                                                sql_attr.NO_VALUE,
-                                                port)
-
-    def test_port_without_device_id_no_notify(self):
-        port = models_v2.Port(id='port-uuid',
-                              device_owner=n_const.DEVICE_OWNER_DHCP,
-                              status=n_const.PORT_STATUS_ACTIVE)
-        self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE,
-                                                sql_attr.NO_VALUE,
-                                                port)
-
-    def test_port_without_id_no_notify(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        port = models_v2.Port(device_id=device_id,
-                              device_owner=DEVICE_OWNER_COMPUTE,
-                              status=n_const.PORT_STATUS_ACTIVE)
-        self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE,
-                                                sql_attr.NO_VALUE,
-                                                port)
-
-    def test_non_compute_instances_no_notify(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        port = models_v2.Port(id='port-uuid', device_id=device_id,
-                              device_owner=n_const.DEVICE_OWNER_DHCP,
-                              status=n_const.PORT_STATUS_ACTIVE)
-        self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE,
-                                                sql_attr.NO_VALUE,
-                                                port)
-
-    def _record_port_status_changed_helper(self, current_port_status,
-                                           previous_port_status, port):
-
-        if not (port.device_id and port.id and port.device_owner and
-                port.device_owner.startswith(
-                    n_const.DEVICE_OWNER_COMPUTE_PREFIX) and
-                uuidutils.is_uuid_like(port.device_id)):
-            return
-
-        if (previous_port_status == n_const.PORT_STATUS_ACTIVE and
-                current_port_status == n_const.PORT_STATUS_DOWN):
-            event_name = nova.VIF_UNPLUGGED
-
-        elif (previous_port_status in [sql_attr.NO_VALUE,
-                                       n_const.PORT_STATUS_DOWN,
-                                       n_const.PORT_STATUS_BUILD]
-              and current_port_status in [n_const.PORT_STATUS_ACTIVE,
-                                          n_const.PORT_STATUS_ERROR]):
-            event_name = nova.VIF_PLUGGED
-
-        else:
-            return
-
-        status = nova.NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status)
-        self.nova_notifier.record_port_status_changed(port,
-                                                      current_port_status,
-                                                      previous_port_status,
-                                                      None)
-
-        event = {'server_uuid': port.device_id, 'status': status,
-                 'name': event_name, 'tag': 'port-uuid'}
-        self.assertEqual(event, port._notify_event)
-
-    def test_update_fixed_ip_changed(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        returned_obj = {'port':
-                        {'device_owner': DEVICE_OWNER_COMPUTE,
-                         'id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222',
-                         'device_id': device_id}}
-
-        expected_event = {'server_uuid': device_id,
-                          'name': 'network-changed'}
-        event = self.nova_notifier.create_port_changed_event('update_port',
-                                                             {}, returned_obj)
-        self.assertEqual(event, expected_event)
-
-    def test_create_floatingip_notify(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        returned_obj = {'floatingip':
-                        {'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}}
-
-        expected_event = {'server_uuid': device_id,
-                          'name': 'network-changed'}
-        event = self.nova_notifier.create_port_changed_event(
-            'create_floatingip', {}, returned_obj)
-        self.assertEqual(event, expected_event)
-
-    def test_create_floatingip_no_port_id_no_notify(self):
-        returned_obj = {'floatingip':
-                        {'port_id': None}}
-
-        event = self.nova_notifier.create_port_changed_event(
-            'create_floatingip', {}, returned_obj)
-        self.assertIsNone(event)
-
-    def test_delete_floatingip_notify(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        returned_obj = {'floatingip':
-                        {'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}}
-
-        expected_event = {'server_uuid': device_id,
-                          'name': 'network-changed'}
-        event = self.nova_notifier.create_port_changed_event(
-            'delete_floatingip', {}, returned_obj)
-        self.assertEqual(expected_event, event)
-
-    def test_delete_floatingip_no_port_id_no_notify(self):
-        returned_obj = {'floatingip':
-                        {'port_id': None}}
-
-        event = self.nova_notifier.create_port_changed_event(
-            'delete_floatingip', {}, returned_obj)
-        self.assertIsNone(event)
-
-    def test_associate_floatingip_notify(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        returned_obj = {'floatingip':
-                        {'port_id': u'5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}}
-        original_obj = {'port_id': None}
-
-        expected_event = {'server_uuid': device_id,
-                          'name': 'network-changed'}
-        event = self.nova_notifier.create_port_changed_event(
-            'update_floatingip', original_obj, returned_obj)
-        self.assertEqual(expected_event, event)
-
-    def test_disassociate_floatingip_notify(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        returned_obj = {'floatingip': {'port_id': None}}
-        original_obj = {'port_id': '5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}
-
-        expected_event = {'server_uuid': device_id,
-                          'name': 'network-changed'}
-
-        event = self.nova_notifier.create_port_changed_event(
-            'update_floatingip', original_obj, returned_obj)
-        self.assertEqual(expected_event, event)
-
-    def test_no_notification_notify_nova_on_port_data_changes_false(self):
-        cfg.CONF.set_override('notify_nova_on_port_data_changes', False)
-
-        with mock.patch.object(self.nova_notifier,
-                               'send_events') as send_events:
-            self.nova_notifier.send_network_change('update_floatingip',
-                                                   {}, {})
-            self.assertFalse(send_events.called)
-
-    def test_nova_send_events_returns_bad_list(self):
-        with mock.patch.object(
-            self.nova_notifier.nclient.server_external_events,
-                'create') as nclient_create:
-            nclient_create.return_value = 'i am a string!'
-            self.nova_notifier.send_events([])
-
-    def test_nova_send_events_raises_404(self):
-        with mock.patch.object(
-            self.nova_notifier.nclient.server_external_events,
-                'create') as nclient_create:
-            nclient_create.side_effect = nova_exceptions.NotFound
-            self.nova_notifier.send_events([])
-
-    def test_nova_send_events_raises(self):
-        with mock.patch.object(
-            self.nova_notifier.nclient.server_external_events,
-                'create') as nclient_create:
-            nclient_create.side_effect = Exception
-            self.nova_notifier.send_events([])
-
-    def test_nova_send_events_returns_non_200(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        with mock.patch.object(
-            self.nova_notifier.nclient.server_external_events,
-                'create') as nclient_create:
-            nclient_create.return_value = [{'code': 404,
-                                            'name': 'network-changed',
-                                            'server_uuid': device_id}]
-            self.nova_notifier.send_events(
-                [{'name': 'network-changed', 'server_uuid': device_id}])
-
-    def test_nova_send_events_return_200(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        with mock.patch.object(
-            self.nova_notifier.nclient.server_external_events,
-                'create') as nclient_create:
-            nclient_create.return_value = [{'code': 200,
-                                            'name': 'network-changed',
-                                            'server_uuid': device_id}]
-            self.nova_notifier.send_events(
-                [{'name': 'network-changed', 'server_uuid': device_id}])
-
-    def test_nova_send_events_multiple(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        with mock.patch.object(
-            self.nova_notifier.nclient.server_external_events,
-                'create') as nclient_create:
-            nclient_create.return_value = [{'code': 200,
-                                            'name': 'network-changed',
-                                            'server_uuid': device_id},
-                                           {'code': 200,
-                                            'name': 'network-changed',
-                                            'server_uuid': device_id}]
-            self.nova_notifier.send_events([
-                {'name': 'network-changed', 'server_uuid': device_id},
-                {'name': 'network-changed', 'server_uuid': device_id}])
-
-    def test_reassociate_floatingip_without_disassociate_event(self):
-        returned_obj = {'floatingip':
-                        {'port_id': 'f5348a16-609a-4971-b0f0-4b8def5235fb'}}
-        original_obj = {'port_id': '5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}
-        self.nova_notifier._waiting_to_send = True
-        self.nova_notifier.send_network_change(
-            'update_floatingip', original_obj, returned_obj)
-        self.assertEqual(
-            2, len(self.nova_notifier.batch_notifier.pending_events))
-
-        returned_obj_non = {'floatingip': {'port_id': None}}
-        event_dis = self.nova_notifier.create_port_changed_event(
-            'update_floatingip', original_obj, returned_obj_non)
-        event_assoc = self.nova_notifier.create_port_changed_event(
-            'update_floatingip', original_obj, returned_obj)
-        self.assertEqual(
-            self.nova_notifier.batch_notifier.pending_events[0], event_dis)
-        self.assertEqual(
-            self.nova_notifier.batch_notifier.pending_events[1], event_assoc)
-
-    def test_delete_port_notify(self):
-        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
-        port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222'
-        returned_obj = {'port':
-                        {'device_owner': DEVICE_OWNER_COMPUTE,
-                         'id': port_id,
-                         'device_id': device_id}}
-
-        expected_event = {'server_uuid': device_id,
-                          'name': nova.VIF_DELETED,
-                          'tag': port_id}
-        event = self.nova_notifier.create_port_changed_event('delete_port',
-                                                             {}, returned_obj)
-        self.assertEqual(expected_event, event)
-
-    @mock.patch('novaclient.client.Client')
-    def test_endpoint_types(self, mock_client):
-        nova.Notifier()
-        mock_client.assert_called_once_with(
-                                        nova.NOVA_API_VERSION,
-                                        session=mock.ANY,
-                                        region_name=cfg.CONF.nova.region_name,
-                                        endpoint_type='public',
-                                        extensions=mock.ANY)
-
-        mock_client.reset_mock()
-        cfg.CONF.set_override('endpoint_type', 'internal', 'nova')
-        nova.Notifier()
-        mock_client.assert_called_once_with(
-                                        nova.NOVA_API_VERSION,
-                                        session=mock.ANY,
-                                        region_name=cfg.CONF.nova.region_name,
-                                        endpoint_type='internal',
-                                        extensions=mock.ANY)
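The _record_port_status_changed_helper above re-derives the notifier's decision table, which is worth stating on its own: ACTIVE to DOWN means the VIF was unplugged, a rise from NO_VALUE, DOWN, or BUILD to ACTIVE or ERROR means it was plugged, and every other transition is ignored. As a standalone function (status strings as in the neutron constants; event names assumed to match the notifier's VIF_* constants):

    VIF_PLUGGED = 'network-vif-plugged'
    VIF_UNPLUGGED = 'network-vif-unplugged'
    NO_VALUE = object()   # stand-in for sqlalchemy's NO_VALUE sentinel


    def event_for_transition(previous, current):
        """Return the nova event for a port status change, or None."""
        if previous == 'ACTIVE' and current == 'DOWN':
            return VIF_UNPLUGGED
        if (previous in (NO_VALUE, 'DOWN', 'BUILD') and
                current in ('ACTIVE', 'ERROR')):
            return VIF_PLUGGED
        return None   # nova does not care about this transition
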
diff --git a/neutron/tests/unit/objects/__init__.py b/neutron/tests/unit/objects/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/objects/qos/__init__.py b/neutron/tests/unit/objects/qos/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/objects/qos/test_policy.py b/neutron/tests/unit/objects/qos/test_policy.py
deleted file mode 100644 (file)
index 05baad5..0000000
+++ /dev/null
@@ -1,295 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.common import exceptions as n_exc
-from neutron.db import api as db_api
-from neutron.db import models_v2
-from neutron.objects.qos import policy
-from neutron.objects.qos import rule
-from neutron.tests.unit.objects import test_base
-from neutron.tests.unit import testlib_api
-
-
-class QosPolicyObjectTestCase(test_base.BaseObjectIfaceTestCase):
-
-    _test_class = policy.QosPolicy
-
-    def setUp(self):
-        super(QosPolicyObjectTestCase, self).setUp()
-        # qos_policy_ids will be incorrect, but we don't care in this test
-        self.db_qos_bandwidth_rules = [
-            self.get_random_fields(rule.QosBandwidthLimitRule)
-            for _ in range(3)]
-
-        self.model_map = {
-            self._test_class.db_model: self.db_objs,
-            rule.QosBandwidthLimitRule.db_model: self.db_qos_bandwidth_rules}
-
-    def fake_get_objects(self, context, model, **kwargs):
-        return self.model_map[model]
-
-    def fake_get_object(self, context, model, id):
-        objects = self.model_map[model]
-        return [obj for obj in objects if obj['id'] == id][0]
-
-    def test_get_objects(self):
-        admin_context = self.context.elevated()
-        with mock.patch.object(
-            db_api, 'get_objects',
-            side_effect=self.fake_get_objects) as get_objects_mock:
-
-            with mock.patch.object(
-                db_api, 'get_object',
-                side_effect=self.fake_get_object):
-
-                with mock.patch.object(
-                    self.context,
-                    'elevated',
-                    return_value=admin_context) as context_mock:
-
-                    objs = self._test_class.get_objects(self.context)
-                    context_mock.assert_called_once_with()
-            get_objects_mock.assert_any_call(
-                admin_context, self._test_class.db_model)
-        self._validate_objects(self.db_objs, objs)
-
-    def test_get_objects_valid_fields(self):
-        admin_context = self.context.elevated()
-
-        with mock.patch.object(
-            db_api, 'get_objects',
-            return_value=[self.db_obj]) as get_objects_mock:
-
-            with mock.patch.object(
-                self.context,
-                'elevated',
-                return_value=admin_context) as context_mock:
-
-                objs = self._test_class.get_objects(
-                    self.context,
-                    **self.valid_field_filter)
-                context_mock.assert_called_once_with()
-            get_objects_mock.assert_any_call(
-                admin_context, self._test_class.db_model,
-                **self.valid_field_filter)
-        self._validate_objects([self.db_obj], objs)
-
-    def test_get_by_id(self):
-        admin_context = self.context.elevated()
-        with mock.patch.object(db_api, 'get_object',
-                               return_value=self.db_obj) as get_object_mock:
-            with mock.patch.object(self.context,
-                                   'elevated',
-                                   return_value=admin_context) as context_mock:
-                obj = self._test_class.get_by_id(self.context, id='fake_id')
-                self.assertTrue(self._is_test_class(obj))
-                self.assertEqual(self.db_obj, test_base.get_obj_db_fields(obj))
-                context_mock.assert_called_once_with()
-                get_object_mock.assert_called_once_with(
-                    admin_context, self._test_class.db_model, id='fake_id')
-
-
-class QosPolicyDbObjectTestCase(test_base.BaseDbObjectTestCase,
-                                testlib_api.SqlTestCase):
-
-    _test_class = policy.QosPolicy
-
-    def setUp(self):
-        super(QosPolicyDbObjectTestCase, self).setUp()
-        self._create_test_network()
-        self._create_test_port(self._network)
-
-    def _create_test_policy(self):
-        policy_obj = policy.QosPolicy(self.context, **self.db_obj)
-        policy_obj.create()
-        return policy_obj
-
-    def _create_test_policy_with_rule(self):
-        policy_obj = self._create_test_policy()
-
-        rule_fields = self.get_random_fields(
-            obj_cls=rule.QosBandwidthLimitRule)
-        rule_fields['qos_policy_id'] = policy_obj.id
-
-        rule_obj = rule.QosBandwidthLimitRule(self.context, **rule_fields)
-        rule_obj.create()
-
-        return policy_obj, rule_obj
-
-    def _create_test_network(self):
-        # TODO(ihrachys): replace with network.create() once we get an object
-        # implementation for networks
-        self._network = db_api.create_object(self.context, models_v2.Network,
-                                             {'name': 'test-network1'})
-
-    def _create_test_port(self, network):
-        # TODO(ihrachys): replace with port.create() once we get an object
-        # implementation for ports
-        self._port = db_api.create_object(self.context, models_v2.Port,
-                                          {'name': 'test-port1',
-                                           'network_id': network['id'],
-                                           'mac_address': 'fake_mac',
-                                           'admin_state_up': True,
-                                           'status': 'ACTIVE',
-                                           'device_id': 'fake_device',
-                                           'device_owner': 'fake_owner'})
-
-    def test_attach_network_get_network_policy(self):
-
-        obj = self._create_test_policy()
-
-        policy_obj = policy.QosPolicy.get_network_policy(self.context,
-                                                         self._network['id'])
-        self.assertIsNone(policy_obj)
-
-        # Now attach policy and repeat
-        obj.attach_network(self._network['id'])
-
-        policy_obj = policy.QosPolicy.get_network_policy(self.context,
-                                                         self._network['id'])
-        self.assertEqual(obj, policy_obj)
-
-    def test_attach_network_nonexistent_network(self):
-
-        obj = self._create_test_policy()
-        self.assertRaises(n_exc.NetworkQosBindingNotFound,
-                          obj.attach_network, 'non-existent-network')
-
-    def test_attach_port_nonexistent_port(self):
-
-        obj = self._create_test_policy()
-        self.assertRaises(n_exc.PortQosBindingNotFound,
-                          obj.attach_port, 'non-existent-port')
-
-    def test_attach_network_nonexistent_policy(self):
-
-        policy_obj = policy.QosPolicy(self.context, **self.db_obj)
-        self.assertRaises(n_exc.NetworkQosBindingNotFound,
-                          policy_obj.attach_network, self._network['id'])
-
-    def test_attach_port_nonexistent_policy(self):
-
-        policy_obj = policy.QosPolicy(self.context, **self.db_obj)
-        self.assertRaises(n_exc.PortQosBindingNotFound,
-                          policy_obj.attach_port, self._port['id'])
-
-    def test_attach_port_get_port_policy(self):
-
-        obj = self._create_test_policy()
-
-        policy_obj = policy.QosPolicy.get_network_policy(self.context,
-                                                         self._network['id'])
-
-        self.assertIsNone(policy_obj)
-
-        # Now attach policy and repeat
-        obj.attach_port(self._port['id'])
-
-        policy_obj = policy.QosPolicy.get_port_policy(self.context,
-                                                      self._port['id'])
-        self.assertEqual(obj, policy_obj)
-
-    def test_detach_port(self):
-        obj = self._create_test_policy()
-        obj.attach_port(self._port['id'])
-        obj.detach_port(self._port['id'])
-
-        policy_obj = policy.QosPolicy.get_port_policy(self.context,
-                                                      self._port['id'])
-        self.assertIsNone(policy_obj)
-
-    def test_detach_network(self):
-        obj = self._create_test_policy()
-        obj.attach_network(self._network['id'])
-        obj.detach_network(self._network['id'])
-
-        policy_obj = policy.QosPolicy.get_network_policy(self.context,
-                                                         self._network['id'])
-        self.assertIsNone(policy_obj)
-
-    def test_detach_port_nonexistent_port(self):
-        obj = self._create_test_policy()
-        self.assertRaises(n_exc.PortQosBindingNotFound,
-                          obj.detach_port, 'non-existent-port')
-
-    def test_detach_network_nonexistent_network(self):
-        obj = self._create_test_policy()
-        self.assertRaises(n_exc.NetworkQosBindingNotFound,
-                          obj.detach_network, 'non-existent-port')
-
-    def test_detach_port_nonexistent_policy(self):
-        policy_obj = policy.QosPolicy(self.context, **self.db_obj)
-        self.assertRaises(n_exc.PortQosBindingNotFound,
-                          policy_obj.detach_port, self._port['id'])
-
-    def test_detach_network_nonexistent_policy(self):
-        policy_obj = policy.QosPolicy(self.context, **self.db_obj)
-        self.assertRaises(n_exc.NetworkQosBindingNotFound,
-                          policy_obj.detach_network, self._network['id'])
-
-    def test_synthetic_rule_fields(self):
-        policy_obj, rule_obj = self._create_test_policy_with_rule()
-        policy_obj = policy.QosPolicy.get_by_id(self.context, policy_obj.id)
-        self.assertEqual([rule_obj], policy_obj.rules)
-
-    def test_get_by_id_fetches_rules_non_lazily(self):
-        policy_obj, rule_obj = self._create_test_policy_with_rule()
-        policy_obj = policy.QosPolicy.get_by_id(self.context, policy_obj.id)
-
-        primitive = policy_obj.obj_to_primitive()
-        self.assertNotEqual([], (primitive['versioned_object.data']['rules']))
-
-    def test_to_dict_returns_rules_as_dicts(self):
-        policy_obj, rule_obj = self._create_test_policy_with_rule()
-        policy_obj = policy.QosPolicy.get_by_id(self.context, policy_obj.id)
-
-        obj_dict = policy_obj.to_dict()
-        rule_dict = rule_obj.to_dict()
-
-        # first make sure that to_dict() is still sane and does not return
-        # objects
-        for obj in (rule_dict, obj_dict):
-            self.assertIsInstance(obj, dict)
-
-        self.assertEqual(rule_dict, obj_dict['rules'][0])
-
-    def test_shared_default(self):
-        self.db_obj.pop('shared')
-        obj = self._test_class(self.context, **self.db_obj)
-        self.assertFalse(obj.shared)
-
-    def test_delete_not_allowed_if_policy_in_use_by_port(self):
-        obj = self._create_test_policy()
-        obj.attach_port(self._port['id'])
-
-        self.assertRaises(n_exc.QosPolicyInUse, obj.delete)
-
-        obj.detach_port(self._port['id'])
-        obj.delete()
-
-    def test_delete_not_allowed_if_policy_in_use_by_network(self):
-        obj = self._create_test_policy()
-        obj.attach_network(self._network['id'])
-
-        self.assertRaises(n_exc.QosPolicyInUse, obj.delete)
-
-        obj.detach_network(self._network['id'])
-        obj.delete()
-
-    def test_reload_rules_reloads_rules(self):
-        policy_obj, rule_obj = self._create_test_policy_with_rule()
-        self.assertEqual([], policy_obj.rules)
-
-        policy_obj.reload_rules()
-        self.assertEqual([rule_obj], policy_obj.rules)
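The two delete tests near the end encode a referential-integrity guard that is easy to restate in isolation: deletion must fail while any network or port still references the policy, and succeed once everything is detached. A toy model of that guard (plain Python, no database, illustrative only):

    class QosPolicyInUse(Exception):
        pass


    class ToyPolicy(object):
        def __init__(self):
            self.bound_to = set()  # ids of networks/ports using this policy

        def attach(self, obj_id):
            self.bound_to.add(obj_id)

        def detach(self, obj_id):
            self.bound_to.remove(obj_id)

        def delete(self):
            if self.bound_to:
                raise QosPolicyInUse()


    p = ToyPolicy()
    p.attach('port-1')
    try:
        p.delete()                 # refused while the port references it
    except QosPolicyInUse:
        p.detach('port-1')
        p.delete()                 # succeeds once nothing is attached
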
diff --git a/neutron/tests/unit/objects/qos/test_rule.py b/neutron/tests/unit/objects/qos/test_rule.py
deleted file mode 100644 (file)
index e737782..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import constants
-from neutron.objects.qos import policy
-from neutron.objects.qos import rule
-from neutron.services.qos import qos_consts
-from neutron.tests import base as neutron_test_base
-from neutron.tests.unit.objects import test_base
-from neutron.tests.unit import testlib_api
-
-POLICY_ID_A = 'policy-id-a'
-POLICY_ID_B = 'policy-id-b'
-DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-
-
-class QosRuleObjectTestCase(neutron_test_base.BaseTestCase):
-
-    def _test_should_apply_to_port(self, rule_policy_id, port_policy_id,
-                                   device_owner, expected_result):
-        test_rule = rule.QosRule(qos_policy_id=rule_policy_id)
-        port = {qos_consts.QOS_POLICY_ID: port_policy_id,
-                'device_owner': device_owner}
-        self.assertEqual(expected_result, test_rule.should_apply_to_port(port))
-
-    def test_should_apply_to_port_with_network_port_and_net_policy(self):
-        self._test_should_apply_to_port(
-            rule_policy_id=POLICY_ID_B,
-            port_policy_id=POLICY_ID_A,
-            device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
-            expected_result=False)
-
-    def test_should_apply_to_port_with_network_port_and_port_policy(self):
-        self._test_should_apply_to_port(
-            rule_policy_id=POLICY_ID_A,
-            port_policy_id=POLICY_ID_A,
-            device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
-            expected_result=True)
-
-    def test_should_apply_to_port_with_compute_port_and_net_policy(self):
-        self._test_should_apply_to_port(
-            rule_policy_id=POLICY_ID_B,
-            port_policy_id=POLICY_ID_A,
-            device_owner=DEVICE_OWNER_COMPUTE,
-            expected_result=True)
-
-    def test_should_apply_to_port_with_compute_port_and_port_policy(self):
-        self._test_should_apply_to_port(
-            rule_policy_id=POLICY_ID_A,
-            port_policy_id=POLICY_ID_A,
-            device_owner=DEVICE_OWNER_COMPUTE,
-            expected_result=True)
-
-
-class QosBandwidthLimitRuleObjectTestCase(test_base.BaseObjectIfaceTestCase):
-
-    _test_class = rule.QosBandwidthLimitRule
-
-    def test_to_dict_returns_type(self):
-        obj = rule.QosBandwidthLimitRule(self.context, **self.db_obj)
-        dict_ = obj.to_dict()
-        self.assertEqual(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, dict_['type'])
-
-
-class QosBandwidthLimitRuleDbObjectTestCase(test_base.BaseDbObjectTestCase,
-                                            testlib_api.SqlTestCase):
-
-    _test_class = rule.QosBandwidthLimitRule
-
-    def setUp(self):
-        super(QosBandwidthLimitRuleDbObjectTestCase, self).setUp()
-
-        # Prepare policy to be able to insert a rule
-        generated_qos_policy_id = self.db_obj['qos_policy_id']
-        policy_obj = policy.QosPolicy(self.context,
-                                      id=generated_qos_policy_id)
-        policy_obj.create()
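The should_apply_to_port matrix near the top of this file reduces to one rule: a network-owned port (device_owner starting with neutron's 'network:' prefix) honours only a policy bound to the port itself, while compute and other ports accept either the port policy or the network policy. As a pure function with the constants inlined:

    def should_apply_to_port(rule_policy_id, port_policy_id, device_owner):
        if device_owner.startswith('network:'):
            # e.g. router interfaces: only a port-attached policy counts
            return port_policy_id == rule_policy_id
        return True   # compute ports: network or port policy both apply
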
diff --git a/neutron/tests/unit/objects/qos/test_rule_type.py b/neutron/tests/unit/objects/qos/test_rule_type.py
deleted file mode 100644 (file)
index b9a3159..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# rule types are so different from other objects that we don't base the test
-# class on the common base class for all objects
-
-import mock
-
-from neutron import manager
-from neutron.objects.qos import rule_type
-from neutron.services.qos import qos_consts
-from neutron.tests import base as test_base
-
-
-DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
-
-
-class QosRuleTypeObjectTestCase(test_base.BaseTestCase):
-
-    def setUp(self):
-        self.config_parse()
-        self.setup_coreplugin(DB_PLUGIN_KLASS)
-        super(QosRuleTypeObjectTestCase, self).setUp()
-
-    def test_get_objects(self):
-        core_plugin = manager.NeutronManager.get_plugin()
-        rule_types_mock = mock.PropertyMock(
-            return_value=qos_consts.VALID_RULE_TYPES)
-        with mock.patch.object(core_plugin, 'supported_qos_rule_types',
-                               new_callable=rule_types_mock,
-                               create=True):
-            types = rule_type.QosRuleType.get_objects()
-            self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES),
-                             sorted(type_['type'] for type_ in types))
-
-    def test_wrong_type(self):
-        self.assertRaises(ValueError, rule_type.QosRuleType, type='bad_type')
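test_get_objects above leans on a slightly unusual mock idiom: patching a property that need not exist on the plugin class (hence create=True) so that plain attribute access yields a canned list. The canonical form of that idiom in isolation, runnable with just the mock library:

    import mock


    class Plugin(object):
        pass


    with mock.patch.object(Plugin, 'supported_qos_rule_types',
                           new_callable=mock.PropertyMock,
                           create=True) as rule_types:
        rule_types.return_value = ['bandwidth_limit']
        print(Plugin().supported_qos_rule_types)  # ['bandwidth_limit']
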
diff --git a/neutron/tests/unit/objects/test_base.py b/neutron/tests/unit/objects/test_base.py
deleted file mode 100644 (file)
index 34743ac..0000000
+++ /dev/null
@@ -1,345 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-import mock
-from oslo_db import exception as obj_exc
-from oslo_versionedobjects import base as obj_base
-from oslo_versionedobjects import fields as obj_fields
-
-from neutron.common import exceptions as n_exc
-from neutron.common import utils as common_utils
-from neutron import context
-from neutron.db import api as db_api
-from neutron.objects import base
-from neutron.tests import base as test_base
-from neutron.tests import tools
-
-
-SQLALCHEMY_COMMIT = 'sqlalchemy.engine.Connection._commit_impl'
-OBJECTS_BASE_OBJ_FROM_PRIMITIVE = ('oslo_versionedobjects.base.'
-                                   'VersionedObject.obj_from_primitive')
-
-
-class FakeModel(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-
-@obj_base.VersionedObjectRegistry.register_if(False)
-class FakeNeutronObject(base.NeutronDbObject):
-    # Version 1.0: Initial version
-    VERSION = '1.0'
-
-    db_model = FakeModel
-
-    fields = {
-        'id': obj_fields.UUIDField(),
-        'field1': obj_fields.StringField(),
-        'field2': obj_fields.StringField()
-    }
-
-    fields_no_update = ['id']
-
-    synthetic_fields = ['field2']
-
-
-FIELD_TYPE_VALUE_GENERATOR_MAP = {
-    obj_fields.BooleanField: tools.get_random_boolean,
-    obj_fields.IntegerField: tools.get_random_integer,
-    obj_fields.StringField: tools.get_random_string,
-    obj_fields.UUIDField: tools.get_random_string,
-    obj_fields.ListOfObjectsField: lambda: []
-}
-
-
-def get_obj_db_fields(obj):
-    return {field: getattr(obj, field) for field in obj.fields
-            if field not in obj.synthetic_fields}
-
-
-class _BaseObjectTestCase(object):
-
-    _test_class = FakeNeutronObject
-
-    def setUp(self):
-        super(_BaseObjectTestCase, self).setUp()
-        self.context = context.get_admin_context()
-        self.db_objs = list(self.get_random_fields() for _ in range(3))
-        self.db_obj = self.db_objs[0]
-
-        valid_field = [f for f in self._test_class.fields
-                       if f not in self._test_class.synthetic_fields][0]
-        self.valid_field_filter = {valid_field: self.db_obj[valid_field]}
-
-    @classmethod
-    def get_random_fields(cls, obj_cls=None):
-        obj_cls = obj_cls or cls._test_class
-        fields = {}
-        for field, field_obj in obj_cls.fields.items():
-            if field not in obj_cls.synthetic_fields:
-                generator = FIELD_TYPE_VALUE_GENERATOR_MAP[type(field_obj)]
-                fields[field] = generator()
-        return fields
-
-    def get_updatable_fields(self, fields):
-        return base.get_updatable_fields(self._test_class, fields)
-
-    @classmethod
-    def _is_test_class(cls, obj):
-        return isinstance(obj, cls._test_class)
-
-
-class BaseObjectIfaceTestCase(_BaseObjectTestCase, test_base.BaseTestCase):
-
-    def test_get_by_id(self):
-        with mock.patch.object(db_api, 'get_object',
-                               return_value=self.db_obj) as get_object_mock:
-            obj = self._test_class.get_by_id(self.context, id='fake_id')
-            self.assertTrue(self._is_test_class(obj))
-            self.assertEqual(self.db_obj, get_obj_db_fields(obj))
-            get_object_mock.assert_called_once_with(
-                self.context, self._test_class.db_model, id='fake_id')
-
-    def test_get_by_id_missing_object(self):
-        with mock.patch.object(db_api, 'get_object', return_value=None):
-            obj = self._test_class.get_by_id(self.context, id='fake_id')
-            self.assertIsNone(obj)
-
-    def test_get_objects(self):
-        with mock.patch.object(db_api, 'get_objects',
-                               return_value=self.db_objs) as get_objects_mock:
-            objs = self._test_class.get_objects(self.context)
-            self._validate_objects(self.db_objs, objs)
-        get_objects_mock.assert_called_once_with(
-            self.context, self._test_class.db_model)
-
-    def test_get_objects_valid_fields(self):
-        with mock.patch.object(
-            db_api, 'get_objects',
-            return_value=[self.db_obj]) as get_objects_mock:
-
-            objs = self._test_class.get_objects(self.context,
-                                                **self.valid_field_filter)
-            self._validate_objects([self.db_obj], objs)
-
-        get_objects_mock.assert_called_with(
-            self.context, self._test_class.db_model,
-            **self.valid_field_filter)
-
-    def test_get_objects_mixed_fields(self):
-        synthetic_fields = self._test_class.synthetic_fields
-        if not synthetic_fields:
-            self.skipTest('No synthetic fields found in test class %r' %
-                          self._test_class)
-
-        filters = copy.copy(self.valid_field_filter)
-        filters[synthetic_fields[0]] = 'xxx'
-
-        with mock.patch.object(db_api, 'get_objects',
-                               return_value=self.db_objs):
-            self.assertRaises(base.exceptions.InvalidInput,
-                              self._test_class.get_objects, self.context,
-                              **filters)
-
-    def test_get_objects_synthetic_fields(self):
-        synthetic_fields = self._test_class.synthetic_fields
-        if not synthetic_fields:
-            self.skipTest('No synthetic fields found in test class %r' %
-                          self._test_class)
-
-        with mock.patch.object(db_api, 'get_objects',
-                               return_value=self.db_objs):
-            self.assertRaises(base.exceptions.InvalidInput,
-                              self._test_class.get_objects, self.context,
-                              **{synthetic_fields[0]: 'xxx'})
-
-    def test_get_objects_invalid_fields(self):
-        with mock.patch.object(db_api, 'get_objects',
-                               return_value=self.db_objs):
-            self.assertRaises(base.exceptions.InvalidInput,
-                              self._test_class.get_objects, self.context,
-                              fake_field='xxx')
-
-    def _validate_objects(self, expected, observed):
-        self.assertTrue(all(self._is_test_class(obj) for obj in observed))
-        self.assertEqual(
-            sorted(expected,
-                   key=common_utils.safe_sort_key),
-            sorted([get_obj_db_fields(obj) for obj in observed],
-                   key=common_utils.safe_sort_key))
-
-    def _check_equal(self, obj, db_obj):
-        self.assertEqual(
-            sorted(db_obj),
-            sorted(get_obj_db_fields(obj)))
-
-    def test_create(self):
-        with mock.patch.object(db_api, 'create_object',
-                               return_value=self.db_obj) as create_mock:
-            obj = self._test_class(self.context, **self.db_obj)
-            self._check_equal(obj, self.db_obj)
-            obj.create()
-            self._check_equal(obj, self.db_obj)
-            create_mock.assert_called_once_with(
-                self.context, self._test_class.db_model, self.db_obj)
-
-    def test_create_updates_from_db_object(self):
-        with mock.patch.object(db_api, 'create_object',
-                               return_value=self.db_obj):
-            obj = self._test_class(self.context, **self.db_objs[1])
-            self._check_equal(obj, self.db_objs[1])
-            obj.create()
-            self._check_equal(obj, self.db_obj)
-
-    def test_create_duplicates(self):
-        with mock.patch.object(db_api, 'create_object',
-                               side_effect=obj_exc.DBDuplicateEntry):
-            obj = self._test_class(self.context, **self.db_obj)
-            self.assertRaises(base.NeutronDbObjectDuplicateEntry, obj.create)
-
-    @mock.patch.object(db_api, 'update_object')
-    def test_update_no_changes(self, update_mock):
-        with mock.patch.object(base.NeutronDbObject,
-                               '_get_changed_persistent_fields',
-                               return_value={}):
-            obj = self._test_class(self.context)
-            obj.update()
-            self.assertFalse(update_mock.called)
-
-    @mock.patch.object(db_api, 'update_object')
-    def test_update_changes(self, update_mock):
-        fields_to_update = self.get_updatable_fields(self.db_obj)
-        with mock.patch.object(base.NeutronDbObject,
-                               '_get_changed_persistent_fields',
-                               return_value=fields_to_update):
-            obj = self._test_class(self.context, **self.db_obj)
-            obj.update()
-            update_mock.assert_called_once_with(
-                self.context, self._test_class.db_model,
-                self.db_obj['id'], fields_to_update)
-
-    @mock.patch.object(base.NeutronDbObject,
-                       '_get_changed_persistent_fields',
-                       return_value={'a': 'a', 'b': 'b', 'c': 'c'})
-    def test_update_changes_forbidden(self, *mocks):
-        with mock.patch.object(
-            self._test_class,
-            'fields_no_update',
-            new_callable=mock.PropertyMock(return_value=['a', 'c']),
-            create=True):
-            obj = self._test_class(self.context, **self.db_obj)
-            self.assertRaises(base.NeutronObjectUpdateForbidden, obj.update)
-
-    def test_update_updates_from_db_object(self):
-        with mock.patch.object(db_api, 'update_object',
-                               return_value=self.db_obj):
-            obj = self._test_class(self.context, **self.db_objs[1])
-            fields_to_update = self.get_updatable_fields(self.db_objs[1])
-            with mock.patch.object(base.NeutronDbObject,
-                                   '_get_changed_persistent_fields',
-                                   return_value=fields_to_update):
-                obj.update()
-            self._check_equal(obj, self.db_obj)
-
-    @mock.patch.object(db_api, 'delete_object')
-    def test_delete(self, delete_mock):
-        obj = self._test_class(self.context, **self.db_obj)
-        self._check_equal(obj, self.db_obj)
-        obj.delete()
-        self._check_equal(obj, self.db_obj)
-        delete_mock.assert_called_once_with(
-            self.context, self._test_class.db_model, self.db_obj['id'])
-
-    @mock.patch(OBJECTS_BASE_OBJ_FROM_PRIMITIVE)
-    def test_clean_obj_from_primitive(self, get_prim_m):
-        expected_obj = get_prim_m.return_value
-        observed_obj = self._test_class.clean_obj_from_primitive('foo', 'bar')
-        self.assertIs(expected_obj, observed_obj)
-        self.assertTrue(observed_obj.obj_reset_changes.called)
-
-
-class BaseDbObjectTestCase(_BaseObjectTestCase):
-
-    def test_get_by_id_create_update_delete(self):
-        obj = self._test_class(self.context, **self.db_obj)
-        obj.create()
-
-        new = self._test_class.get_by_id(self.context, id=obj.id)
-        self.assertEqual(obj, new)
-
-        obj = new
-
-        for key, val in self.get_updatable_fields(self.db_objs[1]).items():
-            setattr(obj, key, val)
-        obj.update()
-
-        new = self._test_class.get_by_id(self.context, id=obj.id)
-        self.assertEqual(obj, new)
-
-        obj = new
-        new.delete()
-
-        new = self._test_class.get_by_id(self.context, id=obj.id)
-        self.assertIsNone(new)
-
-    def test_update_non_existent_object_raises_not_found(self):
-        obj = self._test_class(self.context, **self.db_obj)
-        obj.obj_reset_changes()
-
-        for key, val in self.get_updatable_fields(self.db_obj).items():
-            setattr(obj, key, val)
-
-        self.assertRaises(n_exc.ObjectNotFound, obj.update)
-
-    def test_delete_non_existent_object_raises_not_found(self):
-        obj = self._test_class(self.context, **self.db_obj)
-        self.assertRaises(n_exc.ObjectNotFound, obj.delete)
-
-    @mock.patch(SQLALCHEMY_COMMIT)
-    def test_create_single_transaction(self, mock_commit):
-        obj = self._test_class(self.context, **self.db_obj)
-        obj.create()
-        self.assertEqual(1, mock_commit.call_count)
-
-    def test_update_single_transaction(self):
-        obj = self._test_class(self.context, **self.db_obj)
-        obj.create()
-
-        for key, val in self.get_updatable_fields(self.db_obj).items():
-            setattr(obj, key, val)
-
-        with mock.patch(SQLALCHEMY_COMMIT) as mock_commit:
-            obj.update()
-        self.assertEqual(1, mock_commit.call_count)
-
-    def test_delete_single_transaction(self):
-        obj = self._test_class(self.context, **self.db_obj)
-        obj.create()
-
-        with mock.patch(SQLALCHEMY_COMMIT) as mock_commit:
-            obj.delete()
-        self.assertEqual(1, mock_commit.call_count)
-
-    @mock.patch(SQLALCHEMY_COMMIT)
-    def test_get_objects_single_transaction(self, mock_commit):
-        self._test_class.get_objects(self.context)
-        self.assertEqual(1, mock_commit.call_count)
-
-    @mock.patch(SQLALCHEMY_COMMIT)
-    def test_get_by_id_single_transaction(self, mock_commit):
-        obj = self._test_class(self.context, **self.db_obj)
-        obj.create()
-
-        obj = self._test_class.get_by_id(self.context, obj.id)
-        self.assertEqual(2, mock_commit.call_count)
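
The single-transaction tests removed above all follow one pattern: patch SQLAlchemy's commit entry point and count calls. A minimal, self-contained sketch of that pattern, assuming SQLALCHEMY_COMMIT names sqlalchemy.orm.session.Session.commit (the real constant is defined elsewhere in the deleted module):

    import mock
    import sqlalchemy.orm

    # Assumption: the module-level constant points at Session.commit.
    SQLALCHEMY_COMMIT = 'sqlalchemy.orm.session.Session.commit'

    def _single_db_operation(session):
        # Stand-in for obj.create() / obj.update() / obj.delete().
        session.commit()

    with mock.patch(SQLALCHEMY_COMMIT) as mock_commit:
        _single_db_operation(sqlalchemy.orm.Session())
    assert mock_commit.call_count == 1  # exactly one COMMIT issued
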
diff --git a/neutron/tests/unit/objects/test_objects.py b/neutron/tests/unit/objects/test_objects.py
deleted file mode 100644 (file)
index 341fc0f..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2015 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import pprint
-
-from oslo_versionedobjects import base as obj_base
-from oslo_versionedobjects import fixture
-
-from neutron.tests import base as test_base
-
-
-# NOTE: The hashes in this list should only be changed if they come with a
-# corresponding version bump in the affected objects.
-object_data = {
-    'QosBandwidthLimitRule': '1.0-4e44a8f5c2895ab1278399f87b40a13d',
-    'QosRuleType': '1.0-d0df298d49eeffab91af18d1a4cf7eaf',
-    'QosPolicy': '1.0-721fa60ea8f0e8f15d456d6e917dfe59',
-}
-
-
-class TestObjectVersions(test_base.BaseTestCase):
-
-    def test_versions(self):
-        checker = fixture.ObjectVersionChecker(
-            obj_base.VersionedObjectRegistry.obj_classes())
-        fingerprints = checker.get_hashes()
-
-        if os.getenv('GENERATE_HASHES'):
-            with open('object_hashes.txt', 'w') as f:
-                f.write(pprint.pformat(fingerprints))
-
-        expected, actual = checker.test_hashes(object_data)
-        self.assertEqual(expected, actual,
-                         'Some objects have changed; please make sure the '
-                         'versions have been bumped, and then update their '
-                         'hashes in the object_data map in this test module.')
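
For context, the expected workflow when an object legitimately changes, as hinted by the GENERATE_HASHES branch above: bump the object's VERSION, regenerate the fingerprints, and copy the new hash into the object_data map. A sketch of regenerating the map, assuming the same imports as the deleted module:

    import pprint

    from oslo_versionedobjects import base as obj_base
    from oslo_versionedobjects import fixture

    checker = fixture.ObjectVersionChecker(
        obj_base.VersionedObjectRegistry.obj_classes())
    # get_hashes() returns {object_name: 'VERSION-md5'} fingerprints.
    pprint.pprint(checker.get_hashes())
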
diff --git a/neutron/tests/unit/plugins/__init__.py b/neutron/tests/unit/plugins/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/common/__init__.py b/neutron/tests/unit/plugins/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/common/test_utils.py b/neutron/tests/unit/plugins/common/test_utils.py
deleted file mode 100644 (file)
index c4dfdfe..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) 2015 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import hashlib
-import mock
-
-from neutron.common import constants
-from neutron.plugins.common import utils
-from neutron.tests import base
-
-LONG_NAME1 = "A_REALLY_LONG_INTERFACE_NAME1"
-LONG_NAME2 = "A_REALLY_LONG_INTERFACE_NAME2"
-SHORT_NAME = "SHORT"
-MOCKED_HASH = "mockedhash"
-
-
-class MockSHA(object):
-    def hexdigest(self):
-        return MOCKED_HASH
-
-
-class TestUtils(base.BaseTestCase):
-
-    @mock.patch.object(hashlib, 'sha1', return_value=MockSHA())
-    def test_get_interface_name(self, mock_sha1):
-        prefix = "pre-"
-        prefix_long = "long_prefix"
-        prefix_exceeds_max_dev_len = "much_too_long_prefix"
-        hash_used = MOCKED_HASH[0:6]
-
-        self.assertEqual("A_REALLY_" + hash_used,
-                         utils.get_interface_name(LONG_NAME1))
-        self.assertEqual("SHORT",
-                         utils.get_interface_name(SHORT_NAME))
-        self.assertEqual("pre-A_REA" + hash_used,
-                         utils.get_interface_name(LONG_NAME1, prefix=prefix))
-        self.assertEqual("pre-SHORT",
-                         utils.get_interface_name(SHORT_NAME, prefix=prefix))
-        # len(prefix) > max_device_len - len(hash_used)
-        self.assertRaises(ValueError, utils.get_interface_name, SHORT_NAME,
-                          prefix_long)
-        # len(prefix) > max_device_len
-        self.assertRaises(ValueError, utils.get_interface_name, SHORT_NAME,
-                          prefix=prefix_exceeds_max_dev_len)
-
-    def test_get_interface_uniqueness(self):
-        prefix = "prefix-"
-        if_prefix1 = utils.get_interface_name(LONG_NAME1, prefix=prefix)
-        if_prefix2 = utils.get_interface_name(LONG_NAME2, prefix=prefix)
-        self.assertNotEqual(if_prefix1, if_prefix2)
-
-    @mock.patch.object(hashlib, 'sha1', return_value=MockSHA())
-    def test_get_interface_max_len(self, mock_sha1):
-        self.assertEqual(constants.DEVICE_NAME_MAX_LEN,
-                         len(utils.get_interface_name(LONG_NAME1)))
-        self.assertEqual(10, len(utils.get_interface_name(LONG_NAME1,
-                                                          max_len=10)))
-        self.assertEqual(12, len(utils.get_interface_name(LONG_NAME1,
-                                                          prefix="pre-",
-                                                          max_len=12)))
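
The behaviour these assertions pin down: names longer than the device-name limit are truncated and suffixed with the first six characters of the name's SHA-1 digest, which keeps results unique and within max_len. A hypothetical re-implementation for illustration only (the real helper is neutron.plugins.common.utils.get_interface_name; the constant value is an assumption mirroring Linux's IFNAMSIZ - 1):

    import hashlib

    DEVICE_NAME_MAX_LEN = 15  # assumption: Linux IFNAMSIZ - 1

    def sketch_get_interface_name(name, prefix='',
                                  max_len=DEVICE_NAME_MAX_LEN):
        requested = prefix + name
        if len(requested) <= max_len:
            return requested
        used = hashlib.sha1(name.encode()).hexdigest()[0:6]
        if len(prefix) > max_len - len(used):
            raise ValueError("prefix too long for a hashed interface name")
        # Truncate the name, then append the hash suffix.
        return prefix + name[:max_len - len(prefix) - len(used)] + used
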
diff --git a/neutron/tests/unit/plugins/ml2/__init__.py b/neutron/tests/unit/plugins/ml2/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/_test_mech_agent.py b/neutron/tests/unit/plugins/ml2/_test_mech_agent.py
deleted file mode 100644 (file)
index 271620c..0000000
+++ /dev/null
@@ -1,294 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-from neutron.extensions import portbindings
-from neutron.plugins.ml2 import driver_api as api
-from neutron.tests import base
-
-NETWORK_ID = "fake_network"
-PORT_ID = "fake_port"
-
-
-class FakeNetworkContext(api.NetworkContext):
-    def __init__(self, segments):
-        self._network_segments = segments
-
-    @property
-    def current(self):
-        return {'id': NETWORK_ID}
-
-    @property
-    def original(self):
-        return None
-
-    @property
-    def network_segments(self):
-        return self._network_segments
-
-
-class FakePortContext(api.PortContext):
-    def __init__(self, agent_type, agents, segments,
-                 vnic_type=portbindings.VNIC_NORMAL):
-        self._agent_type = agent_type
-        self._agents = agents
-        self._network_context = FakeNetworkContext(segments)
-        self._bound_vnic_type = vnic_type
-        self._bound_segment_id = None
-        self._bound_vif_type = None
-        self._bound_vif_details = None
-
-    @property
-    def current(self):
-        return {'id': PORT_ID,
-                portbindings.VNIC_TYPE: self._bound_vnic_type}
-
-    @property
-    def original(self):
-        return None
-
-    @property
-    def status(self):
-        return 'DOWN'
-
-    @property
-    def original_status(self):
-        return None
-
-    @property
-    def network(self):
-        return self._network_context
-
-    @property
-    def binding_levels(self):
-        if self._bound_segment_id:
-            return [{
-                api.BOUND_DRIVER: 'fake_driver',
-                api.BOUND_SEGMENT: self._expand_segment(self._bound_segment_id)
-            }]
-
-    @property
-    def original_binding_levels(self):
-        return None
-
-    @property
-    def top_bound_segment(self):
-        return self._expand_segment(self._bound_segment_id)
-
-    @property
-    def original_top_bound_segment(self):
-        return None
-
-    @property
-    def bottom_bound_segment(self):
-        return self._expand_segment(self._bound_segment_id)
-
-    @property
-    def original_bottom_bound_segment(self):
-        return None
-
-    def _expand_segment(self, segment_id):
-        for segment in self._network_context.network_segments:
-            if segment[api.ID] == segment_id:
-                return segment
-
-    @property
-    def host(self):
-        return ''
-
-    @property
-    def original_host(self):
-        return None
-
-    @property
-    def vif_type(self):
-        return portbindings.UNBOUND
-
-    @property
-    def original_vif_type(self):
-        return portbindings.UNBOUND
-
-    @property
-    def vif_details(self):
-        return None
-
-    @property
-    def original_vif_details(self):
-        return None
-
-    @property
-    def segments_to_bind(self):
-        return self._network_context.network_segments
-
-    def host_agents(self, agent_type):
-        if agent_type == self._agent_type:
-            return self._agents
-        else:
-            return []
-
-    def set_binding(self, segment_id, vif_type, vif_details):
-        self._bound_segment_id = segment_id
-        self._bound_vif_type = vif_type
-        self._bound_vif_details = vif_details
-
-    def continue_binding(self, segment_id, next_segments_to_bind):
-        pass
-
-    def allocate_dynamic_segment(self, segment):
-        pass
-
-    def release_dynamic_segment(self, segment_id):
-        pass
-
-
-class AgentMechanismBaseTestCase(base.BaseTestCase):
-    # The following must be overridden for the specific mechanism
-    # driver being tested:
-    VIF_TYPE = None
-    VIF_DETAILS = None
-    AGENT_TYPE = None
-    AGENTS = None
-    AGENTS_DEAD = None
-    AGENTS_BAD = None
-    VNIC_TYPE = portbindings.VNIC_NORMAL
-
-    def _check_unbound(self, context):
-        self.assertIsNone(context._bound_segment_id)
-        self.assertIsNone(context._bound_vif_type)
-        self.assertIsNone(context._bound_vif_details)
-
-    def _check_bound(self, context, segment):
-        self.assertEqual(context._bound_segment_id, segment[api.ID])
-        self.assertEqual(context._bound_vif_type, self.VIF_TYPE)
-        vif_details = context._bound_vif_details
-        self.assertIsNotNone(vif_details)
-        # NOTE(r-mibu): The following lines exist only for backward
-        # compatibility.  In this class, CAP_PORT_FILTER has been replaced
-        # by VIF_DETAILS, which can be set to the expected vif_details to
-        # check, but not all subclasses have completed that replacement
-        # yet.
-        if self.VIF_DETAILS is None:
-            expected = getattr(self, 'CAP_PORT_FILTER', None)
-            port_filter = vif_details[portbindings.CAP_PORT_FILTER]
-            self.assertEqual(expected, port_filter)
-            return
-        self.assertEqual(self.VIF_DETAILS, vif_details)
-
-
-class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase):
-    UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id',
-                              api.NETWORK_TYPE: 'no_such_type'}]
-
-    def test_unknown_type(self):
-        context = FakePortContext(self.AGENT_TYPE,
-                                  self.AGENTS,
-                                  self.UNKNOWN_TYPE_SEGMENTS,
-                                  vnic_type=self.VNIC_TYPE)
-        self.driver.bind_port(context)
-        self._check_unbound(context)
-
-
-class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase):
-    LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id',
-                       api.NETWORK_TYPE: 'no_such_type'},
-                      {api.ID: 'local_segment_id',
-                       api.NETWORK_TYPE: 'local'}]
-
-    def test_type_local(self):
-        context = FakePortContext(self.AGENT_TYPE,
-                                  self.AGENTS,
-                                  self.LOCAL_SEGMENTS,
-                                  vnic_type=self.VNIC_TYPE)
-        self.driver.bind_port(context)
-        self._check_bound(context, self.LOCAL_SEGMENTS[1])
-
-    def test_type_local_dead(self):
-        context = FakePortContext(self.AGENT_TYPE,
-                                  self.AGENTS_DEAD,
-                                  self.LOCAL_SEGMENTS,
-                                  vnic_type=self.VNIC_TYPE)
-        self.driver.bind_port(context)
-        self._check_unbound(context)
-
-
-class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase):
-    FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id',
-                      api.NETWORK_TYPE: 'no_such_type'},
-                     {api.ID: 'flat_segment_id',
-                      api.NETWORK_TYPE: 'flat',
-                      api.PHYSICAL_NETWORK: 'fake_physical_network'}]
-
-    def test_type_flat(self):
-        context = FakePortContext(self.AGENT_TYPE,
-                                  self.AGENTS,
-                                  self.FLAT_SEGMENTS,
-                                  vnic_type=self.VNIC_TYPE)
-        self.driver.bind_port(context)
-        self._check_bound(context, self.FLAT_SEGMENTS[1])
-
-    def test_type_flat_bad(self):
-        context = FakePortContext(self.AGENT_TYPE,
-                                  self.AGENTS_BAD,
-                                  self.FLAT_SEGMENTS,
-                                  vnic_type=self.VNIC_TYPE)
-        self.driver.bind_port(context)
-        self._check_unbound(context)
-
-
-class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase):
-    VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id',
-                      api.NETWORK_TYPE: 'no_such_type'},
-                     {api.ID: 'vlan_segment_id',
-                      api.NETWORK_TYPE: 'vlan',
-                      api.PHYSICAL_NETWORK: 'fake_physical_network',
-                      api.SEGMENTATION_ID: 1234}]
-
-    def test_type_vlan(self):
-        context = FakePortContext(self.AGENT_TYPE,
-                                  self.AGENTS,
-                                  self.VLAN_SEGMENTS,
-                                  vnic_type=self.VNIC_TYPE)
-        self.driver.bind_port(context)
-        self._check_bound(context, self.VLAN_SEGMENTS[1])
-
-    def test_type_vlan_bad(self):
-        context = FakePortContext(self.AGENT_TYPE,
-                                  self.AGENTS_BAD,
-                                  self.VLAN_SEGMENTS,
-                                  vnic_type=self.VNIC_TYPE)
-        self.driver.bind_port(context)
-        self._check_unbound(context)
-
-
-class AgentMechanismGreTestCase(AgentMechanismBaseTestCase):
-    GRE_SEGMENTS = [{api.ID: 'unknown_segment_id',
-                     api.NETWORK_TYPE: 'no_such_type'},
-                    {api.ID: 'gre_segment_id',
-                     api.NETWORK_TYPE: 'gre',
-                     api.SEGMENTATION_ID: 1234}]
-
-    def test_type_gre(self):
-        context = FakePortContext(self.AGENT_TYPE,
-                                  self.AGENTS,
-                                  self.GRE_SEGMENTS)
-        self.driver.bind_port(context)
-        self._check_bound(context, self.GRE_SEGMENTS[1])
-
-    def test_type_gre_bad(self):
-        context = FakePortContext(self.AGENT_TYPE,
-                                  self.AGENTS_BAD,
-                                  self.GRE_SEGMENTS)
-        self.driver.bind_port(context)
-        self._check_unbound(context)
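
Concrete mechanism-driver test cases plug into these bases by setting the class attributes (VIF_TYPE, AGENT_TYPE, AGENTS, ...) and assigning self.driver in setUp(); the fake contexts then let bind_port run without a real plugin. A minimal sketch of driving FakePortContext directly, assuming the classes and api import above (all values illustrative):

    segments = [{api.ID: 'seg1', api.NETWORK_TYPE: 'local'}]
    context = FakePortContext(agent_type='fake-agent',
                              agents=[{'alive': True}],
                              segments=segments)
    # A driver's bind_port() would call set_binding() on success.
    context.set_binding('seg1', 'fake-vif-type', {'port_filter': True})
    assert context.binding_levels[0][api.BOUND_SEGMENT][api.ID] == 'seg1'
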
diff --git a/neutron/tests/unit/plugins/ml2/base.py b/neutron/tests/unit/plugins/ml2/base.py
deleted file mode 100644 (file)
index 6c193a4..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron import manager
-from neutron.plugins.common import constants as plugin_constants
-from neutron.tests.unit.plugins.ml2 import test_plugin
-
-
-class ML2TestFramework(test_plugin.Ml2PluginV2TestCase):
-    l3_plugin = ('neutron.services.l3_router.l3_router_plugin.'
-                 'L3RouterPlugin')
-    _mechanism_drivers = ['openvswitch']
-
-    def setUp(self):
-        super(ML2TestFramework, self).setUp()
-        self.core_plugin = manager.NeutronManager.get_instance().get_plugin()
-        self.l3_plugin = manager.NeutronManager.get_service_plugins().get(
-            plugin_constants.L3_ROUTER_NAT)
-
-    def _create_router(self, distributed=False, ha=False):
-        return self.l3_plugin.create_router(
-            self.context,
-            {'router':
-             {'name': 'router',
-              'admin_state_up': True,
-              'tenant_id': self._tenant_id,
-              'ha': ha,
-              'distributed': distributed}})
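
Tests built on ML2TestFramework can then create routers against the real L3 plugin in one line. A usage sketch inside a subclass test method, assuming the DVR extension is loaded:

    router = self._create_router(distributed=True)
    self.assertIsNotNone(router['id'])
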
diff --git a/neutron/tests/unit/plugins/ml2/db/__init__.py b/neutron/tests/unit/plugins/ml2/db/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py b/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py
deleted file mode 100644 (file)
index d5619fd..0000000
+++ /dev/null
@@ -1,393 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from six import moves
-import testtools
-from testtools import matchers
-
-from neutron.common import exceptions as exc
-from neutron.db import api as db
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers import type_tunnel
-
-TUNNEL_IP_ONE = "10.10.10.10"
-TUNNEL_IP_TWO = "10.10.10.20"
-HOST_ONE = 'fake_host_one'
-HOST_TWO = 'fake_host_two'
-TUN_MIN = 100
-TUN_MAX = 109
-TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
-UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
-
-
-class TunnelTypeTestMixin(object):
-    DRIVER_CLASS = None
-    TYPE = None
-
-    def setUp(self):
-        super(TunnelTypeTestMixin, self).setUp()
-        self.driver = self.DRIVER_CLASS()
-        self.driver.tunnel_ranges = TUNNEL_RANGES
-        self.driver.sync_allocations()
-        self.session = db.get_session()
-
-    def test_tunnel_type(self):
-        self.assertEqual(self.TYPE, self.driver.get_type())
-
-    def test_validate_provider_segment(self):
-        segment = {api.NETWORK_TYPE: self.TYPE,
-                   api.PHYSICAL_NETWORK: 'phys_net',
-                   api.SEGMENTATION_ID: None}
-
-        with testtools.ExpectedException(exc.InvalidInput):
-            self.driver.validate_provider_segment(segment)
-
-        segment[api.PHYSICAL_NETWORK] = None
-        self.driver.validate_provider_segment(segment)
-
-        segment[api.SEGMENTATION_ID] = 1
-        self.driver.validate_provider_segment(segment)
-
-    def test_sync_tunnel_allocations(self):
-        self.assertIsNone(
-            self.driver.get_allocation(self.session, (TUN_MIN - 1)))
-        self.assertFalse(
-            self.driver.get_allocation(self.session, (TUN_MIN)).allocated)
-        self.assertFalse(
-            self.driver.get_allocation(self.session, (TUN_MIN + 1)).allocated)
-        self.assertFalse(
-            self.driver.get_allocation(self.session, (TUN_MAX - 1)).allocated)
-        self.assertFalse(
-            self.driver.get_allocation(self.session, (TUN_MAX)).allocated)
-        self.assertIsNone(
-            self.driver.get_allocation(self.session, (TUN_MAX + 1)))
-
-        self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES
-        self.driver.sync_allocations()
-
-        self.assertIsNone(
-            self.driver.get_allocation(self.session, (TUN_MIN + 5 - 1)))
-        self.assertFalse(
-            self.driver.get_allocation(self.session, (TUN_MIN + 5)).allocated)
-        self.assertFalse(
-            self.driver.get_allocation(self.session,
-                                       (TUN_MIN + 5 + 1)).allocated)
-        self.assertFalse(
-            self.driver.get_allocation(self.session,
-                                       (TUN_MAX + 5 - 1)).allocated)
-        self.assertFalse(
-            self.driver.get_allocation(self.session, (TUN_MAX + 5)).allocated)
-        self.assertIsNone(
-            self.driver.get_allocation(self.session, (TUN_MAX + 5 + 1)))
-
-    def _test_sync_allocations_and_allocated(self, tunnel_id):
-        segment = {api.NETWORK_TYPE: self.TYPE,
-                   api.PHYSICAL_NETWORK: None,
-                   api.SEGMENTATION_ID: tunnel_id}
-        self.driver.reserve_provider_segment(self.session, segment)
-
-        self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES
-        self.driver.sync_allocations()
-
-        self.assertTrue(
-            self.driver.get_allocation(self.session, tunnel_id).allocated)
-
-    def test_sync_allocations_and_allocated_in_initial_range(self):
-        self._test_sync_allocations_and_allocated(TUN_MIN + 2)
-
-    def test_sync_allocations_and_allocated_in_final_range(self):
-        self._test_sync_allocations_and_allocated(TUN_MAX + 2)
-
-    def test_sync_allocations_no_op(self):
-
-        def verify_no_chunk(iterable, chunk_size):
-            # no segment removed/added
-            self.assertEqual(0, len(list(iterable)))
-            return []
-        with mock.patch.object(
-                type_tunnel, 'chunks', side_effect=verify_no_chunk) as chunks:
-            self.driver.sync_allocations()
-            self.assertEqual(2, len(chunks.mock_calls))
-
-    def test_partial_segment_is_partial_segment(self):
-        segment = {api.NETWORK_TYPE: self.TYPE,
-                   api.PHYSICAL_NETWORK: None,
-                   api.SEGMENTATION_ID: None}
-        self.assertTrue(self.driver.is_partial_segment(segment))
-
-    def test_specific_segment_is_not_partial_segment(self):
-        segment = {api.NETWORK_TYPE: self.TYPE,
-                   api.PHYSICAL_NETWORK: None,
-                   api.SEGMENTATION_ID: 101}
-        self.assertFalse(self.driver.is_partial_segment(segment))
-
-    def test_reserve_provider_segment_full_specs(self):
-        segment = {api.NETWORK_TYPE: self.TYPE,
-                   api.PHYSICAL_NETWORK: None,
-                   api.SEGMENTATION_ID: 101}
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        alloc = self.driver.get_allocation(self.session,
-                                           observed[api.SEGMENTATION_ID])
-        self.assertTrue(alloc.allocated)
-
-        with testtools.ExpectedException(exc.TunnelIdInUse):
-            self.driver.reserve_provider_segment(self.session, segment)
-
-        self.driver.release_segment(self.session, segment)
-        alloc = self.driver.get_allocation(self.session,
-                                           observed[api.SEGMENTATION_ID])
-        self.assertFalse(alloc.allocated)
-
-        segment[api.SEGMENTATION_ID] = 1000
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        alloc = self.driver.get_allocation(self.session,
-                                           observed[api.SEGMENTATION_ID])
-        self.assertTrue(alloc.allocated)
-
-        self.driver.release_segment(self.session, segment)
-        alloc = self.driver.get_allocation(self.session,
-                                           observed[api.SEGMENTATION_ID])
-        self.assertIsNone(alloc)
-
-    def test_reserve_provider_segment(self):
-        tunnel_ids = set()
-        specs = {api.NETWORK_TYPE: self.TYPE,
-                 api.PHYSICAL_NETWORK: 'None',
-                 api.SEGMENTATION_ID: None}
-
-        for x in moves.range(TUN_MIN, TUN_MAX + 1):
-            segment = self.driver.reserve_provider_segment(self.session,
-                                                           specs)
-            self.assertEqual(self.TYPE, segment[api.NETWORK_TYPE])
-            self.assertThat(segment[api.SEGMENTATION_ID],
-                            matchers.GreaterThan(TUN_MIN - 1))
-            self.assertThat(segment[api.SEGMENTATION_ID],
-                            matchers.LessThan(TUN_MAX + 1))
-            tunnel_ids.add(segment[api.SEGMENTATION_ID])
-
-        with testtools.ExpectedException(exc.NoNetworkAvailable):
-            segment = self.driver.reserve_provider_segment(self.session,
-                                                           specs)
-
-        segment = {api.NETWORK_TYPE: self.TYPE,
-                   api.PHYSICAL_NETWORK: 'None',
-                   api.SEGMENTATION_ID: tunnel_ids.pop()}
-        self.driver.release_segment(self.session, segment)
-        segment = self.driver.reserve_provider_segment(self.session, specs)
-        self.assertThat(segment[api.SEGMENTATION_ID],
-                        matchers.GreaterThan(TUN_MIN - 1))
-        self.assertThat(segment[api.SEGMENTATION_ID],
-                        matchers.LessThan(TUN_MAX + 1))
-        tunnel_ids.add(segment[api.SEGMENTATION_ID])
-
-        for tunnel_id in tunnel_ids:
-            segment[api.SEGMENTATION_ID] = tunnel_id
-            self.driver.release_segment(self.session, segment)
-
-    def test_allocate_tenant_segment(self):
-        tunnel_ids = set()
-        for x in moves.range(TUN_MIN, TUN_MAX + 1):
-            segment = self.driver.allocate_tenant_segment(self.session)
-            self.assertThat(segment[api.SEGMENTATION_ID],
-                            matchers.GreaterThan(TUN_MIN - 1))
-            self.assertThat(segment[api.SEGMENTATION_ID],
-                            matchers.LessThan(TUN_MAX + 1))
-            tunnel_ids.add(segment[api.SEGMENTATION_ID])
-
-        segment = self.driver.allocate_tenant_segment(self.session)
-        self.assertIsNone(segment)
-
-        segment = {api.NETWORK_TYPE: self.TYPE,
-                   api.PHYSICAL_NETWORK: 'None',
-                   api.SEGMENTATION_ID: tunnel_ids.pop()}
-        self.driver.release_segment(self.session, segment)
-        segment = self.driver.allocate_tenant_segment(self.session)
-        self.assertThat(segment[api.SEGMENTATION_ID],
-                        matchers.GreaterThan(TUN_MIN - 1))
-        self.assertThat(segment[api.SEGMENTATION_ID],
-                        matchers.LessThan(TUN_MAX + 1))
-        tunnel_ids.add(segment[api.SEGMENTATION_ID])
-
-        for tunnel_id in tunnel_ids:
-            segment[api.SEGMENTATION_ID] = tunnel_id
-            self.driver.release_segment(self.session, segment)
-
-    def add_endpoint(self, ip=TUNNEL_IP_ONE, host=HOST_ONE):
-        return self.driver.add_endpoint(ip, host)
-
-    def test_add_endpoint(self):
-        endpoint = self.add_endpoint()
-        self.assertEqual(TUNNEL_IP_ONE, endpoint.ip_address)
-        self.assertEqual(HOST_ONE, endpoint.host)
-        return endpoint
-
-    def test_add_endpoint_for_existing_tunnel_ip(self):
-        self.add_endpoint()
-
-        with mock.patch.object(type_tunnel.LOG, 'warning') as log_warn:
-            self.add_endpoint()
-            log_warn.assert_called_once_with(mock.ANY, TUNNEL_IP_ONE)
-
-    def test_get_endpoint_by_host(self):
-        self.add_endpoint()
-
-        host_endpoint = self.driver.get_endpoint_by_host(HOST_ONE)
-        self.assertEqual(TUNNEL_IP_ONE, host_endpoint.ip_address)
-        return host_endpoint
-
-    def test_get_endpoint_by_host_for_not_existing_host(self):
-        ip_endpoint = self.driver.get_endpoint_by_host(HOST_TWO)
-        self.assertIsNone(ip_endpoint)
-
-    def test_get_endpoint_by_ip(self):
-        self.add_endpoint()
-
-        ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_ONE)
-        self.assertEqual(HOST_ONE, ip_endpoint.host)
-        return ip_endpoint
-
-    def test_get_endpoint_by_ip_for_not_existing_tunnel_ip(self):
-        ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_TWO)
-        self.assertIsNone(ip_endpoint)
-
-    def test_delete_endpoint(self):
-        self.add_endpoint()
-
-        self.assertIsNone(self.driver.delete_endpoint(TUNNEL_IP_ONE))
-        # Fetch all endpoints and verify the deleted one is gone
-        endpoints = self.driver.get_endpoints()
-        self.assertNotIn(TUNNEL_IP_ONE, endpoints)
-
-
-class TunnelTypeMultiRangeTestMixin(object):
-    DRIVER_CLASS = None
-
-    TUN_MIN0 = 100
-    TUN_MAX0 = 101
-    TUN_MIN1 = 200
-    TUN_MAX1 = 201
-    TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]
-
-    def setUp(self):
-        super(TunnelTypeMultiRangeTestMixin, self).setUp()
-        self.driver = self.DRIVER_CLASS()
-        self.driver.tunnel_ranges = self.TUNNEL_MULTI_RANGES
-        self.driver.sync_allocations()
-        self.session = db.get_session()
-
-    def test_release_segment(self):
-        segments = [self.driver.allocate_tenant_segment(self.session)
-                    for i in range(4)]
-
-        # Release them in random order. No special meaning.
-        for i in (0, 2, 1, 3):
-            self.driver.release_segment(self.session, segments[i])
-
-        for key in (self.TUN_MIN0, self.TUN_MAX0,
-                    self.TUN_MIN1, self.TUN_MAX1):
-            alloc = self.driver.get_allocation(self.session, key)
-            self.assertFalse(alloc.allocated)
-
-
-class TunnelRpcCallbackTestMixin(object):
-
-    DRIVER_CLASS = None
-    TYPE = None
-
-    def setUp(self):
-        super(TunnelRpcCallbackTestMixin, self).setUp()
-        self.driver = self.DRIVER_CLASS()
-
-    def _test_tunnel_sync(self, kwargs, delete_tunnel=False):
-        with mock.patch.object(self.notifier,
-                               'tunnel_update') as tunnel_update,\
-                mock.patch.object(self.notifier,
-                                  'tunnel_delete') as tunnel_delete:
-            details = self.callbacks.tunnel_sync('fake_context', **kwargs)
-            tunnels = details['tunnels']
-            for tunnel in tunnels:
-                self.assertEqual(kwargs['tunnel_ip'], tunnel['ip_address'])
-                self.assertEqual(kwargs['host'], tunnel['host'])
-            self.assertTrue(tunnel_update.called)
-            if delete_tunnel:
-                self.assertTrue(tunnel_delete.called)
-            else:
-                self.assertFalse(tunnel_delete.called)
-
-    def _test_tunnel_sync_raises(self, kwargs):
-        with mock.patch.object(self.notifier,
-                               'tunnel_update') as tunnel_update,\
-                mock.patch.object(self.notifier,
-                                  'tunnel_delete') as tunnel_delete:
-            self.assertRaises(exc.InvalidInput,
-                              self.callbacks.tunnel_sync,
-                              'fake_context', **kwargs)
-            self.assertFalse(tunnel_update.called)
-            self.assertFalse(tunnel_delete.called)
-
-    def test_tunnel_sync_called_without_host_passed(self):
-        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
-                  'host': None}
-        self._test_tunnel_sync(kwargs)
-
-    def test_tunnel_sync_called_with_host_passed_for_existing_tunnel_ip(self):
-        self.driver.add_endpoint(TUNNEL_IP_ONE, None)
-
-        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
-                  'host': HOST_ONE}
-        self._test_tunnel_sync(kwargs)
-
-    def test_tunnel_sync_called_with_host_passed(self):
-        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
-                  'host': HOST_ONE}
-        self._test_tunnel_sync(kwargs)
-
-    def test_tunnel_sync_called_for_existing_endpoint(self):
-        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
-
-        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
-                  'host': HOST_ONE}
-        self._test_tunnel_sync(kwargs)
-
-    def test_tunnel_sync_called_for_existing_host_with_tunnel_ip_changed(self):
-        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
-
-        kwargs = {'tunnel_ip': TUNNEL_IP_TWO, 'tunnel_type': self.TYPE,
-                  'host': HOST_ONE}
-        self._test_tunnel_sync(kwargs, True)
-
-    def test_tunnel_sync_called_with_used_tunnel_ip_host_roaming(self):
-        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
-
-        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
-                  'host': HOST_TWO}
-        self._test_tunnel_sync(kwargs, False)
-
-    def test_tunnel_sync_called_with_used_tunnel_ip_roaming_case_two(self):
-        self.driver.add_endpoint(TUNNEL_IP_ONE, None)
-        self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO)
-
-        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
-                  'host': HOST_TWO}
-        self._test_tunnel_sync(kwargs, False)
-
-    def test_tunnel_sync_called_without_tunnel_ip(self):
-        kwargs = {'tunnel_type': self.TYPE, 'host': None}
-        self._test_tunnel_sync_raises(kwargs)
-
-    def test_tunnel_sync_called_without_tunnel_type(self):
-        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'host': None}
-        self._test_tunnel_sync_raises(kwargs)
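
A concrete type-driver test binds these mixins to a driver class plus a DB-backed test base. A sketch using GRE (class paths assumed from the surrounding tree; the real binding lives in the per-type test modules):

    from neutron.plugins.ml2.drivers import type_gre
    from neutron.tests.unit import testlib_api

    class GreTypeTest(TunnelTypeTestMixin, testlib_api.SqlTestCase):
        DRIVER_CLASS = type_gre.GreTypeDriver
        TYPE = 'gre'
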
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ext_test.py b/neutron/tests/unit/plugins/ml2/drivers/ext_test.py
deleted file mode 100644 (file)
index 90c0f72..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright 2015 Intel Corporation.
-# Copyright 2015 Isaku Yamahata <isaku.yamahata at intel com>
-#                               <isaku.yamahata at gmail com>
-# All Rights Reserved.
-#
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-
-import oslo_db.sqlalchemy.session
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.plugins.ml2 import driver_api
-from neutron.tests.unit.plugins.ml2 import extensions as test_extensions
-
-
-class TestExtensionDriverBase(driver_api.ExtensionDriver):
-    _supported_extension_aliases = 'fake_extension'
-
-    def initialize(self):
-        extensions.append_api_extensions_path(test_extensions.__path__)
-
-    @property
-    def extension_alias(self):
-        return self._supported_extension_aliases
-
-
-class TestExtensionDriver(TestExtensionDriverBase):
-    def initialize(self):
-        super(TestExtensionDriver, self).initialize()
-        self.network_extension = 'Test_Network_Extension'
-        self.subnet_extension = 'Test_Subnet_Extension'
-        self.port_extension = 'Test_Port_Extension'
-
-    def _check_create(self, session, data, result):
-        assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
-        assert(isinstance(data, dict))
-        assert('id' not in data)
-        assert(isinstance(result, dict))
-        assert(result['id'] is not None)
-
-    def _check_update(self, session, data, result):
-        assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
-        assert(isinstance(data, dict))
-        assert(isinstance(result, dict))
-        assert(result['id'] is not None)
-
-    def _check_extend(self, session, result, db_entry,
-                      expected_db_entry_class):
-        assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
-        assert(isinstance(result, dict))
-        assert(result['id'] is not None)
-        assert(isinstance(db_entry, expected_db_entry_class))
-        assert(db_entry.id == result['id'])
-
-    def process_create_network(self, plugin_context, data, result):
-        session = plugin_context.session
-        self._check_create(session, data, result)
-        result['network_extension'] = self.network_extension + '_create'
-
-    def process_update_network(self, plugin_context, data, result):
-        session = plugin_context.session
-        self._check_update(session, data, result)
-        self.network_extension = data['network_extension']
-        result['network_extension'] = self.network_extension + '_update'
-
-    def extend_network_dict(self, session, net_db, result):
-        self._check_extend(session, result, net_db, models_v2.Network)
-        result['network_extension'] = self.network_extension + '_extend'
-
-    def process_create_subnet(self, plugin_context, data, result):
-        session = plugin_context.session
-        self._check_create(session, data, result)
-        result['subnet_extension'] = self.subnet_extension + '_create'
-
-    def process_update_subnet(self, plugin_context, data, result):
-        session = plugin_context.session
-        self._check_update(session, data, result)
-        self.subnet_extension = data['subnet_extension']
-        result['subnet_extension'] = self.subnet_extension + '_update'
-
-    def extend_subnet_dict(self, session, subnet_db, result):
-        self._check_extend(session, result, subnet_db, models_v2.Subnet)
-        result['subnet_extension'] = self.subnet_extension + '_extend'
-
-    def process_create_port(self, plugin_context, data, result):
-        session = plugin_context.session
-        self._check_create(session, data, result)
-        result['port_extension'] = self.port_extension + '_create'
-
-    def process_update_port(self, plugin_context, data, result):
-        session = plugin_context.session
-        self._check_update(session, data, result)
-        self.port_extension = data['port_extension']
-        result['port_extension'] = self.port_extension + '_update'
-
-    def extend_port_dict(self, session, port_db, result):
-        self._check_extend(session, result, port_db, models_v2.Port)
-        result['port_extension'] = self.port_extension + '_extend'
-
-
-class TestNetworkExtension(model_base.BASEV2):
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
-                           primary_key=True)
-    value = sa.Column(sa.String(64))
-    network = orm.relationship(
-        models_v2.Network,
-        backref=orm.backref('extension', cascade='delete', uselist=False))
-
-
-class TestSubnetExtension(model_base.BASEV2):
-    subnet_id = sa.Column(sa.String(36),
-                          sa.ForeignKey('subnets.id', ondelete="CASCADE"),
-                          primary_key=True)
-    value = sa.Column(sa.String(64))
-    subnet = orm.relationship(
-        models_v2.Subnet,
-        backref=orm.backref('extension', cascade='delete', uselist=False))
-
-
-class TestPortExtension(model_base.BASEV2):
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                        primary_key=True)
-    value = sa.Column(sa.String(64))
-    port = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref('extension', cascade='delete', uselist=False))
-
-
-class TestDBExtensionDriver(TestExtensionDriverBase):
-    def _get_value(self, data, key):
-        value = data[key]
-        if not attributes.is_attr_set(value):
-            value = ''
-        return value
-
-    def process_create_network(self, plugin_context, data, result):
-        session = plugin_context.session
-        value = self._get_value(data, 'network_extension')
-        record = TestNetworkExtension(network_id=result['id'], value=value)
-        session.add(record)
-        result['network_extension'] = value
-
-    def process_update_network(self, plugin_context, data, result):
-        session = plugin_context.session
-        record = (session.query(TestNetworkExtension).
-                  filter_by(network_id=result['id']).one())
-        value = data.get('network_extension')
-        if value and value != record.value:
-            record.value = value
-        result['network_extension'] = record.value
-
-    def extend_network_dict(self, session, net_db, result):
-        result['network_extension'] = net_db.extension.value
-
-    def process_create_subnet(self, plugin_context, data, result):
-        session = plugin_context.session
-        value = self._get_value(data, 'subnet_extension')
-        record = TestSubnetExtension(subnet_id=result['id'], value=value)
-        session.add(record)
-        result['subnet_extension'] = value
-
-    def process_update_subnet(self, plugin_context, data, result):
-        session = plugin_context.session
-        record = (session.query(TestSubnetExtension).
-                  filter_by(subnet_id=result['id']).one())
-        value = data.get('subnet_extension')
-        if value and value != record.value:
-            record.value = value
-        result['subnet_extension'] = record.value
-
-    def extend_subnet_dict(self, session, subnet_db, result):
-        value = subnet_db.extension.value if subnet_db.extension else ''
-        result['subnet_extension'] = value
-
-    def process_create_port(self, plugin_context, data, result):
-        session = plugin_context.session
-        value = self._get_value(data, 'port_extension')
-        record = TestPortExtension(port_id=result['id'], value=value)
-        session.add(record)
-        result['port_extension'] = value
-
-    def process_update_port(self, plugin_context, data, result):
-        session = plugin_context.session
-        record = (session.query(TestPortExtension).
-                  filter_by(port_id=result['id']).one())
-        value = data.get('port_extension')
-        if value and value != record.value:
-            record.value = value
-        result['port_extension'] = record.value
-
-    def extend_port_dict(self, session, port_db, result):
-        value = port_db.extension.value if port_db.extension else ''
-        result['port_extension'] = value
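
Both drivers above implement the same ExtensionDriver contract: initialize() runs once at plugin start, process_* hooks fire on create/update requests, and extend_*_dict hooks decorate read results. A minimal sketch of that contract with a hypothetical alias and attribute name:

    from neutron.plugins.ml2 import driver_api

    class MinimalExtensionDriver(driver_api.ExtensionDriver):
        """Hypothetical driver showing the hook points used above."""

        def initialize(self):
            pass

        @property
        def extension_alias(self):
            return 'minimal'

        def process_create_network(self, plugin_context, data, result):
            # Copy the extension attribute from the request into the result.
            result['minimal'] = data.get('minimal', '')

        def extend_network_dict(self, session, net_db, result):
            result.setdefault('minimal', '')
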
diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py
deleted file mode 100644 (file)
index 3d128fd..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (C) 2014 VA Linux Systems Japan K.K.
-# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import mock
-
-from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
-from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
-from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent
-from neutron.tests import base
-
-
-class FakeNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin):
-
-    def fdb_add(self, context, fdb_entries):
-        pass
-
-    def fdb_remove(self, context, fdb_entries):
-        pass
-
-    def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
-        pass
-
-    def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
-        pass
-
-    def setup_tunnel_port(self, br, remote_ip, network_type):
-        pass
-
-    def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
-        pass
-
-    def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
-                                  ip_address):
-        pass
-
-
-class TestL2populationRpcCallBackTunnelMixinBase(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestL2populationRpcCallBackTunnelMixinBase, self).setUp()
-        self.fakeagent = FakeNeutronAgent()
-        self.fakebr = mock.Mock()
-        Port = collections.namedtuple('Port', 'ip, ofport')
-        LVM = collections.namedtuple(
-            'LVM', 'net, vlan, phys, segid, mac, ip, vif, port')
-
-        self.local_ip = '127.0.0.1'
-        self.type_gre = 'gre'
-        self.ports = [Port(ip='10.1.0.1', ofport='ofport1'),
-                      Port(ip='10.1.0.2', ofport='ofport2'),
-                      Port(ip='10.1.0.3', ofport='ofport3')]
-        self.ofports = {
-            self.type_gre: {
-                self.ports[0].ip: self.ports[0].ofport,
-                self.ports[1].ip: self.ports[1].ofport,
-                self.ports[2].ip: self.ports[2].ofport,
-            }
-        }
-
-        self.lvms = [LVM(net='net1', vlan=1, phys='phys1', segid='tun1',
-                         mac='mac1', ip='1.1.1.1', vif='vifid1',
-                         port='port1'),
-                     LVM(net='net2', vlan=2, phys='phys2', segid='tun2',
-                         mac='mac2', ip='2.2.2.2', vif='vifid2',
-                         port='port2'),
-                     LVM(net='net3', vlan=3, phys='phys3', segid='tun3',
-                         mac='mac3', ip='3.3.3.3', vif='vifid3',
-                         port='port3')]
-
-        self.agent_ports = {
-            self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)],
-            self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)],
-            self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)],
-        }
-
-        self.fdb_entries1 = {
-            self.lvms[0].net: {
-                'network_type': self.type_gre,
-                'segment_id': self.lvms[0].segid,
-                'ports': {
-                    self.local_ip: [],
-                    self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)]},
-            },
-            self.lvms[1].net: {
-                'network_type': self.type_gre,
-                'segment_id': self.lvms[1].segid,
-                'ports': {
-                    self.local_ip: [],
-                    self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)]},
-            },
-            self.lvms[2].net: {
-                'network_type': self.type_gre,
-                'segment_id': self.lvms[2].segid,
-                'ports': {
-                    self.local_ip: [],
-                    self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)]},
-            },
-        }
-
-        self.lvm1 = ovs_neutron_agent.LocalVLANMapping(
-            self.lvms[0].vlan, self.type_gre, self.lvms[0].phys,
-            self.lvms[0].segid, {self.lvms[0].vif: self.lvms[0].port})
-        self.lvm2 = ovs_neutron_agent.LocalVLANMapping(
-            self.lvms[1].vlan, self.type_gre, self.lvms[1].phys,
-            self.lvms[1].segid, {self.lvms[1].vif: self.lvms[1].port})
-        self.lvm3 = ovs_neutron_agent.LocalVLANMapping(
-            self.lvms[2].vlan, self.type_gre, self.lvms[2].phys,
-            self.lvms[2].segid, {self.lvms[2].vif: self.lvms[2].port})
-
-        self.local_vlan_map1 = {
-            self.lvms[0].net: self.lvm1,
-            self.lvms[1].net: self.lvm2,
-            self.lvms[2].net: self.lvm3,
-        }
-
-        self.upd_fdb_entry1_val = {
-            self.lvms[0].net: {
-                self.ports[0].ip: {
-                    'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
-                               self.lvms[0].ip)],
-                    'after': [l2pop_rpc.PortInfo(self.lvms[1].mac,
-                              self.lvms[1].ip)],
-                },
-                self.ports[1].ip: {
-                    'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
-                               self.lvms[0].ip)],
-                    'after': [l2pop_rpc.PortInfo(self.lvms[1].mac,
-                              self.lvms[1].ip)],
-                },
-            },
-            self.lvms[1].net: {
-                self.ports[2].ip: {
-                    'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
-                               self.lvms[0].ip)],
-                    'after': [l2pop_rpc.PortInfo(self.lvms[2].mac,
-                              self.lvms[2].ip)],
-                },
-            },
-        }
-        self.upd_fdb_entry1 = {'chg_ip': self.upd_fdb_entry1_val}
-
-    def _tunnel_port_lookup(self, network_type, remote_ip):
-        return self.ofports[network_type].get(remote_ip)
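
The FDB entries assembled in this fixture follow the l2pop wire format consumed by the tests below: a mapping of network id to network type, segment id, and per-endpoint lists of (mac, ip) pairs. Distilled, with illustrative values:

    fdb_entries = {
        'net1': {                       # network id
            'network_type': 'gre',
            'segment_id': 'tun1',
            'ports': {
                '10.1.0.1': [('mac1', '1.1.1.1')],  # (mac, ip) per endpoint
            },
        },
    }
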
diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py
deleted file mode 100644 (file)
index d8a39a3..0000000
+++ /dev/null
@@ -1,252 +0,0 @@
-# Copyright (C) 2014 VA Linux Systems Japan K.K.
-# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.common import constants as n_const
-from neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager \
-    import l2population_rpc_base
-
-
-class TestL2populationRpcCallBackTunnelMixin(
-        l2population_rpc_base.TestL2populationRpcCallBackTunnelMixinBase):
-
-    def test_get_agent_ports_no_data(self):
-        self.assertFalse(
-            list(self.fakeagent.get_agent_ports(self.fdb_entries1, {})))
-
-    def test_get_agent_ports_non_existence_key_in_lvm(self):
-        results = {}
-        del self.local_vlan_map1[self.lvms[1].net]
-        for lvm, agent_ports in self.fakeagent.get_agent_ports(
-            self.fdb_entries1, self.local_vlan_map1):
-            results[lvm] = agent_ports
-        expected = {
-            self.lvm1: {
-                self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)],
-                self.local_ip: []},
-            self.lvm3: {
-                self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)],
-                self.local_ip: []},
-        }
-        self.assertEqual(expected, results)
-
-    def test_get_agent_ports_no_agent_ports(self):
-        results = {}
-        self.fdb_entries1[self.lvms[1].net]['ports'] = {}
-        for lvm, agent_ports in self.fakeagent.get_agent_ports(
-            self.fdb_entries1, self.local_vlan_map1):
-            results[lvm] = agent_ports
-        expected = {
-            self.lvm1: {
-                self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)],
-                self.local_ip: []},
-            self.lvm2: {},
-            self.lvm3: {
-                self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)],
-                self.local_ip: []},
-        }
-        self.assertEqual(expected, results)
-
-    def test_fdb_add_tun(self):
-        with mock.patch.object(self.fakeagent, 'setup_tunnel_port'),\
-                mock.patch.object(self.fakeagent, 'add_fdb_flow'
-                                  ) as mock_add_fdb_flow:
-            self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1,
-                                       self.agent_ports,
-                                       self._tunnel_port_lookup)
-        expected = [
-            mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
-                      self.ports[0].ip, self.lvm1, self.ports[0].ofport),
-            mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip),
-                      self.ports[1].ip, self.lvm1, self.ports[1].ofport),
-            mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
-                      self.ports[2].ip, self.lvm1, self.ports[2].ofport),
-        ]
-        self.assertEqual(sorted(expected),
-                         sorted(mock_add_fdb_flow.call_args_list))
-
-    def test_fdb_add_tun_non_existence_key_in_ofports(self):
-        ofport = self.lvm1.network_type + '0a0a0a0a'
-        del self.ofports[self.type_gre][self.ports[1].ip]
-        with mock.patch.object(self.fakeagent, 'setup_tunnel_port',
-                               return_value=ofport
-                               ) as mock_setup_tunnel_port,\
-                mock.patch.object(self.fakeagent, 'add_fdb_flow'
-                                  ) as mock_add_fdb_flow:
-            self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1,
-                                       self.agent_ports,
-                                       self._tunnel_port_lookup)
-        mock_setup_tunnel_port.assert_called_once_with(
-            self.fakebr, self.ports[1].ip, self.lvm1.network_type)
-        expected = [
-            mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
-                      self.ports[0].ip, self.lvm1, self.ports[0].ofport),
-            mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip),
-                      self.ports[1].ip, self.lvm1, ofport),
-            mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
-                      self.ports[2].ip, self.lvm1, self.ports[2].ofport),
-        ]
-        self.assertEqual(sorted(expected),
-                         sorted(mock_add_fdb_flow.call_args_list))
-
-    def test_fdb_add_tun_unavailable_ofport(self):
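-        # setup_tunnel_port returning 0 means the tunnel could not be set
-        # up, so no fdb flow should be added for that remote agent.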
-        del self.ofports[self.type_gre][self.ports[1].ip]
-        with mock.patch.object(self.fakeagent, 'setup_tunnel_port',
-                               return_value=0
-                               ) as mock_setup_tunnel_port,\
-                mock.patch.object(self.fakeagent, 'add_fdb_flow'
-                                  ) as mock_add_fdb_flow:
-            self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1,
-                                       self.agent_ports,
-                                       self._tunnel_port_lookup)
-        mock_setup_tunnel_port.assert_called_once_with(
-            self.fakebr, self.ports[1].ip, self.lvm1.network_type)
-        expected = [
-            mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
-                      self.ports[0].ip, self.lvm1, self.ports[0].ofport),
-            mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
-                      self.ports[2].ip, self.lvm1, self.ports[2].ofport),
-        ]
-        self.assertEqual(sorted(expected),
-                         sorted(mock_add_fdb_flow.call_args_list))
-
-    def test_fdb_remove_tun(self):
-        with mock.patch.object(
-            self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow:
-            self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1,
-                                          self.agent_ports,
-                                          self._tunnel_port_lookup)
-        expected = [
-            mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
-                      self.ports[0].ip, self.lvm1, self.ports[0].ofport),
-            mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip),
-                      self.ports[1].ip, self.lvm1, self.ports[1].ofport),
-            mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
-                      self.ports[2].ip, self.lvm1, self.ports[2].ofport),
-        ]
-        self.assertEqual(sorted(expected),
-                         sorted(mock_del_fdb_flow.call_args_list))
-
-    def test_fdb_remove_tun_flooding_entry(self):
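-        # Removing a flooding entry means the remote agent no longer has
-        # ports on this network, so the tunnel port gets cleaned up too.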
-        self.agent_ports[self.ports[1].ip] = [n_const.FLOODING_ENTRY]
-        with mock.patch.object(self.fakeagent, 'del_fdb_flow'
-                               ) as mock_del_fdb_flow,\
-                mock.patch.object(self.fakeagent, 'cleanup_tunnel_port'
-                                  ) as mock_cleanup_tunnel_port:
-            self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1,
-                                          self.agent_ports,
-                                          self._tunnel_port_lookup)
-        expected = [
-            mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
-                      self.ports[0].ip, self.lvm1, self.ports[0].ofport),
-            mock.call(self.fakebr,
-                      (n_const.FLOODING_ENTRY[0], n_const.FLOODING_ENTRY[1]),
-                      self.ports[1].ip, self.lvm1, self.ports[1].ofport),
-            mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
-                      self.ports[2].ip, self.lvm1, self.ports[2].ofport),
-        ]
-        self.assertEqual(sorted(expected),
-                         sorted(mock_del_fdb_flow.call_args_list))
-        mock_cleanup_tunnel_port.assert_called_once_with(
-            self.fakebr, self.ports[1].ofport, self.lvm1.network_type)
-
-    def test_fdb_remove_tun_non_existence_key_in_ofports(self):
-        del self.ofports[self.type_gre][self.ports[1].ip]
-        with mock.patch.object(
-            self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow:
-            self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1,
-                                          self.agent_ports,
-                                          self._tunnel_port_lookup)
-        expected = [
-            mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
-                      self.ports[0].ip, self.lvm1, self.ports[0].ofport),
-            mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
-                      self.ports[2].ip, self.lvm1, self.ports[2].ofport),
-        ]
-        self.assertEqual(sorted(expected),
-                         sorted(mock_del_fdb_flow.call_args_list))
-
-    def test_fdb_update(self):
-        fake__fdb_chg_ip = mock.Mock()
-        self.fakeagent._fdb_chg_ip = fake__fdb_chg_ip
-        self.fakeagent.fdb_update('context', self.upd_fdb_entry1)
-        fake__fdb_chg_ip.assert_called_once_with(
-            'context', self.upd_fdb_entry1_val)
-
-    def test_fdb_update_non_existence_method(self):
-        self.assertRaises(NotImplementedError,
-                          self.fakeagent.fdb_update,
-                          'context', self.upd_fdb_entry1)
-
-    def test__fdb_chg_ip(self):
-        m_setup_entry_for_arp_reply = mock.Mock()
-        self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply
-        self.fakeagent.fdb_chg_ip_tun('context', self.fakebr,
-                                      self.upd_fdb_entry1_val, self.local_ip,
-                                      self.local_vlan_map1)
-        expected = [
-            mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac,
-                      self.lvms[0].ip),
-            mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[1].mac,
-                      self.lvms[1].ip),
-            mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac,
-                      self.lvms[0].ip),
-            mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[1].mac,
-                      self.lvms[1].ip),
-            mock.call(self.fakebr, 'remove', self.lvm2.vlan, self.lvms[0].mac,
-                      self.lvms[0].ip),
-            mock.call(self.fakebr, 'add', self.lvm2.vlan, self.lvms[2].mac,
-                      self.lvms[2].ip),
-        ]
-        m_setup_entry_for_arp_reply.assert_has_calls(expected, any_order=True)
-
-    def test__fdb_chg_ip_no_lvm(self):
-        m_setup_entry_for_arp_reply = mock.Mock()
-        self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply
-        self.fakeagent.fdb_chg_ip_tun(
-            'context', self.fakebr, self.upd_fdb_entry1_val,
-            self.local_ip, {})
-        self.assertFalse(m_setup_entry_for_arp_reply.call_count)
-
-    def test__fdb_chg_ip_ip_is_local_ip(self):
-        upd_fdb_entry_val = {
-            self.lvms[0].net: {
-                self.local_ip: {
-                    'before': [(self.lvms[0].mac, self.lvms[0].ip)],
-                    'after': [(self.lvms[1].mac, self.lvms[1].ip)],
-                },
-            },
-        }
-        m_setup_entry_for_arp_reply = mock.Mock()
-        self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply
-        self.fakeagent.fdb_chg_ip_tun('context', self.fakebr,
-                                      upd_fdb_entry_val, self.local_ip,
-                                      self.local_vlan_map1)
-        self.assertFalse(m_setup_entry_for_arp_reply.call_count)
-
-    def test_fdb_chg_ip_tun_empty_before_after(self):
-        upd_fdb_entry_val = {
-            self.lvms[0].net: {
-                self.local_ip: {},
-            },
-        }
-        m_setup_entry_for_arp_reply = mock.Mock()
-        self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply
-        # Pass a non-local IP.
-        self.fakeagent.fdb_chg_ip_tun('context', self.fakebr,
-                                      upd_fdb_entry_val, "8.8.8.8",
-                                      self.local_vlan_map1)
-        self.assertFalse(m_setup_entry_for_arp_reply.call_count)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py
deleted file mode 100644 (file)
index 212e4af..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import constants
-from neutron import context
-from neutron.db import models_v2
-from neutron.extensions import portbindings
-from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
-from neutron.plugins.ml2 import models
-from neutron.tests.common import helpers
-from neutron.tests.unit import testlib_api
-
-
-class TestL2PopulationDBTestCase(testlib_api.SqlTestCase):
-    def setUp(self):
-        super(TestL2PopulationDBTestCase, self).setUp()
-        self.ctx = context.get_admin_context()
-
-    def test_get_agent_by_host(self):
-        # Register an L2 agent plus a bunch of other agents on the same host
-        helpers.register_l3_agent()
-        helpers.register_dhcp_agent()
-        helpers.register_ovs_agent()
-        agent = l2pop_db.get_agent_by_host(
-            self.ctx.session, helpers.HOST)
-        self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)
-
-    def test_get_agent_by_host_no_candidate(self):
-        # Register a bunch of non-L2 agents on the same host
-        helpers.register_l3_agent()
-        helpers.register_dhcp_agent()
-        agent = l2pop_db.get_agent_by_host(
-            self.ctx.session, helpers.HOST)
-        self.assertIsNone(agent)
-
-    def _setup_port_binding(self, network_id='network_id', dvr=True):
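-        # Create a network with a single port, bound either as a DVR
-        # interface (DVRPortBinding, status DOWN) or as a regular port
-        # (PortBinding).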
-        with self.ctx.session.begin(subtransactions=True):
-            self.ctx.session.add(models_v2.Network(id=network_id))
-            device_owner = constants.DEVICE_OWNER_DVR_INTERFACE if dvr else ''
-            self.ctx.session.add(models_v2.Port(
-                id='port_id',
-                network_id=network_id,
-                mac_address='00:11:22:33:44:55',
-                admin_state_up=True,
-                status=constants.PORT_STATUS_ACTIVE,
-                device_id='',
-                device_owner=device_owner))
-            port_binding_cls = (models.DVRPortBinding if dvr
-                                else models.PortBinding)
-            binding_kwarg = {
-                'port_id': 'port_id',
-                'host': helpers.HOST,
-                'vif_type': portbindings.VIF_TYPE_UNBOUND,
-                'vnic_type': portbindings.VNIC_NORMAL
-            }
-            if dvr:
-                binding_kwarg['router_id'] = 'router_id'
-                binding_kwarg['status'] = constants.PORT_STATUS_DOWN
-
-            self.ctx.session.add(port_binding_cls(**binding_kwarg))
-
-    def test_get_dvr_active_network_ports(self):
-        self._setup_port_binding()
-        # Register an L2 agent plus a bunch of other agents on the same host
-        helpers.register_l3_agent()
-        helpers.register_dhcp_agent()
-        helpers.register_ovs_agent()
-        tunnel_network_ports = l2pop_db.get_dvr_active_network_ports(
-            self.ctx.session, 'network_id')
-        self.assertEqual(1, len(tunnel_network_ports))
-        _, agent = tunnel_network_ports[0]
-        self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)
-
-    def test_get_dvr_active_network_ports_no_candidate(self):
-        self._setup_port_binding()
-        # Register a bunch of non-L2 agents on the same host
-        helpers.register_l3_agent()
-        helpers.register_dhcp_agent()
-        tunnel_network_ports = l2pop_db.get_dvr_active_network_ports(
-            self.ctx.session, 'network_id')
-        self.assertEqual(0, len(tunnel_network_ports))
-
-    def test_get_nondvr_active_network_ports(self):
-        self._setup_port_binding(dvr=False)
-        # Register an L2 agent plus a bunch of other agents on the same host
-        helpers.register_l3_agent()
-        helpers.register_dhcp_agent()
-        helpers.register_ovs_agent()
-        fdb_network_ports = l2pop_db.get_nondvr_active_network_ports(
-            self.ctx.session, 'network_id')
-        self.assertEqual(1, len(fdb_network_ports))
-        _, agent = fdb_network_ports[0]
-        self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)
-
-    def test_get_nondvr_active_network_ports_no_candidate(self):
-        self._setup_port_binding(dvr=False)
-        # Register a bunch of non-L2 agents on the same host
-        helpers.register_l3_agent()
-        helpers.register_dhcp_agent()
-        fdb_network_ports = l2pop_db.get_nondvr_active_network_ports(
-            self.ctx.session, 'network_id')
-        self.assertEqual(0, len(fdb_network_ports))
diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
deleted file mode 100644 (file)
index b9ae973..0000000
+++ /dev/null
@@ -1,936 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import testtools
-
-from neutron.common import constants
-from neutron.common import topics
-from neutron import context
-from neutron.extensions import portbindings
-from neutron.extensions import providernet as pnet
-from neutron import manager
-from neutron.plugins.ml2.common import exceptions as ml2_exc
-from neutron.plugins.ml2 import driver_context
-from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
-from neutron.plugins.ml2.drivers.l2pop import mech_driver as l2pop_mech_driver
-from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
-from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
-from neutron.plugins.ml2 import managers
-from neutron.plugins.ml2 import rpc
-from neutron.tests import base
-from neutron.tests.common import helpers
-from neutron.tests.unit.plugins.ml2 import test_plugin
-
-HOST = 'my_l2_host'
-HOST_2 = HOST + '_2'
-HOST_3 = HOST + '_3'
-HOST_4 = HOST + '_4'
-HOST_5 = HOST + '_5'
-
-
-NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
-DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-
-
-class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
-    _mechanism_drivers = ['openvswitch', 'fake_agent', 'l2population']
-
-    def setUp(self):
-        super(TestL2PopulationRpcTestCase, self).setUp()
-
-        self.adminContext = context.get_admin_context()
-
-        self.type_manager = managers.TypeManager()
-        self.notifier = rpc.AgentNotifierApi(topics.AGENT)
-        self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager)
-
-        net_arg = {pnet.NETWORK_TYPE: 'vxlan',
-                   pnet.SEGMENTATION_ID: '1'}
-        self._network = self._make_network(self.fmt, 'net1', True,
-                                           arg_list=(pnet.NETWORK_TYPE,
-                                                     pnet.SEGMENTATION_ID,),
-                                           **net_arg)
-
-        net_arg = {pnet.NETWORK_TYPE: 'vlan',
-                   pnet.PHYSICAL_NETWORK: 'physnet1',
-                   pnet.SEGMENTATION_ID: '2'}
-        self._network2 = self._make_network(self.fmt, 'net2', True,
-                                            arg_list=(pnet.NETWORK_TYPE,
-                                                      pnet.PHYSICAL_NETWORK,
-                                                      pnet.SEGMENTATION_ID,),
-                                            **net_arg)
-
-        net_arg = {pnet.NETWORK_TYPE: 'flat',
-                   pnet.PHYSICAL_NETWORK: 'noagent'}
-        self._network3 = self._make_network(self.fmt, 'net3', True,
-                                            arg_list=(pnet.NETWORK_TYPE,
-                                                      pnet.PHYSICAL_NETWORK,),
-                                            **net_arg)
-
-        notifier_patch = mock.patch(NOTIFIER)
-        notifier_patch.start()
-
-        self.fanout_topic = topics.get_topic_name(topics.AGENT,
-                                                  topics.L2POPULATION,
-                                                  topics.UPDATE)
-        fanout = ('neutron.plugins.ml2.drivers.l2pop.rpc.'
-                  'L2populationAgentNotifyAPI._notification_fanout')
-        fanout_patch = mock.patch(fanout)
-        self.mock_fanout = fanout_patch.start()
-
-        cast = ('neutron.plugins.ml2.drivers.l2pop.rpc.'
-                'L2populationAgentNotifyAPI._notification_host')
-        cast_patch = mock.patch(cast)
-        self.mock_cast = cast_patch.start()
-
-        uptime = ('neutron.plugins.ml2.drivers.l2pop.db.get_agent_uptime')
-        uptime_patch = mock.patch(uptime, return_value=190)
-        uptime_patch.start()
-
-    def _register_ml2_agents(self):
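-        # HOST, HOST_2 and HOST_4 are tunnel-capable OVS agents; HOST_3 has
-        # no tunnel types; HOST_5 is a fake agent that supports l2pop for
-        # vlan networks only.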
-        helpers.register_ovs_agent(host=HOST, tunneling_ip='20.0.0.1')
-        helpers.register_ovs_agent(host=HOST_2, tunneling_ip='20.0.0.2')
-        helpers.register_ovs_agent(host=HOST_3, tunneling_ip='20.0.0.3',
-                                   tunnel_types=[])
-        helpers.register_ovs_agent(host=HOST_4, tunneling_ip='20.0.0.4')
-        helpers.register_ovs_agent(host=HOST_5, tunneling_ip='20.0.0.5',
-                                   binary='neutron-fake-agent',
-                                   tunnel_types=[],
-                                   interface_mappings={'physnet1': 'eth9'},
-                                   agent_type=constants.AGENT_TYPE_OFA,
-                                   l2pop_network_types=['vlan'])
-
-    def test_port_info_compare(self):
-        # An assumption the code makes is that PortInfo compares equal to
-        # equivalent regular tuples.
-        self.assertEqual(("mac", "ip"), l2pop_rpc.PortInfo("mac", "ip"))
-
-        flooding_entry = l2pop_rpc.PortInfo(*constants.FLOODING_ENTRY)
-        self.assertEqual(constants.FLOODING_ENTRY, flooding_entry)
-
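-    # A minimal sketch of why the equality above holds, assuming PortInfo is
-    # implemented as a namedtuple (an illustration, not the actual
-    # definition): namedtuple subclasses tuple, so == falls back to plain
-    # tuple comparison.
-    def test_port_info_compare_namedtuple_sketch(self):
-        import collections
-        FakePortInfo = collections.namedtuple(
-            'FakePortInfo', ['mac_address', 'ip_address'])
-        self.assertEqual(('mac', 'ip'), FakePortInfo('mac', 'ip'))
-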
-    def test__unmarshall_fdb_entries(self):
-        entries = {'foouuid': {
-            'segment_id': 1001,
-            'ports': {'192.168.0.10': [['00:00:00:00:00:00', '0.0.0.0'],
-                                       ['fa:16:3e:ff:8c:0f', '10.0.0.6']]},
-            'network_type': 'vxlan'}}
-
-        mixin = l2population_rpc.L2populationRpcCallBackMixin
-        entries = mixin._unmarshall_fdb_entries(entries)
-
-        port_info_list = entries['foouuid']['ports']['192.168.0.10']
-        # Check that the lists have been properly converted to PortInfo
-        self.assertIsInstance(port_info_list[0], l2pop_rpc.PortInfo)
-        self.assertIsInstance(port_info_list[1], l2pop_rpc.PortInfo)
-        self.assertEqual(('00:00:00:00:00:00', '0.0.0.0'), port_info_list[0])
-        self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.6'), port_info_list[1])
-
-    def test__marshall_fdb_entries(self):
-        entries = {'foouuid': {
-            'segment_id': 1001,
-            'ports': {'192.168.0.10': [('00:00:00:00:00:00', '0.0.0.0'),
-                                       ('fa:16:3e:ff:8c:0f', '10.0.0.6')]},
-            'network_type': 'vxlan'}}
-
-        entries = l2pop_rpc.L2populationAgentNotifyAPI._marshall_fdb_entries(
-            entries)
-
-        port_info_list = entries['foouuid']['ports']['192.168.0.10']
-        # Check that the PortInfo tuples have been converted to lists
-        self.assertIsInstance(port_info_list[0], list)
-        self.assertIsInstance(port_info_list[1], list)
-        self.assertEqual(['00:00:00:00:00:00', '0.0.0.0'], port_info_list[0])
-        self.assertEqual(['fa:16:3e:ff:8c:0f', '10.0.0.6'], port_info_list[1])
-
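-    # Illustrative only: a rough sketch of the round trip the two tests
-    # above exercise. The real conversions live in l2pop_rpc and
-    # l2population_rpc; this hypothetical helper just shows the shape:
-    # PortInfo tuples become JSON-friendly lists and back, losslessly.
-    def _sketch_fdb_round_trip(self, ports):
-        marshalled = {ip: [list(info) for info in infos]
-                      for ip, infos in ports.items()}
-        return {ip: [l2pop_rpc.PortInfo(*item) for item in items]
-                for ip, items in marshalled.items()}
-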
-    def test_fdb_add_called(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                with self.port(subnet=subnet,
-                               arg_list=(portbindings.HOST_ID,),
-                               **host_arg):
-                    p1 = port1['port']
-
-                    device = 'tap' + p1['id']
-
-                    self.mock_fanout.reset_mock()
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST,
-                                                    device=device)
-
-                    p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
-                    expected = {p1['network_id']:
-                                {'ports':
-                                 {'20.0.0.1': [constants.FLOODING_ENTRY,
-                                               l2pop_rpc.PortInfo(
-                                                   p1['mac_address'],
-                                                   p1_ips[0])]},
-                                 'network_type': 'vxlan',
-                                 'segment_id': 1}}
-
-                    self.mock_fanout.assert_called_with(
-                        mock.ANY, 'add_fdb_entries', expected)
-
-    def test_fdb_add_not_called_type_local(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST_3}
-            with self.port(subnet=subnet,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                with self.port(subnet=subnet,
-                               arg_list=(portbindings.HOST_ID,),
-                               **host_arg):
-                    p1 = port1['port']
-
-                    device = 'tap' + p1['id']
-
-                    self.mock_fanout.reset_mock()
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST,
-                                                    device=device)
-
-                    self.assertFalse(self.mock_fanout.called)
-
-    def test_fdb_add_called_for_l2pop_network_types(self):
-        self._register_ml2_agents()
-
-        host = HOST_5
-        with self.subnet(network=self._network2) as subnet:
-            host_arg = {portbindings.HOST_ID: host}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                with self.port(subnet=subnet,
-                               arg_list=(portbindings.HOST_ID,),
-                               **host_arg):
-                    p1 = port1['port']
-
-                    device = 'tap' + p1['id']
-
-                    self.mock_fanout.reset_mock()
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=host,
-                                                    device=device)
-
-                    p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
-                    expected = {p1['network_id']:
-                                {'ports':
-                                 {'20.0.0.5': [constants.FLOODING_ENTRY,
-                                               l2pop_rpc.PortInfo(
-                                                   p1['mac_address'],
-                                                   p1_ips[0])]},
-                                 'network_type': 'vlan',
-                                 'segment_id': 2}}
-
-                    self.mock_fanout.assert_called_with(
-                        mock.ANY, 'add_fdb_entries', expected)
-
-    def test_fdb_called_for_active_ports(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                host_arg = {portbindings.HOST_ID: HOST_2}
-                with self.port(subnet=subnet,
-                               device_owner=DEVICE_OWNER_COMPUTE,
-                               arg_list=(portbindings.HOST_ID,),
-                               **host_arg):
-                    p1 = port1['port']
-
-                    device1 = 'tap' + p1['id']
-
-                    self.mock_cast.reset_mock()
-                    self.mock_fanout.reset_mock()
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST,
-                                                    device=device1)
-
-                    p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
-
-                    self.assertFalse(self.mock_cast.called)
-
-                    expected2 = {p1['network_id']:
-                                 {'ports':
-                                  {'20.0.0.1': [constants.FLOODING_ENTRY,
-                                                l2pop_rpc.PortInfo(
-                                                    p1['mac_address'],
-                                                    p1_ips[0])]},
-                                  'network_type': 'vxlan',
-                                  'segment_id': 1}}
-
-                    self.mock_fanout.assert_called_with(
-                        mock.ANY, 'add_fdb_entries', expected2)
-
-    def test_fdb_add_two_agents(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST,
-                        'admin_state_up': True}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID, 'admin_state_up',),
-                           **host_arg) as port1:
-                host_arg = {portbindings.HOST_ID: HOST_2,
-                            'admin_state_up': True}
-                with self.port(subnet=subnet,
-                               device_owner=DEVICE_OWNER_COMPUTE,
-                               arg_list=(portbindings.HOST_ID,
-                                         'admin_state_up',),
-                               **host_arg) as port2:
-                    p1 = port1['port']
-                    p2 = port2['port']
-
-                    device1 = 'tap' + p1['id']
-                    device2 = 'tap' + p2['id']
-
-                    self.mock_cast.reset_mock()
-                    self.mock_fanout.reset_mock()
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST_2,
-                                                    device=device2)
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST,
-                                                    device=device1)
-
-                    p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
-                    p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
-
-                    expected1 = {p1['network_id']:
-                                 {'ports':
-                                  {'20.0.0.2': [constants.FLOODING_ENTRY,
-                                                l2pop_rpc.PortInfo(
-                                                    p2['mac_address'],
-                                                    p2_ips[0])]},
-                                  'network_type': 'vxlan',
-                                  'segment_id': 1}}
-
-                    self.mock_cast.assert_called_with(mock.ANY,
-                                                      'add_fdb_entries',
-                                                      expected1, HOST)
-
-                    expected2 = {p1['network_id']:
-                                 {'ports':
-                                  {'20.0.0.1': [constants.FLOODING_ENTRY,
-                                                l2pop_rpc.PortInfo(
-                                                    p1['mac_address'],
-                                                    p1_ips[0])]},
-                                  'network_type': 'vxlan',
-                                  'segment_id': 1}}
-
-                    self.mock_fanout.assert_called_with(
-                        mock.ANY, 'add_fdb_entries', expected2)
-
-    def test_fdb_add_called_two_networks(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST_2}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                with self.subnet(cidr='10.1.0.0/24') as subnet2:
-                    with self.port(subnet=subnet2,
-                                   device_owner=DEVICE_OWNER_COMPUTE,
-                                   arg_list=(portbindings.HOST_ID,),
-                                   **host_arg):
-                        host_arg = {portbindings.HOST_ID: HOST}
-                        with self.port(subnet=subnet,
-                                       device_owner=DEVICE_OWNER_COMPUTE,
-                                       arg_list=(portbindings.HOST_ID,),
-                                       **host_arg) as port3:
-                            p1 = port1['port']
-                            p3 = port3['port']
-
-                            device1 = 'tap' + p1['id']
-                            device3 = 'tap' + p3['id']
-
-                            self.mock_cast.reset_mock()
-                            self.mock_fanout.reset_mock()
-                            self.callbacks.update_device_up(
-                                self.adminContext, agent_id=HOST_2,
-                                device=device1)
-                            self.callbacks.update_device_up(
-                                self.adminContext, agent_id=HOST,
-                                device=device3)
-
-                            p1_ips = [p['ip_address']
-                                      for p in p1['fixed_ips']]
-                            expected1 = {p1['network_id']:
-                                         {'ports':
-                                          {'20.0.0.2':
-                                           [constants.FLOODING_ENTRY,
-                                            l2pop_rpc.PortInfo(
-                                                p1['mac_address'],
-                                                p1_ips[0])]},
-                                         'network_type': 'vxlan',
-                                         'segment_id': 1}}
-
-                            self.mock_cast.assert_called_with(
-                                    mock.ANY, 'add_fdb_entries', expected1,
-                                    HOST)
-
-                            p3_ips = [p['ip_address']
-                                      for p in p3['fixed_ips']]
-                            expected2 = {p1['network_id']:
-                                         {'ports':
-                                          {'20.0.0.1':
-                                           [constants.FLOODING_ENTRY,
-                                            l2pop_rpc.PortInfo(
-                                                p3['mac_address'],
-                                                p3_ips[0])]},
-                                         'network_type': 'vxlan',
-                                         'segment_id': 1}}
-
-                            self.mock_fanout.assert_called_with(
-                                mock.ANY, 'add_fdb_entries', expected2)
-
-    def test_update_port_down(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                with self.port(subnet=subnet,
-                               device_owner=DEVICE_OWNER_COMPUTE,
-                               arg_list=(portbindings.HOST_ID,),
-                               **host_arg) as port2:
-                    p2 = port2['port']
-                    device2 = 'tap' + p2['id']
-
-                    self.mock_fanout.reset_mock()
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST,
-                                                    device=device2)
-
-                    p1 = port1['port']
-                    device1 = 'tap' + p1['id']
-
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST,
-                                                    device=device1)
-                    self.mock_fanout.reset_mock()
-                    self.callbacks.update_device_down(self.adminContext,
-                                                      agent_id=HOST,
-                                                      device=device2)
-
-                    p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
-                    expected = {p2['network_id']:
-                                {'ports':
-                                 {'20.0.0.1': [l2pop_rpc.PortInfo(
-                                               p2['mac_address'],
-                                               p2_ips[0])]},
-                                 'network_type': 'vxlan',
-                                 'segment_id': 1}}
-
-                    self.mock_fanout.assert_called_with(
-                        mock.ANY, 'remove_fdb_entries', expected)
-
-    def test_update_port_down_last_port_up(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg):
-                with self.port(subnet=subnet,
-                               device_owner=DEVICE_OWNER_COMPUTE,
-                               arg_list=(portbindings.HOST_ID,),
-                               **host_arg) as port2:
-                    p2 = port2['port']
-                    device2 = 'tap' + p2['id']
-
-                    self.mock_fanout.reset_mock()
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST,
-                                                    device=device2)
-
-                    self.callbacks.update_device_down(self.adminContext,
-                                                      agent_id=HOST,
-                                                      device=device2)
-
-                    p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
-                    expected = {p2['network_id']:
-                                {'ports':
-                                 {'20.0.0.1': [constants.FLOODING_ENTRY,
-                                               l2pop_rpc.PortInfo(
-                                                    p2['mac_address'],
-                                                    p2_ips[0])]},
-                                 'network_type': 'vxlan',
-                                 'segment_id': 1}}
-
-                    self.mock_fanout.assert_called_with(
-                        mock.ANY, 'remove_fdb_entries', expected)
-
-    def test_delete_port(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port:
-                p1 = port['port']
-                device = 'tap' + p1['id']
-
-                self.mock_fanout.reset_mock()
-                self.callbacks.update_device_up(self.adminContext,
-                                                agent_id=HOST,
-                                                device=device)
-
-                with self.port(subnet=subnet,
-                               device_owner=DEVICE_OWNER_COMPUTE,
-                               arg_list=(portbindings.HOST_ID,),
-                               **host_arg) as port2:
-                    p2 = port2['port']
-                    device2 = 'tap' + p2['id']
-
-                    self.mock_fanout.reset_mock()
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST,
-                                                    device=device2)
-                self._delete('ports', port2['port']['id'])
-                p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
-                expected = {p2['network_id']:
-                            {'ports':
-                             {'20.0.0.1': [l2pop_rpc.PortInfo(
-                                           p2['mac_address'],
-                                           p2_ips[0])]},
-                             'network_type': 'vxlan',
-                             'segment_id': 1}}
-
-                self.mock_fanout.assert_any_call(
-                    mock.ANY, 'remove_fdb_entries', expected)
-
-    def test_delete_port_last_port_up(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg):
-                with self.port(subnet=subnet,
-                               device_owner=DEVICE_OWNER_COMPUTE,
-                               arg_list=(portbindings.HOST_ID,),
-                               **host_arg) as port:
-                    p1 = port['port']
-
-                    device = 'tap' + p1['id']
-
-                    self.callbacks.update_device_up(self.adminContext,
-                                                    agent_id=HOST,
-                                                    device=device)
-                self._delete('ports', port['port']['id'])
-                p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
-                expected = {p1['network_id']:
-                            {'ports':
-                             {'20.0.0.1': [constants.FLOODING_ENTRY,
-                                           l2pop_rpc.PortInfo(
-                                               p1['mac_address'],
-                                               p1_ips[0])]},
-                             'network_type': 'vxlan',
-                             'segment_id': 1}}
-
-                self.mock_fanout.assert_any_call(
-                    mock.ANY, 'remove_fdb_entries', expected)
-
-    def test_mac_addr_changed(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST_5}
-            with self.port(subnet=subnet,
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                p1 = port1['port']
-
-                self.mock_fanout.reset_mock()
-                device = 'tap' + p1['id']
-
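-                # Flip the last octet so the new MAC is guaranteed to differ
-                # from the old one.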
-                old_mac = p1['mac_address']
-                mac = old_mac.split(':')
-                mac[5] = '01' if mac[5] != '01' else '00'
-                new_mac = ':'.join(mac)
-                data = {'port': {'mac_address': new_mac,
-                                 portbindings.HOST_ID: HOST}}
-                req = self.new_update_request('ports', data, p1['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                self.assertIn('port', res)
-                self.assertEqual(new_mac, res['port']['mac_address'])
-
-                # The port was not bound yet, so no fdb call is expected.
-                self.assertFalse(self.mock_fanout.called)
-
-                self.callbacks.update_device_up(self.adminContext,
-                                                agent_id=HOST,
-                                                device=device)
-
-                self.assertEqual(1, self.mock_fanout.call_count)
-                add_expected = {
-                    p1['network_id']: {
-                        'segment_id': 1,
-                        'network_type': 'vxlan',
-                        'ports': {
-                            '20.0.0.1': [
-                                l2pop_rpc.PortInfo('00:00:00:00:00:00',
-                                                   '0.0.0.0'),
-                                l2pop_rpc.PortInfo(new_mac, '10.0.0.2')
-                            ]
-                        }
-                    }
-                }
-                self.mock_fanout.assert_called_with(
-                    mock.ANY, 'add_fdb_entries', add_expected)
-
-    def test_fixed_ips_changed(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST}
-            with self.port(subnet=subnet, cidr='10.0.0.0/24',
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                p1 = port1['port']
-
-                device = 'tap' + p1['id']
-
-                self.callbacks.update_device_up(self.adminContext,
-                                                agent_id=HOST,
-                                                device=device)
-
-                self.mock_fanout.reset_mock()
-
-                data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
-                                               {'ip_address': '10.0.0.10'}]}}
-                req = self.new_update_request('ports', data, p1['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                ips = res['port']['fixed_ips']
-                self.assertEqual(2, len(ips))
-
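-                # update_fdb_entries payloads are keyed 'chg_ip', then by
-                # network and agent IP, with optional 'before'/'after' lists
-                # describing the address change.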
-                add_expected = {'chg_ip':
-                                {p1['network_id']:
-                                 {'20.0.0.1':
-                                  {'after': [(p1['mac_address'],
-                                              '10.0.0.10')]}}}}
-
-                self.mock_fanout.assert_any_call(
-                    mock.ANY, 'update_fdb_entries', add_expected)
-
-                self.mock_fanout.reset_mock()
-
-                data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
-                                               {'ip_address': '10.0.0.16'}]}}
-                req = self.new_update_request('ports', data, p1['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                ips = res['port']['fixed_ips']
-                self.assertEqual(2, len(ips))
-
-                upd_expected = {'chg_ip':
-                                {p1['network_id']:
-                                 {'20.0.0.1':
-                                  {'before': [(p1['mac_address'],
-                                               '10.0.0.10')],
-                                   'after': [(p1['mac_address'],
-                                              '10.0.0.16')]}}}}
-
-                self.mock_fanout.assert_any_call(
-                    mock.ANY, 'update_fdb_entries', upd_expected)
-
-                self.mock_fanout.reset_mock()
-
-                data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}}
-                req = self.new_update_request('ports', data, p1['id'])
-                res = self.deserialize(self.fmt, req.get_response(self.api))
-                ips = res['port']['fixed_ips']
-                self.assertEqual(1, len(ips))
-
-                del_expected = {'chg_ip':
-                                {p1['network_id']:
-                                 {'20.0.0.1':
-                                  {'before': [(p1['mac_address'],
-                                               '10.0.0.2')]}}}}
-
-                self.mock_fanout.assert_any_call(
-                    mock.ANY, 'update_fdb_entries', del_expected)
-
-    def test_no_fdb_updates_without_port_updates(self):
-        self._register_ml2_agents()
-
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST}
-            with self.port(subnet=subnet, cidr='10.0.0.0/24',
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                p1 = port1['port']
-
-                device = 'tap' + p1['id']
-
-                self.callbacks.update_device_up(self.adminContext,
-                                                agent_id=HOST,
-                                                device=device)
-                p1['status'] = 'ACTIVE'
-                self.mock_fanout.reset_mock()
-
-                plugin = manager.NeutronManager.get_plugin()
-                plugin.update_port(self.adminContext, p1['id'], port1)
-
-                self.assertFalse(self.mock_fanout.called)
-
-    def test_get_device_details_port_id(self):
-        self._register_ml2_agents()
-        host_arg = {portbindings.HOST_ID: HOST}
-        with self.port(arg_list=(portbindings.HOST_ID,),
-                       **host_arg) as port:
-            port_id = port['port']['id']
-            # Ensure each device format resolves to the correct port_id.
-            formats = ['tap' + port_id[0:8], port_id,
-                       port['port']['mac_address']]
-            for device in formats:
-                details = self.callbacks.get_device_details(
-                    self.adminContext, device=device,
-                    agent_id=HOST_2)
-                self.assertEqual(port_id, details['port_id'])
-
-    def _update_and_check_portbinding(self, port_id, host_id):
-        data = {'port': {portbindings.HOST_ID: host_id}}
-        req = self.new_update_request('ports', data, port_id)
-        res = self.deserialize(self.fmt,
-                               req.get_response(self.api))
-        self.assertEqual(host_id, res['port'][portbindings.HOST_ID])
-
-    def _test_host_changed(self, twice):
-        self._register_ml2_agents()
-        with self.subnet(network=self._network) as subnet:
-            host_arg = {portbindings.HOST_ID: HOST}
-            with self.port(subnet=subnet, cidr='10.0.0.0/24',
-                           device_owner=DEVICE_OWNER_COMPUTE,
-                           arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port1:
-                p1 = port1['port']
-                device1 = 'tap' + p1['id']
-                self.callbacks.update_device_up(
-                    self.adminContext,
-                    agent_id=HOST,
-                    device=device1)
-                if twice:
-                    self._update_and_check_portbinding(p1['id'], HOST_4)
-                self._update_and_check_portbinding(p1['id'], HOST_2)
-                self.mock_fanout.reset_mock()
-                # NOTE(yamamoto): see bug #1441488
-                self.adminContext.session.expire_all()
-                self.callbacks.get_device_details(
-                    self.adminContext,
-                    device=device1,
-                    agent_id=HOST_2)
-                p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
-                expected = {p1['network_id']:
-                            {'ports':
-                             {'20.0.0.1': [constants.FLOODING_ENTRY,
-                                           l2pop_rpc.PortInfo(
-                                               p1['mac_address'],
-                                               p1_ips[0])]},
-                             'network_type': 'vxlan',
-                             'segment_id': 1}}
-
-                self.mock_fanout.assert_called_with(
-                    mock.ANY, 'remove_fdb_entries', expected)
-
-    def test_host_changed(self):
-        self._test_host_changed(twice=False)
-
-    def test_host_changed_twice(self):
-        self._test_host_changed(twice=True)
-
-    def test_delete_port_invokes_update_device_down(self):
-        l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
-        l2pop_mech.L2PopulationAgentNotify = mock.Mock()
-        l2pop_mech.rpc_ctx = mock.Mock()
-        with mock.patch.object(l2pop_mech,
-                               '_get_agent_fdb',
-                               return_value=None) as upd_port_down,\
-                mock.patch.object(l2pop_mech.L2PopulationAgentNotify,
-                                  'remove_fdb_entries'):
-            l2pop_mech.delete_port_postcommit(mock.Mock())
-            self.assertTrue(upd_port_down.called)
-
-    def test_delete_unbound_port(self):
-        l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
-        l2pop_mech.initialize()
-
-        with self.port() as port:
-            port_context = driver_context.PortContext(
-                self.driver, self.context, port['port'],
-                self.driver.get_network(
-                    self.context, port['port']['network_id']),
-                None, None)
-            # The point is to provide coverage and to assert that no exceptions
-            # are raised.
-            l2pop_mech.delete_port_postcommit(port_context)
-
-    def test_fixed_ips_change_unbound_port_no_rpc(self):
-        l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
-        l2pop_mech.initialize()
-        l2pop_mech.L2populationAgentNotify = mock.Mock()
-
-        with self.port() as port:
-            port_context = driver_context.PortContext(
-                self.driver, self.context, port['port'],
-                self.driver.get_network(
-                    self.context, port['port']['network_id']),
-                None, None)
-            l2pop_mech._fixed_ips_changed(
-                port_context, None, port['port'], (set(['10.0.0.1']), set()))
-
-        # There's no need to send an RPC update if the IP address for an
-        # unbound port changed.
-        self.assertFalse(
-            l2pop_mech.L2populationAgentNotify.update_fdb_entries.called)
-
-
-class TestL2PopulationMechDriver(base.BaseTestCase):
-
-    def _test_get_tunnels(self, agent_ip, exclude_host=True):
-        mech_driver = l2pop_mech_driver.L2populationMechanismDriver()
-        agent = mock.Mock()
-        agent.host = HOST
-        network_ports = ((None, agent),)
-        with mock.patch.object(l2pop_db, 'get_agent_ip',
-                               return_value=agent_ip):
-            excluded_host = HOST + '-EXCLUDE' if exclude_host else HOST
-            return mech_driver._get_tunnels(network_ports, excluded_host)
-
-    def test_get_tunnels(self):
-        tunnels = self._test_get_tunnels('20.0.0.1')
-        self.assertIn('20.0.0.1', tunnels)
-
-    def test_get_tunnels_no_ip(self):
-        tunnels = self._test_get_tunnels(None)
-        self.assertEqual(0, len(tunnels))
-
-    def test_get_tunnels_dont_exclude_host(self):
-        tunnels = self._test_get_tunnels(None, exclude_host=False)
-        self.assertEqual(0, len(tunnels))
-
-    def _test_create_agent_fdb(self, fdb_network_ports, agent_ips):
-        mech_driver = l2pop_mech_driver.L2populationMechanismDriver()
-        tunnel_network_ports, tunnel_agent = (
-            self._mock_network_ports(HOST + '1', None))
-        agent_ips[tunnel_agent] = '10.0.0.1'
-
-        def agent_ip_side_effect(agent):
-            return agent_ips[agent]
-
-        with mock.patch.object(l2pop_db, 'get_agent_ip',
-                               side_effect=agent_ip_side_effect),\
-                mock.patch.object(l2pop_db, 'get_nondvr_active_network_ports',
-                                  return_value=fdb_network_ports),\
-                mock.patch.object(l2pop_db, 'get_dvr_active_network_ports',
-                                  return_value=tunnel_network_ports):
-            session = mock.Mock()
-            agent = mock.Mock()
-            agent.host = HOST
-            segment = {'segmentation_id': 1, 'network_type': 'vxlan'}
-            return mech_driver._create_agent_fdb(session,
-                                                 agent,
-                                                 segment,
-                                                 'network_id')
-
-    def _mock_network_ports(self, host_name, binding):
-        agent = mock.Mock()
-        agent.host = host_name
-        return [(binding, agent)], agent
-
-    def test_create_agent_fdb(self):
-        binding = mock.Mock()
-        binding.port = {'mac_address': '00:00:DE:AD:BE:EF',
-                        'fixed_ips': [{'ip_address': '1.1.1.1'}]}
-        fdb_network_ports, fdb_agent = (
-            self._mock_network_ports(HOST + '2', binding))
-        agent_ips = {fdb_agent: '20.0.0.1'}
-
-        agent_fdb = self._test_create_agent_fdb(fdb_network_ports,
-                                                agent_ips)
-        result = agent_fdb['network_id']
-
-        expected_result = {'segment_id': 1,
-                           'network_type': 'vxlan',
-                           'ports':
-                           {'10.0.0.1':
-                            [constants.FLOODING_ENTRY],
-                            '20.0.0.1':
-                            [constants.FLOODING_ENTRY,
-                             l2pop_rpc.PortInfo(
-                                 mac_address='00:00:DE:AD:BE:EF',
-                                 ip_address='1.1.1.1')]}}
-        self.assertEqual(expected_result, result)
-
-    def test_create_agent_fdb_only_tunnels(self):
-        agent_fdb = self._test_create_agent_fdb([], {})
-        result = agent_fdb['network_id']
-
-        expected_result = {'segment_id': 1,
-                           'network_type': 'vxlan',
-                           'ports':
-                           {'10.0.0.1':
-                            [constants.FLOODING_ENTRY]}}
-        self.assertEqual(expected_result, result)
-
-    def test_update_port_precommit_mac_address_changed_raises(self):
-        port = {'status': u'ACTIVE',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': u'12:34:56:78:4b:0e',
-                'id': u'1'}
-
-        original_port = port.copy()
-        original_port['mac_address'] = u'12:34:56:78:4b:0f'
-
-        with mock.patch.object(driver_context.db, 'get_network_segments'):
-            ctx = driver_context.PortContext(mock.Mock(),
-                                             mock.Mock(),
-                                             port,
-                                             mock.MagicMock(),
-                                             mock.Mock(),
-                                             None,
-                                             original_port=original_port)
-
-        mech_driver = l2pop_mech_driver.L2populationMechanismDriver()
-        with testtools.ExpectedException(ml2_exc.MechanismDriverError):
-            mech_driver.update_port_precommit(ctx)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py
deleted file mode 100644 (file)
index 83a13a3..0000000
+++ /dev/null
@@ -1,1383 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import sys
-
-import mock
-from oslo_config import cfg
-
-from neutron.agent.linux import bridge_lib
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect
-from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
-    import constants as lconst
-from neutron.plugins.ml2.drivers.linuxbridge.agent \
-    import linuxbridge_neutron_agent
-from neutron.tests import base
-
-LOCAL_IP = '192.168.0.33'
-LOCAL_IPV6 = '2001:db8:1::33'
-VXLAN_GROUPV6 = 'ff05::/120'
-PORT_1 = 'abcdef01-12ddssdfds-fdsfsd'
-DEVICE_1 = 'tapabcdef01-12'
-NETWORK_ID = '57653b20-ed5b-4ed0-a31d-06f84e3fd909'
-BRIDGE_MAPPING_VALUE = 'br-eth2'
-BRIDGE_MAPPINGS = {'physnet0': BRIDGE_MAPPING_VALUE}
-INTERFACE_MAPPINGS = {'physnet1': 'eth1'}
-FAKE_DEFAULT_DEV = mock.Mock()
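-# 'name' is treated specially by the Mock constructor, so assign it here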
-FAKE_DEFAULT_DEV.name = 'eth1'
-PORT_DATA = {
-    "port_id": PORT_1,
-    "device": DEVICE_1
-}
-
-
-class FakeIpLinkCommand(object):
-    def set_up(self):
-        pass
-
-
-class FakeIpDevice(object):
-    def __init__(self):
-        self.link = FakeIpLinkCommand()
-
-
-def get_linuxbridge_manager(bridge_mappings, interface_mappings):
-    with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip',
-                           return_value=FAKE_DEFAULT_DEV),\
-            mock.patch.object(ip_lib, 'device_exists', return_value=True),\
-            mock.patch.object(linuxbridge_neutron_agent.LinuxBridgeManager,
-                              'check_vxlan_support'):
-        cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN')
-        return linuxbridge_neutron_agent.LinuxBridgeManager(
-            bridge_mappings, interface_mappings)
-
-
-class TestLinuxBridge(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestLinuxBridge, self).setUp()
-        self.linux_bridge = get_linuxbridge_manager(
-            BRIDGE_MAPPINGS, INTERFACE_MAPPINGS)
-
-    def test_ensure_physical_in_bridge_invalid(self):
-        result = self.linux_bridge.ensure_physical_in_bridge('network_id',
-                                                             p_const.TYPE_VLAN,
-                                                             'physnetx',
-                                                             7)
-        self.assertFalse(result)
-
-    def test_ensure_physical_in_bridge_flat(self):
-        with mock.patch.object(self.linux_bridge,
-                               'ensure_flat_bridge') as flat_bridge_func:
-            self.linux_bridge.ensure_physical_in_bridge(
-                'network_id', p_const.TYPE_FLAT, 'physnet1', None)
-        self.assertTrue(flat_bridge_func.called)
-
-    def test_ensure_physical_in_bridge_vlan(self):
-        with mock.patch.object(self.linux_bridge,
-                               'ensure_vlan_bridge') as vlan_bridge_func:
-            self.linux_bridge.ensure_physical_in_bridge(
-                'network_id', p_const.TYPE_VLAN, 'physnet1', 7)
-        self.assertTrue(vlan_bridge_func.called)
-
-    def test_ensure_physical_in_bridge_vxlan(self):
-        self.linux_bridge.vxlan_mode = lconst.VXLAN_UCAST
-        with mock.patch.object(self.linux_bridge,
-                               'ensure_vxlan_bridge') as vxlan_bridge_func:
-            self.linux_bridge.ensure_physical_in_bridge(
-                'network_id', 'vxlan', 'physnet1', 7)
-        self.assertTrue(vxlan_bridge_func.called)
-
-
-class TestLinuxBridgeAgent(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestLinuxBridgeAgent, self).setUp()
-        # disable setting up periodic state reporting
-        cfg.CONF.set_override('report_interval', 0, 'AGENT')
-        cfg.CONF.set_override('prevent_arp_spoofing', False, 'AGENT')
-        cfg.CONF.set_default('firewall_driver',
-                             'neutron.agent.firewall.NoopFirewallDriver',
-                             group='SECURITYGROUP')
-        cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT')
-        cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN')
-        self.get_devices_p = mock.patch.object(ip_lib.IPWrapper, 'get_devices')
-        self.get_devices = self.get_devices_p.start()
-        self.get_devices.return_value = [ip_lib.IPDevice('eth77')]
-        self.get_mac_p = mock.patch('neutron.agent.linux.utils.'
-                                    'get_interface_mac')
-        self.get_mac = self.get_mac_p.start()
-        self.get_mac.return_value = '00:00:00:00:00:01'
-        self.get_bridge_names_p = mock.patch.object(bridge_lib,
-                                                    'get_bridge_names')
-        self.get_bridge_names = self.get_bridge_names_p.start()
-        self.get_bridge_names.return_value = ["br-int", "brq1"]
-        with mock.patch.object(ip_lib.IPWrapper,
-                               'get_device_by_ip',
-                               return_value=FAKE_DEFAULT_DEV):
-            self.agent = linuxbridge_neutron_agent.LinuxBridgeNeutronAgentRPC(
-                {}, {}, 0, cfg.CONF.AGENT.quitting_rpc_timeout)
-            with mock.patch.object(self.agent, "daemon_loop"),\
-                    mock.patch.object(
-                        linuxbridge_neutron_agent.LinuxBridgeManager,
-                        'check_vxlan_support'):
-                self.agent.start()
-
-    def test_treat_devices_removed_with_existed_device(self):
-        agent = self.agent
-        agent._ensure_port_admin_state = mock.Mock()
-        devices = [DEVICE_1]
-        agent.network_ports[NETWORK_ID].append(PORT_DATA)
-        with mock.patch.object(agent.plugin_rpc,
-                               "update_device_down") as fn_udd,\
-                mock.patch.object(agent.sg_agent,
-                                  "remove_devices_filter") as fn_rdf,\
-                mock.patch.object(agent.ext_manager,
-                                  "delete_port") as ext_mgr_delete_port:
-            fn_udd.return_value = {'device': DEVICE_1,
-                                   'exists': True}
-            with mock.patch.object(linuxbridge_neutron_agent.LOG,
-                                   'info') as log:
-                resync = agent.treat_devices_removed(devices)
-                self.assertEqual(2, log.call_count)
-                self.assertFalse(resync)
-                self.assertTrue(fn_udd.called)
-                self.assertTrue(fn_rdf.called)
-                self.assertTrue(ext_mgr_delete_port.called)
-                self.assertNotIn(PORT_DATA, agent.network_ports[NETWORK_ID])
-
-    def test_treat_devices_removed_with_not_existed_device(self):
-        agent = self.agent
-        devices = [DEVICE_1]
-        agent.network_ports[NETWORK_ID].append(PORT_DATA)
-        with mock.patch.object(agent.plugin_rpc,
-                               "update_device_down") as fn_udd,\
-                mock.patch.object(agent.sg_agent,
-                                  "remove_devices_filter") as fn_rdf,\
-                mock.patch.object(agent.ext_manager,
-                                  "delete_port") as ext_mgr_delete_port:
-            fn_udd.return_value = {'device': DEVICE_1,
-                                   'exists': False}
-            with mock.patch.object(linuxbridge_neutron_agent.LOG,
-                                   'debug') as log:
-                resync = agent.treat_devices_removed(devices)
-                self.assertEqual(1, log.call_count)
-                self.assertFalse(resync)
-                self.assertTrue(fn_udd.called)
-                self.assertTrue(fn_rdf.called)
-                self.assertTrue(ext_mgr_delete_port.called)
-                self.assertNotIn(PORT_DATA, agent.network_ports[NETWORK_ID])
-
-    def test_treat_devices_removed_failed(self):
-        agent = self.agent
-        devices = [DEVICE_1]
-        agent.network_ports[NETWORK_ID].append(PORT_DATA)
-        with mock.patch.object(agent.plugin_rpc,
-                               "update_device_down") as fn_udd,\
-                mock.patch.object(agent.sg_agent,
-                                  "remove_devices_filter") as fn_rdf,\
-                mock.patch.object(agent.ext_manager,
-                                  "delete_port") as ext_mgr_delete_port:
-            fn_udd.side_effect = Exception()
-            resync = agent.treat_devices_removed(devices)
-            self.assertTrue(resync)
-            self.assertTrue(fn_udd.called)
-            self.assertTrue(fn_rdf.called)
-            self.assertTrue(ext_mgr_delete_port.called)
-            self.assertNotIn(PORT_DATA, agent.network_ports[NETWORK_ID])
-
-    def test_treat_devices_removed_with_prevent_arp_spoofing_true(self):
-        agent = self.agent
-        agent.prevent_arp_spoofing = True
-        agent._ensure_port_admin_state = mock.Mock()
-        devices = [DEVICE_1]
-        with mock.patch.object(agent.plugin_rpc,
-                               "update_device_down") as fn_udd,\
-                mock.patch.object(agent.sg_agent,
-                                  "remove_devices_filter"):
-            fn_udd.return_value = {'device': DEVICE_1,
-                                   'exists': True}
-            with mock.patch.object(arp_protect,
-                                   'delete_arp_spoofing_protection') as de_arp:
-                agent.treat_devices_removed(devices)
-                de_arp.assert_called_with(devices)
-
-    def _test_scan_devices(self, previous, updated,
-                           fake_current, expected, sync):
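-        # scan_devices() diffs the current tap devices against the previous
-        # snapshot and reports them as 'current'/'updated'/'added'/'removed'
-        # sets; sync=True makes every current device show up as added again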
-        self.agent.br_mgr = mock.Mock()
-        self.agent.br_mgr.get_tap_devices.return_value = fake_current
-
-        self.agent.updated_devices = updated
-        results = self.agent.scan_devices(previous, sync)
-        self.assertEqual(expected, results)
-
-    def test_scan_devices_no_changes(self):
-        previous = {'current': set([1, 2]),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set()}
-        fake_current = set([1, 2])
-        updated = set()
-        expected = {'current': set([1, 2]),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set()}
-
-        self._test_scan_devices(previous, updated, fake_current, expected,
-                                sync=False)
-
-    def test_scan_devices_added_removed(self):
-        previous = {'current': set([1, 2]),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set()}
-        fake_current = set([2, 3])
-        updated = set()
-        expected = {'current': set([2, 3]),
-                    'updated': set(),
-                    'added': set([3]),
-                    'removed': set([1])}
-
-        self._test_scan_devices(previous, updated, fake_current, expected,
-                                sync=False)
-
-    def test_scan_devices_removed_retried_on_sync(self):
-        previous = {'current': set([2, 3]),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set([1])}
-        fake_current = set([2, 3])
-        updated = set()
-        expected = {'current': set([2, 3]),
-                    'updated': set(),
-                    'added': set([2, 3]),
-                    'removed': set([1])}
-
-        self._test_scan_devices(previous, updated, fake_current, expected,
-                                sync=True)
-
-    def test_scan_devices_vanished_removed_on_sync(self):
-        previous = {'current': set([2, 3]),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set([1])}
-        # Device 2 disappeared.
-        fake_current = set([3])
-        updated = set()
-        # Device 1 should be retried.
-        expected = {'current': set([3]),
-                    'updated': set(),
-                    'added': set([3]),
-                    'removed': set([1, 2])}
-
-        self._test_scan_devices(previous, updated, fake_current, expected,
-                                sync=True)
-
-    def test_scan_devices_updated(self):
-        previous = {'current': set([1, 2]),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set()}
-        fake_current = set([1, 2])
-        updated = set([1])
-        expected = {'current': set([1, 2]),
-                    'updated': set([1]),
-                    'added': set(),
-                    'removed': set()}
-
-        self._test_scan_devices(previous, updated, fake_current, expected,
-                                sync=False)
-
-    def test_scan_devices_updated_non_existing(self):
-        previous = {'current': set([1, 2]),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set()}
-        fake_current = set([1, 2])
-        updated = set([3])
-        expected = {'current': set([1, 2]),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set()}
-
-        self._test_scan_devices(previous, updated, fake_current, expected,
-                                sync=False)
-
-    def test_scan_devices_updated_deleted_concurrently(self):
-        previous = {
-            'current': set([1, 2]),
-            'updated': set(),
-            'added': set(),
-            'removed': set()
-        }
-        # Device 2 disappeared.
-        fake_current = set([1])
-        # Device 2 got a concurrent update via network_update
-        updated = set([2])
-        expected = {
-            'current': set([1]),
-            'updated': set(),
-            'added': set(),
-            'removed': set([2])
-        }
-        self._test_scan_devices(
-            previous, updated, fake_current, expected, sync=False
-        )
-
-    def test_scan_devices_updated_on_sync(self):
-        previous = {'current': set([1, 2]),
-                    'updated': set([1]),
-                    'added': set(),
-                    'removed': set()}
-        fake_current = set([1, 2])
-        updated = set([2])
-        expected = {'current': set([1, 2]),
-                    'updated': set([1, 2]),
-                    'added': set([1, 2]),
-                    'removed': set()}
-
-        self._test_scan_devices(previous, updated, fake_current, expected,
-                                sync=True)
-
-    def test_scan_devices_with_prevent_arp_spoofing_true(self):
-        self.agent.prevent_arp_spoofing = True
-        previous = None
-        fake_current = set([1, 2])
-        updated = set()
-        expected = {'current': set([1, 2]),
-                    'updated': set(),
-                    'added': set([1, 2]),
-                    'removed': set()}
-        with mock.patch.object(arp_protect,
-                               'delete_unreferenced_arp_protection') as de_arp:
-            self._test_scan_devices(previous, updated, fake_current, expected,
-                                    sync=False)
-            de_arp.assert_called_with(fake_current)
-
-    def test_process_network_devices(self):
-        agent = self.agent
-        device_info = {'current': set(),
-                       'added': set(['tap3', 'tap4']),
-                       'updated': set(['tap2', 'tap3']),
-                       'removed': set(['tap1'])}
-        agent.sg_agent.setup_port_filters = mock.Mock()
-        agent.treat_devices_added_updated = mock.Mock(return_value=False)
-        agent.treat_devices_removed = mock.Mock(return_value=False)
-
-        agent.process_network_devices(device_info)
-
-        agent.sg_agent.setup_port_filters.assert_called_with(
-                device_info['added'],
-                device_info['updated'])
-        agent.treat_devices_added_updated.assert_called_with(set(['tap2',
-                                                                  'tap3',
-                                                                  'tap4']))
-        agent.treat_devices_removed.assert_called_with(set(['tap1']))
-
-    def test_treat_devices_added_updated_admin_state_up_true(self):
-        agent = self.agent
-        mock_details = {'device': 'dev123',
-                        'port_id': 'port123',
-                        'network_id': 'net123',
-                        'admin_state_up': True,
-                        'network_type': 'vlan',
-                        'segmentation_id': 100,
-                        'physical_network': 'physnet1',
-                        'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX}
-        mock_port_data = {
-            'port_id': mock_details['port_id'],
-            'device': mock_details['device']
-        }
-        agent.ext_manager = mock.Mock()
-        agent.plugin_rpc = mock.Mock()
-        agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
-        agent.br_mgr = mock.Mock()
-        agent.br_mgr.add_interface.return_value = True
-        agent._ensure_port_admin_state = mock.Mock()
-        resync_needed = agent.treat_devices_added_updated(set(['tap1']))
-
-        self.assertFalse(resync_needed)
-        agent.br_mgr.add_interface.assert_called_with(
-                                      'net123', 'vlan', 'physnet1',
-                                      100, 'port123',
-                                      constants.DEVICE_OWNER_NETWORK_PREFIX)
-        self.assertTrue(agent.plugin_rpc.update_device_up.called)
-        self.assertTrue(agent.ext_manager.handle_port.called)
-        self.assertIn(mock_port_data,
-                      agent.network_ports[mock_details['network_id']])
-
-    def test_treat_devices_added_updated_prevent_arp_spoofing_true(self):
-        agent = self.agent
-        agent.prevent_arp_spoofing = True
-        mock_details = {'device': 'dev123',
-                        'port_id': 'port123',
-                        'network_id': 'net123',
-                        'admin_state_up': True,
-                        'network_type': 'vlan',
-                        'segmentation_id': 100,
-                        'physical_network': 'physnet1',
-                        'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX}
-        tap_name = constants.TAP_DEVICE_PREFIX + mock_details['port_id']
-        agent.plugin_rpc = mock.Mock()
-        agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
-        agent.br_mgr = mock.Mock()
-        agent.br_mgr.add_interface.return_value = True
-        agent.br_mgr.get_tap_device_name.return_value = tap_name
-        agent._ensure_port_admin_state = mock.Mock()
-        with mock.patch.object(arp_protect,
-                               'setup_arp_spoofing_protection') as set_arp:
-            agent.treat_devices_added_updated(set(['tap1']))
-            set_arp.assert_called_with(tap_name, mock_details)
-
-    def test_set_rpc_timeout(self):
-        self.agent.stop()
-        for rpc_client in (self.agent.plugin_rpc.client,
-                           self.agent.sg_plugin_rpc.client,
-                           self.agent.state_rpc.client):
-            self.assertEqual(cfg.CONF.AGENT.quitting_rpc_timeout,
-                             rpc_client.timeout)
-
-    def test_set_rpc_timeout_no_value(self):
-        self.agent.quitting_rpc_timeout = None
-        with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc:
-            self.agent.stop()
-            self.assertFalse(mock_set_rpc.called)
-
-    def test_report_state_revived(self):
-        with mock.patch.object(self.agent.state_rpc,
-                               "report_state") as report_st:
-            report_st.return_value = constants.AGENT_REVIVED
-            self.agent._report_state()
-            self.assertTrue(self.agent.fullsync)
-
-    def _test_ensure_port_admin_state(self, admin_state):
-        port_id = 'fake_id'
-        with mock.patch.object(ip_lib, 'IPDevice') as dev_mock:
-            self.agent._ensure_port_admin_state(port_id, admin_state)
-
-        tap_name = self.agent.br_mgr.get_tap_device_name(port_id)
-        self.assertEqual(admin_state,
-                         dev_mock(tap_name).link.set_up.called)
-        self.assertNotEqual(admin_state,
-                            dev_mock(tap_name).link.set_down.called)
-
-    def test_ensure_port_admin_state_up(self):
-        self._test_ensure_port_admin_state(True)
-
-    def test_ensure_port_admin_state_down(self):
-        self._test_ensure_port_admin_state(False)
-
-    def test_update_network_ports(self):
-        port_1_data = PORT_DATA
-        NETWORK_2_ID = 'fake_second_network'
-        port_2_data = {
-            'port_id': 'fake_port_2',
-            'device': 'fake_port_2_device_name'
-        }
-        self.agent.network_ports[NETWORK_ID].append(
-            port_1_data
-        )
-        self.agent.network_ports[NETWORK_ID].append(
-            port_2_data
-        )
-        # check update port:
-        self.agent._update_network_ports(
-            NETWORK_2_ID, port_2_data['port_id'], port_2_data['device']
-        )
-        self.assertNotIn(port_2_data, self.agent.network_ports[NETWORK_ID])
-        self.assertIn(port_2_data, self.agent.network_ports[NETWORK_2_ID])
-
-    def test_clean_network_ports(self):
-        port_1_data = PORT_DATA
-        port_2_data = {
-            'port_id': 'fake_port_2',
-            'device': 'fake_port_2_device_name'
-        }
-        self.agent.network_ports[NETWORK_ID].append(
-            port_1_data
-        )
-        self.agent.network_ports[NETWORK_ID].append(
-            port_2_data
-        )
-        # check removing port from network when other ports are still there:
-        cleaned_port_id = self.agent._clean_network_ports(DEVICE_1)
-        self.assertIn(NETWORK_ID, self.agent.network_ports)
-        self.assertNotIn(port_1_data, self.agent.network_ports[NETWORK_ID])
-        self.assertIn(port_2_data, self.agent.network_ports[NETWORK_ID])
-        self.assertEqual(PORT_1, cleaned_port_id)
-        # and now remove last port from network:
-        cleaned_port_id = self.agent._clean_network_ports(
-            port_2_data['device']
-        )
-        self.assertNotIn(NETWORK_ID, self.agent.network_ports)
-        self.assertEqual(port_2_data['port_id'], cleaned_port_id)
-
-
-class TestLinuxBridgeManager(base.BaseTestCase):
-    def setUp(self):
-        super(TestLinuxBridgeManager, self).setUp()
-        self.lbm = get_linuxbridge_manager(
-            BRIDGE_MAPPINGS, INTERFACE_MAPPINGS)
-
-    def test_local_ip_validation_with_valid_ip(self):
-        with mock.patch.object(ip_lib.IPWrapper,
-                               'get_device_by_ip',
-                               return_value=FAKE_DEFAULT_DEV):
-            result = self.lbm.get_local_ip_device(LOCAL_IP)
-            self.assertEqual(FAKE_DEFAULT_DEV, result)
-
-    def test_local_ip_validation_with_invalid_ip(self):
-        with mock.patch.object(ip_lib.IPWrapper,
-                               'get_device_by_ip',
-                               return_value=None),\
-                mock.patch.object(sys, 'exit') as exit,\
-                mock.patch.object(linuxbridge_neutron_agent.LOG,
-                                  'error') as log:
-            self.lbm.get_local_ip_device(LOCAL_IP)
-            self.assertEqual(1, log.call_count)
-            exit.assert_called_once_with(1)
-
-    def _test_vxlan_group_validation(self, bad_local_ip, bad_vxlan_group):
-        with mock.patch.object(ip_lib.IPWrapper,
-                               'get_device_by_ip',
-                               return_value=FAKE_DEFAULT_DEV),\
-                mock.patch.object(sys, 'exit') as exit,\
-                mock.patch.object(linuxbridge_neutron_agent.LOG,
-                                  'error') as log:
-            self.lbm.local_ip = bad_local_ip
-            cfg.CONF.set_override('vxlan_group', bad_vxlan_group, 'VXLAN')
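-            # a usable group must be a valid multicast CIDR of the same IP
-            # version as local_ip; anything else logs an error and exits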
-            self.lbm.validate_vxlan_group_with_local_ip()
-            self.assertEqual(1, log.call_count)
-            exit.assert_called_once_with(1)
-
-    def test_vxlan_group_validation_with_mismatched_local_ip(self):
-        self._test_vxlan_group_validation(LOCAL_IP, VXLAN_GROUPV6)
-
-    def test_vxlan_group_validation_with_unicast_group(self):
-        self._test_vxlan_group_validation(LOCAL_IP, '240.0.0.0')
-
-    def test_vxlan_group_validation_with_invalid_cidr(self):
-        self._test_vxlan_group_validation(LOCAL_IP, '224.0.0.1/')
-
-    def test_vxlan_group_validation_with_v6_unicast_group(self):
-        self._test_vxlan_group_validation(LOCAL_IPV6, '2001:db8::')
-
-    def test_get_existing_bridge_name(self):
-        phy_net = 'physnet0'
-        self.assertEqual('br-eth2',
-                         self.lbm.get_existing_bridge_name(phy_net))
-
-        phy_net = ''
-        self.assertIsNone(self.lbm.get_existing_bridge_name(phy_net))
-
-    def test_get_bridge_name(self):
-        nw_id = "123456789101112"
-        self.assertEqual("brq" + nw_id[0:11],
-                         self.lbm.get_bridge_name(nw_id))
-        nw_id = ""
-        self.assertEqual("brq", self.lbm.get_bridge_name(nw_id))
-
-    def test_get_subinterface_name(self):
-        self.assertEqual("eth0.0",
-                         self.lbm.get_subinterface_name("eth0", "0"))
-        self.assertEqual("eth0.", self.lbm.get_subinterface_name("eth0", ""))
-
-    def test_get_tap_device_name(self):
-        if_id = "123456789101112"
-        self.assertEqual(constants.TAP_DEVICE_PREFIX + if_id[0:11],
-                         self.lbm.get_tap_device_name(if_id))
-        if_id = ""
-        self.assertEqual(constants.TAP_DEVICE_PREFIX,
-                         self.lbm.get_tap_device_name(if_id))
-
-    def test_get_vxlan_device_name(self):
-        vn_id = p_const.MAX_VXLAN_VNI
-        self.assertEqual("vxlan-" + str(vn_id),
-                         self.lbm.get_vxlan_device_name(vn_id))
-        self.assertIsNone(self.lbm.get_vxlan_device_name(vn_id + 1))
-
-    def test_get_vxlan_group(self):
-        cfg.CONF.set_override('vxlan_group', '239.1.2.3/24', 'VXLAN')
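-        # VNIs are spread over the range's network address, so with a /24
-        # group the expected address is 239.1.2.(vni % 256)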
-        vn_id = p_const.MAX_VXLAN_VNI
-        self.assertEqual('239.1.2.255', self.lbm.get_vxlan_group(vn_id))
-        vn_id = 256
-        self.assertEqual('239.1.2.0', self.lbm.get_vxlan_group(vn_id))
-        vn_id = 257
-        self.assertEqual('239.1.2.1', self.lbm.get_vxlan_group(vn_id))
-
-    def test_get_vxlan_group_with_ipv6(self):
-        cfg.CONF.set_override('local_ip', LOCAL_IPV6, 'VXLAN')
-        self.lbm.local_ip = LOCAL_IPV6
-        cfg.CONF.set_override('vxlan_group', VXLAN_GROUPV6, 'VXLAN')
-        vn_id = p_const.MAX_VXLAN_VNI
-        self.assertEqual('ff05::ff', self.lbm.get_vxlan_group(vn_id))
-        vn_id = 256
-        self.assertEqual('ff05::', self.lbm.get_vxlan_group(vn_id))
-        vn_id = 257
-        self.assertEqual('ff05::1', self.lbm.get_vxlan_group(vn_id))
-
-    def test_get_deletable_bridges(self):
-        br_list = ["br-int", "brq1", "brq2", "brq-user"]
-        expected = set(br_list[1:3])
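-        # only agent-created brqXXX bridges are deletable; br-int and the
-        # operator-supplied 'brq-user' from bridge_mappings must be kept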
-        lbm = get_linuxbridge_manager(
-            bridge_mappings={"physnet0": "brq-user"}, interface_mappings={})
-        with mock.patch.object(
-                bridge_lib, 'get_bridge_names', return_value=br_list):
-            self.assertEqual(expected, lbm.get_deletable_bridges())
-
-    def test_get_tap_devices_count(self):
-        with mock.patch.object(
-                bridge_lib.BridgeDevice, 'get_interfaces') as get_ifs_fn:
-            get_ifs_fn.return_value = ['tap2101', 'eth0.100', 'vxlan-1000']
-            self.assertEqual(self.lbm.get_tap_devices_count('br0'), 1)
-
-    def test_get_interface_details(self):
-        with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\
-                mock.patch.object(ip_lib.IpRouteCommand,
-                                  'get_gateway') as getgw_fn:
-            gwdict = dict(gateway='1.1.1.1')
-            getgw_fn.return_value = gwdict
-            ipdict = dict(cidr='1.1.1.1/24',
-                          broadcast='1.1.1.255',
-                          scope='global',
-                          ip_version=4,
-                          dynamic=False)
-            list_fn.return_value = ipdict
-            ret = self.lbm.get_interface_details("eth0")
-
-            self.assertTrue(list_fn.called)
-            self.assertTrue(getgw_fn.called)
-            self.assertEqual(ret, (ipdict, gwdict))
-
-    def test_ensure_flat_bridge(self):
-        with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\
-                mock.patch.object(ip_lib.IpRouteCommand,
-                                  'get_gateway') as getgw_fn:
-            gwdict = dict(gateway='1.1.1.1')
-            getgw_fn.return_value = gwdict
-            ipdict = dict(cidr='1.1.1.1/24',
-                          broadcast='1.1.1.255',
-                          scope='global',
-                          ip_version=4,
-                          dynamic=False)
-            list_fn.return_value = ipdict
-            with mock.patch.object(self.lbm, 'ensure_bridge') as ens:
-                self.assertEqual(
-                    self.lbm.ensure_flat_bridge("123", None, "eth0"),
-                    "eth0"
-                )
-                self.assertTrue(list_fn.called)
-                self.assertTrue(getgw_fn.called)
-                ens.assert_called_once_with("brq123", "eth0",
-                                            ipdict, gwdict)
-
-    def test_ensure_flat_bridge_with_existed_brq(self):
-        with mock.patch.object(self.lbm, 'ensure_bridge') as ens:
-            ens.return_value = "br-eth2"
-            self.assertEqual("br-eth2",
-                             self.lbm.ensure_flat_bridge("123",
-                                                         "br-eth2",
-                                                         None))
-            ens.assert_called_with("br-eth2")
-
-    def test_ensure_vlan_bridge(self):
-        with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\
-                mock.patch.object(self.lbm, 'ensure_bridge') as ens,\
-                mock.patch.object(self.lbm,
-                                  'get_interface_details') as get_int_det_fn:
-            ens_vl_fn.return_value = "eth0.1"
-            get_int_det_fn.return_value = (None, None)
-            self.assertEqual(self.lbm.ensure_vlan_bridge("123",
-                                                         None,
-                                                         "eth0",
-                                                         "1"),
-                             "eth0.1")
-            ens.assert_called_with("brq123", "eth0.1", None, None)
-
-            get_int_det_fn.return_value = ("ips", "gateway")
-            self.assertEqual(self.lbm.ensure_vlan_bridge("123",
-                                                         None,
-                                                         "eth0",
-                                                         "1"),
-                             "eth0.1")
-            ens.assert_called_with("brq123", "eth0.1", "ips", "gateway")
-
-    def test_ensure_vlan_bridge_with_existed_brq(self):
-        with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\
-                mock.patch.object(self.lbm, 'ensure_bridge') as ens:
-            ens_vl_fn.return_value = None
-            ens.return_value = "br-eth2"
-            self.assertEqual("br-eth2",
-                             self.lbm.ensure_vlan_bridge("123",
-                                                         "br-eth2",
-                                                         None,
-                                                         None))
-            ens.assert_called_with("br-eth2")
-
-    def test_ensure_local_bridge(self):
-        with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn:
-            self.lbm.ensure_local_bridge("54321", None)
-            ens_fn.assert_called_once_with("brq54321")
-
-    def test_ensure_local_bridge_with_existed_brq(self):
-        with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn:
-            ens_fn.return_value = "br-eth2"
-            self.lbm.ensure_local_bridge("54321", 'br-eth2')
-            ens_fn.assert_called_once_with("br-eth2")
-
-    def test_ensure_vlan(self):
-        with mock.patch.object(ip_lib, 'device_exists') as de_fn:
-            de_fn.return_value = True
-            self.assertEqual(self.lbm.ensure_vlan("eth0", "1"), "eth0.1")
-            de_fn.return_value = False
-            with mock.patch.object(utils, 'execute') as exec_fn:
-                exec_fn.return_value = False
-                self.assertEqual(self.lbm.ensure_vlan("eth0", "1"), "eth0.1")
-                # FIXME(kevinbenton): validate the params to the exec_fn calls
-                self.assertEqual(exec_fn.call_count, 2)
-                exec_fn.return_value = True
-                self.assertIsNone(self.lbm.ensure_vlan("eth0", "1"))
-                self.assertEqual(exec_fn.call_count, 3)
-
-    def test_ensure_vxlan(self):
-        seg_id = "12345678"
-        self.lbm.local_int = 'eth0'
-        self.lbm.vxlan_mode = lconst.VXLAN_MCAST
-        with mock.patch.object(ip_lib, 'device_exists') as de_fn:
-            de_fn.return_value = True
-            self.assertEqual(self.lbm.ensure_vxlan(seg_id), "vxlan-" + seg_id)
-            de_fn.return_value = False
-            with mock.patch.object(self.lbm.ip,
-                                   'add_vxlan') as add_vxlan_fn:
-                add_vxlan_fn.return_value = FakeIpDevice()
-                self.assertEqual(self.lbm.ensure_vxlan(seg_id),
-                                 "vxlan-" + seg_id)
-                add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id,
-                                                group="224.0.0.1",
-                                                dev=self.lbm.local_int)
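-                # with l2_population enabled the device should be created
-                # with ARP proxying (proxy=True) so entries can be
-                # pre-populated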
-                cfg.CONF.set_override('l2_population', True, 'VXLAN')
-                self.assertEqual(self.lbm.ensure_vxlan(seg_id),
-                                 "vxlan-" + seg_id)
-                add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id,
-                                                group="224.0.0.1",
-                                                dev=self.lbm.local_int,
-                                                proxy=True)
-
-    def test_update_interface_ip_details(self):
-        gwdict = dict(gateway='1.1.1.1',
-                      metric=50)
-        ipdict = dict(cidr='1.1.1.1/24',
-                      broadcast='1.1.1.255',
-                      scope='global',
-                      ip_version=4,
-                      dynamic=False)
-        with mock.patch.object(ip_lib.IpAddrCommand, 'add') as add_fn,\
-                mock.patch.object(ip_lib.IpAddrCommand, 'delete') as del_fn:
-            self.lbm.update_interface_ip_details("br0", "eth0",
-                                                 [ipdict], None)
-            self.assertTrue(add_fn.called)
-            self.assertTrue(del_fn.called)
-
-        with mock.patch.object(ip_lib.IpRouteCommand,
-                               'add_gateway') as addgw_fn,\
-                mock.patch.object(ip_lib.IpRouteCommand,
-                                  'delete_gateway') as delgw_fn:
-            self.lbm.update_interface_ip_details("br0", "eth0",
-                                                 None, gwdict)
-            self.assertTrue(addgw_fn.called)
-            self.assertTrue(delgw_fn.called)
-
-    def test_bridge_exists_and_ensure_up(self):
-        ip_lib_mock = mock.Mock()
-        with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock):
-            # device exists
-            self.assertTrue(self.lbm._bridge_exists_and_ensure_up("br0"))
-            self.assertTrue(ip_lib_mock.link.set_up.called)
-            # device doesn't exists
-            ip_lib_mock.link.set_up.side_effect = RuntimeError
-            self.assertFalse(self.lbm._bridge_exists_and_ensure_up("br0"))
-
-    def test_ensure_bridge(self):
-        bridge_device = mock.Mock()
-        bridge_device_old = mock.Mock()
-        with mock.patch.object(self.lbm,
-                               '_bridge_exists_and_ensure_up') as de_fn,\
-                mock.patch.object(bridge_lib, "BridgeDevice",
-                                  return_value=bridge_device) as br_fn,\
-                mock.patch.object(self.lbm,
-                                  'update_interface_ip_details') as upd_fn,\
-                mock.patch.object(bridge_lib, 'is_bridged_interface'),\
-                mock.patch.object(bridge_lib.BridgeDevice,
-                                  'get_interface_bridge') as get_if_br_fn:
-            de_fn.return_value = False
-            br_fn.addbr.return_value = bridge_device
-            bridge_device.setfd.return_value = False
-            bridge_device.disable_stp.return_value = False
-            bridge_device.disable_ipv6.return_value = False
-            bridge_device.link.set_up.return_value = False
-            self.assertEqual(self.lbm.ensure_bridge("br0", None), "br0")
-
-            bridge_device.owns_interface.return_value = False
-            self.lbm.ensure_bridge("br0", "eth0")
-            upd_fn.assert_called_with("br0", "eth0", None, None)
-            bridge_device.owns_interface.assert_called_with("eth0")
-
-            self.lbm.ensure_bridge("br0", "eth0", "ips", "gateway")
-            upd_fn.assert_called_with("br0", "eth0", "ips", "gateway")
-            bridge_device.owns_interface.assert_called_with("eth0")
-
-            de_fn.return_value = True
-            bridge_device.delif.side_effect = Exception()
-            self.lbm.ensure_bridge("br0", "eth0")
-            bridge_device.owns_interface.assert_called_with("eth0")
-
-            de_fn.return_value = True
-            bridge_device.owns_interface.return_value = False
-            get_if_br_fn.return_value = bridge_device_old
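-            # eth0 currently sits on another bridge, so it must be removed
-            # from the old bridge before being added to br0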
-            bridge_device.addif.reset_mock()
-            self.lbm.ensure_bridge("br0", "eth0")
-            bridge_device_old.delif.assert_called_once_with('eth0')
-            bridge_device.addif.assert_called_once_with('eth0')
-
-    def test_ensure_physical_in_bridge(self):
-        self.assertFalse(
-            self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN,
-                                               "phys", "1")
-        )
-        with mock.patch.object(self.lbm, "ensure_flat_bridge") as flbr_fn:
-            self.assertTrue(
-                self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_FLAT,
-                                                   "physnet1", None)
-            )
-            self.assertTrue(flbr_fn.called)
-        with mock.patch.object(self.lbm, "ensure_vlan_bridge") as vlbr_fn:
-            self.assertTrue(
-                self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN,
-                                                   "physnet1", "1")
-            )
-            self.assertTrue(vlbr_fn.called)
-
-        with mock.patch.object(self.lbm, "ensure_vxlan_bridge") as vlbr_fn:
-            self.lbm.vxlan_mode = lconst.VXLAN_MCAST
-            self.assertTrue(
-                self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VXLAN,
-                                                   "physnet1", "1")
-            )
-            self.assertTrue(vlbr_fn.called)
-
-    def test_ensure_physical_in_bridge_with_existed_brq(self):
-        with mock.patch.object(linuxbridge_neutron_agent.LOG, 'error') as log:
-            self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_FLAT,
-                                               "physnet9", "1")
-            self.assertEqual(1, log.call_count)
-
-    def test_add_tap_interface_owner_other(self):
-        with mock.patch.object(ip_lib, "device_exists"):
-            with mock.patch.object(self.lbm, "ensure_local_bridge"):
-                self.assertTrue(self.lbm.add_tap_interface("123",
-                                                           p_const.TYPE_LOCAL,
-                                                           "physnet1", None,
-                                                           "tap1", "foo"))
-
-    def _test_add_tap_interface(self, dev_owner_prefix):
-        with mock.patch.object(ip_lib, "device_exists") as de_fn:
-            de_fn.return_value = False
-            self.assertFalse(
-                self.lbm.add_tap_interface("123", p_const.TYPE_VLAN,
-                                           "physnet1", "1", "tap1",
-                                           dev_owner_prefix))
-
-            de_fn.return_value = True
-            bridge_device = mock.Mock()
-            with mock.patch.object(self.lbm, "ensure_local_bridge") as en_fn,\
-                    mock.patch.object(bridge_lib, "BridgeDevice",
-                                      return_value=bridge_device), \
-                    mock.patch.object(bridge_lib.BridgeDevice,
-                                      "get_interface_bridge") as get_br:
-                bridge_device.addif.return_value = False
-                get_br.return_value = True
-                self.assertTrue(self.lbm.add_tap_interface("123",
-                                                           p_const.TYPE_LOCAL,
-                                                           "physnet1", None,
-                                                           "tap1",
-                                                           dev_owner_prefix))
-                en_fn.assert_called_with("123", "brq123")
-
-                self.lbm.bridge_mappings = {"physnet1": "brq999"}
-                self.assertTrue(self.lbm.add_tap_interface("123",
-                                                           p_const.TYPE_LOCAL,
-                                                           "physnet1", None,
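-        # every agent on the network contributes a flooding entry for its
-        # tunnel IP; agents hosting bound ports also contribute one
-        # PortInfo (MAC/IP) entry per port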
-                                                           "tap1",
-                                                           dev_owner_prefix))
-                en_fn.assert_called_with("123", "brq999")
-
-                get_br.return_value = False
-                bridge_device.addif.return_value = True
-                self.assertFalse(self.lbm.add_tap_interface("123",
-                                                            p_const.TYPE_LOCAL,
-                                                            "physnet1", None,
-                                                            "tap1",
-                                                            dev_owner_prefix))
-            with mock.patch.object(self.lbm,
-                                   "ensure_physical_in_bridge") as ens_fn,\
-                    mock.patch.object(self.lbm,
-                                      "ensure_tap_mtu") as en_mtu_fn,\
-                    mock.patch.object(bridge_lib.BridgeDevice,
-                                      "get_interface_bridge") as get_br:
-                ens_fn.return_value = False
-                self.assertFalse(self.lbm.add_tap_interface("123",
-                                                            p_const.TYPE_VLAN,
-                                                            "physnet1", "1",
-                                                            "tap1",
-                                                            dev_owner_prefix))
-
-                ens_fn.return_value = "eth0.1"
-                get_br.return_value = "brq123"
-                self.lbm.add_tap_interface("123", p_const.TYPE_VLAN,
-                                           "physnet1", "1", "tap1",
-                                           dev_owner_prefix)
-                en_mtu_fn.assert_called_once_with("tap1", "eth0.1")
-                bridge_device.addif.assert_called_once_with("tap1")
-
-    def test_add_tap_interface_owner_network(self):
-        self._test_add_tap_interface(constants.DEVICE_OWNER_NETWORK_PREFIX)
-
-    def test_add_tap_interface_owner_neutron(self):
-        self._test_add_tap_interface(constants.DEVICE_OWNER_NEUTRON_PREFIX)
-
-    def test_add_interface(self):
-        with mock.patch.object(self.lbm, "add_tap_interface") as add_tap:
-            self.lbm.add_interface("123", p_const.TYPE_VLAN, "physnet-1",
-                                   "1", "234",
-                                   constants.DEVICE_OWNER_NETWORK_PREFIX)
-            add_tap.assert_called_with("123", p_const.TYPE_VLAN, "physnet-1",
-                                       "1", "tap234",
-                                       constants.DEVICE_OWNER_NETWORK_PREFIX)
-
-    def test_delete_bridge(self):
-        with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\
-                mock.patch.object(ip_lib, "IpLinkCommand") as link_cmd,\
-                mock.patch.object(bridge_lib.BridgeDevice,
-                                  "get_interfaces") as getif_fn,\
-                mock.patch.object(self.lbm, "remove_interface"),\
-                mock.patch.object(self.lbm,
-                                  "get_interface_details") as if_det_fn,\
-                mock.patch.object(self.lbm,
-                                  "update_interface_ip_details") as updif_fn,\
-                mock.patch.object(self.lbm, "delete_interface") as delif_fn:
-            de_fn.return_value = False
-            self.lbm.delete_bridge("br0")
-            self.assertFalse(getif_fn.called)
-
-            de_fn.return_value = True
-            getif_fn.return_value = ["eth0", "eth1", "vxlan-1002"]
-            if_det_fn.return_value = ("ips", "gateway")
-            link_cmd.set_down.return_value = False
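-            # eth1 is a mapped physical interface, so its IPs and gateway
-            # must be moved back off the bridge, while the agent-created
-            # vxlan device is deleted outright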
-            self.lbm.delete_bridge("br0")
-            updif_fn.assert_called_with("eth1", "br0", "ips", "gateway")
-            delif_fn.assert_called_with("vxlan-1002")
-
-    def test_delete_bridge_with_ip(self):
-        bridge_device = mock.Mock()
-        with mock.patch.object(ip_lib, "device_exists") as de_fn,\
-                mock.patch.object(self.lbm, "remove_interface"),\
-                mock.patch.object(self.lbm,
-                                  "get_interface_details") as if_det_fn,\
-                mock.patch.object(self.lbm,
-                                  "update_interface_ip_details") as updif_fn,\
-                mock.patch.object(self.lbm, "delete_interface") as del_interface,\
-                mock.patch.object(bridge_lib, "BridgeDevice",
-                                  return_value=bridge_device):
-            de_fn.return_value = True
-            bridge_device.get_interfaces.return_value = ["eth0", "eth1.1"]
-            if_det_fn.return_value = ("ips", "gateway")
-            bridge_device.link.set_down.return_value = False
-            self.lbm.delete_bridge("br0")
-            updif_fn.assert_called_with("eth1.1", "br0", "ips", "gateway")
-            self.assertFalse(del_interface.called)
-
-    def test_delete_bridge_no_ip(self):
-        bridge_device = mock.Mock()
-        with mock.patch.object(ip_lib, "device_exists") as de_fn,\
-                mock.patch.object(self.lbm, "remove_interface"),\
-                mock.patch.object(self.lbm,
-                                  "get_interface_details") as if_det_fn,\
-                mock.patch.object(self.lbm,
-                                  "update_interface_ip_details") as updif_fn,\
-                mock.patch.object(self.lbm, "delete_interface") as del_interface,\
-                mock.patch.object(bridge_lib, "BridgeDevice",
-                                  return_value=bridge_device):
-            de_fn.return_value = True
-            bridge_device.get_interfaces.return_value = ["eth0", "eth1.1"]
-            bridge_device.link.set_down.return_value = False
-            if_det_fn.return_value = ([], None)
-            self.lbm.delete_bridge("br0")
-            del_interface.assert_called_with("eth1.1")
-            self.assertFalse(updif_fn.called)
-
-    def test_delete_bridge_no_int_mappings(self):
-        lbm = get_linuxbridge_manager(
-            bridge_mappings={}, interface_mappings={})
-
-        with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\
-                mock.patch.object(ip_lib, "IpLinkCommand") as link_cmd,\
-                mock.patch.object(bridge_lib.BridgeDevice,
-                                  "get_interfaces") as getif_fn,\
-                mock.patch.object(lbm, "remove_interface"),\
-                mock.patch.object(lbm, "delete_interface") as del_interface:
-            de_fn.return_value = False
-            lbm.delete_bridge("br0")
-            self.assertFalse(getif_fn.called)
-
-            de_fn.return_value = True
-            getif_fn.return_value = ["vxlan-1002"]
-            link_cmd.set_down.return_value = False
-            lbm.delete_bridge("br0")
-            del_interface.assert_called_with("vxlan-1002")
-
-    def test_delete_bridge_with_physical_vlan(self):
-        self.lbm.interface_mappings.update({"physnet2": "eth1.4000"})
-        bridge_device = mock.Mock()
-        with mock.patch.object(ip_lib, "device_exists") as de_fn,\
-                mock.patch.object(self.lbm, "remove_interface"),\
-                mock.patch.object(self.lbm, "get_interface_details") as if_det_fn,\
-                mock.patch.object(self.lbm, "delete_interface") as del_int,\
-                mock.patch.object(bridge_lib, "BridgeDevice",
-                                  return_value=bridge_device):
-            de_fn.return_value = True
-            bridge_device.get_interfaces.return_value = ["eth1.1", "eth1.4000"]
-            if_det_fn.return_value = ([], None)
-            bridge_device.link.set_down.return_value = False
-            self.lbm.delete_bridge("br0")
-            del_int.assert_called_once_with("eth1.1")
-
-    def test_remove_interface(self):
-        with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\
-                mock.patch.object(bridge_lib,
-                                  'is_bridged_interface') as isdev_fn,\
-                mock.patch.object(bridge_lib.BridgeDevice,
-                                  "delif") as delif_fn:
-            de_fn.return_value = False
-            self.assertFalse(self.lbm.remove_interface("br0", "eth0"))
-            self.assertFalse(isdev_fn.called)
-
-            de_fn.return_value = True
-            isdev_fn.return_value = False
-            self.assertTrue(self.lbm.remove_interface("br0", "eth0"))
-
-            isdev_fn.return_value = True
-            delif_fn.return_value = True
-            self.assertFalse(self.lbm.remove_interface("br0", "eth0"))
-
-            delif_fn.return_value = False
-            self.assertTrue(self.lbm.remove_interface("br0", "eth0"))
-
-    def test_delete_interface(self):
-        with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\
-                mock.patch.object(ip_lib.IpLinkCommand, "set_down") as down_fn,\
-                mock.patch.object(ip_lib.IpLinkCommand, "delete") as delete_fn:
-            de_fn.return_value = False
-            self.lbm.delete_interface("eth1.1")
-            self.assertFalse(down_fn.called)
-            self.assertFalse(delete_fn.called)
-
-            de_fn.return_value = True
-            self.lbm.delete_interface("eth1.1")
-            self.assertTrue(down_fn.called)
-            self.assertTrue(delete_fn.called)
-
-    def _check_vxlan_support(self, expected, vxlan_ucast_supported,
-                             vxlan_mcast_supported):
-        with mock.patch.object(self.lbm,
-                               'vxlan_ucast_supported',
-                               return_value=vxlan_ucast_supported),\
-                mock.patch.object(self.lbm,
-                                  'vxlan_mcast_supported',
-                                  return_value=vxlan_mcast_supported):
-            if expected == lconst.VXLAN_NONE:
-                self.assertRaises(exceptions.VxlanNetworkUnsupported,
-                                  self.lbm.check_vxlan_support)
-                self.assertEqual(expected, self.lbm.vxlan_mode)
-            else:
-                self.lbm.check_vxlan_support()
-                self.assertEqual(expected, self.lbm.vxlan_mode)
-
-    def test_check_vxlan_support(self):
-        self._check_vxlan_support(expected=lconst.VXLAN_UCAST,
-                                  vxlan_ucast_supported=True,
-                                  vxlan_mcast_supported=True)
-        self._check_vxlan_support(expected=lconst.VXLAN_MCAST,
-                                  vxlan_ucast_supported=False,
-                                  vxlan_mcast_supported=True)
-
-        self._check_vxlan_support(expected=lconst.VXLAN_NONE,
-                                  vxlan_ucast_supported=False,
-                                  vxlan_mcast_supported=False)
-        self._check_vxlan_support(expected=lconst.VXLAN_UCAST,
-                                  vxlan_ucast_supported=True,
-                                  vxlan_mcast_supported=False)
-
-    def _check_vxlan_ucast_supported(
-            self, expected, l2_population, iproute_arg_supported, fdb_append):
-        cfg.CONF.set_override('l2_population', l2_population, 'VXLAN')
-        with mock.patch.object(ip_lib, 'device_exists', return_value=False),\
-                mock.patch.object(ip_lib, 'vxlan_in_use', return_value=False),\
-                mock.patch.object(self.lbm,
-                                  'delete_interface',
-                                  return_value=None),\
-                mock.patch.object(self.lbm,
-                                  'ensure_vxlan',
-                                  return_value=None),\
-                mock.patch.object(
-                    utils,
-                    'execute',
-                    side_effect=None if fdb_append else RuntimeError()),\
-                mock.patch.object(ip_lib,
-                                  'iproute_arg_supported',
-                                  return_value=iproute_arg_supported):
-            self.assertEqual(expected, self.lbm.vxlan_ucast_supported())
-
-    def test_vxlan_ucast_supported(self):
-        self._check_vxlan_ucast_supported(
-            expected=False,
-            l2_population=False, iproute_arg_supported=True, fdb_append=True)
-        self._check_vxlan_ucast_supported(
-            expected=False,
-            l2_population=True, iproute_arg_supported=False, fdb_append=True)
-        self._check_vxlan_ucast_supported(
-            expected=False,
-            l2_population=True, iproute_arg_supported=True, fdb_append=False)
-        self._check_vxlan_ucast_supported(
-            expected=True,
-            l2_population=True, iproute_arg_supported=True, fdb_append=True)
-
-    def _check_vxlan_mcast_supported(
-            self, expected, vxlan_group, iproute_arg_supported):
-        cfg.CONF.set_override('vxlan_group', vxlan_group, 'VXLAN')
-        with mock.patch.object(
-                ip_lib, 'iproute_arg_supported',
-                return_value=iproute_arg_supported):
-            self.assertEqual(expected, self.lbm.vxlan_mcast_supported())
-
-    def test_vxlan_mcast_supported(self):
-        self._check_vxlan_mcast_supported(
-            expected=False,
-            vxlan_group='',
-            iproute_arg_supported=True)
-        self._check_vxlan_mcast_supported(
-            expected=False,
-            vxlan_group='224.0.0.1',
-            iproute_arg_supported=False)
-        self._check_vxlan_mcast_supported(
-            expected=True,
-            vxlan_group='224.0.0.1',
-            iproute_arg_supported=True)
-
-
-class TestLinuxBridgeRpcCallbacks(base.BaseTestCase):
-    def setUp(self):
-        super(TestLinuxBridgeRpcCallbacks, self).setUp()
-
-        class FakeLBAgent(object):
-            def __init__(self):
-                self.agent_id = 1
-                self.br_mgr = get_linuxbridge_manager(
-                    BRIDGE_MAPPINGS, INTERFACE_MAPPINGS)
-
-                self.br_mgr.vxlan_mode = lconst.VXLAN_UCAST
-                segment = mock.Mock()
-                segment.network_type = 'vxlan'
-                segment.segmentation_id = 1
-                self.br_mgr.network_map['net_id'] = segment
-                self.updated_devices = set()
-                self.network_ports = collections.defaultdict(list)
-
-        self.lb_rpc = linuxbridge_neutron_agent.LinuxBridgeRpcCallbacks(
-            object(),
-            FakeLBAgent(),
-            object()
-        )
-
-    def test_network_delete(self):
-        mock_net = mock.Mock()
-        mock_net.physical_network = None
-
-        self.lb_rpc.agent.br_mgr.network_map = {NETWORK_ID: mock_net}
-
-        with mock.patch.object(self.lb_rpc.agent.br_mgr,
-                               "get_bridge_name") as get_br_fn,\
-                mock.patch.object(self.lb_rpc.agent.br_mgr,
-                                  "delete_bridge") as del_fn:
-            get_br_fn.return_value = "br0"
-            self.lb_rpc.network_delete("anycontext", network_id=NETWORK_ID)
-            get_br_fn.assert_called_with(NETWORK_ID)
-            del_fn.assert_called_with("br0")
-
-    def test_port_update(self):
-        port = {'id': PORT_1}
-        self.lb_rpc.port_update(context=None, port=port)
-        self.assertEqual(set([DEVICE_1]), self.lb_rpc.agent.updated_devices)
-
-    def test_network_update(self):
-        updated_network = {'id': NETWORK_ID}
-        self.lb_rpc.agent.network_ports = {
-            NETWORK_ID: [PORT_DATA]
-        }
-        self.lb_rpc.network_update(context=None, network=updated_network)
-        self.assertEqual(set([DEVICE_1]), self.lb_rpc.agent.updated_devices)
-
-    def test_network_delete_with_existing_brq(self):
-        mock_net = mock.Mock()
-        mock_net.physical_network = 'physnet0'
-
-        self.lb_rpc.agent.br_mgr.network_map = {'123': mock_net}
-
-        with mock.patch.object(linuxbridge_neutron_agent.LOG, 'info') as log,\
-                mock.patch.object(self.lb_rpc.agent.br_mgr,
-                                  "delete_bridge") as del_fn:
-            self.lb_rpc.network_delete("anycontext", network_id="123")
-            self.assertEqual(0, del_fn.call_count)
-            self.assertEqual(1, log.call_count)
-
-    def test_fdb_add(self):
-        fdb_entries = {'net_id':
-                       {'ports':
-                        {'agent_ip': [constants.FLOODING_ENTRY,
-                                      ['port_mac', 'port_ip']]},
-                        'network_type': 'vxlan',
-                        'segment_id': 1}}
-
-        with mock.patch.object(utils, 'execute',
-                               return_value='') as execute_fn, \
-                mock.patch.object(ip_lib.IpNeighCommand, 'add',
-                                  return_value='') as add_fn:
-            self.lb_rpc.fdb_add(None, fdb_entries)
-
-            expected = [
-                mock.call(['bridge', 'fdb', 'show', 'dev', 'vxlan-1'],
-                          run_as_root=True),
-                mock.call(['bridge', 'fdb', 'add',
-                           constants.FLOODING_ENTRY[0],
-                           'dev', 'vxlan-1', 'dst', 'agent_ip'],
-                          run_as_root=True,
-                          check_exit_code=False),
-                mock.call(['bridge', 'fdb', 'replace', 'port_mac', 'dev',
-                           'vxlan-1', 'dst', 'agent_ip'],
-                          run_as_root=True,
-                          check_exit_code=False),
-            ]
-            execute_fn.assert_has_calls(expected)
-            add_fn.assert_called_with('port_ip', 'port_mac')
-
-    def test_fdb_ignore(self):
-        fdb_entries = {'net_id':
-                       {'ports':
-                        {LOCAL_IP: [constants.FLOODING_ENTRY,
-                                    ['port_mac', 'port_ip']]},
-                        'network_type': 'vxlan',
-                        'segment_id': 1}}
-
-        with mock.patch.object(utils, 'execute',
-                               return_value='') as execute_fn:
-            self.lb_rpc.fdb_add(None, fdb_entries)
-            self.lb_rpc.fdb_remove(None, fdb_entries)
-
-            self.assertFalse(execute_fn.called)
-
-        fdb_entries = {'other_net_id':
-                       {'ports':
-                        {'192.168.0.67': [constants.FLOODING_ENTRY,
-                                          ['port_mac', 'port_ip']]},
-                        'network_type': 'vxlan',
-                        'segment_id': 1}}
-
-        with mock.patch.object(utils, 'execute',
-                               return_value='') as execute_fn:
-            self.lb_rpc.fdb_add(None, fdb_entries)
-            self.lb_rpc.fdb_remove(None, fdb_entries)
-
-            self.assertFalse(execute_fn.called)
-
-    def test_fdb_remove(self):
-        fdb_entries = {'net_id':
-                       {'ports':
-                        {'agent_ip': [constants.FLOODING_ENTRY,
-                                      ['port_mac', 'port_ip']]},
-                        'network_type': 'vxlan',
-                        'segment_id': 1}}
-
-        with mock.patch.object(utils, 'execute',
-                               return_value='') as execute_fn, \
-                mock.patch.object(ip_lib.IpNeighCommand, 'delete',
-                                  return_value='') as del_fn:
-            self.lb_rpc.fdb_remove(None, fdb_entries)
-
-            expected = [
-                mock.call(['bridge', 'fdb', 'del',
-                           constants.FLOODING_ENTRY[0],
-                           'dev', 'vxlan-1', 'dst', 'agent_ip'],
-                          run_as_root=True,
-                          check_exit_code=False),
-                mock.call(['bridge', 'fdb', 'del', 'port_mac',
-                           'dev', 'vxlan-1', 'dst', 'agent_ip'],
-                          run_as_root=True,
-                          check_exit_code=False),
-            ]
-            execute_fn.assert_has_calls(expected)
-            del_fn.assert_called_with('port_ip', 'port_mac')
-
-    def test_fdb_update_chg_ip(self):
-        fdb_entries = {'chg_ip':
-                       {'net_id':
-                        {'agent_ip':
-                         {'before': [['port_mac', 'port_ip_1']],
-                          'after': [['port_mac', 'port_ip_2']]}}}}
-
-        with mock.patch.object(ip_lib.IpNeighCommand, 'add',
-                               return_value='') as add_fn, \
-                mock.patch.object(ip_lib.IpNeighCommand, 'delete',
-                                  return_value='') as del_fn:
-            self.lb_rpc.fdb_update(None, fdb_entries)
-
-            del_fn.assert_called_with('port_ip_1', 'port_mac')
-            add_fn.assert_called_with('port_ip_2', 'port_mac')
-
-    def test_fdb_update_chg_ip_empty_lists(self):
-        fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {}}}}
-        self.lb_rpc.fdb_update(None, fdb_entries)
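Taken together, test_fdb_update_chg_ip and its empty-lists variant pin down the nesting of the l2population "chg_ip" payload and how each (mac, ip) pair is handed to the neigh command as (ip, mac). A minimal standalone sketch of that traversal follows, under stated assumptions: walk_chg_ip is a hypothetical helper, the yielded tuples stand in for the mocked ip_lib.IpNeighCommand.delete/add calls, and deletions are assumed to be applied before additions.

def walk_chg_ip(fdb_entries):
    """Yield ('delete'|'add', port_ip, port_mac) ops for changed IPs."""
    for net_id, agents in fdb_entries.get('chg_ip', {}).items():
        for agent_ip, state in agents.items():
            # Entries are [mac, ip] pairs; the neigh call takes (ip, mac).
            for mac, ip in state.get('before', []):
                yield ('delete', ip, mac)
            for mac, ip in state.get('after', []):
                yield ('add', ip, mac)

entries = {'chg_ip': {'net_id': {'agent_ip': {
    'before': [['port_mac', 'port_ip_1']],
    'after': [['port_mac', 'port_ip_2']]}}}}
assert list(walk_chg_ip(entries)) == [('delete', 'port_ip_1', 'port_mac'),
                                      ('add', 'port_ip_2', 'port_mac')]
assert list(walk_chg_ip({'chg_ip': {'net_id': {'agent_ip': {}}}})) == []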
diff --git a/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.py b/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.py
deleted file mode 100644 (file)
index fb28aad..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import constants
-from neutron.extensions import portbindings
-from neutron.plugins.ml2.drivers.linuxbridge.mech_driver \
-    import mech_linuxbridge
-from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
-
-
-class LinuxbridgeMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
-    VIF_TYPE = portbindings.VIF_TYPE_BRIDGE
-    CAP_PORT_FILTER = True
-    AGENT_TYPE = constants.AGENT_TYPE_LINUXBRIDGE
-
-    GOOD_MAPPINGS = {'fake_physical_network': 'fake_interface'}
-    GOOD_TUNNEL_TYPES = ['gre', 'vxlan']
-    GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS,
-                    'tunnel_types': GOOD_TUNNEL_TYPES}
-
-    BAD_MAPPINGS = {'wrong_physical_network': 'wrong_interface'}
-    BAD_TUNNEL_TYPES = ['bad_tunnel_type']
-    BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS,
-                   'tunnel_types': BAD_TUNNEL_TYPES}
-
-    AGENTS = [{'alive': True,
-               'configurations': GOOD_CONFIGS,
-               'host': 'host'}]
-    AGENTS_DEAD = [{'alive': False,
-                    'configurations': GOOD_CONFIGS,
-                    'host': 'dead_host'}]
-    AGENTS_BAD = [{'alive': False,
-                   'configurations': GOOD_CONFIGS,
-                   'host': 'bad_host_1'},
-                  {'alive': True,
-                   'configurations': BAD_CONFIGS,
-                   'host': 'bad_host_2'}]
-
-    def setUp(self):
-        super(LinuxbridgeMechanismBaseTestCase, self).setUp()
-        self.driver = mech_linuxbridge.LinuxbridgeMechanismDriver()
-        self.driver.initialize()
-
-
-class LinuxbridgeMechanismGenericTestCase(LinuxbridgeMechanismBaseTestCase,
-                                          base.AgentMechanismGenericTestCase):
-    pass
-
-
-class LinuxbridgeMechanismLocalTestCase(LinuxbridgeMechanismBaseTestCase,
-                                        base.AgentMechanismLocalTestCase):
-    pass
-
-
-class LinuxbridgeMechanismFlatTestCase(LinuxbridgeMechanismBaseTestCase,
-                                       base.AgentMechanismFlatTestCase):
-    pass
-
-
-class LinuxbridgeMechanismVlanTestCase(LinuxbridgeMechanismBaseTestCase,
-                                       base.AgentMechanismVlanTestCase):
-    pass
-
-
-class LinuxbridgeMechanismGreTestCase(LinuxbridgeMechanismBaseTestCase,
-                                      base.AgentMechanismGreTestCase):
-    pass
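These empty pass classes are the point of the file: the pattern shared by the ML2 driver test suites is a base case that carries driver-specific fixtures as class attributes, combined with generic per-network-type mixins that only read those attributes. A minimal self-contained sketch of that pattern (all names here are illustrative, not neutron's):

import unittest

class AgentMechanismVlanTestCase(object):
    """Generic mixin: written purely against class attributes."""
    def test_some_agent_alive(self):
        self.assertTrue(any(agent['alive'] for agent in self.AGENTS))

class FakeDriverBaseTestCase(unittest.TestCase):
    """Driver-specific base: supplies the fixtures the mixins consume."""
    AGENTS = [{'alive': True, 'configurations': {}, 'host': 'host'}]

class FakeDriverVlanTestCase(FakeDriverBaseTestCase,
                             AgentMechanismVlanTestCase):
    pass  # inherits fixtures from one parent, tests from the other

if __name__ == '__main__':
    unittest.main()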
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_fake_agent.py b/neutron/tests/unit/plugins/ml2/drivers/mech_fake_agent.py
deleted file mode 100644 (file)
index 2c8744a..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-# Based on openvswitch mechanism driver.
-#
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent import securitygroups_rpc
-from neutron.common import constants
-from neutron.extensions import portbindings
-from neutron.plugins.common import constants as p_constants
-from neutron.plugins.ml2.drivers import mech_agent
-
-
-class FakeAgentMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
-    """ML2 mechanism driver for testing.
-
-    This is a ML2 mechanism driver used by UTs in test_l2population.
-    This driver implements minimum requirements for L2pop mech driver.
-    As there are some agent-based mechanism drivers and OVS agent
-    mech driver is not the only one to support L2pop, it is useful to
-    test L2pop with multiple drivers like this to check the minimum
-    requirements.
-
-    NOTE(yamamoto): This is a modified copy of ofagent mechanism driver as
-    of writing this.  There's no need to keep this synced with the "real"
-    ofagent mechansim driver or its agent.
-    """
-
-    def __init__(self):
-        sg_enabled = securitygroups_rpc.is_firewall_enabled()
-        vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled,
-                       portbindings.OVS_HYBRID_PLUG: sg_enabled}
-        super(FakeAgentMechanismDriver, self).__init__(
-            # NOTE(yamamoto): l2pop driver has a hardcoded list of
-            # supported agent types.
-            constants.AGENT_TYPE_OFA,
-            portbindings.VIF_TYPE_OVS,
-            vif_details)
-
-    def get_allowed_network_types(self, agent):
-        return (agent['configurations'].get('tunnel_types', []) +
-                [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT,
-                 p_constants.TYPE_VLAN])
-
-    def get_mappings(self, agent):
-        return dict(agent['configurations'].get('interface_mappings', {}))
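The docstring above reduces the L2pop-compatible driver contract to two read-only hooks over the agent's reported configuration. As a self-contained sketch (the TYPE_* constants are inlined stand-ins for neutron's plugins.common constants, and the agent dict is a made-up example):

TYPE_LOCAL, TYPE_FLAT, TYPE_VLAN = 'local', 'flat', 'vlan'

def get_allowed_network_types(agent):
    # Tunnel types come from the agent report; local/flat/vlan are
    # always allowed.
    return (agent['configurations'].get('tunnel_types', []) +
            [TYPE_LOCAL, TYPE_FLAT, TYPE_VLAN])

def get_mappings(agent):
    # Copy so callers cannot mutate the agent's reported mapping.
    return dict(agent['configurations'].get('interface_mappings', {}))

agent = {'configurations': {'tunnel_types': ['vxlan'],
                            'interface_mappings': {'physnet1': 'eth1'}}}
assert get_allowed_network_types(agent) == ['vxlan', 'local', 'flat', 'vlan']
assert get_mappings(agent) == {'physnet1': 'eth1'}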
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py
deleted file mode 100644 (file)
index a0a913f..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from oslo_config import cfg
-
-from neutron.common import utils as n_utils
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config
-from neutron.plugins.ml2.drivers.mech_sriov.agent \
-    import sriov_nic_agent as agent
-from neutron.tests import base
-
-
-class TestSriovAgentConfig(base.BaseTestCase):
-    EXCLUDE_DEVICES_LIST = ['p7p1:0000:07:00.1;0000:07:00.2',
-                            'p3p1:0000:04:00.3']
-
-    EXCLUDE_DEVICES_LIST_INVALID = ['p7p2:0000:07:00.1;0000:07:00.2']
-
-    EXCLUDE_DEVICES_WITH_SPACES_LIST = ['p7p1: 0000:07:00.1 ; 0000:07:00.2',
-                                        'p3p1:0000:04:00.3 ']
-
-    EXCLUDE_DEVICES_WITH_SPACES_ERROR = ['p7p1',
-                                         'p3p1:0000:04:00.3 ']
-
-    EXCLUDE_DEVICES = {'p7p1': set(['0000:07:00.1', '0000:07:00.2']),
-                       'p3p1': set(['0000:04:00.3'])}
-
-    DEVICE_MAPPING_LIST = ['physnet7:p7p1',
-                           'physnet3:p3p1']
-
-    DEVICE_MAPPING_WITH_ERROR_LIST = ['physnet7',
-                                      'physnet3:p3p1']
-
-    DEVICE_MAPPING_WITH_SPACES_LIST = ['physnet7 : p7p1',
-                                       'physnet3 : p3p1 ']
-    DEVICE_MAPPING = {'physnet7': 'p7p1',
-                      'physnet3': 'p3p1'}
-
-    def test_defaults(self):
-        self.assertEqual(config.DEFAULT_DEVICE_MAPPINGS,
-                         cfg.CONF.SRIOV_NIC.physical_device_mappings)
-        self.assertEqual(config.DEFAULT_EXCLUDE_DEVICES,
-                         cfg.CONF.SRIOV_NIC.exclude_devices)
-        self.assertEqual(2,
-                         cfg.CONF.AGENT.polling_interval)
-
-    def test_device_mappings(self):
-        cfg.CONF.set_override('physical_device_mappings',
-                              self.DEVICE_MAPPING_LIST,
-                              'SRIOV_NIC')
-        device_mappings = n_utils.parse_mappings(
-            cfg.CONF.SRIOV_NIC.physical_device_mappings)
-        self.assertEqual(self.DEVICE_MAPPING, device_mappings)
-
-    def test_device_mappings_with_error(self):
-        cfg.CONF.set_override('physical_device_mappings',
-                              self.DEVICE_MAPPING_WITH_ERROR_LIST,
-                              'SRIOV_NIC')
-        self.assertRaises(ValueError, n_utils.parse_mappings,
-                          cfg.CONF.SRIOV_NIC.physical_device_mappings)
-
-    def test_device_mappings_with_spaces(self):
-        cfg.CONF.set_override('physical_device_mappings',
-                              self.DEVICE_MAPPING_WITH_SPACES_LIST,
-                              'SRIOV_NIC')
-        device_mappings = n_utils.parse_mappings(
-            cfg.CONF.SRIOV_NIC.physical_device_mappings)
-        self.assertEqual(self.DEVICE_MAPPING, device_mappings)
-
-    def test_exclude_devices(self):
-        cfg.CONF.set_override('exclude_devices',
-                              self.EXCLUDE_DEVICES_LIST,
-                              'SRIOV_NIC')
-        exclude_devices = config.parse_exclude_devices(
-            cfg.CONF.SRIOV_NIC.exclude_devices)
-        self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices)
-
-    def test_exclude_devices_with_spaces(self):
-        cfg.CONF.set_override('exclude_devices',
-                              self.EXCLUDE_DEVICES_WITH_SPACES_LIST,
-                              'SRIOV_NIC')
-        exclude_devices = config.parse_exclude_devices(
-            cfg.CONF.SRIOV_NIC.exclude_devices)
-        self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices)
-
-    def test_exclude_devices_with_error(self):
-        cfg.CONF.set_override('exclude_devices',
-                              self.EXCLUDE_DEVICES_WITH_SPACES_ERROR,
-                              'SRIOV_NIC')
-        self.assertRaises(ValueError, config.parse_exclude_devices,
-                          cfg.CONF.SRIOV_NIC.exclude_devices)
-
-    def test_validate_config_ok(self):
-        cfg.CONF.set_override('physical_device_mappings',
-                              self.DEVICE_MAPPING_LIST,
-                              'SRIOV_NIC')
-        cfg.CONF.set_override('exclude_devices',
-                              self.EXCLUDE_DEVICES_LIST,
-                              'SRIOV_NIC')
-        config_parser = agent.SriovNicAgentConfigParser()
-        config_parser.parse()
-        device_mappings = config_parser.device_mappings
-        exclude_devices = config_parser.exclude_devices
-        self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices)
-        self.assertEqual(self.DEVICE_MAPPING, device_mappings)
-
-    def test_validate_config_fail(self):
-        cfg.CONF.set_override('physical_device_mappings',
-                              self.DEVICE_MAPPING_LIST,
-                              'SRIOV_NIC')
-        cfg.CONF.set_override('exclude_devices',
-                              self.EXCLUDE_DEVICES_LIST_INVALID,
-                              'SRIOV_NIC')
-        config_parser = agent.SriovNicAgentConfigParser()
-        self.assertRaises(ValueError, config_parser.parse)
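The fixtures above pin down the exclude_devices wire format: each entry is '<device>:<pci>;<pci>...', whitespace around separators is tolerated, and an entry without a colon must raise ValueError. A minimal standalone re-implementation of that rule (this parse_exclude_devices is a sketch, not the driver's config module):

def parse_exclude_devices(entries):
    """Parse ['dev:pci;pci', ...] into {dev: {pci, ...}}."""
    exclude = {}
    for entry in entries:
        # Split on the first ':' only; PCI addresses themselves
        # contain colons.
        dev, sep, pcis = entry.partition(':')
        if not sep:
            raise ValueError('invalid exclude_devices entry: %s' % entry)
        exclude[dev.strip()] = {p.strip() for p in pcis.split(';')
                                if p.strip()}
    return exclude

assert parse_exclude_devices(['p7p1: 0000:07:00.1 ; 0000:07:00.2',
                              'p3p1:0000:04:00.3 ']) == {
    'p7p1': {'0000:07:00.1', '0000:07:00.2'},
    'p3p1': {'0000:04:00.3'}}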
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py
deleted file mode 100755 (executable)
index db4ca6b..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2015 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron import context
-from neutron.objects.qos import policy
-from neutron.objects.qos import rule
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
-from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
-    qos_driver)
-from neutron.services.qos import qos_consts
-from neutron.tests import base
-
-
-class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
-
-    ASSIGNED_MAC = '00:00:00:00:00:66'
-    PCI_SLOT = '0000:06:00.1'
-
-    def setUp(self):
-        super(QosSRIOVAgentDriverTestCase, self).setUp()
-        self.context = context.get_admin_context()
-        self.qos_driver = qos_driver.QosSRIOVAgentDriver()
-        self.qos_driver.initialize()
-        self.qos_driver.eswitch_mgr = mock.Mock()
-        self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
-        self.qos_driver.eswitch_mgr.clear_max_rate = mock.Mock()
-        self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
-        self.clear_max_rate_mock = self.qos_driver.eswitch_mgr.clear_max_rate
-        self.rule = self._create_bw_limit_rule_obj()
-        self.qos_policy = self._create_qos_policy_obj([self.rule])
-        self.port = self._create_fake_port(self.qos_policy.id)
-
-    def _create_bw_limit_rule_obj(self):
-        rule_obj = rule.QosBandwidthLimitRule()
-        rule_obj.id = uuidutils.generate_uuid()
-        rule_obj.max_kbps = 2
-        rule_obj.max_burst_kbps = 200
-        rule_obj.obj_reset_changes()
-        return rule_obj
-
-    def _create_qos_policy_obj(self, rules):
-        policy_dict = {'id': uuidutils.generate_uuid(),
-                       'tenant_id': uuidutils.generate_uuid(),
-                       'name': 'test',
-                       'description': 'test',
-                       'shared': False,
-                       'rules': rules}
-        policy_obj = policy.QosPolicy(self.context, **policy_dict)
-        policy_obj.obj_reset_changes()
-        for policy_rule in policy_obj.rules:
-            policy_rule.qos_policy_id = policy_obj.id
-            policy_rule.obj_reset_changes()
-
-        return policy_obj
-
-    def _create_fake_port(self, qos_policy_id):
-        return {'port_id': uuidutils.generate_uuid(),
-                'profile': {'pci_slot': self.PCI_SLOT},
-                'device': self.ASSIGNED_MAC,
-                qos_consts.QOS_POLICY_ID: qos_policy_id,
-                'device_owner': uuidutils.generate_uuid()}
-
-    def test_create_rule(self):
-        self.qos_driver.create(self.port, self.qos_policy)
-        self.max_rate_mock.assert_called_once_with(
-            self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
-
-    def test_update_rule(self):
-        self.qos_driver.update(self.port, self.qos_policy)
-        self.max_rate_mock.assert_called_once_with(
-            self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
-
-    def test_delete_rules_on_assigned_vf(self):
-        self.qos_driver.delete(self.port, self.qos_policy)
-        self.max_rate_mock.assert_called_once_with(
-            self.ASSIGNED_MAC, self.PCI_SLOT, 0)
-
-    def test_delete_rules_on_released_vf(self):
-        del self.port['device_owner']
-        self.qos_driver.delete(self.port, self.qos_policy)
-        self.clear_max_rate_mock.assert_called_once_with(self.PCI_SLOT)
-
-    def test__set_vf_max_rate_captures_sriov_failure(self):
-        self.max_rate_mock.side_effect = exceptions.SriovNicError()
-        self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
-
-    def test__set_vf_max_rate_unknown_device(self):
-        with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
-                               return_value=False):
-            self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
-            self.assertFalse(self.max_rate_mock.called)
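The two delete tests above encode a branch worth spelling out: a port that still carries a device_owner is treated as an assigned VF and has its rate forced to 0 through its MAC/PCI-slot pair, while a released VF (no device_owner) is cleared by PCI slot alone. A sketch of that dispatch, assuming a duck-typed eswitch object exposing the two methods the tests mock:

from unittest import mock

def delete_bw_limit(eswitch, port):
    pci_slot = port['profile']['pci_slot']
    if port.get('device_owner'):
        # VF still assigned: zero the rate via the device identity.
        eswitch.set_device_max_rate(port['device'], pci_slot, 0)
    else:
        # VF released: only the PCI slot is still meaningful.
        eswitch.clear_max_rate(pci_slot)

eswitch = mock.Mock()
delete_bw_limit(eswitch, {'profile': {'pci_slot': '0000:06:00.1'},
                          'device': '00:00:00:00:00:66',
                          'device_owner': 'compute:nova'})
eswitch.set_device_max_rate.assert_called_once_with(
    '00:00:00:00:00:66', '0000:06:00.1', 0)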
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py
deleted file mode 100644 (file)
index bdaf9f3..0000000
+++ /dev/null
@@ -1,485 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-
-import mock
-import testtools
-
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
-    import exceptions as exc
-from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
-from neutron.tests import base
-
-
-class TestCreateESwitchManager(base.BaseTestCase):
-    SCANNED_DEVICES = [('0000:06:00.1', 0),
-                       ('0000:06:00.2', 1),
-                       ('0000:06:00.3', 2)]
-
-    def test_create_eswitch_mgr_fail(self):
-        device_mappings = {'physnet1': 'p6p1'}
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.PciOsWrapper.scan_vf_devices",
-                        side_effect=exc.InvalidDeviceError(
-                            dev_name="p6p1", reason="device" " not found")),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                           return_value=True):
-
-            with testtools.ExpectedException(exc.InvalidDeviceError):
-                esm.ESwitchManager().discover_devices(
-                    device_mappings, None)
-
-    def test_create_eswitch_mgr_ok(self):
-        device_mappings = {'physnet1': 'p6p1'}
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.PciOsWrapper.scan_vf_devices",
-                        return_value=self.SCANNED_DEVICES),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                           return_value=True):
-
-            esm.ESwitchManager().discover_devices(device_mappings, None)
-
-
-class TestESwitchManagerApi(base.BaseTestCase):
-    SCANNED_DEVICES = [('0000:06:00.1', 0),
-                       ('0000:06:00.2', 1),
-                       ('0000:06:00.3', 2)]
-
-    ASSIGNED_MAC = '00:00:00:00:00:66'
-    PCI_SLOT = '0000:06:00.1'
-    WRONG_MAC = '00:00:00:00:00:67'
-    WRONG_PCI = "0000:06:00.6"
-
-    def setUp(self):
-        super(TestESwitchManagerApi, self).setUp()
-        device_mappings = {'physnet1': 'p6p1'}
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.PciOsWrapper.scan_vf_devices",
-                        return_value=self.SCANNED_DEVICES),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                           return_value=True):
-            self.eswitch_mgr = esm.ESwitchManager()
-            self.eswitch_mgr.discover_devices(device_mappings, None)
-
-    def test_get_assigned_devices_info(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.EmbSwitch.get_assigned_devices_info",
-                        return_value=[(self.ASSIGNED_MAC, self.PCI_SLOT)]):
-            result = self.eswitch_mgr.get_assigned_devices_info()
-            self.assertIn(self.ASSIGNED_MAC, list(result)[0])
-            self.assertIn(self.PCI_SLOT, list(result)[0])
-
-    def test_get_device_status_true(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.EmbSwitch.get_pci_device",
-                        return_value=self.ASSIGNED_MAC),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.EmbSwitch.get_device_state",
-                           return_value=True):
-            result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC,
-                                                       self.PCI_SLOT)
-            self.assertTrue(result)
-
-    def test_get_device_status_false(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.EmbSwitch.get_pci_device",
-                        return_value=self.ASSIGNED_MAC),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.EmbSwitch.get_device_state",
-                           return_value=False):
-            result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC,
-                                                       self.PCI_SLOT)
-            self.assertFalse(result)
-
-    def test_get_device_status_mismatch(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.EmbSwitch.get_pci_device",
-                        return_value=self.ASSIGNED_MAC),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.EmbSwitch.get_device_state",
-                           return_value=True):
-            with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                            "eswitch_manager.LOG.warning") as log_mock:
-                result = self.eswitch_mgr.get_device_state(self.WRONG_MAC,
-                                                           self.PCI_SLOT)
-                log_mock.assert_called_with('device pci mismatch: '
-                                            '%(device_mac)s - %(pci_slot)s',
-                                            {'pci_slot': self.PCI_SLOT,
-                                             'device_mac': self.WRONG_MAC})
-                self.assertFalse(result)
-
-    def test_set_device_status(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.EmbSwitch.get_pci_device",
-                        return_value=self.ASSIGNED_MAC),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.EmbSwitch.set_device_state"):
-            self.eswitch_mgr.set_device_state(self.ASSIGNED_MAC,
-                                              self.PCI_SLOT, True)
-
-    def test_set_device_max_rate(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.EmbSwitch.get_pci_device",
-                        return_value=self.ASSIGNED_MAC) as get_pci_mock,\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.EmbSwitch.set_device_max_rate")\
-                as set_device_max_rate_mock:
-            self.eswitch_mgr.set_device_max_rate(self.ASSIGNED_MAC,
-                                                 self.PCI_SLOT, 1000)
-            get_pci_mock.assert_called_once_with(self.PCI_SLOT)
-            set_device_max_rate_mock.assert_called_once_with(
-                self.PCI_SLOT, 1000)
-
-    def test_set_device_status_mismatch(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.EmbSwitch.get_pci_device",
-                        return_value=self.ASSIGNED_MAC),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.EmbSwitch.set_device_state"):
-            with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                            "eswitch_manager.LOG.warning") as log_mock:
-                self.eswitch_mgr.set_device_state(self.WRONG_MAC,
-                                                  self.PCI_SLOT, True)
-                log_mock.assert_called_with('device pci mismatch: '
-                                            '%(device_mac)s - %(pci_slot)s',
-                                            {'pci_slot': self.PCI_SLOT,
-                                             'device_mac': self.WRONG_MAC})
-
-    def _mock_device_exists(self, pci_slot, mac_address, expected_result):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.EmbSwitch.get_pci_device",
-                        return_value=self.ASSIGNED_MAC):
-            result = self.eswitch_mgr.device_exists(mac_address,
-                                                    pci_slot)
-            self.assertEqual(expected_result, result)
-
-    def test_device_exists_true(self):
-        self._mock_device_exists(self.PCI_SLOT,
-                                 self.ASSIGNED_MAC,
-                                 True)
-
-    def test_device_exists_false(self):
-        self._mock_device_exists(self.WRONG_PCI,
-                                 self.WRONG_MAC,
-                                 False)
-
-    def test_device_exists_mismatch(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.EmbSwitch.get_pci_device",
-                        return_value=self.ASSIGNED_MAC):
-            with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                            "eswitch_manager.LOG.warning") as log_mock:
-                result = self.eswitch_mgr.device_exists(self.WRONG_MAC,
-                                                        self.PCI_SLOT)
-                log_mock.assert_called_with('device pci mismatch: '
-                                            '%(device_mac)s - %(pci_slot)s',
-                                            {'pci_slot': self.PCI_SLOT,
-                                             'device_mac': self.WRONG_MAC})
-                self.assertFalse(result)
-
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                "PciDeviceIPWrapper.get_assigned_macs",
-                return_value={})
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                "eswitch_manager.EmbSwitch.set_device_max_rate")
-    def test_clear_max_rate_existing_pci_slot(self, max_rate_mock, *args):
-        self.eswitch_mgr.clear_max_rate(self.PCI_SLOT)
-        max_rate_mock.assert_called_once_with(self.PCI_SLOT, 0)
-
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                "PciDeviceIPWrapper.get_assigned_macs",
-                return_value={0: ASSIGNED_MAC})
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                "eswitch_manager.EmbSwitch.set_device_max_rate")
-    def test_clear_max_rate_exist_and_assigned_pci(
-            self, max_rate_mock, *args):
-        self.eswitch_mgr.clear_max_rate(self.PCI_SLOT)
-        self.assertFalse(max_rate_mock.called)
-
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                "eswitch_manager.EmbSwitch.set_device_max_rate")
-    def test_clear_max_rate_nonexisting_pci_slot(self, max_rate_mock):
-        self.eswitch_mgr.clear_max_rate(self.WRONG_PCI)
-        self.assertFalse(max_rate_mock.called)
-
-
-class TestEmbSwitch(base.BaseTestCase):
-    DEV_NAME = "eth2"
-    PHYS_NET = "default"
-    ASSIGNED_MAC = '00:00:00:00:00:66'
-    PCI_SLOT = "0000:06:00.1"
-    WRONG_PCI_SLOT = "0000:06:00.4"
-    SCANNED_DEVICES = [('0000:06:00.1', 0),
-                       ('0000:06:00.2', 1),
-                       ('0000:06:00.3', 2)]
-    VF_TO_MAC_MAPPING = {0: '00:00:00:00:00:11',
-                         1: '00:00:00:00:00:22',
-                         2: '00:00:00:00:00:33'}
-    EXPECTED_MAC_TO_PCI = {
-        '00:00:00:00:00:11': '0000:06:00.1',
-        '00:00:00:00:00:22': '0000:06:00.2',
-        '00:00:00:00:00:33': '0000:06:00.3'}
-
-    def setUp(self):
-        super(TestEmbSwitch, self).setUp()
-        exclude_devices = set()
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.PciOsWrapper.scan_vf_devices",
-                        return_value=self.SCANNED_DEVICES):
-            self.emb_switch = esm.EmbSwitch(self.PHYS_NET, self.DEV_NAME,
-                                            exclude_devices)
-
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                "eswitch_manager.PciOsWrapper.scan_vf_devices",
-                return_value=[(PCI_SLOT, 0)])
-    def test_get_assigned_devices_info(self, *args):
-        emb_switch = esm.EmbSwitch(self.PHYS_NET, self.DEV_NAME, ())
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.get_assigned_macs",
-                        return_value={0: self.ASSIGNED_MAC}),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                           return_value=True):
-            result = emb_switch.get_assigned_devices_info()
-            self.assertIn(self.ASSIGNED_MAC, list(result)[0])
-            self.assertIn(self.PCI_SLOT, list(result)[0])
-
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                "eswitch_manager.PciOsWrapper.scan_vf_devices",
-                return_value=SCANNED_DEVICES)
-    def test_get_assigned_devices_info_multiple_slots(self, *args):
-        emb_switch = esm.EmbSwitch(self.PHYS_NET, self.DEV_NAME, ())
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.get_assigned_macs",
-                        return_value=self.VF_TO_MAC_MAPPING),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                           return_value=True):
-            devices_info = emb_switch.get_assigned_devices_info()
-            for device_info in devices_info:
-                mac = device_info[0]
-                pci_slot = device_info[1]
-                self.assertEqual(
-                    self.EXPECTED_MAC_TO_PCI[mac], pci_slot)
-
-    def test_get_assigned_devices_empty(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                        "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                        return_value=False):
-            result = self.emb_switch.get_assigned_devices_info()
-            self.assertFalse(result)
-
-    def test_get_device_state_ok(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.get_vf_state",
-                        return_value=False):
-            result = self.emb_switch.get_device_state(self.PCI_SLOT)
-            self.assertFalse(result)
-
-    def test_get_device_state_fail(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.get_vf_state",
-                        return_value=False):
-            self.assertRaises(exc.InvalidPciSlotError,
-                              self.emb_switch.get_device_state,
-                              self.WRONG_PCI_SLOT)
-
-    def test_set_device_state_ok(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_state"):
-            with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                            "pci_lib.LOG.warning") as log_mock:
-                self.emb_switch.set_device_state(self.PCI_SLOT, True)
-                self.assertEqual(0, log_mock.call_count)
-
-    def test_set_device_state_fail(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_state"):
-            self.assertRaises(exc.InvalidPciSlotError,
-                              self.emb_switch.set_device_state,
-                              self.WRONG_PCI_SLOT, True)
-
-    def test_set_device_spoofcheck_ok(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_spoofcheck") as \
-                                set_vf_spoofcheck_mock:
-            self.emb_switch.set_device_spoofcheck(self.PCI_SLOT, True)
-            self.assertTrue(set_vf_spoofcheck_mock.called)
-
-    def test_set_device_spoofcheck_fail(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_spoofcheck"):
-            self.assertRaises(exc.InvalidPciSlotError,
-                              self.emb_switch.set_device_spoofcheck,
-                              self.WRONG_PCI_SLOT, True)
-
-    def test_set_device_max_rate_ok(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
-            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2000)
-            pci_lib_mock.assert_called_with(0, 2)
-
-    def test_set_device_max_rate_ok2(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
-            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 99)
-            pci_lib_mock.assert_called_with(0, 1)
-
-    def test_set_device_max_rate_rounded_ok(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
-            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2001)
-            pci_lib_mock.assert_called_with(0, 2)
-
-    def test_set_device_max_rate_rounded_ok2(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
-            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2499)
-            pci_lib_mock.assert_called_with(0, 2)
-
-    def test_set_device_max_rate_rounded_ok3(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
-            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2500)
-            pci_lib_mock.assert_called_with(0, 3)
-
-    def test_set_device_max_rate_disable(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
-            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 0)
-            pci_lib_mock.assert_called_with(0, 0)
-
-    def test_set_device_max_rate_fail(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.set_vf_max_rate"):
-            self.assertRaises(exc.InvalidPciSlotError,
-                              self.emb_switch.set_device_max_rate,
-                              self.WRONG_PCI_SLOT, 1000)
-
-    def test_get_pci_device(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.get_assigned_macs",
-                        return_value={0: self.ASSIGNED_MAC}),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                           return_value=True):
-            result = self.emb_switch.get_pci_device(self.PCI_SLOT)
-            self.assertEqual(self.ASSIGNED_MAC, result)
-
-    def test_get_pci_device_fail(self):
-        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                        "PciDeviceIPWrapper.get_assigned_macs",
-                        return_value={0: self.ASSIGNED_MAC}),\
-                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                           return_value=True):
-            result = self.emb_switch.get_pci_device(self.WRONG_PCI_SLOT)
-            self.assertIsNone(result)
-
-    def test_get_pci_list(self):
-        result = self.emb_switch.get_pci_slot_list()
-        self.assertEqual([tup[0] for tup in self.SCANNED_DEVICES],
-                         sorted(result))
-
-
-class TestPciOsWrapper(base.BaseTestCase):
-    DEV_NAME = "p7p1"
-    VF_INDEX = 1
-    DIR_CONTENTS = [
-        "mlx4_port1",
-        "virtfn0",
-        "virtfn1",
-        "virtfn2"
-    ]
-    DIR_CONTENTS_NO_MATCH = [
-        "mlx4_port1",
-        "mlx4_port1"
-    ]
-    LINKS = {
-        "virtfn0": "../0000:04:00.1",
-        "virtfn1": "../0000:04:00.2",
-        "virtfn2": "../0000:04:00.3"
-    }
-    PCI_SLOTS = [
-        ('0000:04:00.1', 0),
-        ('0000:04:00.2', 1),
-        ('0000:04:00.3', 2)
-    ]
-
-    def test_scan_vf_devices(self):
-        def _get_link(file_path):
-            file_name = os.path.basename(file_path)
-            return self.LINKS[file_name]
-
-        with mock.patch("os.path.isdir", return_value=True),\
-                mock.patch("os.listdir", return_value=self.DIR_CONTENTS),\
-                mock.patch("os.path.islink", return_value=True),\
-                mock.patch("os.readlink", side_effect=_get_link):
-            result = esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME)
-            self.assertEqual(self.PCI_SLOTS, result)
-
-    def test_scan_vf_devices_no_dir(self):
-        with mock.patch("os.path.isdir", return_value=False):
-            self.assertRaises(exc.InvalidDeviceError,
-                              esm.PciOsWrapper.scan_vf_devices,
-                              self.DEV_NAME)
-
-    def test_scan_vf_devices_no_content(self):
-        with mock.patch("os.path.isdir", return_value=True),\
-                mock.patch("os.listdir", return_value=[]):
-            self.assertRaises(exc.InvalidDeviceError,
-                              esm.PciOsWrapper.scan_vf_devices,
-                              self.DEV_NAME)
-
-    def test_scan_vf_devices_no_match(self):
-        with mock.patch("os.path.isdir", return_value=True),\
-                mock.patch("os.listdir",
-                           return_value=self.DIR_CONTENTS_NO_MATCH):
-            self.assertRaises(exc.InvalidDeviceError,
-                              esm.PciOsWrapper.scan_vf_devices,
-                              self.DEV_NAME)
-
-    @mock.patch("os.listdir", side_effect=OSError())
-    def test_is_assigned_vf_true(self, *args):
-        self.assertTrue(esm.PciOsWrapper.is_assigned_vf(
-            self.DEV_NAME, self.VF_INDEX))
-
-    @mock.patch("os.listdir", return_value=[DEV_NAME, "eth1"])
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                "PciDeviceIPWrapper.is_macvtap_assigned", return_value=False)
-    def test_is_assigned_vf_false(self, *args):
-        self.assertFalse(esm.PciOsWrapper.is_assigned_vf(
-            self.DEV_NAME, self.VF_INDEX))
-
-    @mock.patch("os.listdir", return_value=["eth0", "eth1"])
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                "PciDeviceIPWrapper.is_macvtap_assigned", return_value=True)
-    def test_is_assigned_vf_macvtap(
-            self, mock_is_macvtap_assigned, *args):
-        esm.PciOsWrapper.is_assigned_vf(self.DEV_NAME, self.VF_INDEX)
-        mock_is_macvtap_assigned.assert_called_with(self.VF_INDEX, "eth0")
-
-    @mock.patch("os.listdir", side_effect=OSError())
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                "PciDeviceIPWrapper.is_macvtap_assigned")
-    def test_is_assigned_vf_macvtap_failure(
-            self, mock_is_macvtap_assigned, *args):
-        esm.PciOsWrapper.is_assigned_vf(self.DEV_NAME, self.VF_INDEX)
-        self.assertFalse(mock_is_macvtap_assigned.called)
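The set_device_max_rate cases in TestEmbSwitch above jointly define a rounding rule for VF rate limits: kbps values are converted to whole Mbps, 0 disables the limit, any non-zero value is clamped up to at least 1 Mbps, and exact halves round up. A standalone sketch of that rule (kbps_to_mbps is a hypothetical name for illustration; floor(x + 0.5) is used because Python 3's round() applies banker's rounding and would turn 2500 into 2 rather than 3):

import math

def kbps_to_mbps(rate_kbps):
    if not rate_kbps:
        return 0  # 0 means "no limit", not "1 Mbps"
    return max(1, int(math.floor(rate_kbps / 1000.0 + 0.5)))

for kbps, mbps in [(2000, 2), (99, 1), (2001, 2),
                   (2499, 2), (2500, 3), (0, 0)]:
    assert kbps_to_mbps(kbps) == mbps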
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py
deleted file mode 100644 (file)
index c6b892e..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import mock
-
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
-    import exceptions as exc
-from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib
-from neutron.tests import base
-
-
-class TestPciLib(base.BaseTestCase):
-    DEV_NAME = "p7p1"
-    VF_INDEX = 1
-    VF_INDEX_DISABLE = 0
-    PF_LINK_SHOW = ('122: p7p1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop'
-                    ' state DOWN mode DEFAULT group default qlen 1000')
-    PF_MAC = '    link/ether f4:52:14:2a:3e:c0 brd ff:ff:ff:ff:ff:ff'
-    VF_0_LINK_SHOW = ('    vf 0 MAC fa:16:3e:b4:81:ac, vlan 4095, spoof'
-                      ' checking off, link-state disable')
-    VF_1_LINK_SHOW = ('    vf 1 MAC 00:00:00:00:00:11, vlan 4095, spoof'
-                      ' checking off, link-state enable')
-    VF_2_LINK_SHOW = ('    vf 2 MAC fa:16:3e:68:4e:79, vlan 4095, spoof'
-                      ' checking off, link-state enable')
-    VF_LINK_SHOW = '\n'.join((PF_LINK_SHOW, PF_MAC, VF_0_LINK_SHOW,
-                              VF_1_LINK_SHOW, VF_2_LINK_SHOW))
-    MACVTAP_LINK_SHOW = ('63: macvtap1@enp129s0f1: <BROADCAST,MULTICAST> mtu '
-                         '1500 qdisc  noop state DOWN mode DEFAULT group '
-                         'default qlen 500 link/ether 4a:9b:6d:de:65:b5 brd '
-                         'ff:ff:ff:ff:ff:ff')
-
-    IP_LINK_SHOW_WITH_MACVTAP = '\n'.join((VF_LINK_SHOW, MACVTAP_LINK_SHOW))
-
-    MAC_MAPPING = {
-        0: "fa:16:3e:b4:81:ac",
-        1: "00:00:00:00:00:11",
-        2: "fa:16:3e:68:4e:79",
-    }
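-    # MAC_MAPPING mirrors the three "vf N MAC ..." lines in VF_LINK_SHOW
-    # above; get_assigned_macs() is expected to parse those lines back
-    # into this index-to-MAC dict.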
-
-    def setUp(self):
-        super(TestPciLib, self).setUp()
-        self.pci_wrapper = pci_lib.PciDeviceIPWrapper(self.DEV_NAME)
-
-    def test_get_assigned_macs(self):
-        with mock.patch.object(self.pci_wrapper,
-                               "_as_root") as mock_as_root:
-            mock_as_root.return_value = self.VF_LINK_SHOW
-            result = self.pci_wrapper.get_assigned_macs([self.VF_INDEX])
-            self.assertEqual(
-                {self.VF_INDEX: self.MAC_MAPPING[self.VF_INDEX]}, result)
-
-    def test_get_assigned_macs_fail(self):
-        with mock.patch.object(self.pci_wrapper,
-                               "_as_root") as mock_as_root:
-            mock_as_root.side_effect = Exception()
-            self.assertRaises(exc.IpCommandDeviceError,
-                              self.pci_wrapper.get_assigned_macs,
-                              [self.VF_INDEX])
-
-    def test_get_vf_state_enable(self):
-        with mock.patch.object(self.pci_wrapper,
-                               "_as_root") as mock_as_root:
-            mock_as_root.return_value = self.VF_LINK_SHOW
-            result = self.pci_wrapper.get_vf_state(self.VF_INDEX)
-            self.assertTrue(result)
-
-    def test_get_vf_state_disable(self):
-        with mock.patch.object(self.pci_wrapper,
-                               "_as_root") as mock_as_root:
-            mock_as_root.return_value = self.VF_LINK_SHOW
-            result = self.pci_wrapper.get_vf_state(self.VF_INDEX_DISABLE)
-            self.assertFalse(result)
-
-    def test_get_vf_state_fail(self):
-        with mock.patch.object(self.pci_wrapper,
-                               "_as_root") as mock_as_root:
-            mock_as_root.side_effect = Exception()
-            self.assertRaises(exc.IpCommandDeviceError,
-                              self.pci_wrapper.get_vf_state,
-                              self.VF_INDEX)
-
-    def test_set_vf_state(self):
-        with mock.patch.object(self.pci_wrapper, "_as_root"):
-            result = self.pci_wrapper.set_vf_state(self.VF_INDEX,
-                                                   True)
-            self.assertIsNone(result)
-
-    def test_set_vf_state_fail(self):
-        with mock.patch.object(self.pci_wrapper,
-                               "_as_root") as mock_as_root:
-            mock_as_root.side_effect = Exception()
-            self.assertRaises(exc.IpCommandDeviceError,
-                              self.pci_wrapper.set_vf_state,
-                              self.VF_INDEX,
-                              True)
-
-    def test_set_vf_spoofcheck(self):
-        with mock.patch.object(self.pci_wrapper, "_execute"):
-            result = self.pci_wrapper.set_vf_spoofcheck(self.VF_INDEX,
-                                                        True)
-            self.assertIsNone(result)
-
-    def test_set_vf_spoofcheck_fail(self):
-        with mock.patch.object(self.pci_wrapper,
-                               "_execute") as mock_exec:
-            mock_exec.side_effect = Exception()
-            self.assertRaises(exc.IpCommandDeviceError,
-                              self.pci_wrapper.set_vf_spoofcheck,
-                              self.VF_INDEX,
-                              True)
-
-    def test_set_vf_max_rate(self):
-        with mock.patch.object(self.pci_wrapper, "_as_root") \
-                as mock_as_root:
-            result = self.pci_wrapper.set_vf_max_rate(self.VF_INDEX, 1000)
-            self.assertIsNone(result)
-        mock_as_root.assert_called_once_with(
-            [], "link",
-            ("set", self.DEV_NAME, "vf", str(self.VF_INDEX), "rate", '1000'))
-
-    def test_set_vf_max_rate_fail(self):
-        with mock.patch.object(self.pci_wrapper,
-                               "_execute") as mock_exec:
-            mock_exec.side_effect = Exception()
-            self.assertRaises(exc.IpCommandDeviceError,
-                              self.pci_wrapper.set_vf_max_rate,
-                              self.VF_INDEX,
-                              1000)
-
-    def test_set_vf_state_not_supported(self):
-        with mock.patch.object(self.pci_wrapper,
-                               "_execute") as mock_exec:
-            mock_exec.side_effect = Exception(
-                pci_lib.PciDeviceIPWrapper.IP_LINK_OP_NOT_SUPPORTED)
-            self.assertRaises(exc.IpCommandOperationNotSupportedError,
-                              self.pci_wrapper.set_vf_state,
-                              self.VF_INDEX,
-                              state=True)
-
-    def test_is_macvtap_assigned(self):
-        with mock.patch.object(pci_lib.PciDeviceIPWrapper,
-                               "_execute") as mock_exec:
-            mock_exec.return_value = self.IP_LINK_SHOW_WITH_MACVTAP
-            self.assertTrue(
-                pci_lib.PciDeviceIPWrapper.is_macvtap_assigned('enp129s0f1'))
-
-    def test_is_macvtap_assigned_not_assigned(self):
-        with mock.patch.object(pci_lib.PciDeviceIPWrapper,
-                               "_execute") as mock_exec:
-            mock_exec.return_value = self.IP_LINK_SHOW_WITH_MACVTAP
-            self.assertFalse(
-                pci_lib.PciDeviceIPWrapper.is_macvtap_assigned('enp129s0f2'))
-
-    def test_is_macvtap_assigned_failed(self):
-        with mock.patch.object(pci_lib.PciDeviceIPWrapper,
-                               "_execute") as mock_exec:
-            mock_exec.side_effect = Exception()
-            self.assertRaises(exc.IpCommandError,
-                              pci_lib.PciDeviceIPWrapper.is_macvtap_assigned,
-                              'enp129s0f3')
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py
deleted file mode 100644 (file)
index 947a7ae..0000000
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import mock
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from neutron.agent.l2.extensions import manager as l2_ext_manager
-from neutron.agent import rpc as agent_rpc
-from neutron.extensions import portbindings
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config  # noqa
-from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
-from neutron.plugins.ml2.drivers.mech_sriov.agent import sriov_nic_agent
-from neutron.tests import base
-
-DEVICE_MAC = '11:22:33:44:55:66'
-PCI_SLOT = "0000:06:00.1"
-
-
-class TestSriovAgent(base.BaseTestCase):
-    def setUp(self):
-        super(TestSriovAgent, self).setUp()
-        # disable setting up periodic state reporting
-        cfg.CONF.set_override('report_interval', 0, 'AGENT')
-        cfg.CONF.set_default('firewall_driver',
-                             'neutron.agent.firewall.NoopFirewallDriver',
-                             group='SECURITYGROUP')
-        cfg.CONF.set_default('enable_security_group',
-                             False,
-                             group='SECURITYGROUP')
-
-        class MockFixedIntervalLoopingCall(object):
-            def __init__(self, f):
-                self.f = f
-
-            def start(self, interval=0):
-                self.f()
-
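-        # Replace FixedIntervalLoopingCall (used for the periodic state
-        # report loop) with a synchronous stand-in so the test stays
-        # single-threaded and the report function runs exactly once.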
-        # mock.patch() alone only builds a patcher; it has to be
-        # started for the stand-in class to take effect.
-        mock.patch('oslo_service.loopingcall.'
-                   'FixedIntervalLoopingCall',
-                   new=MockFixedIntervalLoopingCall).start()
-
-        self.agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0)
-
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                "PciDeviceIPWrapper.get_assigned_macs",
-                return_value=[(DEVICE_MAC, PCI_SLOT)])
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                return_value=True)
-    def test_treat_devices_removed_with_existed_device(self, *args):
-        agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0)
-        devices = [(DEVICE_MAC, PCI_SLOT)]
-        with mock.patch.object(agent.plugin_rpc,
-                               "update_device_down") as fn_udd:
-            fn_udd.return_value = {'device': DEVICE_MAC,
-                                   'exists': True}
-            resync = agent.treat_devices_removed(devices)
-            self.assertFalse(resync)
-            self.assertTrue(fn_udd.called)
-
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                "PciDeviceIPWrapper.get_assigned_macs",
-                return_value=[(DEVICE_MAC, PCI_SLOT)])
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                return_value=True)
-    def test_treat_devices_removed_with_not_existed_device(self, *args):
-        agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0)
-        devices = [(DEVICE_MAC, PCI_SLOT)]
-        with mock.patch.object(agent.plugin_rpc,
-                               "update_device_down") as fn_udd:
-            fn_udd.return_value = {'device': DEVICE_MAC,
-                                   'exists': False}
-            with mock.patch.object(sriov_nic_agent.LOG,
-                                   'debug') as log:
-                resync = agent.treat_devices_removed(devices)
-                self.assertEqual(1, log.call_count)
-                self.assertFalse(resync)
-                self.assertTrue(fn_udd.called)
-
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
-                "PciDeviceIPWrapper.get_assigned_macs",
-                return_value=[(DEVICE_MAC, PCI_SLOT)])
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
-                "eswitch_manager.PciOsWrapper.is_assigned_vf",
-                return_value=True)
-    def test_treat_devices_removed_failed(self, *args):
-        agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0)
-        devices = [(DEVICE_MAC, PCI_SLOT)]
-        with mock.patch.object(agent.plugin_rpc,
-                               "update_device_down") as fn_udd:
-            fn_udd.side_effect = Exception()
-            with mock.patch.object(sriov_nic_agent.LOG,
-                                   'debug') as log:
-                resync = agent.treat_devices_removed(devices)
-                self.assertEqual(1, log.call_count)
-                self.assertTrue(resync)
-                self.assertTrue(fn_udd.called)
-
-    def mock_scan_devices(self, expected, mock_current,
-                          registered_devices, updated_devices):
-        self.agent.eswitch_mgr = mock.Mock()
-        self.agent.eswitch_mgr.get_assigned_devices_info.return_value = (
-            mock_current)
-
-        results = self.agent.scan_devices(registered_devices, updated_devices)
-        self.assertEqual(expected, results)
-
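-    # The scan_devices() contract, as the tests below exercise it:
-    # 'added' is current - registered, 'removed' is registered - current,
-    # and 'updated' keeps only devices still present in current
-    # (see test_scan_devices_updated_missing).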
-    def test_scan_devices_returns_empty_sets(self):
-        registered = set()
-        updated = set()
-        mock_current = set()
-        expected = {'current': set(),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set()}
-        self.mock_scan_devices(expected, mock_current, registered, updated)
-
-    def test_scan_devices_no_changes(self):
-        registered = set(['1', '2'])
-        updated = set()
-        mock_current = set(['1', '2'])
-        expected = {'current': set(['1', '2']),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set()}
-        self.mock_scan_devices(expected, mock_current, registered, updated)
-
-    def test_scan_devices_new_and_removed(self):
-        registered = set(['1', '2'])
-        updated = set()
-        mock_current = set(['2', '3'])
-        expected = {'current': set(['2', '3']),
-                    'updated': set(),
-                    'added': set(['3']),
-                    'removed': set(['1'])}
-        self.mock_scan_devices(expected, mock_current, registered, updated)
-
-    def test_scan_devices_new_updates(self):
-        registered = set(['1'])
-        updated = set(['2'])
-        mock_current = set(['1', '2'])
-        expected = {'current': set(['1', '2']),
-                    'updated': set(['2']),
-                    'added': set(['2']),
-                    'removed': set()}
-        self.mock_scan_devices(expected, mock_current, registered, updated)
-
-    def test_scan_devices_updated_missing(self):
-        registered = set(['1'])
-        updated = set(['2'])
-        mock_current = set(['1'])
-        expected = {'current': set(['1']),
-                    'updated': set(),
-                    'added': set(),
-                    'removed': set()}
-        self.mock_scan_devices(expected, mock_current, registered, updated)
-
-    def test_process_network_devices(self):
-        agent = self.agent
-        device_info = {'current': set(),
-                       'added': set(['mac3', 'mac4']),
-                       'updated': set(['mac2', 'mac3']),
-                       'removed': set(['mac1'])}
-        agent.sg_agent.prepare_devices_filter = mock.Mock()
-        agent.sg_agent.refresh_firewall = mock.Mock()
-        agent.treat_devices_added_updated = mock.Mock(return_value=False)
-        agent.treat_devices_removed = mock.Mock(return_value=False)
-
-        agent.process_network_devices(device_info)
-
-        agent.sg_agent.prepare_devices_filter.assert_called_with(
-                set(['mac3', 'mac4']))
-        self.assertTrue(agent.sg_agent.refresh_firewall.called)
-        agent.treat_devices_added_updated.assert_called_with(set(['mac2',
-                                                                  'mac3',
-                                                                  'mac4']))
-        agent.treat_devices_removed.assert_called_with(set(['mac1']))
-
-    def test_treat_devices_added_updated_admin_state_up_true(self):
-        agent = self.agent
-        mock_details = {'device': 'aa:bb:cc:dd:ee:ff',
-                        'port_id': 'port123',
-                        'network_id': 'net123',
-                        'admin_state_up': True,
-                        'network_type': 'vlan',
-                        'segmentation_id': 100,
-                        'profile': {'pci_slot': '1:2:3.0'},
-                        'physical_network': 'physnet1',
-                        'port_security_enabled': False}
-        agent.plugin_rpc = mock.Mock()
-        agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
-        agent.eswitch_mgr = mock.Mock()
-        agent.eswitch_mgr.device_exists.return_value = True
-        agent.set_device_state = mock.Mock()
-        agent.set_device_spoofcheck = mock.Mock()
-        resync_needed = agent.treat_devices_added_updated(
-                                    set(['aa:bb:cc:dd:ee:ff']))
-
-        self.assertFalse(resync_needed)
-        agent.eswitch_mgr.device_exists.assert_called_with(
-            'aa:bb:cc:dd:ee:ff', '1:2:3.0')
-        agent.eswitch_mgr.set_device_state.assert_called_with(
-                                        'aa:bb:cc:dd:ee:ff',
-                                        '1:2:3.0',
-                                        True)
-        agent.eswitch_mgr.set_device_spoofcheck.assert_called_with(
-                                        'aa:bb:cc:dd:ee:ff',
-                                        '1:2:3.0',
-                                        False)
-        self.assertTrue(agent.plugin_rpc.update_device_up.called)
-
-    def test_treat_device_ip_link_state_not_supported(self):
-        agent = self.agent
-        agent.plugin_rpc = mock.Mock()
-        agent.eswitch_mgr = mock.Mock()
-        agent.eswitch_mgr.device_exists.return_value = True
-        agent.eswitch_mgr.set_device_state.side_effect = (
-            exceptions.IpCommandOperationNotSupportedError(
-                dev_name='aa:bb:cc:dd:ee:ff'))
-
-        agent.treat_device('aa:bb:cc:dd:ee:ff', '1:2:3:0',
-                           admin_state_up=True)
-        self.assertTrue(agent.plugin_rpc.update_device_up.called)
-
-    def test_treat_device_set_device_state_exception(self):
-        agent = self.agent
-        agent.plugin_rpc = mock.Mock()
-        agent.eswitch_mgr = mock.Mock()
-        agent.eswitch_mgr.device_exists.return_value = True
-        agent.eswitch_mgr.set_device_state.side_effect = (
-            exceptions.SriovNicError())
-
-        agent.treat_device('aa:bb:cc:dd:ee:ff', '1:2:3:0',
-                           admin_state_up=True)
-        self.assertFalse(agent.plugin_rpc.update_device_up.called)
-
-    def test_treat_devices_added_updated_admin_state_up_false(self):
-        agent = self.agent
-        mock_details = {'device': 'aa:bb:cc:dd:ee:ff',
-                        'port_id': 'port123',
-                        'network_id': 'net123',
-                        'admin_state_up': False,
-                        'network_type': 'vlan',
-                        'segmentation_id': 100,
-                        'profile': {'pci_slot': '1:2:3.0'},
-                        'physical_network': 'physnet1'}
-        agent.plugin_rpc = mock.Mock()
-        agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
-        agent.remove_port_binding = mock.Mock()
-        resync_needed = agent.treat_devices_added_updated(
-                            set(['aa:bb:cc:dd:ee:ff']))
-
-        self.assertFalse(resync_needed)
-        self.assertFalse(agent.plugin_rpc.update_device_up.called)
-
-
-class FakeAgent(object):
-    def __init__(self):
-        self.updated_devices = set()
-
-
-class TestSriovNicSwitchRpcCallbacks(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestSriovNicSwitchRpcCallbacks, self).setUp()
-        self.context = object()
-        self.agent = FakeAgent()
-        sg_agent = object()
-        self.sriov_rpc_callback = sriov_nic_agent.SriovNicSwitchRpcCallbacks(
-            self.context, self.agent, sg_agent)
-
-    def _create_fake_port(self):
-        return {'id': uuidutils.generate_uuid(),
-                portbindings.PROFILE: {'pci_slot': PCI_SLOT},
-                'mac_address': DEVICE_MAC}
-
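-    # port_update() is expected to key updated devices by
-    # (mac_address, pci_slot) from the binding profile; ports without a
-    # pci_slot, and VNIC_DIRECT_PHYSICAL ports (which presumably have no
-    # VF for the agent to manage), are skipped, as the tests below check.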
-    def test_port_update_with_pci_slot(self):
-        port = self._create_fake_port()
-        kwargs = {'context': self.context, 'port': port}
-        self.sriov_rpc_callback.port_update(**kwargs)
-        self.assertEqual(set([(DEVICE_MAC, PCI_SLOT)]),
-                         self.agent.updated_devices)
-
-    def test_port_update_with_vnic_physical_direct(self):
-        port = self._create_fake_port()
-        port[portbindings.VNIC_TYPE] = portbindings.VNIC_DIRECT_PHYSICAL
-        kwargs = {'context': self.context, 'port': port}
-        self.sriov_rpc_callback.port_update(**kwargs)
-        self.assertEqual(set(), self.agent.updated_devices)
-
-    def test_port_update_without_pci_slot(self):
-        port = self._create_fake_port()
-        port[portbindings.PROFILE] = None
-        kwargs = {'context': self.context, 'port': port}
-        self.sriov_rpc_callback.port_update(**kwargs)
-        self.assertEqual(set(), self.agent.updated_devices)
-
-
-class TestSRIOVAgentExtensionConfig(base.BaseTestCase):
-    def setUp(self):
-        super(TestSRIOVAgentExtensionConfig, self).setUp()
-        l2_ext_manager.register_opts(cfg.CONF)
-        # disable setting up periodic state reporting
-        cfg.CONF.set_override('report_interval', 0, group='AGENT')
-        cfg.CONF.set_override('extensions', ['qos'], group='agent')
-
-    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.eswitch_manager"
-               ".ESwitchManager.get_assigned_devices_info", return_value=[])
-    def test_report_loaded_extension(self, *args):
-        with mock.patch.object(agent_rpc.PluginReportStateAPI,
-                               'report_state') as mock_report_state:
-            agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0)
-            agent._report_state()
-            mock_report_state.assert_called_with(
-                agent.context, agent.agent_state)
-            self.assertEqual(
-                ['qos'], agent.agent_state['configurations']['extensions'])
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py
deleted file mode 100644 (file)
index c69e0a4..0000000
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright 2014 Mellanox Technologies, Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo_config import cfg
-import testtools
-
-from neutron.common import constants
-from neutron.extensions import portbindings
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2 import config  # noqa
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \
-    import exceptions as exc
-from neutron.plugins.ml2.drivers.mech_sriov.mech_driver import mech_driver
-from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
-
-MELLANOX_CONNECTX3_PCI_INFO = '15b3:1004'
-DEFAULT_PCI_INFO = ['15b3:1004', '8086:10ca']
-
-
-class TestFakePortContext(base.FakePortContext):
-    def __init__(self, agent_type, agents, segments,
-                 vnic_type=portbindings.VNIC_NORMAL,
-                 profile=None):
-        super(TestFakePortContext, self).__init__(
-            agent_type, agents, segments, vnic_type)
-        # Use None rather than a mutable default argument; an explicit
-        # (even empty) profile passed by a caller is kept as-is.
-        if profile is None:
-            profile = {'pci_vendor_info': MELLANOX_CONNECTX3_PCI_INFO}
-        self._bound_profile = profile
-
-    @property
-    def current(self):
-        return {'id': base.PORT_ID,
-                portbindings.VNIC_TYPE: self._bound_vnic_type,
-                portbindings.PROFILE: self._bound_profile}
-
-    def set_binding(self, segment_id, vif_type, vif_details, state):
-        self._bound_segment_id = segment_id
-        self._bound_vif_type = vif_type
-        self._bound_vif_details = vif_details
-        self._bound_state = state
-
-
-class SriovNicSwitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
-    VIF_TYPE = mech_driver.VIF_TYPE_HW_VEB
-    CAP_PORT_FILTER = False
-    AGENT_TYPE = constants.AGENT_TYPE_NIC_SWITCH
-    VLAN_SEGMENTS = base.AgentMechanismVlanTestCase.VLAN_SEGMENTS
-
-    GOOD_MAPPINGS = {'fake_physical_network': 'fake_device'}
-    GOOD_CONFIGS = {'device_mappings': GOOD_MAPPINGS}
-
-    BAD_MAPPINGS = {'wrong_physical_network': 'wrong_device'}
-    BAD_CONFIGS = {'device_mappings': BAD_MAPPINGS}
-
-    AGENTS = [{'alive': True,
-               'configurations': GOOD_CONFIGS}]
-    AGENTS_DEAD = [{'alive': False,
-                    'configurations': GOOD_CONFIGS}]
-    AGENTS_BAD = [{'alive': False,
-                   'configurations': GOOD_CONFIGS},
-                  {'alive': True,
-                   'configurations': BAD_CONFIGS}]
-
-    def setUp(self):
-        cfg.CONF.set_override('supported_pci_vendor_devs',
-                              DEFAULT_PCI_INFO,
-                              'ml2_sriov')
-        super(SriovNicSwitchMechanismBaseTestCase, self).setUp()
-        self.driver = mech_driver.SriovNicSwitchMechanismDriver()
-        self.driver.initialize()
-
-
-class SriovSwitchMechGenericTestCase(SriovNicSwitchMechanismBaseTestCase,
-                                     base.AgentMechanismGenericTestCase):
-    def test_check_segment(self):
-        """Validate the check_segment call."""
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN}
-        self.assertTrue(self.driver.check_segment(segment))
-        # Validate a network type not currently supported
-        segment[api.NETWORK_TYPE] = p_const.TYPE_GRE
-        self.assertFalse(self.driver.check_segment(segment))
-
-    def test_check_segment_allows_supported_network_types(self):
-        for network_type in self.driver.supported_network_types:
-            segment = {api.NETWORK_TYPE: network_type}
-            self.assertTrue(self.driver.check_segment(segment))
-
-
-class SriovMechVlanTestCase(SriovNicSwitchMechanismBaseTestCase,
-                            base.AgentMechanismBaseTestCase):
-    VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id',
-                      api.NETWORK_TYPE: 'no_such_type'},
-                     {api.ID: 'vlan_segment_id',
-                      api.NETWORK_TYPE: 'vlan',
-                      api.PHYSICAL_NETWORK: 'fake_physical_network',
-                      api.SEGMENTATION_ID: 1234}]
-
-    def test_type_vlan(self):
-        context = TestFakePortContext(self.AGENT_TYPE,
-                                      self.AGENTS,
-                                      self.VLAN_SEGMENTS,
-                                      portbindings.VNIC_DIRECT)
-        self.driver.bind_port(context)
-        self._check_bound(context, self.VLAN_SEGMENTS[1])
-
-    def test_type_vlan_bad(self):
-        context = TestFakePortContext(self.AGENT_TYPE,
-                                      self.AGENTS_BAD,
-                                      self.VLAN_SEGMENTS,
-                                      portbindings.VNIC_DIRECT)
-        self.driver.bind_port(context)
-        self._check_unbound(context)
-
-
-class SriovSwitchMechVnicTypeTestCase(SriovNicSwitchMechanismBaseTestCase):
-    def _check_vif_type_for_vnic_type(self, vnic_type,
-                                      expected_vif_type):
-        context = TestFakePortContext(self.AGENT_TYPE,
-                                      self.AGENTS,
-                                      self.VLAN_SEGMENTS,
-                                      vnic_type)
-        self.driver.bind_port(context)
-        self.assertEqual(expected_vif_type, context._bound_vif_type)
-        vlan = int(context._bound_vif_details[portbindings.VIF_DETAILS_VLAN])
-        self.assertEqual(1234, vlan)
-
-    def test_vnic_type_direct(self):
-        self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT,
-                                           mech_driver.VIF_TYPE_HW_VEB)
-
-    def test_vnic_type_macvtap(self):
-        self._check_vif_type_for_vnic_type(portbindings.VNIC_MACVTAP,
-                                           mech_driver.VIF_TYPE_HW_VEB)
-
-    def test_vnic_type_direct_physical(self):
-        self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT_PHYSICAL,
-                                           mech_driver.VIF_TYPE_HW_VEB)
-
-
-class SriovSwitchMechProfileTestCase(SriovNicSwitchMechanismBaseTestCase):
-    def _check_vif_for_pci_info(self, pci_vendor_info, expected_vif_type):
-        context = TestFakePortContext(self.AGENT_TYPE,
-                                      self.AGENTS,
-                                      self.VLAN_SEGMENTS,
-                                      portbindings.VNIC_DIRECT,
-                                      {'pci_vendor_info': pci_vendor_info})
-        self.driver.bind_port(context)
-        self.assertEqual(expected_vif_type, context._bound_vif_type)
-
-    def test_profile_supported_pci_info(self):
-        self._check_vif_for_pci_info(MELLANOX_CONNECTX3_PCI_INFO,
-                                     mech_driver.VIF_TYPE_HW_VEB)
-
-    def test_profile_unsupported_pci_info(self):
-        with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.'
-                        'mech_driver.mech_driver.LOG') as log_mock:
-            self._check_vif_for_pci_info('xxxx:yyyy', None)
-            log_mock.debug.assert_called_with('Refusing to bind due to '
-                                              'unsupported pci_vendor device')
-
-
-class SriovSwitchMechProfileFailTestCase(SriovNicSwitchMechanismBaseTestCase):
-    def _check_for_pci_vendor_info(self, pci_vendor_info):
-        context = TestFakePortContext(self.AGENT_TYPE,
-                                      self.AGENTS,
-                                      self.VLAN_SEGMENTS,
-                                      portbindings.VNIC_DIRECT,
-                                      pci_vendor_info)
-        self.driver._check_supported_pci_vendor_device(context)
-
-    def test_profile_missing_profile(self):
-        with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.'
-                        'mech_driver.mech_driver.LOG') as log_mock:
-            self._check_for_pci_vendor_info({})
-            log_mock.debug.assert_called_with("Missing profile in port"
-                                              " binding")
-
-    def test_profile_missing_pci_vendor_info(self):
-        with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.'
-                        'mech_driver.mech_driver.LOG') as log_mock:
-            self._check_for_pci_vendor_info({'aa': 'bb'})
-            log_mock.debug.assert_called_with("Missing pci vendor"
-                                              " info in profile")
-
-
-class SriovSwitchMechVifDetailsTestCase(SriovNicSwitchMechanismBaseTestCase):
-    VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id',
-                      api.NETWORK_TYPE: 'vlan',
-                      api.PHYSICAL_NETWORK: 'fake_physical_network',
-                      api.SEGMENTATION_ID: 1234}]
-
-    def test_vif_details_contains_vlan_id(self):
-        context = TestFakePortContext(self.AGENT_TYPE,
-                                      self.AGENTS,
-                                      self.VLAN_SEGMENTS,
-                                      portbindings.VNIC_DIRECT)
-
-        self.driver.bind_port(context)
-        vif_details = context._bound_vif_details
-        self.assertIsNotNone(vif_details)
-        vlan_id = int(vif_details.get(portbindings.VIF_DETAILS_VLAN))
-        self.assertEqual(1234, vlan_id)
-
-    def test_get_vif_details_for_flat_network(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT}
-        vif_details = self.driver._get_vif_details(segment)
-        vlan_id = vif_details[portbindings.VIF_DETAILS_VLAN]
-        self.assertEqual('0', vlan_id)
-
-    def test_get_vif_details_unsupported_net(self):
-        segment = {api.NETWORK_TYPE: 'foo'}
-        with testtools.ExpectedException(exc.SriovUnsupportedNetworkType):
-            self.driver._get_vif_details(segment)
-
-    def test_get_vif_details_with_agent(self):
-        context = TestFakePortContext(self.AGENT_TYPE,
-                                      self.AGENTS,
-                                      self.VLAN_SEGMENTS,
-                                      portbindings.VNIC_DIRECT)
-
-        self.driver.bind_port(context)
-        self.assertEqual(constants.PORT_STATUS_DOWN, context._bound_state)
-
-    def test_get_vif_details_with_agent_direct_physical(self):
-        context = TestFakePortContext(self.AGENT_TYPE,
-                                      self.AGENTS,
-                                      self.VLAN_SEGMENTS,
-                                      portbindings.VNIC_DIRECT_PHYSICAL)
-
-        self.driver.bind_port(context)
-        self.assertEqual(constants.PORT_STATUS_ACTIVE, context._bound_state)
-
-
-class SriovSwitchMechConfigTestCase(SriovNicSwitchMechanismBaseTestCase):
-    def _set_config(self, pci_devs=None):
-        # Avoid a mutable default argument.
-        if pci_devs is None:
-            pci_devs = ['aa:bb']
-        cfg.CONF.set_override('mechanism_drivers',
-                              ['logger', 'sriovnicswitch'], 'ml2')
-        cfg.CONF.set_override('supported_pci_vendor_devs', pci_devs,
-                              'ml2_sriov')
-
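-    # supported_pci_vendor_devs entries take the form
-    # 'vendor_id:product_id' (e.g. '15b3:1004'); the malformed-entry
-    # tests below expect initialize() to reject anything else with
-    # cfg.Error.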
-    def test_pci_vendor_config_single_entry(self):
-        self._set_config()
-        self.driver.initialize()
-        self.assertEqual(['aa:bb'], self.driver.pci_vendor_info)
-
-    def test_pci_vendor_config_multiple_entry(self):
-        self._set_config(['x:y', 'a:b'])
-        self.driver.initialize()
-        self.assertEqual(['x:y', 'a:b'], self.driver.pci_vendor_info)
-
-    def test_pci_vendor_config_default_entry(self):
-        self.driver.initialize()
-        self.assertEqual(DEFAULT_PCI_INFO,
-                         self.driver.pci_vendor_info)
-
-    def test_pci_vendor_config_wrong_entry(self):
-        self._set_config(['wrong_entry'])
-        self.assertRaises(cfg.Error, self.driver.initialize)
-
-    def test_initialize_missing_product_id(self):
-        self._set_config(['vendor_id:'])
-        self.assertRaises(cfg.Error, self.driver.initialize)
-
-    def test_initialize_missing_vendor_id(self):
-        self._set_config([':product_id'])
-        self.assertRaises(cfg.Error, self.driver.initialize)
-
-    def test_initialize_multiple_colons(self):
-        self._set_config(['foo:bar:baz'])
-        self.assertRaises(cfg.Error, self.driver.initialize)
-
-    def test_initialize_empty_string(self):
-        self._set_config([''])
-        self.assertRaises(cfg.Error, self.driver.initialize)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py b/neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py
deleted file mode 100644 (file)
index 93b1f53..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log
-
-from neutron._i18n import _
-from neutron.plugins.ml2 import driver_api as api
-
-LOG = log.getLogger(__name__)
-
-
-class LoggerMechanismDriver(api.MechanismDriver):
-    """Mechanism driver that logs all calls and parameters made.
-
-    Generally used for testing and debugging.
-    """
-
-    def initialize(self):
-        pass
-
-    def _log_network_call(self, method_name, context):
-        LOG.info(_("%(method)s called with network settings %(current)s "
-                   "(original settings %(original)s) and "
-                   "network segments %(segments)s"),
-                 {'method': method_name,
-                  'current': context.current,
-                  'original': context.original,
-                  'segments': context.network_segments})
-
-    def create_network_precommit(self, context):
-        self._log_network_call("create_network_precommit", context)
-
-    def create_network_postcommit(self, context):
-        self._log_network_call("create_network_postcommit", context)
-
-    def update_network_precommit(self, context):
-        self._log_network_call("update_network_precommit", context)
-
-    def update_network_postcommit(self, context):
-        self._log_network_call("update_network_postcommit", context)
-
-    def delete_network_precommit(self, context):
-        self._log_network_call("delete_network_precommit", context)
-
-    def delete_network_postcommit(self, context):
-        self._log_network_call("delete_network_postcommit", context)
-
-    def _log_subnet_call(self, method_name, context):
-        LOG.info(_("%(method)s called with subnet settings %(current)s "
-                   "(original settings %(original)s)"),
-                 {'method': method_name,
-                  'current': context.current,
-                  'original': context.original})
-
-    def create_subnet_precommit(self, context):
-        self._log_subnet_call("create_subnet_precommit", context)
-
-    def create_subnet_postcommit(self, context):
-        self._log_subnet_call("create_subnet_postcommit", context)
-
-    def update_subnet_precommit(self, context):
-        self._log_subnet_call("update_subnet_precommit", context)
-
-    def update_subnet_postcommit(self, context):
-        self._log_subnet_call("update_subnet_postcommit", context)
-
-    def delete_subnet_precommit(self, context):
-        self._log_subnet_call("delete_subnet_precommit", context)
-
-    def delete_subnet_postcommit(self, context):
-        self._log_subnet_call("delete_subnet_postcommit", context)
-
-    def _log_port_call(self, method_name, context):
-        network_context = context.network
-        LOG.info(_("%(method)s called with port settings %(current)s "
-                   "(original settings %(original)s) "
-                   "host %(host)s "
-                   "(original host %(original_host)s) "
-                   "vif type %(vif_type)s "
-                   "(original vif type %(original_vif_type)s) "
-                   "vif details %(vif_details)s "
-                   "(original vif details %(original_vif_details)s) "
-                   "binding levels %(levels)s "
-                   "(original binding levels %(original_levels)s) "
-                   "on network %(network)s "
-                   "with segments to bind %(segments_to_bind)s"),
-                 {'method': method_name,
-                  'current': context.current,
-                  'original': context.original,
-                  'host': context.host,
-                  'original_host': context.original_host,
-                  'vif_type': context.vif_type,
-                  'original_vif_type': context.original_vif_type,
-                  'vif_details': context.vif_details,
-                  'original_vif_details': context.original_vif_details,
-                  'levels': context.binding_levels,
-                  'original_levels': context.original_binding_levels,
-                  'network': network_context.current,
-                  'segments_to_bind': context.segments_to_bind})
-
-    def create_port_precommit(self, context):
-        self._log_port_call("create_port_precommit", context)
-
-    def create_port_postcommit(self, context):
-        self._log_port_call("create_port_postcommit", context)
-
-    def update_port_precommit(self, context):
-        self._log_port_call("update_port_precommit", context)
-
-    def update_port_postcommit(self, context):
-        self._log_port_call("update_port_postcommit", context)
-
-    def delete_port_precommit(self, context):
-        self._log_port_call("delete_port_precommit", context)
-
-    def delete_port_postcommit(self, context):
-        self._log_port_call("delete_port_postcommit", context)
-
-    def bind_port(self, context):
-        self._log_port_call("bind_port", context)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py b/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py
deleted file mode 100644 (file)
index de2accc..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import constants as const
-from neutron.extensions import portbindings
-from neutron.plugins.ml2 import driver_api as api
-
-
-class TestMechanismDriver(api.MechanismDriver):
-    """Test mechanism driver for testing mechanism driver api."""
-
-    def initialize(self):
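-        # (port_id, host) pairs recorded when bind_port() succeeds;
-        # checked in _check_port_context() and removed again in
-        # update_port_precommit() when a port loses its binding.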
-        self.bound_ports = set()
-
-    def _check_network_context(self, context, original_expected):
-        assert(isinstance(context, api.NetworkContext))
-        assert(isinstance(context.current, dict))
-        assert(context.current['id'] is not None)
-        if original_expected:
-            assert(isinstance(context.original, dict))
-            assert(context.current['id'] == context.original['id'])
-        else:
-            assert(not context.original)
-        assert(context.network_segments)
-
-    def create_network_precommit(self, context):
-        self._check_network_context(context, False)
-
-    def create_network_postcommit(self, context):
-        self._check_network_context(context, False)
-
-    def update_network_precommit(self, context):
-        self._check_network_context(context, True)
-
-    def update_network_postcommit(self, context):
-        self._check_network_context(context, True)
-
-    def delete_network_precommit(self, context):
-        self._check_network_context(context, False)
-
-    def delete_network_postcommit(self, context):
-        self._check_network_context(context, False)
-
-    def _check_subnet_context(self, context, original_expected):
-        assert(isinstance(context, api.SubnetContext))
-        assert(isinstance(context.current, dict))
-        assert(context.current['id'] is not None)
-        if original_expected:
-            assert(isinstance(context.original, dict))
-            assert(context.current['id'] == context.original['id'])
-        else:
-            assert(not context.original)
-        network_context = context.network
-        assert(isinstance(network_context, api.NetworkContext))
-        self._check_network_context(network_context, False)
-
-    def create_subnet_precommit(self, context):
-        self._check_subnet_context(context, False)
-
-    def create_subnet_postcommit(self, context):
-        self._check_subnet_context(context, False)
-
-    def update_subnet_precommit(self, context):
-        self._check_subnet_context(context, True)
-
-    def update_subnet_postcommit(self, context):
-        self._check_subnet_context(context, True)
-
-    def delete_subnet_precommit(self, context):
-        self._check_subnet_context(context, False)
-
-    def delete_subnet_postcommit(self, context):
-        self._check_subnet_context(context, False)
-
-    def _check_port_context(self, context, original_expected):
-        assert(isinstance(context, api.PortContext))
-
-        self._check_port_info(context.current, context.host,
-                              context.vif_type, context.vif_details)
-
-        if context.vif_type in (portbindings.VIF_TYPE_UNBOUND,
-                                portbindings.VIF_TYPE_BINDING_FAILED):
-            if (context.segments_to_bind and
-                context.segments_to_bind[0][api.NETWORK_TYPE] == 'vlan'):
-                # Partially bound.
-                self._check_bound(context.binding_levels,
-                                  context.top_bound_segment,
-                                  context.bottom_bound_segment)
-            else:
-                self._check_unbound(context.binding_levels,
-                                    context.top_bound_segment,
-                                    context.bottom_bound_segment)
-            assert((context.current['id'], context.host)
-                   not in self.bound_ports)
-        else:
-            self._check_bound(context.binding_levels,
-                              context.top_bound_segment,
-                              context.bottom_bound_segment)
-            assert((context.current['id'], context.host) in self.bound_ports)
-
-        if original_expected:
-            self._check_port_info(context.original, context.original_host,
-                                  context.original_vif_type,
-                                  context.original_vif_details)
-
-            assert(context.current['id'] == context.original['id'])
-
-            if (context.original_vif_type in
-                (portbindings.VIF_TYPE_UNBOUND,
-                 portbindings.VIF_TYPE_BINDING_FAILED)):
-                self._check_unbound(context.original_binding_levels,
-                                    context.original_top_bound_segment,
-                                    context.original_bottom_bound_segment)
-            else:
-                self._check_bound(context.original_binding_levels,
-                                  context.original_top_bound_segment,
-                                  context.original_bottom_bound_segment)
-        else:
-            assert(context.original is None)
-            assert(context.original_host is None)
-            assert(context.original_vif_type is None)
-            assert(context.original_vif_details is None)
-            assert(context.original_status is None)
-            self._check_unbound(context.original_binding_levels,
-                                context.original_top_bound_segment,
-                                context.original_bottom_bound_segment)
-
-        network_context = context.network
-        assert(isinstance(network_context, api.NetworkContext))
-        self._check_network_context(network_context, False)
-
-    def _check_port_info(self, port, host, vif_type, vif_details):
-        assert(isinstance(port, dict))
-        assert(port['id'] is not None)
-        assert(vif_type in (portbindings.VIF_TYPE_UNBOUND,
-                            portbindings.VIF_TYPE_BINDING_FAILED,
-                            portbindings.VIF_TYPE_DISTRIBUTED,
-                            portbindings.VIF_TYPE_OVS,
-                            portbindings.VIF_TYPE_BRIDGE))
-        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-            assert(port[portbindings.HOST_ID] == '')
-            assert(port[portbindings.VIF_TYPE] ==
-                   portbindings.VIF_TYPE_DISTRIBUTED)
-            assert(port[portbindings.VIF_DETAILS] == {})
-        else:
-            assert(port[portbindings.HOST_ID] == host)
-            assert(port[portbindings.VIF_TYPE] !=
-                   portbindings.VIF_TYPE_DISTRIBUTED)
-            assert(port[portbindings.VIF_TYPE] == vif_type)
-            assert(isinstance(vif_details, dict))
-            assert(port[portbindings.VIF_DETAILS] == vif_details)
-
-    def _check_unbound(self, levels, top_segment, bottom_segment):
-        assert(levels is None)
-        assert(top_segment is None)
-        assert(bottom_segment is None)
-
-    def _check_bound(self, levels, top_segment, bottom_segment):
-        assert(isinstance(levels, list))
-        top_level = levels[0]
-        assert(isinstance(top_level, dict))
-        assert(isinstance(top_segment, dict))
-        assert(top_segment == top_level[api.BOUND_SEGMENT])
-        assert('test' == top_level[api.BOUND_DRIVER])
-        bottom_level = levels[-1]
-        assert(isinstance(bottom_level, dict))
-        assert(isinstance(bottom_segment, dict))
-        assert(bottom_segment == bottom_level[api.BOUND_SEGMENT])
-        assert('test' == bottom_level[api.BOUND_DRIVER])
-
-    def create_port_precommit(self, context):
-        self._check_port_context(context, False)
-
-    def create_port_postcommit(self, context):
-        self._check_port_context(context, False)
-
-    def update_port_precommit(self, context):
-        if (context.original_top_bound_segment and
-            not context.top_bound_segment):
-            self.bound_ports.remove((context.original['id'],
-                                     context.original_host))
-        self._check_port_context(context, True)
-
-    def update_port_postcommit(self, context):
-        self._check_port_context(context, True)
-
-    def delete_port_precommit(self, context):
-        self._check_port_context(context, False)
-
-    def delete_port_postcommit(self, context):
-        self._check_port_context(context, False)
-
-    def bind_port(self, context):
-        self._check_port_context(context, False)
-
-        host = context.host
-        segment = context.segments_to_bind[0]
-        segment_id = segment[api.ID]
-        if host == "host-ovs-no_filter":
-            context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
-                                {portbindings.CAP_PORT_FILTER: False})
-            self.bound_ports.add((context.current['id'], host))
-        elif host == "host-bridge-filter":
-            context.set_binding(segment_id, portbindings.VIF_TYPE_BRIDGE,
-                                {portbindings.CAP_PORT_FILTER: True})
-            self.bound_ports.add((context.current['id'], host))
-        elif host == "host-ovs-filter-active":
-            context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
-                                {portbindings.CAP_PORT_FILTER: True},
-                                status=const.PORT_STATUS_ACTIVE)
-            self.bound_ports.add((context.current['id'], host))
-        elif host == "host-hierarchical":
-            segment_type = segment[api.NETWORK_TYPE]
-            if segment_type == 'local':
-                next_segment = context.allocate_dynamic_segment(
-                    {api.NETWORK_TYPE: 'vlan',
-                     api.PHYSICAL_NETWORK: 'physnet1'}
-                )
-                context.continue_binding(segment_id, [next_segment])
-            elif segment_type == 'vlan':
-                context.set_binding(segment_id,
-                                    portbindings.VIF_TYPE_OVS,
-                                    {portbindings.CAP_PORT_FILTER: False})
-                self.bound_ports.add((context.current['id'], host))
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py
deleted file mode 100644 (file)
index 5e19c0f..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron import context
-from neutron.objects.qos import policy
-from neutron.objects.qos import rule
-from neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers import (
-    qos_driver)
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent import (
-    ovs_test_base)
-
-
-class QosOVSAgentDriverTestCase(ovs_test_base.OVSAgentConfigTestBase):
-
-    def setUp(self):
-        super(QosOVSAgentDriverTestCase, self).setUp()
-        self.context = context.get_admin_context()
-        self.qos_driver = qos_driver.QosOVSAgentDriver()
-        self.qos_driver.initialize()
-        self.qos_driver.br_int = mock.Mock()
-        self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock(
-            return_value=(1000, 10))
-        self.get = self.qos_driver.br_int.get_egress_bw_limit_for_port
-        # The stubbed attribute must match the name the driver calls,
-        # delete_egress_bw_limit_for_port; a differently named stub
-        # would be bypassed by the auto-created child mock.
-        self.qos_driver.br_int.delete_egress_bw_limit_for_port = mock.Mock()
-        self.delete = self.qos_driver.br_int.delete_egress_bw_limit_for_port
-        self.qos_driver.br_int.create_egress_bw_limit_for_port = mock.Mock()
-        self.create = self.qos_driver.br_int.create_egress_bw_limit_for_port
-        self.rule = self._create_bw_limit_rule_obj()
-        self.qos_policy = self._create_qos_policy_obj([self.rule])
-        self.port = self._create_fake_port(self.qos_policy.id)
-
-    def _create_bw_limit_rule_obj(self):
-        rule_obj = rule.QosBandwidthLimitRule()
-        rule_obj.id = uuidutils.generate_uuid()
-        rule_obj.max_kbps = 2
-        rule_obj.max_burst_kbps = 200
-        rule_obj.obj_reset_changes()
-        return rule_obj
-
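-    # obj_reset_changes() clears the versioned object's changed-fields
-    # tracking, so these fixture helpers return objects that behave like
-    # ones freshly loaded from the database rather than locally modified.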
-    def _create_qos_policy_obj(self, rules):
-        policy_dict = {'id': uuidutils.generate_uuid(),
-                       'tenant_id': uuidutils.generate_uuid(),
-                       'name': 'test',
-                       'description': 'test',
-                       'shared': False,
-                       'rules': rules}
-        policy_obj = policy.QosPolicy(self.context, **policy_dict)
-        policy_obj.obj_reset_changes()
-        for policy_rule in policy_obj.rules:
-            policy_rule.qos_policy_id = policy_obj.id
-            policy_rule.obj_reset_changes()
-        return policy_obj
-
-    def _create_fake_port(self, policy_id):
-        self.port_name = 'fakeport'
-
-        class FakeVifPort(object):
-            port_name = self.port_name
-
-        return {'vif_port': FakeVifPort(),
-                'qos_policy_id': policy_id,
-                'network_qos_policy_id': None,
-                'device_owner': uuidutils.generate_uuid()}
-
-    def test_create_new_rule(self):
-        self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock(
-            return_value=(None, None))
-        self.qos_driver.create(self.port, self.qos_policy)
-        # Assert create is the last call
-        self.assertEqual(
-            'create_egress_bw_limit_for_port',
-            self.qos_driver.br_int.method_calls[-1][0])
-        self.assertEqual(0, self.delete.call_count)
-        self.create.assert_called_once_with(
-            self.port_name, self.rule.max_kbps,
-            self.rule.max_burst_kbps)
-
-    def test_create_existing_rules(self):
-        self.qos_driver.create(self.port, self.qos_policy)
-        self._assert_rule_create_updated()
-
-    def test_update_rules(self):
-        self.qos_driver.update(self.port, self.qos_policy)
-        self._assert_rule_create_updated()
-
-    def test_delete_rules(self):
-        self.qos_driver.delete(self.port, self.qos_policy)
-        self.delete.assert_called_once_with(self.port_name)
-
-    def _assert_rule_create_updated(self):
-        # Assert create is the last call
-        self.assertEqual(
-            'create_egress_bw_limit_for_port',
-            self.qos_driver.br_int.method_calls[-1][0])
-
-        self.create.assert_called_once_with(
-            self.port_name, self.rule.max_kbps,
-            self.rule.max_burst_kbps)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/fake_oflib.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/fake_oflib.py
deleted file mode 100644 (file)
index ea36327..0000000
+++ /dev/null
@@ -1,169 +0,0 @@
-# Copyright (C) 2014 VA Linux Systems Japan K.K.
-# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
-# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-
-class _Eq(object):
-    def __eq__(self, other):
-        return repr(self) == repr(other)
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-
-class _Value(_Eq):
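-    # Record "a | b" expressions so flag combinations such as
-    # vlan_vid | OFPVID_PRESENT compare equal by repr.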
-    def __or__(self, b):
-        return _Op('|', self, b)
-
-    def __ror__(self, a):
-        return _Op('|', a, self)
-
-
-class _SimpleValue(_Value):
-    def __init__(self, name):
-        self.name = name
-
-    def __repr__(self):
-        return self.name
-
-
-class _Op(_Value):
-    def __init__(self, op, a, b):
-        self.op = op
-        self.a = a
-        self.b = b
-
-    def __repr__(self):
-        return '%s%s%s' % (self.a, self.op, self.b)
-
-
-def _mkcls(name):
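-    """Return a fake class that records constructor arguments and
-    compares equal by repr, standing in for ryu parser classes
-    such as OFPFlowMod."""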
-    class Cls(_Eq):
-        _name = name
-
-        def __init__(self, *args, **kwargs):
-            self._args = args
-            self._kwargs = kwargs
-            self._hist = []
-
-        def __getattr__(self, name):
-            return self._kwargs[name]
-
-        def __repr__(self):
-            args = list(map(repr, self._args))
-            kwargs = sorted(['%s=%s' % (x, y) for x, y in
-                             self._kwargs.items()])
-            return '%s(%s)' % (self._name, ', '.join(args + kwargs))
-
-    return Cls
-
-
-class _Mod(object):
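-    # Fake module: attribute access yields constants (names with "_")
-    # or cached fake classes built by _mkcls.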
-    _cls_cache = {}
-
-    def __init__(self, name):
-        self._name = name
-
-    def __getattr__(self, name):
-        fullname = '%s.%s' % (self._name, name)
-        if '_' in name:  # constants are named like OFPxxx_yyy_zzz
-            return _SimpleValue(fullname)
-        try:
-            return self._cls_cache[fullname]
-        except KeyError:
-            pass
-        cls = _mkcls(fullname)
-        self._cls_cache[fullname] = cls
-        return cls
-
-    def __repr__(self):
-        return 'Mod(%s)' % (self._name,)
-
-
-def patch_fake_oflib_of():
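-    """Build a tree of fake ryu modules and return a mock.patch.dict
-    context manager installing them in sys.modules, so agent code
-    that imports ryu can be tested without ryu installed.
-    """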
-    ryu_mod = mock.Mock()
-    ryu_base_mod = ryu_mod.base
-    ryu_exc_mod = ryu_mod.exception
-    ryu_ctrl_mod = ryu_mod.controller
-    handler = _Mod('ryu.controller.handler')
-    handler.set_ev_cls = mock.Mock()
-    ofp_event = _Mod('ryu.controller.ofp_event')
-    ryu_ctrl_mod.handler = handler
-    ryu_ctrl_mod.ofp_event = ofp_event
-    ryu_lib_mod = ryu_mod.lib
-    ryu_lib_hub = ryu_lib_mod.hub
-    ryu_packet_mod = ryu_lib_mod.packet
-    packet = _Mod('ryu.lib.packet.packet')
-    arp = _Mod('ryu.lib.packet.arp')
-    ethernet = _Mod('ryu.lib.packet.ethernet')
-    ether_types = _Mod('ryu.lib.packet.ether_types')
-    in_proto = _Mod('ryu.lib.packet.in_proto')
-    icmpv6 = _Mod('ryu.lib.packet.icmpv6')
-    vlan = _Mod('ryu.lib.packet.vlan')
-    ryu_packet_mod.packet = packet
-    packet.Packet = mock.Mock()
-    ryu_packet_mod.arp = arp
-    ryu_packet_mod.ethernet = ethernet
-    ryu_packet_mod.ether_types = ether_types
-    ryu_packet_mod.icmpv6 = icmpv6
-    ryu_packet_mod.in_proto = in_proto
-    ryu_packet_mod.vlan = vlan
-    ryu_ofproto_mod = ryu_mod.ofproto
-    ofp = _Mod('ryu.ofproto.ofproto_v1_3')
-    ofpp = _Mod('ryu.ofproto.ofproto_v1_3_parser')
-    ryu_ofproto_mod.ofproto_v1_3 = ofp
-    ryu_ofproto_mod.ofproto_v1_3_parser = ofpp
-    ryu_app_mod = ryu_mod.app
-    ryu_app_ofctl_mod = ryu_app_mod.ofctl
-    ryu_ofctl_api = ryu_app_ofctl_mod.api
-    modules = {'ryu': ryu_mod,
-               'ryu.base': ryu_base_mod,
-               'ryu.controller': ryu_ctrl_mod,
-               'ryu.controller.handler': handler,
-               'ryu.controller.handler.set_ev_cls': handler.set_ev_cls,
-               'ryu.controller.ofp_event': ofp_event,
-               'ryu.exception': ryu_exc_mod,
-               'ryu.lib': ryu_lib_mod,
-               'ryu.lib.hub': ryu_lib_hub,
-               'ryu.lib.packet': ryu_packet_mod,
-               'ryu.lib.packet.packet': packet,
-               'ryu.lib.packet.packet.Packet': packet.Packet,
-               'ryu.lib.packet.arp': arp,
-               'ryu.lib.packet.ethernet': ethernet,
-               'ryu.lib.packet.ether_types': ether_types,
-               'ryu.lib.packet.icmpv6': icmpv6,
-               'ryu.lib.packet.in_proto': in_proto,
-               'ryu.lib.packet.vlan': vlan,
-               'ryu.ofproto': ryu_ofproto_mod,
-               'ryu.ofproto.ofproto_v1_3': ofp,
-               'ryu.ofproto.ofproto_v1_3_parser': ofpp,
-               'ryu.app': ryu_app_mod,
-               'ryu.app.ofctl': ryu_app_ofctl_mod,
-               'ryu.app.ofctl.api': ryu_ofctl_api}
-    return mock.patch.dict('sys.modules', modules)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py
deleted file mode 100644 (file)
index 1437e0c..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from oslo_utils import importutils
-
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
-    import ovs_test_base
-
-
-call = mock.call  # shorthand
-
-
-class OVSBridgeTestBase(ovs_test_base.OVSRyuTestBase):
-    _ARP_MODULE = 'ryu.lib.packet.arp'
-    _ETHER_TYPES_MODULE = 'ryu.lib.packet.ether_types'
-    _ICMPV6_MODULE = 'ryu.lib.packet.icmpv6'
-    _IN_PROTO_MODULE = 'ryu.lib.packet.in_proto'
-    _OFP_MODULE = 'ryu.ofproto.ofproto_v1_3'
-    _OFPP_MODULE = 'ryu.ofproto.ofproto_v1_3_parser'
-
-    def setup_bridge_mock(self, name, cls):
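-        """Create the bridge under test with a mocked datapath and
-        fake ryu modules, capturing _send_msg and delete_flows calls
-        so tests can assert on the exact OpenFlow messages built."""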
-        self.br = cls(name)
-        self.dp = mock.Mock()
-        self.ofp = importutils.import_module(self._OFP_MODULE)
-        self.ofpp = importutils.import_module(self._OFPP_MODULE)
-        self.arp = importutils.import_module(self._ARP_MODULE)
-        self.ether_types = importutils.import_module(self._ETHER_TYPES_MODULE)
-        self.icmpv6 = importutils.import_module(self._ICMPV6_MODULE)
-        self.in_proto = importutils.import_module(self._IN_PROTO_MODULE)
-        mock.patch.object(self.br, '_get_dp', autospec=True,
-                          return_value=self._get_dp()).start()
-        mock__send_msg = mock.patch.object(self.br, '_send_msg').start()
-        mock_delete_flows = mock.patch.object(self.br, 'delete_flows').start()
-        self.mock = mock.Mock()
-        self.mock.attach_mock(mock__send_msg, '_send_msg')
-        self.mock.attach_mock(mock_delete_flows, 'delete_flows')
-
-    def _get_dp(self):
-        return self.dp, self.ofp, self.ofpp
-
-    def test_drop_port(self):
-        in_port = 2345
-        self.br.drop_port(in_port=in_port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(
-                ofpp.OFPFlowMod(dp,
-                    cookie=0,
-                    instructions=[],
-                    match=ofpp.OFPMatch(in_port=in_port),
-                    priority=2,
-                    table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_goto(self):
-        dest_table_id = 123
-        priority = 99
-        in_port = 666
-        self.br.install_goto(dest_table_id=dest_table_id,
-                             priority=priority, in_port=in_port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(
-                ofpp.OFPFlowMod(dp,
-                    cookie=0,
-                    instructions=[
-                        ofpp.OFPInstructionGotoTable(table_id=dest_table_id),
-                    ],
-                    match=ofpp.OFPMatch(in_port=in_port),
-                    priority=priority,
-                    table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_drop(self):
-        priority = 99
-        in_port = 666
-        self.br.install_drop(priority=priority, in_port=in_port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(
-                ofpp.OFPFlowMod(dp,
-                    cookie=0,
-                    instructions=[],
-                    match=ofpp.OFPMatch(in_port=in_port),
-                    priority=priority,
-                    table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_normal(self):
-        priority = 99
-        in_port = 666
-        self.br.install_normal(priority=priority, in_port=in_port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(
-                ofpp.OFPFlowMod(dp,
-                    cookie=0,
-                    instructions=[
-                        ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                            ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)
-                        ]),
-                    ],
-                    match=ofpp.OFPMatch(in_port=in_port),
-                    priority=priority,
-                    table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test__cidr_to_ryu(self):
-        f = self.br._cidr_to_ryu
-        self.assertEqual('192.168.0.1', f('192.168.0.1'))
-        self.assertEqual('192.168.0.1', f('192.168.0.1/32'))
-        self.assertEqual(('192.168.0.0', '255.255.255.0'), f('192.168.0.0/24'))
-
-
-class OVSDVRProcessTestMixin(object):
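-    # DVR flow tests shared by the phys/tun bridge test classes, which
-    # supply dvr_process_table_id and dvr_process_next_table_id.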
-    def test_install_dvr_process_ipv4(self):
-        vlan_tag = 999
-        gateway_ip = '192.0.2.1'
-        self.br.install_dvr_process_ipv4(vlan_tag=vlan_tag,
-                                         gateway_ip=gateway_ip)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_ARP,
-                    arp_tpa=gateway_ip,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
-                priority=3,
-                table_id=self.dvr_process_table_id)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_process_ipv4(self):
-        vlan_tag = 999
-        gateway_ip = '192.0.2.1'
-        self.br.delete_dvr_process_ipv4(vlan_tag=vlan_tag,
-                                        gateway_ip=gateway_ip)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(table_id=self.dvr_process_table_id,
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_ARP,
-                    arp_tpa=gateway_ip,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_dvr_process_ipv6(self):
-        vlan_tag = 999
-        gateway_mac = '08:60:6e:7f:74:e7'
-        self.br.install_dvr_process_ipv6(vlan_tag=vlan_tag,
-                                         gateway_mac=gateway_mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(
-                    eth_src=gateway_mac,
-                    eth_type=self.ether_types.ETH_TYPE_IPV6,
-                    icmpv6_type=self.icmpv6.ND_ROUTER_ADVERT,
-                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
-                priority=3,
-                table_id=self.dvr_process_table_id)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_process_ipv6(self):
-        vlan_tag = 999
-        gateway_mac = '08:60:6e:7f:74:e7'
-        self.br.delete_dvr_process_ipv6(vlan_tag=vlan_tag,
-                                        gateway_mac=gateway_mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(table_id=self.dvr_process_table_id,
-                match=ofpp.OFPMatch(
-                    eth_src=gateway_mac,
-                    eth_type=self.ether_types.ETH_TYPE_IPV6,
-                    icmpv6_type=self.icmpv6.ND_ROUTER_ADVERT,
-                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_dvr_process(self):
-        vlan_tag = 999
-        vif_mac = '00:0e:0c:5e:95:d0'
-        dvr_mac_address = 'f2:0b:a4:5b:b2:ab'
-        self.br.install_dvr_process(vlan_tag=vlan_tag,
-                                    vif_mac=vif_mac,
-                                    dvr_mac_address=dvr_mac_address)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(
-                    eth_dst=vif_mac,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
-                priority=2,
-                table_id=self.dvr_process_table_id)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionSetField(eth_src=dvr_mac_address),
-                    ]),
-                    ofpp.OFPInstructionGotoTable(
-                        table_id=self.dvr_process_next_table_id),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_src=vif_mac,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
-                priority=1,
-                table_id=self.dvr_process_table_id)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_process(self):
-        vlan_tag = 999
-        vif_mac = '00:0e:0c:5e:95:d0'
-        self.br.delete_dvr_process(vlan_tag=vlan_tag,
-                                   vif_mac=vif_mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(table_id=self.dvr_process_table_id,
-                match=ofpp.OFPMatch(
-                    eth_dst=vif_mac,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
-            call.delete_flows(table_id=self.dvr_process_table_id,
-                match=ofpp.OFPMatch(
-                    eth_src=vif_mac,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_int.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_int.py
deleted file mode 100644 (file)
index 17a865a..0000000
+++ /dev/null
@@ -1,403 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import ovs_bridge_test_base
-
-
-call = mock.call  # shorthand
-
-
-class OVSIntegrationBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase):
-    def setUp(self):
-        super(OVSIntegrationBridgeTest, self).setUp()
-        self.setup_bridge_mock('br-int', self.br_int_cls)
-
-    def test_setup_default_table(self):
-        self.br.setup_default_table()
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(
-                        ofp.OFPIT_APPLY_ACTIONS, [
-                            ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)
-                        ]),
-                ],
-                match=ofpp.OFPMatch(),
-                priority=0,
-                table_id=0)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0,
-                table_id=23)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0,
-                table_id=24)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan(self):
-        port = 999
-        lvid = 888
-        segmentation_id = 777
-        self.br.provision_local_vlan(port=port, lvid=lvid,
-                                     segmentation_id=segmentation_id)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionSetField(
-                            vlan_vid=lvid | ofp.OFPVID_PRESENT),
-                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    in_port=port,
-                    vlan_vid=segmentation_id | ofp.OFPVID_PRESENT),
-                priority=3,
-                table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan_novlan(self):
-        port = 999
-        lvid = 888
-        segmentation_id = None
-        self.br.provision_local_vlan(port=port, lvid=lvid,
-                                     segmentation_id=segmentation_id)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionPushVlan(),
-                        ofpp.OFPActionSetField(
-                            vlan_vid=lvid | ofp.OFPVID_PRESENT),
-                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    in_port=port,
-                    vlan_vid=ofp.OFPVID_NONE),
-                priority=3,
-                table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_reclaim_local_vlan(self):
-        port = 999
-        segmentation_id = 777
-        self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(
-                match=ofpp.OFPMatch(
-                    in_port=port,
-                    vlan_vid=segmentation_id | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_reclaim_local_vlan_novlan(self):
-        port = 999
-        segmentation_id = None
-        self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(
-                match=ofpp.OFPMatch(
-                    in_port=port,
-                    vlan_vid=ofp.OFPVID_NONE)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_dvr_to_src_mac(self):
-        network_type = 'vxlan'
-        vlan_tag = 1111
-        gateway_mac = '08:60:6e:7f:74:e7'
-        dst_mac = '00:02:b3:13:fe:3d'
-        dst_port = 6666
-        self.br.install_dvr_to_src_mac(network_type=network_type,
-                                       vlan_tag=vlan_tag,
-                                       gateway_mac=gateway_mac,
-                                       dst_mac=dst_mac,
-                                       dst_port=dst_port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionPopVlan(),
-                        ofpp.OFPActionSetField(eth_src=gateway_mac),
-                        ofpp.OFPActionOutput(dst_port, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_dst=dst_mac,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
-                priority=4,
-                table_id=1)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_to_src_mac(self):
-        network_type = 'vxlan'
-        vlan_tag = 1111
-        dst_mac = '00:02:b3:13:fe:3d'
-        self.br.delete_dvr_to_src_mac(network_type=network_type,
-                                      vlan_tag=vlan_tag,
-                                      dst_mac=dst_mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(table_id=1,
-                match=ofpp.OFPMatch(
-                    eth_dst=dst_mac,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_dvr_to_src_mac_vlan(self):
-        network_type = 'vlan'
-        vlan_tag = 1111
-        gateway_mac = '08:60:6e:7f:74:e7'
-        dst_mac = '00:02:b3:13:fe:3d'
-        dst_port = 6666
-        self.br.install_dvr_to_src_mac(network_type=network_type,
-                                       vlan_tag=vlan_tag,
-                                       gateway_mac=gateway_mac,
-                                       dst_mac=dst_mac,
-                                       dst_port=dst_port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionPopVlan(),
-                        ofpp.OFPActionSetField(eth_src=gateway_mac),
-                        ofpp.OFPActionOutput(dst_port, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_dst=dst_mac,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
-                priority=4,
-                table_id=2)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_to_src_mac_vlan(self):
-        network_type = 'vlan'
-        vlan_tag = 1111
-        dst_mac = '00:02:b3:13:fe:3d'
-        self.br.delete_dvr_to_src_mac(network_type=network_type,
-                                      vlan_tag=vlan_tag,
-                                      dst_mac=dst_mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(table_id=2,
-                match=ofpp.OFPMatch(
-                    eth_dst=dst_mac,
-                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_add_dvr_mac_vlan(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.add_dvr_mac_vlan(mac=mac, port=port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionGotoTable(table_id=2),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_src=mac,
-                    in_port=port),
-                priority=4,
-                table_id=0))
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_remove_dvr_mac_vlan(self):
-        mac = '00:02:b3:13:fe:3d'
-        self.br.remove_dvr_mac_vlan(mac=mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(eth_src=mac, table_id=0),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_add_dvr_mac_tun(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.add_dvr_mac_tun(mac=mac, port=port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionGotoTable(table_id=1),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_src=mac,
-                    in_port=port),
-                priority=2,
-                table_id=0))
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_remove_dvr_mac_tun(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.remove_dvr_mac_tun(mac=mac, port=port)
-        expected = [
-            call.delete_flows(eth_src=mac, in_port=port, table_id=0),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_icmpv6_na_spoofing_protection(self):
-        port = 8888
-        ip_addresses = ['2001:db8::1', 'fdf8:f53b:82e4::1/128']
-        self.br.install_icmpv6_na_spoofing_protection(port, ip_addresses)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_IPV6,
-                    icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
-                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
-                    ipv6_nd_target='2001:db8::1',
-                    in_port=port,
-                ),
-                priority=2,
-                table_id=24)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_IPV6,
-                    icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
-                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
-                    ipv6_nd_target='fdf8:f53b:82e4::1',
-                    in_port=port,
-                ),
-                priority=2,
-                table_id=24)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionGotoTable(table_id=24),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_IPV6,
-                    icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
-                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
-                    in_port=port,
-                ),
-                priority=10,
-                table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_arp_spoofing_protection(self):
-        port = 8888
-        ip_addresses = ['192.0.2.1', '192.0.2.2/32']
-        self.br.install_arp_spoofing_protection(port, ip_addresses)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_ARP,
-                    arp_spa='192.0.2.1',
-                    in_port=port,
-                ),
-                priority=2,
-                table_id=24)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_ARP,
-                    arp_spa='192.0.2.2',
-                    in_port=port
-                ),
-                priority=2,
-                table_id=24)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionGotoTable(table_id=24),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_ARP,
-                    in_port=port,
-                ),
-                priority=10,
-                table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_arp_spoofing_protection(self):
-        port = 8888
-        self.br.delete_arp_spoofing_protection(port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(table_id=0, match=ofpp.OFPMatch(
-                eth_type=self.ether_types.ETH_TYPE_ARP,
-                in_port=port)),
-            call.delete_flows(table_id=0, match=ofpp.OFPMatch(
-                eth_type=self.ether_types.ETH_TYPE_IPV6,
-                icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
-                in_port=port,
-                ip_proto=self.in_proto.IPPROTO_ICMPV6)),
-            call.delete_flows(table_id=24, in_port=port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py
deleted file mode 100644 (file)
index a478f9a..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \
-    as ovs_const
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import ovs_bridge_test_base
-
-
-call = mock.call  # shorthand
-
-
-class OVSPhysicalBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase,
-                            ovs_bridge_test_base.OVSDVRProcessTestMixin):
-    dvr_process_table_id = ovs_const.DVR_PROCESS_VLAN
-    dvr_process_next_table_id = ovs_const.LOCAL_VLAN_TRANSLATION
-
-    def setUp(self):
-        super(OVSPhysicalBridgeTest, self).setUp()
-        self.setup_bridge_mock('br-phys', self.br_phys_cls)
-
-    def test_setup_default_table(self):
-        self.br.setup_default_table()
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(),
-                priority=0,
-                table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan(self):
-        port = 999
-        lvid = 888
-        segmentation_id = 777
-        distributed = False
-        self.br.provision_local_vlan(port=port, lvid=lvid,
-                                     segmentation_id=segmentation_id,
-                                     distributed=distributed)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionSetField(
-                            vlan_vid=segmentation_id | ofp.OFPVID_PRESENT),
-                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    in_port=port,
-                    vlan_vid=lvid | ofp.OFPVID_PRESENT),
-                priority=4,
-                table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan_novlan(self):
-        port = 999
-        lvid = 888
-        segmentation_id = None
-        distributed = False
-        self.br.provision_local_vlan(port=port, lvid=lvid,
-                                     segmentation_id=segmentation_id,
-                                     distributed=distributed)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionPopVlan(),
-                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    in_port=port,
-                    vlan_vid=lvid | ofp.OFPVID_PRESENT),
-                priority=4,
-                table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_reclaim_local_vlan(self):
-        port = 999
-        lvid = 888
-        self.br.reclaim_local_vlan(port=port, lvid=lvid)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(
-                match=ofpp.OFPMatch(
-                    in_port=port,
-                    vlan_vid=lvid | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_add_dvr_mac_vlan(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.add_dvr_mac_vlan(mac=mac, port=port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionOutput(port, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(eth_src=mac),
-                priority=2,
-                table_id=3)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_remove_dvr_mac_vlan(self):
-        mac = '00:02:b3:13:fe:3d'
-        self.br.remove_dvr_mac_vlan(mac=mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(eth_src=mac, table_id=3),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py
deleted file mode 100644 (file)
index 91b0af6..0000000
+++ /dev/null
@@ -1,487 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \
-    as ovs_const
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \
-    import ovs_bridge_test_base
-
-
-call = mock.call  # shorthand
-
-
-class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase,
-                          ovs_bridge_test_base.OVSDVRProcessTestMixin):
-    dvr_process_table_id = ovs_const.DVR_PROCESS
-    dvr_process_next_table_id = ovs_const.PATCH_LV_TO_TUN
-
-    def setUp(self):
-        super(OVSTunnelBridgeTest, self).setUp()
-        self.setup_bridge_mock('br-tun', self.br_tun_cls)
-
-    def test_setup_default_table(self):
-        patch_int_ofport = 5555
-        arp_responder_enabled = False
-        self.br.setup_default_table(patch_int_ofport=patch_int_ofport,
-            arp_responder_enabled=arp_responder_enabled)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=2)],
-                match=ofpp.OFPMatch(in_port=patch_int_ofport),
-                priority=1, table_id=0)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0, table_id=0)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=20)],
-                match=ofpp.OFPMatch(
-                    eth_dst=('00:00:00:00:00:00', '01:00:00:00:00:00')),
-                priority=0,
-                table_id=2)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=22)],
-                match=ofpp.OFPMatch(
-                    eth_dst=('01:00:00:00:00:00', '01:00:00:00:00:00')),
-                priority=0,
-                table_id=2)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0, table_id=3)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0, table_id=4)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0, table_id=6)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
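-                        # NXActionLearn: learn the source MAC of
-                        # tunnelled traffic and install a unicast flow
-                        # in table 20 back out the originating tunnel.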
-                        ofpp.NXActionLearn(
-                            cookie=0,
-                            hard_timeout=300,
-                            priority=1,
-                            specs=[
-                                ofpp.NXFlowSpecMatch(
-                                    dst=('vlan_vid', 0),
-                                    n_bits=12,
-                                    src=('vlan_vid', 0)),
-                                ofpp.NXFlowSpecMatch(
-                                    dst=('eth_dst', 0),
-                                    n_bits=48,
-                                    src=('eth_src', 0)),
-                                ofpp.NXFlowSpecLoad(
-                                    dst=('vlan_vid', 0),
-                                    n_bits=12,
-                                    src=0),
-                                ofpp.NXFlowSpecLoad(
-                                    dst=('tunnel_id', 0),
-                                    n_bits=64,
-                                    src=('tunnel_id', 0)),
-                                ofpp.NXFlowSpecOutput(
-                                    dst='',
-                                    n_bits=32,
-                                    src=('in_port', 0)),
-                            ],
-                            table_id=20),
-                        ofpp.OFPActionOutput(patch_int_ofport, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(),
-                priority=1,
-                table_id=10)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=22)],
-                match=ofpp.OFPMatch(),
-                priority=0,
-                table_id=20)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0,
-                table_id=22))
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_setup_default_table_arp_responder_enabled(self):
-        patch_int_ofport = 5555
-        arp_responder_enabled = True
-        self.br.setup_default_table(patch_int_ofport=patch_int_ofport,
-            arp_responder_enabled=arp_responder_enabled)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=2)],
-                match=ofpp.OFPMatch(in_port=patch_int_ofport),
-                priority=1, table_id=0)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0, table_id=0)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=21)],
-                match=ofpp.OFPMatch(
-                    eth_dst='ff:ff:ff:ff:ff:ff',
-                    eth_type=self.ether_types.ETH_TYPE_ARP),
-                priority=1,
-                table_id=2)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=20)],
-                match=ofpp.OFPMatch(
-                    eth_dst=('00:00:00:00:00:00', '01:00:00:00:00:00')),
-                priority=0,
-                table_id=2)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=22)],
-                match=ofpp.OFPMatch(
-                    eth_dst=('01:00:00:00:00:00', '01:00:00:00:00:00')),
-                priority=0,
-                table_id=2)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0, table_id=3)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0, table_id=4)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0, table_id=6)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.NXActionLearn(
-                            cookie=0,
-                            hard_timeout=300,
-                            priority=1,
-                            specs=[
-                                ofpp.NXFlowSpecMatch(
-                                    dst=('vlan_vid', 0),
-                                    n_bits=12,
-                                    src=('vlan_vid', 0)),
-                                ofpp.NXFlowSpecMatch(
-                                    dst=('eth_dst', 0),
-                                    n_bits=48,
-                                    src=('eth_src', 0)),
-                                ofpp.NXFlowSpecLoad(
-                                    dst=('vlan_vid', 0),
-                                    n_bits=12,
-                                    src=0),
-                                ofpp.NXFlowSpecLoad(
-                                    dst=('tunnel_id', 0),
-                                    n_bits=64,
-                                    src=('tunnel_id', 0)),
-                                ofpp.NXFlowSpecOutput(
-                                    dst='',
-                                    n_bits=32,
-                                    src=('in_port', 0)),
-                            ],
-                            table_id=20),
-                        ofpp.OFPActionOutput(patch_int_ofport, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(),
-                priority=1,
-                table_id=10)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=22)],
-                match=ofpp.OFPMatch(),
-                priority=0,
-                table_id=20)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[ofpp.OFPInstructionGotoTable(table_id=22)],
-                match=ofpp.OFPMatch(),
-                priority=0,
-                table_id=21)),
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[],
-                match=ofpp.OFPMatch(),
-                priority=0,
-                table_id=22))
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan(self):
-        network_type = 'vxlan'
-        lvid = 888
-        segmentation_id = 777
-        distributed = False
-        self.br.provision_local_vlan(network_type=network_type, lvid=lvid,
-                                     segmentation_id=segmentation_id,
-                                     distributed=distributed)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionPushVlan(),
-                        ofpp.OFPActionSetField(
-                            vlan_vid=lvid | ofp.OFPVID_PRESENT)
-                    ]),
-                    ofpp.OFPInstructionGotoTable(table_id=10),
-                ],
-                match=ofpp.OFPMatch(tunnel_id=segmentation_id),
-                priority=1,
-                table_id=4)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_reclaim_local_vlan(self):
-        network_type = 'vxlan'
-        segmentation_id = 777
-        self.br.reclaim_local_vlan(network_type=network_type,
-                                   segmentation_id=segmentation_id)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(
-                table_id=4,
-                match=ofpp.OFPMatch(tunnel_id=segmentation_id)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_flood_to_tun(self):
-        vlan = 3333
-        tun_id = 2222
-        ports = [11, 44, 22, 33]
-        self.br.install_flood_to_tun(vlan=vlan,
-                                     tun_id=tun_id,
-                                     ports=ports)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionPopVlan(),
-                        ofpp.OFPActionSetField(tunnel_id=tun_id),
-                    ] + [ofpp.OFPActionOutput(p, 0) for p in ports]),
-                ],
-                match=ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT),
-                priority=1,
-                table_id=22)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_flood_to_tun(self):
-        vlan = 3333
-        self.br.delete_flood_to_tun(vlan=vlan)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(table_id=22,
-                match=ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_unicast_to_tun(self):
-        vlan = 3333
-        port = 55
-        mac = '08:60:6e:7f:74:e7'
-        tun_id = 2222
-        self.br.install_unicast_to_tun(vlan=vlan,
-                                       tun_id=tun_id,
-                                       port=port,
-                                       mac=mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionPopVlan(),
-                        ofpp.OFPActionSetField(tunnel_id=tun_id),
-                        ofpp.OFPActionOutput(port, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_dst=mac, vlan_vid=vlan | ofp.OFPVID_PRESENT),
-                priority=2,
-                table_id=20)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_unicast_to_tun(self):
-        vlan = 3333
-        mac = '08:60:6e:7f:74:e7'
-        self.br.delete_unicast_to_tun(vlan=vlan, mac=mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(table_id=20,
-                match=ofpp.OFPMatch(
-                    eth_dst=mac, vlan_vid=vlan | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_unicast_to_tun_without_mac(self):
-        vlan = 3333
-        mac = None
-        self.br.delete_unicast_to_tun(vlan=vlan, mac=mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(table_id=20,
-                match=ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_arp_responder(self):
-        vlan = 3333
-        ip = '192.0.2.1'
-        mac = '08:60:6e:7f:74:e7'
-        self.br.install_arp_responder(vlan=vlan, ip=ip, mac=mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionSetField(arp_op=self.arp.ARP_REPLY),
-                        ofpp.NXActionRegMove(
-                            dst_field='arp_tha',
-                            n_bits=48,
-                            src_field='arp_sha'),
-                        ofpp.NXActionRegMove(
-                            dst_field='arp_tpa',
-                            n_bits=32,
-                            src_field='arp_spa'),
-                        ofpp.OFPActionSetField(arp_sha=mac),
-                        ofpp.OFPActionSetField(arp_spa=ip),
-                        ofpp.OFPActionOutput(ofp.OFPP_IN_PORT, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_ARP,
-                    arp_tpa=ip,
-                    vlan_vid=vlan | ofp.OFPVID_PRESENT),
-                priority=1,
-                table_id=21)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_arp_responder(self):
-        vlan = 3333
-        ip = '192.0.2.1'
-        self.br.delete_arp_responder(vlan=vlan, ip=ip)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_ARP,
-                    arp_tpa=ip,
-                    vlan_vid=vlan | ofp.OFPVID_PRESENT),
-                table_id=21),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_arp_responder_without_ip(self):
-        vlan = 3333
-        ip = None
-        self.br.delete_arp_responder(vlan=vlan, ip=ip)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(
-                match=ofpp.OFPMatch(
-                    eth_type=self.ether_types.ETH_TYPE_ARP,
-                    vlan_vid=vlan | ofp.OFPVID_PRESENT),
-                table_id=21),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_setup_tunnel_port(self):
-        network_type = 'vxlan'
-        port = 11111
-        self.br.setup_tunnel_port(network_type=network_type, port=port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionGotoTable(table_id=4),
-                ],
-                match=ofpp.OFPMatch(in_port=port),
-                priority=1,
-                table_id=0)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_cleanup_tunnel_port(self):
-        port = 11111
-        self.br.cleanup_tunnel_port(port=port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(in_port=port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_add_dvr_mac_tun(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.add_dvr_mac_tun(mac=mac, port=port)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call._send_msg(ofpp.OFPFlowMod(dp,
-                cookie=0,
-                instructions=[
-                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
-                        ofpp.OFPActionOutput(port, 0),
-                    ]),
-                ],
-                match=ofpp.OFPMatch(eth_src=mac),
-                priority=1,
-                table_id=9)),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_remove_dvr_mac_tun(self):
-        mac = '00:02:b3:13:fe:3d'
-        self.br.remove_dvr_mac_tun(mac=mac)
-        (dp, ofp, ofpp) = self._get_dp()
-        expected = [
-            call.delete_flows(eth_src=mac, table_id=9),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
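
The native-driver tests deleted above all follow one recipe: patch the
bridge's low-level send methods, attach them to a single parent mock, and
assert that each high-level call produced an exact, ordered list of
recorded calls. A minimal, self-contained sketch of that technique using
the stdlib's unittest.mock (the Bridge class, its method names, and the
table number are illustrative stand-ins, not Neutron code; the tree itself
imports the external mock package instead):

    import unittest
    from unittest import mock


    class Bridge(object):
        """Stand-in bridge; only the recorded calls matter."""

        def delete_flows(self, **kwargs):
            pass

        def remove_mac(self, mac):
            # A real bridge class would compute match fields here.
            self.delete_flows(eth_src=mac, table_id=9)


    class RecordedCallsTest(unittest.TestCase):
        def test_remove_mac(self):
            br = Bridge()
            parent = mock.Mock()
            patched = mock.patch.object(br, 'delete_flows').start()
            self.addCleanup(mock.patch.stopall)
            # attach_mock() funnels the patched method into one parent
            # mock, so parent.mock_calls preserves exact call order.
            parent.attach_mock(patched, 'delete_flows')
            br.remove_mac('00:02:b3:13:fe:3d')
            expected = [
                mock.call.delete_flows(eth_src='00:02:b3:13:fe:3d',
                                       table_id=9),
            ]
            self.assertEqual(expected, parent.mock_calls)

Comparing against mock_calls (rather than assert_called_with) is what lets
these tests catch missing, extra, or reordered flow operations.
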
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py
deleted file mode 100644 (file)
index b396103..0000000
--- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.common import constants
-
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
-    import ovs_test_base
-
-
-call = mock.call  # shorthand
-
-
-class OVSBridgeTestBase(ovs_test_base.OVSOFCtlTestBase):
-    def setup_bridge_mock(self, name, cls):
-        self.br = cls(name)
-        mock_add_flow = mock.patch.object(self.br, 'add_flow').start()
-        mock_mod_flow = mock.patch.object(self.br, 'mod_flow').start()
-        mock_delete_flows = mock.patch.object(self.br, 'delete_flows').start()
-        self.mock = mock.Mock()
-        self.mock.attach_mock(mock_add_flow, 'add_flow')
-        self.mock.attach_mock(mock_mod_flow, 'mod_flow')
-        self.mock.attach_mock(mock_delete_flows, 'delete_flows')
-
-    def test_drop_port(self):
-        in_port = 2345
-        self.br.drop_port(in_port=in_port)
-        expected = [
-            call.add_flow(priority=2, table=0, actions='drop',
-                          in_port=in_port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_goto(self):
-        dest_table_id = 123
-        priority = 99
-        in_port = 666
-        self.br.install_goto(dest_table_id=dest_table_id,
-                             priority=priority, in_port=in_port)
-        expected = [
-            call.add_flow(priority=priority, table=0,
-                          actions='resubmit(,%s)' % dest_table_id,
-                          in_port=in_port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_drop(self):
-        priority = 99
-        in_port = 666
-        self.br.install_drop(priority=priority, in_port=in_port)
-        expected = [
-            call.add_flow(priority=priority, table=0,
-                          actions='drop',
-                          in_port=in_port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_normal(self):
-        priority = 99
-        in_port = 666
-        self.br.install_normal(priority=priority, in_port=in_port)
-        expected = [
-            call.add_flow(priority=priority, table=0,
-                          actions='normal',
-                          in_port=in_port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_dump_flows_for_table(self):
-        table = 23
-        with mock.patch.object(self.br, 'run_ofctl') as run_ofctl:
-            self.br.dump_flows(table)
-            run_ofctl.assert_has_calls([mock.call("dump-flows", mock.ANY)])
-
-    def test_dump_all_flows(self):
-        with mock.patch.object(self.br, 'run_ofctl') as run_ofctl:
-            self.br.dump_flows_all_tables()
-            run_ofctl.assert_has_calls([mock.call("dump-flows", [])])
-
-
-class OVSDVRProcessTestMixin(object):
-    def test_install_dvr_process_ipv4(self):
-        vlan_tag = 999
-        gateway_ip = '192.0.2.1'
-        self.br.install_dvr_process_ipv4(vlan_tag=vlan_tag,
-                                         gateway_ip=gateway_ip)
-        expected = [
-            call.add_flow(table=self.dvr_process_table_id,
-                          proto='arp', nw_dst=gateway_ip, actions='drop',
-                          priority=3, dl_vlan=vlan_tag),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_process_ipv4(self):
-        vlan_tag = 999
-        gateway_ip = '192.0.2.1'
-        self.br.delete_dvr_process_ipv4(vlan_tag=vlan_tag,
-                                        gateway_ip=gateway_ip)
-        expected = [
-            call.delete_flows(table=self.dvr_process_table_id,
-                              dl_vlan=vlan_tag, proto='arp',
-                              nw_dst=gateway_ip),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_dvr_process_ipv6(self):
-        vlan_tag = 999
-        gateway_mac = '08:60:6e:7f:74:e7'
-        self.br.install_dvr_process_ipv6(vlan_tag=vlan_tag,
-                                         gateway_mac=gateway_mac)
-        expected = [
-            call.add_flow(table=self.dvr_process_table_id,
-                          proto='icmp6', dl_src=gateway_mac, actions='drop',
-                          priority=3, dl_vlan=vlan_tag,
-                          icmp_type=constants.ICMPV6_TYPE_RA),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_process_ipv6(self):
-        vlan_tag = 999
-        gateway_mac = '08:60:6e:7f:74:e7'
-        self.br.delete_dvr_process_ipv6(vlan_tag=vlan_tag,
-                                        gateway_mac=gateway_mac)
-        expected = [
-            call.delete_flows(table=self.dvr_process_table_id,
-                              dl_vlan=vlan_tag, dl_src=gateway_mac,
-                              proto='icmp6',
-                              icmp_type=constants.ICMPV6_TYPE_RA),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_dvr_process(self):
-        vlan_tag = 999
-        vif_mac = '00:0e:0c:5e:95:d0'
-        dvr_mac_address = 'f2:0b:a4:5b:b2:ab'
-        self.br.install_dvr_process(vlan_tag=vlan_tag,
-                                    vif_mac=vif_mac,
-                                    dvr_mac_address=dvr_mac_address)
-        expected = [
-            call.add_flow(priority=2, table=self.dvr_process_table_id,
-                          dl_dst=vif_mac, dl_vlan=vlan_tag, actions='drop'),
-            call.add_flow(priority=1, table=self.dvr_process_table_id,
-                          dl_vlan=vlan_tag, dl_src=vif_mac,
-                          actions='mod_dl_src:%(mac)s,resubmit(,%(next)s)' % {
-                              'mac': dvr_mac_address,
-                              'next': self.dvr_process_next_table_id,
-                          }),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_process(self):
-        vlan_tag = 999
-        vif_mac = '00:0e:0c:5e:95:d0'
-        self.br.delete_dvr_process(vlan_tag=vlan_tag,
-                                   vif_mac=vif_mac)
-        expected = [
-            call.delete_flows(table=self.dvr_process_table_id,
-                              dl_dst=vif_mac, dl_vlan=vlan_tag),
-            call.delete_flows(table=self.dvr_process_table_id,
-                              dl_vlan=vlan_tag, dl_src=vif_mac),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
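
OVSDVRProcessTestMixin above is deliberately incomplete: it reads
dvr_process_table_id and dvr_process_next_table_id from whichever concrete
test class mixes it in (the br-phys and br-tun test classes later in this
diff supply different constants). A compact illustration of that pattern,
with made-up table numbers rather than Neutron's ovs_const values:

    class DVRProcessChecksMixin(object):
        # Host classes must define dvr_process_next_table_id.
        def dvr_actions(self, dvr_mac):
            return 'mod_dl_src:%(mac)s,resubmit(,%(next)s)' % {
                'mac': dvr_mac,
                'next': self.dvr_process_next_table_id,
            }


    class PhysBridgeChecks(DVRProcessChecksMixin):
        dvr_process_table_id = 1       # hypothetical values
        dvr_process_next_table_id = 2


    print(PhysBridgeChecks().dvr_actions('f2:0b:a4:5b:b2:ab'))
    # -> mod_dl_src:f2:0b:a4:5b:b2:ab,resubmit(,2)

The mixin therefore exercises the same DVR flow expectations against both
bridges without duplicating the test bodies.
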
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py
deleted file mode 100644 (file)
index 8c77e18..0000000
--- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.common import constants as const
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.\
-    openflow.ovs_ofctl import ovs_bridge_test_base
-
-
-call = mock.call  # shorthand
-
-
-class OVSIntegrationBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase):
-    def setUp(self):
-        super(OVSIntegrationBridgeTest, self).setUp()
-        self.setup_bridge_mock('br-int', self.br_int_cls)
-
-    def test_setup_default_table(self):
-        self.br.setup_default_table()
-        expected = [
-            call.add_flow(priority=0, table=0, actions='normal'),
-            call.add_flow(priority=0, table=23, actions='drop'),
-            call.add_flow(priority=0, table=24, actions='drop'),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan(self):
-        port = 999
-        lvid = 888
-        segmentation_id = 777
-        self.br.provision_local_vlan(port=port, lvid=lvid,
-                                     segmentation_id=segmentation_id)
-        expected = [
-            call.add_flow(priority=3, dl_vlan=segmentation_id,
-                          in_port=port,
-                          actions='mod_vlan_vid:%s,normal' % lvid),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan_novlan(self):
-        port = 999
-        lvid = 888
-        segmentation_id = None
-        self.br.provision_local_vlan(port=port, lvid=lvid,
-                                     segmentation_id=segmentation_id)
-        expected = [
-            call.add_flow(priority=3, dl_vlan=0xffff,
-                          in_port=port,
-                          actions='mod_vlan_vid:%s,normal' % lvid),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_reclaim_local_vlan(self):
-        port = 999
-        segmentation_id = 777
-        self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id)
-        expected = [
-            call.delete_flows(dl_vlan=segmentation_id, in_port=port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_reclaim_local_vlan_novlan(self):
-        port = 999
-        segmentation_id = None
-        self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id)
-        expected = [
-            call.delete_flows(dl_vlan=0xffff, in_port=port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_dvr_to_src_mac(self):
-        network_type = 'vxlan'
-        vlan_tag = 1111
-        gateway_mac = '08:60:6e:7f:74:e7'
-        dst_mac = '00:02:b3:13:fe:3d'
-        dst_port = 6666
-        self.br.install_dvr_to_src_mac(network_type=network_type,
-                                       vlan_tag=vlan_tag,
-                                       gateway_mac=gateway_mac,
-                                       dst_mac=dst_mac,
-                                       dst_port=dst_port)
-        expected = [
-            call.add_flow(priority=4, table=1, dl_dst=dst_mac,
-                          dl_vlan=vlan_tag,
-                          actions='strip_vlan,mod_dl_src:%(mac)s,'
-                          'output:%(port)s' % {
-                              'mac': gateway_mac,
-                              'port': dst_port,
-                          }),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_to_src_mac(self):
-        network_type = 'vxlan'
-        vlan_tag = 1111
-        dst_mac = '00:02:b3:13:fe:3d'
-        self.br.delete_dvr_to_src_mac(network_type=network_type,
-                                      vlan_tag=vlan_tag,
-                                      dst_mac=dst_mac)
-        expected = [
-            call.delete_flows(table=1, dl_dst=dst_mac, dl_vlan=vlan_tag),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_dvr_to_src_mac_vlan(self):
-        network_type = 'vlan'
-        vlan_tag = 1111
-        gateway_mac = '08:60:6e:7f:74:e7'
-        dst_mac = '00:02:b3:13:fe:3d'
-        dst_port = 6666
-        self.br.install_dvr_to_src_mac(network_type=network_type,
-                                       vlan_tag=vlan_tag,
-                                       gateway_mac=gateway_mac,
-                                       dst_mac=dst_mac,
-                                       dst_port=dst_port)
-        expected = [
-            call.add_flow(priority=4, table=2, dl_dst=dst_mac,
-                          dl_vlan=vlan_tag,
-                          actions='strip_vlan,mod_dl_src:%(mac)s,'
-                          'output:%(port)s' % {
-                              'mac': gateway_mac,
-                              'port': dst_port,
-                          }),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_dvr_to_src_mac_vlan(self):
-        network_type = 'vlan'
-        vlan_tag = 1111
-        dst_mac = '00:02:b3:13:fe:3d'
-        self.br.delete_dvr_to_src_mac(network_type=network_type,
-                                      vlan_tag=vlan_tag,
-                                      dst_mac=dst_mac)
-        expected = [
-            call.delete_flows(table=2, dl_dst=dst_mac, dl_vlan=vlan_tag),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_add_dvr_mac_vlan(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.add_dvr_mac_vlan(mac=mac, port=port)
-        expected = [
-            call.add_flow(priority=4, table=0, actions='resubmit(,2)',
-                          dl_src=mac, in_port=port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_remove_dvr_mac_vlan(self):
-        mac = '00:02:b3:13:fe:3d'
-        self.br.remove_dvr_mac_vlan(mac=mac)
-        expected = [
-            call.delete_flows(eth_src=mac, table_id=0),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_add_dvr_mac_tun(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.add_dvr_mac_tun(mac=mac, port=port)
-        expected = [
-            call.add_flow(priority=2, table=0, actions='resubmit(,1)',
-                          dl_src=mac, in_port=port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_remove_dvr_mac_tun(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.remove_dvr_mac_tun(mac=mac, port=port)
-        expected = [
-            call.delete_flows(eth_src=mac, table_id=0, in_port=port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_icmpv6_na_spoofing_protection(self):
-        port = 8888
-        ip_addresses = ['2001:db8::1', 'fdf8:f53b:82e4::1/128']
-        self.br.install_icmpv6_na_spoofing_protection(port, ip_addresses)
-        expected = [
-            call.add_flow(dl_type=const.ETHERTYPE_IPV6, actions='normal',
-                          icmp_type=const.ICMPV6_TYPE_NA,
-                          nw_proto=const.PROTO_NUM_ICMP_V6,
-                          nd_target='2001:db8::1',
-                          priority=2, table=24, in_port=8888),
-            call.add_flow(dl_type=const.ETHERTYPE_IPV6, actions='normal',
-                          icmp_type=const.ICMPV6_TYPE_NA,
-                          nw_proto=const.PROTO_NUM_ICMP_V6,
-                          nd_target='fdf8:f53b:82e4::1/128',
-                          priority=2, table=24, in_port=8888),
-            call.add_flow(dl_type=const.ETHERTYPE_IPV6,
-                          icmp_type=const.ICMPV6_TYPE_NA,
-                          nw_proto=const.PROTO_NUM_ICMP_V6,
-                          priority=10, table=0, in_port=8888,
-                          actions='resubmit(,24)')
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_arp_spoofing_protection(self):
-        port = 8888
-        ip_addresses = ['192.0.2.1', '192.0.2.2/32']
-        self.br.install_arp_spoofing_protection(port, ip_addresses)
-        expected = [
-            call.add_flow(proto='arp', actions='normal',
-                          arp_spa='192.0.2.1',
-                          priority=2, table=24, in_port=8888),
-            call.add_flow(proto='arp', actions='normal',
-                          arp_spa='192.0.2.2/32',
-                          priority=2, table=24, in_port=8888),
-            call.add_flow(priority=10, table=0, in_port=8888,
-                          actions='resubmit(,24)', proto='arp')
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_arp_spoofing_protection(self):
-        port = 8888
-        self.br.delete_arp_spoofing_protection(port)
-        expected = [
-            call.delete_flows(table_id=0, in_port=8888, proto='arp'),
-            call.delete_flows(table_id=0, in_port=8888, icmp_type=136,
-                              nw_proto=58),
-            call.delete_flows(table_id=24, in_port=8888),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
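
The ovs_ofctl tests above express matches and actions as keyword arguments
that the agent ultimately flattens into a single ovs-ofctl flow
specification. A toy renderer showing the shape of that translation (this
is not Neutron's ovs_lib implementation; note that ovs-ofctl spells the
protocol as a bare token such as "arp" or "icmp6", not "proto=arp"):

    def render_flow(table=0, priority=1, actions='normal', proto=None,
                    **matches):
        # Sketch only: real rendering lives in neutron.agent.common.ovs_lib.
        parts = ['table=%d' % table, 'priority=%d' % priority]
        if proto:
            parts.append(proto)
        parts.extend('%s=%s' % kv for kv in sorted(matches.items()))
        return '%s,actions=%s' % (','.join(parts), actions)


    print(render_flow(table=24, priority=2, proto='arp',
                      arp_spa='192.0.2.1', in_port=8888))
    # table=24,priority=2,arp,arp_spa=192.0.2.1,in_port=8888,actions=normal
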
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py
deleted file mode 100644 (file)
index 47ca317..0000000
--- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \
-    as ovs_const
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.\
-    openflow.ovs_ofctl import ovs_bridge_test_base
-
-
-call = mock.call  # shorthand
-
-
-class OVSPhysicalBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase,
-                            ovs_bridge_test_base.OVSDVRProcessTestMixin):
-    dvr_process_table_id = ovs_const.DVR_PROCESS_VLAN
-    dvr_process_next_table_id = ovs_const.LOCAL_VLAN_TRANSLATION
-
-    def setUp(self):
-        super(OVSPhysicalBridgeTest, self).setUp()
-        self.setup_bridge_mock('br-phys', self.br_phys_cls)
-
-    def test_setup_default_table(self):
-        self.br.setup_default_table()
-        expected = [
-            call.delete_flows(),
-            call.add_flow(priority=0, table=0, actions='normal'),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan(self):
-        port = 999
-        lvid = 888
-        segmentation_id = 777
-        distributed = False
-        self.br.provision_local_vlan(port=port, lvid=lvid,
-                                     segmentation_id=segmentation_id,
-                                     distributed=distributed)
-        expected = [
-            call.add_flow(priority=4, table=0, dl_vlan=lvid, in_port=port,
-                          actions='mod_vlan_vid:%s,normal' % segmentation_id),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan_novlan(self):
-        port = 999
-        lvid = 888
-        segmentation_id = None
-        distributed = False
-        self.br.provision_local_vlan(port=port, lvid=lvid,
-                                     segmentation_id=segmentation_id,
-                                     distributed=distributed)
-        expected = [
-            call.add_flow(priority=4, table=0, dl_vlan=lvid, in_port=port,
-                          actions='strip_vlan,normal')
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_reclaim_local_vlan(self):
-        port = 999
-        lvid = 888
-        self.br.reclaim_local_vlan(port=port, lvid=lvid)
-        expected = [
-            call.delete_flows(dl_vlan=lvid, in_port=port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_add_dvr_mac_vlan(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.add_dvr_mac_vlan(mac=mac, port=port)
-        expected = [
-            call.add_flow(priority=2, table=3, dl_src=mac,
-                          actions='output:%s' % port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_remove_dvr_mac_vlan(self):
-        mac = '00:02:b3:13:fe:3d'
-        self.br.remove_dvr_mac_vlan(mac=mac)
-        expected = [
-            call.delete_flows(eth_src=mac, table_id=3),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
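
The two provision_local_vlan tests above pin down the br-phys egress
behavior: tagged provider networks rewrite the local VLAN to the provider
segmentation id, while flat networks (segmentation_id=None) strip the tag.
A small sketch of just that branch, checked against the expected action
strings from the tests (the helper name is illustrative):

    def egress_actions(segmentation_id):
        # Flat networks carry no provider tag, so the local VLAN is
        # stripped; otherwise it is rewritten to the provider VLAN.
        if segmentation_id is None:
            return 'strip_vlan,normal'
        return 'mod_vlan_vid:%s,normal' % segmentation_id


    assert egress_actions(777) == 'mod_vlan_vid:777,normal'
    assert egress_actions(None) == 'strip_vlan,normal'
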
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py
deleted file mode 100644 (file)
index 6c77d31..0000000
--- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import netaddr
-
-import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \
-    as ovs_const
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.\
-    openflow.ovs_ofctl import ovs_bridge_test_base
-
-
-call = mock.call  # shorthand
-
-
-class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase,
-                          ovs_bridge_test_base.OVSDVRProcessTestMixin):
-    dvr_process_table_id = ovs_const.DVR_PROCESS
-    dvr_process_next_table_id = ovs_const.PATCH_LV_TO_TUN
-
-    def setUp(self):
-        super(OVSTunnelBridgeTest, self).setUp()
-        self.setup_bridge_mock('br-tun', self.br_tun_cls)
-
-    def test_setup_default_table(self):
-        patch_int_ofport = 5555
-        mock_do_action_flows = mock.patch.object(self.br,
-                                                 'do_action_flows').start()
-        self.mock.attach_mock(mock_do_action_flows, 'do_action_flows')
-        self.br.setup_default_table(patch_int_ofport=patch_int_ofport,
-                                    arp_responder_enabled=False)
-        flow_args = [{'priority': 1, 'in_port': patch_int_ofport,
-                      'actions': 'resubmit(,2)'},
-                     {'priority': 0, 'actions': 'drop'},
-                     {'priority': 0, 'table': 2,
-                      'dl_dst': '00:00:00:00:00:00/01:00:00:00:00:00',
-                      'actions': 'resubmit(,20)'},
-                     {'priority': 0, 'table': 2,
-                      'dl_dst': '01:00:00:00:00:00/01:00:00:00:00:00',
-                      'actions': 'resubmit(,22)'},
-                     {'priority': 0, 'table': 3, 'actions': 'drop'},
-                     {'priority': 0, 'table': 4, 'actions': 'drop'},
-                     {'priority': 0, 'table': 6, 'actions': 'drop'},
-                     {'priority': 1, 'table': 10,
-                      'actions': 'learn(cookie=0,table=20,priority=1,'
-                      'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],'
-                      'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],'
-                      'load:0->NXM_OF_VLAN_TCI[],'
-                      'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],'
-                      'output:NXM_OF_IN_PORT[]),'
-                      'output:%s' % patch_int_ofport},
-                     {'priority': 0, 'table': 20, 'actions': 'resubmit(,22)'}
-                     ]
-        expected = [call.do_action_flows('add', flow_args),
-                    call.add_flow(priority=0, table=22, actions='drop')]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_setup_default_table_arp_responder_enabled(self):
-        patch_int_ofport = 5555
-        mock_do_action_flows = mock.patch.object(self.br,
-                                                 'do_action_flows').start()
-        self.mock.attach_mock(mock_do_action_flows, 'do_action_flows')
-        self.br.setup_default_table(patch_int_ofport=patch_int_ofport,
-            arp_responder_enabled=True)
-        flow_args = [{'priority': 1, 'in_port': patch_int_ofport,
-                      'actions': 'resubmit(,2)'},
-                     {'priority': 0, 'actions': 'drop'},
-                     {'priority': 1, 'table': 2, 'dl_dst': 'ff:ff:ff:ff:ff:ff',
-                      'actions': 'resubmit(,21)', 'proto': 'arp'},
-                     {'priority': 0, 'table': 2,
-                      'dl_dst': '00:00:00:00:00:00/01:00:00:00:00:00',
-                      'actions': 'resubmit(,20)'},
-                     {'priority': 0, 'table': 2,
-                      'dl_dst': '01:00:00:00:00:00/01:00:00:00:00:00',
-                      'actions': 'resubmit(,22)'},
-                     {'priority': 0, 'table': 3, 'actions': 'drop'},
-                     {'priority': 0, 'table': 4, 'actions': 'drop'},
-                     {'priority': 0, 'table': 6, 'actions': 'drop'},
-                     {'priority': 1, 'table': 10,
-                      'actions': 'learn(cookie=0,table=20,priority=1,'
-                      'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],'
-                      'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],'
-                      'load:0->NXM_OF_VLAN_TCI[],'
-                      'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],'
-                      'output:NXM_OF_IN_PORT[]),'
-                      'output:%s' % patch_int_ofport},
-                     {'priority': 0, 'table': 20, 'actions': 'resubmit(,22)'},
-                     {'priority': 0, 'table': 21, 'actions': 'resubmit(,22)'}
-                     ]
-        expected = [call.do_action_flows('add', flow_args),
-                    call.add_flow(priority=0, table=22, actions='drop')]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_provision_local_vlan(self):
-        network_type = 'vxlan'
-        lvid = 888
-        segmentation_id = 777
-        distributed = False
-        self.br.provision_local_vlan(network_type=network_type, lvid=lvid,
-                                     segmentation_id=segmentation_id,
-                                     distributed=distributed)
-        expected = [
-            call.add_flow(priority=1, tun_id=segmentation_id,
-                          actions='mod_vlan_vid:%s,resubmit(,10)' % lvid,
-                          table=4),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_reclaim_local_vlan(self):
-        network_type = 'vxlan'
-        segmentation_id = 777
-        self.br.reclaim_local_vlan(network_type=network_type,
-                                   segmentation_id=segmentation_id)
-        expected = [
-            call.delete_flows(tun_id=segmentation_id, table=4),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_flood_to_tun(self):
-        vlan = 3333
-        tun_id = 2222
-        ports = [11, 44, 22, 33]
-        self.br.install_flood_to_tun(vlan=vlan,
-                                     tun_id=tun_id,
-                                     ports=ports)
-        expected = [
-            call.mod_flow(table=22, dl_vlan=vlan,
-                          actions='strip_vlan,set_tunnel:%(tun)s,'
-                          'output:%(ports)s' % {
-                              'tun': tun_id,
-                              'ports': ','.join(map(str, ports)),
-                          }),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_flood_to_tun(self):
-        vlan = 3333
-        self.br.delete_flood_to_tun(vlan=vlan)
-        expected = [
-            call.delete_flows(table=22, dl_vlan=vlan),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_unicast_to_tun(self):
-        vlan = 3333
-        port = 55
-        mac = '08:60:6e:7f:74:e7'
-        tun_id = 2222
-        self.br.install_unicast_to_tun(vlan=vlan,
-                                       tun_id=tun_id,
-                                       port=port,
-                                       mac=mac)
-        expected = [
-            call.add_flow(priority=2, table=20, dl_dst=mac, dl_vlan=vlan,
-                          actions='strip_vlan,set_tunnel:%(tun)s,'
-                          'output:%(port)s' % {
-                              'tun': tun_id,
-                              'port': port,
-                          }),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_unicast_to_tun(self):
-        vlan = 3333
-        mac = '08:60:6e:7f:74:e7'
-        self.br.delete_unicast_to_tun(vlan=vlan, mac=mac)
-        expected = [
-            call.delete_flows(table=20, dl_dst=mac, dl_vlan=vlan),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_unicast_to_tun_without_mac(self):
-        vlan = 3333
-        mac = None
-        self.br.delete_unicast_to_tun(vlan=vlan, mac=mac)
-        expected = [
-            call.delete_flows(table=20, dl_vlan=vlan),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_install_arp_responder(self):
-        vlan = 3333
-        ip = '192.0.2.1'
-        mac = '08:60:6e:7f:74:e7'
-        self.br.install_arp_responder(vlan=vlan, ip=ip, mac=mac)
-        expected = [
-            call.add_flow(proto='arp', nw_dst=ip,
-                          actions='move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],'
-                          'mod_dl_src:%(mac)s,load:0x2->NXM_OF_ARP_OP[],'
-                          'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],'
-                          'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],'
-                          'load:%(mac)#x->NXM_NX_ARP_SHA[],'
-                          'load:%(ip)#x->NXM_OF_ARP_SPA[],in_port' % {
-                              'mac': netaddr.EUI(mac,
-                                                 dialect=netaddr.mac_unix),
-                              'ip': netaddr.IPAddress(ip),
-                          },
-                          priority=1, table=21, dl_vlan=vlan),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_arp_responder(self):
-        vlan = 3333
-        ip = '192.0.2.1'
-        self.br.delete_arp_responder(vlan=vlan, ip=ip)
-        expected = [
-            call.delete_flows(table=21, dl_vlan=vlan, proto='arp', nw_dst=ip),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_delete_arp_responder_without_ip(self):
-        vlan = 3333
-        ip = None
-        self.br.delete_arp_responder(vlan=vlan, ip=ip)
-        expected = [
-            call.delete_flows(table=21, dl_vlan=vlan, proto='arp'),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_setup_tunnel_port(self):
-        network_type = 'vxlan'
-        port = 11111
-        self.br.setup_tunnel_port(network_type=network_type, port=port)
-        expected = [
-            call.add_flow(priority=1, in_port=port, actions='resubmit(,4)'),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_cleanup_tunnel_port(self):
-        port = 11111
-        self.br.cleanup_tunnel_port(port=port)
-        expected = [
-            call.delete_flows(in_port=port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_add_dvr_mac_tun(self):
-        mac = '00:02:b3:13:fe:3d'
-        port = 8888
-        self.br.add_dvr_mac_tun(mac=mac, port=port)
-        expected = [
-            call.add_flow(priority=1, table=9, dl_src=mac,
-                          actions='output:%s' % port),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def test_remove_dvr_mac_tun(self):
-        mac = '00:02:b3:13:fe:3d'
-        self.br.remove_dvr_mac_tun(mac=mac)
-        expected = [
-            call.delete_flows(eth_src=mac, table_id=9),
-        ]
-        self.assertEqual(expected, self.mock.mock_calls)
-
-    def _mock_add_tunnel_port(self, deferred_br=False):
-        port_name = 'fake_port'
-        remote_ip = '192.168.1.3'
-        local_ip = '192.168.1.2'
-        tunnel_type = 'vxlan'
-        vxlan_udp_port = '4789'
-        dont_fragment = True
-        if deferred_br:
-            with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port',
-                            return_value=9999) as add_port, \
-                    self.br.deferred() as deferred_br:
-                ofport = deferred_br.add_tunnel_port(port_name, remote_ip,
-                                                     local_ip, tunnel_type,
-                                                     vxlan_udp_port,
-                                                     dont_fragment)
-        else:
-            with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port',
-                            return_value=9999) as add_port:
-                ofport = self.br.add_tunnel_port(port_name, remote_ip,
-                                                 local_ip, tunnel_type,
-                                                 vxlan_udp_port,
-                                                 dont_fragment)
-        self.assertEqual(9999, ofport)
-        self.assertEqual(1, add_port.call_count)
-        self.assertEqual(port_name, add_port.call_args[0][0])
-
-    def _mock_delete_port(self, deferred_br=False):
-        port_name = 'fake_port'
-        if deferred_br:
-            with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
-                            'delete_port') as delete_port, \
-                    self.br.deferred() as deferred_br:
-                deferred_br.delete_port(port_name)
-        else:
-            with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
-                            'delete_port') as delete_port:
-                self.br.delete_port(port_name)
-        self.assertEqual([call(port_name)], delete_port.mock_calls)
-
-    def test_add_tunnel_port(self):
-        self._mock_add_tunnel_port()
-
-    def test_delete_port(self):
-        self._mock_delete_port()
-
-    def test_deferred_br_add_tunnel_port(self):
-        self._mock_add_tunnel_port(True)
-
-    def test_deferred_br_delete_port(self):
-        self._mock_delete_port(True)
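
The ARP-responder expectation in test_install_arp_responder above leans on
printf-style '%#x' hex formatting of netaddr objects to build the
load:...->NXM_NX_ARP_SHA[] actions. Roughly equivalent, and easier to
verify interactively, assuming only that netaddr's EUI and IPAddress
convert cleanly to int (they do; the deleted test imports the same
library):

    import netaddr

    ip = netaddr.IPAddress('192.0.2.1')
    mac = netaddr.EUI('08:60:6e:7f:74:e7', dialect=netaddr.mac_unix)

    # '%#x' relies on integer conversion; leading zeroes are dropped.
    print(format(int(ip), '#x'))   # 0xc0000201
    print(format(int(mac), '#x'))  # 0x8606e7f74e7
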
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py
deleted file mode 100644 (file)
index 264b922..0000000
--- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
-# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
-# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-import mock
-from oslo_utils import importutils
-
-from neutron.tests import base
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
-    import fake_oflib
-
-
-_AGENT_PACKAGE = 'neutron.plugins.ml2.drivers.openvswitch.agent'
-_AGENT_NAME = _AGENT_PACKAGE + '.ovs_neutron_agent'
-_DVR_AGENT_NAME = ('neutron.plugins.ml2.drivers.openvswitch.agent.'
-                   'ovs_dvr_neutron_agent')
-
-
-class OVSAgentConfigTestBase(base.BaseTestCase):
-    def setUp(self):
-        super(OVSAgentConfigTestBase, self).setUp()
-        self.mod_agent = importutils.import_module(_AGENT_NAME)
-        self.mod_dvr_agent = importutils.import_module(_DVR_AGENT_NAME)
-
-
-class OVSAgentTestBase(OVSAgentConfigTestBase):
-    def setUp(self):
-        super(OVSAgentTestBase, self).setUp()
-        self.br_int_cls = importutils.import_class(self._BR_INT_CLASS)
-        self.br_phys_cls = importutils.import_class(self._BR_PHYS_CLASS)
-        self.br_tun_cls = importutils.import_class(self._BR_TUN_CLASS)
-
-    def _bridge_classes(self):
-        return {
-            'br_int': self.br_int_cls,
-            'br_phys': self.br_phys_cls,
-            'br_tun': self.br_tun_cls,
-        }
-
-
-class OVSOFCtlTestBase(OVSAgentTestBase):
-    _DRIVER_PACKAGE = _AGENT_PACKAGE + '.openflow.ovs_ofctl'
-    _BR_INT_CLASS = _DRIVER_PACKAGE + '.br_int.OVSIntegrationBridge'
-    _BR_TUN_CLASS = _DRIVER_PACKAGE + '.br_tun.OVSTunnelBridge'
-    _BR_PHYS_CLASS = _DRIVER_PACKAGE + '.br_phys.OVSPhysicalBridge'
-
-
-class OVSRyuTestBase(OVSAgentTestBase):
-    _DRIVER_PACKAGE = _AGENT_PACKAGE + '.openflow.native'
-    _BR_INT_CLASS = _DRIVER_PACKAGE + '.br_int.OVSIntegrationBridge'
-    _BR_TUN_CLASS = _DRIVER_PACKAGE + '.br_tun.OVSTunnelBridge'
-    _BR_PHYS_CLASS = _DRIVER_PACKAGE + '.br_phys.OVSPhysicalBridge'
-
-    def setUp(self):
-        self.fake_oflib_of = fake_oflib.patch_fake_oflib_of()
-        self.fake_oflib_of.start()
-        self.addCleanup(self.fake_oflib_of.stop)
-        super(OVSRyuTestBase, self).setUp()
-        ryu_app = mock.Mock()
-        self.br_int_cls = functools.partial(self.br_int_cls, ryu_app=ryu_app)
-        self.br_phys_cls = functools.partial(self.br_phys_cls, ryu_app=ryu_app)
-        self.br_tun_cls = functools.partial(self.br_tun_cls, ryu_app=ryu_app)
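
OVSRyuTestBase above uses functools.partial to pre-bind the ryu_app
keyword, so the native bridge classes can be instantiated with just a
bridge name, exactly like their ofctl counterparts. A tiny illustration of
that trick (Bridge and app are stand-ins, not Neutron classes):

    import functools


    class Bridge(object):
        def __init__(self, br_name, ryu_app=None):
            self.br_name = br_name
            self.ryu_app = ryu_app


    app = object()  # stand-in for the mocked Ryu application
    bridge_cls = functools.partial(Bridge, ryu_app=app)

    br = bridge_cls('br-int')
    assert br.ryu_app is app and br.br_name == 'br-int'

This keeps _bridge_classes() uniform across drivers: callers never need to
know whether a given driver requires the extra Ryu application argument.
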
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py
deleted file mode 100644 (file)
index 87ef62c..0000000
--- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py
+++ /dev/null
@@ -1,2795 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-import time
-
-import mock
-from oslo_config import cfg
-from oslo_log import log
-import oslo_messaging
-import testtools
-
-from neutron._i18n import _
-from neutron.agent.common import ovs_lib
-from neutron.agent.common import utils
-from neutron.agent.linux import async_process
-from neutron.agent.linux import ip_lib
-from neutron.common import constants as n_const
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \
-    as ovs_agent
-from neutron.tests import base
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
-    import ovs_test_base
-
-
-NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
-OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
-
-FAKE_MAC = '00:11:22:33:44:55'
-FAKE_IP1 = '10.0.0.1'
-FAKE_IP2 = '10.0.0.2'
-
-TEST_PORT_ID1 = 'port-id-1'
-TEST_PORT_ID2 = 'port-id-2'
-TEST_PORT_ID3 = 'port-id-3'
-
-TEST_NETWORK_ID1 = 'net-id-1'
-TEST_NETWORK_ID2 = 'net-id-2'
-
-DEVICE_OWNER_COMPUTE = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-
-
-class FakeVif(object):
-    ofport = 99
-    port_name = 'name'
-    vif_mac = 'aa:bb:cc:11:22:33'
-
-
-class MockFixedIntervalLoopingCall(object):
-    def __init__(self, f):
-        self.f = f
-
-    def start(self, interval=0):
-        self.f()
-
-
-class ValidateTunnelTypes(ovs_test_base.OVSAgentConfigTestBase):
-
-    def setUp(self):
-        super(ValidateTunnelTypes, self).setUp()
-        self.mock_validate_local_ip = mock.patch.object(
-            self.mod_agent, 'validate_local_ip').start()
-
-    def test_validate_tunnel_types_succeeds(self):
-        cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
-        cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE],
-                              group='AGENT')
-        self.mod_agent.validate_tunnel_config(cfg.CONF.AGENT.tunnel_types,
-                                              cfg.CONF.OVS.local_ip)
-        self.mock_validate_local_ip.assert_called_once_with('10.10.10.10')
-
-    def test_validate_tunnel_types_fails_for_invalid_tunnel_type(self):
-        cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
-        cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT')
-        with testtools.ExpectedException(SystemExit):
-            self.mod_agent.validate_tunnel_config(cfg.CONF.AGENT.tunnel_types,
-                                                  cfg.CONF.OVS.local_ip)
-
-
-class TestOvsNeutronAgent(object):
-
-    def setUp(self):
-        super(TestOvsNeutronAgent, self).setUp()
-        notifier_p = mock.patch(NOTIFIER)
-        notifier_cls = notifier_p.start()
-        self.notifier = mock.Mock()
-        notifier_cls.return_value = self.notifier
-        systemd_patch = mock.patch('oslo_service.systemd.notify_once')
-        self.systemd_notify = systemd_patch.start()
-
-        cfg.CONF.set_default('firewall_driver',
-                             'neutron.agent.firewall.NoopFirewallDriver',
-                             group='SECURITYGROUP')
-        cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT')
-        cfg.CONF.set_default('prevent_arp_spoofing', False, 'AGENT')
-        mock.patch(
-            'neutron.agent.common.ovs_lib.OVSBridge.get_ports_attributes',
-            return_value=[]).start()
-
-        mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config',
-                   new_callable=mock.PropertyMock,
-                   return_value={}).start()
-        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                               'setup_integration_br'),\
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'setup_ancillary_bridges',
-                                  return_value=[]),\
-                mock.patch('neutron.agent.linux.utils.get_interface_mac',
-                           return_value='00:00:00:00:00:01'),\
-                mock.patch(
-                    'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\
-                mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                           new=MockFixedIntervalLoopingCall),\
-                mock.patch(
-                    'neutron.agent.common.ovs_lib.OVSBridge.get_vif_ports',
-                    return_value=[]):
-            self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
-                                                        cfg.CONF)
-            self.agent.tun_br = self.br_tun_cls(br_name='br-tun')
-        self.agent.sg_agent = mock.Mock()
-
-    def _mock_port_bound(self, ofport=None, new_local_vlan=None,
-                         old_local_vlan=None, db_get_val=None):
-        port = mock.Mock()
-        port.ofport = ofport
-        net_uuid = 'my-net-uuid'
-        fixed_ips = [{'subnet_id': 'my-subnet-uuid',
-                      'ip_address': '1.1.1.1'}]
-        if old_local_vlan is not None:
-            self.agent.local_vlan_map[net_uuid] = (
-                self.mod_agent.LocalVLANMapping(
-                    old_local_vlan, None, None, None))
-        with mock.patch.object(self.agent, 'int_br', autospec=True) as int_br:
-            int_br.db_get_val.return_value = db_get_val
-            int_br.set_db_attribute.return_value = True
-            needs_binding = self.agent.port_bound(
-                port, net_uuid, 'local', None, None,
-                fixed_ips, DEVICE_OWNER_COMPUTE, False)
-        if db_get_val is None:
-            self.assertEqual(0, int_br.set_db_attribute.call_count)
-            self.assertFalse(needs_binding)
-        else:
-            vlan_mapping = {'net_uuid': net_uuid,
-                            'network_type': 'local',
-                            'physical_network': None}
-            int_br.set_db_attribute.assert_called_once_with(
-                "Port", mock.ANY, "other_config", vlan_mapping)
-            self.assertTrue(needs_binding)
-
-    def test_datapath_type_system(self):
-        # verify kernel datapath is default
-        expected = constants.OVS_DATAPATH_SYSTEM
-        self.assertEqual(expected, self.agent.int_br.datapath_type)
-
-    def test_datapath_type_netdev(self):
-
-        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                               'setup_integration_br'), \
-            mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                           'setup_ancillary_bridges',
-                           return_value=[]), \
-            mock.patch('neutron.agent.linux.utils.get_interface_mac',
-                    return_value='00:00:00:00:00:01'), \
-            mock.patch(
-                'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), \
-            mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                       new=MockFixedIntervalLoopingCall), \
-            mock.patch(
-                'neutron.agent.common.ovs_lib.OVSBridge.get_vif_ports',
-                return_value=[]), \
-            mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config',
-                       new_callable=mock.PropertyMock,
-                       return_value={'datapath_types': ['netdev']}):
-            # validate setting a non-default datapath
-            expected = constants.OVS_DATAPATH_NETDEV
-            cfg.CONF.set_override('datapath_type',
-                                  expected,
-                                  group='OVS')
-            self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
-                                                        cfg.CONF)
-            self.assertEqual(expected, self.agent.int_br.datapath_type)
-
-    def test_agent_type_ovs(self):
-        # verify agent_type is default
-        expected = n_const.AGENT_TYPE_OVS
-        self.assertEqual(expected,
-                         self.agent.agent_state['agent_type'])
-
-    def test_agent_type_alt(self):
-        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                               'setup_integration_br'),\
-            mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                              'setup_ancillary_bridges',
-                              return_value=[]), \
-            mock.patch('neutron.agent.linux.utils.get_interface_mac',
-                       return_value='00:00:00:00:00:01'), \
-            mock.patch(
-                'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), \
-            mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                       new=MockFixedIntervalLoopingCall), \
-            mock.patch(
-                'neutron.agent.common.ovs_lib.OVSBridge.get_vif_ports',
-                return_value=[]):
-            # validate setting a non-default agent_type
-            expected = 'alt agent type'
-            cfg.CONF.set_override('agent_type',
-                                  expected,
-                                  group='AGENT')
-            self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
-                                                        cfg.CONF)
-            self.assertEqual(expected,
-                             self.agent.agent_state['agent_type'])
-
-    def _test_restore_local_vlan_maps(self, tag, segmentation_id='1'):
-        port = mock.Mock()
-        port.port_name = 'fake_port'
-        net_uuid = 'fake_network_id'
-        local_vlan_map = {'net_uuid': net_uuid,
-                          'network_type': 'vlan',
-                          'physical_network': 'fake_network'}
-        if segmentation_id is not None:
-            local_vlan_map['segmentation_id'] = segmentation_id
-        with mock.patch.object(self.agent, 'int_br') as int_br:
-            int_br.get_vif_ports.return_value = [port]
-            int_br.get_ports_attributes.return_value = [{
-                'name': port.port_name, 'other_config': local_vlan_map,
-                'tag': tag
-            }]
-            self.agent._restore_local_vlan_map()
-            expected_hints = {}
-            if tag:
-                expected_hints[net_uuid] = tag
-            self.assertEqual(expected_hints, self.agent._local_vlan_hints)
-
-    def test_restore_local_vlan_map_with_device_has_tag(self):
-        self._test_restore_local_vlan_maps(2)
-
-    def test_restore_local_vlan_map_with_device_no_tag(self):
-        self._test_restore_local_vlan_maps([])
-
-    def test_restore_local_vlan_map_no_segmentation_id(self):
-        self._test_restore_local_vlan_maps(2, segmentation_id=None)
-
-    def test_restore_local_vlan_map_segmentation_id_compat(self):
-        self._test_restore_local_vlan_maps(2, segmentation_id='None')
-
-    def test_check_agent_configurations_for_dvr_raises(self):
-        self.agent.enable_distributed_routing = True
-        self.agent.enable_tunneling = True
-        self.agent.l2_pop = False
-        self.assertRaises(ValueError,
-                          self.agent._check_agent_configurations)
-
-    def test_check_agent_configurations_for_dvr(self):
-        self.agent.enable_distributed_routing = True
-        self.agent.enable_tunneling = True
-        self.agent.l2_pop = True
-        self.assertIsNone(self.agent._check_agent_configurations())
-
-    def test_check_agent_configurations_for_dvr_with_vlan(self):
-        self.agent.enable_distributed_routing = True
-        self.agent.enable_tunneling = False
-        self.agent.l2_pop = False
-        self.assertIsNone(self.agent._check_agent_configurations())
-
-    def test_port_bound_deletes_flows_for_valid_ofport(self):
-        self._mock_port_bound(ofport=1, new_local_vlan=1, db_get_val={})
-
-    def test_port_bound_ignores_flows_for_invalid_ofport(self):
-        self._mock_port_bound(ofport=-1, new_local_vlan=1, db_get_val={})
-
-    def test_port_bound_does_not_rewire_if_already_bound(self):
-        self._mock_port_bound(
-            ofport=-1, new_local_vlan=1, old_local_vlan=1, db_get_val={})
-
-    def test_port_bound_not_found(self):
-        self._mock_port_bound(ofport=1, new_local_vlan=1, db_get_val=None)
-
-    def _test_port_dead(self, cur_tag=None):
-        port = mock.Mock()
-        port.ofport = 1
-        with mock.patch.object(self.agent, 'int_br') as int_br:
-            int_br.db_get_val.return_value = cur_tag
-            self.agent.port_dead(port)
-        if cur_tag is None or cur_tag == self.mod_agent.DEAD_VLAN_TAG:
-            self.assertFalse(int_br.set_db_attribute.called)
-            self.assertFalse(int_br.drop_port.called)
-        else:
-            int_br.assert_has_calls([
-                mock.call.set_db_attribute("Port", mock.ANY, "tag",
-                                           self.mod_agent.DEAD_VLAN_TAG,
-                                           log_errors=True),
-                mock.call.drop_port(in_port=port.ofport),
-            ])
-
-    def test_port_dead(self):
-        self._test_port_dead()
-
-    def test_port_dead_with_port_already_dead(self):
-        self._test_port_dead(self.mod_agent.DEAD_VLAN_TAG)
-
-    def test_port_dead_with_valid_tag(self):
-        self._test_port_dead(cur_tag=1)
-
-    def mock_scan_ports(self, vif_port_set=None, registered_ports=None,
-                        updated_ports=None, port_tags_dict=None, sync=False):
-        if port_tags_dict is None:  # Because empty dicts evaluate as False.
-            port_tags_dict = {}
-        with mock.patch.object(self.agent.int_br,
-                               'get_vif_port_set',
-                               return_value=vif_port_set),\
-                mock.patch.object(self.agent.int_br,
-                                  'get_port_tag_dict',
-                                  return_value=port_tags_dict):
-            return self.agent.scan_ports(registered_ports, sync, updated_ports)
-
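-    # scan_ports() diffs the OVSDB view of VIF ports against the set the
-    # agent already knows about; roughly, with vif ports {1, 3} and
-    # registered ports {1, 2} it yields
-    #     {'current': {1, 3}, 'added': {3}, 'removed': {2}}
-    # and with sync=True every current port is reported as added.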
-    def test_scan_ports_returns_current_only_for_unchanged_ports(self):
-        vif_port_set = set([1, 3])
-        registered_ports = set([1, 3])
-        expected = {'current': vif_port_set}
-        actual = self.mock_scan_ports(vif_port_set, registered_ports)
-        self.assertEqual(expected, actual)
-
-    def test_scan_ports_returns_port_changes(self):
-        vif_port_set = set([1, 3])
-        registered_ports = set([1, 2])
-        expected = dict(current=vif_port_set, added=set([3]), removed=set([2]))
-        actual = self.mock_scan_ports(vif_port_set, registered_ports)
-        self.assertEqual(expected, actual)
-
-    def test_scan_ports_returns_port_changes_with_sync(self):
-        vif_port_set = set([1, 3])
-        registered_ports = set([1, 2])
-        expected = dict(current=vif_port_set, added=vif_port_set,
-                        removed=set([2]))
-        actual = self.mock_scan_ports(vif_port_set, registered_ports,
-                                      sync=True)
-        self.assertEqual(expected, actual)
-
-    def _test_scan_ports_with_updated_ports(self, updated_ports):
-        vif_port_set = set([1, 3, 4])
-        registered_ports = set([1, 2, 4])
-        expected = dict(current=vif_port_set, added=set([3]),
-                        removed=set([2]), updated=set([4]))
-        actual = self.mock_scan_ports(vif_port_set, registered_ports,
-                                      updated_ports)
-        self.assertEqual(expected, actual)
-
-    def test_scan_ports_finds_known_updated_ports(self):
-        self._test_scan_ports_with_updated_ports(set([4]))
-
-    def test_scan_ports_ignores_unknown_updated_ports(self):
-        # Port '5' is not among the current ports: it has either never
-        # been wired or has already been removed, so it is ignored.
-        self._test_scan_ports_with_updated_ports(set([4, 5]))
-
-    def test_scan_ports_ignores_updated_port_if_removed(self):
-        vif_port_set = set([1, 3])
-        registered_ports = set([1, 2])
-        updated_ports = set([1, 2])
-        expected = dict(current=vif_port_set, added=set([3]),
-                        removed=set([2]), updated=set([1]))
-        actual = self.mock_scan_ports(vif_port_set, registered_ports,
-                                      updated_ports)
-        self.assertEqual(expected, actual)
-
-    def test_scan_ports_no_vif_changes_returns_updated_port_only(self):
-        vif_port_set = set([1, 2, 3])
-        registered_ports = set([1, 2, 3])
-        updated_ports = set([2])
-        expected = dict(current=vif_port_set, updated=set([2]))
-        actual = self.mock_scan_ports(vif_port_set, registered_ports,
-                                      updated_ports)
-        self.assertEqual(expected, actual)
-
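-    # process_ports_events() is the event-driven counterpart of
-    # scan_ports(): instead of rescanning the bridge, it consumes add and
-    # remove events from the OVSDB monitor and resolves each port id from
-    # the interface's external_ids. Added ports whose ofport is not yet
-    # assigned are parked in devices_not_ready_yet and retried on the next
-    # iteration, as the tests below exercise.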
-    def _test_process_ports_events(self, events, registered_ports,
-                                   ancillary_ports, expected_ports,
-                                   expected_ancillary, updated_ports=None):
-        with mock.patch.object(self.agent, 'check_changed_vlans',
-                               return_value=set()):
-            devices_not_ready_yet = set()
-            actual = self.agent.process_ports_events(
-                events, registered_ports, ancillary_ports,
-                devices_not_ready_yet, updated_ports)
-            self.assertEqual(
-                (expected_ports, expected_ancillary, devices_not_ready_yet),
-                actual)
-
-    def test_process_ports_events_returns_current_for_unchanged_ports(self):
-        events = {'added': [], 'removed': []}
-        registered_ports = {1, 3}
-        ancillary_ports = {2, 5}
-        expected_ports = {'current': registered_ports, 'added': set(),
-                          'removed': set()}
-        expected_ancillary = {'current': ancillary_ports, 'added': set(),
-                              'removed': set()}
-        self._test_process_ports_events(events, registered_ports,
-                                        ancillary_ports, expected_ports,
-                                        expected_ancillary)
-
-    def test_process_port_events_no_vif_changes_return_updated_port_only(self):
-        events = {'added': [], 'removed': []}
-        registered_ports = {1, 2, 3}
-        updated_ports = {2}
-        expected_ports = dict(current=registered_ports, updated={2},
-                              added=set(), removed=set())
-        expected_ancillary = dict(current=set(), added=set(), removed=set())
-        self._test_process_ports_events(events, registered_ports,
-                                        set(), expected_ports,
-                                        expected_ancillary, updated_ports)
-
-    def test_process_port_events_ignores_removed_port_if_never_added(self):
-        events = {'added': [],
-                  'removed': [{'name': 'port2', 'ofport': 2,
-                               'external_ids': {'attached-mac': 'test-mac'}}]}
-        registered_ports = {1}
-        expected_ports = dict(current=registered_ports, added=set(),
-                              removed=set())
-        expected_ancillary = dict(current=set(), added=set(), removed=set())
-        devices_not_ready_yet = set()
-        with mock.patch.object(self.agent.int_br, 'portid_from_external_ids',
-                               side_effect=[2]), \
-            mock.patch.object(self.agent, 'check_changed_vlans',
-                              return_value=set()):
-            actual = self.agent.process_ports_events(
-                events, registered_ports, set(), devices_not_ready_yet)
-            self.assertEqual(
-                (expected_ports, expected_ancillary, devices_not_ready_yet),
-                actual)
-
-    def test_process_port_events_port_not_ready_yet(self):
-        events = {'added': [{'name': 'port5', 'ofport': [],
-                  'external_ids': {'attached-mac': 'test-mac'}}],
-                  'removed': []}
-        old_devices_not_ready = {'port4'}
-        registered_ports = set([1, 2, 3])
-        expected_ports = dict(current=set([1, 2, 3, 4]),
-                              added=set([4]), removed=set())
-        self.agent.ancillary_brs = []
-        expected_ancillary = dict(current=set(), added=set(), removed=set())
-        with mock.patch.object(self.agent.int_br, 'portid_from_external_ids',
-                               side_effect=[5, 4]), \
-            mock.patch.object(self.agent, 'check_changed_vlans',
-                              return_value=set()), \
-            mock.patch.object(self.agent.int_br, 'get_ports_attributes',
-                              return_value=[{'name': 'port4', 'ofport': 4,
-                                             'external_ids': {
-                                                 'attached-mac': 'mac4'}}]):
-            expected_devices_not_ready = {'port5'}
-            actual = self.agent.process_ports_events(
-                events, registered_ports, set(), old_devices_not_ready)
-            self.assertEqual(
-                (expected_ports, expected_ancillary,
-                 expected_devices_not_ready),
-                actual)
-
-    def _test_process_port_events_with_updated_ports(self, updated_ports):
-        events = {'added': [{'name': 'port3', 'ofport': 3,
-                            'external_ids': {'attached-mac': 'test-mac'}},
-                            {'name': 'qg-port2', 'ofport': 6,
-                             'external_ids': {'attached-mac': 'test-mac'}}],
-                  'removed': [{'name': 'port2', 'ofport': 2,
-                               'external_ids': {'attached-mac': 'test-mac'}},
-                              {'name': 'qg-port1', 'ofport': 5,
-                               'external_ids': {'attached-mac': 'test-mac'}}]}
-        registered_ports = {1, 2, 4}
-        ancillary_ports = {5, 8}
-        expected_ports = dict(current={1, 3, 4}, added={3}, removed={2})
-        if updated_ports:
-            expected_ports['updated'] = updated_ports
-        expected_ancillary = dict(current={6, 8}, added={6},
-                                  removed={5})
-        ancillary_bridge = mock.Mock()
-        ancillary_bridge.get_vif_port_set.return_value = {5, 6, 8}
-        self.agent.ancillary_brs = [ancillary_bridge]
-        with mock.patch.object(self.agent.int_br, 'portid_from_external_ids',
-                              side_effect=[3, 6, 2, 5]), \
-            mock.patch.object(self.agent, 'check_changed_vlans',
-                              return_value=set()):
-
-            devices_not_ready_yet = set()
-            actual = self.agent.process_ports_events(
-                events, registered_ports, ancillary_ports,
-                devices_not_ready_yet, updated_ports)
-            self.assertEqual(
-                (expected_ports, expected_ancillary, devices_not_ready_yet),
-                actual)
-
-    def test_process_port_events_returns_port_changes(self):
-        self._test_process_port_events_with_updated_ports(set())
-
-    def test_process_port_events_finds_known_updated_ports(self):
-        self._test_process_port_events_with_updated_ports({4})
-
-    def test_process_port_events_ignores_unknown_updated_ports(self):
-        # Port '10' is not among the current ports: it has either never
-        # been wired or has already been removed, so it is ignored.
-        self._test_process_port_events_with_updated_ports({4, 10})
-
-    def test_process_port_events_ignores_updated_port_if_removed(self):
-        self._test_process_port_events_with_updated_ports({4, 5})
-
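-    # check_changed_vlans() flags ports whose OVSDB tag no longer matches
-    # the vlan recorded in their LocalVLANMapping, so scan_ports() reports
-    # them as updated and they get rewired, as the next test shows.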
-    def test_update_ports_returns_changed_vlan(self):
-        br = self.br_int_cls('br-int')
-        mac = "ca:fe:de:ad:be:ef"
-        port = ovs_lib.VifPort(1, 1, 1, mac, br)
-        lvm = self.mod_agent.LocalVLANMapping(
-            1, '1', None, 1, {port.vif_id: port})
-        local_vlan_map = {'1': lvm}
-        vif_port_set = set([1, 3])
-        registered_ports = set([1, 2])
-        port_tags_dict = {1: []}
-        expected = dict(
-            added=set([3]), current=vif_port_set,
-            removed=set([2]), updated=set([1])
-        )
-        with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map),\
-                mock.patch.object(self.agent, 'tun_br', autospec=True):
-            actual = self.mock_scan_ports(
-                vif_port_set, registered_ports, port_tags_dict=port_tags_dict)
-        self.assertEqual(expected, actual)
-
-    def test_bind_devices(self):
-        devices_up = ['tap1']
-        devices_down = ['tap2']
-        self.agent.local_vlan_map["net1"] = mock.Mock()
-        ovs_db_list = [{'name': 'tap1', 'tag': []},
-                       {'name': 'tap2', 'tag': []}]
-        vif_port1 = mock.Mock()
-        vif_port1.port_name = 'tap1'
-        vif_port2 = mock.Mock()
-        vif_port2.port_name = 'tap2'
-        port_details = [
-            {'network_id': 'net1', 'vif_port': vif_port1,
-             'device': devices_up[0],
-             'admin_state_up': True},
-            {'network_id': 'net1', 'vif_port': vif_port2,
-             'device': devices_down[0],
-             'admin_state_up': False}]
-        with mock.patch.object(
-            self.agent.plugin_rpc, 'update_device_list',
-            return_value={'devices_up': devices_up,
-                          'devices_down': devices_down,
-                          'failed_devices_up': [],
-                          'failed_devices_down': []}) as update_devices, \
-                mock.patch.object(self.agent,
-                                  'int_br') as int_br:
-            int_br.get_ports_attributes.return_value = ovs_db_list
-            self.agent._bind_devices(port_details)
-            update_devices.assert_called_once_with(mock.ANY, devices_up,
-                                                   devices_down,
-                                                   mock.ANY, mock.ANY)
-
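-    # _bind_devices() reads the current tags from the integration bridge,
-    # reports each device up or down through the update_device_list RPC
-    # according to admin_state_up, and (when enabled) applies ARP spoofing
-    # protection, as the two tests after the helper below toggle.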
-    def _test_arp_spoofing(self, enable_prevent_arp_spoofing):
-        self.agent.prevent_arp_spoofing = enable_prevent_arp_spoofing
-
-        ovs_db_list = [{'name': 'fake_device', 'tag': []}]
-        self.agent.local_vlan_map = {
-            'fake_network': ovs_agent.LocalVLANMapping(1, None, None, 1)}
-        vif_port = mock.Mock()
-        vif_port.port_name = 'fake_device'
-        vif_port.ofport = 1
-        need_binding_ports = [{'network_id': 'fake_network',
-                               'vif_port': vif_port,
-                               'device': 'fake_device',
-                               'admin_state_up': True}]
-        with mock.patch.object(
-            self.agent.plugin_rpc, 'update_device_list',
-            return_value={'devices_up': [],
-                          'devices_down': [],
-                          'failed_devices_up': [],
-                          'failed_devices_down': []}), \
-                mock.patch.object(self.agent,
-                                  'int_br') as int_br, \
-                mock.patch.object(
-                    self.agent,
-                    'setup_arp_spoofing_protection') as setup_arp:
-            int_br.get_ports_attributes.return_value = ovs_db_list
-            self.agent._bind_devices(need_binding_ports)
-            self.assertEqual(enable_prevent_arp_spoofing, setup_arp.called)
-
-    def test_setup_arp_spoofing_protection_enable(self):
-        self._test_arp_spoofing(True)
-
-    def test_setup_arp_spoofing_protection_disabled(self):
-        self._test_arp_spoofing(False)
-
-    def _mock_treat_devices_added_updated(self, details, port, func_name):
-        """Mock treat devices added or updated.
-
-        :param details: the details to return for the device
-        :param port: the port that get_vif_port_by_id should return
-        :param func_name: the function that should be called
-        :returns: whether the named function was called
-        """
-        with mock.patch.object(self.agent.plugin_rpc,
-                               'get_devices_details_list_and_failed_devices',
-                               return_value={'devices': [details],
-                                             'failed_devices': None}),\
-                mock.patch.object(self.agent.int_br,
-                                  'get_vifs_by_ids',
-                                  return_value={details['device']: port}),\
-                mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
-                                  return_value={'devices_up': [],
-                                                'devices_down': details,
-                                                'failed_devices_up': [],
-                                                'failed_devices_down': []}),\
-                mock.patch.object(self.agent.int_br,
-                                  'get_port_tag_dict',
-                                  return_value={}),\
-                mock.patch.object(self.agent, func_name) as func:
-            skip_devs, need_bound_devices, insecure_ports = (
-                self.agent.treat_devices_added_or_updated([{}], False))
-            # No device should have been skipped
-            self.assertFalse(skip_devs)
-            return func.called
-
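-    # treat_devices_added_or_updated() returns a
-    # (skipped_devices, need_binding, insecure_ports) triple; a port with
-    # an invalid ofport is not wired, and a device the plugin no longer
-    # reports details for is handed to port_dead(), as tested below.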
-    def test_treat_devices_added_updated_ignores_invalid_ofport(self):
-        port = mock.Mock()
-        port.ofport = -1
-        self.assertFalse(self._mock_treat_devices_added_updated(
-            mock.MagicMock(), port, 'port_dead'))
-
-    def test_treat_devices_added_updated_marks_unknown_port_as_dead(self):
-        port = mock.Mock()
-        port.ofport = 1
-        self.assertTrue(self._mock_treat_devices_added_updated(
-            mock.MagicMock(), port, 'port_dead'))
-
-    def test_treat_devices_added_does_not_process_missing_port(self):
-        with mock.patch.object(
-            self.agent.plugin_rpc,
-            'get_devices_details_list_and_failed_devices') as get_dev_fn,\
-                mock.patch.object(self.agent.int_br,
-                                  'get_vif_port_by_id',
-                                  return_value=None):
-            self.assertFalse(get_dev_fn.called)
-
-    def test_treat_devices_added_updated_updates_known_port(self):
-        details = mock.MagicMock()
-        details.__contains__.side_effect = lambda x: True
-        self.assertTrue(self._mock_treat_devices_added_updated(
-            details, mock.Mock(), 'treat_vif_port'))
-
-    def test_treat_devices_added_updated_sends_vif_port_into_extension_manager(
-        self, *args):
-        details = mock.MagicMock()
-        details.__contains__.side_effect = lambda x: True
-        port = mock.MagicMock()
-
-        def fake_handle_port(context, port):
-            self.assertIn('vif_port', port)
-
-        with mock.patch.object(self.agent.plugin_rpc,
-                               'get_devices_details_list_and_failed_devices',
-                               return_value={'devices': [details],
-                                             'failed_devices': None}),\
-            mock.patch.object(self.agent.ext_manager,
-                              'handle_port', new=fake_handle_port),\
-            mock.patch.object(self.agent.int_br,
-                              'get_vifs_by_ids',
-                              return_value={details['device']: port}),\
-            mock.patch.object(self.agent, 'treat_vif_port',
-                              return_value=False):
-
-            self.agent.treat_devices_added_or_updated([{}], False)
-
-    def test_treat_devices_added_updated_skips_if_port_not_found(self):
-        dev_mock = mock.MagicMock()
-        dev_mock.__getitem__.return_value = 'the_skipped_one'
-        with mock.patch.object(self.agent.plugin_rpc,
-                               'get_devices_details_list_and_failed_devices',
-                               return_value={'devices': [dev_mock],
-                                             'failed_devices': None}),\
-                mock.patch.object(self.agent.int_br,
-                                  'get_port_tag_dict',
-                                  return_value={}),\
-                mock.patch.object(self.agent.int_br,
-                                  'get_vifs_by_ids',
-                                  return_value={}),\
-                mock.patch.object(self.agent,
-                                  'treat_vif_port') as treat_vif_port:
-            skip_devs = self.agent.treat_devices_added_or_updated([{}], False)
-            # The missing device should be reported as skipped, with
-            # nothing to bind and no insecure ports
-            self.assertEqual((['the_skipped_one'], [], []), skip_devs)
-            self.assertFalse(treat_vif_port.called)
-
-    def test_treat_devices_added_updated_put_port_down(self):
-        fake_details_dict = {'admin_state_up': False,
-                             'port_id': 'xxx',
-                             'device': 'xxx',
-                             'network_id': 'yyy',
-                             'physical_network': 'foo',
-                             'segmentation_id': 'bar',
-                             'network_type': 'baz',
-                             'fixed_ips': [{'subnet_id': 'my-subnet-uuid',
-                                            'ip_address': '1.1.1.1'}],
-                             'device_owner': DEVICE_OWNER_COMPUTE,
-                             'port_security_enabled': True
-                             }
-
-        with mock.patch.object(self.agent.plugin_rpc,
-                               'get_devices_details_list_and_failed_devices',
-                               return_value={'devices': [fake_details_dict],
-                                             'failed_devices': None}),\
-                mock.patch.object(self.agent.int_br,
-                                  'get_vifs_by_ids',
-                                  return_value={'xxx': mock.MagicMock()}),\
-                mock.patch.object(self.agent.int_br, 'get_port_tag_dict',
-                                  return_value={}),\
-                mock.patch.object(self.agent,
-                                  'treat_vif_port') as treat_vif_port:
-            skip_devs, need_bound_devices, insecure_ports = (
-                self.agent.treat_devices_added_or_updated([{}], False))
-            # No device should have been skipped, so no resync is needed
-            self.assertFalse(skip_devs)
-            self.assertTrue(treat_vif_port.called)
-
-    def _mock_treat_devices_removed(self, port_exists):
-        details = dict(exists=port_exists)
-        with mock.patch.object(self.agent.plugin_rpc,
-                               'update_device_list',
-                               return_value={'devices_up': [],
-                                             'devices_down': details,
-                                             'failed_devices_up': [],
-                                             'failed_devices_down': []}):
-            with mock.patch.object(self.agent, 'port_unbound') as port_unbound:
-                self.assertFalse(self.agent.treat_devices_removed([{}]))
-        self.assertTrue(port_unbound.called)
-
-    def test_treat_devices_removed_unbinds_port(self):
-        self._mock_treat_devices_removed(True)
-
-    def test_treat_devices_removed_ignores_missing_port(self):
-        self._mock_treat_devices_removed(False)
-
-    def test_bind_port_with_missing_network(self):
-        vif_port = mock.Mock()
-        vif_port.name.return_value = 'port'
-        self.agent._bind_devices([{'network_id': 'non-existent',
-                                   'vif_port': vif_port}])
-
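-    # process_network_ports() drives one processing pass: added and
-    # updated devices are fed to the security-group agent's port filters
-    # and to treat_devices_added_or_updated(), removed devices go to
-    # treat_devices_removed(), and the return value signals whether a
-    # resync is needed.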
-    def _test_process_network_ports(self, port_info):
-        with mock.patch.object(self.agent.sg_agent,
-                               "setup_port_filters") as setup_port_filters,\
-                mock.patch.object(
-                    self.agent,
-                    "treat_devices_added_or_updated",
-                    return_value=([], [], [])) as device_added_updated,\
-                mock.patch.object(self.agent.int_br, "get_ports_attributes",
-                                  return_value=[]),\
-                mock.patch.object(self.agent,
-                                  "treat_devices_removed",
-                                  return_value=False) as device_removed:
-            self.assertFalse(self.agent.process_network_ports(port_info,
-                                                              False))
-            setup_port_filters.assert_called_once_with(
-                port_info.get('added', set()),
-                port_info.get('updated', set()))
-            devices_added_updated = (port_info.get('added', set()) |
-                                     port_info.get('updated', set()))
-            if devices_added_updated:
-                device_added_updated.assert_called_once_with(
-                    devices_added_updated, False)
-            if port_info.get('removed', set()):
-                device_removed.assert_called_once_with(port_info['removed'])
-
-    def test_process_network_ports(self):
-        self._test_process_network_ports(
-            {'current': set(['tap0']),
-             'removed': set(['eth0']),
-             'added': set(['eth1'])})
-
-    def test_process_network_port_with_updated_ports(self):
-        self._test_process_network_ports(
-            {'current': set(['tap0', 'tap1']),
-             'updated': set(['tap1', 'eth1']),
-             'removed': set(['eth0']),
-             'added': set(['eth1'])})
-
-    def test_process_network_port_with_empty_port(self):
-        self._test_process_network_ports({})
-
-    def test_process_network_ports_with_insecure_ports(self):
-        port_info = {'current': set(['tap0', 'tap1']),
-                     'updated': set(['tap1']),
-                     'removed': set([]),
-                     'added': set(['eth1'])}
-        with mock.patch.object(self.agent.sg_agent,
-                               "setup_port_filters") as setup_port_filters,\
-                mock.patch.object(
-                    self.agent,
-                    "treat_devices_added_or_updated",
-                    return_value=([], [], ['eth1'])) as device_added_updated:
-            self.assertFalse(self.agent.process_network_ports(port_info,
-                                                              False))
-            device_added_updated.assert_called_once_with(
-                set(['eth1', 'tap1']), False)
-            setup_port_filters.assert_called_once_with(
-                set(), port_info.get('updated', set()))
-
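-    # _report_state() sends the agent state over the state RPC; the
-    # start_flag is dropped after the first successful report, systemd is
-    # notified only once, and an AGENT_REVIVED reply forces a fullsync, as
-    # the three tests below check.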
-    def test_report_state(self):
-        with mock.patch.object(self.agent.state_rpc,
-                               "report_state") as report_st:
-            self.agent.int_br_device_count = 5
-            self.systemd_notify.assert_not_called()
-            self.agent._report_state()
-            report_st.assert_called_with(self.agent.context,
-                                         self.agent.agent_state, True)
-            self.systemd_notify.assert_called_once_with()
-            self.systemd_notify.reset_mock()
-            self.assertNotIn("start_flag", self.agent.agent_state)
-            self.assertEqual(
-                self.agent.agent_state["configurations"]["devices"],
-                self.agent.int_br_device_count
-            )
-            self.agent._report_state()
-            report_st.assert_called_with(self.agent.context,
-                                         self.agent.agent_state, True)
-            self.systemd_notify.assert_not_called()
-
-    def test_report_state_fail(self):
-        with mock.patch.object(self.agent.state_rpc,
-                               "report_state") as report_st:
-            report_st.side_effect = Exception()
-            self.agent._report_state()
-            report_st.assert_called_with(self.agent.context,
-                                         self.agent.agent_state, True)
-            self.agent._report_state()
-            report_st.assert_called_with(self.agent.context,
-                                         self.agent.agent_state, True)
-            self.systemd_notify.assert_not_called()
-
-    def test_report_state_revived(self):
-        with mock.patch.object(self.agent.state_rpc,
-                               "report_state") as report_st:
-            report_st.return_value = n_const.AGENT_REVIVED
-            self.agent._report_state()
-            self.assertTrue(self.agent.fullsync)
-
-    def test_port_update(self):
-        port = {"id": TEST_PORT_ID1,
-                "network_id": TEST_NETWORK_ID1,
-                "admin_state_up": False}
-        self.agent.port_update("unused_context",
-                               port=port,
-                               network_type="vlan",
-                               segmentation_id="1",
-                               physical_network="physnet")
-        self.assertEqual(set([TEST_PORT_ID1]), self.agent.updated_ports)
-
-    def test_port_delete_after_update(self):
-        """Make sure a port is not marked for delete and update."""
-        port = {'id': TEST_PORT_ID1}
-
-        self.agent.port_update(context=None, port=port)
-        self.agent.port_delete(context=None, port_id=port['id'])
-        self.assertEqual(set(), self.agent.updated_ports)
-        self.assertEqual(set([port['id']]), self.agent.deleted_ports)
-
-    def test_process_deleted_ports_cleans_network_ports(self):
-        self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID1)
-        self.agent.port_delete(context=None, port_id=TEST_PORT_ID1)
-        self.agent.sg_agent = mock.Mock()
-        self.agent.int_br = mock.Mock()
-        self.agent.process_deleted_ports(port_info={})
-        self.assertEqual(set(), self.agent.network_ports[TEST_NETWORK_ID1])
-
-    def test_network_update(self):
-        """Network update marks port for update. """
-        network = {'id': TEST_NETWORK_ID1}
-        port = {'id': TEST_PORT_ID1, 'network_id': network['id']}
-
-        self.agent._update_port_network(port['id'], port['network_id'])
-        self.agent.network_update(context=None, network=network)
-        self.assertEqual(set([port['id']]), self.agent.updated_ports)
-
-    def test_network_update_outoforder(self):
-        """Network update arrives later than port_delete.
-
-        The main agent loop has not yet processed the ports, so we
-        ensure the deleted port is not marked for update.
-        """
-        network = {'id': TEST_NETWORK_ID1}
-        port = {'id': TEST_PORT_ID1, 'network_id': network['id']}
-
-        self.agent._update_port_network(port['id'], port['network_id'])
-        self.agent.port_delete(context=None, port_id=port['id'])
-        self.agent.network_update(context=None, network=network)
-        self.assertEqual(set(), self.agent.updated_ports)
-
-    def test_update_port_network(self):
-        """Ensure ports are associated and moved across networks correctly."""
-        self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID1)
-        self.agent._update_port_network(TEST_PORT_ID2, TEST_NETWORK_ID1)
-        self.agent._update_port_network(TEST_PORT_ID3, TEST_NETWORK_ID2)
-        self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID2)
-
-        self.assertEqual(set([TEST_PORT_ID2]),
-                         self.agent.network_ports[TEST_NETWORK_ID1])
-        self.assertEqual(set([TEST_PORT_ID1, TEST_PORT_ID3]),
-                         self.agent.network_ports[TEST_NETWORK_ID2])
-
-    def test_port_delete(self):
-        vif = FakeVif()
-        with mock.patch.object(self.agent, 'int_br') as int_br:
-            int_br.get_vif_by_port_id.return_value = vif.port_name
-            int_br.get_vif_port_by_id.return_value = vif
-            self.agent.port_delete("unused_context",
-                                   port_id='id')
-            self.agent.process_deleted_ports(port_info={})
-            # The main things we care about are that the port gets put in
-            # the dead vlan and its traffic gets dropped
-            int_br.set_db_attribute.assert_any_call(
-                'Port', vif.port_name, 'tag', self.mod_agent.DEAD_VLAN_TAG,
-                log_errors=False)
-            int_br.drop_port.assert_called_once_with(in_port=vif.ofport)
-
-    def test_port_delete_removed_port(self):
-        with mock.patch.object(self.agent, 'int_br') as int_br:
-            self.agent.port_delete("unused_context",
-                                   port_id='id')
-            # if it was removed from the bridge, we shouldn't be processing it
-            self.agent.process_deleted_ports(port_info={'removed': {'id', }})
-            self.assertFalse(int_br.set_db_attribute.called)
-            self.assertFalse(int_br.drop_port.called)
-
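-    # setup_physical_bridges() wires each physical bridge to br-int with a
-    # patch-port pair named after the bridge (e.g. 'int-br-eth' and
-    # 'phy-br-eth'); both ends start peered to NONEXISTENT_PEER with drop
-    # flows installed, and are only pointed at each other once both ports
-    # exist.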
-    def test_setup_physical_bridges(self):
-        with mock.patch.object(ip_lib.IPDevice, "exists") as devex_fn,\
-                mock.patch.object(sys, "exit"),\
-                mock.patch.object(utils, "execute"),\
-                mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\
-                mock.patch.object(self.agent, 'int_br') as int_br:
-            devex_fn.return_value = True
-            parent = mock.MagicMock()
-            phys_br = phys_br_cls()
-            parent.attach_mock(phys_br_cls, 'phys_br_cls')
-            parent.attach_mock(phys_br, 'phys_br')
-            parent.attach_mock(int_br, 'int_br')
-            phys_br.add_patch_port.return_value = "phy_ofport"
-            int_br.add_patch_port.return_value = "int_ofport"
-            self.agent.setup_physical_bridges({"physnet1": "br-eth"})
-            expected_calls = [
-                mock.call.phys_br_cls('br-eth'),
-                mock.call.phys_br.create(),
-                mock.call.phys_br.setup_controllers(mock.ANY),
-                mock.call.phys_br.setup_default_table(),
-                mock.call.int_br.db_get_val('Interface', 'int-br-eth',
-                                            'type'),
-                # Have to use __getattr__ here to avoid mock._Call.__eq__
-                # method being called
-                mock.call.int_br.db_get_val().__getattr__('__eq__')('veth'),
-                mock.call.int_br.add_patch_port('int-br-eth',
-                                                constants.NONEXISTENT_PEER),
-                mock.call.phys_br.add_patch_port('phy-br-eth',
-                                                 constants.NONEXISTENT_PEER),
-                mock.call.int_br.drop_port(in_port='int_ofport'),
-                mock.call.phys_br.drop_port(in_port='phy_ofport'),
-                mock.call.int_br.set_db_attribute('Interface', 'int-br-eth',
-                                                  'options:peer',
-                                                  'phy-br-eth'),
-                mock.call.phys_br.set_db_attribute('Interface', 'phy-br-eth',
-                                                   'options:peer',
-                                                   'int-br-eth'),
-            ]
-            parent.assert_has_calls(expected_calls)
-            self.assertEqual(self.agent.int_ofports["physnet1"],
-                             "int_ofport")
-            self.assertEqual(self.agent.phys_ofports["physnet1"],
-                             "phy_ofport")
-
-    def test_setup_physical_bridges_using_veth_interconnection(self):
-        self.agent.use_veth_interconnection = True
-        with mock.patch.object(ip_lib.IPDevice, "exists") as devex_fn,\
-                mock.patch.object(sys, "exit"),\
-                mock.patch.object(utils, "execute") as utilsexec_fn,\
-                mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\
-                mock.patch.object(self.agent, 'int_br') as int_br,\
-                mock.patch.object(ip_lib.IPWrapper, "add_veth") as addveth_fn,\
-                mock.patch.object(ip_lib.IpLinkCommand,
-                                  "delete") as linkdel_fn,\
-                mock.patch.object(ip_lib.IpLinkCommand, "set_up"),\
-                mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"),\
-                mock.patch.object(ovs_lib.BaseOVS, "get_bridges") as get_br_fn:
-            devex_fn.return_value = True
-            parent = mock.MagicMock()
-            parent.attach_mock(utilsexec_fn, 'utils_execute')
-            parent.attach_mock(linkdel_fn, 'link_delete')
-            parent.attach_mock(addveth_fn, 'add_veth')
-            addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"),
-                                       ip_lib.IPDevice("phy-br-eth1"))
-            phys_br = phys_br_cls()
-            phys_br.add_port.return_value = "phys_veth_ofport"
-            int_br.add_port.return_value = "int_veth_ofport"
-            get_br_fn.return_value = ["br-eth"]
-            self.agent.setup_physical_bridges({"physnet1": "br-eth"})
-            expected_calls = [mock.call.link_delete(),
-                              mock.call.utils_execute(['udevadm',
-                                                       'settle',
-                                                       '--timeout=10']),
-                              mock.call.add_veth('int-br-eth',
-                                                 'phy-br-eth')]
-            parent.assert_has_calls(expected_calls, any_order=False)
-            self.assertEqual(self.agent.int_ofports["physnet1"],
-                             "int_veth_ofport")
-            self.assertEqual(self.agent.phys_ofports["physnet1"],
-                             "phys_veth_ofport")
-
-    def test_setup_physical_bridges_change_from_veth_to_patch_conf(self):
-        with mock.patch.object(sys, "exit"),\
-                mock.patch.object(utils, "execute"),\
-                mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\
-                mock.patch.object(self.agent, 'int_br') as int_br,\
-                mock.patch.object(self.agent.int_br, 'db_get_val',
-                                  return_value='veth'):
-            phys_br = phys_br_cls()
-            parent = mock.MagicMock()
-            parent.attach_mock(phys_br_cls, 'phys_br_cls')
-            parent.attach_mock(phys_br, 'phys_br')
-            parent.attach_mock(int_br, 'int_br')
-            phys_br.add_patch_port.return_value = "phy_ofport"
-            int_br.add_patch_port.return_value = "int_ofport"
-            self.agent.setup_physical_bridges({"physnet1": "br-eth"})
-            expected_calls = [
-                mock.call.phys_br_cls('br-eth'),
-                mock.call.phys_br.create(),
-                mock.call.phys_br.setup_controllers(mock.ANY),
-                mock.call.phys_br.setup_default_table(),
-                mock.call.int_br.delete_port('int-br-eth'),
-                mock.call.phys_br.delete_port('phy-br-eth'),
-                mock.call.int_br.add_patch_port('int-br-eth',
-                                                constants.NONEXISTENT_PEER),
-                mock.call.phys_br.add_patch_port('phy-br-eth',
-                                                 constants.NONEXISTENT_PEER),
-                mock.call.int_br.drop_port(in_port='int_ofport'),
-                mock.call.phys_br.drop_port(in_port='phy_ofport'),
-                mock.call.int_br.set_db_attribute('Interface', 'int-br-eth',
-                                                  'options:peer',
-                                                  'phy-br-eth'),
-                mock.call.phys_br.set_db_attribute('Interface', 'phy-br-eth',
-                                                   'options:peer',
-                                                   'int-br-eth'),
-            ]
-            parent.assert_has_calls(expected_calls)
-            self.assertEqual(self.agent.int_ofports["physnet1"],
-                             "int_ofport")
-            self.assertEqual(self.agent.phys_ofports["physnet1"],
-                             "phy_ofport")
-
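-    # setup_tunnel_br() creates the tunnel bridge if needed and patches it
-    # to the integration bridge; when the patch ports already exist and
-    # drop_flows_on_start is set, existing flows are cleared instead of
-    # re-adding the ports, as the two tests below cover.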
-    def test_setup_tunnel_br(self):
-        self.tun_br = mock.Mock()
-        with mock.patch.object(self.agent.int_br,
-                               "add_patch_port",
-                               return_value=1) as int_patch_port,\
-                mock.patch.object(self.agent.tun_br,
-                                  "add_patch_port",
-                                  return_value=1) as tun_patch_port,\
-                mock.patch.object(self.agent.tun_br, 'bridge_exists',
-                                  return_value=False),\
-                mock.patch.object(self.agent.tun_br, 'create') as create_tun,\
-                mock.patch.object(self.agent.tun_br,
-                                  'setup_controllers') as setup_controllers,\
-                mock.patch.object(self.agent.tun_br, 'port_exists',
-                                  return_value=False),\
-                mock.patch.object(self.agent.int_br, 'port_exists',
-                                  return_value=False),\
-                mock.patch.object(sys, "exit"):
-            self.agent.setup_tunnel_br(None)
-            self.agent.setup_tunnel_br()
-            self.assertTrue(create_tun.called)
-            self.assertTrue(setup_controllers.called)
-            self.assertTrue(int_patch_port.called)
-            self.assertTrue(tun_patch_port.called)
-
-    def test_setup_tunnel_br_ports_exits_drop_flows(self):
-        cfg.CONF.set_override('drop_flows_on_start', True, 'AGENT')
-        with mock.patch.object(self.agent.tun_br, 'port_exists',
-                               return_value=True),\
-                mock.patch.object(self.agent, 'tun_br'),\
-                mock.patch.object(self.agent.int_br, 'port_exists',
-                                  return_value=True),\
-                mock.patch.object(self.agent.tun_br, 'setup_controllers'),\
-                mock.patch.object(self.agent, 'patch_tun_ofport', new=2),\
-                mock.patch.object(self.agent, 'patch_int_ofport', new=2),\
-                mock.patch.object(self.agent.tun_br,
-                                  'delete_flows') as delete,\
-                mock.patch.object(self.agent.int_br,
-                                  "add_patch_port") as int_patch_port,\
-                mock.patch.object(self.agent.tun_br,
-                                  "add_patch_port") as tun_patch_port,\
-                mock.patch.object(sys, "exit"):
-            self.agent.setup_tunnel_br(None)
-            self.agent.setup_tunnel_br()
-            self.assertFalse(int_patch_port.called)
-            self.assertFalse(tun_patch_port.called)
-            self.assertTrue(delete.called)
-
-    def test_setup_tunnel_port(self):
-        self.agent.tun_br = mock.Mock()
-        self.agent.l2_pop = False
-        self.agent.vxlan_udp_port = 8472
-        self.agent.tun_br_ofports['vxlan'] = {}
-        with mock.patch.object(self.agent.tun_br,
-                               "add_tunnel_port",
-                               return_value='6') as add_tun_port_fn,\
-                mock.patch.object(self.agent.tun_br, "add_flow"):
-            self.agent._setup_tunnel_port(self.agent.tun_br, 'portname',
-                                          '1.2.3.4', 'vxlan')
-            self.assertTrue(add_tun_port_fn.called)
-
-    def test_port_unbound(self):
-        with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn:
-            self.agent.enable_tunneling = True
-            lvm = mock.Mock()
-            lvm.network_type = "gre"
-            lvm.vif_ports = {"vif1": mock.Mock()}
-            self.agent.local_vlan_map["netuid12345"] = lvm
-            self.agent.port_unbound("vif1", "netuid12345")
-            self.assertTrue(reclvl_fn.called)
-
-            lvm.vif_ports = {}
-            self.agent.port_unbound("vif1", "netuid12345")
-            self.assertEqual(reclvl_fn.call_count, 2)
-
-            lvm.vif_ports = {"vif1": mock.Mock()}
-            self.agent.port_unbound("vif3", "netuid12345")
-            self.assertEqual(reclvl_fn.call_count, 2)
-
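-    # The l2population fdb entries handled below map a network id to
-    # {'network_type', 'segment_id', 'ports': {remote_ip: [PortInfo, ...]}};
-    # FLOODING_ENTRY adjusts the flood-to-tun output set, unicast entries
-    # program per-MAC flows and the ARP responder, and entries for unknown
-    # networks or for the agent's own local_ip are ignored.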
-    def _prepare_l2_pop_ofports(self):
-        lvm1 = mock.Mock()
-        lvm1.network_type = 'gre'
-        lvm1.vlan = 'vlan1'
-        lvm1.segmentation_id = 'seg1'
-        lvm1.tun_ofports = set(['1'])
-        lvm2 = mock.Mock()
-        lvm2.network_type = 'gre'
-        lvm2.vlan = 'vlan2'
-        lvm2.segmentation_id = 'seg2'
-        lvm2.tun_ofports = set(['1', '2'])
-        self.agent.local_vlan_map = {'net1': lvm1, 'net2': lvm2}
-        self.agent.tun_br_ofports = {'gre':
-                                     {'1.1.1.1': '1', '2.2.2.2': '2'}}
-        self.agent.arp_responder_enabled = True
-
-    def test_fdb_ignore_network(self):
-        self._prepare_l2_pop_ofports()
-        fdb_entry = {'net3': {}}
-        with mock.patch.object(self.agent.tun_br, 'add_flow') as add_flow_fn,\
-                mock.patch.object(self.agent.tun_br,
-                                  'delete_flows') as del_flow_fn,\
-                mock.patch.object(self.agent,
-                                  '_setup_tunnel_port') as add_tun_fn,\
-                mock.patch.object(self.agent,
-                                  'cleanup_tunnel_port') as clean_tun_fn:
-            self.agent.fdb_add(None, fdb_entry)
-            self.assertFalse(add_flow_fn.called)
-            self.assertFalse(add_tun_fn.called)
-            self.agent.fdb_remove(None, fdb_entry)
-            self.assertFalse(del_flow_fn.called)
-            self.assertFalse(clean_tun_fn.called)
-
-    def test_fdb_ignore_self(self):
-        self._prepare_l2_pop_ofports()
-        self.agent.local_ip = 'agent_ip'
-        fdb_entry = {'net2':
-                     {'network_type': 'gre',
-                      'segment_id': 'tun2',
-                      'ports':
-                      {'agent_ip':
-                       [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1),
-                        n_const.FLOODING_ENTRY]}}}
-        with mock.patch.object(self.agent.tun_br,
-                               "deferred") as defer_fn:
-            self.agent.fdb_add(None, fdb_entry)
-            self.assertFalse(defer_fn.called)
-
-            self.agent.fdb_remove(None, fdb_entry)
-            self.assertFalse(defer_fn.called)
-
-    def test_fdb_add_flows(self):
-        self._prepare_l2_pop_ofports()
-        fdb_entry = {'net1':
-                     {'network_type': 'gre',
-                      'segment_id': 'tun1',
-                      'ports':
-                      {'2.2.2.2':
-                       [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1),
-                        n_const.FLOODING_ENTRY]}}}
-
-        with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\
-                mock.patch.object(self.agent,
-                                  '_setup_tunnel_port',
-                                  autospec=True) as add_tun_fn:
-            self.agent.fdb_add(None, fdb_entry)
-            self.assertFalse(add_tun_fn.called)
-            deferred_br_call = mock.call.deferred().__enter__()
-            expected_calls = [
-                deferred_br_call.install_arp_responder('vlan1', FAKE_IP1,
-                                                       FAKE_MAC),
-                deferred_br_call.install_unicast_to_tun('vlan1', 'seg1', '2',
-                                                        FAKE_MAC),
-                deferred_br_call.install_flood_to_tun('vlan1', 'seg1',
-                                                      set(['1', '2'])),
-            ]
-            tun_br.assert_has_calls(expected_calls)
-
-    def test_fdb_del_flows(self):
-        self._prepare_l2_pop_ofports()
-        fdb_entry = {'net2':
-                     {'network_type': 'gre',
-                      'segment_id': 'tun2',
-                      'ports':
-                      {'2.2.2.2':
-                       [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1),
-                        n_const.FLOODING_ENTRY]}}}
-        with mock.patch.object(self.agent, 'tun_br', autospec=True) as br_tun:
-            self.agent.fdb_remove(None, fdb_entry)
-            deferred_br_call = mock.call.deferred().__enter__()
-            expected_calls = [
-                mock.call.deferred(),
-                mock.call.deferred().__enter__(),
-                deferred_br_call.delete_arp_responder('vlan2', FAKE_IP1),
-                deferred_br_call.delete_unicast_to_tun('vlan2', FAKE_MAC),
-                deferred_br_call.install_flood_to_tun('vlan2', 'seg2',
-                                                      set(['1'])),
-                deferred_br_call.delete_port('gre-02020202'),
-                deferred_br_call.cleanup_tunnel_port('2'),
-                mock.call.deferred().__exit__(None, None, None),
-            ]
-            br_tun.assert_has_calls(expected_calls)
-
-    def test_fdb_add_port(self):
-        self._prepare_l2_pop_ofports()
-        fdb_entry = {'net1':
-                     {'network_type': 'gre',
-                      'segment_id': 'tun1',
-                      'ports': {'1.1.1.1': [l2pop_rpc.PortInfo(FAKE_MAC,
-                                                               FAKE_IP1)]}}}
-        with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\
-                mock.patch.object(self.agent,
-                                  '_setup_tunnel_port') as add_tun_fn:
-            self.agent.fdb_add(None, fdb_entry)
-            self.assertFalse(add_tun_fn.called)
-            fdb_entry['net1']['ports']['10.10.10.10'] = [
-                l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)]
-            self.agent.fdb_add(None, fdb_entry)
-            deferred_br = tun_br.deferred().__enter__()
-            add_tun_fn.assert_called_with(
-                deferred_br, 'gre-0a0a0a0a', '10.10.10.10', 'gre')
-
-    def test_fdb_del_port(self):
-        self._prepare_l2_pop_ofports()
-        fdb_entry = {'net2':
-                     {'network_type': 'gre',
-                      'segment_id': 'tun2',
-                      'ports': {'2.2.2.2': [n_const.FLOODING_ENTRY]}}}
-        with mock.patch.object(self.agent.tun_br, 'deferred') as defer_fn,\
-                mock.patch.object(self.agent.tun_br,
-                                  'delete_port') as delete_port_fn:
-            self.agent.fdb_remove(None, fdb_entry)
-            deferred_br = defer_fn().__enter__()
-            deferred_br.delete_port.assert_called_once_with('gre-02020202')
-            self.assertFalse(delete_port_fn.called)
-
-    def test_fdb_update_chg_ip(self):
-        self._prepare_l2_pop_ofports()
-        fdb_entries = {'chg_ip':
-                       {'net1':
-                        {'agent_ip':
-                         {'before': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)],
-                          'after': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP2)]}}}}
-        with mock.patch.object(self.agent.tun_br, 'deferred') as deferred_fn:
-            self.agent.fdb_update(None, fdb_entries)
-            deferred_br = deferred_fn().__enter__()
-            deferred_br.assert_has_calls([
-                mock.call.install_arp_responder('vlan1', FAKE_IP2, FAKE_MAC),
-                mock.call.delete_arp_responder('vlan1', FAKE_IP1)
-            ])
-
-    def test_del_fdb_flow_idempotency(self):
-        lvm = mock.Mock()
-        lvm.network_type = 'gre'
-        lvm.vlan = 'vlan1'
-        lvm.segmentation_id = 'seg1'
-        lvm.tun_ofports = set(['1', '2'])
-        with mock.patch.object(self.agent.tun_br, 'mod_flow') as mod_flow_fn,\
-                mock.patch.object(self.agent.tun_br,
-                                  'delete_flows') as delete_flows_fn:
-            self.agent.del_fdb_flow(self.agent.tun_br, n_const.FLOODING_ENTRY,
-                                    '1.1.1.1', lvm, '3')
-            self.assertFalse(mod_flow_fn.called)
-            self.assertFalse(delete_flows_fn.called)
-
-    def test_recl_lv_port_to_preserve(self):
-        self._prepare_l2_pop_ofports()
-        self.agent.l2_pop = True
-        self.agent.enable_tunneling = True
-        with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br:
-            self.agent.reclaim_local_vlan('net1')
-            self.assertFalse(tun_br.cleanup_tunnel_port.called)
-
-    def test_recl_lv_port_to_remove(self):
-        self._prepare_l2_pop_ofports()
-        self.agent.l2_pop = True
-        self.agent.enable_tunneling = True
-        with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br:
-            self.agent.reclaim_local_vlan('net2')
-            tun_br.delete_port.assert_called_once_with('gre-02020202')
-
-    def test_daemon_loop_uses_polling_manager(self):
-        with mock.patch(
-            'neutron.agent.common.polling.get_polling_manager') as mock_get_pm:
-            with mock.patch.object(self.agent, 'rpc_loop') as mock_loop:
-                self.agent.daemon_loop()
-        mock_get_pm.assert_called_with(True,
-                                       constants.DEFAULT_OVSDBMON_RESPAWN)
-        mock_loop.assert_called_once_with(polling_manager=mock.ANY)
-
-    def test_setup_tunnel_port_invalid_ofport(self):
-        with mock.patch.object(
-            self.agent.tun_br,
-            'add_tunnel_port',
-            return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\
-                mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn:
-            ofport = self.agent._setup_tunnel_port(
-                self.agent.tun_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE)
-            add_tunnel_port_fn.assert_called_once_with(
-                'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
-                self.agent.vxlan_udp_port, self.agent.dont_fragment,
-                self.agent.tunnel_csum)
-            log_error_fn.assert_called_once_with(
-                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
-                {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
-            self.assertEqual(ofport, 0)
-
-    def test_setup_tunnel_port_error_negative_df_disabled(self):
-        with mock.patch.object(
-            self.agent.tun_br,
-            'add_tunnel_port',
-            return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\
-                mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn:
-            self.agent.dont_fragment = False
-            self.agent.tunnel_csum = False
-            ofport = self.agent._setup_tunnel_port(
-                self.agent.tun_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE)
-            add_tunnel_port_fn.assert_called_once_with(
-                'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
-                self.agent.vxlan_udp_port, self.agent.dont_fragment,
-                self.agent.tunnel_csum)
-            log_error_fn.assert_called_once_with(
-                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
-                {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
-            self.assertEqual(ofport, 0)
-
-    def test_setup_tunnel_port_error_negative_tunnel_csum(self):
-        with mock.patch.object(
-            self.agent.tun_br,
-            'add_tunnel_port',
-            return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\
-                mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn:
-            self.agent.dont_fragment = True
-            self.agent.tunnel_csum = True
-            ofport = self.agent._setup_tunnel_port(
-                self.agent.tun_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE)
-            add_tunnel_port_fn.assert_called_once_with(
-                'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
-                self.agent.vxlan_udp_port, self.agent.dont_fragment,
-                self.agent.tunnel_csum)
-            log_error_fn.assert_called_once_with(
-                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
-                {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
-            self.assertEqual(ofport, 0)
-
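-    # Tunnel port names embed the remote IP as fixed-width hex after the
-    # tunnel-type prefix, e.g. '10.10.10.10' -> 'gre-0a0a0a0a' and
-    # '100.100.100.100' -> 'vxlan-64646464', which the expectations below
-    # rely on.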
-    def test_tunnel_sync_with_ml2_plugin(self):
-        fake_tunnel_details = {'tunnels': [{'ip_address': '100.101.31.15'}]}
-        with mock.patch.object(self.agent.plugin_rpc,
-                               'tunnel_sync',
-                               return_value=fake_tunnel_details),\
-                mock.patch.object(
-                    self.agent,
-                    '_setup_tunnel_port') as _setup_tunnel_port_fn,\
-                mock.patch.object(self.agent,
-                                  'cleanup_stale_flows') as cleanup:
-            self.agent.tunnel_types = ['vxlan']
-            self.agent.tunnel_sync()
-            expected_calls = [mock.call(self.agent.tun_br, 'vxlan-64651f0f',
-                                        '100.101.31.15', 'vxlan')]
-            _setup_tunnel_port_fn.assert_has_calls(expected_calls)
-            self.assertEqual([], cleanup.mock_calls)
-
-    def test_tunnel_sync_invalid_ip_address(self):
-        fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'},
-                                           {'ip_address': '100.100.100.100'}]}
-        with mock.patch.object(self.agent.plugin_rpc,
-                               'tunnel_sync',
-                               return_value=fake_tunnel_details),\
-                mock.patch.object(
-                    self.agent,
-                    '_setup_tunnel_port') as _setup_tunnel_port_fn,\
-                mock.patch.object(self.agent,
-                                  'cleanup_stale_flows') as cleanup:
-            self.agent.tunnel_types = ['vxlan']
-            self.agent.tunnel_sync()
-            _setup_tunnel_port_fn.assert_called_once_with(self.agent.tun_br,
-                                                          'vxlan-64646464',
-                                                          '100.100.100.100',
-                                                          'vxlan')
-            self.assertEqual([], cleanup.mock_calls)
-
-    def test_tunnel_update(self):
-        kwargs = {'tunnel_ip': '10.10.10.10',
-                  'tunnel_type': 'gre'}
-        self.agent._setup_tunnel_port = mock.Mock()
-        self.agent.enable_tunneling = True
-        self.agent.tunnel_types = ['gre']
-        self.agent.l2_pop = False
-        self.agent.tunnel_update(context=None, **kwargs)
-        expected_calls = [
-            mock.call(self.agent.tun_br, 'gre-0a0a0a0a', '10.10.10.10', 'gre')]
-        self.agent._setup_tunnel_port.assert_has_calls(expected_calls)
-
-    def test_tunnel_delete(self):
-        kwargs = {'tunnel_ip': '10.10.10.10',
-                  'tunnel_type': 'gre'}
-        self.agent.enable_tunneling = True
-        self.agent.tunnel_types = ['gre']
-        self.agent.tun_br_ofports = {'gre': {'10.10.10.10': '1'}}
-        with mock.patch.object(
-            self.agent, 'cleanup_tunnel_port'
-        ) as clean_tun_fn:
-            self.agent.tunnel_delete(context=None, **kwargs)
-            self.assertTrue(clean_tun_fn.called)
-
-    def _test_ovs_status(self, *args):
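-        # Simulate two polling iterations: the first reports tap2 added,
-        # the second reports tap0 removed; ancillary bridges report no
-        # changes in either iteration.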
-        reply2 = {'current': set(['tap0']),
-                  'added': set(['tap2']),
-                  'removed': set([])}
-
-        reply3 = {'current': set(['tap2']),
-                  'added': set([]),
-                  'removed': set(['tap0'])}
-
-        reply_ancillary = {'current': set([]),
-                           'added': set([]),
-                           'removed': set([])}
-
-        with mock.patch.object(async_process.AsyncProcess, "_spawn"),\
-                mock.patch.object(async_process.AsyncProcess, "start"),\
-                mock.patch.object(async_process.AsyncProcess, "stop"),\
-                mock.patch.object(log.KeywordArgumentAdapter,
-                                  'exception') as log_exception,\
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'process_ports_events') as process_p_events,\
-                mock.patch.object(
-                    self.mod_agent.OVSNeutronAgent,
-                    'process_network_ports') as process_network_ports,\
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'check_ovs_status') as check_ovs_status,\
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'setup_integration_br') as setup_int_br,\
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'setup_physical_bridges') as setup_phys_br,\
-                mock.patch.object(time, 'sleep'),\
-                mock.patch.object(
-                    self.mod_agent.OVSNeutronAgent,
-                    'update_stale_ofport_rules') as update_stale, \
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'cleanup_stale_flows') as cleanup:
-            log_exception.side_effect = Exception(
-                'Fake exception to get out of the loop')
-            devices_not_ready = set()
-            process_p_events.side_effect = [(reply2, reply_ancillary,
-                                             devices_not_ready),
-                                            (reply3, reply_ancillary,
-                                             devices_not_ready)]
-            process_network_ports.side_effect = [
-                False, Exception('Fake exception to get out of the loop')]
-            check_ovs_status.side_effect = args
-            try:
-                self.agent.daemon_loop()
-            except Exception:
-                pass
-
-            process_p_events.assert_has_calls([
-                mock.call({'removed': [], 'added': []}, set(), set(), set(),
-                          set()),
-                mock.call({'removed': [], 'added': []}, set(['tap0']), set(),
-                          set(), set())
-            ])
-
-            process_network_ports.assert_has_calls([
-                mock.call(reply2, False),
-                mock.call(reply3, True)
-            ])
-            cleanup.assert_called_once_with()
-            self.assertTrue(update_stale.called)
-            # Verify that the OVS restart triggered in the loop
-            # re-set up the bridges.
-            setup_int_br.assert_has_calls([mock.call()])
-            setup_phys_br.assert_has_calls([mock.call({})])
-
-    def test_ovs_status(self):
-        self._test_ovs_status(constants.OVS_NORMAL,
-                              constants.OVS_DEAD,
-                              constants.OVS_RESTARTED)
-        # OVS is not reported as DEAD on some exceptions,
-        # e.g. a DBConnectionError.
-        self._test_ovs_status(constants.OVS_NORMAL,
-                              constants.OVS_RESTARTED)
-
-    def test_rpc_loop_fail_to_process_network_ports_keep_flows(self):
-        with mock.patch.object(async_process.AsyncProcess, "_spawn"),\
-                mock.patch.object(async_process.AsyncProcess, "start"),\
-                mock.patch.object(async_process.AsyncProcess, "stop"),\
-                mock.patch.object(
-                    self.mod_agent.OVSNeutronAgent,
-                    'process_network_ports') as process_network_ports,\
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'check_ovs_status') as check_ovs_status,\
-                mock.patch.object(time, 'sleep'),\
-                mock.patch.object(
-                    self.mod_agent.OVSNeutronAgent,
-                    'update_stale_ofport_rules') as update_stale, \
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'cleanup_stale_flows') as cleanup,\
-                mock.patch.object(
-                    self.mod_agent.OVSNeutronAgent,
-                    '_check_and_handle_signal') as check_and_handle_signal:
-            process_network_ports.return_value = True
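-            # A True return from process_network_ports presumably marks the
-            # iteration as failed (resync needed), so cleanup_stale_flows
-            # must not run: the flows may still be in use.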
-            check_ovs_status.return_value = constants.OVS_NORMAL
-            check_and_handle_signal.side_effect = [True, False]
-            self.agent.daemon_loop()
-            self.assertTrue(update_stale.called)
-            self.assertFalse(cleanup.called)
-
-    def test_set_rpc_timeout(self):
-        self.agent._handle_sigterm(None, None)
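-        # On SIGTERM the agent applies its quitting_rpc_timeout (presumably
-        # the option's default of 10 seconds here) to every RPC client so
-        # shutdown is not blocked by slow RPC calls.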
-        for rpc_client in (self.agent.plugin_rpc.client,
-                           self.agent.sg_plugin_rpc.client,
-                           self.agent.dvr_plugin_rpc.client,
-                           self.agent.state_rpc.client):
-            self.assertEqual(10, rpc_client.timeout)
-
-    def test_set_rpc_timeout_no_value(self):
-        self.agent.quitting_rpc_timeout = None
-        with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc:
-            self.agent._handle_sigterm(None, None)
-        self.assertFalse(mock_set_rpc.called)
-
-    def test_arp_spoofing_network_port(self):
-        int_br = mock.create_autospec(self.agent.int_br)
-        self.agent.setup_arp_spoofing_protection(
-            int_br, FakeVif(),
-            {'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF})
-        self.assertTrue(int_br.delete_arp_spoofing_protection.called)
-        self.assertFalse(int_br.install_arp_spoofing_protection.called)
-
-    def test_arp_spoofing_port_security_disabled(self):
-        int_br = mock.create_autospec(self.agent.int_br)
-        self.agent.setup_arp_spoofing_protection(
-            int_br, FakeVif(), {'port_security_enabled': False})
-        self.assertTrue(int_br.delete_arp_spoofing_protection.called)
-        self.assertFalse(int_br.install_arp_spoofing_protection.called)
-
-    def test_arp_spoofing_basic_rule_setup(self):
-        vif = FakeVif()
-        fake_details = {'fixed_ips': [], 'device_owner': 'nobody'}
-        self.agent.prevent_arp_spoofing = True
-        int_br = mock.create_autospec(self.agent.int_br)
-        self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details)
-        self.assertEqual(
-            [mock.call(port=vif.ofport)],
-            int_br.delete_arp_spoofing_allow_rules.mock_calls)
-        self.assertEqual(
-            [mock.call(ip_addresses=set(), port=vif.ofport)],
-            int_br.install_arp_spoofing_protection.mock_calls)
-
-    def test_arp_spoofing_basic_rule_setup_fixed_ipv6(self):
-        vif = FakeVif()
-        fake_details = {'fixed_ips': [{'ip_address': 'fdf8:f53b:82e4::1'}],
-                        'device_owner': 'nobody'}
-        self.agent.prevent_arp_spoofing = True
-        br = mock.create_autospec(self.agent.int_br)
-        self.agent.setup_arp_spoofing_protection(br, vif, fake_details)
-        self.assertEqual(
-            [mock.call(port=vif.ofport)],
-            br.delete_arp_spoofing_allow_rules.mock_calls)
-        self.assertTrue(br.install_icmpv6_na_spoofing_protection.called)
-
-    def test_arp_spoofing_fixed_and_allowed_addresses(self):
-        vif = FakeVif()
-        fake_details = {
-            'device_owner': 'nobody',
-            'fixed_ips': [{'ip_address': '192.168.44.100'},
-                          {'ip_address': '192.168.44.101'}],
-            'allowed_address_pairs': [{'ip_address': '192.168.44.102/32'},
-                                      {'ip_address': '192.168.44.103/32'}]
-        }
-        self.agent.prevent_arp_spoofing = True
-        int_br = mock.create_autospec(self.agent.int_br)
-        self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details)
-        # make sure all addresses are allowed
-        addresses = {'192.168.44.100', '192.168.44.101', '192.168.44.102/32',
-                     '192.168.44.103/32'}
-        self.assertEqual(
-            [mock.call(port=vif.ofport, ip_addresses=addresses)],
-            int_br.install_arp_spoofing_protection.mock_calls)
-
-    def test_arp_spoofing_fixed_and_allowed_addresses_ipv6(self):
-        vif = FakeVif()
-        fake_details = {
-            'device_owner': 'nobody',
-            'fixed_ips': [{'ip_address': '2001:db8::1'},
-                          {'ip_address': '2001:db8::2'}],
-            'allowed_address_pairs': [{'ip_address': '2001:db8::200',
-                                       'mac_address': 'aa:22:33:44:55:66'}]
-        }
-        self.agent.prevent_arp_spoofing = True
-        int_br = mock.create_autospec(self.agent.int_br)
-        self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details)
-        # make sure all addresses are allowed including ipv6 LLAs
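-        # The link-local addresses are derived from the MACs via EUI-64:
-        # flip the universal/local bit and insert ff:fe in the middle,
-        # e.g. aa:22:33:44:55:66 -> fe80::a822:33ff:fe44:5566.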
-        addresses = {'2001:db8::1', '2001:db8::2', '2001:db8::200',
-                     'fe80::a822:33ff:fe44:5566', 'fe80::a8bb:ccff:fe11:2233'}
-        self.assertEqual(
-            [mock.call(port=vif.ofport, ip_addresses=addresses)],
-            int_br.install_icmpv6_na_spoofing_protection.mock_calls)
-
-    def test__get_ofport_moves(self):
-        previous = {'port1': 1, 'port2': 2}
-        current = {'port1': 5, 'port2': 2}
-        # we expect it to tell us port1 moved
-        expected = ['port1']
-        self.assertEqual(expected,
-                         self.agent._get_ofport_moves(current, previous))
-
-    def test_update_stale_ofport_rules_clears_old(self):
-        self.agent.prevent_arp_spoofing = True
-        self.agent.vifname_to_ofport_map = {'port1': 1, 'port2': 2}
-        self.agent.int_br = mock.Mock()
-        # simulate port1 was removed
-        newmap = {'port2': 2}
-        self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap
-        self.agent.update_stale_ofport_rules()
-        # rules matching port 1 should have been deleted
-        self.assertEqual(
-            [mock.call(port=1)],
-            self.agent.int_br.delete_arp_spoofing_protection.mock_calls)
-        # make sure the state was updated with the new map
-        self.assertEqual(newmap, self.agent.vifname_to_ofport_map)
-
-    def test_update_stale_ofport_rules_treats_moved(self):
-        self.agent.prevent_arp_spoofing = True
-        self.agent.vifname_to_ofport_map = {'port1': 1, 'port2': 2}
-        self.agent.treat_devices_added_or_updated = mock.Mock()
-        self.agent.int_br = mock.Mock()
-        # simulate port1 was moved
-        newmap = {'port2': 2, 'port1': 90}
-        self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap
-        ofport_changed_ports = self.agent.update_stale_ofport_rules()
-        self.assertEqual(['port1'], ofport_changed_ports)
-
-    def test__setup_tunnel_port_while_new_mapping_is_added(self):
-        """
-        Test that _setup_tunnel_port doesn't fail if new vlan mapping is
-        added in a different coroutine while iterating over existing mappings.
-        See bug 1449944 for more info.
-        """
-
-        def add_new_vlan_mapping(*args, **kwargs):
-            self.agent.local_vlan_map['bar'] = (
-                self.mod_agent.LocalVLANMapping(1, 2, 3, 4))
-        bridge = mock.Mock()
-        tunnel_type = 'vxlan'
-        self.agent.tun_br_ofports = {tunnel_type: dict()}
-        self.agent.l2_pop = False
-        self.agent.local_vlan_map = {
-            'foo': self.mod_agent.LocalVLANMapping(4, tunnel_type, 2, 1)}
-        bridge.install_flood_to_tun.side_effect = add_new_vlan_mapping
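-        # install_flood_to_tun mutating local_vlan_map mid-iteration mimics
-        # another coroutine adding a mapping; the agent is expected to
-        # iterate over a snapshot of the mappings so this does not raise.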
-        self.agent._setup_tunnel_port(bridge, 1, 2, tunnel_type=tunnel_type)
-        self.assertIn('bar', self.agent.local_vlan_map)
-
-    def test_setup_entry_for_arp_reply_ignores_ipv6_addresses(self):
-        self.agent.arp_responder_enabled = True
-        ip = '2001:db8::1'
-        br = mock.Mock()
-        self.agent.setup_entry_for_arp_reply(
-            br, 'add', mock.Mock(), mock.Mock(), ip)
-        self.assertFalse(br.install_arp_responder.called)
-
-
-class TestOvsNeutronAgentOFCtl(TestOvsNeutronAgent,
-                               ovs_test_base.OVSOFCtlTestBase):
-    def test_cleanup_stale_flows(self):
-        with mock.patch.object(self.agent.int_br, 'agent_uuid_stamp',
-                               new=1234),\
-            mock.patch.object(self.agent.int_br,
-                              'dump_flows_all_tables') as dump_flows,\
-                mock.patch.object(self.agent.int_br,
-                                  'delete_flows') as del_flow:
-            dump_flows.return_value = [
-                'cookie=0x4d2, duration=50.156s, table=0,actions=drop',
-                'cookie=0x4321, duration=54.143s, table=2, priority=0',
-                'cookie=0x2345, duration=50.125s, table=2, priority=0',
-                'cookie=0x4d2, duration=52.112s, table=3, actions=drop',
-            ]
-            self.agent.iter_num = 3
-            self.agent.cleanup_stale_flows()
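-            # agent_uuid_stamp is 1234 (0x4d2), so the 0x4d2 flows are
-            # current; the 0x4321 and 0x2345 flows are stale and are
-            # deleted with an exact cookie match (mask -1 = all bits set).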
-            expected = [
-                mock.call(cookie='0x4321/-1', table='2'),
-                mock.call(cookie='0x2345/-1', table='2'),
-            ]
-            self.assertEqual(expected, del_flow.mock_calls)
-
-
-class TestOvsNeutronAgentRyu(TestOvsNeutronAgent,
-                             ovs_test_base.OVSRyuTestBase):
-    def test_cleanup_stale_flows(self):
-        uint64_max = (1 << 64) - 1
-        with mock.patch.object(self.agent.int_br, 'agent_uuid_stamp',
-                               new=1234),\
-            mock.patch.object(self.agent.int_br,
-                              'dump_flows') as dump_flows,\
-                mock.patch.object(self.agent.int_br,
-                                  'delete_flows') as del_flow:
-            dump_flows.return_value = [
-                # mock ryu.ofproto.ofproto_v1_3_parser.OFPFlowStats
-                mock.Mock(cookie=1234, table_id=0),
-                mock.Mock(cookie=17185, table_id=2),
-                mock.Mock(cookie=9029, table_id=2),
-                mock.Mock(cookie=1234, table_id=3),
-            ]
-            self.agent.iter_num = 3
-            self.agent.cleanup_stale_flows()
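-            # Flows stamped with the agent_uuid_stamp (1234) are current;
-            # the others (17185 == 0x4321, 9029 == 0x2345) are stale and
-            # are deleted with a full 64-bit cookie mask for an exact match.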
-            expected = [mock.call(cookie=17185,
-                                  cookie_mask=uint64_max),
-                        mock.call(cookie=9029,
-                                  cookie_mask=uint64_max)]
-            del_flow.assert_has_calls(expected, any_order=True)
-            self.assertEqual(len(expected), len(del_flow.mock_calls))
-
-
-class AncillaryBridgesTest(object):
-
-    def setUp(self):
-        super(AncillaryBridgesTest, self).setUp()
-        notifier_p = mock.patch(NOTIFIER)
-        notifier_cls = notifier_p.start()
-        self.notifier = mock.Mock()
-        notifier_cls.return_value = self.notifier
-        cfg.CONF.set_default('firewall_driver',
-                             'neutron.agent.firewall.NoopFirewallDriver',
-                             group='SECURITYGROUP')
-        cfg.CONF.set_override('report_interval', 0, 'AGENT')
-        mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config',
-                   new_callable=mock.PropertyMock,
-                   return_value={}).start()
-
-    def _test_ancillary_bridges(self, bridges, ancillary):
-        device_ids = ancillary[:]
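-        # Each expected ancillary bridge id can be claimed exactly once:
-        # the side effect below consumes ids from this working copy.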
-
-        def pullup_side_effect(*args):
-            # Return the device_id if it exists; otherwise return None.
-            try:
-                device_ids.remove(args[0])
-                return args[0]
-            except Exception:
-                return None
-
-        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                               'setup_integration_br'),\
-                mock.patch('neutron.agent.linux.utils.get_interface_mac',
-                           return_value='00:00:00:00:00:01'),\
-                mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges',
-                           return_value=bridges),\
-                mock.patch('neutron.agent.common.ovs_lib.BaseOVS.'
-                           'get_bridge_external_bridge_id',
-                           side_effect=pullup_side_effect),\
-                mock.patch(
-                    'neutron.agent.common.ovs_lib.OVSBridge.'
-                    'get_ports_attributes',
-                    return_value=[]),\
-                mock.patch(
-                    'neutron.agent.common.ovs_lib.OVSBridge.get_vif_ports',
-                    return_value=[]):
-            self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
-                                                        cfg.CONF)
-            self.assertEqual(len(ancillary), len(self.agent.ancillary_brs))
-            if ancillary:
-                bridges = [br.br_name for br in self.agent.ancillary_brs]
-                for br in ancillary:
-                    self.assertIn(br, bridges)
-
-    def test_ancillary_bridges_single(self):
-        bridges = ['br-int', 'br-ex']
-        self._test_ancillary_bridges(bridges, ['br-ex'])
-
-    def test_ancillary_bridges_none(self):
-        bridges = ['br-int']
-        self._test_ancillary_bridges(bridges, [])
-
-    def test_ancillary_bridges_multiple(self):
-        bridges = ['br-int', 'br-ex1', 'br-ex2']
-        self._test_ancillary_bridges(bridges, ['br-ex1', 'br-ex2'])
-
-    def mock_scan_ancillary_ports(self, vif_port_set=None,
-                                  registered_ports=None, sync=False):
-        bridges = ['br-int', 'br-ex']
-        ancillary = ['br-ex']
-
-        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                               'setup_integration_br'), \
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  '_restore_local_vlan_map'), \
-                mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges',
-                           return_value=bridges), \
-                mock.patch('neutron.agent.common.ovs_lib.BaseOVS.'
-                           'get_bridge_external_bridge_id',
-                           side_effect=ancillary), \
-                mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
-                           'get_vif_port_set',
-                           return_value=vif_port_set):
-            self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
-                                                        cfg.CONF)
-            return self.agent.scan_ancillary_ports(registered_ports, sync)
-
-    def test_scan_ancillary_ports_returns_cur_only_for_unchanged_ports(self):
-        vif_port_set = set([1, 2])
-        registered_ports = set([1, 2])
-        expected = dict(current=vif_port_set)
-        actual = self.mock_scan_ancillary_ports(vif_port_set, registered_ports)
-        self.assertEqual(expected, actual)
-
-    def test_scan_ancillary_ports_returns_port_changes(self):
-        vif_port_set = set([1, 3])
-        registered_ports = set([1, 2])
-        expected = dict(current=vif_port_set, added=set([3]), removed=set([2]))
-        actual = self.mock_scan_ancillary_ports(vif_port_set, registered_ports)
-        self.assertEqual(expected, actual)
-
-    def test_scan_ancillary_ports_returns_port_changes_with_sync(self):
-        vif_port_set = set([1, 3])
-        registered_ports = set([1, 2])
-        expected = dict(current=vif_port_set, added=vif_port_set,
-                        removed=set([2]))
-        actual = self.mock_scan_ancillary_ports(vif_port_set, registered_ports,
-                                                sync=True)
-        self.assertEqual(expected, actual)
-
-
-class AncillaryBridgesTestOFCtl(AncillaryBridgesTest,
-                                ovs_test_base.OVSOFCtlTestBase):
-    pass
-
-
-class AncillaryBridgesTestRyu(AncillaryBridgesTest,
-                              ovs_test_base.OVSRyuTestBase):
-    pass
-
-
-class TestOvsDvrNeutronAgent(object):
-
-    def setUp(self):
-        super(TestOvsDvrNeutronAgent, self).setUp()
-        notifier_p = mock.patch(NOTIFIER)
-        notifier_cls = notifier_p.start()
-        self.notifier = mock.Mock()
-        notifier_cls.return_value = self.notifier
-        cfg.CONF.set_default('firewall_driver',
-                             'neutron.agent.firewall.NoopFirewallDriver',
-                             group='SECURITYGROUP')
-
-        mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config',
-                   new_callable=mock.PropertyMock,
-                   return_value={}).start()
-        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                               'setup_integration_br'),\
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'setup_ancillary_bridges',
-                                  return_value=[]),\
-                mock.patch('neutron.agent.linux.utils.get_interface_mac',
-                           return_value='00:00:00:00:00:01'),\
-                mock.patch(
-                    'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\
-                mock.patch('oslo_service.loopingcall.'
-                           'FixedIntervalLoopingCall',
-                           new=MockFixedIntervalLoopingCall),\
-                mock.patch(
-                    'neutron.agent.common.ovs_lib.OVSBridge.'
-                    'get_ports_attributes',
-                    return_value=[]),\
-                mock.patch(
-                    'neutron.agent.common.ovs_lib.OVSBridge.get_vif_ports',
-                    return_value=[]):
-            self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
-                                                        cfg.CONF)
-            self.agent.tun_br = self.br_tun_cls(br_name='br-tun')
-        self.agent.sg_agent = mock.Mock()
-
-    def _setup_for_dvr_test(self):
-        self._port = mock.Mock()
-        self._port.ofport = 10
-        self._port.vif_id = "1234-5678-90"
-        self._physical_network = 'physeth1'
-        self._old_local_vlan = None
-        self._segmentation_id = 2001
-        self.agent.enable_distributed_routing = True
-        self.agent.enable_tunneling = True
-        self.agent.patch_tun_ofport = 1
-        self.agent.patch_int_ofport = 2
-        self.agent.dvr_agent.local_ports = {}
-        self.agent.local_vlan_map = {}
-        self.agent.dvr_agent.enable_distributed_routing = True
-        self.agent.dvr_agent.enable_tunneling = True
-        self.agent.dvr_agent.patch_tun_ofport = 1
-        self.agent.dvr_agent.patch_int_ofport = 2
-        self.agent.dvr_agent.tun_br = mock.Mock()
-        self.agent.dvr_agent.phys_brs[self._physical_network] = mock.Mock()
-        self.agent.dvr_agent.bridge_mappings = {self._physical_network:
-                                                'br-eth1'}
-        self.agent.dvr_agent.int_ofports[self._physical_network] = 30
-        self.agent.dvr_agent.phys_ofports[self._physical_network] = 40
-        self.agent.dvr_agent.local_dvr_map = {}
-        self.agent.dvr_agent.registered_dvr_macs = set()
-        self.agent.dvr_agent.dvr_mac_address = 'aa:22:33:44:55:66'
-        self._net_uuid = 'my-net-uuid'
-        self._fixed_ips = [{'subnet_id': 'my-subnet-uuid',
-                            'ip_address': '1.1.1.1'}]
-        self._compute_port = mock.Mock()
-        self._compute_port.ofport = 20
-        self._compute_port.vif_id = "1234-5678-91"
-        self._compute_fixed_ips = [{'subnet_id': 'my-subnet-uuid',
-                                    'ip_address': '1.1.1.3'}]
-
-    @staticmethod
-    def _expected_port_bound(port, lvid, is_dvr=True):
-        resp = [
-            mock.call.db_get_val('Port', port.port_name, 'other_config'),
-            mock.call.set_db_attribute('Port', port.port_name, 'other_config',
-                                       mock.ANY),
-        ]
-        if is_dvr:
-            resp = [mock.call.get_vifs_by_ids([])] + resp
-        return resp
-
-    def _expected_install_dvr_process(self, lvid, port, ip_version,
-                                      gateway_ip, gateway_mac):
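-        # The DVR process flows differ per IP version: v4 matches on the
-        # gateway IP, v6 on the gateway MAC; both are followed by the
-        # common dvr_process flow keyed on the port's MAC.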
-        if ip_version == 4:
-            ipvx_calls = [
-                mock.call.install_dvr_process_ipv4(
-                    vlan_tag=lvid,
-                    gateway_ip=gateway_ip),
-            ]
-        else:
-            ipvx_calls = [
-                mock.call.install_dvr_process_ipv6(
-                    vlan_tag=lvid,
-                    gateway_mac=gateway_mac),
-            ]
-        return ipvx_calls + [
-            mock.call.install_dvr_process(
-                vlan_tag=lvid,
-                dvr_mac_address=self.agent.dvr_agent.dvr_mac_address,
-                vif_mac=port.vif_mac,
-            ),
-        ]
-
-    def _test_port_bound_for_dvr_on_vlan_network(self, device_owner,
-                                                 ip_version=4):
-        self._setup_for_dvr_test()
-        if ip_version == 4:
-            gateway_ip = '1.1.1.1'
-            cidr = '1.1.1.0/24'
-        else:
-            gateway_ip = '2001:100::1'
-            cidr = '2001:100::0/64'
-        self._port.vif_mac = gateway_mac = 'aa:bb:cc:11:22:33'
-        self._compute_port.vif_mac = '77:88:99:00:11:22'
-        physical_network = self._physical_network
-        segmentation_id = self._segmentation_id
-        network_type = p_const.TYPE_VLAN
-        int_br = mock.create_autospec(self.agent.int_br)
-        tun_br = mock.create_autospec(self.agent.tun_br)
-        phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
-        int_br.set_db_attribute.return_value = True
-        int_br.db_get_val.return_value = {}
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_subnet_for_dvr',
-                               return_value={'gateway_ip': gateway_ip,
-                                             'cidr': cidr,
-                                             'ip_version': ip_version,
-                                             'gateway_mac': gateway_mac}),\
-                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_ports_on_host_by_subnet',
-                                  return_value=[]),\
-                mock.patch.object(self.agent.dvr_agent.int_br,
-                                  'get_vif_port_by_id',
-                                  return_value=self._port),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.phys_brs,
-                                {physical_network: phys_br}),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.dvr_agent.phys_brs,
-                                {physical_network: phys_br}):
-            self.agent.port_bound(
-                self._port, self._net_uuid, network_type,
-                physical_network, segmentation_id, self._fixed_ips,
-                n_const.DEVICE_OWNER_DVR_INTERFACE, False)
-            phy_ofp = self.agent.dvr_agent.phys_ofports[physical_network]
-            int_ofp = self.agent.dvr_agent.int_ofports[physical_network]
-            lvid = self.agent.local_vlan_map[self._net_uuid].vlan
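-            # port_bound allocated a local VLAN (lvid) for the net; the
-            # physical bridge must translate between that lvid and the
-            # provider segmentation_id, with distributed=True since this
-            # is a DVR interface.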
-            expected_on_phys_br = [
-                mock.call.provision_local_vlan(
-                    port=phy_ofp,
-                    lvid=lvid,
-                    segmentation_id=segmentation_id,
-                    distributed=True,
-                ),
-            ] + self._expected_install_dvr_process(
-                port=self._port,
-                lvid=lvid,
-                ip_version=ip_version,
-                gateway_ip=gateway_ip,
-                gateway_mac=gateway_mac)
-            expected_on_int_br = [
-                mock.call.provision_local_vlan(
-                    port=int_ofp,
-                    lvid=lvid,
-                    segmentation_id=segmentation_id,
-                ),
-            ] + self._expected_port_bound(self._port, lvid)
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            self.assertEqual([], tun_br.mock_calls)
-            self.assertEqual(expected_on_phys_br, phys_br.mock_calls)
-            int_br.reset_mock()
-            tun_br.reset_mock()
-            phys_br.reset_mock()
-            self.agent.port_bound(self._compute_port, self._net_uuid,
-                                  network_type, physical_network,
-                                  segmentation_id,
-                                  self._compute_fixed_ips,
-                                  device_owner, False)
-            expected_on_int_br = [
-                mock.call.install_dvr_to_src_mac(
-                    network_type=network_type,
-                    gateway_mac=gateway_mac,
-                    dst_mac=self._compute_port.vif_mac,
-                    dst_port=self._compute_port.ofport,
-                    vlan_tag=segmentation_id,
-                ),
-            ] + self._expected_port_bound(self._compute_port, lvid, False)
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            self.assertEqual([], tun_br.mock_calls)
-            self.assertEqual([], phys_br.mock_calls)
-
-    def _test_port_bound_for_dvr_on_vxlan_network(self, device_owner,
-                                                  ip_version=4):
-        self._setup_for_dvr_test()
-        if ip_version == 4:
-            gateway_ip = '1.1.1.1'
-            cidr = '1.1.1.0/24'
-        else:
-            gateway_ip = '2001:100::1'
-            cidr = '2001:100::0/64'
-        network_type = p_const.TYPE_VXLAN
-        self._port.vif_mac = gateway_mac = 'aa:bb:cc:11:22:33'
-        self._compute_port.vif_mac = '77:88:99:00:11:22'
-        physical_network = self._physical_network
-        segmentation_id = self._segmentation_id
-        int_br = mock.create_autospec(self.agent.int_br)
-        tun_br = mock.create_autospec(self.agent.tun_br)
-        phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
-        int_br.set_db_attribute.return_value = True
-        int_br.db_get_val.return_value = {}
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_subnet_for_dvr',
-                               return_value={'gateway_ip': gateway_ip,
-                                             'cidr': cidr,
-                                             'ip_version': ip_version,
-                                             'gateway_mac': gateway_mac}),\
-                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_ports_on_host_by_subnet',
-                                  return_value=[]),\
-                mock.patch.object(self.agent.dvr_agent.int_br,
-                                  'get_vif_port_by_id',
-                                  return_value=self._port),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.phys_brs,
-                                {physical_network: phys_br}),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.dvr_agent.phys_brs,
-                                {physical_network: phys_br}):
-            self.agent.port_bound(
-                self._port, self._net_uuid, network_type,
-                physical_network, segmentation_id, self._fixed_ips,
-                n_const.DEVICE_OWNER_DVR_INTERFACE, False)
-            lvid = self.agent.local_vlan_map[self._net_uuid].vlan
-            expected_on_int_br = self._expected_port_bound(
-                self._port, lvid)
-            expected_on_tun_br = [
-                mock.call.provision_local_vlan(
-                    network_type=network_type,
-                    segmentation_id=segmentation_id,
-                    lvid=lvid,
-                    distributed=True),
-            ] + self._expected_install_dvr_process(
-                port=self._port,
-                lvid=lvid,
-                ip_version=ip_version,
-                gateway_ip=gateway_ip,
-                gateway_mac=gateway_mac)
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
-            self.assertEqual([], phys_br.mock_calls)
-            int_br.reset_mock()
-            tun_br.reset_mock()
-            phys_br.reset_mock()
-            self.agent.port_bound(self._compute_port, self._net_uuid,
-                                  network_type, physical_network,
-                                  segmentation_id,
-                                  self._compute_fixed_ips,
-                                  device_owner, False)
-            expected_on_int_br = [
-                mock.call.install_dvr_to_src_mac(
-                    network_type=network_type,
-                    gateway_mac=gateway_mac,
-                    dst_mac=self._compute_port.vif_mac,
-                    dst_port=self._compute_port.ofport,
-                    vlan_tag=lvid,
-                ),
-            ] + self._expected_port_bound(self._compute_port, lvid, False)
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            self.assertEqual([], tun_br.mock_calls)
-            self.assertEqual([], phys_br.mock_calls)
-
-    def test_port_bound_for_dvr_with_compute_ports(self):
-        self._test_port_bound_for_dvr_on_vlan_network(
-            device_owner=DEVICE_OWNER_COMPUTE)
-        self._test_port_bound_for_dvr_on_vlan_network(
-            device_owner=DEVICE_OWNER_COMPUTE, ip_version=6)
-        self._test_port_bound_for_dvr_on_vxlan_network(
-            device_owner=DEVICE_OWNER_COMPUTE)
-        self._test_port_bound_for_dvr_on_vxlan_network(
-            device_owner=DEVICE_OWNER_COMPUTE, ip_version=6)
-
-    def test_port_bound_for_dvr_with_lbaas_vip_ports(self):
-        self._test_port_bound_for_dvr_on_vlan_network(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
-        self._test_port_bound_for_dvr_on_vlan_network(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCER, ip_version=6)
-        self._test_port_bound_for_dvr_on_vxlan_network(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
-        self._test_port_bound_for_dvr_on_vxlan_network(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCER, ip_version=6)
-
-    def test_port_bound_for_dvr_with_lbaasv2_vip_ports(self):
-        self._test_port_bound_for_dvr_on_vlan_network(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2)
-        self._test_port_bound_for_dvr_on_vlan_network(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2, ip_version=6)
-        self._test_port_bound_for_dvr_on_vxlan_network(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2)
-        self._test_port_bound_for_dvr_on_vxlan_network(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2, ip_version=6)
-
-    def test_port_bound_for_dvr_with_dhcp_ports(self):
-        self._test_port_bound_for_dvr_on_vlan_network(
-            device_owner=n_const.DEVICE_OWNER_DHCP)
-        self._test_port_bound_for_dvr_on_vlan_network(
-            device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6)
-        self._test_port_bound_for_dvr_on_vxlan_network(
-            device_owner=n_const.DEVICE_OWNER_DHCP)
-        self._test_port_bound_for_dvr_on_vxlan_network(
-            device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6)
-
-    def test_port_bound_for_dvr_with_csnat_ports(self):
-        self._setup_for_dvr_test()
-        int_br = mock.create_autospec(self.agent.int_br)
-        tun_br = mock.create_autospec(self.agent.tun_br)
-        int_br.set_db_attribute.return_value = True
-        int_br.db_get_val.return_value = {}
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_subnet_for_dvr',
-                               return_value={'gateway_ip': '1.1.1.1',
-                               'cidr': '1.1.1.0/24',
-                               'ip_version': 4,
-                               'gateway_mac': 'aa:bb:cc:11:22:33'}),\
-                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_ports_on_host_by_subnet',
-                                  return_value=[]),\
-                mock.patch.object(self.agent.dvr_agent.int_br,
-                                  'get_vif_port_by_id',
-                                  return_value=self._port),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
-            self.agent.port_bound(
-                self._port, self._net_uuid, 'vxlan',
-                None, None, self._fixed_ips,
-                n_const.DEVICE_OWNER_ROUTER_SNAT,
-                False)
-            lvid = self.agent.local_vlan_map[self._net_uuid].vlan
-            expected_on_int_br = [
-                mock.call.install_dvr_to_src_mac(
-                    network_type='vxlan',
-                    gateway_mac='aa:bb:cc:11:22:33',
-                    dst_mac=self._port.vif_mac,
-                    dst_port=self._port.ofport,
-                    vlan_tag=lvid,
-                ),
-            ] + self._expected_port_bound(self._port, lvid, is_dvr=False)
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            expected_on_tun_br = [
-                mock.call.provision_local_vlan(
-                    network_type='vxlan',
-                    lvid=lvid,
-                    segmentation_id=None,
-                    distributed=True,
-                ),
-            ]
-            self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
-
-    def test_treat_devices_removed_for_dvr_interface(self):
-        self._test_treat_devices_removed_for_dvr_interface()
-        self._test_treat_devices_removed_for_dvr_interface(ip_version=6)
-        self._test_treat_devices_removed_for_dvr_interface(network_type='vlan')
-        self._test_treat_devices_removed_for_dvr_interface(ip_version=6,
-                                                           network_type='vlan')
-
-    def _test_treat_devices_removed_for_dvr_interface(
-            self, ip_version=4, network_type='vxlan'):
-        self._setup_for_dvr_test()
-        if ip_version == 4:
-            gateway_ip = '1.1.1.1'
-            cidr = '1.1.1.0/24'
-        else:
-            gateway_ip = '2001:100::1'
-            cidr = '2001:100::0/64'
-        gateway_mac = 'aa:bb:cc:11:22:33'
-        int_br = mock.create_autospec(self.agent.int_br)
-        tun_br = mock.create_autospec(self.agent.tun_br)
-        int_br.set_db_attribute.return_value = True
-        int_br.db_get_val.return_value = {}
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_subnet_for_dvr',
-                               return_value={'gateway_ip': gateway_ip,
-                               'cidr': cidr,
-                               'ip_version': ip_version,
-                               'gateway_mac': gateway_mac}),\
-                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_ports_on_host_by_subnet',
-                                  return_value=[]),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
-                mock.patch.object(self.agent.dvr_agent.int_br,
-                                  'get_vif_port_by_id',
-                                  return_value=self._port):
-            if network_type == 'vlan':
-                self.agent.port_bound(self._port, self._net_uuid,
-                                      network_type, self._physical_network,
-                                      self._segmentation_id,
-                                      self._compute_fixed_ips,
-                                      n_const.DEVICE_OWNER_DVR_INTERFACE,
-                                      False)
-            else:
-                self.agent.port_bound(
-                    self._port, self._net_uuid, 'vxlan',
-                    None, None, self._fixed_ips,
-                    n_const.DEVICE_OWNER_DVR_INTERFACE,
-                    False)
-                lvid = self.agent.local_vlan_map[self._net_uuid].vlan
-                self.assertEqual(self._expected_port_bound(self._port, lvid),
-                                 int_br.mock_calls)
-                expected_on_tun_br = [
-                    mock.call.provision_local_vlan(network_type='vxlan',
-                        lvid=lvid, segmentation_id=None, distributed=True),
-                ] + self._expected_install_dvr_process(
-                    port=self._port,
-                    lvid=lvid,
-                    ip_version=ip_version,
-                    gateway_ip=gateway_ip,
-                    gateway_mac=gateway_mac)
-                self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
-
-        int_br.reset_mock()
-        tun_br.reset_mock()
-        phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
-        with mock.patch.object(self.agent, 'reclaim_local_vlan'),\
-                mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
-                                  return_value={
-                                      'devices_up': [],
-                                      'devices_down': [self._port.vif_id],
-                                      'failed_devices_up': [],
-                                      'failed_devices_down': []}),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.phys_brs,
-                                {self._physical_network: phys_br}),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.dvr_agent.phys_brs,
-                                {self._physical_network: phys_br}):
-            self.agent.treat_devices_removed([self._port.vif_id])
-            lvid = self.agent.local_vlan_map[self._net_uuid].vlan
-            if ip_version == 4:
-                expected = [
-                    mock.call.delete_dvr_process_ipv4(
-                        vlan_tag=lvid,
-                        gateway_ip=gateway_ip),
-                ]
-            else:
-                expected = [
-                    mock.call.delete_dvr_process_ipv6(
-                        vlan_tag=lvid,
-                        gateway_mac=gateway_mac),
-                ]
-            expected.extend([
-                mock.call.delete_dvr_process(
-                    vlan_tag=lvid,
-                    vif_mac=self._port.vif_mac),
-            ])
-            if network_type == 'vlan':
-                self.assertEqual([], int_br.mock_calls)
-                self.assertEqual([], tun_br.mock_calls)
-                self.assertEqual(expected, phys_br.mock_calls)
-                self.assertEqual({}, self.agent.dvr_agent.local_ports)
-            else:
-                self.assertEqual([], int_br.mock_calls)
-                self.assertEqual(expected, tun_br.mock_calls)
-                self.assertEqual([], phys_br.mock_calls)
-
-    def _test_treat_devices_removed_for_dvr(self, device_owner, ip_version=4):
-        self._setup_for_dvr_test()
-        if ip_version == 4:
-            gateway_ip = '1.1.1.1'
-            cidr = '1.1.1.0/24'
-        else:
-            gateway_ip = '2001:100::1'
-            cidr = '2001:100::0/64'
-        gateway_mac = 'aa:bb:cc:11:22:33'
-        int_br = mock.create_autospec(self.agent.int_br)
-        tun_br = mock.create_autospec(self.agent.tun_br)
-        int_br.set_db_attribute.return_value = True
-        int_br.db_get_val.return_value = {}
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_subnet_for_dvr',
-                               return_value={'gateway_ip': gateway_ip,
-                               'cidr': cidr,
-                               'ip_version': ip_version,
-                               'gateway_mac': gateway_mac}),\
-                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_ports_on_host_by_subnet',
-                                  return_value=[]),\
-                mock.patch.object(self.agent.dvr_agent.int_br,
-                                  'get_vif_port_by_id',
-                                  return_value=self._port),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
-            self.agent.port_bound(
-                self._port, self._net_uuid, 'vxlan',
-                None, None, self._fixed_ips,
-                n_const.DEVICE_OWNER_DVR_INTERFACE,
-                False)
-            lvid = self.agent.local_vlan_map[self._net_uuid].vlan
-            self.assertEqual(
-                self._expected_port_bound(self._port, lvid),
-                int_br.mock_calls)
-            expected_on_tun_br = [
-                mock.call.provision_local_vlan(
-                    network_type='vxlan',
-                    segmentation_id=None,
-                    lvid=lvid,
-                    distributed=True),
-            ] + self._expected_install_dvr_process(
-                port=self._port,
-                lvid=lvid,
-                ip_version=ip_version,
-                gateway_ip=gateway_ip,
-                gateway_mac=gateway_mac)
-            self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
-            int_br.reset_mock()
-            tun_br.reset_mock()
-            self.agent.port_bound(self._compute_port,
-                                  self._net_uuid, 'vxlan',
-                                  None, None,
-                                  self._compute_fixed_ips,
-                                  device_owner, False)
-            self.assertEqual(
-                [
-                    mock.call.install_dvr_to_src_mac(
-                        network_type='vxlan',
-                        gateway_mac='aa:bb:cc:11:22:33',
-                        dst_mac=self._compute_port.vif_mac,
-                        dst_port=self._compute_port.ofport,
-                        vlan_tag=lvid,
-                    ),
-                ] + self._expected_port_bound(self._compute_port, lvid, False),
-                int_br.mock_calls)
-            self.assertEqual([], tun_br.mock_calls)
-
-        int_br.reset_mock()
-        tun_br.reset_mock()
-        with mock.patch.object(self.agent, 'reclaim_local_vlan'),\
-                mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
-                                  return_value={
-                                      'devices_up': [],
-                                      'devices_down': [
-                                          self._compute_port.vif_id],
-                                      'failed_devices_up': [],
-                                      'failed_devices_down': []}),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
-            self.agent.treat_devices_removed([self._compute_port.vif_id])
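-            # Removing the compute port must tear down its dvr_to_src_mac
-            # flow on the integration bridge.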
-            int_br.assert_has_calls([
-                mock.call.delete_dvr_to_src_mac(
-                    network_type='vxlan',
-                    vlan_tag=lvid,
-                    dst_mac=self._compute_port.vif_mac,
-                ),
-            ])
-            self.assertEqual([], tun_br.mock_calls)
-
-    def test_treat_devices_removed_for_dvr_with_compute_ports(self):
-        self._test_treat_devices_removed_for_dvr(
-            device_owner=DEVICE_OWNER_COMPUTE)
-        self._test_treat_devices_removed_for_dvr(
-            device_owner=DEVICE_OWNER_COMPUTE, ip_version=6)
-
-    def test_treat_devices_removed_for_dvr_with_lbaas_vip_ports(self):
-        self._test_treat_devices_removed_for_dvr(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
-        self._test_treat_devices_removed_for_dvr(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCER, ip_version=6)
-
-    def test_treat_devices_removed_for_dvr_with_lbaasv2_vip_ports(self):
-        self._test_treat_devices_removed_for_dvr(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2)
-        self._test_treat_devices_removed_for_dvr(
-            device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2, ip_version=6)
-
-    def test_treat_devices_removed_for_dvr_with_dhcp_ports(self):
-        self._test_treat_devices_removed_for_dvr(
-            device_owner=n_const.DEVICE_OWNER_DHCP)
-        self._test_treat_devices_removed_for_dvr(
-            device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6)
-
-    def test_treat_devices_removed_for_dvr_csnat_port(self):
-        self._setup_for_dvr_test()
-        gateway_mac = 'aa:bb:cc:11:22:33'
-        int_br = mock.create_autospec(self.agent.int_br)
-        tun_br = mock.create_autospec(self.agent.tun_br)
-        int_br.set_db_attribute.return_value = True
-        int_br.db_get_val.return_value = {}
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_subnet_for_dvr',
-                               return_value={'gateway_ip': '1.1.1.1',
-                               'cidr': '1.1.1.0/24',
-                               'ip_version': 4,
-                               'gateway_mac': gateway_mac}),\
-                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_ports_on_host_by_subnet',
-                                  return_value=[]),\
-                mock.patch.object(self.agent.dvr_agent.int_br,
-                                  'get_vif_port_by_id',
-                                  return_value=self._port),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
-            self.agent.port_bound(
-                self._port, self._net_uuid, 'vxlan',
-                None, None, self._fixed_ips,
-                n_const.DEVICE_OWNER_ROUTER_SNAT,
-                False)
-            lvid = self.agent.local_vlan_map[self._net_uuid].vlan
-            expected_on_int_br = [
-                mock.call.install_dvr_to_src_mac(
-                    network_type='vxlan',
-                    gateway_mac=gateway_mac,
-                    dst_mac=self._port.vif_mac,
-                    dst_port=self._port.ofport,
-                    vlan_tag=lvid,
-                ),
-            ] + self._expected_port_bound(self._port, lvid, is_dvr=False)
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            expected_on_tun_br = [
-                mock.call.provision_local_vlan(
-                    network_type='vxlan',
-                    lvid=lvid,
-                    segmentation_id=None,
-                    distributed=True,
-                ),
-            ]
-            self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
-
-        int_br.reset_mock()
-        tun_br.reset_mock()
-        with mock.patch.object(self.agent, 'reclaim_local_vlan'),\
-                mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
-                                  return_value={
-                                      'devices_up': [],
-                                      'devices_down': [self._port.vif_id],
-                                      'failed_devices_up': [],
-                                      'failed_devices_down': []}),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
-            self.agent.treat_devices_removed([self._port.vif_id])
-            expected_on_int_br = [
-                mock.call.delete_dvr_to_src_mac(
-                    network_type='vxlan',
-                    dst_mac=self._port.vif_mac,
-                    vlan_tag=lvid,
-                ),
-            ]
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            expected_on_tun_br = []
-            self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
-
-    def test_setup_dvr_flows_on_int_br(self):
-        self._setup_for_dvr_test()
-        int_br = mock.create_autospec(self.agent.int_br)
-        tun_br = mock.create_autospec(self.agent.tun_br)
-        with mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
-                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                                  'get_dvr_mac_address_list',
-                                  return_value=[{'host': 'cn1',
-                                  'mac_address': 'aa:bb:cc:dd:ee:ff'},
-                                  {'host': 'cn2',
-                                  'mac_address': '11:22:33:44:55:66'}]):
-            self.agent.dvr_agent.setup_dvr_flows_on_integ_br()
-            self.assertTrue(self.agent.dvr_agent.in_distributed_mode())
-            physical_networks = list(
-                self.agent.dvr_agent.bridge_mappings.keys())
-            ioport = self.agent.dvr_agent.int_ofports[physical_networks[0]]
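-            # The NORMAL flow is installed at priority 1; frames arriving
-            # from the physical bridge patch port are dropped at priority 2,
-            # presumably so remote DVR traffic is not flooded locally.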
-            expected_on_int_br = [
-                # setup_dvr_flows_on_integ_br
-                mock.call.setup_canary_table(),
-                mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC,
-                                       priority=1),
-                mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN,
-                                       priority=1),
-                mock.call.install_normal(table_id=constants.LOCAL_SWITCHING,
-                                         priority=1),
-                mock.call.install_drop(table_id=constants.LOCAL_SWITCHING,
-                                       priority=2,
-                                       in_port=ioport),
-            ]
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            self.assertEqual([], tun_br.mock_calls)
-
-    def test_get_dvr_mac_address(self):
-        self._setup_for_dvr_test()
-        self.agent.dvr_agent.dvr_mac_address = None
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_dvr_mac_address_by_host',
-                               return_value={
-                                   'host': 'cn1',
-                                   'mac_address': 'aa:22:33:44:55:66'}):
-            self.agent.dvr_agent.get_dvr_mac_address()
-            self.assertEqual('aa:22:33:44:55:66',
-                             self.agent.dvr_agent.dvr_mac_address)
-            self.assertTrue(self.agent.dvr_agent.in_distributed_mode())
-
-    def test_get_dvr_mac_address_exception(self):
-        self._setup_for_dvr_test()
-        self.agent.dvr_agent.dvr_mac_address = None
-        int_br = mock.create_autospec(self.agent.int_br)
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_dvr_mac_address_by_host',
-                               side_effect=oslo_messaging.RemoteError),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br):
-            self.agent.dvr_agent.get_dvr_mac_address()
-            self.assertIsNone(self.agent.dvr_agent.dvr_mac_address)
-            self.assertFalse(self.agent.dvr_agent.in_distributed_mode())
-            self.assertEqual([mock.call.install_normal()], int_br.mock_calls)
-
-    def test_get_dvr_mac_address_retried(self):
-        valid_entry = {'host': 'cn1', 'mac_address': 'aa:22:33:44:55:66'}
-        raise_timeout = oslo_messaging.MessagingTimeout()
-        # Raise a timeout on the first 2 calls to get_dvr_mac_address_by_host
-        self._setup_for_dvr_test()
-        self.agent.dvr_agent.dvr_mac_address = None
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_dvr_mac_address_by_host',
-                               side_effect=(raise_timeout, raise_timeout,
-                                            valid_entry)):
-            self.agent.dvr_agent.get_dvr_mac_address()
-            self.assertEqual('aa:22:33:44:55:66',
-                             self.agent.dvr_agent.dvr_mac_address)
-            self.assertTrue(self.agent.dvr_agent.in_distributed_mode())
-            self.assertEqual(3, self.agent.dvr_agent.plugin_rpc.
-                             get_dvr_mac_address_by_host.call_count)
-
-    def test_get_dvr_mac_address_retried_max(self):
-        raise_timeout = oslo_messaging.MessagingTimeout()
-        # Raise a timeout on every call so the agent gives up after the
-        # retry limit (currently 5 tries)
-        self._setup_for_dvr_test()
-        self.agent.dvr_agent.dvr_mac_address = None
-        int_br = mock.create_autospec(self.agent.int_br)
-        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
-                               'get_dvr_mac_address_by_host',
-                               side_effect=raise_timeout),\
-                mock.patch.object(utils, "execute"),\
-                mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br):
-            self.agent.dvr_agent.get_dvr_mac_address()
-            self.assertIsNone(self.agent.dvr_agent.dvr_mac_address)
-            self.assertFalse(self.agent.dvr_agent.in_distributed_mode())
-            self.assertEqual(5, self.agent.dvr_agent.plugin_rpc.
-                             get_dvr_mac_address_by_host.call_count)
-
-    def test_dvr_mac_address_update(self):
-        self._setup_for_dvr_test()
-        newhost = 'cn2'
-        newmac = 'aa:bb:cc:dd:ee:ff'
-        int_br = mock.create_autospec(self.agent.int_br)
-        tun_br = mock.create_autospec(self.agent.tun_br)
-        phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
-        physical_network = 'physeth1'
-        with mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.phys_brs,
-                                {physical_network: phys_br}),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.dvr_agent.phys_brs,
-                                {physical_network: phys_br}):
-            self.agent.dvr_agent.dvr_mac_address_update(
-                dvr_macs=[{'host': newhost,
-                           'mac_address': newmac}])
-            expected_on_int_br = [
-                mock.call.add_dvr_mac_vlan(
-                    mac=newmac,
-                    port=self.agent.int_ofports[physical_network]),
-                mock.call.add_dvr_mac_tun(
-                    mac=newmac,
-                    port=self.agent.patch_tun_ofport),
-            ]
-            expected_on_tun_br = [
-                mock.call.add_dvr_mac_tun(
-                    mac=newmac,
-                    port=self.agent.patch_int_ofport),
-            ]
-            expected_on_phys_br = [
-                mock.call.add_dvr_mac_vlan(
-                    mac=newmac,
-                    port=self.agent.phys_ofports[physical_network]),
-            ]
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
-            self.assertEqual(expected_on_phys_br, phys_br.mock_calls)
-        int_br.reset_mock()
-        tun_br.reset_mock()
-        phys_br.reset_mock()
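-        # Second pass: an empty dvr_macs list must remove the flows installed
-        # above from all three bridges.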
-        with mock.patch.object(self.agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.phys_brs,
-                                {physical_network: phys_br}),\
-                mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
-                mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
-                mock.patch.dict(self.agent.dvr_agent.phys_brs,
-                                {physical_network: phys_br}):
-            self.agent.dvr_agent.dvr_mac_address_update(dvr_macs=[])
-            expected_on_int_br = [
-                mock.call.remove_dvr_mac_vlan(
-                    mac=newmac),
-                mock.call.remove_dvr_mac_tun(
-                    mac=newmac,
-                    port=self.agent.patch_tun_ofport),
-            ]
-            expected_on_tun_br = [
-                mock.call.remove_dvr_mac_tun(
-                    mac=newmac),
-            ]
-            expected_on_phys_br = [
-                mock.call.remove_dvr_mac_vlan(
-                    mac=newmac),
-            ]
-            self.assertEqual(expected_on_int_br, int_br.mock_calls)
-            self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
-            self.assertEqual(expected_on_phys_br, phys_br.mock_calls)
-
-    def test_ovs_restart(self):
-        self._setup_for_dvr_test()
-        reset_methods = (
-            'reset_ovs_parameters', 'reset_dvr_parameters',
-            'setup_dvr_flows_on_integ_br', 'setup_dvr_flows_on_tun_br',
-            'setup_dvr_flows_on_phys_br', 'setup_dvr_mac_flows_on_all_brs')
-        reset_mocks = [mock.patch.object(self.agent.dvr_agent, method).start()
-                       for method in reset_methods]
-        tun_br = mock.create_autospec(self.agent.tun_br)
-        with mock.patch.object(self.agent,
-                               'check_ovs_status',
-                               return_value=constants.OVS_RESTARTED),\
-                mock.patch.object(self.agent,
-                                  '_agent_has_updates',
-                                  side_effect=TypeError('loop exit')),\
-                mock.patch.object(self.agent, 'tun_br', new=tun_br):
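-            # The TypeError side effect above exists only to break out of the
-            # otherwise endless rpc_loop once the restart path has run.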
-            # block RPC calls and bridge calls
-            self.agent.setup_physical_bridges = mock.Mock()
-            self.agent.setup_integration_br = mock.Mock()
-            self.agent.setup_tunnel_br = mock.Mock()
-            self.agent.state_rpc = mock.Mock()
-            try:
-                self.agent.rpc_loop(polling_manager=mock.Mock())
-            except TypeError:
-                pass
-        self.assertTrue(all([x.called for x in reset_mocks]))
-
-    def _test_scan_ports_failure(self, scan_method_name):
-        with mock.patch.object(self.agent,
-                               'check_ovs_status',
-                               return_value=constants.OVS_RESTARTED),\
-                mock.patch.object(self.agent, scan_method_name,
-                                  side_effect=TypeError('broken')),\
-                mock.patch.object(self.agent, '_agent_has_updates',
-                                  return_value=True),\
-                mock.patch.object(self.agent, '_check_and_handle_signal',
-                                  side_effect=[True, False]):
-            # block RPC calls and bridge calls
-            self.agent.setup_physical_bridges = mock.Mock()
-            self.agent.setup_integration_br = mock.Mock()
-            self.agent.reset_tunnel_br = mock.Mock()
-            self.agent.state_rpc = mock.Mock()
-            self.agent.rpc_loop(polling_manager=mock.Mock())
-
-    def test_scan_ports_failure(self):
-        self._test_scan_ports_failure('scan_ports')
-
-    def test_scan_ancillary_ports_failure(self):
-        with mock.patch.object(self.agent, 'scan_ports'):
-            with mock.patch.object(self.agent, 'update_stale_ofport_rules'):
-                self.agent.ancillary_brs = mock.Mock()
-                self._test_scan_ports_failure('scan_ancillary_ports')
-
-
-class TestOvsDvrNeutronAgentOFCtl(TestOvsDvrNeutronAgent,
-                                  ovs_test_base.OVSOFCtlTestBase):
-    pass
-
-
-class TestOvsDvrNeutronAgentRyu(TestOvsDvrNeutronAgent,
-                                ovs_test_base.OVSRyuTestBase):
-    pass
-
-
-class TestValidateTunnelLocalIP(base.BaseTestCase):
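-    # validate_local_ip must abort the agent (SystemExit) when local_ip is
-    # unset or is not assigned to any local device.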
-    def test_validate_local_ip_with_valid_ip(self):
-        mock_get_device_by_ip = mock.patch.object(
-            ip_lib.IPWrapper, 'get_device_by_ip').start()
-        ovs_agent.validate_local_ip(FAKE_IP1)
-        mock_get_device_by_ip.assert_called_once_with(FAKE_IP1)
-
-    def test_validate_local_ip_with_none_ip(self):
-        with testtools.ExpectedException(SystemExit):
-            ovs_agent.validate_local_ip(None)
-
-    def test_validate_local_ip_with_invalid_ip(self):
-        mock_get_device_by_ip = mock.patch.object(
-            ip_lib.IPWrapper, 'get_device_by_ip').start()
-        mock_get_device_by_ip.return_value = None
-        with testtools.ExpectedException(SystemExit):
-            ovs_agent.validate_local_ip(FAKE_IP1)
-        mock_get_device_by_ip.assert_called_once_with(FAKE_IP1)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py
deleted file mode 100644 (file)
index a3bb97e..0000000
+++ /dev/null
@@ -1,716 +0,0 @@
-# Copyright 2012 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import time
-
-import mock
-from oslo_config import cfg
-from oslo_log import log
-import six
-
-from neutron.agent.common import ovs_lib
-from neutron.agent.linux import ip_lib
-from neutron.common import constants as n_const
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
-    import ovs_test_base
-
-
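-# Expected-call helper: evaluating a mock in a boolean context is recorded
-# as a __bool__ (Python 3) / __nonzero__ (Python 2) call, so expected call
-# lists must include it explicitly, e.g. after port_exists().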
-def nonzero(f):
-    if six.PY3:
-        return f.__bool__()
-    else:
-        return f.__nonzero__()
-
-# Useful global dummy variables.
-NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7'
-LS_ID = 420
-LV_ID = 42
-LV_IDS = [42, 43]
-VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8'
-VIF_MAC = '3c:09:24:1e:78:23'
-OFPORT_NUM = 1
-VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM,
-                           VIF_ID, VIF_MAC, 'switch')
-VIF_PORTS = {VIF_ID: VIF_PORT}
-FIXED_IPS = [{'subnet_id': 'my-subnet-uuid',
-              'ip_address': '1.1.1.1'}]
-VM_DEVICE_OWNER = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-
-TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
-
-BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00"
-UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00"
-
-
-class DummyPort(object):
-    def __init__(self, interface_id):
-        self.interface_id = interface_id
-
-
-class DummyVlanBinding(object):
-    def __init__(self, network_id, vlan_id):
-        self.network_id = network_id
-        self.vlan_id = vlan_id
-
-
-class TunnelTest(object):
-    USE_VETH_INTERCONNECTION = False
-    VETH_MTU = None
-
-    def setUp(self):
-        super(TunnelTest, self).setUp()
-        cfg.CONF.set_default('firewall_driver',
-                             'neutron.agent.firewall.NoopFirewallDriver',
-                             group='SECURITYGROUP')
-        cfg.CONF.set_override('report_interval', 0, 'AGENT')
-
-        self.INT_BRIDGE = 'integration_bridge'
-        self.TUN_BRIDGE = 'tunnel_bridge'
-        self.MAP_TUN_BRIDGE = 'tun_br_map'
-        self.NET_MAPPING = ['net1:%s' % self.MAP_TUN_BRIDGE]
-        self.INT_OFPORT = 11111
-        self.TUN_OFPORT = 22222
-        self.MAP_TUN_INT_OFPORT = 33333
-        self.MAP_TUN_PHY_OFPORT = 44444
-
-        self.LVM = self.mod_agent.LocalVLANMapping(
-            LV_ID, 'gre', None, LS_ID, VIF_PORTS)
-        self.LVM_FLAT = self.mod_agent.LocalVLANMapping(
-            LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
-        self.LVM_VLAN = self.mod_agent.LocalVLANMapping(
-            LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
-
-        self.inta = mock.Mock()
-        self.intb = mock.Mock()
-
-        mock.patch.object(ovs_lib.BaseOVS, 'config',
-                          new_callable=mock.PropertyMock,
-                          return_value={}).start()
-
-        self.ovs_bridges = {
-            self.INT_BRIDGE: mock.create_autospec(
-                self.br_int_cls('br-int')),
-            self.TUN_BRIDGE: mock.create_autospec(
-                self.br_tun_cls('br-tun')),
-            self.MAP_TUN_BRIDGE: mock.create_autospec(
-                self.br_phys_cls('br-phys')),
-        }
-        self.ovs_int_ofports = {
-            'patch-tun': self.TUN_OFPORT,
-            'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT
-        }
-
-        def lookup_br(br_name, *args, **kwargs):
-            return self.ovs_bridges[br_name]
-
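-        # All three bridge classes are patched with lookup_br as side effect,
-        # so constructing a bridge by name returns the autospecced mock above.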
-        self.mock_int_bridge_cls = mock.patch(self._BR_INT_CLASS,
-                                              autospec=True).start()
-        self.mock_int_bridge_cls.side_effect = lookup_br
-        self.mock_phys_bridge_cls = mock.patch(self._BR_PHYS_CLASS,
-                                               autospec=True).start()
-        self.mock_phys_bridge_cls.side_effect = lookup_br
-        self.mock_tun_bridge_cls = mock.patch(self._BR_TUN_CLASS,
-                                              autospec=True).start()
-        self.mock_tun_bridge_cls.side_effect = lookup_br
-
-        self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
-        self.mock_int_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT
-        self.mock_int_bridge.add_patch_port.side_effect = (
-            lambda tap, peer: self.ovs_int_ofports[tap])
-        self.mock_int_bridge.get_vif_ports.return_value = []
-        self.mock_int_bridge.get_ports_attributes.return_value = []
-        self.mock_int_bridge.db_get_val.return_value = {}
-
-        self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE]
-        self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE
-        self.mock_map_tun_bridge.add_port.return_value = (
-            self.MAP_TUN_PHY_OFPORT)
-        self.mock_map_tun_bridge.add_patch_port.return_value = (
-            self.MAP_TUN_PHY_OFPORT)
-
-        self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE]
-        self.mock_tun_bridge.add_port.return_value = self.INT_OFPORT
-        self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT
-
-        self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start()
-
-        self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start()
-        add_veth = self.ipwrapper.return_value.add_veth
-        add_veth.return_value = [self.inta, self.intb]
-
-        self.get_bridges = mock.patch.object(ovs_lib.BaseOVS,
-                                             'get_bridges').start()
-        self.get_bridges.return_value = [self.INT_BRIDGE,
-                                         self.TUN_BRIDGE,
-                                         self.MAP_TUN_BRIDGE]
-
-        self.execute = mock.patch(
-            'neutron.agent.common.utils.execute').start()
-
-        self._define_expected_calls()
-
-    def _define_expected_calls(self, arp_responder=False):
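-        # These lists mirror, call for call, what agent construction is
-        # expected to perform against each bridge; tests extend them before
-        # calling _verify_mock_calls().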
-        self.mock_int_bridge_cls_expected = [
-            mock.call(self.INT_BRIDGE,
-                      datapath_type=mock.ANY),
-        ]
-        self.mock_phys_bridge_cls_expected = [
-            mock.call(self.MAP_TUN_BRIDGE,
-                      datapath_type=mock.ANY),
-        ]
-        self.mock_tun_bridge_cls_expected = [
-            mock.call(self.TUN_BRIDGE,
-                      datapath_type=mock.ANY),
-        ]
-
-        self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
-        self.mock_int_bridge_expected = [
-            mock.call.set_agent_uuid_stamp(mock.ANY),
-            mock.call.create(),
-            mock.call.set_secure_mode(),
-            mock.call.setup_controllers(mock.ANY),
-            mock.call.delete_port('patch-tun'),
-            mock.call.setup_default_table(),
-        ]
-
-        self.mock_map_tun_bridge_expected = [
-            mock.call.create(),
-            mock.call.setup_controllers(mock.ANY),
-            mock.call.setup_default_table(),
-            mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE,
-                                     constants.NONEXISTENT_PEER), ]
-        self.mock_int_bridge_expected += [
-            mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
-                                 'type'),
-            mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE,
-                                     constants.NONEXISTENT_PEER),
-        ]
-
-        self.mock_int_bridge_expected += [
-            mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT),
-            mock.call.set_db_attribute(
-                'Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
-                'options:peer', 'phy-%s' % self.MAP_TUN_BRIDGE),
-        ]
-        self.mock_map_tun_bridge_expected += [
-            mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT),
-            mock.call.set_db_attribute(
-                'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE,
-                'options:peer', 'int-%s' % self.MAP_TUN_BRIDGE),
-        ]
-
-        self.mock_tun_bridge_expected = [
-            mock.call.set_agent_uuid_stamp(mock.ANY),
-            mock.call.create(secure_mode=True),
-            mock.call.setup_controllers(mock.ANY),
-            mock.call.port_exists('patch-int'),
-            nonzero(mock.call.port_exists()),
-            mock.call.add_patch_port('patch-int', 'patch-tun'),
-        ]
-        self.mock_int_bridge_expected += [
-            mock.call.port_exists('patch-tun'),
-            nonzero(mock.call.port_exists()),
-            mock.call.add_patch_port('patch-tun', 'patch-int'),
-        ]
-        self.mock_int_bridge_expected += [
-            mock.call.get_vif_ports(),
-            mock.call.get_ports_attributes(
-                'Port', columns=['name', 'other_config', 'tag'], ports=[])
-        ]
-
-        self.mock_tun_bridge_expected += [
-            mock.call.setup_default_table(self.INT_OFPORT, arp_responder),
-        ]
-
-        self.ipdevice_expected = []
-        self.ipwrapper_expected = [mock.call()]
-
-        self.get_bridges_expected = [mock.call(), mock.call()]
-
-        self.inta_expected = []
-        self.intb_expected = []
-        self.execute_expected = []
-
-    def _build_agent(self, **config_opts_agent):
-        """Configure and initialize OVS agent.
-
-        :param config_opts_agent: a dict with options to override the
-               default values for the AGENT group.
-        """
-        bridge_classes = {
-            'br_int': self.mock_int_bridge_cls,
-            'br_phys': self.mock_phys_bridge_cls,
-            'br_tun': self.mock_tun_bridge_cls,
-        }
-        cfg.CONF.set_override('integration_bridge', self.INT_BRIDGE, 'OVS')
-        cfg.CONF.set_override('tunnel_bridge', self.TUN_BRIDGE, 'OVS')
-        cfg.CONF.set_override('local_ip', '10.0.0.1', 'OVS')
-        cfg.CONF.set_override('bridge_mappings', self.NET_MAPPING, 'OVS')
-        cfg.CONF.set_override('polling_interval', 2, 'AGENT')
-        cfg.CONF.set_override('tunnel_types', ['gre'], 'AGENT')
-        cfg.CONF.set_override('veth_mtu', self.VETH_MTU, 'AGENT')
-        cfg.CONF.set_override('minimize_polling', False, 'AGENT')
-        cfg.CONF.set_override('use_veth_interconnection',
-                              self.USE_VETH_INTERCONNECTION, 'OVS')
-
-        for k, v in config_opts_agent.items():
-            cfg.CONF.set_override(k, v, 'AGENT')
-
-        return self.mod_agent.OVSNeutronAgent(bridge_classes, cfg.CONF)
-
-    def _verify_mock_call(self, mock_obj, expected):
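-        # assert_has_calls yields a readable diff when ordering is wrong; the
-        # strict assertEqual additionally rejects any unexpected extra calls.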
-        mock_obj.assert_has_calls(expected)
-        self.assertEqual(expected, mock_obj.mock_calls)
-
-    def _verify_mock_calls(self):
-        self._verify_mock_call(self.mock_int_bridge_cls,
-                               self.mock_int_bridge_cls_expected)
-        self._verify_mock_call(self.mock_tun_bridge_cls,
-                               self.mock_tun_bridge_cls_expected)
-        self._verify_mock_call(self.mock_phys_bridge_cls,
-                               self.mock_phys_bridge_cls_expected)
-        self._verify_mock_call(self.mock_int_bridge,
-                               self.mock_int_bridge_expected)
-        self._verify_mock_call(self.mock_map_tun_bridge,
-                               self.mock_map_tun_bridge_expected)
-        self._verify_mock_call(self.mock_tun_bridge,
-                               self.mock_tun_bridge_expected)
-        self._verify_mock_call(self.ipdevice, self.ipdevice_expected)
-        self._verify_mock_call(self.ipwrapper, self.ipwrapper_expected)
-        self._verify_mock_call(self.get_bridges, self.get_bridges_expected)
-        self._verify_mock_call(self.inta, self.inta_expected)
-        self._verify_mock_call(self.intb, self.intb_expected)
-        self._verify_mock_call(self.execute, self.execute_expected)
-
-    def test_construct(self):
-        agent = self._build_agent()
-        self.assertEqual(agent.agent_id, 'ovs-agent-%s' % cfg.CONF.host)
-        self._verify_mock_calls()
-
-    # TODO(ethuleau): Initially, the local ARP responder is dependent on the
-    #                 ML2 l2 population mechanism driver.
-    #                 The next two tests use the l2_pop flag to test the ARP
-    #                 responder.
-    def test_construct_with_arp_responder(self):
-        self._build_agent(l2_population=True, arp_responder=True)
-        self._define_expected_calls(True)
-        self._verify_mock_calls()
-
-    def test_construct_without_arp_responder(self):
-        self._build_agent(l2_population=False, arp_responder=True)
-        self._verify_mock_calls()
-
-    def test_construct_vxlan(self):
-        self._build_agent(tunnel_types=['vxlan'])
-        self._verify_mock_calls()
-
-    def test_provision_local_vlan(self):
-        ofports = list(TUN_OFPORTS[p_const.TYPE_GRE].values())
-        self.mock_tun_bridge_expected += [
-            mock.call.install_flood_to_tun(LV_ID, LS_ID, ofports),
-            mock.call.provision_local_vlan(
-                network_type=p_const.TYPE_GRE,
-                lvid=LV_ID,
-                segmentation_id=LS_ID),
-        ]
-
-        a = self._build_agent()
-        a.available_local_vlans = set([LV_ID])
-        a.tun_br_ofports = TUN_OFPORTS
-        a.provision_local_vlan(NET_UUID, p_const.TYPE_GRE, None, LS_ID)
-        self._verify_mock_calls()
-
-    def test_provision_local_vlan_flat(self):
-        self.mock_map_tun_bridge_expected.append(
-            mock.call.provision_local_vlan(
-                port=self.MAP_TUN_PHY_OFPORT,
-                lvid=LV_ID,
-                segmentation_id=None,
-                distributed=False))
-        self.mock_int_bridge_expected.append(
-            mock.call.provision_local_vlan(
-                port=self.INT_OFPORT,
-                lvid=LV_ID,
-                segmentation_id=None))
-
-        a = self._build_agent()
-        a.available_local_vlans = set([LV_ID])
-        a.phys_brs['net1'] = self.mock_map_tun_bridge
-        a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
-        a.int_ofports['net1'] = self.INT_OFPORT
-        a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net1', LS_ID)
-        self._verify_mock_calls()
-
-    def test_provision_local_vlan_flat_fail(self):
-        a = self._build_agent()
-        a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net2', LS_ID)
-        self._verify_mock_calls()
-
-    def test_provision_local_vlan_vlan(self):
-        self.mock_map_tun_bridge_expected.append(
-            mock.call.provision_local_vlan(
-                port=self.MAP_TUN_PHY_OFPORT,
-                lvid=LV_ID,
-                segmentation_id=LS_ID,
-                distributed=False))
-        self.mock_int_bridge_expected.append(
-            mock.call.provision_local_vlan(
-                port=self.INT_OFPORT,
-                lvid=LV_ID,
-                segmentation_id=LS_ID))
-        a = self._build_agent()
-        a.available_local_vlans = set([LV_ID])
-        a.phys_brs['net1'] = self.mock_map_tun_bridge
-        a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
-        a.int_ofports['net1'] = self.INT_OFPORT
-        a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net1', LS_ID)
-        self._verify_mock_calls()
-
-    def test_provision_local_vlan_vlan_fail(self):
-        a = self._build_agent()
-        a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net2', LS_ID)
-        self._verify_mock_calls()
-
-    def test_reclaim_local_vlan(self):
-        self.mock_tun_bridge_expected += [
-            mock.call.reclaim_local_vlan(network_type='gre',
-                                         segmentation_id=LS_ID),
-            mock.call.delete_flood_to_tun(LV_ID),
-            mock.call.delete_unicast_to_tun(LV_ID, None),
-            mock.call.delete_arp_responder(LV_ID, None),
-        ]
-
-        a = self._build_agent()
-        a.available_local_vlans = set()
-        a.local_vlan_map[NET_UUID] = self.LVM
-        a.reclaim_local_vlan(NET_UUID)
-        self.assertIn(self.LVM.vlan, a.available_local_vlans)
-        self._verify_mock_calls()
-
-    def test_reclaim_local_vlan_flat(self):
-        self.mock_map_tun_bridge_expected.append(
-            mock.call.reclaim_local_vlan(
-                port=self.MAP_TUN_PHY_OFPORT,
-                lvid=self.LVM_FLAT.vlan))
-        self.mock_int_bridge_expected.append(
-            mock.call.reclaim_local_vlan(
-                port=self.INT_OFPORT,
-                segmentation_id=None))
-        a = self._build_agent()
-        a.phys_brs['net1'] = self.mock_map_tun_bridge
-        a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
-        a.int_ofports['net1'] = self.INT_OFPORT
-
-        a.available_local_vlans = set()
-        a.local_vlan_map[NET_UUID] = self.LVM_FLAT
-        a.reclaim_local_vlan(NET_UUID)
-        self.assertIn(self.LVM_FLAT.vlan, a.available_local_vlans)
-        self._verify_mock_calls()
-
-    def test_reclaim_local_vlan_vlan(self):
-        self.mock_map_tun_bridge_expected.append(
-            mock.call.reclaim_local_vlan(
-                port=self.MAP_TUN_PHY_OFPORT,
-                lvid=self.LVM_VLAN.vlan))
-        self.mock_int_bridge_expected.append(
-            mock.call.reclaim_local_vlan(
-                port=self.INT_OFPORT,
-                segmentation_id=LS_ID))
-        a = self._build_agent()
-        a.phys_brs['net1'] = self.mock_map_tun_bridge
-        a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
-        a.int_ofports['net1'] = self.INT_OFPORT
-
-        a.available_local_vlans = set()
-        a.local_vlan_map[NET_UUID] = self.LVM_VLAN
-        a.reclaim_local_vlan(NET_UUID)
-        self.assertIn(self.LVM_VLAN.vlan, a.available_local_vlans)
-        self._verify_mock_calls()
-
-    def test_port_bound(self):
-        vlan_mapping = {'segmentation_id': LS_ID,
-                        'physical_network': None,
-                        'net_uuid': NET_UUID,
-                        'network_type': 'gre'}
-        self.mock_int_bridge_expected += [
-            mock.call.db_get_val('Port', 'port', 'other_config'),
-            mock.call.set_db_attribute('Port', VIF_PORT.port_name,
-                                       'other_config',
-                                       vlan_mapping)]
-
-        a = self._build_agent()
-        a.local_vlan_map[NET_UUID] = self.LVM
-        a.local_dvr_map = {}
-        self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = {}
-        a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID,
-                     FIXED_IPS, VM_DEVICE_OWNER, False)
-        self._verify_mock_calls()
-
-    def test_port_unbound(self):
-        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                               'reclaim_local_vlan') as reclaim_local_vlan:
-            a = self._build_agent()
-            a.local_vlan_map[NET_UUID] = self.LVM
-            a.port_unbound(VIF_ID, NET_UUID)
-
-        reclaim_local_vlan.assert_called_once_with(NET_UUID)
-        self._verify_mock_calls()
-
-    def test_port_dead(self):
-        self.mock_int_bridge_expected += [
-            mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag',
-                                 log_errors=True),
-            mock.call.set_db_attribute(
-                'Port', VIF_PORT.port_name,
-                'tag', self.mod_agent.DEAD_VLAN_TAG,
-                log_errors=True),
-            mock.call.drop_port(in_port=VIF_PORT.ofport),
-        ]
-
-        a = self._build_agent()
-        a.available_local_vlans = set([LV_ID])
-        a.local_vlan_map[NET_UUID] = self.LVM
-        self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = mock.Mock()
-        a.port_dead(VIF_PORT)
-        self._verify_mock_calls()
-
-    def test_tunnel_update(self):
-        tunnel_port = '9999'
-        self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port
-        self.mock_tun_bridge_expected += [
-            mock.call.add_tunnel_port('gre-0a000a01', '10.0.10.1', '10.0.0.1',
-                                      'gre', 4789, True, False),
-            mock.call.setup_tunnel_port('gre', tunnel_port),
-        ]
-
-        a = self._build_agent()
-        a.tunnel_update(
-            mock.sentinel.ctx, tunnel_ip='10.0.10.1',
-            tunnel_type=p_const.TYPE_GRE)
-        self._verify_mock_calls()
-
-    def test_tunnel_update_self(self):
-        a = self._build_agent()
-        a.tunnel_update(
-            mock.sentinel.ctx, tunnel_ip='10.0.0.1')
-        self._verify_mock_calls()
-
-    def test_daemon_loop(self):
-        reply_ge_1 = {'added': set(['tap0']),
-                      'removed': set([])}
-
-        reply_ge_2 = {'added': set([]),
-                      'removed': set(['tap0'])}
-
-        reply_pe_1 = {'current': set(['tap0']),
-                      'added': set(['tap0']),
-                      'removed': set([])}
-
-        reply_pe_2 = {'current': set([]),
-                      'added': set([]),
-                      'removed': set(['tap0'])}
-
-        reply_ancillary = {'current': set([]),
-                           'added': set([]),
-                           'removed': set([])}
-
-        self.mock_int_bridge_expected += [
-            mock.call.check_canary_table(),
-            mock.call.check_canary_table()
-        ]
-
-        self.ovs_bridges[self.INT_BRIDGE].check_canary_table.return_value = \
-            constants.OVS_NORMAL
-        with mock.patch.object(log.KeywordArgumentAdapter,
-                               'exception') as log_exception,\
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'process_ports_events') as process_p_events,\
-                mock.patch.object(
-                    self.mod_agent.OVSNeutronAgent,
-                    'process_network_ports') as process_network_ports,\
-                mock.patch.object(self.mod_agent.OVSNeutronAgent,
-                                  'tunnel_sync'),\
-                mock.patch.object(time, 'sleep'),\
-                mock.patch.object(
-                    self.mod_agent.OVSNeutronAgent,
-                    'update_stale_ofport_rules') as update_stale,\
-                mock.patch.object(
-                    self.mod_agent.OVSNeutronAgent,
-                    'cleanup_stale_flows') as cleanup:
-            log_exception.side_effect = Exception(
-                'Fake exception to get out of the loop')
-            update_stale.return_value = []
-            devices_not_ready = set()
-            process_p_events.side_effect = [
-                (reply_pe_1, reply_ancillary, devices_not_ready),
-                (reply_pe_2, reply_ancillary, devices_not_ready)]
-            interface_polling = mock.Mock()
-            interface_polling.get_events.side_effect = [reply_ge_1, reply_ge_2]
-            process_network_ports.side_effect = [
-                False, Exception('Fake exception to get out of the loop')]
-
-            n_agent = self._build_agent()
-
-            # Hack to test the loop: start the method and expect it to raise
-            # during the second iteration. If something goes wrong, the
-            # assert_has_calls below will catch it.
-            try:
-                n_agent.rpc_loop(interface_polling)
-            except Exception:
-                pass
-
-            # FIXME(salv-orlando): There should not be assertions on log
-            # messages
-            log_exception.assert_called_once_with(
-                "Error while processing VIF ports")
-            process_p_events.assert_has_calls([
-                mock.call(reply_ge_1, set(), set(), devices_not_ready, set()),
-                mock.call(reply_ge_2, set(['tap0']), set(), devices_not_ready,
-                          set())
-            ])
-            process_network_ports.assert_has_calls([
-                mock.call({'current': set(['tap0']),
-                           'removed': set([]),
-                           'added': set(['tap0'])}, False),
-            ])
-
-            cleanup.assert_called_once_with()
-            self.assertTrue(update_stale.called)
-            self._verify_mock_calls()
-
-
-class TunnelTestOFCtl(TunnelTest, ovs_test_base.OVSOFCtlTestBase):
-    pass
-
-
-class TunnelTestRyu(TunnelTest, ovs_test_base.OVSRyuTestBase):
-    pass
-
-
-class TunnelTestUseVethInterco(TunnelTest):
-    USE_VETH_INTERCONNECTION = True
-
-    def _define_expected_calls(self, arp_responder=False):
-        self.mock_int_bridge_cls_expected = [
-            mock.call(self.INT_BRIDGE,
-                      datapath_type=mock.ANY),
-        ]
-        self.mock_phys_bridge_cls_expected = [
-            mock.call(self.MAP_TUN_BRIDGE,
-                      datapath_type=mock.ANY),
-        ]
-        self.mock_tun_bridge_cls_expected = [
-            mock.call(self.TUN_BRIDGE,
-                      datapath_type=mock.ANY),
-        ]
-
-        self.mock_int_bridge_expected = [
-            mock.call.set_agent_uuid_stamp(mock.ANY),
-            mock.call.create(),
-            mock.call.set_secure_mode(),
-            mock.call.setup_controllers(mock.ANY),
-            mock.call.delete_port('patch-tun'),
-            mock.call.setup_default_table(),
-        ]
-
-        self.mock_map_tun_bridge_expected = [
-            mock.call.create(),
-            mock.call.setup_controllers(mock.ANY),
-            mock.call.setup_default_table(),
-            mock.call.add_port(self.intb),
-        ]
-        self.mock_int_bridge_expected += [
-            mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
-                                 'type'),
-            mock.call.add_port(self.inta)
-        ]
-
-        self.mock_int_bridge_expected += [
-            mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT),
-        ]
-        self.mock_map_tun_bridge_expected += [
-            mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT),
-        ]
-
-        self.mock_tun_bridge_expected = [
-            mock.call.set_agent_uuid_stamp(mock.ANY),
-            mock.call.create(secure_mode=True),
-            mock.call.setup_controllers(mock.ANY),
-            mock.call.port_exists('patch-int'),
-            nonzero(mock.call.port_exists()),
-            mock.call.add_patch_port('patch-int', 'patch-tun'),
-        ]
-        self.mock_int_bridge_expected += [
-            mock.call.port_exists('patch-tun'),
-            nonzero(mock.call.port_exists()),
-            mock.call.add_patch_port('patch-tun', 'patch-int')
-        ]
-        self.mock_int_bridge_expected += [
-            mock.call.get_vif_ports(),
-            mock.call.get_ports_attributes(
-                'Port', columns=['name', 'other_config', 'tag'], ports=[])
-        ]
-        self.mock_tun_bridge_expected += [
-            mock.call.setup_default_table(self.INT_OFPORT, arp_responder),
-        ]
-
-        self.ipdevice_expected = [
-            mock.call('int-%s' % self.MAP_TUN_BRIDGE),
-            mock.call().exists(),
-            nonzero(mock.call().exists()),
-            mock.call().link.delete()
-        ]
-        self.ipwrapper_expected = [
-            mock.call(),
-            mock.call().add_veth('int-%s' % self.MAP_TUN_BRIDGE,
-                                 'phy-%s' % self.MAP_TUN_BRIDGE)
-        ]
-
-        self.get_bridges_expected = [mock.call(), mock.call()]
-
-        self.inta_expected = [mock.call.link.set_up()]
-        self.intb_expected = [mock.call.link.set_up()]
-        self.execute_expected = [mock.call(['udevadm', 'settle',
-                                            '--timeout=10'])]
-
-
-class TunnelTestUseVethIntercoOFCtl(TunnelTestUseVethInterco,
-                                    ovs_test_base.OVSOFCtlTestBase):
-    pass
-
-
-class TunnelTestUseVethIntercoRyu(TunnelTestUseVethInterco,
-                                  ovs_test_base.OVSRyuTestBase):
-    pass
-
-
-class TunnelTestWithMTU(TunnelTestUseVethInterco):
-    VETH_MTU = 1500
-
-    def _define_expected_calls(self, arp_responder=False):
-        super(TunnelTestWithMTU, self)._define_expected_calls(arp_responder)
-        self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
-        self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
-
-
-class TunnelTestWithMTUOFCtl(TunnelTestWithMTU,
-                             ovs_test_base.OVSOFCtlTestBase):
-    pass
-
-
-class TunnelTestWithMTURyu(TunnelTestWithMTU,
-                           ovs_test_base.OVSRyuTestBase):
-    pass
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py
deleted file mode 100644 (file)
index cbef154..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron.common import constants
-from neutron.extensions import portbindings
-from neutron.plugins.ml2.drivers.openvswitch.mech_driver \
-    import mech_openvswitch
-from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
-
-
-class OpenvswitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
-    VIF_TYPE = portbindings.VIF_TYPE_OVS
-    VIF_DETAILS = {portbindings.CAP_PORT_FILTER: True,
-                   portbindings.OVS_HYBRID_PLUG: True}
-    AGENT_TYPE = constants.AGENT_TYPE_OVS
-
-    GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'}
-    GOOD_TUNNEL_TYPES = ['gre', 'vxlan']
-    GOOD_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS,
-                    'tunnel_types': GOOD_TUNNEL_TYPES}
-
-    BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'}
-    BAD_TUNNEL_TYPES = ['bad_tunnel_type']
-    BAD_CONFIGS = {'bridge_mappings': BAD_MAPPINGS,
-                   'tunnel_types': BAD_TUNNEL_TYPES}
-
-    AGENTS = [{'alive': True,
-               'configurations': GOOD_CONFIGS,
-               'host': 'host'}]
-    AGENTS_DEAD = [{'alive': False,
-                    'configurations': GOOD_CONFIGS,
-                    'host': 'dead_host'}]
-    AGENTS_BAD = [{'alive': False,
-                   'configurations': GOOD_CONFIGS,
-                   'host': 'bad_host_1'},
-                  {'alive': True,
-                   'configurations': BAD_CONFIGS,
-                   'host': 'bad_host_2'}]
-
-    def setUp(self):
-        super(OpenvswitchMechanismBaseTestCase, self).setUp()
-        self.driver = mech_openvswitch.OpenvswitchMechanismDriver()
-        self.driver.initialize()
-
-
-class OpenvswitchMechanismSGDisabledBaseTestCase(
-    OpenvswitchMechanismBaseTestCase):
-    VIF_DETAILS = {portbindings.CAP_PORT_FILTER: False,
-                   portbindings.OVS_HYBRID_PLUG: False}
-
-    def setUp(self):
-        cfg.CONF.set_override('enable_security_group',
-                              False,
-                              group='SECURITYGROUP')
-        super(OpenvswitchMechanismSGDisabledBaseTestCase, self).setUp()
-
-
-class OpenvswitchMechanismGenericTestCase(OpenvswitchMechanismBaseTestCase,
-                                          base.AgentMechanismGenericTestCase):
-    pass
-
-
-class OpenvswitchMechanismLocalTestCase(OpenvswitchMechanismBaseTestCase,
-                                        base.AgentMechanismLocalTestCase):
-    pass
-
-
-class OpenvswitchMechanismFlatTestCase(OpenvswitchMechanismBaseTestCase,
-                                       base.AgentMechanismFlatTestCase):
-    pass
-
-
-class OpenvswitchMechanismVlanTestCase(OpenvswitchMechanismBaseTestCase,
-                                       base.AgentMechanismVlanTestCase):
-    pass
-
-
-class OpenvswitchMechanismGreTestCase(OpenvswitchMechanismBaseTestCase,
-                                      base.AgentMechanismGreTestCase):
-    pass
-
-
-class OpenvswitchMechanismSGDisabledLocalTestCase(
-    OpenvswitchMechanismSGDisabledBaseTestCase,
-    base.AgentMechanismLocalTestCase):
-    pass
diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_helpers.py b/neutron/tests/unit/plugins/ml2/drivers/test_helpers.py
deleted file mode 100644 (file)
index 8f60734..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright (c) 2014 Thales Services SAS
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_db import exception as exc
-from sqlalchemy.orm import query
-
-import neutron.db.api as db
-from neutron.plugins.ml2.drivers import type_vlan
-from neutron.tests.unit import testlib_api
-
-
-TENANT_NET = 'phys_net2'
-VLAN_MIN = 200
-VLAN_MAX = 209
-VLAN_OUTSIDE = 100
-NETWORK_VLAN_RANGES = {
-    TENANT_NET: [(VLAN_MIN, VLAN_MAX)],
-}
-
-
-class HelpersTest(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(HelpersTest, self).setUp()
-        self.driver = type_vlan.VlanTypeDriver()
-        self.driver.network_vlan_ranges = NETWORK_VLAN_RANGES
-        self.driver._sync_vlan_allocations()
-        self.session = db.get_session()
-
-    def check_raw_segment(self, expected, observed):
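-        # Compare only the keys we specified; the observed segment row may
-        # carry extra columns (such as the allocation flag).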
-        for key, value in expected.items():
-            self.assertEqual(value, observed[key])
-
-    def test_primary_keys(self):
-        self.assertEqual(set(['physical_network', 'vlan_id']),
-                         self.driver.primary_keys)
-
-    def test_allocate_specific_unallocated_segment_in_pools(self):
-        expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN)
-        observed = self.driver.allocate_fully_specified_segment(self.session,
-                                                                **expected)
-        self.check_raw_segment(expected, observed)
-
-    def test_allocate_specific_allocated_segment_in_pools(self):
-        raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN)
-        self.driver.allocate_fully_specified_segment(self.session,
-                                                     **raw_segment)
-        observed = self.driver.allocate_fully_specified_segment(self.session,
-                                                                **raw_segment)
-        self.assertIsNone(observed)
-
-    def test_allocate_specific_finally_allocated_segment_in_pools(self):
-        # Test case: allocate a specific unallocated segment in pools but
-        # the segment is allocated concurrently between select and update
-
-        raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN)
-        with mock.patch.object(query.Query, 'update', return_value=0):
-            observed = self.driver.allocate_fully_specified_segment(
-                self.session, **raw_segment)
-            self.assertIsNone(observed)
-
-    def test_allocate_specific_unallocated_segment_outside_pools(self):
-        expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_OUTSIDE)
-        observed = self.driver.allocate_fully_specified_segment(self.session,
-                                                                **expected)
-        self.check_raw_segment(expected, observed)
-
-    def test_allocate_specific_allocated_segment_outside_pools(self):
-        raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_OUTSIDE)
-        self.driver.allocate_fully_specified_segment(self.session,
-                                                     **raw_segment)
-        observed = self.driver.allocate_fully_specified_segment(self.session,
-                                                                **raw_segment)
-        self.assertIsNone(observed)
-
-    def test_allocate_specific_finally_unallocated_segment_outside_pools(self):
-        # Test case: allocate a specific allocated segment in pools but
-        # the segment is concurrently unallocated after select or update
-
-        expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN)
-        with mock.patch.object(self.driver.model, 'save'):
-            observed = self.driver.allocate_fully_specified_segment(
-                self.session, **expected)
-            self.check_raw_segment(expected, observed)
-
-    def test_allocate_partial_segment_without_filters(self):
-        expected = dict(physical_network=TENANT_NET)
-        observed = self.driver.allocate_partially_specified_segment(
-            self.session)
-        self.check_raw_segment(expected, observed)
-
-    def test_allocate_partial_segment_with_filter(self):
-        expected = dict(physical_network=TENANT_NET)
-        observed = self.driver.allocate_partially_specified_segment(
-            self.session, **expected)
-        self.check_raw_segment(expected, observed)
-
-    def test_allocate_partial_segment_no_resource_available(self):
-        for i in range(VLAN_MIN, VLAN_MAX + 1):
-            self.driver.allocate_partially_specified_segment(self.session)
-        observed = self.driver.allocate_partially_specified_segment(
-            self.session)
-        self.assertIsNone(observed)
-
-    def test_allocate_partial_segment_outside_pools(self):
-        raw_segment = dict(physical_network='other_phys_net')
-        observed = self.driver.allocate_partially_specified_segment(
-            self.session, **raw_segment)
-        self.assertIsNone(observed)
-
-    def test_allocate_partial_segment_first_attempt_fails(self):
-        expected = dict(physical_network=TENANT_NET)
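-        # A first update() returning 0 simulates losing the allocation race;
-        # the driver must raise RetryRequest, and the retried call succeeds.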
-        with mock.patch.object(query.Query, 'update', side_effect=[0, 1]):
-            self.assertRaises(
-                exc.RetryRequest,
-                self.driver.allocate_partially_specified_segment,
-                self.session, **expected)
-            observed = self.driver.allocate_partially_specified_segment(
-                self.session, **expected)
-            self.check_raw_segment(expected, observed)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py
deleted file mode 100644 (file)
index 8dd87b9..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (c) 2014 Thales Services SAS
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import exceptions as exc
-import neutron.db.api as db
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2 import config
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers import type_flat
-from neutron.tests import base
-from neutron.tests.unit import testlib_api
-
-
-FLAT_NETWORKS = ['flat_net1', 'flat_net2']
-
-
-class FlatTypeTest(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(FlatTypeTest, self).setUp()
-        config.cfg.CONF.set_override('flat_networks', FLAT_NETWORKS,
-                                     group='ml2_type_flat')
-        self.driver = type_flat.FlatTypeDriver()
-        self.session = db.get_session()
-        self.driver.physnet_mtus = []
-
-    def _get_allocation(self, session, segment):
-        return session.query(type_flat.FlatAllocation).filter_by(
-            physical_network=segment[api.PHYSICAL_NETWORK]).first()
-
-    def test_is_partial_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'flat_net1'}
-        self.assertFalse(self.driver.is_partial_segment(segment))
-
-    def test_validate_provider_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'flat_net1'}
-        self.driver.validate_provider_segment(segment)
-
-    def test_validate_provider_phynet_name(self):
-        self.driver._parse_networks([])
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'flat_net1'}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment=segment)
-
-    def test_validate_provider_phynet_name_multiple(self):
-        self.driver._parse_networks(['flat_net1', 'flat_net2'])
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'flat_net1'}
-        self.driver.validate_provider_segment(segment)
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'flat_net2'}
-        self.driver.validate_provider_segment(segment)
-
-    def test_validate_provider_segment_without_physnet_restriction(self):
-        self.driver._parse_networks('*')
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'other_flat_net'}
-        self.driver.validate_provider_segment(segment)
-
-    def test_validate_provider_segment_with_missing_physical_network(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment)
-
-    def test_validate_provider_segment_with_unsupported_physical_network(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'other_flat_net'}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment)
-
-    def test_validate_provider_segment_with_unallowed_segmentation_id(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'flat_net1',
-                   api.SEGMENTATION_ID: 1234}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment)
-
-    def test_reserve_provider_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'flat_net1'}
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        alloc = self._get_allocation(self.session, observed)
-        self.assertEqual(segment[api.PHYSICAL_NETWORK], alloc.physical_network)
-
-    def test_release_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'flat_net1'}
-        self.driver.reserve_provider_segment(self.session, segment)
-        self.driver.release_segment(self.session, segment)
-        alloc = self._get_allocation(self.session, segment)
-        self.assertIsNone(alloc)
-
-    def test_reserve_provider_segment_already_reserved(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'flat_net1'}
-        self.driver.reserve_provider_segment(self.session, segment)
-        self.assertRaises(exc.FlatNetworkInUse,
-                          self.driver.reserve_provider_segment,
-                          self.session, segment)
-
-    def test_allocate_tenant_segment(self):
-        observed = self.driver.allocate_tenant_segment(self.session)
-        self.assertIsNone(observed)
-
-    def test_get_mtu(self):
-        config.cfg.CONF.set_override('segment_mtu', 1475, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1400, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
-        self.assertEqual(1450, self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 1375, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1400, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
-        self.assertEqual(1375, self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 0, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1425, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
-        self.assertEqual(1400, self.driver.get_mtu('physnet2'))
-
-        config.cfg.CONF.set_override('segment_mtu', 0, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 0, group='ml2')
-        self.driver.physnet_mtus = {}
-        self.assertEqual(0, self.driver.get_mtu('physnet1'))
-
-
-class FlatTypeDefaultTest(base.BaseTestCase):
-
-    def setUp(self):
-        super(FlatTypeDefaultTest, self).setUp()
-        self.driver = type_flat.FlatTypeDriver()
-        self.driver.physnet_mtus = []
-
-    def test_validate_provider_segment_default(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
-                   api.PHYSICAL_NETWORK: 'other_flat_net'}
-        self.driver.validate_provider_segment(segment)
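Taken together, the test_get_mtu cases in the flat-type file above pin down the driver's selection rule: path_mtu is ignored (the first case would otherwise yield 1400, not 1450), and the result is the minimum of the nonzero segment_mtu and the per-physnet MTU, with 0 meaning "not configured". A minimal standalone sketch of that rule, assuming those four cases are representative (flat_mtu is an illustrative name, not the driver's API):

    def flat_mtu(segment_mtu, physnet_mtus, physical_network):
        # Collect only the limits that are actually configured.
        candidates = []
        if segment_mtu > 0:
            candidates.append(segment_mtu)
        if physical_network in physnet_mtus:
            candidates.append(int(physnet_mtus[physical_network]))
        return min(candidates) if candidates else 0

    # Mirrors the assertions in test_get_mtu above.
    assert flat_mtu(1475, {'physnet1': 1450, 'physnet2': 1400}, 'physnet1') == 1450
    assert flat_mtu(1375, {'physnet1': 1450, 'physnet2': 1400}, 'physnet1') == 1375
    assert flat_mtu(0, {'physnet1': 1450, 'physnet2': 1400}, 'physnet2') == 1400
    assert flat_mtu(0, {}, 'physnet1') == 0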
diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py
deleted file mode 100644 (file)
index fb0ffdf..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.drivers import type_geneve
-from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel
-from neutron.tests.unit.plugins.ml2 import test_rpc
-from neutron.tests.unit import testlib_api
-
-
-TUNNEL_IP_ONE = "10.10.10.77"
-TUNNEL_IP_TWO = "10.10.10.78"
-HOST_ONE = 'fake_host_one1'
-HOST_TWO = 'fake_host_two2'
-
-
-class GeneveTypeTest(base_type_tunnel.TunnelTypeTestMixin,
-                     testlib_api.SqlTestCase):
-    DRIVER_CLASS = type_geneve.GeneveTypeDriver
-    TYPE = p_const.TYPE_GENEVE
-
-    def test_get_endpoints(self):
-        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
-        self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO)
-
-        endpoints = self.driver.get_endpoints()
-        for endpoint in endpoints:
-            if endpoint['ip_address'] == TUNNEL_IP_ONE:
-                self.assertEqual(HOST_ONE, endpoint['host'])
-            elif endpoint['ip_address'] == TUNNEL_IP_TWO:
-                self.assertEqual(HOST_TWO, endpoint['host'])
-
-
-class GeneveTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin,
-                               testlib_api.SqlTestCase):
-    DRIVER_CLASS = type_geneve.GeneveTypeDriver
-
-
-class GeneveTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin,
-                                test_rpc.RpcCallbacksTestCase,
-                                testlib_api.SqlTestCase):
-    DRIVER_CLASS = type_geneve.GeneveTypeDriver
-    TYPE = p_const.TYPE_GENEVE
diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py
deleted file mode 100644 (file)
index 0471c68..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2 import config
-from neutron.plugins.ml2.drivers import type_gre
-from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel
-from neutron.tests.unit.plugins.ml2 import test_rpc
-from neutron.tests.unit import testlib_api
-
-
-TUNNEL_IP_ONE = "10.10.10.10"
-TUNNEL_IP_TWO = "10.10.10.20"
-HOST_ONE = 'fake_host_one'
-HOST_TWO = 'fake_host_two'
-
-
-def _add_allocation(session, gre_id, allocated=False):
-    allocation = type_gre.GreAllocation(gre_id=gre_id, allocated=allocated)
-    allocation.save(session)
-
-
-def _get_allocation(session, gre_id):
-    return session.query(type_gre.GreAllocation).filter_by(
-        gre_id=gre_id).one()
-
-
-class GreTypeTest(base_type_tunnel.TunnelTypeTestMixin,
-                  testlib_api.SqlTestCase):
-    DRIVER_MODULE = type_gre
-    DRIVER_CLASS = type_gre.GreTypeDriver
-    TYPE = p_const.TYPE_GRE
-
-    def test_get_endpoints(self):
-        self.add_endpoint()
-        self.add_endpoint(
-            base_type_tunnel.TUNNEL_IP_TWO, base_type_tunnel.HOST_TWO)
-
-        endpoints = self.driver.get_endpoints()
-        for endpoint in endpoints:
-            if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE:
-                self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host'])
-            elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO:
-                self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host'])
-
-    def test_get_mtu(self):
-        config.cfg.CONF.set_override('segment_mtu', 1500, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1475, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
-        self.assertEqual(1475 - p_const.GRE_ENCAP_OVERHEAD,
-                         self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 1425, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1475, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1400, 'physnet2': 1400}
-        self.assertEqual(1425 - p_const.GRE_ENCAP_OVERHEAD,
-                         self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 0, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1475, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1425}
-        self.assertEqual(1475 - p_const.GRE_ENCAP_OVERHEAD,
-                         self.driver.get_mtu('physnet2'))
-
-        config.cfg.CONF.set_override('segment_mtu', 0, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 0, group='ml2')
-        self.driver.physnet_mtus = {}
-        self.assertEqual(0, self.driver.get_mtu('physnet1'))
-
-
-class GreTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin,
-                            testlib_api.SqlTestCase):
-    DRIVER_CLASS = type_gre.GreTypeDriver
-
-
-class GreTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin,
-                             test_rpc.RpcCallbacksTestCase,
-                             testlib_api.SqlTestCase):
-    DRIVER_CLASS = type_gre.GreTypeDriver
-    TYPE = p_const.TYPE_GRE
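Read together, the GRE get_mtu cases imply the tunnel-type variant of the rule: the per-physnet table is not consulted (the third case returns 1475 minus overhead even though physnet2 is 1425), the minimum of the nonzero segment_mtu and path_mtu is taken, and the encapsulation overhead is subtracted. A hedged sketch with the overhead as a parameter; the value 42 below is an assumed stand-in for p_const.GRE_ENCAP_OVERHEAD, not a quoted constant:

    def tunnel_mtu(segment_mtu, path_mtu, encap_overhead):
        # 0 means "not configured"; only configured limits participate.
        candidates = [m for m in (segment_mtu, path_mtu) if m > 0]
        return (min(candidates) - encap_overhead) if candidates else 0

    OVH = 42  # assumed stand-in for p_const.GRE_ENCAP_OVERHEAD
    assert tunnel_mtu(1500, 1475, OVH) == 1475 - OVH
    assert tunnel_mtu(1425, 1475, OVH) == 1425 - OVH
    assert tunnel_mtu(0, 1475, OVH) == 1475 - OVH
    assert tunnel_mtu(0, 0, OVH) == 0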
diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_local.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_local.py
deleted file mode 100644 (file)
index 441886c..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) 2014 Thales Services SAS
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import exceptions as exc
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers import type_local
-from neutron.tests import base
-
-
-class LocalTypeTest(base.BaseTestCase):
-
-    def setUp(self):
-        super(LocalTypeTest, self).setUp()
-        self.driver = type_local.LocalTypeDriver()
-        self.session = None
-
-    def test_is_partial_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL}
-        self.assertFalse(self.driver.is_partial_segment(segment))
-
-    def test_validate_provider_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL}
-        self.driver.validate_provider_segment(segment)
-
-    def test_validate_provider_segment_with_unallowed_physical_network(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL,
-                   api.PHYSICAL_NETWORK: 'phys_net'}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment)
-
-    def test_validate_provider_segment_with_unallowed_segmentation_id(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL,
-                   api.SEGMENTATION_ID: 2}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment)
-
-    def test_reserve_provider_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL}
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        self.assertEqual(segment, observed)
-
-    def test_release_provider_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL}
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        self.driver.release_segment(self.session, observed)
-
-    def test_allocate_tenant_segment(self):
-        expected = {api.NETWORK_TYPE: p_const.TYPE_LOCAL}
-        observed = self.driver.allocate_tenant_segment(self.session)
-        self.assertEqual(expected, observed)
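The local type driver exercised above is essentially stateless: validation rejects any physical_network or segmentation_id key, reservation hands the segment back unchanged, and tenant allocation always yields the bare local segment. A compact sketch of that contract under those assumptions (LocalSketch is illustrative, not the driver class):

    class LocalSketch(object):
        """Illustrative restatement of the behaviour the tests assert."""

        def validate(self, segment):
            for key, value in segment.items():
                if key != 'network_type' and value is not None:
                    raise ValueError('%s not allowed for local networks' % key)

        def reserve(self, session, segment):
            return segment  # nothing to persist

        def allocate_tenant_segment(self, session):
            return {'network_type': 'local'}

    driver = LocalSketch()
    driver.validate({'network_type': 'local'})  # passes
    assert driver.allocate_tenant_segment(None) == {'network_type': 'local'}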
diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py
deleted file mode 100644 (file)
index 0444285..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-# Copyright (c) 2014 Thales Services SAS
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from testtools import matchers
-
-from neutron.common import exceptions as exc
-import neutron.db.api as db
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.common import utils as plugin_utils
-from neutron.plugins.ml2 import config
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers import type_vlan
-from neutron.tests.unit import testlib_api
-
-PROVIDER_NET = 'phys_net1'
-TENANT_NET = 'phys_net2'
-VLAN_MIN = 200
-VLAN_MAX = 209
-NETWORK_VLAN_RANGES = [PROVIDER_NET, "%s:%s:%s" %
-                       (TENANT_NET, VLAN_MIN, VLAN_MAX)]
-UPDATED_VLAN_RANGES = {
-    PROVIDER_NET: [],
-    TENANT_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)],
-}
-
-
-class VlanTypeTest(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(VlanTypeTest, self).setUp()
-        config.cfg.CONF.set_override('network_vlan_ranges',
-                                     NETWORK_VLAN_RANGES,
-                                     group='ml2_type_vlan')
-        self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
-            NETWORK_VLAN_RANGES)
-        self.driver = type_vlan.VlanTypeDriver()
-        self.driver._sync_vlan_allocations()
-        self.session = db.get_session()
-        self.driver.physnet_mtus = []
-
-    def test_parse_network_exception_handling(self):
-        with mock.patch.object(plugin_utils,
-                               'parse_network_vlan_ranges') as parse_ranges:
-            parse_ranges.side_effect = Exception('any exception')
-            self.assertRaises(SystemExit,
-                              self.driver._parse_network_vlan_ranges)
-
-    def _get_allocation(self, session, segment):
-        return session.query(type_vlan.VlanAllocation).filter_by(
-            physical_network=segment[api.PHYSICAL_NETWORK],
-            vlan_id=segment[api.SEGMENTATION_ID]).first()
-
-    def test_partial_segment_is_partial_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN}
-        self.assertTrue(self.driver.is_partial_segment(segment))
-
-    def test_specific_segment_is_not_partial_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: PROVIDER_NET,
-                   api.SEGMENTATION_ID: 1}
-        self.assertFalse(self.driver.is_partial_segment(segment))
-
-    def test_validate_provider_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: PROVIDER_NET,
-                   api.SEGMENTATION_ID: 1}
-        self.assertIsNone(self.driver.validate_provider_segment(segment))
-
-    def test_validate_provider_segment_without_segmentation_id(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: TENANT_NET}
-        self.driver.validate_provider_segment(segment)
-
-    def test_validate_provider_segment_without_physical_network(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN}
-        self.driver.validate_provider_segment(segment)
-
-    def test_validate_provider_segment_with_missing_physical_network(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.SEGMENTATION_ID: 1}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment)
-
-    def test_validate_provider_segment_with_invalid_physical_network(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: 'other_phys_net',
-                   api.SEGMENTATION_ID: 1}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment)
-
-    def test_validate_provider_segment_with_invalid_segmentation_id(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: PROVIDER_NET,
-                   api.SEGMENTATION_ID: 5000}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment)
-
-    def test_validate_provider_segment_with_invalid_input(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: PROVIDER_NET,
-                   api.SEGMENTATION_ID: 1,
-                   'invalid': 1}
-        self.assertRaises(exc.InvalidInput,
-                          self.driver.validate_provider_segment,
-                          segment)
-
-    def test_sync_vlan_allocations(self):
-        def check_in_ranges(network_vlan_ranges):
-            vlan_min, vlan_max = network_vlan_ranges[TENANT_NET][0]
-            segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                       api.PHYSICAL_NETWORK: TENANT_NET}
-
-            segment[api.SEGMENTATION_ID] = vlan_min - 1
-            self.assertIsNone(
-                self._get_allocation(self.session, segment))
-            segment[api.SEGMENTATION_ID] = vlan_max + 1
-            self.assertIsNone(
-                self._get_allocation(self.session, segment))
-
-            segment[api.SEGMENTATION_ID] = vlan_min
-            self.assertFalse(
-                self._get_allocation(self.session, segment).allocated)
-            segment[api.SEGMENTATION_ID] = vlan_max
-            self.assertFalse(
-                self._get_allocation(self.session, segment).allocated)
-
-        check_in_ranges(self.network_vlan_ranges)
-        self.driver.network_vlan_ranges = UPDATED_VLAN_RANGES
-        self.driver._sync_vlan_allocations()
-        check_in_ranges(UPDATED_VLAN_RANGES)
-
-    def test_reserve_provider_segment(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: PROVIDER_NET,
-                   api.SEGMENTATION_ID: 101}
-        alloc = self._get_allocation(self.session, segment)
-        self.assertIsNone(alloc)
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        alloc = self._get_allocation(self.session, observed)
-        self.assertTrue(alloc.allocated)
-
-    def test_reserve_provider_segment_already_allocated(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: PROVIDER_NET,
-                   api.SEGMENTATION_ID: 101}
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        self.assertRaises(exc.VlanIdInUse,
-                          self.driver.reserve_provider_segment,
-                          self.session,
-                          observed)
-
-    def test_reserve_provider_segment_in_tenant_pools(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: TENANT_NET,
-                   api.SEGMENTATION_ID: VLAN_MIN}
-        alloc = self._get_allocation(self.session, segment)
-        self.assertFalse(alloc.allocated)
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        alloc = self._get_allocation(self.session, observed)
-        self.assertTrue(alloc.allocated)
-
-    def test_reserve_provider_segment_without_segmentation_id(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: TENANT_NET}
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        alloc = self._get_allocation(self.session, observed)
-        self.assertTrue(alloc.allocated)
-        vlan_id = observed[api.SEGMENTATION_ID]
-        self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
-        self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
-
-    def test_reserve_provider_segment_without_physical_network(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN}
-        observed = self.driver.reserve_provider_segment(self.session, segment)
-        alloc = self._get_allocation(self.session, observed)
-        self.assertTrue(alloc.allocated)
-        vlan_id = observed[api.SEGMENTATION_ID]
-        self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
-        self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
-        self.assertEqual(TENANT_NET, observed[api.PHYSICAL_NETWORK])
-
-    def test_reserve_provider_segment_all_allocateds(self):
-        for __ in range(VLAN_MIN, VLAN_MAX + 1):
-            self.driver.allocate_tenant_segment(self.session)
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN}
-        self.assertRaises(exc.NoNetworkAvailable,
-                          self.driver.reserve_provider_segment,
-                          self.session,
-                          segment)
-
-    def test_get_mtu(self):
-        config.cfg.CONF.set_override('segment_mtu', 1475, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1400, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
-        self.assertEqual(1450, self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 1375, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1400, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
-        self.assertEqual(1375, self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 0, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1400, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
-        self.assertEqual(1450, self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 0, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 0, group='ml2')
-        self.driver.physnet_mtus = {}
-        self.assertEqual(0, self.driver.get_mtu('physnet1'))
-
-    def test_allocate_tenant_segment(self):
-        for __ in range(VLAN_MIN, VLAN_MAX + 1):
-            segment = self.driver.allocate_tenant_segment(self.session)
-            alloc = self._get_allocation(self.session, segment)
-            self.assertTrue(alloc.allocated)
-            vlan_id = segment[api.SEGMENTATION_ID]
-            self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
-            self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
-            self.assertEqual(TENANT_NET, segment[api.PHYSICAL_NETWORK])
-
-    def test_allocate_tenant_segment_no_available(self):
-        for __ in range(VLAN_MIN, VLAN_MAX + 1):
-            self.driver.allocate_tenant_segment(self.session)
-        segment = self.driver.allocate_tenant_segment(self.session)
-        self.assertIsNone(segment)
-
-    def test_release_segment(self):
-        segment = self.driver.allocate_tenant_segment(self.session)
-        self.driver.release_segment(self.session, segment)
-        alloc = self._get_allocation(self.session, segment)
-        self.assertFalse(alloc.allocated)
-
-    def test_release_segment_unallocated(self):
-        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
-                   api.PHYSICAL_NETWORK: PROVIDER_NET,
-                   api.SEGMENTATION_ID: 101}
-        with mock.patch.object(type_vlan.LOG, 'warning') as log_warn:
-            self.driver.release_segment(self.session, segment)
-            log_warn.assert_called_once_with(
-                "No vlan_id %(vlan_id)s found on physical network "
-                "%(physical_network)s",
-                {'vlan_id': 101, 'physical_network': PROVIDER_NET})
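NETWORK_VLAN_RANGES above mixes the two accepted entry forms: a bare physnet name (provider networks only, empty tenant range list) and 'physnet:vlan_min:vlan_max'. A simplified sketch of a parser producing the {physnet: [(min, max), ...]} shape seen in UPDATED_VLAN_RANGES; this is a stand-in for plugin_utils.parse_network_vlan_ranges, which also validates VLAN bounds:

    def parse_vlan_ranges(entries):
        ranges = {}
        for entry in entries:
            if ':' in entry:
                physnet, vlan_min, vlan_max = entry.split(':')
                ranges.setdefault(physnet, []).append(
                    (int(vlan_min), int(vlan_max)))
            else:
                ranges.setdefault(entry, [])
        return ranges

    assert parse_vlan_ranges(['phys_net1', 'phys_net2:200:209']) == {
        'phys_net1': [],
        'phys_net2': [(200, 209)],
    }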
diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py
deleted file mode 100644 (file)
index ac27109..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2 import config
-from neutron.plugins.ml2.drivers import type_vxlan
-from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel
-from neutron.tests.unit.plugins.ml2 import test_rpc
-from neutron.tests.unit import testlib_api
-
-
-VXLAN_UDP_PORT_ONE = 9999
-VXLAN_UDP_PORT_TWO = 8888
-
-
-class VxlanTypeTest(base_type_tunnel.TunnelTypeTestMixin,
-                    testlib_api.SqlTestCase):
-    DRIVER_MODULE = type_vxlan
-    DRIVER_CLASS = type_vxlan.VxlanTypeDriver
-    TYPE = p_const.TYPE_VXLAN
-
-    def add_endpoint(self, ip=base_type_tunnel.TUNNEL_IP_ONE,
-                     host=base_type_tunnel.HOST_ONE):
-        if ip == base_type_tunnel.TUNNEL_IP_ONE:
-            port = VXLAN_UDP_PORT_ONE
-        else:
-            port = VXLAN_UDP_PORT_TWO
-        return self.driver.add_endpoint(ip, host, port)
-
-    def test_add_endpoint(self):
-        endpoint = super(VxlanTypeTest, self).test_add_endpoint()
-        self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port)
-
-    def test_get_endpoint_by_host(self):
-        endpoint = super(VxlanTypeTest, self).test_get_endpoint_by_host()
-        self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port)
-
-    def test_get_endpoint_by_ip(self):
-        endpoint = super(VxlanTypeTest, self).test_get_endpoint_by_ip()
-        self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port)
-
-    def test_get_endpoints(self):
-        self.add_endpoint()
-        self.add_endpoint(base_type_tunnel.TUNNEL_IP_TWO,
-                          base_type_tunnel.HOST_TWO)
-
-        endpoints = self.driver.get_endpoints()
-        for endpoint in endpoints:
-            if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE:
-                self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port'])
-                self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host'])
-            elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO:
-                self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port'])
-                self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host'])
-
-    def test_get_mtu(self):
-        config.cfg.CONF.set_override('segment_mtu', 1500, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1475, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
-        self.assertEqual(1475 - p_const.VXLAN_ENCAP_OVERHEAD,
-                         self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 1450, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1475, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1400, 'physnet2': 1425}
-        self.assertEqual(1450 - p_const.VXLAN_ENCAP_OVERHEAD,
-                         self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 0, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 1450, group='ml2')
-        self.driver.physnet_mtus = {'physnet1': 1425, 'physnet2': 1400}
-        self.assertEqual(1450 - p_const.VXLAN_ENCAP_OVERHEAD,
-                         self.driver.get_mtu('physnet1'))
-
-        config.cfg.CONF.set_override('segment_mtu', 0, group='ml2')
-        config.cfg.CONF.set_override('path_mtu', 0, group='ml2')
-        self.driver.physnet_mtus = {}
-        self.assertEqual(0, self.driver.get_mtu('physnet1'))
-
-
-class VxlanTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin,
-                              testlib_api.SqlTestCase):
-    DRIVER_CLASS = type_vxlan.VxlanTypeDriver
-
-
-class VxlanTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin,
-                               test_rpc.RpcCallbacksTestCase,
-                               testlib_api.SqlTestCase):
-    DRIVER_CLASS = type_vxlan.VxlanTypeDriver
-    TYPE = p_const.TYPE_VXLAN
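VxlanTypeTest differs from the other tunnel test classes only in that each endpoint also carries a UDP port. A minimal in-memory sketch of the endpoint registry shape these tests exercise; the real driver is DB-backed, so this dict-based version is purely illustrative:

    class EndpointRegistry(object):
        def __init__(self):
            self._by_ip = {}

        def add_endpoint(self, ip, host, udp_port):
            self._by_ip[ip] = {'ip_address': ip, 'host': host,
                               'udp_port': udp_port}
            return self._by_ip[ip]

        def get_endpoints(self):
            return list(self._by_ip.values())

    reg = EndpointRegistry()
    reg.add_endpoint('10.10.10.10', 'fake_host_one', 9999)
    reg.add_endpoint('10.10.10.20', 'fake_host_two', 8888)
    assert {e['udp_port'] for e in reg.get_endpoints()} == {9999, 8888}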
diff --git a/neutron/tests/unit/plugins/ml2/extensions/__init__.py b/neutron/tests/unit/plugins/ml2/extensions/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py b/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py
deleted file mode 100644 (file)
index bc739e4..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron._i18n import _
-from neutron.api import extensions
-from neutron.api.v2 import attributes as attr
-
-
-EXTENDED_ATTRIBUTES_2_0 = {
-    'networks': {
-        'network_extension': {'allow_post': True,
-                              'allow_put': True,
-                              'default': attr.ATTR_NOT_SPECIFIED,
-                              'is_visible': True,
-                              'enforce_policy': True},
-    },
-    'subnets': {
-        'subnet_extension': {'allow_post': True,
-                             'allow_put': True,
-                             'default': attr.ATTR_NOT_SPECIFIED,
-                             'is_visible': True,
-                             'enforce_policy': True},
-    },
-    'ports': {
-        'port_extension': {'allow_post': True,
-                           'allow_put': True,
-                           'default': attr.ATTR_NOT_SPECIFIED,
-                           'is_visible': True,
-                           'enforce_policy': True},
-    },
-}
-
-
-class Fake_extension(extensions.ExtensionDescriptor):
-
-    @classmethod
-    def get_name(cls):
-        return "ML2 fake extension"
-
-    @classmethod
-    def get_alias(cls):
-        return "fake_extension"
-
-    @classmethod
-    def get_description(cls):
-        return _("Adds test attributes to core resources.")
-
-    @classmethod
-    def get_updated(cls):
-        return "2014-07-16T10:00:00-00:00"
-
-    def get_extended_resources(self, version):
-        if version == "2.0":
-            return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
diff --git a/neutron/tests/unit/plugins/ml2/extensions/test_port_security.py b/neutron/tests/unit/plugins/ml2/extensions/test_port_security.py
deleted file mode 100644 (file)
index 06ebd5b..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.extensions import portsecurity as psec
-from neutron.plugins.ml2.extensions import port_security
-from neutron.tests.unit.plugins.ml2 import test_plugin
-
-
-class TestML2ExtensionPortSecurity(test_plugin.Ml2PluginV2TestCase):
-    def test_extend_port_dict_no_port_security(self):
-        """Test _extend_port_security_dict won't crash
-        if port_security item is None
-        """
-        for db_data in ({'port_security': None, 'name': 'net1'}, {}):
-            response_data = {}
-
-            driver = port_security.PortSecurityExtensionDriver()
-            driver._extend_port_security_dict(response_data, db_data)
-
-            self.assertTrue(response_data[psec.PORTSECURITY])
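The loop above asserts that the extension driver treats a missing or None port_security row as "enabled". A hedged sketch of that defaulting step; the real driver reads a DB object with a port_security_enabled attribute, and the PORTSECURITY key below is an assumed alias for psec.PORTSECURITY:

    PORTSECURITY = 'port_security_enabled'  # assumed alias for psec.PORTSECURITY

    def extend_port_dict(response_data, db_data):
        # Default to True when the resource has no port_security record.
        record = db_data.get('port_security')
        response_data[PORTSECURITY] = (
            record.port_security_enabled if record else True)

    for db_data in ({'port_security': None, 'name': 'net1'}, {}):
        response = {}
        extend_port_dict(response, db_data)
        assert response[PORTSECURITY] is True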
diff --git a/neutron/tests/unit/plugins/ml2/test_agent_scheduler.py b/neutron/tests/unit/plugins/ml2/test_agent_scheduler.py
deleted file mode 100644 (file)
index 174d3c9..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.tests.unit.db import test_agentschedulers_db
-from neutron.tests.unit.plugins.ml2 import test_plugin
-
-
-class Ml2AgentSchedulerTestCase(
-    test_agentschedulers_db.OvsAgentSchedulerTestCase):
-    plugin_str = test_plugin.PLUGIN_NAME
-    l3_plugin = ('neutron.services.l3_router.'
-                 'l3_router_plugin.L3RouterPlugin')
-
-
-class Ml2L3AgentNotifierTestCase(
-    test_agentschedulers_db.OvsL3AgentNotifierTestCase):
-    plugin_str = test_plugin.PLUGIN_NAME
-    l3_plugin = ('neutron.services.l3_router.'
-                 'l3_router_plugin.L3RouterPlugin')
-
-
-class Ml2DhcpAgentNotifierTestCase(
-    test_agentschedulers_db.OvsDhcpAgentNotifierTestCase):
-    plugin_str = test_plugin.PLUGIN_NAME
diff --git a/neutron/tests/unit/plugins/ml2/test_db.py b/neutron/tests/unit/plugins/ml2/test_db.py
deleted file mode 100644 (file)
index 4d4f9d4..0000000
+++ /dev/null
@@ -1,387 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo_utils import uuidutils
-from sqlalchemy.orm import query
-
-from neutron import context
-from neutron.db import db_base_plugin_v2
-from neutron.db import l3_db
-from neutron.db import models_v2
-from neutron.extensions import portbindings
-from neutron.plugins.ml2 import db as ml2_db
-from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2 import models
-from neutron.tests.unit import testlib_api
-
-
-class Ml2DBTestCase(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(Ml2DBTestCase, self).setUp()
-        self.ctx = context.get_admin_context()
-
-    def _setup_neutron_network(self, network_id):
-        with self.ctx.session.begin(subtransactions=True):
-            self.ctx.session.add(models_v2.Network(id=network_id))
-
-    def _setup_neutron_port(self, network_id, port_id):
-        mac_address = db_base_plugin_v2.NeutronDbPluginV2._generate_mac()
-        with self.ctx.session.begin(subtransactions=True):
-            port = models_v2.Port(id=port_id,
-                                  network_id=network_id,
-                                  mac_address=mac_address,
-                                  admin_state_up=True,
-                                  status='DOWN',
-                                  device_id='',
-                                  device_owner='')
-            self.ctx.session.add(port)
-        return port
-
-    def _setup_neutron_portbinding(self, port_id, vif_type, host):
-        with self.ctx.session.begin(subtransactions=True):
-            self.ctx.session.add(models.PortBinding(port_id=port_id,
-                                                    vif_type=vif_type,
-                                                    host=host))
-
-    def _create_segments(self, segments, is_seg_dynamic=False,
-                         network_id='foo-network-id'):
-        self._setup_neutron_network(network_id)
-        for segment in segments:
-            ml2_db.add_network_segment(
-                self.ctx.session, network_id, segment,
-                is_dynamic=is_seg_dynamic)
-
-        net_segments = ml2_db.get_network_segments(
-                           self.ctx.session, network_id,
-                           filter_dynamic=is_seg_dynamic)
-
-        for segment_index, segment in enumerate(segments):
-            self.assertEqual(segment, net_segments[segment_index])
-
-        return net_segments
-
-    def test_network_segments_for_provider_network(self):
-        segment = {api.NETWORK_TYPE: 'vlan',
-                   api.PHYSICAL_NETWORK: 'physnet1',
-                   api.SEGMENTATION_ID: 1}
-        self._create_segments([segment])
-
-    def test_network_segments_is_dynamic_true(self):
-        segment = {api.NETWORK_TYPE: 'vlan',
-                   api.PHYSICAL_NETWORK: 'physnet1',
-                   api.SEGMENTATION_ID: 1}
-        self._create_segments([segment], is_seg_dynamic=True)
-
-    def test_network_segments_for_multiprovider_network(self):
-        segments = [{api.NETWORK_TYPE: 'vlan',
-                    api.PHYSICAL_NETWORK: 'physnet1',
-                    api.SEGMENTATION_ID: 1},
-                    {api.NETWORK_TYPE: 'vlan',
-                     api.PHYSICAL_NETWORK: 'physnet1',
-                     api.SEGMENTATION_ID: 2}]
-        self._create_segments(segments)
-
-    def test_get_networks_segments(self):
-        segments1 = [{api.NETWORK_TYPE: 'vlan',
-                      api.PHYSICAL_NETWORK: 'physnet1',
-                      api.SEGMENTATION_ID: 1},
-                     {api.NETWORK_TYPE: 'vlan',
-                      api.PHYSICAL_NETWORK: 'physnet1',
-                      api.SEGMENTATION_ID: 2}]
-        segments2 = [{api.NETWORK_TYPE: 'vlan',
-                      api.PHYSICAL_NETWORK: 'physnet1',
-                      api.SEGMENTATION_ID: 3},
-                     {api.NETWORK_TYPE: 'vlan',
-                      api.PHYSICAL_NETWORK: 'physnet1',
-                      api.SEGMENTATION_ID: 4}]
-        net1segs = self._create_segments(segments1, network_id='net1')
-        net2segs = self._create_segments(segments2, network_id='net2')
-        segs = ml2_db.get_networks_segments(self.ctx.session, ['net1', 'net2'])
-        self.assertEqual(net1segs, segs['net1'])
-        self.assertEqual(net2segs, segs['net2'])
-
-    def test_get_networks_segments_no_segments(self):
-        self._create_segments([], network_id='net1')
-        self._create_segments([], network_id='net2')
-        segs = ml2_db.get_networks_segments(self.ctx.session, ['net1', 'net2'])
-        self.assertEqual([], segs['net1'])
-        self.assertEqual([], segs['net2'])
-
-    def test_get_segment_by_id(self):
-        segment = {api.NETWORK_TYPE: 'vlan',
-                   api.PHYSICAL_NETWORK: 'physnet1',
-                   api.SEGMENTATION_ID: 1}
-
-        net_segment = self._create_segments([segment])[0]
-        segment_uuid = net_segment[api.ID]
-
-        net_segment = ml2_db.get_segment_by_id(self.ctx.session, segment_uuid)
-        self.assertEqual(segment, net_segment)
-
-    def test_get_segment_by_id_result_not_found(self):
-        segment_uuid = uuidutils.generate_uuid()
-        net_segment = ml2_db.get_segment_by_id(self.ctx.session, segment_uuid)
-        self.assertIsNone(net_segment)
-
-    def test_delete_network_segment(self):
-        segment = {api.NETWORK_TYPE: 'vlan',
-                   api.PHYSICAL_NETWORK: 'physnet1',
-                   api.SEGMENTATION_ID: 1}
-
-        net_segment = self._create_segments([segment])[0]
-        segment_uuid = net_segment[api.ID]
-
-        ml2_db.delete_network_segment(self.ctx.session, segment_uuid)
-        # Get the segment again and verify it is gone
-        net_segment = ml2_db.get_segment_by_id(self.ctx.session, segment_uuid)
-        self.assertIsNone(net_segment)
-
-    def test_add_port_binding(self):
-        network_id = 'foo-network-id'
-        port_id = 'foo-port-id'
-        self._setup_neutron_network(network_id)
-        self._setup_neutron_port(network_id, port_id)
-
-        port = ml2_db.add_port_binding(self.ctx.session, port_id)
-        self.assertEqual(port_id, port.port_id)
-        self.assertEqual(portbindings.VIF_TYPE_UNBOUND, port.vif_type)
-
-    def test_get_port_binding_host(self):
-        network_id = 'foo-network-id'
-        port_id = 'foo-port-id'
-        host = 'fake_host'
-        vif_type = portbindings.VIF_TYPE_UNBOUND
-        self._setup_neutron_network(network_id)
-        self._setup_neutron_port(network_id, port_id)
-        self._setup_neutron_portbinding(port_id, vif_type, host)
-
-        port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id)
-        self.assertEqual(host, port_host)
-
-    def test_get_port_binding_host_multiple_results_found(self):
-        network_id = 'foo-network-id'
-        port_id = 'foo-port-id'
-        port_id_one = 'foo-port-id-one'
-        port_id_two = 'foo-port-id-two'
-        host = 'fake_host'
-        vif_type = portbindings.VIF_TYPE_UNBOUND
-        self._setup_neutron_network(network_id)
-        self._setup_neutron_port(network_id, port_id_one)
-        self._setup_neutron_portbinding(port_id_one, vif_type, host)
-        self._setup_neutron_port(network_id, port_id_two)
-        self._setup_neutron_portbinding(port_id_two, vif_type, host)
-
-        port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id)
-        self.assertIsNone(port_host)
-
-    def test_get_port_binding_host_result_not_found(self):
-        port_id = uuidutils.generate_uuid()
-
-        port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id)
-        self.assertIsNone(port_host)
-
-    def test_get_port(self):
-        network_id = 'foo-network-id'
-        port_id = 'foo-port-id'
-        self._setup_neutron_network(network_id)
-        self._setup_neutron_port(network_id, port_id)
-
-        port = ml2_db.get_port(self.ctx.session, port_id)
-        self.assertEqual(port_id, port.id)
-
-    def test_get_port_multiple_results_found(self):
-        network_id = 'foo-network-id'
-        port_id = 'foo-port-id'
-        port_id_one = 'foo-port-id-one'
-        port_id_two = 'foo-port-id-two'
-        self._setup_neutron_network(network_id)
-        self._setup_neutron_port(network_id, port_id_one)
-        self._setup_neutron_port(network_id, port_id_two)
-
-        port = ml2_db.get_port(self.ctx.session, port_id)
-        self.assertIsNone(port)
-
-    def test_get_port_result_not_found(self):
-        port_id = uuidutils.generate_uuid()
-        port = ml2_db.get_port(self.ctx.session, port_id)
-        self.assertIsNone(port)
-
-    def test_get_port_from_device_mac(self):
-        network_id = 'foo-network-id'
-        port_id = 'foo-port-id'
-        self._setup_neutron_network(network_id)
-        port = self._setup_neutron_port(network_id, port_id)
-
-        observed_port = ml2_db.get_port_from_device_mac(self.ctx,
-                                                        port['mac_address'])
-        self.assertEqual(port_id, observed_port.id)
-
-    def test_get_locked_port_and_binding(self):
-        network_id = 'foo-network-id'
-        port_id = 'foo-port-id'
-        host = 'fake_host'
-        vif_type = portbindings.VIF_TYPE_UNBOUND
-        self._setup_neutron_network(network_id)
-        self._setup_neutron_port(network_id, port_id)
-        self._setup_neutron_portbinding(port_id, vif_type, host)
-
-        port, binding = ml2_db.get_locked_port_and_binding(self.ctx.session,
-                                                           port_id)
-        self.assertEqual(port_id, port.id)
-        self.assertEqual(port_id, binding.port_id)
-
-    def test_get_locked_port_and_binding_result_not_found(self):
-        port_id = uuidutils.generate_uuid()
-
-        port, binding = ml2_db.get_locked_port_and_binding(self.ctx.session,
-                                                           port_id)
-        self.assertIsNone(port)
-        self.assertIsNone(binding)
-
-
-class Ml2DvrDBTestCase(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(Ml2DvrDBTestCase, self).setUp()
-        self.ctx = context.get_admin_context()
-
-    def _setup_neutron_network(self, network_id, port_ids):
-        with self.ctx.session.begin(subtransactions=True):
-            self.ctx.session.add(models_v2.Network(id=network_id))
-            ports = []
-            for port_id in port_ids:
-                mac_address = (db_base_plugin_v2.NeutronDbPluginV2.
-                               _generate_mac())
-                port = models_v2.Port(id=port_id,
-                                      network_id=network_id,
-                                      mac_address=mac_address,
-                                      admin_state_up=True,
-                                      status='ACTIVE',
-                                      device_id='',
-                                      device_owner='')
-                self.ctx.session.add(port)
-                ports.append(port)
-            return ports
-
-    def _setup_neutron_router(self):
-        with self.ctx.session.begin(subtransactions=True):
-            router = l3_db.Router()
-            self.ctx.session.add(router)
-            return router
-
-    def _setup_dvr_binding(self, network_id, port_id, router_id, host_id):
-        with self.ctx.session.begin(subtransactions=True):
-            record = models.DVRPortBinding(
-                port_id=port_id,
-                host=host_id,
-                router_id=router_id,
-                vif_type=portbindings.VIF_TYPE_UNBOUND,
-                vnic_type=portbindings.VNIC_NORMAL,
-                status='DOWN')
-            self.ctx.session.add(record)
-            return record
-
-    def test_ensure_dvr_port_binding_deals_with_db_duplicate(self):
-        network_id = 'foo_network_id'
-        port_id = 'foo_port_id'
-        router_id = 'foo_router_id'
-        host_id = 'foo_host_id'
-        self._setup_neutron_network(network_id, [port_id])
-        self._setup_dvr_binding(network_id, port_id, router_id, host_id)
-        with mock.patch.object(query.Query, 'first') as query_first:
-            query_first.return_value = []
-            with mock.patch.object(ml2_db.LOG, 'debug') as log_trace:
-                binding = ml2_db.ensure_dvr_port_binding(
-                    self.ctx.session, port_id, host_id, router_id)
-        self.assertTrue(query_first.called)
-        self.assertTrue(log_trace.called)
-        self.assertEqual(port_id, binding.port_id)
-
-    def test_ensure_dvr_port_binding(self):
-        network_id = 'foo_network_id'
-        port_id = 'foo_port_id'
-        self._setup_neutron_network(network_id, [port_id])
-        router = self._setup_neutron_router()
-        ml2_db.ensure_dvr_port_binding(
-            self.ctx.session, port_id, 'foo_host', router.id)
-        expected = (self.ctx.session.query(models.DVRPortBinding).
-                    filter_by(port_id=port_id).one())
-        self.assertEqual(expected.port_id, port_id)
-
-    def test_ensure_dvr_port_binding_multiple_bindings(self):
-        network_id = 'foo_network_id'
-        port_id = 'foo_port_id'
-        self._setup_neutron_network(network_id, [port_id])
-        router = self._setup_neutron_router()
-        ml2_db.ensure_dvr_port_binding(
-            self.ctx.session, port_id, 'foo_host_1', router.id)
-        ml2_db.ensure_dvr_port_binding(
-            self.ctx.session, port_id, 'foo_host_2', router.id)
-        bindings = (self.ctx.session.query(models.DVRPortBinding).
-                    filter_by(port_id=port_id).all())
-        self.assertEqual(2, len(bindings))
-
-    def test_delete_dvr_port_binding(self):
-        network_id = 'foo_network_id'
-        port_id = 'foo_port_id'
-        self._setup_neutron_network(network_id, [port_id])
-        router = self._setup_neutron_router()
-        binding = self._setup_dvr_binding(
-            network_id, port_id, router.id, 'foo_host_id')
-        ml2_db.delete_dvr_port_binding(
-            self.ctx.session, port_id, 'foo_host_id')
-        count = (self.ctx.session.query(models.DVRPortBinding).
-            filter_by(port_id=binding.port_id).count())
-        self.assertFalse(count)
-
-    def test_delete_dvr_port_binding_not_found(self):
-        ml2_db.delete_dvr_port_binding(
-            self.ctx.session, 'foo_port_id', 'foo_host')
-
-    def test_delete_dvr_port_binding_if_stale(self):
-        network_id = 'foo_network_id'
-        port_id = 'foo_port_id'
-        self._setup_neutron_network(network_id, [port_id])
-        binding = self._setup_dvr_binding(
-            network_id, port_id, None, 'foo_host_id')
-
-        ml2_db.delete_dvr_port_binding_if_stale(self.ctx.session, binding)
-        count = (self.ctx.session.query(models.DVRPortBinding).
-            filter_by(port_id=binding.port_id).count())
-        self.assertFalse(count)
-
-    def test_get_dvr_port_binding_by_host_not_found(self):
-        port = ml2_db.get_dvr_port_binding_by_host(
-            self.ctx.session, 'foo_port_id', 'foo_host_id')
-        self.assertIsNone(port)
-
-    def test_get_dvr_port_bindings_not_found(self):
-        port = ml2_db.get_dvr_port_bindings(self.ctx.session, 'foo_port_id')
-        self.assertFalse(len(port))
-
-    def test_get_dvr_port_bindings(self):
-        network_id = 'foo_network_id'
-        port_id_1 = 'foo_port_id_1'
-        port_id_2 = 'foo_port_id_2'
-        self._setup_neutron_network(network_id, [port_id_1, port_id_2])
-        router = self._setup_neutron_router()
-        self._setup_dvr_binding(
-            network_id, port_id_1, router.id, 'foo_host_id_1')
-        self._setup_dvr_binding(
-            network_id, port_id_1, router.id, 'foo_host_id_2')
-        ports = ml2_db.get_dvr_port_bindings(self.ctx.session, 'foo_port_id')
-        self.assertEqual(2, len(ports))
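test_ensure_dvr_port_binding_deals_with_db_duplicate above forces the read-then-create race: the first lookup misses (Query.first is mocked to return nothing), the insert collides with the concurrently created row, and the helper is expected to log and return the existing binding. A generic sketch of the get-or-create shape that satisfies such a test, with the duplicate check injected rather than tied to a specific DB exception class:

    def ensure_binding(session, lookup, create, is_duplicate_error):
        binding = lookup(session)
        if binding:
            return binding
        try:
            return create(session)
        except Exception as exc:  # e.g. a DB duplicate-entry error
            if not is_duplicate_error(exc):
                raise
            # Another writer won the race; fall back to the existing row,
            # typically logging a debug message as the test asserts.
            return lookup(session)

The second lookup after the duplicate error is exactly the code path the mocked Query.first drives the test through.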
diff --git a/neutron/tests/unit/plugins/ml2/test_driver_context.py b/neutron/tests/unit/plugins/ml2/test_driver_context.py
deleted file mode 100644 (file)
index e30349c..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.common import constants
-from neutron.extensions import portbindings
-from neutron.plugins.ml2 import driver_context
-from neutron.tests import base
-
-
-class TestPortContext(base.BaseTestCase):
-
-    # REVISIT(rkukura): These were originally for DvrPortContext tests,
-    # but DvrPortContext functionality has been folded into the
-    # regular PortContext class. Tests for non-DVR-specific
-    # functionality are needed here as well.
-
-    def test_host(self):
-        plugin = mock.Mock()
-        plugin_context = mock.Mock()
-        network = mock.MagicMock()
-        binding = mock.Mock()
-
-        port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE}
-        binding.host = 'foohost'
-
-        with mock.patch.object(driver_context.db, 'get_network_segments'):
-            ctx = driver_context.PortContext(plugin,
-                                             plugin_context,
-                                             port,
-                                             network,
-                                             binding,
-                                             None)
-        self.assertEqual('foohost', ctx.host)
-
-    def test_host_super(self):
-        plugin = mock.Mock()
-        plugin_context = mock.Mock()
-        network = mock.MagicMock()
-        binding = mock.Mock()
-
-        port = {'device_owner': 'compute',
-                portbindings.HOST_ID: 'host'}
-        binding.host = 'foohost'
-
-        with mock.patch.object(driver_context.db, 'get_network_segments'):
-            ctx = driver_context.PortContext(plugin,
-                                             plugin_context,
-                                             port,
-                                             network,
-                                             binding,
-                                             None)
-        self.assertEqual('host', ctx.host)
-
-    def test_status(self):
-        plugin = mock.Mock()
-        plugin_context = mock.Mock()
-        network = mock.MagicMock()
-        binding = mock.Mock()
-
-        port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE}
-        binding.status = 'foostatus'
-
-        with mock.patch.object(driver_context.db, 'get_network_segments'):
-            ctx = driver_context.PortContext(plugin,
-                                             plugin_context,
-                                             port,
-                                             network,
-                                             binding,
-                                             None)
-        self.assertEqual('foostatus', ctx.status)
-
-    def test_status_super(self):
-        plugin = mock.Mock()
-        plugin_context = mock.Mock()
-        network = mock.MagicMock()
-        binding = mock.Mock()
-
-        port = {'device_owner': 'compute',
-                'status': 'status'}
-        binding.status = 'foostatus'
-
-        with mock.patch.object(driver_context.db, 'get_network_segments'):
-            ctx = driver_context.PortContext(plugin,
-                                             plugin_context,
-                                             port,
-                                             network,
-                                             binding,
-                                             None)
-        self.assertEqual('status', ctx.status)
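The four cases above pin down a single dispatch rule: for DVR interface ports, host and status come from the binding record, while for any other device_owner they come from the port dict itself. Sketched as properties, assuming neutron's usual constant values for the device owner and the binding:host_id key:

    DEVICE_OWNER_DVR_INTERFACE = 'network:router_interface_distributed'

    class PortContextSketch(object):
        def __init__(self, port, binding):
            self._port = port
            self._binding = binding

        @property
        def host(self):
            if self._port.get('device_owner') == DEVICE_OWNER_DVR_INTERFACE:
                return self._binding.host  # DVR: binding record wins
            return self._port.get('binding:host_id')

        @property
        def status(self):
            if self._port.get('device_owner') == DEVICE_OWNER_DVR_INTERFACE:
                return self._binding.status
            return self._port.get('status')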
diff --git a/neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py b/neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py
deleted file mode 100644 (file)
index a9c76bc..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron import context
-from neutron.extensions import portsecurity as psec
-from neutron import manager
-from neutron.plugins.ml2 import config
-from neutron.tests.unit.extensions import test_portsecurity as test_psec
-from neutron.tests.unit.plugins.ml2 import test_plugin
-
-
-class PSExtDriverTestCase(test_plugin.Ml2PluginV2TestCase,
-                          test_psec.TestPortSecurity):
-    _extension_drivers = ['port_security']
-
-    def setUp(self):
-        config.cfg.CONF.set_override('extension_drivers',
-                                     self._extension_drivers,
-                                     group='ml2')
-        super(PSExtDriverTestCase, self).setUp()
-
-    def test_create_net_port_security_default(self):
-        _core_plugin = manager.NeutronManager.get_plugin()
-        admin_ctx = context.get_admin_context()
-        _default_value = (psec.EXTENDED_ATTRIBUTES_2_0['networks']
-                          [psec.PORTSECURITY]['default'])
-        args = {'network':
-                {'name': 'test',
-                 'tenant_id': '',
-                 'shared': False,
-                 'admin_state_up': True,
-                 'status': 'ACTIVE'}}
-        # Initialize so the finally block is safe if create_network raises.
-        network = None
-        try:
-            network = _core_plugin.create_network(admin_ctx, args)
-            _value = network[psec.PORTSECURITY]
-        finally:
-            if network:
-                _core_plugin.delete_network(admin_ctx, network['id'])
-        self.assertEqual(_default_value, _value)
-
-    def test_create_port_with_secgroup_none_and_port_security_false(self):
-        if self._skip_security_group:
-            self.skipTest("Plugin does not support security groups")
-        with self.network() as net:
-            with self.subnet(network=net):
-                res = self._create_port('json', net['network']['id'],
-                                        arg_list=('security_groups',
-                                                  'port_security_enabled'),
-                                        security_groups=[],
-                                        port_security_enabled=False)
-                self.assertEqual(res.status_int, 201)
-                port = self.deserialize('json', res)
-                self.assertFalse(port['port'][psec.PORTSECURITY])
-                self.assertEqual([], port['port']['security_groups'])
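Both this file and the extension-driver tests below enable drivers by overriding the ml2 `extension_drivers` option in setUp. A self-contained sketch of that oslo.config pattern, using a standalone ConfigOpts so nothing here depends on neutron's registered options:

```python
from oslo_config import cfg

# Standalone ConfigOpts so the option can be registered locally.
CONF = cfg.ConfigOpts()
CONF.register_opts([cfg.ListOpt('extension_drivers', default=[])],
                   group='ml2')

CONF.set_override('extension_drivers', ['port_security'], group='ml2')
assert CONF.ml2.extension_drivers == ['port_security']

# Tests normally rely on fixture cleanup; clear_override is the manual form.
CONF.clear_override('extension_drivers', group='ml2')
assert CONF.ml2.extension_drivers == []
```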
diff --git a/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py b/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py
deleted file mode 100644 (file)
index 2297007..0000000
+++ /dev/null
@@ -1,303 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import uuid
-
-from neutron import context
-from neutron import manager
-from neutron.plugins.ml2 import config
-from neutron.tests.unit.plugins.ml2.drivers import ext_test
-from neutron.tests.unit.plugins.ml2 import test_plugin
-
-
-class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
-
-    _extension_drivers = ['test']
-
-    def setUp(self):
-        config.cfg.CONF.set_override('extension_drivers',
-                                     self._extension_drivers,
-                                     group='ml2')
-        super(ExtensionDriverTestCase, self).setUp()
-        self._plugin = manager.NeutronManager.get_plugin()
-        self._ctxt = context.get_admin_context()
-
-    def _verify_network_create(self, code, exc_reason):
-        tenant_id = str(uuid.uuid4())
-        data = {'network': {'name': 'net1',
-                            'tenant_id': tenant_id}}
-        req = self.new_create_request('networks', data)
-        res = req.get_response(self.api)
-        self.assertEqual(code, res.status_int)
-
-        network = self.deserialize(self.fmt, res)
-        if exc_reason:
-            self.assertEqual(exc_reason,
-                             network['NeutronError']['type'])
-
-        return (network, tenant_id)
-
-    def _verify_network_update(self, network, code, exc_reason):
-        net_id = network['network']['id']
-        new_name = 'a_brand_new_name'
-        data = {'network': {'name': new_name}}
-        req = self.new_update_request('networks', data, net_id)
-        res = req.get_response(self.api)
-        self.assertEqual(code, res.status_int)
-        error = self.deserialize(self.fmt, res)
-        self.assertEqual(exc_reason,
-                         error['NeutronError']['type'])
-
-    def test_faulty_process_create(self):
-        with mock.patch.object(ext_test.TestExtensionDriver,
-                               'process_create_network',
-                               side_effect=TypeError):
-            net, tenant_id = self._verify_network_create(500,
-                                                    'HTTPInternalServerError')
-            # Verify the operation is rolled back
-            query_params = "tenant_id=%s" % tenant_id
-            nets = self._list('networks', query_params=query_params)
-            self.assertFalse(nets['networks'])
-
-    def test_faulty_process_update(self):
-        with mock.patch.object(ext_test.TestExtensionDriver,
-                               'process_update_network',
-                               side_effect=TypeError):
-            network, tid = self._verify_network_create(201, None)
-            self._verify_network_update(network, 500,
-                                        'HTTPInternalServerError')
-
-    def test_faulty_extend_dict(self):
-        with mock.patch.object(ext_test.TestExtensionDriver,
-                               'extend_network_dict',
-                               side_effect=[None, TypeError]):
-            network, tid = self._verify_network_create(201, None)
-            self._verify_network_update(network, 400, 'ExtensionDriverError')
-
-    def test_network_attr(self):
-        with self.network() as network:
-            # Test create network
-            ent = network['network'].get('network_extension')
-            self.assertIsNotNone(ent)
-
-            # Test list networks
-            res = self._list('networks')
-            val = res['networks'][0].get('network_extension')
-            self.assertEqual('Test_Network_Extension_extend', val)
-
-            # Test network update
-            data = {'network':
-                    {'network_extension': 'Test_Network_Extension_Update'}}
-            res = self._update('networks', network['network']['id'], data)
-            val = res['network'].get('network_extension')
-            self.assertEqual('Test_Network_Extension_Update_update', val)
-
-    def test_subnet_attr(self):
-        with self.subnet() as subnet:
-            # Test create subnet
-            ent = subnet['subnet'].get('subnet_extension')
-            self.assertIsNotNone(ent)
-
-            # Test list subnets
-            res = self._list('subnets')
-            val = res['subnets'][0].get('subnet_extension')
-            self.assertEqual('Test_Subnet_Extension_extend', val)
-
-            # Test subnet update
-            data = {'subnet':
-                    {'subnet_extension': 'Test_Subnet_Extension_Update'}}
-            res = self._update('subnets', subnet['subnet']['id'], data)
-            val = res['subnet'].get('subnet_extension')
-            self.assertEqual('Test_Subnet_Extension_Update_update', val)
-
-    def test_port_attr(self):
-        with self.port() as port:
-            # Test create port
-            ent = port['port'].get('port_extension')
-            self.assertIsNotNone(ent)
-
-            # Test list ports
-            res = self._list('ports')
-            val = res['ports'][0].get('port_extension')
-            self.assertEqual('Test_Port_Extension_extend', val)
-
-            # Test port update
-            data = {'port': {'port_extension': 'Test_Port_Extension_Update'}}
-            res = self._update('ports', port['port']['id'], data)
-            val = res['port'].get('port_extension')
-            self.assertEqual('Test_Port_Extension_Update_update', val)
-
-    def test_extend_network_dict(self):
-        with mock.patch.object(ext_test.TestExtensionDriver,
-                               'process_update_network') as ext_update_net,\
-                mock.patch.object(ext_test.TestExtensionDriver,
-                                  'extend_network_dict') as ext_net_dict,\
-                self.network() as network:
-            net_id = network['network']['id']
-            net_data = {'network': {'id': net_id}}
-            self._plugin.update_network(self._ctxt, net_id, net_data)
-            self.assertTrue(ext_update_net.called)
-            self.assertTrue(ext_net_dict.called)
-
-    def test_extend_subnet_dict(self):
-        with mock.patch.object(ext_test.TestExtensionDriver,
-                               'process_update_subnet') as ext_update_subnet,\
-                mock.patch.object(ext_test.TestExtensionDriver,
-                                  'extend_subnet_dict') as ext_subnet_dict,\
-                self.subnet() as subnet:
-            subnet_id = subnet['subnet']['id']
-            subnet_data = {'subnet': {'id': subnet_id}}
-            self._plugin.update_subnet(self._ctxt, subnet_id, subnet_data)
-            self.assertTrue(ext_update_subnet.called)
-            self.assertTrue(ext_subnet_dict.called)
-
-    def test_extend_port_dict(self):
-        with mock.patch.object(ext_test.TestExtensionDriver,
-                               'process_update_port') as ext_update_port,\
-                mock.patch.object(ext_test.TestExtensionDriver,
-                                  'extend_port_dict') as ext_port_dict,\
-                self.port() as port:
-            port_id = port['port']['id']
-            port_data = {'port': {'id': port_id}}
-            self._plugin.update_port(self._ctxt, port_id, port_data)
-            self.assertTrue(ext_update_port.called)
-            self.assertTrue(ext_port_dict.called)
-
-
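The faulty_* tests above share one pattern: patch a driver hook with `side_effect` so it raises, then assert that nothing leaked into the database. A toy, self-contained version of the same idea (the in-memory `store` stands in for the DB and is purely illustrative, not neutron code):

```python
import mock


class FakeDriver(object):
    def process_create_network(self, plugin_context, data, result):
        pass


def create_network(driver, data, store):
    # Mimic plugin behavior: persist only if every driver hook succeeds.
    try:
        driver.process_create_network(None, data, {})
        store.append(data)
    except TypeError:
        pass  # "rollback": nothing is stored


store = []
with mock.patch.object(FakeDriver, 'process_create_network',
                       side_effect=TypeError):
    create_network(FakeDriver(), {'name': 'net1'}, store)
assert store == []  # the failed operation left no record behind
```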
-class DBExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
-    _extension_drivers = ['testdb']
-
-    def setUp(self):
-        config.cfg.CONF.set_override('extension_drivers',
-                                     self._extension_drivers,
-                                     group='ml2')
-        super(DBExtensionDriverTestCase, self).setUp()
-        self._plugin = manager.NeutronManager.get_plugin()
-        self._ctxt = context.get_admin_context()
-
-    def test_network_attr(self):
-        with self.network() as network:
-            # Test create with default value.
-            net_id = network['network']['id']
-            val = network['network']['network_extension']
-            self.assertEqual("", val)
-            res = self._show('networks', net_id)
-            val = res['network']['network_extension']
-            self.assertEqual("", val)
-
-            # Test list.
-            res = self._list('networks')
-            val = res['networks'][0]['network_extension']
-            self.assertEqual("", val)
-
-        # Test create with explicit value.
-        res = self._create_network(self.fmt,
-                                   'test-network', True,
-                                   arg_list=('network_extension', ),
-                                   network_extension="abc")
-        network = self.deserialize(self.fmt, res)
-        net_id = network['network']['id']
-        val = network['network']['network_extension']
-        self.assertEqual("abc", val)
-        res = self._show('networks', net_id)
-        val = res['network']['network_extension']
-        self.assertEqual("abc", val)
-
-        # Test update.
-        data = {'network': {'network_extension': "def"}}
-        res = self._update('networks', net_id, data)
-        val = res['network']['network_extension']
-        self.assertEqual("def", val)
-        res = self._show('networks', net_id)
-        val = res['network']['network_extension']
-        self.assertEqual("def", val)
-
-    def test_subnet_attr(self):
-        with self.subnet() as subnet:
-            # Test create with default value.
-            net_id = subnet['subnet']['id']
-            val = subnet['subnet']['subnet_extension']
-            self.assertEqual("", val)
-            res = self._show('subnets', net_id)
-            val = res['subnet']['subnet_extension']
-            self.assertEqual("", val)
-
-            # Test list.
-            res = self._list('subnets')
-            val = res['subnets'][0]['subnet_extension']
-            self.assertEqual("", val)
-
-        with self.network() as network:
-            # Test create with explicit value.
-            data = {'subnet':
-                    {'network_id': network['network']['id'],
-                     'cidr': '10.1.0.0/24',
-                     'ip_version': '4',
-                     'tenant_id': self._tenant_id,
-                     'subnet_extension': 'abc'}}
-            req = self.new_create_request('subnets', data, self.fmt)
-            res = req.get_response(self.api)
-            subnet = self.deserialize(self.fmt, res)
-            subnet_id = subnet['subnet']['id']
-            val = subnet['subnet']['subnet_extension']
-            self.assertEqual("abc", val)
-            res = self._show('subnets', subnet_id)
-            val = res['subnet']['subnet_extension']
-            self.assertEqual("abc", val)
-
-            # Test update.
-            data = {'subnet': {'subnet_extension': "def"}}
-            res = self._update('subnets', subnet_id, data)
-            val = res['subnet']['subnet_extension']
-            self.assertEqual("def", val)
-            res = self._show('subnets', subnet_id)
-            val = res['subnet']['subnet_extension']
-            self.assertEqual("def", val)
-
-    def test_port_attr(self):
-        with self.port() as port:
-            # Test create with default value.
-            net_id = port['port']['id']
-            val = port['port']['port_extension']
-            self.assertEqual("", val)
-            res = self._show('ports', net_id)
-            val = res['port']['port_extension']
-            self.assertEqual("", val)
-
-            # Test list.
-            res = self._list('ports')
-            val = res['ports'][0]['port_extension']
-            self.assertEqual("", val)
-
-        with self.network() as network:
-            # Test create with explicit value.
-            res = self._create_port(self.fmt,
-                                    network['network']['id'],
-                                    arg_list=('port_extension', ),
-                                    port_extension="abc")
-            port = self.deserialize(self.fmt, res)
-            port_id = port['port']['id']
-            val = port['port']['port_extension']
-            self.assertEqual("abc", val)
-            res = self._show('ports', port_id)
-            val = res['port']['port_extension']
-            self.assertEqual("abc", val)
-
-            # Test update.
-            data = {'port': {'port_extension': "def"}}
-            res = self._update('ports', port_id, data)
-            val = res['port']['port_extension']
-            self.assertEqual("def", val)
-            res = self._show('ports', port_id)
-            val = res['port']['port_extension']
-            self.assertEqual("def", val)
diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py
deleted file mode 100644 (file)
index 4db89f8..0000000
+++ /dev/null
@@ -1,1879 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-
-import fixtures
-import mock
-import six
-import testtools
-import uuid
-import webob
-
-from oslo_db import exception as db_exc
-from oslo_utils import uuidutils
-from sqlalchemy.orm import exc as sqla_exc
-
-from neutron._i18n import _
-from neutron.callbacks import registry
-from neutron.common import constants
-from neutron.common import exceptions as exc
-from neutron.common import utils
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import api as db_api
-from neutron.db import db_base_plugin_v2 as base_plugin
-from neutron.db import l3_db
-from neutron.db import models_v2
-from neutron.extensions import availability_zone as az_ext
-from neutron.extensions import external_net
-from neutron.extensions import multiprovidernet as mpnet
-from neutron.extensions import portbindings
-from neutron.extensions import providernet as pnet
-from neutron import manager
-from neutron.plugins.common import constants as p_const
-from neutron.plugins.ml2.common import exceptions as ml2_exc
-from neutron.plugins.ml2 import config
-from neutron.plugins.ml2 import db as ml2_db
-from neutron.plugins.ml2 import driver_api
-from neutron.plugins.ml2 import driver_context
-from neutron.plugins.ml2.drivers import type_vlan
-from neutron.plugins.ml2 import models
-from neutron.plugins.ml2 import plugin as ml2_plugin
-from neutron.services.qos import qos_consts
-from neutron.tests import base
-from neutron.tests.unit import _test_extension_portbindings as test_bindings
-from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc
-from neutron.tests.unit.db import test_allowedaddresspairs_db as test_pair
-from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
-from neutron.tests.unit.db import test_ipam_pluggable_backend as test_ipam
-from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts
-from neutron.tests.unit.plugins.ml2.drivers import mechanism_logger as \
-     mech_logger
-from neutron.tests.unit.plugins.ml2.drivers import mechanism_test as mech_test
-
-
-config.cfg.CONF.import_opt('network_vlan_ranges',
-                           'neutron.plugins.ml2.drivers.type_vlan',
-                           group='ml2_type_vlan')
-
-
-PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-
-DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-HOST = 'fake_host'
-
-
-# TODO(marun) - Move to somewhere common for reuse
-class PluginConfFixture(fixtures.Fixture):
-    """Plugin configuration shared across the unit and functional tests."""
-
-    def __init__(self, plugin_name, parent_setup=None):
-        super(PluginConfFixture, self).__init__()
-        self.plugin_name = plugin_name
-        self.parent_setup = parent_setup
-
-    def _setUp(self):
-        if self.parent_setup:
-            self.parent_setup()
-
-
-class Ml2ConfFixture(PluginConfFixture):
-
-    def __init__(self, parent_setup=None):
-        super(Ml2ConfFixture, self).__init__(PLUGIN_NAME, parent_setup)
-
-
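PluginConfFixture follows the standard fixtures.Fixture protocol: `_setUp` does the work, and any cleanups registered there run automatically on teardown. A minimal self-contained illustration (the names are examples only):

```python
import fixtures


class ExampleFixture(fixtures.Fixture):
    def __init__(self, parent_setup=None):
        super(ExampleFixture, self).__init__()
        self.parent_setup = parent_setup
        self.active = False

    def _setUp(self):
        if self.parent_setup:
            self.parent_setup()
        self.active = True
        # Cleanups run in reverse order when the fixture is torn down.
        self.addCleanup(setattr, self, 'active', False)


fix = ExampleFixture(parent_setup=lambda: None)
with fix:
    assert fix.active
assert not fix.active  # cleanup ran on exit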
-class Ml2PluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
-
-    _mechanism_drivers = ['logger', 'test']
-    l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
-                 'TestL3NatServicePlugin')
-
-    def setup_parent(self):
-        """Perform parent setup with the common plugin configuration class."""
-        service_plugins = {'l3_plugin_name': self.l3_plugin}
-        # Ensure that the parent setup can be called without arguments
-        # by the common configuration setUp.
-        parent_setup = functools.partial(
-            super(Ml2PluginV2TestCase, self).setUp,
-            plugin=PLUGIN_NAME,
-            service_plugins=service_plugins,
-        )
-        self.useFixture(Ml2ConfFixture(parent_setup))
-        self.port_create_status = 'DOWN'
-
-    def setUp(self):
-        # Enable the test mechanism driver to ensure that
-        # we can successfully call through to all mechanism
-        # driver APIs.
-        config.cfg.CONF.set_override('mechanism_drivers',
-                                     self._mechanism_drivers,
-                                     group='ml2')
-        self.physnet = 'physnet1'
-        self.vlan_range = '1:100'
-        self.vlan_range2 = '200:300'
-        self.physnet2 = 'physnet2'
-        self.phys_vrange = ':'.join([self.physnet, self.vlan_range])
-        self.phys2_vrange = ':'.join([self.physnet2, self.vlan_range2])
-        config.cfg.CONF.set_override('network_vlan_ranges',
-                                     [self.phys_vrange, self.phys2_vrange],
-                                     group='ml2_type_vlan')
-        self.setup_parent()
-        self.driver = ml2_plugin.Ml2Plugin()
-        self.context = context.get_admin_context()
-
-
-class TestMl2BulkToggleWithoutBulkless(Ml2PluginV2TestCase):
-
-    _mechanism_drivers = ['logger', 'test']
-
-    def test_bulk_enabled_with_bulk_drivers(self):
-        self.assertFalse(self._skip_native_bulk)
-
-
-class TestMl2SupportedQosRuleTypes(Ml2PluginV2TestCase):
-
-    def test_empty_driver_list(self):
-        mech_drivers_mock = mock.PropertyMock(return_value=[])
-        with mock.patch.object(self.driver.mechanism_manager,
-                               'ordered_mech_drivers',
-                               new_callable=mech_drivers_mock):
-            self.assertEqual(
-                [], self.driver.mechanism_manager.supported_qos_rule_types)
-
-    def test_no_rule_types_in_common(self):
-        self.assertEqual(
-            [], self.driver.mechanism_manager.supported_qos_rule_types)
-
-    @mock.patch.object(mech_logger.LoggerMechanismDriver,
-                       'supported_qos_rule_types',
-                       new_callable=mock.PropertyMock,
-                       create=True)
-    @mock.patch.object(mech_test.TestMechanismDriver,
-                       'supported_qos_rule_types',
-                       new_callable=mock.PropertyMock,
-                       create=True)
-    def test_rule_type_in_common(self, *mocks):
-        # make sure both plugins have the same supported qos rule types
-        for mock_ in mocks:
-            mock_.return_value = qos_consts.VALID_RULE_TYPES
-        self.assertEqual(
-            qos_consts.VALID_RULE_TYPES,
-            self.driver.mechanism_manager.supported_qos_rule_types)
-
-    @mock.patch.object(mech_test.TestMechanismDriver,
-                       'supported_qos_rule_types',
-                       new_callable=mock.PropertyMock,
-                       return_value=qos_consts.VALID_RULE_TYPES,
-                       create=True)
-    @mock.patch.object(mech_logger.LoggerMechanismDriver,
-                       '_supports_port_binding',
-                       new_callable=mock.PropertyMock,
-                       return_value=False)
-    def test_rule_types_with_driver_that_does_not_implement_binding(self,
-                                                                    *mocks):
-        self.assertEqual(
-            qos_consts.VALID_RULE_TYPES,
-            self.driver.mechanism_manager.supported_qos_rule_types)
-
-
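The QoS tests above lean on a less common mock feature: `new_callable=mock.PropertyMock` together with `create=True`, which lets a test stub a property the class may not even define. A compact standalone demonstration:

```python
import mock


class Driver(object):
    """Deliberately has no supported_qos_rule_types attribute."""


with mock.patch.object(Driver, 'supported_qos_rule_types',
                       new_callable=mock.PropertyMock,
                       create=True,
                       return_value=['bandwidth_limit']) as prop:
    assert Driver().supported_qos_rule_types == ['bandwidth_limit']
    prop.assert_called_once_with()  # property access is recorded as a call
```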
-class TestMl2BasicGet(test_plugin.TestBasicGet,
-                      Ml2PluginV2TestCase):
-    pass
-
-
-class TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse,
-                            Ml2PluginV2TestCase):
-    pass
-
-
-class TestMl2NetworksV2(test_plugin.TestNetworksV2,
-                        Ml2PluginV2TestCase):
-    def setUp(self, plugin=None):
-        super(TestMl2NetworksV2, self).setUp()
-        # provider networks
-        self.pnets = [{'name': 'net1',
-                       pnet.NETWORK_TYPE: 'vlan',
-                       pnet.PHYSICAL_NETWORK: 'physnet1',
-                       pnet.SEGMENTATION_ID: 1,
-                       'tenant_id': 'tenant_one'},
-                      {'name': 'net2',
-                       pnet.NETWORK_TYPE: 'vlan',
-                       pnet.PHYSICAL_NETWORK: 'physnet2',
-                       pnet.SEGMENTATION_ID: 210,
-                       'tenant_id': 'tenant_one'},
-                      {'name': 'net3',
-                       pnet.NETWORK_TYPE: 'vlan',
-                       pnet.PHYSICAL_NETWORK: 'physnet2',
-                       pnet.SEGMENTATION_ID: 220,
-                       'tenant_id': 'tenant_one'}
-                      ]
-        # multiprovider networks
-        self.mp_nets = [{'name': 'net4',
-                         mpnet.SEGMENTS:
-                             [{pnet.NETWORK_TYPE: 'vlan',
-                               pnet.PHYSICAL_NETWORK: 'physnet2',
-                               pnet.SEGMENTATION_ID: 1},
-                              {pnet.NETWORK_TYPE: 'vlan',
-                               pnet.PHYSICAL_NETWORK: 'physnet2',
-                               pnet.SEGMENTATION_ID: 202}],
-                         'tenant_id': 'tenant_one'}
-                        ]
-        self.nets = self.mp_nets + self.pnets
-
-    def test_port_delete_helper_tolerates_failure(self):
-        plugin = manager.NeutronManager.get_plugin()
-        with mock.patch.object(plugin, "delete_port",
-                               side_effect=exc.PortNotFound(port_id="123")):
-            plugin._delete_ports(mock.MagicMock(), [mock.MagicMock()])
-
-        with mock.patch.object(plugin, "delete_port",
-                               side_effect=sqla_exc.ObjectDeletedError(None)):
-            plugin._delete_ports(mock.MagicMock(), [mock.MagicMock()])
-
-    def test_subnet_delete_helper_tolerates_failure(self):
-        plugin = manager.NeutronManager.get_plugin()
-        with mock.patch.object(plugin, "delete_subnet",
-                               side_effect=exc.SubnetNotFound(subnet_id="1")):
-            plugin._delete_subnets(mock.MagicMock(), [mock.MagicMock()])
-
-        with mock.patch.object(plugin, "delete_subnet",
-                               side_effect=sqla_exc.ObjectDeletedError(None)):
-            plugin._delete_subnets(mock.MagicMock(), [mock.MagicMock()])
-
-    def _create_and_verify_networks(self, networks):
-        for net_idx, net in enumerate(networks):
-            # create
-            req = self.new_create_request('networks',
-                                          {'network': net})
-            # verify
-            network = self.deserialize(self.fmt,
-                                       req.get_response(self.api))['network']
-            if mpnet.SEGMENTS not in net:
-                for k, v in six.iteritems(net):
-                    self.assertEqual(net[k], network[k])
-                    self.assertNotIn(mpnet.SEGMENTS, network)
-            else:
-                segments = network[mpnet.SEGMENTS]
-                expected_segments = net[mpnet.SEGMENTS]
-                self.assertEqual(len(expected_segments), len(segments))
-                for expected, actual in zip(expected_segments, segments):
-                    self.assertEqual(expected, actual)
-
-    def _lookup_network_by_segmentation_id(self, seg_id, num_expected_nets):
-        params_str = "%s=%s" % (pnet.SEGMENTATION_ID, seg_id)
-        net_req = self.new_list_request('networks', None,
-                                        params=params_str)
-        networks = self.deserialize(self.fmt, net_req.get_response(self.api))
-        if num_expected_nets:
-            self.assertIsNotNone(networks)
-            self.assertEqual(num_expected_nets, len(networks['networks']))
-        else:
-            self.assertIsNone(networks)
-        return networks
-
-    def test_list_networks_with_segmentation_id(self):
-        self._create_and_verify_networks(self.pnets)
-        # verify we can find the network that we expect
-        lookup_vlan_id = 1
-        expected_net = [n for n in self.pnets
-                        if n[pnet.SEGMENTATION_ID] == lookup_vlan_id].pop()
-        networks = self._lookup_network_by_segmentation_id(lookup_vlan_id, 1)
-        # verify all provider attributes
-        network = networks['networks'][0]
-        for attr in pnet.ATTRIBUTES:
-            self.assertEqual(expected_net[attr], network[attr])
-
-    def test_list_mpnetworks_with_segmentation_id(self):
-        self._create_and_verify_networks(self.nets)
-
-        # get all networks with seg_id=1 (including multisegment networks)
-        lookup_vlan_id = 1
-        networks = self._lookup_network_by_segmentation_id(lookup_vlan_id, 2)
-
-        # get the mpnet
-        networks = [n for n in networks['networks'] if mpnet.SEGMENTS in n]
-        network = networks.pop()
-        # verify attributes of the looked up item
-        segments = network[mpnet.SEGMENTS]
-        expected_segments = self.mp_nets[0][mpnet.SEGMENTS]
-        self.assertEqual(len(expected_segments), len(segments))
-        for expected, actual in zip(expected_segments, segments):
-            self.assertEqual(expected, actual)
-
-    def test_create_network_segment_allocation_fails(self):
-        plugin = manager.NeutronManager.get_plugin()
-        with mock.patch.object(
-            plugin.type_manager, 'create_network_segments',
-            side_effect=db_exc.RetryRequest(ValueError())
-        ) as f:
-            data = {'network': {'tenant_id': 'sometenant', 'name': 'dummy',
-                                'admin_state_up': True, 'shared': False}}
-            req = self.new_create_request('networks', data)
-            res = req.get_response(self.api)
-            self.assertEqual(500, res.status_int)
-            self.assertEqual(db_api.MAX_RETRIES + 1, f.call_count)
-
-
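test_create_network_segment_allocation_fails asserts MAX_RETRIES + 1 calls, which encodes the RetryRequest contract: the API layer re-runs the whole operation until retries are exhausted, then surfaces the wrapped exception. A hedged sketch of that loop (neutron's real retry decorator is more involved; MAX_RETRIES here is a stand-in):

```python
from oslo_db import exception as db_exc

MAX_RETRIES = 10  # stand-in for neutron.db.api.MAX_RETRIES


def run_with_retries(fn, max_retries=MAX_RETRIES):
    for attempt in range(max_retries + 1):
        try:
            return fn()
        except db_exc.RetryRequest as e:
            if attempt == max_retries:
                raise e.inner_exc  # retries exhausted


calls = []


def always_retry():
    calls.append(1)
    raise db_exc.RetryRequest(ValueError())


try:
    run_with_retries(always_retry)
except ValueError:
    pass
assert len(calls) == MAX_RETRIES + 1  # matches the test's expectation
```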
-class TestExternalNetwork(Ml2PluginV2TestCase):
-
-    def _create_external_network(self):
-        data = {'network': {'name': 'net1',
-                            'router:external': 'True',
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt,
-                                   network_req.get_response(self.api))
-        return network
-
-    def test_external_network_type_none(self):
-        config.cfg.CONF.set_default('external_network_type',
-                                    None,
-                                    group='ml2')
-
-        network = self._create_external_network()
-        # For an external network, the network type is expected to
-        # fall back to tenant_network_types, which defaults to 'local'.
-        self.assertEqual(p_const.TYPE_LOCAL,
-                         network['network'][pnet.NETWORK_TYPE])
-        # No physical network was specified, so None is expected.
-        self.assertIsNone(network['network'][pnet.PHYSICAL_NETWORK])
-        # External network will not have a segmentation id.
-        self.assertIsNone(network['network'][pnet.SEGMENTATION_ID])
-        # External network will not have multiple segments.
-        self.assertNotIn(mpnet.SEGMENTS, network['network'])
-
-    def test_external_network_type_vlan(self):
-        config.cfg.CONF.set_default('external_network_type',
-                                    p_const.TYPE_VLAN,
-                                    group='ml2')
-
-        network = self._create_external_network()
-        # For an external network, the type is expected to be 'vlan'.
-        self.assertEqual(p_const.TYPE_VLAN,
-                         network['network'][pnet.NETWORK_TYPE])
-        # Physical network is expected.
-        self.assertIsNotNone(network['network'][pnet.PHYSICAL_NETWORK])
-        # External network will have a segmentation id.
-        self.assertIsNotNone(network['network'][pnet.SEGMENTATION_ID])
-        # External network will not have multiple segments.
-        self.assertNotIn(mpnet.SEGMENTS, network['network'])
-
-
-class TestMl2NetworksWithVlanTransparencyAndMTU(TestMl2NetworksV2):
-    def setUp(self, plugin=None):
-        config.cfg.CONF.set_override('path_mtu', 1000, group='ml2')
-        config.cfg.CONF.set_override('segment_mtu', 1000, group='ml2')
-        config.cfg.CONF.set_override('advertise_mtu', True)
-        config.cfg.CONF.set_override('vlan_transparent', True)
-        super(TestMl2NetworksWithVlanTransparencyAndMTU, self).setUp(plugin)
-
-    def test_create_network_vlan_transparent_and_mtu(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1'}],
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        res = network_req.get_response(self.api)
-        self.assertEqual(201, res.status_int)
-        network = self.deserialize(self.fmt, res)['network']
-        self.assertEqual(1000, network['mtu'])
-        self.assertIn('vlan_transparent', network)
-
-
-class TestMl2NetworksWithAvailabilityZone(TestMl2NetworksV2):
-    def test_create_network_availability_zone(self):
-        az_hints = ['az1', 'az2']
-        data = {'network': {'name': 'net1',
-                            az_ext.AZ_HINTS: az_hints,
-                            'tenant_id': 'tenant_one'}}
-        with mock.patch.object(agents_db.AgentAvailabilityZoneMixin,
-                               'validate_availability_zones'):
-            network_req = self.new_create_request('networks', data)
-            res = network_req.get_response(self.api)
-            self.assertEqual(201, res.status_int)
-            network = self.deserialize(self.fmt, res)['network']
-            self.assertEqual(az_hints, network[az_ext.AZ_HINTS])
-
-
-class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
-                       Ml2PluginV2TestCase):
-    def test_delete_subnet_race_with_dhcp_port_creation(self):
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                subnet_id = subnet['subnet']['id']
-                attempt = [0]
-
-                def check_and_create_ports(context, subnet_id):
-                    """A method to emulate race condition.
-
-                    Adds dhcp port in the middle of subnet delete
-                    """
-                    if attempt[0] > 0:
-                        return False
-                    attempt[0] += 1
-                    data = {'port': {'network_id': network['network']['id'],
-                                     'tenant_id':
-                                     network['network']['tenant_id'],
-                                     'name': 'port1',
-                                     'admin_state_up': 1,
-                                     'device_owner':
-                                     constants.DEVICE_OWNER_DHCP,
-                                     'fixed_ips': [{'subnet_id': subnet_id}]}}
-                    port_req = self.new_create_request('ports', data)
-                    port_res = port_req.get_response(self.api)
-                    self.assertEqual(201, port_res.status_int)
-                    return (context.session.query(models_v2.IPAllocation).
-                            filter_by(subnet_id=subnet_id).
-                            join(models_v2.Port).first())
-
-                plugin = manager.NeutronManager.get_plugin()
-                # Mock _subnet_check_ip_allocations with a method that
-                # creates a DHCP port 'in the middle' of the subnet delete,
-                # forcing a retry; the subnet is then deleted on the
-                # second attempt.
-                with mock.patch.object(plugin, '_subnet_check_ip_allocations',
-                                       side_effect=check_and_create_ports):
-                    req = self.new_delete_request('subnets', subnet_id)
-                    res = req.get_response(self.api)
-                    self.assertEqual(204, res.status_int)
-
-
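The closure-with-a-mutable-cell trick in check_and_create_ports generalizes to any "interfere exactly once" fake. A tiny standalone version of the idiom:

```python
def one_shot(interfere, normal):
    """Return a callable that runs `interfere` on the first call only."""
    attempt = [0]  # mutable cell; py2-compatible alternative to nonlocal

    def _fake(*args, **kwargs):
        if attempt[0] == 0:
            attempt[0] += 1
            return interfere(*args, **kwargs)
        return normal(*args, **kwargs)
    return _fake


calls = []
fake = one_shot(lambda: calls.append('race'),
                lambda: calls.append('normal'))
fake(), fake(), fake()
assert calls == ['race', 'normal', 'normal']
```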
-class TestMl2DbOperationBounds(test_plugin.DbOperationBoundMixin,
-                               Ml2PluginV2TestCase):
-    """Test cases to assert constant query count for list operations.
-
-    These test cases assert that an increase in the number of objects
-    does not result in an increase in the number of db operations: all
-    database lookups during a list operation should be performed in
-    bulk, so the query count stays the same whether the list contains
-    one object or two.
-    """
-
-    def make_network(self):
-        return self._make_network(self.fmt, 'name', True)
-
-    def make_subnet(self):
-        net = self.make_network()
-        setattr(self, '_subnet_count', getattr(self, '_subnet_count', 0) + 1)
-        cidr = '1.%s.0.0/24' % self._subnet_count
-        return self._make_subnet(self.fmt, net, None, cidr)
-
-    def make_port(self):
-        net = self.make_network()
-        return self._make_port(self.fmt, net['network']['id'])
-
-    def test_network_list_queries_constant(self):
-        self._assert_object_list_queries_constant(self.make_network,
-                                                  'networks')
-
-    def test_subnet_list_queries_constant(self):
-        self._assert_object_list_queries_constant(self.make_subnet, 'subnets')
-
-    def test_port_list_queries_constant(self):
-        self._assert_object_list_queries_constant(self.make_port, 'ports')
-
-
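The DbOperationBoundMixin implementation is not shown in this diff; one plausible way to implement its assertion is to count statements via SQLAlchemy's before_cursor_execute event, sketched here against an in-memory SQLite engine (an assumption, not the mixin's actual code):

```python
import sqlalchemy as sa
from sqlalchemy import event


def count_queries(engine, fn):
    stmts = []

    def record(conn, cursor, stmt, params, context, executemany):
        stmts.append(stmt)

    event.listen(engine, 'before_cursor_execute', record)
    try:
        fn()
    finally:
        event.remove(engine, 'before_cursor_execute', record)
    return len(stmts)


engine = sa.create_engine('sqlite://')
baseline = count_queries(
    engine, lambda: engine.connect().execute(sa.text('SELECT 1')))
again = count_queries(
    engine, lambda: engine.connect().execute(sa.text('SELECT 1')))
assert baseline == again  # constant query count, as the tests assert
```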
-class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
-
-    def test_update_port_status_build(self):
-        with self.port() as port:
-            self.assertEqual('DOWN', port['port']['status'])
-            self.assertEqual('DOWN', self.port_create_status)
-
-    def test_update_port_status_short_id(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        with self.port() as port:
-            with mock.patch.object(ml2_db, 'get_binding_levels',
-                                   return_value=[]) as mock_gbl:
-                port_id = port['port']['id']
-                short_id = port_id[:11]
-                plugin.update_port_status(ctx, short_id, 'UP')
-                mock_gbl.assert_called_once_with(mock.ANY, port_id, mock.ANY)
-
-    def test_update_port_fixed_ip_changed(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        with self.port() as port, mock.patch.object(
-                plugin.notifier,
-                'security_groups_member_updated') as sg_member_update:
-            port['port']['fixed_ips'][0]['ip_address'] = '10.0.0.3'
-            plugin.update_port(ctx, port['port']['id'], port)
-            self.assertTrue(sg_member_update.called)
-
-    def test_update_port_status_with_network(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        with self.port() as port:
-            net = plugin.get_network(ctx, port['port']['network_id'])
-            with mock.patch.object(plugin, 'get_network') as get_net:
-                plugin.update_port_status(ctx, port['port']['id'], 'UP',
-                                          network=net)
-                self.assertFalse(get_net.called)
-
-    def test_update_port_mac(self):
-        self.check_update_port_mac(
-            host_arg={portbindings.HOST_ID: HOST},
-            arg_list=(portbindings.HOST_ID,))
-
-    def test_update_non_existent_port(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        data = {'port': {'admin_state_up': False}}
-        self.assertRaises(exc.PortNotFound, plugin.update_port, ctx,
-                          'invalid-uuid', data)
-
-    def test_delete_non_existent_port(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        with mock.patch.object(ml2_plugin.LOG, 'debug') as log_debug:
-            plugin.delete_port(ctx, 'invalid-uuid', l3_port_check=False)
-            log_debug.assert_has_calls([
-                mock.call(_("Deleting port %s"), 'invalid-uuid'),
-                mock.call(_("The port '%s' was deleted"), 'invalid-uuid')
-            ])
-
-    def test_l3_cleanup_on_net_delete(self):
-        l3plugin = manager.NeutronManager.get_service_plugins().get(
-            p_const.L3_ROUTER_NAT)
-        kwargs = {'arg_list': (external_net.EXTERNAL,),
-                  external_net.EXTERNAL: True}
-        with self.network(**kwargs) as n:
-            with self.subnet(network=n, cidr='200.0.0.0/22'):
-                l3plugin.create_floatingip(
-                    context.get_admin_context(),
-                    {'floatingip': {'floating_network_id': n['network']['id'],
-                                    'tenant_id': n['network']['tenant_id']}}
-                )
-        self._delete('networks', n['network']['id'])
-        flips = l3plugin.get_floatingips(context.get_admin_context())
-        self.assertFalse(flips)
-
-    def test_create_ports_bulk_port_binding_failure(self):
-        ctx = context.get_admin_context()
-        with self.network() as net:
-            plugin = manager.NeutronManager.get_plugin()
-
-            with mock.patch.object(plugin, '_bind_port_if_needed',
-                side_effect=ml2_exc.MechanismDriverError(
-                    method='create_port_bulk')) as _bind_port_if_needed:
-
-                res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
-                                             'test', True, context=ctx)
-
-                self.assertTrue(_bind_port_if_needed.called)
-                # We expect a 500 as we injected a fault in the plugin
-                self._validate_behavior_on_bulk_failure(
-                    res, 'ports', webob.exc.HTTPServerError.code)
-
-    def test_create_ports_bulk_with_sec_grp(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        with self.network() as net,\
-                mock.patch.object(plugin.notifier,
-                                  'security_groups_member_updated') as m_upd,\
-                mock.patch.object(plugin.notifier,
-                                  'security_groups_provider_updated') as p_upd:
-
-            res = self._create_port_bulk(self.fmt, 3, net['network']['id'],
-                                         'test', True, context=ctx)
-            ports = self.deserialize(self.fmt, res)
-            used_sg = ports['ports'][0]['security_groups']
-            m_upd.assert_called_once_with(ctx, used_sg)
-            self.assertFalse(p_upd.called)
-
-    def _check_security_groups_provider_updated_args(self, p_upd_mock, net_id):
-        query_params = "network_id=%s" % net_id
-        network_ports = self._list('ports', query_params=query_params)
-        network_ports_ids = [port['id'] for port in network_ports['ports']]
-        self.assertTrue(p_upd_mock.called)
-        p_upd_args = p_upd_mock.call_args
-        ports_ids = p_upd_args[0][1]
-        self.assertEqual(sorted(network_ports_ids), sorted(ports_ids))
-
-    def test_create_ports_bulk_with_sec_grp_member_provider_update(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        with self.network() as net,\
-                mock.patch.object(plugin.notifier,
-                                  'security_groups_member_updated') as m_upd,\
-                mock.patch.object(plugin.notifier,
-                                  'security_groups_provider_updated') as p_upd:
-
-            net_id = net['network']['id']
-            data = [{
-                    'network_id': net_id,
-                    'tenant_id': self._tenant_id
-                    },
-                    {
-                    'network_id': net_id,
-                    'tenant_id': self._tenant_id,
-                    'device_owner': constants.DEVICE_OWNER_DHCP
-                    }
-                    ]
-
-            res = self._create_bulk_from_list(self.fmt, 'port',
-                                              data, context=ctx)
-            ports = self.deserialize(self.fmt, res)
-            used_sg = ports['ports'][0]['security_groups']
-            m_upd.assert_called_once_with(ctx, used_sg)
-            self._check_security_groups_provider_updated_args(p_upd, net_id)
-            m_upd.reset_mock()
-            p_upd.reset_mock()
-            data[0]['device_owner'] = constants.DEVICE_OWNER_DHCP
-            self._create_bulk_from_list(self.fmt, 'port',
-                                        data, context=ctx)
-            self.assertFalse(m_upd.called)
-            self._check_security_groups_provider_updated_args(p_upd, net_id)
-
-    def test_create_ports_bulk_with_sec_grp_provider_update_ipv6(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        fake_prefix = '2001:db8::/64'
-        fake_gateway = 'fe80::1'
-        with self.network() as net:
-            with self.subnet(net,
-                             gateway_ip=fake_gateway,
-                             cidr=fake_prefix,
-                             ip_version=6) as snet_v6,\
-                    mock.patch.object(
-                        plugin.notifier,
-                        'security_groups_member_updated') as m_upd,\
-                    mock.patch.object(
-                        plugin.notifier,
-                        'security_groups_provider_updated') as p_upd:
-
-                net_id = net['network']['id']
-                data = [{
-                        'network_id': net_id,
-                        'tenant_id': self._tenant_id,
-                        'fixed_ips': [{'subnet_id': snet_v6['subnet']['id']}],
-                        'device_owner': constants.DEVICE_OWNER_ROUTER_INTF
-                        }
-                        ]
-                self._create_bulk_from_list(self.fmt, 'port',
-                                            data, context=ctx)
-                self.assertFalse(m_upd.called)
-                self._check_security_groups_provider_updated_args(
-                    p_upd, net_id)
-
-    def test_delete_port_no_notify_in_disassociate_floatingips(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        l3plugin = manager.NeutronManager.get_service_plugins().get(
-            p_const.L3_ROUTER_NAT)
-        with self.port() as port,\
-                mock.patch.object(
-                    l3plugin,
-                    'disassociate_floatingips') as disassociate_floatingips,\
-                mock.patch.object(registry, 'notify') as notify:
-
-            port_id = port['port']['id']
-            plugin.delete_port(ctx, port_id)
-
-            # check that no notification was requested while under
-            # transaction
-            disassociate_floatingips.assert_has_calls([
-                mock.call(ctx, port_id, do_notify=False)
-            ])
-
-            # check that notifier was still triggered
-            self.assertTrue(notify.call_count)
-
-    def test_check_if_compute_port_serviced_by_dvr(self):
-        self.assertTrue(utils.is_dvr_serviced(DEVICE_OWNER_COMPUTE))
-
-    def test_check_if_lbaas_vip_port_serviced_by_dvr(self):
-        self.assertTrue(utils.is_dvr_serviced(
-            constants.DEVICE_OWNER_LOADBALANCER))
-
-    def test_check_if_lbaasv2_vip_port_serviced_by_dvr(self):
-        self.assertTrue(utils.is_dvr_serviced(
-            constants.DEVICE_OWNER_LOADBALANCERV2))
-
-    def test_check_if_dhcp_port_serviced_by_dvr(self):
-        self.assertTrue(utils.is_dvr_serviced(constants.DEVICE_OWNER_DHCP))
-
-    def test_check_if_port_not_serviced_by_dvr(self):
-        self.assertFalse(utils.is_dvr_serviced(
-            constants.DEVICE_OWNER_ROUTER_INTF))
-
-    def test_disassociate_floatingips_do_notify_returns_nothing(self):
-        ctx = context.get_admin_context()
-        l3plugin = manager.NeutronManager.get_service_plugins().get(
-            p_const.L3_ROUTER_NAT)
-        with self.port() as port:
-
-            port_id = port['port']['id']
-            # check that nothing is returned when notifications are handled
-            # by the called method
-            self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id))
-
-    def test_create_port_tolerates_db_deadlock(self):
-        ctx = context.get_admin_context()
-        with self.network() as net:
-            with self.subnet(network=net) as subnet:
-                segments = ml2_db.get_networks_segments(ctx.session,
-                                                        [net['network']['id']])
-                with mock.patch('neutron.plugins.ml2.plugin.'
-                                'db.get_networks_segments') as get_seg_mock:
-                    get_seg_mock.side_effect = [db_exc.DBDeadlock, segments,
-                                                segments, segments]
-                    with self.port(subnet=subnet) as port:
-                        self.assertTrue(port['port']['id'])
-                        self.assertEqual(4, get_seg_mock.call_count)
-
-    def test_delete_port_tolerates_db_deadlock(self):
-        ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        with self.port() as port:
-            port_db, binding = ml2_db.get_locked_port_and_binding(
-                ctx.session, port['port']['id'])
-            with mock.patch('neutron.plugins.ml2.plugin.'
-                            'db.get_locked_port_and_binding') as lock:
-                lock.side_effect = [db_exc.DBDeadlock,
-                                    (port_db, binding)]
-                req = self.new_delete_request('ports', port['port']['id'])
-                res = req.get_response(self.api)
-                self.assertEqual(204, res.status_int)
-                self.assertEqual(2, lock.call_count)
-                self.assertRaises(
-                    exc.PortNotFound, plugin.get_port, ctx, port['port']['id'])
-
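Both deadlock tests above verify that a transient DBDeadlock is retried rather than surfaced. Outside neutron, the same behavior can be had from oslo.db's wrap_db_retry decorator; a hedged sketch (retry_on_deadlock was the era-appropriate knob, and the default 1-second retry interval makes this sleep briefly):

```python
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc

attempts = []


@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True)
def create_port():
    attempts.append(1)
    if len(attempts) < 2:
        raise db_exc.DBDeadlock()  # first attempt hits a deadlock
    return 'created'


assert create_port() == 'created'
assert len(attempts) == 2  # deadlocked once, then succeeded on retry
```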
-    def test_port_create_resilient_to_duplicate_records(self):
-
-        def make_port():
-            with self.port():
-                pass
-
-        self._test_operation_resilient_to_ipallocation_failure(make_port)
-
-    def test_port_update_resilient_to_duplicate_records(self):
-        with self.port() as p:
-            data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.9'}]}}
-            req = self.new_update_request('ports', data, p['port']['id'])
-
-            def do_request():
-                self.assertEqual(200, req.get_response(self.api).status_int)
-
-            self._test_operation_resilient_to_ipallocation_failure(do_request)
-
-    def _test_operation_resilient_to_ipallocation_failure(self, func):
-        from sqlalchemy import event
-
-        class IPAllocationsGrenade(object):
-            insert_ip_called = False
-            except_raised = False
-
-            def execute(self, con, curs, stmt, *args, **kwargs):
-                if 'INSERT INTO ipallocations' in stmt:
-                    self.insert_ip_called = True
-
-            def commit(self, con):
-                # we blow up on commit to simulate another thread/server
-                # stealing our IP before our transaction was done
-                if self.insert_ip_called and not self.except_raised:
-                    self.except_raised = True
-                    raise db_exc.DBDuplicateEntry()
-
-        listener = IPAllocationsGrenade()
-        engine = db_api.get_engine()
-        event.listen(engine, 'before_cursor_execute', listener.execute)
-        event.listen(engine, 'commit', listener.commit)
-        self.addCleanup(event.remove, engine, 'before_cursor_execute',
-                        listener.execute)
-        self.addCleanup(event.remove, engine, 'commit',
-                        listener.commit)
-        func()
-        # make sure that the grenade went off during the commit
-        self.assertTrue(listener.except_raised)
-
-
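The IPAllocationsGrenade fixture above arms on the interesting INSERT and detonates once at commit, simulating another writer winning the race. A self-contained replay of the same two-listener technique against SQLite (RuntimeError stands in for DBDuplicateEntry):

```python
import sqlalchemy as sa
from sqlalchemy import event

engine = sa.create_engine('sqlite://')
state = {'armed': False, 'raised': False}


def before_exec(conn, cursor, stmt, params, context, executemany):
    if 'INSERT INTO items' in stmt:
        state['armed'] = True  # the statement of interest went out


def on_commit(conn):
    if state['armed'] and not state['raised']:
        state['raised'] = True
        raise RuntimeError('simulated duplicate entry at commit time')


event.listen(engine, 'before_cursor_execute', before_exec)
event.listen(engine, 'commit', on_commit)

with engine.begin() as conn:
    conn.execute(sa.text('CREATE TABLE items (id INTEGER)'))

try:
    with engine.begin() as conn:
        conn.execute(sa.text('INSERT INTO items VALUES (1)'))
except RuntimeError:
    pass
assert state['raised']  # the grenade went off during commit
```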
-class TestMl2PluginOnly(Ml2PluginV2TestCase):
-    """For testing methods that don't call drivers"""
-
-    def test__verify_service_plugins_requirements(self):
-        plugin = manager.NeutronManager.get_plugin()
-        with mock.patch.dict(ml2_plugin.SERVICE_PLUGINS_REQUIRED_DRIVERS,
-                             {self.l3_plugin: self._mechanism_drivers}),\
-                mock.patch.object(plugin.extension_manager,
-                                  'names',
-                                  return_value=self._mechanism_drivers):
-
-            plugin._verify_service_plugins_requirements()
-
-    def test__verify_service_plugins_requirements_missing_driver(self):
-        plugin = manager.NeutronManager.get_plugin()
-        with mock.patch.dict(ml2_plugin.SERVICE_PLUGINS_REQUIRED_DRIVERS,
-                             {self.l3_plugin: ['test_required_driver']}),\
-                mock.patch.object(plugin.extension_manager,
-                                  'names',
-                                  return_value=self._mechanism_drivers):
-
-            self.assertRaises(
-                ml2_exc.ExtensionDriverNotFound,
-                plugin._verify_service_plugins_requirements
-            )
-
-    def _test_check_mac_update_allowed(self, vif_type, expect_change=True):
-        plugin = manager.NeutronManager.get_plugin()
-        port = {'mac_address': "fake_mac", 'id': "fake_id"}
-        if expect_change:
-            new_attrs = {"mac_address": "dummy_mac"}
-        else:
-            new_attrs = {"mac_address": port['mac_address']}
-        binding = mock.Mock()
-        binding.vif_type = vif_type
-        mac_changed = plugin._check_mac_update_allowed(port, new_attrs,
-                                                       binding)
-        self.assertEqual(expect_change, mac_changed)
-
-    def test_check_mac_update_allowed_if_no_mac_change(self):
-        self._test_check_mac_update_allowed(portbindings.VIF_TYPE_UNBOUND,
-                                            expect_change=False)
-
-    def test_check_mac_update_allowed_unless_bound(self):
-        with testtools.ExpectedException(exc.PortBound):
-            self._test_check_mac_update_allowed(portbindings.VIF_TYPE_OVS)
-
-    def test__device_to_port_id_prefix_names(self):
-        input_output = [('sg-abcdefg', 'abcdefg'),
-                        ('tap123456', '123456'),
-                        ('qvo567890', '567890')]
-        for device, expected in input_output:
-            self.assertEqual(expected,
-                             ml2_plugin.Ml2Plugin._device_to_port_id(
-                                 self.context, device))
-
-    def test__device_to_port_id_mac_address(self):
-        with self.port() as p:
-            mac = p['port']['mac_address']
-            port_id = p['port']['id']
-            self.assertEqual(port_id,
-                             ml2_plugin.Ml2Plugin._device_to_port_id(
-                                 self.context, mac))
-
-    def test__device_to_port_id_not_uuid_not_mac(self):
-        dev = '1234567'
-        self.assertEqual(dev, ml2_plugin.Ml2Plugin._device_to_port_id(
-            self.context, dev))
-
-    def test__device_to_port_id_UUID(self):
-        port_id = uuidutils.generate_uuid()
-        self.assertEqual(port_id, ml2_plugin.Ml2Plugin._device_to_port_id(
-            self.context, port_id))
-
-
-class TestMl2DvrPortsV2(TestMl2PortsV2):
-    def setUp(self):
-        super(TestMl2DvrPortsV2, self).setUp()
-        extensions = ['router',
-                      constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
-                      constants.L3_DISTRIBUTED_EXT_ALIAS]
-        self.plugin = manager.NeutronManager.get_plugin()
-        self.l3plugin = mock.Mock()
-        type(self.l3plugin).supported_extension_aliases = (
-            mock.PropertyMock(return_value=extensions))
-        self.service_plugins = {'L3_ROUTER_NAT': self.l3plugin}
-
-    def _test_delete_dvr_serviced_port(self, device_owner, floating_ip=False):
-        ns_to_delete = {'host': 'myhost', 'agent_id': 'vm_l3_agent',
-                        'router_id': 'my_router'}
-        fip_set = set()
-        if floating_ip:
-            fip_set.add(ns_to_delete['router_id'])
-
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins',
-                               return_value=self.service_plugins),\
-                self.port(device_owner=device_owner) as port,\
-                mock.patch.object(registry, 'notify') as notify,\
-                mock.patch.object(self.l3plugin,
-                                  'disassociate_floatingips',
-                                  return_value=fip_set),\
-                mock.patch.object(
-                    self.l3plugin,
-                    'dvr_deletens_if_no_port',
-                    return_value=[ns_to_delete]) as dvr_delns_ifno_port:
-
-            port_id = port['port']['id']
-            self.plugin.delete_port(self.context, port_id)
-
-            self.assertTrue(notify.call_count)
-            dvr_delns_ifno_port.assert_called_once_with(self.context,
-                                                        port['port']['id'])
-
-    def test_delete_last_vm_port(self):
-        self._test_delete_dvr_serviced_port(device_owner=DEVICE_OWNER_COMPUTE)
-
-    def test_delete_last_vm_port_with_floatingip(self):
-        self._test_delete_dvr_serviced_port(device_owner=DEVICE_OWNER_COMPUTE,
-                                            floating_ip=True)
-
-    def test_delete_lbaas_vip_port(self):
-        self._test_delete_dvr_serviced_port(
-            device_owner=constants.DEVICE_OWNER_LOADBALANCER)
-
-    def test_delete_lbaasv2_vip_port(self):
-        self._test_delete_dvr_serviced_port(
-            device_owner=constants.DEVICE_OWNER_LOADBALANCERV2)
-
-    def test_concurrent_csnat_port_delete(self):
-        plugin = manager.NeutronManager.get_service_plugins()[
-            p_const.L3_ROUTER_NAT]
-        r = plugin.create_router(
-            self.context,
-            {'router': {'name': 'router', 'admin_state_up': True,
-             'tenant_id': self.context.tenant_id}})
-        with self.subnet() as s:
-            p = plugin.add_router_interface(self.context, r['id'],
-                                            {'subnet_id': s['subnet']['id']})
-
-        # lie to turn the port into an SNAT interface
-        with self.context.session.begin():
-            rp = self.context.session.query(l3_db.RouterPort).filter_by(
-                port_id=p['port_id']).first()
-            rp.port_type = constants.DEVICE_OWNER_ROUTER_SNAT
-
-        # take the port away before csnat gets a chance to delete it
-        # to simulate a concurrent delete
-        orig_get_ports = plugin._core_plugin.get_ports
-
-        def get_ports_with_delete_first(*args, **kwargs):
-            plugin._core_plugin.delete_port(self.context,
-                                            p['port_id'],
-                                            l3_port_check=False)
-            return orig_get_ports(*args, **kwargs)
-        plugin._core_plugin.get_ports = get_ports_with_delete_first
-
-        # This should be able to handle a concurrent delete without raising
-        # an exception
-        router = plugin._get_router(self.context, r['id'])
-        plugin.delete_csnat_router_interface_ports(self.context, router)
-
-
-class TestMl2PortBinding(Ml2PluginV2TestCase,
-                         test_bindings.PortBindingsTestCase):
-    # This test case does not set binding:host_id, so ml2 does not
-    # attempt to bind the port.
-    VIF_TYPE = portbindings.VIF_TYPE_UNBOUND
-    HAS_PORT_FILTER = False
-    ENABLE_SG = True
-    FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER
-
-    def setUp(self, firewall_driver=None):
-        test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER)
-        config.cfg.CONF.set_override(
-            'enable_security_group', self.ENABLE_SG,
-            group='SECURITYGROUP')
-        super(TestMl2PortBinding, self).setUp()
-
-    def _check_port_binding_profile(self, port, profile=None):
-        self.assertIn('id', port)
-        self.assertIn(portbindings.PROFILE, port)
-        value = port[portbindings.PROFILE]
-        self.assertEqual(profile or {}, value)
-
-    def test_create_port_binding_profile(self):
-        self._test_create_port_binding_profile({'a': 1, 'b': 2})
-
-    def test_update_port_binding_profile(self):
-        self._test_update_port_binding_profile({'c': 3})
-
-    def test_create_port_binding_profile_too_big(self):
-        s = 'x' * 5000
-        profile_arg = {portbindings.PROFILE: {'d': s}}
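-        # The port helper may either return the 400 response or raise
-        # HTTPClientError when creation fails, so accept both outcomes.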
-        try:
-            with self.port(expected_res_status=400,
-                           arg_list=(portbindings.PROFILE,),
-                           **profile_arg):
-                pass
-        except webob.exc.HTTPClientError:
-            pass
-
-    def test_remove_port_binding_profile(self):
-        profile = {'e': 5}
-        profile_arg = {portbindings.PROFILE: profile}
-        with self.port(arg_list=(portbindings.PROFILE,),
-                       **profile_arg) as port:
-            self._check_port_binding_profile(port['port'], profile)
-            port_id = port['port']['id']
-            profile_arg = {portbindings.PROFILE: None}
-            port = self._update('ports', port_id,
-                                {'port': profile_arg})['port']
-            self._check_port_binding_profile(port)
-            port = self._show('ports', port_id)['port']
-            self._check_port_binding_profile(port)
-
-    def test_return_on_concurrent_delete_and_binding(self):
-        # create a port and delete it so we have an expired mechanism context
-        with self.port() as port:
-            plugin = manager.NeutronManager.get_plugin()
-            binding = ml2_db.get_locked_port_and_binding(self.context.session,
-                                                         port['port']['id'])[1]
-            binding['host'] = 'test'
-            mech_context = driver_context.PortContext(
-                plugin, self.context, port['port'],
-                plugin.get_network(self.context, port['port']['network_id']),
-                binding, None)
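-        # Simulate a concurrent delete: the binding lookup returns
-        # (None, None) as if the port vanished before binding completed.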
-        with mock.patch(
-            'neutron.plugins.ml2.plugin.db.get_locked_port_and_binding',
-            return_value=(None, None)) as glpab_mock,\
-                mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
-                           '_make_port_dict') as mpd_mock:
-            plugin._bind_port_if_needed(mech_context)
-            # called during deletion to get port
-            self.assertTrue(glpab_mock.mock_calls)
-            # should have returned before calling _make_port_dict
-            self.assertFalse(mpd_mock.mock_calls)
-
-    def test_port_binding_profile_not_changed(self):
-        profile = {'e': 5}
-        profile_arg = {portbindings.PROFILE: profile}
-        with self.port(arg_list=(portbindings.PROFILE,),
-                       **profile_arg) as port:
-            self._check_port_binding_profile(port['port'], profile)
-            port_id = port['port']['id']
-            state_arg = {'admin_state_up': True}
-            port = self._update('ports', port_id,
-                                {'port': state_arg})['port']
-            self._check_port_binding_profile(port, profile)
-            port = self._show('ports', port_id)['port']
-            self._check_port_binding_profile(port, profile)
-
-    def test_update_port_binding_host_id_none(self):
-        with self.port() as port:
-            plugin = manager.NeutronManager.get_plugin()
-            binding = ml2_db.get_locked_port_and_binding(self.context.session,
-                                                         port['port']['id'])[1]
-            binding['host'] = 'test'
-            mech_context = driver_context.PortContext(
-                plugin, self.context, port['port'],
-                plugin.get_network(self.context, port['port']['network_id']),
-                binding, None)
-        with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
-                        '_update_port_dict_binding') as update_mock:
-            attrs = {portbindings.HOST_ID: None}
-            plugin._process_port_binding(mech_context, attrs)
-            self.assertTrue(update_mock.mock_calls)
-            self.assertEqual('', binding.host)
-
-    def test_update_port_binding_host_id_not_changed(self):
-        with self.port() as port:
-            plugin = manager.NeutronManager.get_plugin()
-            binding = ml2_db.get_locked_port_and_binding(self.context.session,
-                                                         port['port']['id'])[1]
-            binding['host'] = 'test'
-            mech_context = driver_context.PortContext(
-                plugin, self.context, port['port'],
-                plugin.get_network(self.context, port['port']['network_id']),
-                binding, None)
-        with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
-                        '_update_port_dict_binding') as update_mock:
-            attrs = {portbindings.PROFILE: {'e': 5}}
-            plugin._process_port_binding(mech_context, attrs)
-            self.assertTrue(update_mock.mock_calls)
-            self.assertEqual('test', binding.host)
-
-    def test_process_dvr_port_binding_update_router_id(self):
-        host_id = 'host'
-        binding = models.DVRPortBinding(
-                            port_id='port_id',
-                            host=host_id,
-                            router_id='old_router_id',
-                            vif_type=portbindings.VIF_TYPE_OVS,
-                            vnic_type=portbindings.VNIC_NORMAL,
-                            status=constants.PORT_STATUS_DOWN)
-        plugin = manager.NeutronManager.get_plugin()
-        mock_network = {'id': 'net_id'}
-        mock_port = {'id': 'port_id'}
-        context = mock.Mock()
-        new_router_id = 'new_router'
-        attrs = {'device_id': new_router_id, portbindings.HOST_ID: host_id}
-        with mock.patch.object(plugin, '_update_port_dict_binding'):
-            with mock.patch.object(ml2_db, 'get_network_segments',
-                                   return_value=[]):
-                mech_context = driver_context.PortContext(
-                    self, context, mock_port, mock_network, binding, None)
-                plugin._process_dvr_port_binding(mech_context, context, attrs)
-                self.assertEqual(new_router_id,
-                                 mech_context._binding.router_id)
-                self.assertEqual(host_id, mech_context._binding.host)
-
-    def test_update_dvr_port_binding_on_non_existent_port(self):
-        plugin = manager.NeutronManager.get_plugin()
-        port = {
-            'id': 'foo_port_id',
-            portbindings.HOST_ID: 'foo_host',
-        }
-        with mock.patch.object(ml2_db, 'ensure_dvr_port_binding') as mock_dvr:
-            plugin.update_dvr_port_binding(
-                self.context, 'foo_port_id', {'port': port})
-        self.assertFalse(mock_dvr.called)
-
-
-class TestMl2PortBindingNoSG(TestMl2PortBinding):
-    HAS_PORT_FILTER = False
-    ENABLE_SG = False
-    FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER
-
-
-class TestMl2PortBindingHost(Ml2PluginV2TestCase,
-                             test_bindings.PortBindingsHostTestCaseMixin):
-    pass
-
-
-class TestMl2PortBindingVnicType(Ml2PluginV2TestCase,
-                                 test_bindings.PortBindingsVnicTestCaseMixin):
-    pass
-
-
-class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
-
-    def setUp(self, plugin=None):
-        super(TestMultiSegmentNetworks, self).setUp()
-
-    def test_allocate_dynamic_segment(self):
-        data = {'network': {'name': 'net1',
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt,
-                                   network_req.get_response(self.api))
-        segment = {driver_api.NETWORK_TYPE: 'vlan',
-                   driver_api.PHYSICAL_NETWORK: 'physnet1'}
-        network_id = network['network']['id']
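-        # No segmentation_id is given, so the type manager must allocate
-        # one dynamically.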
-        self.driver.type_manager.allocate_dynamic_segment(
-            self.context.session, network_id, segment)
-        dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
-                                                     network_id,
-                                                     'physnet1')
-        self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
-        self.assertEqual('physnet1',
-                         dynamic_segment[driver_api.PHYSICAL_NETWORK])
-        self.assertGreater(dynamic_segment[driver_api.SEGMENTATION_ID], 0)
-        segment2 = {driver_api.NETWORK_TYPE: 'vlan',
-                    driver_api.SEGMENTATION_ID: 1234,
-                    driver_api.PHYSICAL_NETWORK: 'physnet3'}
-        self.driver.type_manager.allocate_dynamic_segment(
-            self.context.session, network_id, segment2)
-        dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
-                                                     network_id,
-                                                     segmentation_id='1234')
-        self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
-        self.assertEqual('physnet3',
-                         dynamic_segment[driver_api.PHYSICAL_NETWORK])
-        self.assertEqual(1234, dynamic_segment[driver_api.SEGMENTATION_ID])
-
-    def test_allocate_dynamic_segment_multiple_physnets(self):
-        data = {'network': {'name': 'net1',
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt,
-                                   network_req.get_response(self.api))
-        segment = {driver_api.NETWORK_TYPE: 'vlan',
-                   driver_api.PHYSICAL_NETWORK: 'physnet1'}
-        network_id = network['network']['id']
-        self.driver.type_manager.allocate_dynamic_segment(
-            self.context.session, network_id, segment)
-        dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
-                                                     network_id,
-                                                     'physnet1')
-        self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
-        self.assertEqual('physnet1',
-                         dynamic_segment[driver_api.PHYSICAL_NETWORK])
-        dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
-        self.assertGreater(dynamic_segmentation_id, 0)
-        dynamic_segment1 = ml2_db.get_dynamic_segment(self.context.session,
-                                                      network_id,
-                                                      'physnet1')
-        dynamic_segment1_id = dynamic_segment1[driver_api.SEGMENTATION_ID]
-        self.assertEqual(dynamic_segmentation_id, dynamic_segment1_id)
-        segment2 = {driver_api.NETWORK_TYPE: 'vlan',
-                    driver_api.PHYSICAL_NETWORK: 'physnet2'}
-        self.driver.type_manager.allocate_dynamic_segment(
-            self.context.session, network_id, segment2)
-        dynamic_segment2 = ml2_db.get_dynamic_segment(self.context.session,
-                                                      network_id,
-                                                      'physnet2')
-        dynamic_segmentation2_id = dynamic_segment2[driver_api.SEGMENTATION_ID]
-        self.assertNotEqual(dynamic_segmentation_id, dynamic_segmentation2_id)
-
-    def test_allocate_release_dynamic_segment(self):
-        data = {'network': {'name': 'net1',
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt,
-                                   network_req.get_response(self.api))
-        segment = {driver_api.NETWORK_TYPE: 'vlan',
-                   driver_api.PHYSICAL_NETWORK: 'physnet1'}
-        network_id = network['network']['id']
-        self.driver.type_manager.allocate_dynamic_segment(
-            self.context.session, network_id, segment)
-        dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
-                                                     network_id,
-                                                     'physnet1')
-        self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
-        self.assertEqual('physnet1',
-                         dynamic_segment[driver_api.PHYSICAL_NETWORK])
-        dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
-        self.assertGreater(dynamic_segmentation_id, 0)
-        self.driver.type_manager.release_dynamic_segment(
-            self.context.session, dynamic_segment[driver_api.ID])
-        self.assertIsNone(ml2_db.get_dynamic_segment(
-            self.context.session, network_id, 'physnet1'))
-
-    def test_create_network_provider(self):
-        data = {'network': {'name': 'net1',
-                            pnet.NETWORK_TYPE: 'vlan',
-                            pnet.PHYSICAL_NETWORK: 'physnet1',
-                            pnet.SEGMENTATION_ID: 1,
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt,
-                                   network_req.get_response(self.api))
-        self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
-        self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
-        self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
-        self.assertNotIn(mpnet.SEGMENTS, network['network'])
-
-    def test_create_network_single_multiprovider(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1}],
-                            'tenant_id': 'tenant_one'}}
-        net_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt, net_req.get_response(self.api))
-        self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
-        self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
-        self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
-        self.assertNotIn(mpnet.SEGMENTS, network['network'])
-
-        # Tests get_network()
-        net_req = self.new_show_request('networks', network['network']['id'])
-        network = self.deserialize(self.fmt, net_req.get_response(self.api))
-        self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
-        self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
-        self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
-        self.assertNotIn(mpnet.SEGMENTS, network['network'])
-
-    def test_create_network_multiprovider(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1},
-                             {pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 2}],
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt,
-                                   network_req.get_response(self.api))
-        segments = network['network'][mpnet.SEGMENTS]
-        for segment_index, segment in enumerate(data['network']
-                                                [mpnet.SEGMENTS]):
-            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
-                          pnet.SEGMENTATION_ID]:
-                self.assertEqual(segment.get(field),
-                                 segments[segment_index][field])
-
-        # Tests get_network()
-        net_req = self.new_show_request('networks', network['network']['id'])
-        network = self.deserialize(self.fmt, net_req.get_response(self.api))
-        segments = network['network'][mpnet.SEGMENTS]
-        for segment_index, segment in enumerate(data['network']
-                                                [mpnet.SEGMENTS]):
-            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
-                          pnet.SEGMENTATION_ID]:
-                self.assertEqual(segment.get(field),
-                                 segments[segment_index][field])
-
-    def test_create_network_with_provider_and_multiprovider_fail(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1}],
-                            pnet.NETWORK_TYPE: 'vlan',
-                            pnet.PHYSICAL_NETWORK: 'physnet1',
-                            pnet.SEGMENTATION_ID: 1,
-                            'tenant_id': 'tenant_one'}}
-
-        network_req = self.new_create_request('networks', data)
-        res = network_req.get_response(self.api)
-        self.assertEqual(400, res.status_int)
-
-    def test_create_network_duplicate_full_segments(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1},
-                             {pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1}],
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        res = network_req.get_response(self.api)
-        self.assertEqual(400, res.status_int)
-
-    def test_create_network_duplicate_partial_segments(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1'},
-                             {pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1'}],
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        res = network_req.get_response(self.api)
-        self.assertEqual(201, res.status_int)
-
-    def test_release_network_segments(self):
-        data = {'network': {'name': 'net1',
-                            'admin_state_up': True,
-                            'shared': False,
-                            pnet.NETWORK_TYPE: 'vlan',
-                            pnet.PHYSICAL_NETWORK: 'physnet1',
-                            pnet.SEGMENTATION_ID: 1,
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        res = network_req.get_response(self.api)
-        network = self.deserialize(self.fmt, res)
-        network_id = network['network']['id']
-        segment = {driver_api.NETWORK_TYPE: 'vlan',
-                   driver_api.PHYSICAL_NETWORK: 'physnet2'}
-        self.driver.type_manager.allocate_dynamic_segment(
-            self.context.session, network_id, segment)
-        dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
-                                                     network_id,
-                                                     'physnet2')
-        self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
-        self.assertEqual('physnet2',
-                         dynamic_segment[driver_api.PHYSICAL_NETWORK])
-        self.assertGreater(dynamic_segment[driver_api.SEGMENTATION_ID], 0)
-
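-        # Deleting the network must release both the static segment and
-        # the dynamically allocated one, hence two release calls.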
-        with mock.patch.object(type_vlan.VlanTypeDriver,
-                               'release_segment') as rs:
-            req = self.new_delete_request('networks', network_id)
-            res = req.get_response(self.api)
-            self.assertEqual(2, rs.call_count)
-        self.assertEqual([], ml2_db.get_network_segments(
-            self.context.session, network_id))
-        self.assertIsNone(ml2_db.get_dynamic_segment(
-            self.context.session, network_id, 'physnet2'))
-
-    def test_release_segment_no_type_driver(self):
-        data = {'network': {'name': 'net1',
-                            'admin_state_up': True,
-                            'shared': False,
-                            pnet.NETWORK_TYPE: 'vlan',
-                            pnet.PHYSICAL_NETWORK: 'physnet1',
-                            pnet.SEGMENTATION_ID: 1,
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        res = network_req.get_response(self.api)
-        network = self.deserialize(self.fmt, res)
-        network_id = network['network']['id']
-
-        segment = {driver_api.NETWORK_TYPE: 'faketype',
-                   driver_api.PHYSICAL_NETWORK: 'physnet1',
-                   driver_api.ID: 1}
-        with mock.patch('neutron.plugins.ml2.managers.LOG') as log:
-            with mock.patch('neutron.plugins.ml2.managers.db') as db:
-                db.get_network_segments.return_value = (segment,)
-                self.driver.type_manager.release_network_segments(
-                    self.context.session, network_id)
-
-                log.error.assert_called_once_with(
-                    "Failed to release segment '%s' because "
-                    "network type is not supported.", segment)
-
-    def test_create_provider_fail(self):
-        segment = {pnet.NETWORK_TYPE: None,
-                   pnet.PHYSICAL_NETWORK: 'phys_net',
-                   pnet.SEGMENTATION_ID: None}
-        with testtools.ExpectedException(exc.InvalidInput):
-            self.driver.type_manager._process_provider_create(segment)
-
-    def test_create_network_plugin(self):
-        data = {'network': {'name': 'net1',
-                            'admin_state_up': True,
-                            'shared': False,
-                            pnet.NETWORK_TYPE: 'vlan',
-                            pnet.PHYSICAL_NETWORK: 'physnet1',
-                            pnet.SEGMENTATION_ID: 1,
-                            'tenant_id': 'tenant_one'}}
-
-        def raise_mechanism_exc(*args, **kwargs):
-            raise ml2_exc.MechanismDriverError(
-                method='create_network_precommit')
-
-        with mock.patch('neutron.plugins.ml2.managers.MechanismManager.'
-                        'create_network_precommit', new=raise_mechanism_exc):
-            with testtools.ExpectedException(ml2_exc.MechanismDriverError):
-                self.driver.create_network(self.context, data)
-
-    def test_extend_dictionary_no_segments(self):
-        network = dict(name='net_no_segment', id='5', tenant_id='tenant_one')
-        self.driver.type_manager.extend_network_dict_provider(self.context,
-                                                              network)
-        self.assertIsNone(network[pnet.NETWORK_TYPE])
-        self.assertIsNone(network[pnet.PHYSICAL_NETWORK])
-        self.assertIsNone(network[pnet.SEGMENTATION_ID])
-
-
-class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase,
-                                 test_pair.TestAllowedAddressPairs):
-    _extension_drivers = ['port_security']
-
-    def setUp(self, plugin=None):
-        config.cfg.CONF.set_override('extension_drivers',
-                                     self._extension_drivers,
-                                     group='ml2')
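-        # Deliberately skip TestAllowedAddressPairs.setUp so the base
-        # setUp runs with the ml2 plugin.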
-        super(test_pair.TestAllowedAddressPairs, self).setUp(
-            plugin=PLUGIN_NAME)
-
-
-class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt):
-
-    def setUp(self, plugin=None):
-        super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp(
-            plugin=PLUGIN_NAME)
-
-
-class Ml2PluginV2FaultyDriverTestCase(test_plugin.NeutronDbPluginV2TestCase):
-
-    def setUp(self):
-        # Enable the test mechanism driver to ensure that
-        # we can successfully call through to all mechanism
-        # driver apis.
-        config.cfg.CONF.set_override('mechanism_drivers',
-                                     ['test', 'logger'],
-                                     group='ml2')
-        super(Ml2PluginV2FaultyDriverTestCase, self).setUp(PLUGIN_NAME)
-        self.port_create_status = 'DOWN'
-
-
-class TestFaultyMechanismDriver(Ml2PluginV2FaultyDriverTestCase):
-
-    def test_create_network_faulty(self):
-
-        with mock.patch.object(mech_test.TestMechanismDriver,
-                               'create_network_postcommit',
-                               side_effect=ml2_exc.MechanismDriverError):
-            tenant_id = str(uuid.uuid4())
-            data = {'network': {'name': 'net1',
-                                'tenant_id': tenant_id}}
-            req = self.new_create_request('networks', data)
-            res = req.get_response(self.api)
-            self.assertEqual(500, res.status_int)
-            error = self.deserialize(self.fmt, res)
-            self.assertEqual('MechanismDriverError',
-                             error['NeutronError']['type'])
-            query_params = "tenant_id=%s" % tenant_id
-            nets = self._list('networks', query_params=query_params)
-            self.assertFalse(nets['networks'])
-
-    def test_delete_network_faulty(self):
-
-        with mock.patch.object(mech_test.TestMechanismDriver,
-                               'delete_network_postcommit',
-                               side_effect=ml2_exc.MechanismDriverError):
-            with mock.patch.object(mech_logger.LoggerMechanismDriver,
-                                   'delete_network_postcommit') as dnp:
-
-                data = {'network': {'name': 'net1',
-                                    'tenant_id': 'tenant_one'}}
-                network_req = self.new_create_request('networks', data)
-                network_res = network_req.get_response(self.api)
-                self.assertEqual(201, network_res.status_int)
-                network = self.deserialize(self.fmt, network_res)
-                net_id = network['network']['id']
-                req = self.new_delete_request('networks', net_id)
-                res = req.get_response(self.api)
-                self.assertEqual(204, res.status_int)
-                # Verify that the other mechanism driver was still called
-                self.assertTrue(dnp.called)
-                self._show('networks', net_id,
-                           expected_code=webob.exc.HTTPNotFound.code)
-
-    def test_update_network_faulty(self):
-
-        with mock.patch.object(mech_test.TestMechanismDriver,
-                               'update_network_postcommit',
-                               side_effect=ml2_exc.MechanismDriverError):
-            with mock.patch.object(mech_logger.LoggerMechanismDriver,
-                                   'update_network_postcommit') as unp:
-
-                data = {'network': {'name': 'net1',
-                                    'tenant_id': 'tenant_one'}}
-                network_req = self.new_create_request('networks', data)
-                network_res = network_req.get_response(self.api)
-                self.assertEqual(201, network_res.status_int)
-                network = self.deserialize(self.fmt, network_res)
-                net_id = network['network']['id']
-
-                new_name = 'a_brand_new_name'
-                data = {'network': {'name': new_name}}
-                req = self.new_update_request('networks', data, net_id)
-                res = req.get_response(self.api)
-                self.assertEqual(500, res.status_int)
-                error = self.deserialize(self.fmt, res)
-                self.assertEqual('MechanismDriverError',
-                                 error['NeutronError']['type'])
-                # Verify that the other mechanism driver was still called
-                self.assertTrue(unp.called)
-                net = self._show('networks', net_id)
-                self.assertEqual(new_name, net['network']['name'])
-
-                self._delete('networks', net_id)
-
-    def test_create_subnet_faulty(self):
-
-        with mock.patch.object(mech_test.TestMechanismDriver,
-                               'create_subnet_postcommit',
-                               side_effect=ml2_exc.MechanismDriverError):
-
-            with self.network() as network:
-                net_id = network['network']['id']
-                data = {'subnet': {'network_id': net_id,
-                                   'cidr': '10.0.20.0/24',
-                                   'ip_version': '4',
-                                   'name': 'subnet1',
-                                   'tenant_id':
-                                   network['network']['tenant_id'],
-                                   'gateway_ip': '10.0.20.1'}}
-                req = self.new_create_request('subnets', data)
-                res = req.get_response(self.api)
-                self.assertEqual(500, res.status_int)
-                error = self.deserialize(self.fmt, res)
-                self.assertEqual('MechanismDriverError',
-                                 error['NeutronError']['type'])
-                query_params = "network_id=%s" % net_id
-                subnets = self._list('subnets', query_params=query_params)
-                self.assertFalse(subnets['subnets'])
-
-    def test_delete_subnet_faulty(self):
-
-        with mock.patch.object(mech_test.TestMechanismDriver,
-                               'delete_subnet_postcommit',
-                               side_effect=ml2_exc.MechanismDriverError):
-            with mock.patch.object(mech_logger.LoggerMechanismDriver,
-                                   'delete_subnet_postcommit') as dsp:
-
-                with self.network() as network:
-                    data = {'subnet': {'network_id':
-                                       network['network']['id'],
-                                       'cidr': '10.0.20.0/24',
-                                       'ip_version': '4',
-                                       'name': 'subnet1',
-                                       'tenant_id':
-                                       network['network']['tenant_id'],
-                                       'gateway_ip': '10.0.20.1'}}
-                    subnet_req = self.new_create_request('subnets', data)
-                    subnet_res = subnet_req.get_response(self.api)
-                    self.assertEqual(201, subnet_res.status_int)
-                    subnet = self.deserialize(self.fmt, subnet_res)
-                    subnet_id = subnet['subnet']['id']
-
-                    req = self.new_delete_request('subnets', subnet_id)
-                    res = req.get_response(self.api)
-                    self.assertEqual(204, res.status_int)
-                    # Verify that the other mechanism driver was still called
-                    self.assertTrue(dsp.called)
-                    self._show('subnets', subnet_id,
-                               expected_code=webob.exc.HTTPNotFound.code)
-
-    def test_update_subnet_faulty(self):
-
-        with mock.patch.object(mech_test.TestMechanismDriver,
-                               'update_subnet_postcommit',
-                               side_effect=ml2_exc.MechanismDriverError):
-            with mock.patch.object(mech_logger.LoggerMechanismDriver,
-                                   'update_subnet_postcommit') as usp:
-
-                with self.network() as network:
-                    data = {'subnet': {'network_id':
-                                       network['network']['id'],
-                                       'cidr': '10.0.20.0/24',
-                                       'ip_version': '4',
-                                       'name': 'subnet1',
-                                       'tenant_id':
-                                       network['network']['tenant_id'],
-                                       'gateway_ip': '10.0.20.1'}}
-                    subnet_req = self.new_create_request('subnets', data)
-                    subnet_res = subnet_req.get_response(self.api)
-                    self.assertEqual(201, subnet_res.status_int)
-                    subnet = self.deserialize(self.fmt, subnet_res)
-                    subnet_id = subnet['subnet']['id']
-                    new_name = 'a_brand_new_name'
-                    data = {'subnet': {'name': new_name}}
-                    req = self.new_update_request('subnets', data, subnet_id)
-                    res = req.get_response(self.api)
-                    self.assertEqual(500, res.status_int)
-                    error = self.deserialize(self.fmt, res)
-                    self.assertEqual('MechanismDriverError',
-                                     error['NeutronError']['type'])
-                    # Verify that the other mechanism driver was still called
-                    self.assertTrue(usp.called)
-                    subnet = self._show('subnets', subnet_id)
-                    self.assertEqual(new_name, subnet['subnet']['name'])
-
-                    self._delete('subnets', subnet['subnet']['id'])
-
-    def test_create_port_faulty(self):
-
-        with mock.patch.object(mech_test.TestMechanismDriver,
-                               'create_port_postcommit',
-                               side_effect=ml2_exc.MechanismDriverError):
-
-            with self.network() as network:
-                net_id = network['network']['id']
-                data = {'port': {'network_id': net_id,
-                                 'tenant_id':
-                                 network['network']['tenant_id'],
-                                 'name': 'port1',
-                                 'admin_state_up': 1,
-                                 'fixed_ips': []}}
-                req = self.new_create_request('ports', data)
-                res = req.get_response(self.api)
-                self.assertEqual(500, res.status_int)
-                error = self.deserialize(self.fmt, res)
-                self.assertEqual('MechanismDriverError',
-                                 error['NeutronError']['type'])
-                query_params = "network_id=%s" % net_id
-                ports = self._list('ports', query_params=query_params)
-                self.assertFalse(ports['ports'])
-
-    def test_update_port_faulty(self):
-
-        with mock.patch.object(mech_test.TestMechanismDriver,
-                               'update_port_postcommit',
-                               side_effect=ml2_exc.MechanismDriverError):
-            with mock.patch.object(mech_logger.LoggerMechanismDriver,
-                                   'update_port_postcommit') as upp:
-
-                with self.network() as network:
-                    data = {'port': {'network_id': network['network']['id'],
-                                     'tenant_id':
-                                     network['network']['tenant_id'],
-                                     'name': 'port1',
-                                     'admin_state_up': 1,
-                                     'fixed_ips': []}}
-                    port_req = self.new_create_request('ports', data)
-                    port_res = port_req.get_response(self.api)
-                    self.assertEqual(201, port_res.status_int)
-                    port = self.deserialize(self.fmt, port_res)
-                    port_id = port['port']['id']
-
-                    new_name = 'a_brand_new_name'
-                    data = {'port': {'name': new_name}}
-                    req = self.new_update_request('ports', data, port_id)
-                    res = req.get_response(self.api)
-                    self.assertEqual(200, res.status_int)
-                    # Verify that the other mechanism driver was still called
-                    self.assertTrue(upp.called)
-                    port = self._show('ports', port_id)
-                    self.assertEqual(new_name, port['port']['name'])
-
-                    self._delete('ports', port['port']['id'])
-
-    def test_update_dvr_router_interface_port(self):
-        """Test validate dvr router interface update succeeds."""
-        host_id = 'host'
-        binding = models.DVRPortBinding(
-                            port_id='port_id',
-                            host=host_id,
-                            router_id='old_router_id',
-                            vif_type=portbindings.VIF_TYPE_OVS,
-                            vnic_type=portbindings.VNIC_NORMAL,
-                            status=constants.PORT_STATUS_DOWN)
-        with mock.patch.object(
-            mech_test.TestMechanismDriver,
-            'update_port_postcommit',
-            side_effect=ml2_exc.MechanismDriverError) as port_post,\
-                mock.patch.object(
-                    mech_test.TestMechanismDriver,
-                    'update_port_precommit') as port_pre,\
-                mock.patch.object(ml2_db,
-                                  'get_dvr_port_bindings') as dvr_bindings:
-                dvr_bindings.return_value = [binding]
-                port_pre.return_value = True
-                with self.network() as network:
-                    with self.subnet(network=network) as subnet:
-                        subnet_id = subnet['subnet']['id']
-                        data = {'port': {
-                            'network_id': network['network']['id'],
-                            'tenant_id':
-                            network['network']['tenant_id'],
-                            'name': 'port1',
-                            'device_owner':
-                            constants.DEVICE_OWNER_DVR_INTERFACE,
-                            'admin_state_up': 1,
-                            'fixed_ips':
-                            [{'subnet_id': subnet_id}]}}
-                        port_req = self.new_create_request('ports', data)
-                        port_res = port_req.get_response(self.api)
-                        self.assertEqual(201, port_res.status_int)
-                        port = self.deserialize(self.fmt, port_res)
-                        port_id = port['port']['id']
-                        new_name = 'a_brand_new_name'
-                        data = {'port': {'name': new_name}}
-                        req = self.new_update_request('ports', data, port_id)
-                        res = req.get_response(self.api)
-                        self.assertEqual(200, res.status_int)
-                        self.assertTrue(dvr_bindings.called)
-                        self.assertTrue(port_pre.called)
-                        self.assertTrue(port_post.called)
-                        port = self._show('ports', port_id)
-                        self.assertEqual(new_name, port['port']['name'])
-
-
-class TestML2PluggableIPAM(test_ipam.UseIpamMixin, TestMl2SubnetsV2):
-    def test_create_subnet_delete_subnet_call_ipam_driver(self):
-        driver = 'neutron.ipam.drivers.neutrondb_ipam.driver.NeutronDbPool'
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        with mock.patch(driver) as driver_mock:
-            request = mock.Mock()
-            request.subnet_id = uuidutils.generate_uuid()
-            request.subnet_cidr = cidr
-            request.allocation_pools = []
-            request.gateway_ip = gateway_ip
-            request.tenant_id = uuidutils.generate_uuid()
-
-            ipam_subnet = mock.Mock()
-            ipam_subnet.get_details.return_value = request
-            driver_mock().allocate_subnet.return_value = ipam_subnet
-
-            self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr)
-
-            driver_mock().allocate_subnet.assert_called_with(mock.ANY)
-            driver_mock().remove_subnet.assert_called_with(request.subnet_id)
-
-
-class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase):
-    def setUp(self):
-        super(TestMl2PluginCreateUpdateDeletePort, self).setUp()
-        self.context = mock.MagicMock()
-        self.notify_p = mock.patch('neutron.callbacks.registry.notify')
-        self.notify = self.notify_p.start()
-
-    def _ensure_transaction_is_closed(self):
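-        # Called from the notify mock: equal __enter__ and __exit__
-        # counts prove no transaction was open when the notification
-        # fired.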
-        transaction = self.context.session.begin(subtransactions=True)
-        enter = transaction.__enter__.call_count
-        exit = transaction.__exit__.call_count
-        self.assertEqual(enter, exit)
-
-    def _create_plugin_for_create_update_port(self):
-        plugin = ml2_plugin.Ml2Plugin()
-        plugin.extension_manager = mock.Mock()
-        plugin.type_manager = mock.Mock()
-        plugin.mechanism_manager = mock.Mock()
-        plugin.notifier = mock.Mock()
-        plugin._check_mac_update_allowed = mock.Mock(return_value=True)
-        plugin._extend_availability_zone = mock.Mock()
-
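-        # Every registry notification checks that no DB transaction is
-        # still open when it fires.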
-        self.notify.side_effect = (
-            lambda r, e, t, **kwargs: self._ensure_transaction_is_closed())
-
-        return plugin
-
-    def test_create_port_rpc_outside_transaction(self):
-        with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\
-                mock.patch.object(base_plugin.NeutronDbPluginV2,
-                                  'create_port') as db_create_port:
-            init.return_value = None
-
-            new_port = mock.MagicMock()
-            db_create_port.return_value = new_port
-            plugin = self._create_plugin_for_create_update_port()
-
-            plugin.create_port(self.context, mock.MagicMock())
-
-            kwargs = {'context': self.context, 'port': new_port}
-            self.notify.assert_called_once_with('port', 'after_create',
-                plugin, **kwargs)
-
-    def test_update_port_rpc_outside_transaction(self):
-        port_id = 'fake_id'
-        net_id = 'mynet'
-        original_port_db = models_v2.Port(
-            id=port_id,
-            tenant_id='tenant',
-            network_id=net_id,
-            mac_address='08:00:01:02:03:04',
-            admin_state_up=True,
-            status='ACTIVE',
-            device_id='vm_id',
-            device_owner=DEVICE_OWNER_COMPUTE)
-
-        binding = mock.Mock()
-        binding.port_id = port_id
-        binding.host = 'vm_host'
-        binding.vnic_type = portbindings.VNIC_NORMAL
-        binding.profile = ''
-        binding.vif_type = ''
-        binding.vif_details = ''
-
-        with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\
-                mock.patch.object(ml2_db, 'get_locked_port_and_binding',
-                                  return_value=(original_port_db, binding)),\
-                mock.patch.object(base_plugin.NeutronDbPluginV2,
-                                  'update_port') as db_update_port:
-            init.return_value = None
-            updated_port = mock.MagicMock()
-            db_update_port.return_value = updated_port
-            plugin = self._create_plugin_for_create_update_port()
-            original_port = plugin._make_port_dict(original_port_db)
-
-            plugin.update_port(self.context, port_id, mock.MagicMock())
-
-            kwargs = {
-                'context': self.context,
-                'port': updated_port,
-                'mac_address_updated': True,
-                'original_port': original_port,
-            }
-            self.notify.assert_called_once_with('port', 'after_update',
-                plugin, **kwargs)
-
-    def test_notify_outside_of_delete_transaction(self):
-        self.notify.side_effect = (
-            lambda r, e, t, **kwargs: self._ensure_transaction_is_closed())
-        l3plugin = mock.Mock()
-        l3plugin.supported_extension_aliases = [
-            'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
-            constants.L3_DISTRIBUTED_EXT_ALIAS
-        ]
-        with mock.patch.object(ml2_plugin.Ml2Plugin,
-                               '__init__',
-                               return_value=None),\
-                mock.patch.object(manager.NeutronManager,
-                                  'get_service_plugins',
-                                  return_value={'L3_ROUTER_NAT': l3plugin}):
-            plugin = self._create_plugin_for_create_update_port()
-            # Set backend manually here since __init__ was mocked
-            plugin.set_ipam_backend()
-            # deleting the port will call registry.notify, which will
-            # run the transaction balancing function defined in this test
-            plugin.delete_port(self.context, 'fake_id')
-            self.assertTrue(self.notify.call_count)
diff --git a/neutron/tests/unit/plugins/ml2/test_port_binding.py b/neutron/tests/unit/plugins/ml2/test_port_binding.py
deleted file mode 100644 (file)
index 3f73218..0000000
+++ /dev/null
@@ -1,299 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.common import constants as const
-from neutron import context
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.ml2 import config
-from neutron.plugins.ml2 import models as ml2_models
-from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
-
-
-PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-
-
-class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
-
-    _plugin_name = PLUGIN_NAME
-
-    def setUp(self):
-        # Enable the test mechanism driver to ensure that
-        # we can successfully call through to all mechanism
-        # driver apis.
-        config.cfg.CONF.set_override('mechanism_drivers',
-                                     ['logger', 'test'],
-                                     group='ml2')
-        config.cfg.CONF.set_override('network_vlan_ranges',
-                                     ['physnet1:1000:1099'],
-                                     group='ml2_type_vlan')
-        super(PortBindingTestCase, self).setUp(PLUGIN_NAME)
-        self.port_create_status = 'DOWN'
-        self.plugin = manager.NeutronManager.get_plugin()
-        self.plugin.start_rpc_listeners()
-
-    def _check_response(self, port, vif_type, has_port_filter, bound, status):
-        self.assertEqual(port[portbindings.VIF_TYPE], vif_type)
-        vif_details = port[portbindings.VIF_DETAILS]
-        port_status = port['status']
-        if bound:
-            # TODO(rkukura): Replace with new VIF security details
-            self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER],
-                             has_port_filter)
-            self.assertEqual(port_status, status or 'DOWN')
-        else:
-            self.assertEqual(port_status, 'DOWN')
-
-    def _test_port_binding(self, host, vif_type, has_port_filter, bound,
-                           status=None, network_type='local'):
-        mac_address = 'aa:aa:aa:aa:aa:aa'
-        host_arg = {portbindings.HOST_ID: host,
-                    'mac_address': mac_address}
-        with self.port(name='name', arg_list=(portbindings.HOST_ID,),
-                       **host_arg) as port:
-            self._check_response(port['port'], vif_type, has_port_filter,
-                                 bound, status)
-            port_id = port['port']['id']
-            neutron_context = context.get_admin_context()
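-            # Fetch device details over the RPC endpoint, as an L2 agent
-            # would, to check what a bound (or unbound) port exposes.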
-            details = self.plugin.endpoints[0].get_device_details(
-                neutron_context, agent_id="theAgentId", device=port_id)
-            if bound:
-                self.assertEqual(details['network_type'], network_type)
-                self.assertEqual(mac_address, details['mac_address'])
-            else:
-                self.assertNotIn('network_type', details)
-                self.assertNotIn('mac_address', details)
-
-    def test_unbound(self):
-        self._test_port_binding("",
-                                portbindings.VIF_TYPE_UNBOUND,
-                                False, False)
-
-    def test_binding_failed(self):
-        self._test_port_binding("host-fail",
-                                portbindings.VIF_TYPE_BINDING_FAILED,
-                                False, False)
-
-    def test_binding_no_filter(self):
-        self._test_port_binding("host-ovs-no_filter",
-                                portbindings.VIF_TYPE_OVS,
-                                False, True)
-
-    def test_binding_filter(self):
-        self._test_port_binding("host-bridge-filter",
-                                portbindings.VIF_TYPE_BRIDGE,
-                                True, True)
-
-    def test_binding_status_active(self):
-        self._test_port_binding("host-ovs-filter-active",
-                                portbindings.VIF_TYPE_OVS,
-                                True, True, 'ACTIVE')
-
-    def test_update_port_binding_no_binding(self):
-        ctx = context.get_admin_context()
-        with self.port(name='name') as port:
-            # emulating concurrent binding deletion
-            (ctx.session.query(ml2_models.PortBinding).
-             filter_by(port_id=port['port']['id']).delete())
-            self.assertIsNone(
-                self.plugin.get_bound_port_context(ctx, port['port']['id']))
-
-    def test_hierarchical_binding(self):
-        self._test_port_binding("host-hierarchical",
-                                portbindings.VIF_TYPE_OVS,
-                                False, True, network_type='vlan')
-
-    def test_get_bound_port_context_cache_hit(self):
-        ctx = context.get_admin_context()
-        with self.port(name='name') as port:
-            cached_network_id = port['port']['network_id']
-            some_network = {'id': cached_network_id}
-            cached_networks = {cached_network_id: some_network}
-            self.plugin.get_network = mock.Mock(return_value=some_network)
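-            # With the network pre-seeded in cached_networks, the plugin
-            # must not call get_network again.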
-            self.plugin.get_bound_port_context(ctx, port['port']['id'],
-                                               cached_networks=cached_networks)
-            self.assertFalse(self.plugin.get_network.called)
-
-    def test_get_bound_port_context_cache_miss(self):
-        ctx = context.get_admin_context()
-        with self.port(name='name') as port:
-            some_network = {'id': u'2ac23560-7638-44e2-9875-c1888b02af72'}
-            self.plugin.get_network = mock.Mock(return_value=some_network)
-            self.plugin.get_bound_port_context(ctx, port['port']['id'],
-                                               cached_networks={})
-            self.assertEqual(1, self.plugin.get_network.call_count)
-
-    def _test_update_port_binding(self, host, new_host=None):
-        with mock.patch.object(self.plugin,
-                               '_notify_port_updated') as notify_mock:
-            host_arg = {portbindings.HOST_ID: host}
-            update_body = {'name': 'test_update'}
-            if new_host is not None:
-                update_body[portbindings.HOST_ID] = new_host
-            with self.port(name='name', arg_list=(portbindings.HOST_ID,),
-                           **host_arg) as port:
-                neutron_context = context.get_admin_context()
-                updated_port = self._update('ports', port['port']['id'],
-                                            {'port': update_body},
-                                            neutron_context=neutron_context)
-                port_data = updated_port['port']
-                if new_host is not None:
-                    self.assertEqual(port_data[portbindings.HOST_ID],
-                                     new_host)
-                else:
-                    self.assertEqual(port_data[portbindings.HOST_ID], host)
-                if new_host is not None and new_host != host:
-                    notify_mock.assert_called_once_with(mock.ANY)
-                else:
-                    self.assertFalse(notify_mock.called)
-
-    def test_update_with_new_host_binding_notifies_agent(self):
-        self._test_update_port_binding('host-ovs-no_filter',
-                                       'host-bridge-filter')
-
-    def test_update_with_same_host_binding_does_not_notify(self):
-        self._test_update_port_binding('host-ovs-no_filter',
-                                       'host-ovs-no_filter')
-
-    def test_update_without_binding_does_not_notify(self):
-        self._test_update_port_binding('host-ovs-no_filter')
-
-    def test_update_from_empty_to_host_binding_notifies_agent(self):
-        self._test_update_port_binding('', 'host-ovs-no_filter')
-
-    def test_update_from_host_to_empty_binding_notifies_agent(self):
-        self._test_update_port_binding('host-ovs-no_filter', '')
-
-    def test_dvr_binding(self):
-        ctx = context.get_admin_context()
-        with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
-            port_id = port['port']['id']
-
-            # Verify port's VIF type and status.
-            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
-                             port['port'][portbindings.VIF_TYPE])
-            self.assertEqual('DOWN', port['port']['status'])
-
-            # Update the port to bind it to a host.
-            self.plugin.update_dvr_port_binding(ctx, port_id, {'port': {
-                portbindings.HOST_ID: 'host-ovs-no_filter',
-                'device_id': 'router1'}})
-
-            # Get port and verify VIF type and status unchanged.
-            port = self._show('ports', port_id)
-            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
-                             port['port'][portbindings.VIF_TYPE])
-            self.assertEqual('DOWN', port['port']['status'])
-
-            # Get and verify the binding details for the host.
-            details = self.plugin.endpoints[0].get_device_details(
-                ctx, agent_id="theAgentId", device=port_id,
-                host='host-ovs-no_filter')
-            self.assertEqual('local', details['network_type'])
-
-            # Get port and verify VIF type and changed status.
-            port = self._show('ports', port_id)
-            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
-                             port['port'][portbindings.VIF_TYPE])
-            self.assertEqual('BUILD', port['port']['status'])
-
-            # Mark device up.
-            self.plugin.endpoints[0].update_device_up(
-                ctx, agent_id="theAgentId", device=port_id,
-                host='host-ovs-no_filter')
-
-            # Get port and verify VIF type and changed status.
-            port = self._show('ports', port_id)
-            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
-                             port['port'][portbindings.VIF_TYPE])
-            self.assertEqual('ACTIVE', port['port']['status'])
-
-            # Mark device down.
-            self.plugin.endpoints[0].update_device_down(
-                ctx, agent_id="theAgentId", device=port_id,
-                host='host-ovs-no_filter')
-
-            # Get port and verify VIF type and changed status.
-            port = self._show('ports', port_id)
-            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
-                             port['port'][portbindings.VIF_TYPE])
-            self.assertEqual('DOWN', port['port']['status'])
-
-    def test_dvr_binding_multi_host_status(self):
-        ctx = context.get_admin_context()
-        with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
-            port_id = port['port']['id']
-
-            # Update the port to bind it to the 1st host.
-            self.plugin.update_dvr_port_binding(ctx, port_id, {'port': {
-                portbindings.HOST_ID: 'host-ovs-no_filter',
-                'device_id': 'router1'}})
-
-            # Mark 1st device up.
-            self.plugin.endpoints[0].update_device_up(
-                ctx, agent_id="theAgentId", device=port_id,
-                host='host-ovs-no_filter')
-
-            # Get port and verify status is ACTIVE.
-            port = self._show('ports', port_id)
-            self.assertEqual('ACTIVE', port['port']['status'])
-
-            # Update the port to bind it to a 2nd host.
-            self.plugin.update_dvr_port_binding(ctx, port_id, {'port': {
-                portbindings.HOST_ID: 'host-bridge-filter',
-                'device_id': 'router1'}})
-
-            # Mark 2nd device up.
-            self.plugin.endpoints[0].update_device_up(
-                ctx, agent_id="the2ndAgentId", device=port_id,
-                host='host-bridge-filter')
-
-            # Get port and verify status unchanged.
-            port = self._show('ports', port_id)
-            self.assertEqual('ACTIVE', port['port']['status'])
-
-            # Mark 1st device down.
-            self.plugin.endpoints[0].update_device_down(
-                ctx, agent_id="theAgentId", device=port_id,
-                host='host-ovs-no_filter')
-
-            # Get port and verify status unchanged.
-            port = self._show('ports', port_id)
-            self.assertEqual('ACTIVE', port['port']['status'])
-
-            # Mark 2nd device down.
-            self.plugin.endpoints[0].update_device_down(
-                ctx, agent_id="the2ndAgentId", device=port_id,
-                host='host-bridge-filter')
-
-            # Get port and verify status is DOWN.
-            port = self._show('ports', port_id)
-            self.assertEqual('DOWN', port['port']['status'])
-
-    def test_dvr_binding_update_unbound_host(self):
-        ctx = context.get_admin_context()
-        with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
-            port_id = port['port']['id']
-
-            # Mark the device up without first binding it to a host.
-            self.plugin.endpoints[0].update_device_up(
-                ctx, agent_id="theAgentId", device=port_id,
-                host='host-ovs-no_filter')
-
-            # Get port and verify status is still DOWN.
-            port = self._show('ports', port_id)
-            self.assertEqual('DOWN', port['port']['status'])
diff --git a/neutron/tests/unit/plugins/ml2/test_rpc.py b/neutron/tests/unit/plugins/ml2/test_rpc.py
deleted file mode 100644 (file)
index 7a54b39..0000000
+++ /dev/null
@@ -1,500 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Unit Tests for ml2 rpc
-"""
-
-import collections
-
-import mock
-from oslo_config import cfg
-from oslo_context import context as oslo_context
-import oslo_messaging
-from sqlalchemy.orm import exc
-
-from neutron.agent import rpc as agent_rpc
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import topics
-from neutron.plugins.ml2.drivers import type_tunnel
-from neutron.plugins.ml2 import managers
-from neutron.plugins.ml2 import rpc as plugin_rpc
-from neutron.services.qos import qos_consts
-from neutron.tests import base
-
-
-cfg.CONF.import_group('ml2', 'neutron.plugins.ml2.config')
-
-
-class RpcCallbacksTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(RpcCallbacksTestCase, self).setUp()
-        self.type_manager = managers.TypeManager()
-        self.notifier = plugin_rpc.AgentNotifierApi(topics.AGENT)
-        self.callbacks = plugin_rpc.RpcCallbacks(self.notifier,
-                                                 self.type_manager)
-        self.manager = mock.patch.object(
-            plugin_rpc.manager, 'NeutronManager').start()
-        self.plugin = self.manager.get_plugin()
-
-    def _test_update_device_up(self):
-        kwargs = {
-            'agent_id': 'foo_agent',
-            'device': 'foo_device'
-        }
-        with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin'
-                        '._device_to_port_id'):
-            with mock.patch('neutron.callbacks.registry.notify') as notify:
-                self.callbacks.update_device_up(mock.Mock(), **kwargs)
-                return notify
-
-    def test_update_device_up_notify(self):
-        notify = self._test_update_device_up()
-        kwargs = {
-            'context': mock.ANY, 'port': mock.ANY, 'update_device_up': True
-        }
-        notify.assert_called_once_with(
-            'port', 'after_update', self.plugin, **kwargs)
-
-    def test_update_device_up_notify_not_sent_with_port_not_found(self):
-        self.plugin._get_port.side_effect = (
-            exceptions.PortNotFound(port_id='foo_port_id'))
-        notify = self._test_update_device_up()
-        self.assertFalse(notify.call_count)
-
-    def test_get_device_details_without_port_context(self):
-        self.plugin.get_bound_port_context.return_value = None
-        self.assertEqual(
-            {'device': 'fake_device'},
-            self.callbacks.get_device_details(mock.Mock(),
-                                              device='fake_device'))
-
-    def test_get_device_details_port_context_without_bounded_segment(self):
-        self.plugin.get_bound_port_context().bottom_bound_segment = None
-        self.assertEqual(
-            {'device': 'fake_device'},
-            self.callbacks.get_device_details(mock.Mock(),
-                                              device='fake_device'))
-
-    def test_get_device_details_port_status_equal_new_status(self):
-        port = collections.defaultdict(lambda: 'fake')
-        self.plugin.get_bound_port_context().current = port
-        self.plugin.port_bound_to_host = mock.MagicMock(return_value=True)
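-        # update_port_status should be skipped exactly when the current
-        # status already equals the computed new status (BUILD when
-        # admin_state_up is True, DOWN otherwise).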
-        for admin_state_up in (True, False):
-            new_status = (constants.PORT_STATUS_BUILD if admin_state_up
-                          else constants.PORT_STATUS_DOWN)
-            for status in (constants.PORT_STATUS_ACTIVE,
-                           constants.PORT_STATUS_BUILD,
-                           constants.PORT_STATUS_DOWN,
-                           constants.PORT_STATUS_ERROR):
-                port['admin_state_up'] = admin_state_up
-                port['status'] = status
-                self.plugin.update_port_status.reset_mock()
-                self.callbacks.get_device_details(mock.Mock())
-                self.assertEqual(status == new_status,
-                                 not self.plugin.update_port_status.called)
-
-    def test_get_device_details_caching(self):
-        port = collections.defaultdict(lambda: 'fake_port')
-        cached_networks = {}
-        self.plugin.get_bound_port_context().current = port
-        self.plugin.get_bound_port_context().network.current = (
-            {"id": "fake_network"})
-        self.callbacks.get_device_details(mock.Mock(), host='fake_host',
-                                          cached_networks=cached_networks)
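-        # The defaultdict port answers 'fake_port' for every key, including
-        # network_id, so the bound network is cached under that key.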
-        self.assertIn('fake_port', cached_networks)
-
-    def test_get_device_details_wrong_host(self):
-        port = collections.defaultdict(lambda: 'fake')
-        port_context = self.plugin.get_bound_port_context()
-        port_context.current = port
-        port_context.host = 'fake'
-        self.plugin.update_port_status.reset_mock()
-        self.callbacks.get_device_details(mock.Mock(),
-                                          host='fake_host')
-        self.assertFalse(self.plugin.update_port_status.called)
-
-    def test_get_device_details_port_no_host(self):
-        port = collections.defaultdict(lambda: 'fake')
-        port_context = self.plugin.get_bound_port_context()
-        port_context.current = port
-        self.plugin.update_port_status.reset_mock()
-        self.callbacks.get_device_details(mock.Mock())
-        self.assertTrue(self.plugin.update_port_status.called)
-
-    def test_get_device_details_qos_policy_id_none(self):
-        port = collections.defaultdict(lambda: 'fake_port')
-        self.plugin.get_bound_port_context().current = port
-        self.plugin.get_bound_port_context().network._network = (
-            {"id": "fake_network"})
-        res = self.callbacks.get_device_details(mock.Mock(), host='fake')
-        self.assertIsNone(res['qos_policy_id'])
-
-    def test_get_device_details_network_qos_policy_id(self):
-        port = collections.defaultdict(lambda: 'fake_port')
-        self.plugin.get_bound_port_context().current = port
-        self.plugin.get_bound_port_context().network._network = (
-            {"id": "fake_network",
-             qos_consts.QOS_POLICY_ID: 'test-policy-id'})
-        res = self.callbacks.get_device_details(mock.Mock(), host='fake')
-        self.assertEqual('test-policy-id', res['network_qos_policy_id'])
-
-    def test_get_device_details_qos_policy_id_from_port(self):
-        port = collections.defaultdict(
-            lambda: 'fake_port',
-            {qos_consts.QOS_POLICY_ID: 'test-port-policy-id'})
-        self.plugin.get_bound_port_context().current = port
-        self.plugin.get_bound_port_context().network._network = (
-            {"id": "fake_network",
-             qos_consts.QOS_POLICY_ID: 'test-net-policy-id'})
-        res = self.callbacks.get_device_details(mock.Mock(), host='fake')
-        self.assertEqual('test-port-policy-id', res['qos_policy_id'])
-
-    def _test_get_devices_list(self, callback, side_effect, expected):
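-        # Drive the bulk callback and check that get_device_details is
-        # invoked once per device with the expected arguments.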
-        devices = [1, 2, 3, 4, 5]
-        kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'}
-        with mock.patch.object(self.callbacks, 'get_device_details',
-                               side_effect=side_effect) as f:
-            res = callback('fake_context', devices=devices, **kwargs)
-            self.assertEqual(expected, res)
-            self.assertEqual(len(devices), f.call_count)
-            calls = [mock.call('fake_context', device=i,
-                               cached_networks={}, **kwargs)
-                     for i in devices]
-            f.assert_has_calls(calls)
-
-    def test_get_devices_details_list(self):
-        devices = [1, 2, 3, 4, 5]
-        expected = devices
-        callback = self.callbacks.get_devices_details_list
-        self._test_get_devices_list(callback, devices, expected)
-
-    def test_get_devices_details_list_with_empty_devices(self):
-        with mock.patch.object(self.callbacks, 'get_device_details') as f:
-            res = self.callbacks.get_devices_details_list('fake_context')
-            self.assertFalse(f.called)
-            self.assertEqual([], res)
-
-    def test_get_devices_details_list_and_failed_devices(self):
-        devices = [1, 2, 3, 4, 5]
-        expected = {'devices': devices, 'failed_devices': []}
-        callback = (
-            self.callbacks.get_devices_details_list_and_failed_devices)
-        self._test_get_devices_list(callback, devices, expected)
-
-    def test_get_devices_details_list_and_failed_devices_failures(self):
-        devices = [1, Exception('testdevice'), 3,
-                   Exception('testdevice'), 5]
-        expected = {'devices': [1, 3, 5], 'failed_devices': [2, 4]}
-        callback = (
-            self.callbacks.get_devices_details_list_and_failed_devices)
-        self._test_get_devices_list(callback, devices, expected)
-
-    def test_get_devices_details_list_and_failed_devices_empty_dev(self):
-        with mock.patch.object(self.callbacks, 'get_device_details') as f:
-            res = self.callbacks.get_devices_details_list_and_failed_devices(
-                'fake_context')
-            self.assertFalse(f.called)
-            self.assertEqual({'devices': [], 'failed_devices': []}, res)
-
-    def _test_update_device_not_bound_to_host(self, func):
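-        # Simulate a device whose port is not bound to the requesting host
-        # and return the callback's result for the caller to assert on.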
-        self.plugin.port_bound_to_host.return_value = False
-        self.plugin._device_to_port_id.return_value = 'fake_port_id'
-        res = func(mock.Mock(), device='fake_device', host='fake_host')
-        self.plugin.port_bound_to_host.assert_called_once_with(mock.ANY,
-                                                               'fake_port_id',
-                                                               'fake_host')
-        return res
-
-    def test_update_device_up_with_device_not_bound_to_host(self):
-        self.assertIsNone(self._test_update_device_not_bound_to_host(
-            self.callbacks.update_device_up))
-
-    def test_update_device_down_with_device_not_bound_to_host(self):
-        self.assertEqual(
-            {'device': 'fake_device', 'exists': True},
-            self._test_update_device_not_bound_to_host(
-                self.callbacks.update_device_down))
-
-    def test_update_device_down_call_update_port_status(self):
-        self.plugin.update_port_status.return_value = False
-        self.plugin._device_to_port_id.return_value = 'fake_port_id'
-        self.assertEqual(
-            {'device': 'fake_device', 'exists': False},
-            self.callbacks.update_device_down(mock.Mock(),
-                                              device='fake_device',
-                                              host='fake_host'))
-        self.plugin.update_port_status.assert_called_once_with(
-            mock.ANY, 'fake_port_id', constants.PORT_STATUS_DOWN,
-            'fake_host')
-
-    def test_update_device_down_call_update_port_status_failed(self):
-        self.plugin.update_port_status.side_effect = exc.StaleDataError
-        self.assertEqual({'device': 'fake_device', 'exists': False},
-                         self.callbacks.update_device_down(
-                             mock.Mock(), device='fake_device'))
-
-    def _test_update_device_list(self, devices_up_side_effect,
-                                 devices_down_side_effect, expected):
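-        # Feed the given per-device side effects into update_device_up/down
-        # and compare the aggregated result with ``expected``.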
-        devices_up = [1, 2, 3]
-        devices_down = [4, 5]
-        kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'}
-        with mock.patch.object(self.callbacks, 'update_device_up',
-                               side_effect=devices_up_side_effect) as f_up, \
-            mock.patch.object(self.callbacks, 'update_device_down',
-                              side_effect=devices_down_side_effect) as f_down:
-            res = self.callbacks.update_device_list(
-                'fake_context', devices_up=devices_up,
-                devices_down=devices_down, **kwargs)
-            self.assertEqual(expected, res)
-            self.assertEqual(len(devices_up), f_up.call_count)
-            self.assertEqual(len(devices_down), f_down.call_count)
-
-    def test_update_device_list_no_failure(self):
-        devices_up_side_effect = [1, 2, 3]
-        devices_down_side_effect = [
-            {'device': 4, 'exists': True},
-            {'device': 5, 'exists': True}]
-        expected = {'devices_up': devices_up_side_effect,
-                    'failed_devices_up': [],
-                    'devices_down':
-                        [{'device': 4, 'exists': True},
-                         {'device': 5, 'exists': True}],
-                    'failed_devices_down': []}
-        self._test_update_device_list(devices_up_side_effect,
-                                      devices_down_side_effect,
-                                      expected)
-
-    def test_update_device_list_failed_devices(self):
-        devices_up_side_effect = [1, Exception('testdevice'), 3]
-        devices_down_side_effect = [{'device': 4, 'exists': True},
-                                    Exception('testdevice')]
-        expected = {'devices_up': [1, 3],
-                    'failed_devices_up': [2],
-                    'devices_down':
-                        [{'device': 4, 'exists': True}],
-                    'failed_devices_down': [5]}
-
-        self._test_update_device_list(devices_up_side_effect,
-                                      devices_down_side_effect,
-                                      expected)
-
-    def test_update_device_list_empty_devices(self):
-        expected = {'devices_up': [],
-                    'failed_devices_up': [],
-                    'devices_down': [],
-                    'failed_devices_down': []}
-
-        kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'}
-        res = self.callbacks.update_device_list(
-            'fake_context', devices_up=[], devices_down=[], **kwargs)
-        self.assertEqual(expected, res)
-
-
-class RpcApiTestCase(base.BaseTestCase):
-
-    def _test_rpc_api(self, rpcapi, topic, method, rpc_method, **kwargs):
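-        # Verify the RPC method prepares the client with the expected
-        # topic/version/fanout and then issues call/cast with the original
-        # arguments; 'call' must propagate the mocked return value.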
-        ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
-        expected_retval = 'foo' if rpc_method == 'call' else None
-        expected_version = kwargs.pop('version', None)
-        fanout = kwargs.pop('fanout', False)
-
-        with mock.patch.object(rpcapi.client, rpc_method) as rpc_mock,\
-                mock.patch.object(rpcapi.client, 'prepare') as prepare_mock:
-            prepare_mock.return_value = rpcapi.client
-            rpc_mock.return_value = expected_retval
-            retval = getattr(rpcapi, method)(ctxt, **kwargs)
-
-        prepare_args = {}
-        if expected_version:
-            prepare_args['version'] = expected_version
-        if fanout:
-            prepare_args['fanout'] = fanout
-        if topic:
-            prepare_args['topic'] = topic
-        prepare_mock.assert_called_once_with(**prepare_args)
-
-        self.assertEqual(retval, expected_retval)
-        rpc_mock.assert_called_once_with(ctxt, method, **kwargs)
-
-    def test_delete_network(self):
-        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
-        self._test_rpc_api(
-                rpcapi,
-                topics.get_topic_name(topics.AGENT,
-                                      topics.NETWORK,
-                                      topics.DELETE),
-                'network_delete', rpc_method='cast',
-                fanout=True, network_id='fake_request_spec')
-
-    def test_port_update(self):
-        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
-        self._test_rpc_api(
-                rpcapi,
-                topics.get_topic_name(topics.AGENT,
-                                      topics.PORT,
-                                      topics.UPDATE),
-                'port_update', rpc_method='cast',
-                fanout=True, port='fake_port',
-                network_type='fake_network_type',
-                segmentation_id='fake_segmentation_id',
-                physical_network='fake_physical_network')
-
-    def test_port_delete(self):
-        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
-        self._test_rpc_api(
-            rpcapi,
-            topics.get_topic_name(topics.AGENT,
-                                  topics.PORT,
-                                  topics.DELETE),
-            'port_delete', rpc_method='cast',
-            fanout=True, port_id='fake_port')
-
-    def test_tunnel_update(self):
-        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
-        self._test_rpc_api(
-                rpcapi,
-                topics.get_topic_name(topics.AGENT,
-                                      type_tunnel.TUNNEL,
-                                      topics.UPDATE),
-                'tunnel_update', rpc_method='cast',
-                fanout=True,
-                tunnel_ip='fake_ip', tunnel_type='gre')
-
-    def test_tunnel_delete(self):
-        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
-        self._test_rpc_api(
-                rpcapi,
-                topics.get_topic_name(topics.AGENT,
-                                      type_tunnel.TUNNEL,
-                                      topics.DELETE),
-                'tunnel_delete', rpc_method='cast',
-                fanout=True,
-                tunnel_ip='fake_ip', tunnel_type='gre')
-
-    def test_device_details(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        self._test_rpc_api(rpcapi, None,
-                           'get_device_details', rpc_method='call',
-                           device='fake_device',
-                           agent_id='fake_agent_id',
-                           host='fake_host')
-
-    def test_devices_details_list(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        self._test_rpc_api(rpcapi, None,
-                           'get_devices_details_list', rpc_method='call',
-                           devices=['fake_device1', 'fake_device2'],
-                           agent_id='fake_agent_id', host='fake_host',
-                           version='1.3')
-
-    def test_update_device_down(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        self._test_rpc_api(rpcapi, None,
-                           'update_device_down', rpc_method='call',
-                           device='fake_device',
-                           agent_id='fake_agent_id',
-                           host='fake_host')
-
-    def test_tunnel_sync(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        self._test_rpc_api(rpcapi, None,
-                           'tunnel_sync', rpc_method='call',
-                           tunnel_ip='fake_tunnel_ip',
-                           tunnel_type=None,
-                           host='fake_host',
-                           version='1.4')
-
-    def test_update_device_up(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        self._test_rpc_api(rpcapi, None,
-                           'update_device_up', rpc_method='call',
-                           device='fake_device',
-                           agent_id='fake_agent_id',
-                           host='fake_host')
-
-    def test_update_device_list(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        self._test_rpc_api(rpcapi, None,
-                           'update_device_list', rpc_method='call',
-                           devices_up=['fake_device1', 'fake_device2'],
-                           devices_down=['fake_device3', 'fake_device4'],
-                           agent_id='fake_agent_id',
-                           host='fake_host',
-                           version='1.5')
-
-    def test_update_device_list_unsupported(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
-        devices_up = ['fake_device1', 'fake_device2']
-        devices_down = ['fake_device3', 'fake_device4']
-        expected_ret_val = {'devices_up': ['fake_device2'],
-                            'failed_devices_up': ['fake_device1'],
-                            'devices_down': [
-                                {'device': 'fake_device3', 'exists': True}],
-                            'failed_devices_down': ['fake_device4']}
-        rpcapi.update_device_up = mock.Mock(
-            side_effect=[Exception('fake_device1 fails'), None])
-        rpcapi.update_device_down = mock.Mock(
-            side_effect=[{'device': 'fake_device3', 'exists': True},
-                         Exception('fake_device4 fails')])
-        with mock.patch.object(rpcapi.client, 'call'),\
-                mock.patch.object(rpcapi.client, 'prepare') as prepare_mock:
-            prepare_mock.side_effect = oslo_messaging.UnsupportedVersion(
-                'test')
-            res = rpcapi.update_device_list(ctxt, devices_up, devices_down,
-                                            'fake_agent_id', 'fake_host')
-            self.assertEqual(expected_ret_val, res)
-
-    def test_get_devices_details_list_and_failed_devices(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        self._test_rpc_api(rpcapi, None,
-                           'get_devices_details_list_and_failed_devices',
-                           rpc_method='call',
-                           devices=['fake_device1', 'fake_device2'],
-                           agent_id='fake_agent_id',
-                           host='fake_host',
-                           version='1.5')
-
-    def test_devices_details_list_and_failed_devices(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        self._test_rpc_api(rpcapi, None,
-                           'get_devices_details_list_and_failed_devices',
-                           rpc_method='call',
-                           devices=['fake_device1', 'fake_device2'],
-                           agent_id='fake_agent_id', host='fake_host',
-                           version='1.5')
-
-    def test_get_devices_details_list_and_failed_devices_unsupported(self):
-        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-        ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
-        devices = ['fake_device1', 'fake_device2']
-        dev2_details = {'device': 'fake_device2', 'network_id': 'net_id',
-                        'port_id': 'port_id', 'admin_state_up': True}
-        expected_ret_val = {'devices': [dev2_details],
-                            'failed_devices': ['fake_device1']}
-        rpcapi.get_device_details = mock.Mock(
-            side_effect=[Exception('fake_device1 fails'), dev2_details])
-        with mock.patch.object(rpcapi.client, 'call'),\
-                mock.patch.object(rpcapi.client, 'prepare') as prepare_mock:
-            prepare_mock.side_effect = oslo_messaging.UnsupportedVersion(
-                'test')
-            res = rpcapi.get_devices_details_list_and_failed_devices(
-                ctxt, devices, 'fake_agent_id', 'fake_host')
-            self.assertEqual(expected_ret_val, res)
diff --git a/neutron/tests/unit/plugins/ml2/test_security_group.py b/neutron/tests/unit/plugins/ml2/test_security_group.py
deleted file mode 100644 (file)
index b1f1e5d..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import math
-
-import mock
-
-from neutron.common import constants as const
-from neutron import context
-from neutron.extensions import securitygroup as ext_sg
-from neutron import manager
-from neutron.tests import tools
-from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc
-from neutron.tests.unit.api.v2 import test_base
-from neutron.tests.unit.extensions import test_securitygroup as test_sg
-
-PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
-
-
-class Ml2SecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
-    _plugin_name = PLUGIN_NAME
-
-    def setUp(self, plugin=None):
-        test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
-        notifier_p = mock.patch(NOTIFIER)
-        notifier_cls = notifier_p.start()
-        self.notifier = mock.Mock()
-        notifier_cls.return_value = self.notifier
-        self.useFixture(tools.AttributeMapMemento())
-        super(Ml2SecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
-
-
-class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase,
-                            test_sg.TestSecurityGroups,
-                            test_sg_rpc.SGNotificationTestMixin):
-    def setUp(self):
-        super(TestMl2SecurityGroups, self).setUp()
-        self.ctx = context.get_admin_context()
-        plugin = manager.NeutronManager.get_plugin()
-        plugin.start_rpc_listeners()
-
-    def _make_port_with_new_sec_group(self, net_id):
-        sg = self._make_security_group(self.fmt, 'name', 'desc')
-        port = self._make_port(
-            self.fmt, net_id, security_groups=[sg['security_group']['id']])
-        return port['port']
-
-    def _make_port_without_sec_group(self, net_id):
-        port = self._make_port(
-            self.fmt, net_id, security_groups=[])
-        return port['port']
-
-    def test_security_group_get_ports_from_devices(self):
-        with self.network() as n:
-            with self.subnet(n):
-                orig_ports = [
-                    self._make_port_with_new_sec_group(n['network']['id']),
-                    self._make_port_with_new_sec_group(n['network']['id']),
-                    self._make_port_without_sec_group(n['network']['id'])
-                ]
-                plugin = manager.NeutronManager.get_plugin()
-                # should match full ID and starting chars
-                ports = plugin.get_ports_from_devices(self.ctx,
-                    [orig_ports[0]['id'], orig_ports[1]['id'][0:8],
-                     orig_ports[2]['id']])
-                self.assertEqual(len(orig_ports), len(ports))
-                for port_dict in ports:
-                    p = next(p for p in orig_ports
-                             if p['id'] == port_dict['id'])
-                    self.assertEqual(p['id'], port_dict['id'])
-                    self.assertEqual(p['security_groups'],
-                                     port_dict[ext_sg.SECURITYGROUPS])
-                    self.assertEqual([], port_dict['security_group_rules'])
-                    self.assertEqual([p['fixed_ips'][0]['ip_address']],
-                                     port_dict['fixed_ips'])
-                    self._delete('ports', p['id'])
-
-    def test_security_group_get_ports_from_devices_with_bad_id(self):
-        plugin = manager.NeutronManager.get_plugin()
-        ports = plugin.get_ports_from_devices(self.ctx, ['bad_device_id'])
-        self.assertFalse(ports)
-
-    def test_security_group_no_db_calls_with_no_ports(self):
-        plugin = manager.NeutronManager.get_plugin()
-        with mock.patch(
-            'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port'
-        ) as get_mock:
-            self.assertFalse(plugin.get_ports_from_devices(self.ctx, []))
-            self.assertFalse(get_mock.called)
-
-    def test_large_port_count_broken_into_parts(self):
-        plugin = manager.NeutronManager.get_plugin()
-        ports_to_query = 73
-        for max_ports_per_query in (1, 2, 5, 7, 9, 31):
-            with mock.patch('neutron.plugins.ml2.db.MAX_PORTS_PER_QUERY',
-                            new=max_ports_per_query),\
-                    mock.patch(
-                        'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port',
-                        return_value={}) as get_mock:
-                plugin.get_ports_from_devices(self.ctx,
-                    ['%s%s' % (const.TAP_DEVICE_PREFIX, i)
-                     for i in range(ports_to_query)])
-                all_call_args = [x[1][1] for x in get_mock.mock_calls]
-                last_call_args = all_call_args.pop()
-                # all but last should be getting MAX_PORTS_PER_QUERY ports
-                self.assertTrue(
-                    all(map(lambda x: len(x) == max_ports_per_query,
-                            all_call_args))
-                )
-                remaining = ports_to_query % max_ports_per_query
-                if remaining:
-                    self.assertEqual(remaining, len(last_call_args))
-                # should be broken into ceil(total/MAX_PORTS_PER_QUERY) calls
-                self.assertEqual(
-                    math.ceil(ports_to_query / float(max_ports_per_query)),
-                    get_mock.call_count
-                )
-
-    def test_full_uuids_skip_port_id_lookup(self):
-        plugin = manager.NeutronManager.get_plugin()
-        # when full UUIDs are provided, the _or statement should only
-        # have one matching 'IN' criteria for all of the IDs
-        with mock.patch('neutron.plugins.ml2.db.or_') as or_mock,\
-                mock.patch('sqlalchemy.orm.Session.query') as qmock:
-            fmock = qmock.return_value.outerjoin.return_value.filter
-            # return no ports to exit the method early since we are mocking
-            # the query
-            fmock.return_value = []
-            plugin.get_ports_from_devices(self.ctx,
-                                          [test_base._uuid(),
-                                           test_base._uuid()])
-            # the or_ function should only have one argument
-            or_mock.assert_called_once_with(mock.ANY)
-
-
-class TestMl2SGServerRpcCallBack(
-    Ml2SecurityGroupsTestCase,
-    test_sg_rpc.SGServerRpcCallBackTestCase):
-    pass
diff --git a/neutron/tests/unit/plugins/ml2/test_tracked_resources.py b/neutron/tests/unit/plugins/ml2/test_tracked_resources.py
deleted file mode 100644 (file)
index e0f0040..0000000
+++ /dev/null
@@ -1,292 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron import context
-from neutron.db.quota import api as quota_db_api
-from neutron.tests.unit.extensions import test_securitygroup
-from neutron.tests.unit.plugins.ml2 import test_plugin
-
-
-class SgTestCaseWrapper(test_securitygroup.SecurityGroupDBTestCase):
-    # This wrapper class enables Ml2PluginV2TestCase to correctly call the
-    # setup method in SecurityGroupDBTestCase which does not accept the
-    # service_plugins keyword parameter.
-
-    def setUp(self, plugin, **kwargs):
-        super(SgTestCaseWrapper, self).setUp(plugin)
-
-
-class BaseTestTrackedResources(test_plugin.Ml2PluginV2TestCase,
-                               SgTestCaseWrapper):
-
-    def setUp(self):
-        self.ctx = context.get_admin_context()
-        # Prevent noise from default security group operations
-        def_sec_group_patch = mock.patch(
-            'neutron.db.securitygroups_db.SecurityGroupDbMixin.'
-            '_ensure_default_security_group')
-        def_sec_group_patch.start()
-        get_sec_group_port_patch = mock.patch(
-            'neutron.db.securitygroups_db.SecurityGroupDbMixin.'
-            '_get_security_groups_on_port')
-        get_sec_group_port_patch.start()
-        super(BaseTestTrackedResources, self).setUp()
-        self._tenant_id = uuidutils.generate_uuid()
-
-    def _test_init(self, resource_name):
-        quota_db_api.set_quota_usage(
-            self.ctx, resource_name, self._tenant_id)
-
-
-class TestTrackedResourcesEventHandler(BaseTestTrackedResources):
-
-    def setUp(self):
-        handler_patch = mock.patch(
-            'neutron.quota.resource.TrackedResource._db_event_handler')
-        self.handler_mock = handler_patch.start()
-        super(TestTrackedResourcesEventHandler, self).setUp()
-
-    def _verify_event_handler_calls(self, data, expected_call_count=1):
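-        # Walk the recorded handler calls backwards (most recent first),
-        # matching the model passed to the handler against each item.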
-        if not hasattr(data, '__iter__') or isinstance(data, dict):
-            data = [data]
-        self.assertEqual(expected_call_count, self.handler_mock.call_count)
-        call_idx = -1
-        for item in data:
-            if item:
-                model = self.handler_mock.call_args_list[call_idx][0][-1]
-                self.assertEqual(model['id'], item['id'])
-                self.assertEqual(model['tenant_id'], item['tenant_id'])
-            call_idx = call_idx - 1
-
-    def test_create_delete_network_triggers_event(self):
-        self._test_init('network')
-        net = self._make_network('json', 'meh', True)['network']
-        self._verify_event_handler_calls(net)
-        self._delete('networks', net['id'])
-        self._verify_event_handler_calls(net, expected_call_count=2)
-
-    def test_create_delete_port_triggers_event(self):
-        self._test_init('port')
-        net = self._make_network('json', 'meh', True)['network']
-        port = self._make_port('json', net['id'])['port']
-        # Expecting 2 calls - 1 for the network, 1 for the port
-        self._verify_event_handler_calls(port, expected_call_count=2)
-        self._delete('ports', port['id'])
-        self._verify_event_handler_calls(port, expected_call_count=3)
-
-    def test_create_delete_subnet_triggers_event(self):
-        self._test_init('subnet')
-        net = self._make_network('json', 'meh', True)
-        subnet = self._make_subnet('json', net, '10.0.0.1',
-                                   '10.0.0.0/24')['subnet']
-        # Expecting 2 calls - 1 for the network, 1 for the subnet
-        self._verify_event_handler_calls([subnet, net['network']],
-                                         expected_call_count=2)
-        self._delete('subnets', subnet['id'])
-        self._verify_event_handler_calls(subnet, expected_call_count=3)
-
-    def test_create_delete_network_with_subnet_triggers_event(self):
-        self._test_init('network')
-        self._test_init('subnet')
-        net = self._make_network('json', 'meh', True)
-        subnet = self._make_subnet('json', net, '10.0.0.1',
-                                   '10.0.0.0/24')['subnet']
-        # Expecting 2 calls - 1 for the network, 1 for the subnet
-        self._verify_event_handler_calls([subnet, net['network']],
-                                         expected_call_count=2)
-        self._delete('networks', net['network']['id'])
-        # Expecting 2 more calls - 1 for the network, 1 for the subnet
-        self._verify_event_handler_calls([net['network'], subnet],
-                                         expected_call_count=4)
-
-    def test_create_delete_subnetpool_triggers_event(self):
-        self._test_init('subnetpool')
-        pool = self._make_subnetpool('json', ['10.0.0.0/8'],
-                                     name='meh',
-                                     tenant_id=self._tenant_id)['subnetpool']
-        self._verify_event_handler_calls(pool)
-        self._delete('subnetpools', pool['id'])
-        self._verify_event_handler_calls(pool, expected_call_count=2)
-
-    def test_create_delete_securitygroup_triggers_event(self):
-        self._test_init('security_group')
-        sec_group = self._make_security_group(
-            'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
-        # When a security group is created it also creates 2 rules, therefore
-        # there will be three calls and we need to verify the first
-        self._verify_event_handler_calls([None, None, sec_group],
-                                         expected_call_count=3)
-        self._delete('security-groups', sec_group['id'])
-        # When a security group is deleted it also removes the 2 rules
-        # generated upon creation
-        self._verify_event_handler_calls(sec_group, expected_call_count=6)
-
-    def test_create_delete_securitygrouprule_triggers_event(self):
-        self._test_init('security_group_rule')
-        sec_group = self._make_security_group(
-            'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
-        rule_req = self._build_security_group_rule(
-            sec_group['id'], 'ingress', 'TCP', tenant_id=self._tenant_id)
-        sec_group_rule = self._make_security_group_rule(
-            'json', rule_req)['security_group_rule']
-        # When a security group is created it also creates 2 rules, therefore
-        # there will be four calls in total to the event handler
-        self._verify_event_handler_calls(sec_group_rule, expected_call_count=4)
-        self._delete('security-group-rules', sec_group_rule['id'])
-        self._verify_event_handler_calls(sec_group_rule, expected_call_count=5)
-
-
-class TestTrackedResources(BaseTestTrackedResources):
-
-    def _verify_dirty_bit(self, resource_name, expected_value=True):
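-        # Read the usage record for the resource/tenant pair straight from
-        # the quota DB API and check its dirty flag.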
-        usage = quota_db_api.get_quota_usage_by_resource_and_tenant(
-            self.ctx, resource_name, self._tenant_id)
-        self.assertEqual(expected_value, usage.dirty)
-
-    def test_create_delete_network_marks_dirty(self):
-        self._test_init('network')
-        net = self._make_network('json', 'meh', True)['network']
-        self._verify_dirty_bit('network')
-        # Clear the dirty bit
-        quota_db_api.set_quota_usage_dirty(
-            self.ctx, 'network', self._tenant_id, dirty=False)
-        self._delete('networks', net['id'])
-        self._verify_dirty_bit('network')
-
-    def test_list_networks_clears_dirty(self):
-        self._test_init('network')
-        net = self._make_network('json', 'meh', True)['network']
-        self.ctx.tenant_id = net['tenant_id']
-        self._list('networks', neutron_context=self.ctx)
-        self._verify_dirty_bit('network', expected_value=False)
-
-    def test_create_delete_port_marks_dirty(self):
-        self._test_init('port')
-        net = self._make_network('json', 'meh', True)['network']
-        port = self._make_port('json', net['id'])['port']
-        self._verify_dirty_bit('port')
-        # Clear the dirty bit
-        quota_db_api.set_quota_usage_dirty(
-            self.ctx, 'port', self._tenant_id, dirty=False)
-        self._delete('ports', port['id'])
-        self._verify_dirty_bit('port')
-
-    def test_list_ports_clears_dirty(self):
-        self._test_init('port')
-        net = self._make_network('json', 'meh', True)['network']
-        port = self._make_port('json', net['id'])['port']
-        self.ctx.tenant_id = port['tenant_id']
-        self._list('ports', neutron_context=self.ctx)
-        self._verify_dirty_bit('port', expected_value=False)
-
-    def test_create_delete_subnet_marks_dirty(self):
-        self._test_init('subnet')
-        net = self._make_network('json', 'meh', True)
-        subnet = self._make_subnet('json', net, '10.0.0.1',
-                                   '10.0.0.0/24')['subnet']
-        self._verify_dirty_bit('subnet')
-        # Clear the dirty bit
-        quota_db_api.set_quota_usage_dirty(
-            self.ctx, 'subnet', self._tenant_id, dirty=False)
-        self._delete('subnets', subnet['id'])
-        self._verify_dirty_bit('subnet')
-
-    def test_create_delete_network_with_subnet_marks_dirty(self):
-        self._test_init('network')
-        self._test_init('subnet')
-        net = self._make_network('json', 'meh', True)
-        self._make_subnet('json', net, '10.0.0.1',
-                          '10.0.0.0/24')['subnet']
-        self._verify_dirty_bit('subnet')
-        # Clear the dirty bit
-        quota_db_api.set_quota_usage_dirty(
-            self.ctx, 'subnet', self._tenant_id, dirty=False)
-        self._delete('networks', net['network']['id'])
-        self._verify_dirty_bit('network')
-        self._verify_dirty_bit('subnet')
-
-    def test_list_subnets_clears_dirty(self):
-        self._test_init('subnet')
-        net = self._make_network('json', 'meh', True)
-        subnet = self._make_subnet('json', net, '10.0.0.1',
-                                   '10.0.0.0/24')['subnet']
-        self.ctx.tenant_id = subnet['tenant_id']
-        self._list('subnets', neutron_context=self.ctx)
-        self._verify_dirty_bit('subnet', expected_value=False)
-
-    def test_create_delete_subnetpool_marks_dirty(self):
-        self._test_init('subnetpool')
-        pool = self._make_subnetpool('json', ['10.0.0.0/8'],
-                                     name='meh',
-                                     tenant_id=self._tenant_id)['subnetpool']
-        self._verify_dirty_bit('subnetpool')
-        # Clear the dirty bit
-        quota_db_api.set_quota_usage_dirty(
-            self.ctx, 'subnetpool', self._tenant_id, dirty=False)
-        self._delete('subnetpools', pool['id'])
-        self._verify_dirty_bit('subnetpool')
-
-    def test_list_subnetpools_clears_dirty(self):
-        self._test_init('subnetpool')
-        pool = self._make_subnetpool('json', ['10.0.0.0/8'],
-                                     name='meh',
-                                     tenant_id=self._tenant_id)['subnetpool']
-        self.ctx.tenant_id = pool['tenant_id']
-        self._list('subnetpools', neutron_context=self.ctx)
-        self._verify_dirty_bit('subnetpool', expected_value=False)
-
-    def test_create_delete_securitygroup_marks_dirty(self):
-        self._test_init('security_group')
-        sec_group = self._make_security_group(
-            'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
-        self._verify_dirty_bit('security_group')
-        # Clear the dirty bit
-        quota_db_api.set_quota_usage_dirty(
-            self.ctx, 'security_group', self._tenant_id, dirty=False)
-        self._delete('security-groups', sec_group['id'])
-        self._verify_dirty_bit('security_group')
-
-    def test_list_securitygroups_clears_dirty(self):
-        self._test_init('security_group')
-        self._make_security_group(
-            'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
-        self.ctx.tenant_id = self._tenant_id
-        self._list('security-groups', neutron_context=self.ctx)
-        self._verify_dirty_bit('security_group', expected_value=False)
-
-    def test_create_delete_securitygrouprule_marks_dirty(self):
-        self._test_init('security_group_rule')
-        sec_group = self._make_security_group(
-            'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
-        rule_req = self._build_security_group_rule(
-            sec_group['id'], 'ingress', 'TCP', tenant_id=self._tenant_id)
-        sec_group_rule = self._make_security_group_rule(
-            'json', rule_req)['security_group_rule']
-        self._verify_dirty_bit('security_group_rule')
-        # Clear the dirty bit
-        quota_db_api.set_quota_usage_dirty(
-            self.ctx, 'security_group_rule', self._tenant_id, dirty=False)
-        self._delete('security-group-rules', sec_group_rule['id'])
-        self._verify_dirty_bit('security_group_rule')
-
-    def test_list_securitygrouprules_clears_dirty(self):
-        self._test_init('security_group_rule')
-        self._make_security_group(
-            'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
-        # As the security group create operation also creates 2 security group
-        # rules there is no need to explicitly create any rule
-        self.ctx.tenant_id = self._tenant_id
-        self._list('security-group-rules', neutron_context=self.ctx)
-        self._verify_dirty_bit('security_group_rule', expected_value=False)
diff --git a/neutron/tests/unit/quota/__init__.py b/neutron/tests/unit/quota/__init__.py
deleted file mode 100644 (file)
index c8265f9..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-
-from neutron.db import model_base
-
-# Model classes for test resources
-
-
-class MehModel(model_base.BASEV2, model_base.HasTenant):
-    meh = sa.Column(sa.String(8), primary_key=True)
-
-
-class OtherMehModel(model_base.BASEV2, model_base.HasTenant):
-    othermeh = sa.Column(sa.String(8), primary_key=True)
diff --git a/neutron/tests/unit/quota/test_resource.py b/neutron/tests/unit/quota/test_resource.py
deleted file mode 100644 (file)
index 2811e3f..0000000
+++ /dev/null
@@ -1,251 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import uuid
-
-import mock
-
-from oslo_config import cfg
-
-from neutron import context
-from neutron.db import api as db_api
-from neutron.db.quota import api as quota_api
-from neutron.quota import resource
-from neutron.tests import base
-from neutron.tests.unit import quota as test_quota
-from neutron.tests.unit import testlib_api
-
-
-meh_quota_flag = 'quota_meh'
-meh_quota_opts = [cfg.IntOpt(meh_quota_flag, default=99)]
-
-
-class TestResource(base.DietTestCase):
-    """Unit tests for neutron.quota.resource.BaseResource"""
-
-    def test_create_resource_without_plural_name(self):
-        res = resource.BaseResource('foo', None)
-        self.assertEqual('foos', res.plural_name)
-        res = resource.BaseResource('foy', None)
-        self.assertEqual('foies', res.plural_name)
-
-    def test_create_resource_with_plural_name(self):
-        res = resource.BaseResource('foo', None,
-                                    plural_name='foopsies')
-        self.assertEqual('foopsies', res.plural_name)
-
-    def test_resource_default_value(self):
-        res = resource.BaseResource('foo', 'foo_quota')
-        with mock.patch('oslo_config.cfg.CONF') as mock_cfg:
-            mock_cfg.QUOTAS.foo_quota = 99
-            self.assertEqual(99, res.default)
-
-    def test_resource_negative_default_value(self):
-        res = resource.BaseResource('foo', 'foo_quota')
-        with mock.patch('oslo_config.cfg.CONF') as mock_cfg:
-            mock_cfg.QUOTAS.foo_quota = -99
-            self.assertEqual(-1, res.default)
-
-
-class TestTrackedResource(testlib_api.SqlTestCaseLight):
-
-    def _add_data(self, tenant_id=None):
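-        # Insert two MehModel rows for the tenant; the tests rely on the
-        # resulting count of 2.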
-        session = db_api.get_session()
-        with session.begin():
-            tenant_id = tenant_id or self.tenant_id
-            session.add(test_quota.MehModel(meh='meh_%s' % uuid.uuid4(),
-                                            tenant_id=tenant_id))
-            session.add(test_quota.MehModel(meh='meh_%s' % uuid.uuid4(),
-                                            tenant_id=tenant_id))
-
-    def _delete_data(self):
-        session = db_api.get_session()
-        with session.begin():
-            query = session.query(test_quota.MehModel).filter_by(
-                tenant_id=self.tenant_id)
-            for item in query:
-                session.delete(item)
-
-    def _update_data(self):
-        session = db_api.get_session()
-        with session.begin():
-            query = session.query(test_quota.MehModel).filter_by(
-                tenant_id=self.tenant_id)
-            for item in query:
-                item['meh'] = 'meh-%s' % item['meh']
-                session.add(item)
-
-    def setUp(self):
-        base.BaseTestCase.config_parse()
-        cfg.CONF.register_opts(meh_quota_opts, 'QUOTAS')
-        self.addCleanup(cfg.CONF.reset)
-        self.resource = 'meh'
-        self.other_resource = 'othermeh'
-        self.tenant_id = 'meh'
-        self.context = context.Context(
-            user_id='', tenant_id=self.tenant_id, is_admin=False)
-        super(TestTrackedResource, self).setUp()
-
-    def _register_events(self, res):
-        res.register_events()
-        self.addCleanup(res.unregister_events)
-
-    def _create_resource(self):
-        res = resource.TrackedResource(
-            self.resource, test_quota.MehModel, meh_quota_flag)
-        self._register_events(res)
-        return res
-
-    def _create_other_resource(self):
-        res = resource.TrackedResource(
-            self.other_resource, test_quota.OtherMehModel, meh_quota_flag)
-        self._register_events(res)
-        return res
-
-    def test_count_first_call_with_dirty_false(self):
-        quota_api.set_quota_usage(
-            self.context, self.resource, self.tenant_id, in_use=1)
-        res = self._create_resource()
-        self._add_data()
-        # explicitly set dirty flag to False
-        quota_api.set_all_quota_usage_dirty(
-            self.context, self.resource, dirty=False)
-        # Expect correct count to be returned anyway since the first call to
-        # count() always resyncs with the db
-        self.assertEqual(2, res.count(self.context, None, self.tenant_id))
-
-    def _test_count(self):
-        res = self._create_resource()
-        quota_api.set_quota_usage(
-            self.context, res.name, self.tenant_id, in_use=0)
-        self._add_data()
-        return res
-
-    def test_count_with_dirty_false(self):
-        res = self._test_count()
-        res.count(self.context, None, self.tenant_id)
-        # At this stage count has been invoked, and the dirty flag should be
-        # false. Another invocation of count should not query the model class
-        set_quota = 'neutron.db.quota.api.set_quota_usage'
-        with mock.patch(set_quota) as mock_set_quota:
-            self.assertEqual(2, res.count(self.context,
-                                          None,
-                                          self.tenant_id))
-            # the count must come from the usage table, not a fresh resync
-            self.assertEqual(0, mock_set_quota.call_count)
-
-    def test_count_with_dirty_true_resync(self):
-        res = self._test_count()
-        # Expect correct count to be returned, which also implies
-        # set_quota_usage has been invoked with the correct parameters
-        self.assertEqual(2, res.count(self.context,
-                                      None,
-                                      self.tenant_id,
-                                      resync_usage=True))
-
-    def test_count_with_dirty_true_resync_calls_set_quota_usage(self):
-        res = self._test_count()
-        set_quota_usage = 'neutron.db.quota.api.set_quota_usage'
-        with mock.patch(set_quota_usage) as mock_set_quota_usage:
-            quota_api.set_quota_usage_dirty(self.context,
-                                            self.resource,
-                                            self.tenant_id)
-            res.count(self.context, None, self.tenant_id,
-                      resync_usage=True)
-            mock_set_quota_usage.assert_called_once_with(
-                self.context, self.resource, self.tenant_id, in_use=2)
-
-    def test_count_with_dirty_true_no_usage_info(self):
-        res = self._create_resource()
-        self._add_data()
-        # Invoke count without having usage info in DB - Expect correct
-        # count to be returned
-        self.assertEqual(2, res.count(self.context, None, self.tenant_id))
-
-    def test_count_with_dirty_true_no_usage_info_calls_set_quota_usage(self):
-        res = self._create_resource()
-        self._add_data()
-        set_quota_usage = 'neutron.db.quota.api.set_quota_usage'
-        with mock.patch(set_quota_usage) as mock_set_quota_usage:
-            quota_api.set_quota_usage_dirty(self.context,
-                                            self.resource,
-                                            self.tenant_id)
-            res.count(self.context, None, self.tenant_id, resync_usage=True)
-            mock_set_quota_usage.assert_called_once_with(
-                self.context, self.resource, self.tenant_id, in_use=2)
-
-    def test_add_delete_data_triggers_event(self):
-        res = self._create_resource()
-        other_res = self._create_other_resource()
-        # Validate dirty tenants since mock does not work well with SQLAlchemy
-        # event handlers.
-        self._add_data()
-        self._add_data('someone_else')
-        self.assertEqual(2, len(res._dirty_tenants))
-        # Also, the dirty flag should not be set for other resources
-        self.assertEqual(0, len(other_res._dirty_tenants))
-        self.assertIn(self.tenant_id, res._dirty_tenants)
-        self.assertIn('someone_else', res._dirty_tenants)
-
-    def test_delete_data_triggers_event(self):
-        res = self._create_resource()
-        self._add_data()
-        self._add_data('someone_else')
-        # Artificially clear _dirty_tenants
-        res._dirty_tenants.clear()
-        self._delete_data()
-        # We did not delete "someone_else", so expect only a single dirty
-        # tenant
-        self.assertEqual(1, len(res._dirty_tenants))
-        self.assertIn(self.tenant_id, res._dirty_tenants)
-
-    def test_update_does_not_trigger_event(self):
-        res = self._create_resource()
-        self._add_data()
-        self._add_data('someone_else')
-        # Artificially clear _dirty_tenants
-        res._dirty_tenants.clear()
-        self._update_data()
-        self.assertEqual(0, len(res._dirty_tenants))
-
-    def test_mark_dirty(self):
-        res = self._create_resource()
-        self._add_data()
-        self._add_data('someone_else')
-        set_quota_usage = 'neutron.db.quota.api.set_quota_usage_dirty'
-        with mock.patch(set_quota_usage) as mock_set_quota_usage:
-            res.mark_dirty(self.context)
-            self.assertEqual(2, mock_set_quota_usage.call_count)
-            mock_set_quota_usage.assert_any_call(
-                self.context, self.resource, self.tenant_id)
-            mock_set_quota_usage.assert_any_call(
-                self.context, self.resource, 'someone_else')
-
-    def test_mark_dirty_no_dirty_tenant(self):
-        res = self._create_resource()
-        set_quota_usage = 'neutron.db.quota.api.set_quota_usage_dirty'
-        with mock.patch(set_quota_usage) as mock_set_quota_usage:
-            res.mark_dirty(self.context)
-            self.assertFalse(mock_set_quota_usage.call_count)
-
-    def test_resync(self):
-        res = self._create_resource()
-        self._add_data()
-        res.mark_dirty(self.context)
-        # self.tenant_id now is out of sync
-        set_quota_usage = 'neutron.db.quota.api.set_quota_usage'
-        with mock.patch(set_quota_usage) as mock_set_quota_usage:
-            res.resync(self.context, self.tenant_id)
-            # and now it should be in sync
-            self.assertNotIn(self.tenant_id, res._out_of_sync_tenants)
-            mock_set_quota_usage.assert_called_once_with(
-                self.context, self.resource, self.tenant_id, in_use=2)
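-
-
-# Illustrative sketch of the dirty-tracking pattern these tests exercise
-# (an approximation of TrackedResource's event wiring, not the production
-# code): insert and delete events on the tracked model flag the owning
-# tenant as dirty, while plain updates leave counts unchanged and are
-# therefore not listened to at all.
-class _DirtyTrackerSketch(object):
-
-    def __init__(self, model):
-        from sqlalchemy import event
-        self._dirty_tenants = set()
-        # mapper-level events fire once per flushed instance
-        event.listen(model, 'after_insert', self._mark_dirty)
-        event.listen(model, 'after_delete', self._mark_dirty)
-
-    def _mark_dirty(self, mapper, connection, target):
-        self._dirty_tenants.add(target.tenant_id)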
diff --git a/neutron/tests/unit/quota/test_resource_registry.py b/neutron/tests/unit/quota/test_resource_registry.py
deleted file mode 100644 (file)
index fcf27c8..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from oslo_config import cfg
-
-from neutron import context
-from neutron.quota import resource
-from neutron.quota import resource_registry
-from neutron.tests import base
-from neutron.tests.unit import quota as test_quota
-
-
-class TestResourceRegistry(base.DietTestCase):
-
-    def setUp(self):
-        super(TestResourceRegistry, self).setUp()
-        self.registry = resource_registry.ResourceRegistry.get_instance()
-        # clean up the registry at every test
-        self.registry.unregister_resources()
-
-    def test_set_tracked_resource_new_resource(self):
-        self.registry.set_tracked_resource('meh', test_quota.MehModel)
-        self.assertEqual(test_quota.MehModel,
-                         self.registry._tracked_resource_mappings['meh'])
-
-    def test_set_tracked_resource_existing_with_override(self):
-        self.test_set_tracked_resource_new_resource()
-        self.registry.set_tracked_resource('meh', test_quota.OtherMehModel,
-                                           override=True)
-        # Override is set to True, so the model class should change
-        self.assertEqual(test_quota.OtherMehModel,
-                         self.registry._tracked_resource_mappings['meh'])
-
-    def test_set_tracked_resource_existing_no_override(self):
-        self.test_set_tracked_resource_new_resource()
-        self.registry.set_tracked_resource('meh', test_quota.OtherMehModel)
-        # Override defaults to False, so the model class should not change
-        self.assertEqual(test_quota.MehModel,
-                         self.registry._tracked_resource_mappings['meh'])
-
-    def _test_register_resource_by_name(self, resource_name, expected_type):
-        self.assertNotIn(resource_name, self.registry._resources)
-        self.registry.register_resource_by_name(resource_name)
-        self.assertIn(resource_name, self.registry._resources)
-        self.assertIsInstance(self.registry.get_resource(resource_name),
-                              expected_type)
-
-    def test_register_resource_by_name_tracked(self):
-        self.test_set_tracked_resource_new_resource()
-        self._test_register_resource_by_name('meh', resource.TrackedResource)
-
-    def test_register_resource_by_name_not_tracked(self):
-        self._test_register_resource_by_name('meh', resource.CountableResource)
-
-    def test_register_resource_by_name_with_tracking_disabled_by_config(self):
-        cfg.CONF.set_override('track_quota_usage', False,
-                              group='QUOTAS')
-        # DietTestCase does not automatically clean up configuration overrides
-        self.addCleanup(cfg.CONF.reset)
-        self.registry.set_tracked_resource('meh', test_quota.MehModel)
-        self.assertNotIn(
-            'meh', self.registry._tracked_resource_mappings)
-        self._test_register_resource_by_name('meh', resource.CountableResource)
-
-
-class TestAuxiliaryFunctions(base.DietTestCase):
-
-    def setUp(self):
-        super(TestAuxiliaryFunctions, self).setUp()
-        self.registry = resource_registry.ResourceRegistry.get_instance()
-        # clean up the registry at every test
-        self.registry.unregister_resources()
-
-    def test_resync_tracking_disabled(self):
-        cfg.CONF.set_override('track_quota_usage', False,
-                              group='QUOTAS')
-        # DietTestCase does not automatically clean up configuration overrides
-        self.addCleanup(cfg.CONF.reset)
-        with mock.patch('neutron.quota.resource.'
-                        'TrackedResource.resync') as mock_resync:
-            self.registry.set_tracked_resource('meh', test_quota.MehModel)
-            self.registry.register_resource_by_name('meh')
-            resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id')
-            self.assertEqual(0, mock_resync.call_count)
-
-    def test_resync_tracked_resource(self):
-        with mock.patch('neutron.quota.resource.'
-                        'TrackedResource.resync') as mock_resync:
-            self.registry.set_tracked_resource('meh', test_quota.MehModel)
-            self.registry.register_resource_by_name('meh')
-            resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id')
-            mock_resync.assert_called_once_with(mock.ANY, 'tenant_id')
-
-    def test_resync_non_tracked_resource(self):
-        with mock.patch('neutron.quota.resource.'
-                        'TrackedResource.resync') as mock_resync:
-            self.registry.register_resource_by_name('meh')
-            resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id')
-            self.assertEqual(0, mock_resync.call_count)
-
-    def test_set_resources_dirty_invoked_with_tracking_disabled(self):
-        cfg.CONF.set_override('track_quota_usage', False,
-                              group='QUOTAS')
-        # DietTestCase does not automatically clean up configuration overrides
-        self.addCleanup(cfg.CONF.reset)
-        with mock.patch('neutron.quota.resource.'
-                        'TrackedResource.mark_dirty') as mock_mark_dirty:
-            self.registry.set_tracked_resource('meh', test_quota.MehModel)
-            self.registry.register_resource_by_name('meh')
-            resource_registry.set_resources_dirty(mock.ANY)
-            self.assertEqual(0, mock_mark_dirty.call_count)
-
-    def test_set_resources_dirty_no_dirty_resource(self):
-        ctx = context.Context('user_id', 'tenant_id',
-                              is_admin=False, is_advsvc=False)
-        with mock.patch('neutron.quota.resource.'
-                        'TrackedResource.mark_dirty') as mock_mark_dirty:
-            self.registry.set_tracked_resource('meh', test_quota.MehModel)
-            self.registry.register_resource_by_name('meh')
-            res = self.registry.get_resource('meh')
-            # This ensures dirty is false
-            res._dirty_tenants.clear()
-            resource_registry.set_resources_dirty(ctx)
-            self.assertEqual(0, mock_mark_dirty.call_count)
-
-    def test_set_resources_dirty_no_tracked_resource(self):
-        ctx = context.Context('user_id', 'tenant_id',
-                              is_admin=False, is_advsvc=False)
-        with mock.patch('neutron.quota.resource.'
-                        'TrackedResource.mark_dirty') as mock_mark_dirty:
-            self.registry.register_resource_by_name('meh')
-            resource_registry.set_resources_dirty(ctx)
-            self.assertEqual(0, mock_mark_dirty.call_count)
-
-    def test_set_resources_dirty(self):
-        ctx = context.Context('user_id', 'tenant_id',
-                              is_admin=False, is_advsvc=False)
-        with mock.patch('neutron.quota.resource.'
-                        'TrackedResource.mark_dirty') as mock_mark_dirty:
-            self.registry.set_tracked_resource('meh', test_quota.MehModel)
-            self.registry.register_resource_by_name('meh')
-            res = self.registry.get_resource('meh')
-            # This ensures dirty is true
-            res._dirty_tenants.add('tenant_id')
-            resource_registry.set_resources_dirty(ctx)
-            mock_mark_dirty.assert_called_once_with(ctx)
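-
-
-# Rough sketch of the decision these registry tests pin down (assumed
-# behaviour, not the production register_resource_by_name): a resource
-# is tracked only when a model was registered for it and quota usage
-# tracking is enabled; everything else becomes a CountableResource.
-# 'count_func' is a placeholder for the plugin-provided counter.
-def _make_resource_sketch(name, tracked_models, count_func):
-    if cfg.CONF.QUOTAS.track_quota_usage and name in tracked_models:
-        return resource.TrackedResource(name, tracked_models[name],
-                                        'quota_%s' % name)
-    return resource.CountableResource(name, count_func, 'quota_%s' % name)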
diff --git a/neutron/tests/unit/scheduler/__init__.py b/neutron/tests/unit/scheduler/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py
deleted file mode 100644 (file)
index cfce17e..0000000
+++ /dev/null
@@ -1,577 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo_config import cfg
-from oslo_utils import importutils
-import testscenarios
-
-from neutron.common import constants
-from neutron import context
-from neutron.db import agentschedulers_db as sched_db
-from neutron.db import common_db_mixin
-from neutron.db import models_v2
-from neutron.extensions import dhcpagentscheduler
-from neutron.scheduler import dhcp_agent_scheduler
-from neutron.tests.common import helpers
-from neutron.tests.unit import testlib_api
-
-# Required to generate tests from scenarios. Not compatible with nose.
-load_tests = testscenarios.load_tests_apply_scenarios
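-
-# Illustrative example of what the assignment above enables (a generic
-# sketch, not part of the original module): testscenarios runs each test
-# once per entry in a class's 'scenarios' list, applying the scenario
-# dict as instance attributes:
-#
-#     class TestSized(testtools.TestCase):
-#         scenarios = [('small', {'size': 1}), ('large', {'size': 10})]
-#
-#         def test_positive(self):  # runs once per scenario
-#             self.assertGreater(self.size, 0)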
-
-HOST_C = 'host-c'
-HOST_D = 'host-d'
-
-
-class TestDhcpSchedulerBaseTestCase(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(TestDhcpSchedulerBaseTestCase, self).setUp()
-        self.ctx = context.get_admin_context()
-        self.network = {'id': 'foo_network_id'}
-        self.network_id = 'foo_network_id'
-        self._save_networks([self.network_id])
-
-    def _create_and_set_agents_down(self, hosts, down_agent_count=0,
-                                    admin_state_up=True,
-                                    az=helpers.DEFAULT_AZ):
-        agents = []
-        for i, host in enumerate(hosts):
-            is_alive = i >= down_agent_count
-            agents.append(helpers.register_dhcp_agent(
-                host,
-                admin_state_up=admin_state_up,
-                alive=is_alive,
-                az=az))
-        return agents
-
-    def _save_networks(self, networks):
-        for network_id in networks:
-            with self.ctx.session.begin(subtransactions=True):
-                self.ctx.session.add(models_v2.Network(id=network_id))
-
-    def _test_schedule_bind_network(self, agents, network_id):
-        scheduler = dhcp_agent_scheduler.ChanceScheduler()
-        scheduler.resource_filter.bind(self.ctx, agents, network_id)
-        results = self.ctx.session.query(
-            sched_db.NetworkDhcpAgentBinding).filter_by(
-            network_id=network_id).all()
-        self.assertEqual(len(agents), len(results))
-        for result in results:
-            self.assertEqual(network_id, result.network_id)
-
-
-class TestDhcpScheduler(TestDhcpSchedulerBaseTestCase):
-
-    def test_schedule_bind_network_single_agent(self):
-        agents = self._create_and_set_agents_down(['host-a'])
-        self._test_schedule_bind_network(agents, self.network_id)
-
-    def test_schedule_bind_network_multi_agents(self):
-        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
-        self._test_schedule_bind_network(agents, self.network_id)
-
-    def test_schedule_bind_network_multi_agent_fail_one(self):
-        agents = self._create_and_set_agents_down(['host-a'])
-        self._test_schedule_bind_network(agents, self.network_id)
-        with mock.patch.object(dhcp_agent_scheduler.LOG, 'info') as fake_log:
-            self._test_schedule_bind_network(agents, self.network_id)
-            self.assertEqual(1, fake_log.call_count)
-
-    def _test_get_agents_and_scheduler_for_dead_agent(self):
-        agents = self._create_and_set_agents_down(['dead_host', 'alive_host'],
-                                                  1)
-        dead_agent = [agents[0]]
-        alive_agent = [agents[1]]
-        self._test_schedule_bind_network(dead_agent, self.network_id)
-        scheduler = dhcp_agent_scheduler.ChanceScheduler()
-        return dead_agent, alive_agent, scheduler
-
-    def _test_reschedule_vs_network_on_dead_agent(self,
-                                                  active_hosts_only):
-        dead_agent, alive_agent, scheduler = (
-            self._test_get_agents_and_scheduler_for_dead_agent())
-        network = {'id': self.network_id}
-        plugin = mock.Mock()
-        plugin.get_subnets.return_value = [{"network_id": self.network_id,
-                                            "enable_dhcp": True}]
-        plugin.get_agents_db.return_value = dead_agent + alive_agent
-        if active_hosts_only:
-            plugin.get_dhcp_agents_hosting_networks.return_value = []
-            self.assertTrue(
-                scheduler.schedule(
-                    plugin, self.ctx, network))
-        else:
-            plugin.get_dhcp_agents_hosting_networks.return_value = dead_agent
-            self.assertFalse(
-                scheduler.schedule(
-                    plugin, self.ctx, network))
-
-    def test_network_rescheduled_when_db_returns_active_hosts(self):
-        self._test_reschedule_vs_network_on_dead_agent(True)
-
-    def test_network_not_rescheduled_when_db_returns_all_hosts(self):
-        self._test_reschedule_vs_network_on_dead_agent(False)
-
-    def _get_agent_binding_from_db(self, agent):
-        return self.ctx.session.query(
-            sched_db.NetworkDhcpAgentBinding
-        ).filter_by(dhcp_agent_id=agent[0].id).all()
-
-    def _test_auto_reschedule_vs_network_on_dead_agent(self,
-                                                       active_hosts_only):
-        dead_agent, alive_agent, scheduler = (
-            self._test_get_agents_and_scheduler_for_dead_agent())
-        plugin = mock.Mock()
-        plugin.get_subnets.return_value = [{"network_id": self.network_id,
-                                            "enable_dhcp": True}]
-        plugin.get_network.return_value = self.network
-        if active_hosts_only:
-            plugin.get_dhcp_agents_hosting_networks.return_value = []
-        else:
-            plugin.get_dhcp_agents_hosting_networks.return_value = dead_agent
-        network_assigned_to_dead_agent = (
-            self._get_agent_binding_from_db(dead_agent))
-        self.assertEqual(1, len(network_assigned_to_dead_agent))
-        self.assertTrue(
-            scheduler.auto_schedule_networks(
-                plugin, self.ctx, "alive_host"))
-        network_assigned_to_dead_agent = (
-            self._get_agent_binding_from_db(dead_agent))
-        network_assigned_to_alive_agent = (
-            self._get_agent_binding_from_db(alive_agent))
-        self.assertEqual(1, len(network_assigned_to_dead_agent))
-        if active_hosts_only:
-            self.assertEqual(1, len(network_assigned_to_alive_agent))
-        else:
-            self.assertEqual(0, len(network_assigned_to_alive_agent))
-
-    def test_network_auto_rescheduled_when_db_returns_active_hosts(self):
-        self._test_auto_reschedule_vs_network_on_dead_agent(True)
-
-    def test_network_not_auto_rescheduled_when_db_returns_all_hosts(self):
-        self._test_auto_reschedule_vs_network_on_dead_agent(False)
-
-
-class TestAutoScheduleNetworks(TestDhcpSchedulerBaseTestCase):
-    """Unit test scenarios for ChanceScheduler.auto_schedule_networks.
-
-    network_present
-        Network is present or not
-
-    enable_dhcp
-        Dhcp is enabled or disabled in the subnet of the network
-
-    scheduled_already
-        Network is already scheduled to the agent or not
-
-    agent_down
-        Dhcp agent is down or alive
-
-    valid_host
-        If true, a valid host is passed to schedule the network;
-        otherwise an invalid host is passed.
-
-    az_hints
-        The 'availability_zone_hints' of the network.
-        Note that the default 'availability_zone' of an agent is 'nova'.
-    """
-    scenarios = [
-        ('Network present',
-         dict(network_present=True,
-              enable_dhcp=True,
-              scheduled_already=False,
-              agent_down=False,
-              valid_host=True,
-              az_hints=[])),
-
-        ('No network',
-         dict(network_present=False,
-              enable_dhcp=False,
-              scheduled_already=False,
-              agent_down=False,
-              valid_host=True,
-              az_hints=[])),
-
-        ('Network already scheduled',
-         dict(network_present=True,
-              enable_dhcp=True,
-              scheduled_already=True,
-              agent_down=False,
-              valid_host=True,
-              az_hints=[])),
-
-        ('Agent down',
-         dict(network_present=True,
-              enable_dhcp=True,
-              scheduled_already=False,
-              agent_down=True,
-              valid_host=True,
-              az_hints=[])),
-
-        ('DHCP disabled',
-         dict(network_present=True,
-              enable_dhcp=False,
-              scheduled_already=False,
-              agent_down=False,
-              valid_host=False,
-              az_hints=[])),
-
-        ('Invalid host',
-         dict(network_present=True,
-              enable_dhcp=True,
-              scheduled_already=False,
-              agent_down=False,
-              valid_host=False,
-              az_hints=[])),
-
-        ('Match AZ',
-         dict(network_present=True,
-              enable_dhcp=True,
-              scheduled_already=False,
-              agent_down=False,
-              valid_host=True,
-              az_hints=['nova'])),
-
-        ('Not match AZ',
-         dict(network_present=True,
-              enable_dhcp=True,
-              scheduled_already=False,
-              agent_down=False,
-              valid_host=True,
-              az_hints=['not-match'])),
-    ]
-
-    def test_auto_schedule_network(self):
-        plugin = mock.MagicMock()
-        plugin.get_subnets.return_value = (
-            [{"network_id": self.network_id, "enable_dhcp": self.enable_dhcp}]
-            if self.network_present else [])
-        plugin.get_network.return_value = {'availability_zone_hints':
-                                           self.az_hints}
-        scheduler = dhcp_agent_scheduler.ChanceScheduler()
-        if self.network_present:
-            down_agent_count = 1 if self.agent_down else 0
-            agents = self._create_and_set_agents_down(
-                ['host-a'], down_agent_count=down_agent_count)
-            if self.scheduled_already:
-                self._test_schedule_bind_network(agents, self.network_id)
-
-        expected_result = (self.network_present and self.enable_dhcp)
-        expected_hosted_agents = (1 if expected_result and
-                                  self.valid_host and
-                                  not self.agent_down else 0)
-        if (self.az_hints and
-            agents[0]['availability_zone'] not in self.az_hints):
-            expected_hosted_agents = 0
-        host = "host-a" if self.valid_host else "host-b"
-        observed_ret_value = scheduler.auto_schedule_networks(
-            plugin, self.ctx, host)
-        self.assertEqual(expected_result, observed_ret_value)
-        hosted_agents = self.ctx.session.query(
-            sched_db.NetworkDhcpAgentBinding).all()
-        self.assertEqual(expected_hosted_agents, len(hosted_agents))
-
-
-class TestNetworksFailover(TestDhcpSchedulerBaseTestCase,
-                           sched_db.DhcpAgentSchedulerDbMixin,
-                           common_db_mixin.CommonDbMixin):
-    def test_reschedule_network_from_down_agent(self):
-        agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1)
-        self._test_schedule_bind_network([agents[0]], self.network_id)
-        self._save_networks(["foo-network-2"])
-        self._test_schedule_bind_network([agents[1]], "foo-network-2")
-        with mock.patch.object(self, 'remove_network_from_dhcp_agent') as rn,\
-                mock.patch.object(self,
-                                  'schedule_network',
-                                  return_value=[agents[1]]) as sch,\
-                mock.patch.object(self,
-                                  'get_network',
-                                  create=True,
-                                  return_value={'id': self.network_id}):
-            notifier = mock.MagicMock()
-            self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier
-            self.remove_networks_from_down_agents()
-            rn.assert_called_with(mock.ANY, agents[0].id, self.network_id,
-                                  notify=False)
-            sch.assert_called_with(mock.ANY, {'id': self.network_id})
-            notifier.network_added_to_agent.assert_called_with(
-                mock.ANY, self.network_id, agents[1].host)
-
-    def _test_failed_rescheduling(self, rn_side_effect=None):
-        agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1)
-        self._test_schedule_bind_network([agents[0]], self.network_id)
-        with mock.patch.object(self,
-                               'remove_network_from_dhcp_agent',
-                               side_effect=rn_side_effect) as rn,\
-                mock.patch.object(self,
-                                  'schedule_network',
-                                  return_value=None) as sch,\
-                mock.patch.object(self,
-                                  'get_network',
-                                  create=True,
-                                  return_value={'id': self.network_id}):
-            notifier = mock.MagicMock()
-            self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier
-            self.remove_networks_from_down_agents()
-            rn.assert_called_with(mock.ANY, agents[0].id, self.network_id,
-                                  notify=False)
-            sch.assert_called_with(mock.ANY, {'id': self.network_id})
-            self.assertFalse(notifier.network_added_to_agent.called)
-
-    def test_reschedule_network_from_down_agent_failed(self):
-        self._test_failed_rescheduling()
-
-    def test_reschedule_network_from_down_agent_concurrent_removal(self):
-        self._test_failed_rescheduling(
-            rn_side_effect=dhcpagentscheduler.NetworkNotHostedByDhcpAgent(
-                network_id='foo', agent_id='bar'))
-
-    def test_filter_bindings(self):
-        bindings = [
-            sched_db.NetworkDhcpAgentBinding(network_id='foo1',
-                                             dhcp_agent={'id': 'id1'}),
-            sched_db.NetworkDhcpAgentBinding(network_id='foo2',
-                                             dhcp_agent={'id': 'id1'}),
-            sched_db.NetworkDhcpAgentBinding(network_id='foo3',
-                                             dhcp_agent={'id': 'id2'}),
-            sched_db.NetworkDhcpAgentBinding(network_id='foo4',
-                                             dhcp_agent={'id': 'id2'})]
-        with mock.patch.object(self, 'agent_starting_up',
-                               side_effect=[True, False]):
-            res = [b for b in self._filter_bindings(None, bindings)]
-            # agent_starting_up is consulted once per agent (id1 and id2)
-            self.assertEqual(2, len(res))
-            res_ids = [b.network_id for b in res]
-            self.assertIn('foo3', res_ids)
-            self.assertIn('foo4', res_ids)
-
-    def test_reschedule_network_from_down_agent_failed_on_unexpected(self):
-        agents = self._create_and_set_agents_down(['host-a'], 1)
-        self._test_schedule_bind_network([agents[0]], self.network_id)
-        with mock.patch.object(
-            self, '_filter_bindings',
-            side_effect=Exception()):
-            # just make sure that no exception is raised
-            self.remove_networks_from_down_agents()
-
-    def test_reschedule_doesnt_occur_if_no_agents(self):
-        agents = self._create_and_set_agents_down(['host-a', 'host-b'], 2)
-        self._test_schedule_bind_network([agents[0]], self.network_id)
-        with mock.patch.object(
-            self, 'remove_network_from_dhcp_agent') as rn:
-            self.remove_networks_from_down_agents()
-            self.assertFalse(rn.called)
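-
-    def _failover_loop_sketch(self, context, down_bindings, notifier):
-        # Paraphrased sketch of the loop the tests in this class exercise
-        # (an approximation, not the production
-        # remove_networks_from_down_agents; unexpected errors are caught
-        # at a higher level so the periodic task survives them):
-        for binding in down_bindings:
-            try:
-                self.remove_network_from_dhcp_agent(
-                    context, binding.dhcp_agent_id, binding.network_id,
-                    notify=False)
-            except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
-                # concurrently unbound by another worker; reschedule anyway
-                pass
-            agents = self.schedule_network(context,
-                                           {'id': binding.network_id})
-            for agent in agents or []:
-                notifier.network_added_to_agent(
-                    context, binding.network_id, agent.host)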
-
-
-class DHCPAgentWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase):
-    """Unit test scenarios for WeightScheduler.schedule."""
-
-    def setUp(self):
-        super(DHCPAgentWeightSchedulerTestCase, self).setUp()
-        DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-        self.setup_coreplugin(DB_PLUGIN_KLASS)
-        cfg.CONF.set_override("network_scheduler_driver",
-            'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler')
-        self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.'
-                                                'Ml2Plugin')
-        self.assertEqual(1, self.patched_dhcp_periodic.call_count)
-        self.plugin.network_scheduler = importutils.import_object(
-            'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler'
-        )
-        cfg.CONF.set_override('dhcp_agents_per_network', 1)
-        cfg.CONF.set_override("dhcp_load_type", "networks")
-
-    def test_scheduler_one_agent_per_network(self):
-        self._save_networks(['1111'])
-        helpers.register_dhcp_agent(HOST_C)
-        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
-                                               {'id': '1111'})
-        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
-                                                              ['1111'])
-        self.assertEqual(1, len(agents))
-
-    def test_scheduler_two_agents_per_network(self):
-        cfg.CONF.set_override('dhcp_agents_per_network', 2)
-        self._save_networks(['1111'])
-        helpers.register_dhcp_agent(HOST_C)
-        helpers.register_dhcp_agent(HOST_D)
-        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
-                                               {'id': '1111'})
-        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
-                                                              ['1111'])
-        self.assertEqual(2, len(agents))
-
-    def test_scheduler_no_active_agents(self):
-        self._save_networks(['1111'])
-        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
-                                               {'id': '1111'})
-        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
-                                                              ['1111'])
-        self.assertEqual(0, len(agents))
-
-    def test_scheduler_equal_distribution(self):
-        self._save_networks(['1111', '2222', '3333'])
-        helpers.register_dhcp_agent(HOST_C)
-        helpers.register_dhcp_agent(HOST_D, networks=1)
-        self.plugin.network_scheduler.schedule(
-            self.plugin, context.get_admin_context(), {'id': '1111'})
-        helpers.register_dhcp_agent(HOST_D, networks=2)
-        self.plugin.network_scheduler.schedule(
-            self.plugin, context.get_admin_context(), {'id': '2222'})
-        helpers.register_dhcp_agent(HOST_C, networks=4)
-        self.plugin.network_scheduler.schedule(
-            self.plugin, context.get_admin_context(), {'id': '3333'})
-        agent1 = self.plugin.get_dhcp_agents_hosting_networks(
-            self.ctx, ['1111'])
-        agent2 = self.plugin.get_dhcp_agents_hosting_networks(
-            self.ctx, ['2222'])
-        agent3 = self.plugin.get_dhcp_agents_hosting_networks(
-            self.ctx, ['3333'])
-        self.assertEqual('host-c', agent1[0]['host'])
-        self.assertEqual('host-c', agent2[0]['host'])
-        self.assertEqual('host-d', agent3[0]['host'])
-
-
-class TestDhcpSchedulerFilter(TestDhcpSchedulerBaseTestCase,
-                              sched_db.DhcpAgentSchedulerDbMixin):
-    def _test_get_dhcp_agents_hosting_networks(self, expected, **kwargs):
-        agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1)
-        agents += self._create_and_set_agents_down(['host-c', 'host-d'], 1,
-                                                   admin_state_up=False)
-        self._test_schedule_bind_network(agents, self.network_id)
-        agents = self.get_dhcp_agents_hosting_networks(self.ctx,
-                                                       [self.network_id],
-                                                       **kwargs)
-        host_ids = set(a['host'] for a in agents)
-        self.assertEqual(expected, host_ids)
-
-    def test_get_dhcp_agents_hosting_networks_default(self):
-        self._test_get_dhcp_agents_hosting_networks({'host-a', 'host-b',
-                                                     'host-c', 'host-d'})
-
-    def test_get_dhcp_agents_hosting_networks_active(self):
-        self._test_get_dhcp_agents_hosting_networks({'host-b', 'host-d'},
-                                                    active=True)
-
-    def test_get_dhcp_agents_hosting_networks_admin_up(self):
-        self._test_get_dhcp_agents_hosting_networks({'host-a', 'host-b'},
-                                                    admin_state_up=True)
-
-    def test_get_dhcp_agents_hosting_networks_active_admin_up(self):
-        self._test_get_dhcp_agents_hosting_networks({'host-b'},
-                                                    active=True,
-                                                    admin_state_up=True)
-
-    def test_get_dhcp_agents_hosting_networks_admin_down(self):
-        self._test_get_dhcp_agents_hosting_networks({'host-c', 'host-d'},
-                                                    admin_state_up=False)
-
-    def test_get_dhcp_agents_hosting_networks_active_admin_down(self):
-        self._test_get_dhcp_agents_hosting_networks({'host-d'},
-                                                    active=True,
-                                                    admin_state_up=False)
-
-
-class DHCPAgentAZAwareWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase):
-
-    def setUp(self):
-        super(DHCPAgentAZAwareWeightSchedulerTestCase, self).setUp()
-        DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-        self.setup_coreplugin(DB_PLUGIN_KLASS)
-        cfg.CONF.set_override("network_scheduler_driver",
-            'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler')
-        self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.'
-                                                'Ml2Plugin')
-        cfg.CONF.set_override('dhcp_agents_per_network', 1)
-        cfg.CONF.set_override("dhcp_load_type", "networks")
-
-    def test_az_scheduler_one_az_hints(self):
-        self._save_networks(['1111'])
-        helpers.register_dhcp_agent('az1-host1', networks=1, az='az1')
-        helpers.register_dhcp_agent('az1-host2', networks=2, az='az1')
-        helpers.register_dhcp_agent('az2-host1', networks=3, az='az2')
-        helpers.register_dhcp_agent('az2-host2', networks=4, az='az2')
-        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
-            {'id': '1111', 'availability_zone_hints': ['az2']})
-        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
-                                                              ['1111'])
-        self.assertEqual(1, len(agents))
-        self.assertEqual('az2-host1', agents[0]['host'])
-
-    def test_az_scheduler_default_az_hints(self):
-        cfg.CONF.set_override('default_availability_zones', ['az1'])
-        self._save_networks(['1111'])
-        helpers.register_dhcp_agent('az1-host1', networks=1, az='az1')
-        helpers.register_dhcp_agent('az1-host2', networks=2, az='az1')
-        helpers.register_dhcp_agent('az2-host1', networks=3, az='az2')
-        helpers.register_dhcp_agent('az2-host2', networks=4, az='az2')
-        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
-            {'id': '1111', 'availability_zone_hints': []})
-        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
-                                                              ['1111'])
-        self.assertEqual(1, len(agents))
-        self.assertEqual('az1-host1', agents[0]['host'])
-
-    def test_az_scheduler_two_az_hints(self):
-        cfg.CONF.set_override('dhcp_agents_per_network', 2)
-        self._save_networks(['1111'])
-        helpers.register_dhcp_agent('az1-host1', networks=1, az='az1')
-        helpers.register_dhcp_agent('az1-host2', networks=2, az='az1')
-        helpers.register_dhcp_agent('az2-host1', networks=3, az='az2')
-        helpers.register_dhcp_agent('az2-host2', networks=4, az='az2')
-        helpers.register_dhcp_agent('az3-host1', networks=5, az='az3')
-        helpers.register_dhcp_agent('az3-host2', networks=6, az='az3')
-        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
-            {'id': '1111', 'availability_zone_hints': ['az1', 'az3']})
-        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
-                                                              ['1111'])
-        self.assertEqual(2, len(agents))
-        expected_hosts = set(['az1-host1', 'az3-host1'])
-        hosts = set([a['host'] for a in agents])
-        self.assertEqual(expected_hosts, hosts)
-
-    def test_az_scheduler_two_az_hints_one_available_az(self):
-        cfg.CONF.set_override('dhcp_agents_per_network', 2)
-        self._save_networks(['1111'])
-        helpers.register_dhcp_agent('az1-host1', networks=1, az='az1')
-        helpers.register_dhcp_agent('az1-host2', networks=2, az='az1')
-        helpers.register_dhcp_agent('az2-host1', networks=3, alive=False,
-                                    az='az2')
-        helpers.register_dhcp_agent('az2-host2', networks=4,
-                                    admin_state_up=False, az='az2')
-        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
-            {'id': '1111', 'availability_zone_hints': ['az1', 'az2']})
-        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
-                                                              ['1111'])
-        self.assertEqual(2, len(agents))
-        expected_hosts = set(['az1-host1', 'az1-host2'])
-        hosts = set([a['host'] for a in agents])
-        self.assertEqual(expected_hosts, hosts)
-
-    def test_az_scheduler_no_az_hints(self):
-        cfg.CONF.set_override('dhcp_agents_per_network', 2)
-        self._save_networks(['1111'])
-        helpers.register_dhcp_agent('az1-host1', networks=2, az='az1')
-        helpers.register_dhcp_agent('az1-host2', networks=3, az='az1')
-        helpers.register_dhcp_agent('az2-host1', networks=2, az='az2')
-        helpers.register_dhcp_agent('az2-host2', networks=1, az='az2')
-        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
-            {'id': '1111', 'availability_zone_hints': []})
-        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
-                                                              ['1111'])
-        self.assertEqual(2, len(agents))
-        expected_hosts = set(['az1-host1', 'az2-host2'])
-        hosts = {a['host'] for a in agents}
-        self.assertEqual(expected_hosts, hosts)
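-
-
-# Condensed sketch of the AZ-aware selection these tests expect (a
-# paraphrase of the AZAwareWeightScheduler strategy, not the real code):
-# spread the network across the hinted availability zones first (falling
-# back to every known zone when no hints or defaults apply), then pick
-# the least-loaded agent within each zone.
-def _az_pick_sketch(agents_by_az, az_hints, needed):
-    azs = ([az for az in az_hints if az in agents_by_az]
-           or sorted(agents_by_az))
-    chosen = []
-    while azs and len(chosen) < needed:
-        az = azs.pop(0)
-        candidates = sorted(agents_by_az[az], key=lambda a: a['load'])
-        if candidates:
-            chosen.append(candidates.pop(0))
-            agents_by_az[az] = candidates
-            azs.append(az)  # round-robin so zones fill evenly
-    return chosen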
diff --git a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
deleted file mode 100644 (file)
index 26f2c14..0000000
+++ /dev/null
@@ -1,1954 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import contextlib
-import datetime
-import uuid
-
-import mock
-import testscenarios
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_utils import importutils
-from oslo_utils import timeutils
-from sqlalchemy.orm import query
-
-from neutron.common import constants
-from neutron import context as n_context
-from neutron.db import agents_db
-from neutron.db import common_db_mixin
-from neutron.db import db_base_plugin_v2 as db_v2
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_db
-from neutron.db import l3_dvrscheduler_db
-from neutron.db import l3_hamode_db
-from neutron.db import l3_hascheduler_db
-from neutron.extensions import l3_ext_ha_mode as l3_ha
-from neutron.extensions import l3agentscheduler as l3agent
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.scheduler import l3_agent_scheduler
-from neutron.tests import base
-from neutron.tests.common import helpers
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit.extensions import test_l3
-from neutron.tests.unit import testlib_api
-
-# The assignment below is required to generate tests from scenarios.
-# As documented in testscenarios: "Multiply tests depending on their
-# 'scenarios' attribute. This can be assigned to 'load_tests' in any
-# test module to make this automatically work across tests in the
-# module."
-load_tests = testscenarios.load_tests_apply_scenarios
-
-HOST_DVR = 'my_l3_host_dvr'
-HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
-DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
-DEVICE_OWNER_COMPUTE_NOVA = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova'
-
-
-class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler):
-
-    def schedule(self):
-        pass
-
-    def _choose_router_agent(self):
-        pass
-
-    def _choose_router_agents_for_ha(self):
-        pass
-
-
-class FakePortDB(object):
-    def __init__(self, port_list):
-        self._port_list = port_list
-
-    def _get_query_answer(self, port_list, filters):
-        answers = []
-        for port in port_list:
-            matched = True
-            for key, search_values in filters.items():
-                port_value = port.get(key, None)
-                if not port_value:
-                    matched = False
-                    break
-
-                if isinstance(port_value, list):
-                    sub_answers = self._get_query_answer(port_value,
-                                                         search_values)
-                    matched = len(sub_answers) > 0
-                else:
-                    matched = port_value in search_values
-
-                if not matched:
-                    break
-
-            if matched:
-                answers.append(port)
-
-        return answers
-
-    def get_port(self, context, port_id):
-        for port in self._port_list:
-            if port['id'] == port_id:
-                if port['tenant_id'] == context.tenant_id or context.is_admin:
-                    return port
-                break
-
-        return None
-
-    def get_ports(self, context, filters=None):
-        query_filters = dict()
-        if filters:
-            query_filters.update(filters)
-
-        if not context.is_admin:
-            query_filters['tenant_id'] = [context.tenant_id]
-
-        result = self._get_query_answer(self._port_list, query_filters)
-        return result
-
-
-class L3SchedulerBaseTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(L3SchedulerBaseTestCase, self).setUp()
-        self.scheduler = FakeL3Scheduler()
-        self.plugin = mock.Mock()
-
-    def test_auto_schedule_routers(self):
-        self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
-        with mock.patch.object(self.scheduler,
-                               '_get_routers_to_schedule') as gs,\
-                mock.patch.object(self.scheduler,
-                                  '_get_routers_can_schedule') as gr:
-            result = self.scheduler.auto_schedule_routers(
-                self.plugin, mock.ANY, mock.ANY, mock.ANY)
-            self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
-            self.assertTrue(result)
-            self.assertTrue(gs.called)
-            self.assertTrue(gr.called)
-
-    def test_auto_schedule_routers_no_agents(self):
-        self.plugin.get_enabled_agent_on_host.return_value = None
-        result = self.scheduler.auto_schedule_routers(
-            self.plugin, mock.ANY, mock.ANY, mock.ANY)
-        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
-        self.assertFalse(result)
-
-    def test_auto_schedule_routers_no_unscheduled_routers(self):
-        type(self.plugin).supported_extension_aliases = (
-            mock.PropertyMock(return_value=[]))
-        with mock.patch.object(self.scheduler,
-                               '_get_routers_to_schedule') as mock_routers:
-            mock_routers.return_value = []
-            result = self.scheduler.auto_schedule_routers(
-                self.plugin, mock.ANY, mock.ANY, mock.ANY)
-        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
-        self.assertFalse(result)
-
-    def test_auto_schedule_routers_no_target_routers(self):
-        self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
-        with mock.patch.object(
-            self.scheduler,
-            '_get_routers_to_schedule') as mock_unscheduled_routers,\
-                mock.patch.object(
-                    self.scheduler,
-                    '_get_routers_can_schedule') as mock_target_routers:
-            mock_unscheduled_routers.return_value = mock.ANY
-            mock_target_routers.return_value = None
-            result = self.scheduler.auto_schedule_routers(
-                self.plugin, mock.ANY, mock.ANY, mock.ANY)
-        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
-        self.assertFalse(result)
-
-    def test__get_routers_to_schedule_with_router_ids(self):
-        router_ids = ['foo_router_1', 'foo_router_2']
-        expected_routers = [
-            {'id': 'foo_router1'}, {'id': 'foo_router_2'}
-        ]
-        self.plugin.get_routers.return_value = expected_routers
-        with mock.patch.object(self.scheduler,
-                               '_filter_unscheduled_routers') as mock_filter:
-            mock_filter.return_value = expected_routers
-            unscheduled_routers = self.scheduler._get_routers_to_schedule(
-                mock.ANY, self.plugin, router_ids)
-        mock_filter.assert_called_once_with(
-            mock.ANY, self.plugin, expected_routers)
-        self.assertEqual(expected_routers, unscheduled_routers)
-
-    def test__get_routers_to_schedule_without_router_ids(self):
-        expected_routers = [
-            {'id': 'foo_router1'}, {'id': 'foo_router_2'}
-        ]
-        with mock.patch.object(self.scheduler,
-                               '_get_unscheduled_routers') as mock_get:
-            mock_get.return_value = expected_routers
-            unscheduled_routers = self.scheduler._get_routers_to_schedule(
-                mock.ANY, self.plugin)
-        mock_get.assert_called_once_with(mock.ANY, self.plugin)
-        self.assertEqual(expected_routers, unscheduled_routers)
-
-    def test__get_routers_to_schedule_exclude_distributed(self):
-        routers = [
-            {'id': 'foo_router1', 'distributed': True}, {'id': 'foo_router_2'}
-        ]
-        expected_routers = [{'id': 'foo_router_2'}]
-        with mock.patch.object(self.scheduler,
-                               '_get_unscheduled_routers') as mock_get:
-            mock_get.return_value = routers
-            unscheduled_routers = self.scheduler._get_routers_to_schedule(
-                mock.ANY, self.plugin,
-                router_ids=None, exclude_distributed=True)
-        mock_get.assert_called_once_with(mock.ANY, self.plugin)
-        self.assertEqual(expected_routers, unscheduled_routers)
-
-    def _test__get_routers_can_schedule(self, routers, agent, target_routers):
-        self.plugin.get_l3_agent_candidates.return_value = agent
-        result = self.scheduler._get_routers_can_schedule(
-            mock.ANY, self.plugin, routers, mock.ANY)
-        self.assertEqual(target_routers, result)
-
-    def _test__filter_unscheduled_routers(self, routers, agents, expected):
-        self.plugin.get_l3_agents_hosting_routers.return_value = agents
-        unscheduled_routers = self.scheduler._filter_unscheduled_routers(
-            mock.ANY, self.plugin, routers)
-        self.assertEqual(expected, unscheduled_routers)
-
-    def test__filter_unscheduled_routers_already_scheduled(self):
-        self._test__filter_unscheduled_routers(
-            [{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
-            [{'id': 'foo_agent_id'}], [])
-
-    def test__filter_unscheduled_routers_non_scheduled(self):
-        self._test__filter_unscheduled_routers(
-            [{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
-            None, [{'id': 'foo_router1'}, {'id': 'foo_router_2'}])
-
-    def test__get_routers_can_schedule_with_compat_agent(self):
-        routers = [{'id': 'foo_router'}]
-        self._test__get_routers_can_schedule(routers, mock.ANY, routers)
-
-    def test__get_routers_can_schedule_with_no_compat_agent(self):
-        routers = [{'id': 'foo_router'}]
-        self._test__get_routers_can_schedule(routers, None, [])
-
-    def test__bind_routers_centralized(self):
-        routers = [{'id': 'foo_router'}]
-        with mock.patch.object(self.scheduler, 'bind_router') as mock_bind:
-            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, mock.ANY)
-        mock_bind.assert_called_once_with(mock.ANY, 'foo_router', mock.ANY)
-
-    def _test__bind_routers_ha(self, has_binding):
-        routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}]
-        agent = agents_db.Agent(id='foo_agent')
-        with mock.patch.object(self.scheduler,
-                               '_router_has_binding',
-                               return_value=has_binding) as mock_has_binding,\
-                mock.patch.object(self.scheduler,
-                                  'create_ha_port_and_bind') as mock_bind:
-            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent)
-            mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router',
-                                                     'foo_agent')
-            self.assertEqual(not has_binding, mock_bind.called)
-
-    def test__bind_routers_ha_has_binding(self):
-        self._test__bind_routers_ha(has_binding=True)
-
-    def test__bind_routers_ha_no_binding(self):
-        self._test__bind_routers_ha(has_binding=False)
-
-    def test__get_candidates_iterable_on_early_returns(self):
-        plugin = mock.MagicMock()
-        # non-distributed router already hosted
-        plugin.get_l3_agents_hosting_routers.return_value = [{'id': 'a1'}]
-        router = {'distributed': False, 'id': 'falafel'}
-        iter(self.scheduler._get_candidates(plugin, mock.MagicMock(), router))
-        # distributed router but no agents
-        router['distributed'] = True
-        plugin.get_l3_agents.return_value = []
-        iter(self.scheduler._get_candidates(plugin, mock.MagicMock(), router))
-        self.assertFalse(plugin.get_l3_agent_candidates.called)
-
-    def test__get_candidates_skips_get_l3_agent_candidates_if_dvr_scheduled(
-            self):
-        plugin = mock.MagicMock()
-        # distributed router already hosted
-        plugin.get_l3_agents_hosting_routers.return_value = ['a1']
-        router = {'distributed': True, 'id': str(uuid.uuid4())}
-        plugin.get_l3_agents.return_value = ['a1']
-        self.scheduler._get_candidates(plugin, mock.MagicMock(), router)
-        self.assertFalse(plugin.get_l3_agent_candidates.called)
-
-    def test__get_candidates_calls_get_l3_agent_candidates_if_agent_available(
-            self):
-        plugin = mock.MagicMock()
-        # distributed router already hosted by two agents, 'a1' and 'a2'
-        plugin.get_l3_agents_hosting_routers.return_value = ['a1', 'a2']
-        router = {'distributed': True, 'id': str(uuid.uuid4())}
-        # Available distributed agents
-        plugin.get_l3_agents.return_value = ['a1', 'a2', 'a3', 'a4', 'a5']
-        unscheduled_agents = ['a3', 'a4', 'a5']
-        plugin.get_l3_agent_candidates.return_value = ['a3', 'a4']
-        agents_returned = self.scheduler._get_candidates(
-            plugin, mock.MagicMock(), router)
-        # called_once_with is a no-op on a mock; assert the call explicitly
-        self.assertEqual(1, plugin.get_l3_agent_candidates.call_count)
-        self.assertEqual(unscheduled_agents, sorted(
-            plugin.get_l3_agent_candidates.call_args[0][2]))
-        self.assertEqual(['a3', 'a4'], sorted(agents_returned))
-
-
-class L3SchedulerBaseMixin(object):
-
-    def _register_l3_agents(self, plugin=None):
-        self.agent1 = helpers.register_l3_agent(
-            'host_1', constants.L3_AGENT_MODE_LEGACY)
-        self.agent_id1 = self.agent1.id
-        self.agent2 = helpers.register_l3_agent(
-            'host_2', constants.L3_AGENT_MODE_LEGACY)
-        self.agent_id2 = self.agent2.id
-
-    def _register_l3_dvr_agents(self):
-        self.l3_dvr_agent = helpers.register_l3_agent(
-            HOST_DVR, constants.L3_AGENT_MODE_DVR)
-        self.l3_dvr_agent_id = self.l3_dvr_agent.id
-        self.l3_dvr_snat_agent = helpers.register_l3_agent(
-            HOST_DVR_SNAT, constants.L3_AGENT_MODE_DVR_SNAT)
-        self.l3_dvr_snat_id = self.l3_dvr_snat_agent.id
-
-    def _set_l3_agent_admin_state(self, context, agent_id, state=True):
-        update = {'agent': {'admin_state_up': state}}
-        self.plugin.update_agent(context, agent_id, update)
-
-    def _set_l3_agent_dead(self, agent_id):
-        update = {
-            'agent': {
-                'heartbeat_timestamp':
-                timeutils.utcnow() - datetime.timedelta(hours=1)}}
-        self.plugin.update_agent(self.adminContext, agent_id, update)
-
-    @contextlib.contextmanager
-    def router_with_ext_gw(self, name='router1', admin_state_up=True,
-                           fmt=None, tenant_id=None,
-                           external_gateway_info=None,
-                           subnet=None, set_context=False,
-                           **kwargs):
-        # a str(uuid.uuid4()) default would be evaluated only once, at
-        # definition time; generate a fresh tenant for each call instead
-        tenant_id = tenant_id or str(uuid.uuid4())
-        router = self._make_router(fmt or self.fmt, tenant_id, name,
-                                   admin_state_up, external_gateway_info,
-                                   set_context, **kwargs)
-        self._add_external_gateway_to_router(
-            router['router']['id'],
-            subnet['subnet']['network_id'])
-
-        yield router
-
-        self._remove_external_gateway_from_router(
-            router['router']['id'], subnet['subnet']['network_id'])
-        self._delete('routers', router['router']['id'])
-
-
-class L3SchedulerTestBaseMixin(object):
-
-    def _test_add_router_to_l3_agent(self,
-                                     distributed=False,
-                                     already_scheduled=False,
-                                     external_gw=None):
-        agent_id = self.agent_id1
-        agent = self.agent1
-        if distributed:
-            self._register_l3_dvr_agents()
-            agent_id = self.l3_dvr_snat_id
-            agent = self.l3_dvr_snat_agent
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r1')
-        router['router']['distributed'] = distributed
-        router['router']['external_gateway_info'] = external_gw
-        if already_scheduled:
-            self._test_schedule_bind_router(agent, router)
-        with mock.patch.object(self, "validate_agent_router_combination"),\
-                mock.patch.object(self,
-                                  "create_router_to_agent_binding") as auto_s,\
-                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
-                           return_value=router['router']):
-            self.add_router_to_l3_agent(self.adminContext, agent_id,
-                                        router['router']['id'])
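-            # a binding is created only if the router was not yet scheduled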
-            self.assertNotEqual(already_scheduled, auto_s.called)
-
-    def test__unbind_router_removes_binding(self):
-        agent_id = self.agent_id1
-        agent = self.agent1
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r1')
-        self._test_schedule_bind_router(agent, router)
-        self._unbind_router(self.adminContext,
-                            router['router']['id'],
-                            agent_id)
-        bindings = self._get_l3_bindings_hosting_routers(
-            self.adminContext, [router['router']['id']])
-        self.assertEqual(0, len(bindings))
-
-    def _create_router_for_l3_agent_dvr_test(self,
-                                             distributed=False,
-                                             external_gw=None):
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r1')
-        router['router']['distributed'] = distributed
-        router['router']['external_gateway_info'] = external_gw
-        return router
-
-    def _prepare_l3_agent_dvr_move_exceptions(self,
-                                              distributed=False,
-                                              external_gw=None,
-                                              agent_id=None,
-                                              expected_exception=None):
-        router = self._create_router_for_l3_agent_dvr_test(
-            distributed=distributed, external_gw=external_gw)
-        with mock.patch.object(self, "create_router_to_agent_binding"),\
-                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
-                           return_value=router['router']):
-            self.assertRaises(expected_exception,
-                              self.add_router_to_l3_agent,
-                              self.adminContext, agent_id,
-                              router['router']['id'])
-
-    def test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self):
-        self._register_l3_agents()
-        self._prepare_l3_agent_dvr_move_exceptions(
-            distributed=True,
-            agent_id=self.agent_id1,
-            expected_exception=l3agent.RouterL3AgentMismatch)
-
-    def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self):
-        self._register_l3_dvr_agents()
-        self._prepare_l3_agent_dvr_move_exceptions(
-            agent_id=self.l3_dvr_agent_id,
-            expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)
-
-    def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self):
-        self._register_l3_dvr_agents()
-        self._prepare_l3_agent_dvr_move_exceptions(
-            distributed=True,
-            agent_id=self.l3_dvr_agent_id,
-            expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)
-
-    def test_add_router_to_l3_agent_dvr_to_snat(self):
-        external_gw_info = {
-            "network_id": str(uuid.uuid4()),
-            "enable_snat": True
-        }
-        self._register_l3_dvr_agents()
-        agent_id = self.l3_dvr_snat_id
-        router = self._create_router_for_l3_agent_dvr_test(
-            distributed=True,
-            external_gw=external_gw_info)
-        with mock.patch.object(self, "validate_agent_router_combination"),\
-                mock.patch.object(
-                    self,
-                    "create_router_to_agent_binding") as rtr_agent_binding,\
-                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
-                           return_value=router['router']):
-
-            self.add_router_to_l3_agent(self.adminContext, agent_id,
-                                        router['router']['id'])
-            rtr_agent_binding.assert_called_once_with(
-                self.adminContext, mock.ANY, router['router'])
-
-    def test_add_router_to_l3_agent(self):
-        self._test_add_router_to_l3_agent()
-
-    def test_add_distributed_router_to_l3_agent(self):
-        external_gw_info = {
-            "network_id": str(uuid.uuid4()),
-            "enable_snat": True
-        }
-        self._test_add_router_to_l3_agent(distributed=True,
-                                          external_gw=external_gw_info)
-
-    def test_add_router_to_l3_agent_already_scheduled(self):
-        self._test_add_router_to_l3_agent(already_scheduled=True)
-
-    def test_add_distributed_router_to_l3_agent_already_scheduled(self):
-        external_gw_info = {
-            "network_id": str(uuid.uuid4()),
-            "enable_snat": True
-        }
-        self._test_add_router_to_l3_agent(distributed=True,
-                                          already_scheduled=True,
-                                          external_gw=external_gw_info)
-
-    def _prepare_schedule_dvr_tests(self):
-        scheduler = l3_agent_scheduler.ChanceScheduler()
-        agent = agents_db.Agent()
-        agent.admin_state_up = True
-        agent.heartbeat_timestamp = timeutils.utcnow()
-        plugin = mock.Mock()
-        plugin.get_l3_agents_hosting_routers.return_value = []
-        plugin.get_l3_agents.return_value = [agent]
-        plugin.get_l3_agent_candidates.return_value = [agent]
-
-        return scheduler, agent, plugin
-
-    def test_schedule_dvr_router_without_snatbinding_and_no_gw(self):
-        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
-        sync_router = {
-            'id': 'foo_router_id',
-            'distributed': True
-        }
-        plugin.get_router.return_value = sync_router
-        with mock.patch.object(scheduler, 'bind_router'),\
-                mock.patch.object(plugin,
-                                  'get_snat_bindings',
-                                  return_value=False):
-            scheduler._schedule_router(
-                plugin, self.adminContext, 'foo_router_id', None)
-        expected_calls = [
-            mock.call.get_router(mock.ANY, 'foo_router_id'),
-            mock.call.get_l3_agents_hosting_routers(
-                mock.ANY, ['foo_router_id'], admin_state_up=True),
-            mock.call.get_l3_agents(mock.ANY, active=True),
-            mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
-        ]
-        plugin.assert_has_calls(expected_calls)
-
-    def test_schedule_dvr_router_with_snatbinding_no_gw(self):
-        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
-        sync_router = {'id': 'foo_router_id',
-                       'distributed': True}
-        plugin.get_router.return_value = sync_router
-        with mock.patch.object(
-                plugin, 'get_snat_bindings', return_value=True),\
-                mock.patch.object(scheduler, 'bind_router'):
-            scheduler._schedule_router(
-                plugin, self.adminContext, 'foo_router_id', None)
-        expected_calls = [
-            mock.call.get_router(mock.ANY, 'foo_router_id'),
-            mock.call.get_l3_agents_hosting_routers(
-                mock.ANY, ['foo_router_id'], admin_state_up=True),
-            mock.call.get_l3_agents(mock.ANY, active=True),
-            mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
-            mock.call.unbind_snat_servicenode(mock.ANY, 'foo_router_id')
-        ]
-        plugin.assert_has_calls(expected_calls)
-
-    def test_schedule_router_distributed(self):
-        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
-        sync_router = {
-            'id': 'foo_router_id',
-            'distributed': True,
-            'external_gateway_info': {
-                'network_id': str(uuid.uuid4()),
-                'enable_snat': True
-            }
-        }
-        plugin.get_router.return_value = sync_router
-        with mock.patch.object(
-            plugin, 'get_snat_bindings', return_value=False),\
-                mock.patch.object(scheduler, 'bind_router'):
-            scheduler._schedule_router(
-                plugin, self.adminContext, 'foo_router_id', None)
-        expected_calls = [
-            mock.call.get_router(mock.ANY, 'foo_router_id'),
-            mock.call.get_l3_agents_hosting_routers(
-                mock.ANY, ['foo_router_id'], admin_state_up=True),
-            mock.call.get_l3_agents(mock.ANY, active=True),
-            mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
-            mock.call.schedule_snat_router(
-                mock.ANY, 'foo_router_id', sync_router),
-        ]
-        plugin.assert_has_calls(expected_calls)
-
-    def _test_schedule_bind_router(self, agent, router):
-        ctx = self.adminContext
-        session = ctx.session
-        db = l3_agentschedulers_db.RouterL3AgentBinding
-        scheduler = l3_agent_scheduler.ChanceScheduler()
-
-        rid = router['router']['id']
-        scheduler.bind_router(ctx, rid, agent)
-        results = (session.query(db).filter_by(router_id=rid).all())
-        self.assertTrue(len(results) > 0)
-        self.assertIn(agent.id, [bind.l3_agent_id for bind in results])
-
-    def test_bind_new_router(self):
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r1')
-        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
-            self._test_schedule_bind_router(self.agent1, router)
-            self.assertEqual(1, flog.call_count)
-            args, kwargs = flog.call_args
-            self.assertIn('is scheduled', args[0])
-
-    def test_bind_absent_router(self):
-        scheduler = l3_agent_scheduler.ChanceScheduler()
-        # check that bind_router() does not throw when supplied
-        # with the router_id of a non-existent router
-        scheduler.bind_router(self.adminContext, "dummyID", self.agent1)
-
-    def test_bind_existing_router(self):
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r2')
-        self._test_schedule_bind_router(self.agent1, router)
-        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
-            self._test_schedule_bind_router(self.agent1, router)
-            self.assertEqual(1, flog.call_count)
-            args, kwargs = flog.call_args
-            self.assertIn('has already been scheduled', args[0])
-
-    def _check_get_l3_agent_candidates(
-            self, router, agent_list, exp_host, count=1):
-        candidates = self.get_l3_agent_candidates(self.adminContext,
-                                                  router, agent_list)
-        self.assertEqual(len(candidates), count)
-        if count:
-            self.assertEqual(candidates[0]['host'], exp_host)
-
-    def test_get_l3_agent_candidates_legacy(self):
-        self._register_l3_dvr_agents()
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r2')
-        router['external_gateway_info'] = None
-        router['id'] = str(uuid.uuid4())
-        agent_list = [self.agent1, self.l3_dvr_agent]
-
-        # test legacy agent_mode case: only legacy agent should be candidate
-        router['distributed'] = False
-        exp_host = 'host_1'
-        self._check_get_l3_agent_candidates(router, agent_list, exp_host)
-
-    def test_get_l3_agent_candidates_dvr(self):
-        self._register_l3_dvr_agents()
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r2')
-        router['external_gateway_info'] = None
-        router['id'] = str(uuid.uuid4())
-        agent_list = [self.agent1, self.l3_dvr_agent]
-        # test dvr agent_mode case: only the dvr agent should be a candidate
-        router['distributed'] = True
-        self.get_subnet_ids_on_router = mock.Mock()
-        self.check_dvr_serviceable_ports_on_host = mock.Mock(return_value=True)
-        self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR)
-
-    def test_get_l3_agent_candidates_dvr_no_vms(self):
-        self._register_l3_dvr_agents()
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r2')
-        router['external_gateway_info'] = None
-        router['id'] = str(uuid.uuid4())
-        agent_list = [self.agent1, self.l3_dvr_agent]
-        router['distributed'] = True
-        # Test no VMs present case
-        self.get_subnet_ids_on_router = mock.Mock()
-        self.check_dvr_serviceable_ports_on_host = mock.Mock(
-            return_value=False)
-        self._check_get_l3_agent_candidates(
-            router, agent_list, HOST_DVR, count=0)
-
-    def test_get_l3_agent_candidates_dvr_snat(self):
-        self._register_l3_dvr_agents()
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r2')
-        router['external_gateway_info'] = None
-        router['id'] = str(uuid.uuid4())
-        router['distributed'] = True
-
-        agent_list = [self.l3_dvr_snat_agent]
-        self.get_subnet_ids_on_router = mock.Mock()
-        self.check_dvr_serviceable_ports_on_host = mock.Mock(return_value=True)
-        self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT)
-
-    def test_get_l3_agent_candidates_dvr_snat_no_vms(self):
-        self._register_l3_dvr_agents()
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r2')
-        router['external_gateway_info'] = None
-        router['id'] = str(uuid.uuid4())
-        router['distributed'] = True
-
-        agent_list = [self.l3_dvr_snat_agent]
-        # Test the case where no VMs are present on the host
-        self.get_subnet_ids_on_router = mock.Mock()
-        self.check_dvr_serviceable_ports_on_host = mock.Mock(
-            return_value=False)
-        self._check_get_l3_agent_candidates(
-            router, agent_list, HOST_DVR_SNAT, count=0)
-
-    def test_get_l3_agent_candidates_centralized(self):
-        self._register_l3_dvr_agents()
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r2')
-        router['external_gateway_info'] = None
-        router['id'] = str(uuid.uuid4())
-        # check centralized test case
-        router['distributed'] = False
-        agent_list = [self.l3_dvr_snat_agent]
-        self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT)
-
-    def test_get_l3_agents_hosting_routers(self):
-        agent = helpers.register_l3_agent('host_6')
-        router = self._make_router(self.fmt,
-                                   tenant_id=str(uuid.uuid4()),
-                                   name='r1')
-        ctx = self.adminContext
-        router_id = router['router']['id']
-        self.plugin.router_scheduler.bind_router(ctx, router_id, agent)
-        agents = self.get_l3_agents_hosting_routers(ctx,
-                                                    [router_id])
-        self.assertEqual([agent.id], [agt.id for agt in agents])
-        agents = self.get_l3_agents_hosting_routers(ctx,
-                                                    [router_id],
-                                                    admin_state_up=True)
-        self.assertEqual([agent.id], [agt.id for agt in agents])
-
-        self._set_l3_agent_admin_state(ctx, agent.id, False)
-        agents = self.get_l3_agents_hosting_routers(ctx,
-                                                    [router_id])
-        self.assertEqual([agent.id], [agt.id for agt in agents])
-        agents = self.get_l3_agents_hosting_routers(ctx,
-                                                    [router_id],
-                                                    admin_state_up=True)
-        self.assertEqual([], agents)
-
-
-class L3SchedulerTestCaseMixin(l3_agentschedulers_db.L3AgentSchedulerDbMixin,
-                               l3_db.L3_NAT_db_mixin,
-                               common_db_mixin.CommonDbMixin,
-                               test_l3.L3NatTestCaseMixin,
-                               L3SchedulerBaseMixin,
-                               L3SchedulerTestBaseMixin):
-
-    def setUp(self):
-        self.mock_rescheduling = False
-        ext_mgr = test_l3.L3TestExtensionManager()
-        plugin_str = ('neutron.tests.unit.extensions.test_l3.'
-                      'TestL3NatIntAgentSchedulingPlugin')
-        super(L3SchedulerTestCaseMixin, self).setUp(plugin=plugin_str,
-                                                    ext_mgr=ext_mgr)
-
-        self.adminContext = n_context.get_admin_context()
-        self.plugin = manager.NeutronManager.get_plugin()
-        self.plugin.router_scheduler = importutils.import_object(
-            'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
-        )
-        self._register_l3_agents()
-
-
-class L3AgentChanceSchedulerTestCase(L3SchedulerTestCaseMixin,
-                                     test_db_base_plugin_v2.
-                                     NeutronDbPluginV2TestCase):
-
-    def test_random_scheduling(self):
-        random_patch = mock.patch('random.choice')
-        random_mock = random_patch.start()
-
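-        # make random.choice deterministic: always pick the first candidate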
-        def side_effect(seq):
-            return seq[0]
-        random_mock.side_effect = side_effect
-
-        with self.subnet() as subnet:
-            self._set_net_external(subnet['subnet']['network_id'])
-            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
-                agents = self.get_l3_agents_hosting_routers(
-                    self.adminContext, [r1['router']['id']],
-                    admin_state_up=True)
-
-                self.assertEqual(len(agents), 1)
-                self.assertEqual(random_mock.call_count, 1)
-
-                with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
-                    agents = self.get_l3_agents_hosting_routers(
-                        self.adminContext, [r2['router']['id']],
-                        admin_state_up=True)
-
-                    self.assertEqual(len(agents), 1)
-                    self.assertEqual(random_mock.call_count, 2)
-
-        random_patch.stop()
-
-    def test_scheduler_auto_schedule_when_agent_added(self):
-        self._set_l3_agent_admin_state(self.adminContext,
-                                       self.agent_id1, False)
-        self._set_l3_agent_admin_state(self.adminContext,
-                                       self.agent_id2, False)
-
-        with self.subnet() as subnet:
-            self._set_net_external(subnet['subnet']['network_id'])
-            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
-                agents = self.get_l3_agents_hosting_routers(
-                    self.adminContext, [r1['router']['id']],
-                    admin_state_up=True)
-                self.assertEqual(0, len(agents))
-
-                self._set_l3_agent_admin_state(self.adminContext,
-                                               self.agent_id1, True)
-                self.plugin.auto_schedule_routers(self.adminContext,
-                                                  'host_1',
-                                                  [r1['router']['id']])
-
-                agents = self.get_l3_agents_hosting_routers(
-                    self.adminContext, [r1['router']['id']],
-                    admin_state_up=True)
-                self.assertEqual('host_1', agents[0]['host'])
-
-
-class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCaseMixin,
-                                           test_db_base_plugin_v2.
-                                           NeutronDbPluginV2TestCase):
-
-    def setUp(self):
-        super(L3AgentLeastRoutersSchedulerTestCase, self).setUp()
-        self.plugin.router_scheduler = importutils.import_object(
-            'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
-        )
-
-    def test_scheduler(self):
-        # disable one agent to force scheduling onto the only remaining one.
-        self._set_l3_agent_admin_state(self.adminContext,
-                                       self.agent_id2, False)
-
-        with self.subnet() as subnet:
-            self._set_net_external(subnet['subnet']['network_id'])
-            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
-                agents = self.get_l3_agents_hosting_routers(
-                    self.adminContext, [r1['router']['id']],
-                    admin_state_up=True)
-                self.assertEqual(len(agents), 1)
-
-                agent_id1 = agents[0]['id']
-
-                with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
-                    agents = self.get_l3_agents_hosting_routers(
-                        self.adminContext, [r2['router']['id']],
-                        admin_state_up=True)
-                    self.assertEqual(len(agents), 1)
-
-                    agent_id2 = agents[0]['id']
-
-                    self.assertEqual(agent_id1, agent_id2)
-
-                    # re-enable the second agent to check that the next
-                    # router created is scheduled to it.
-                    self._set_l3_agent_admin_state(self.adminContext,
-                                                   self.agent_id2, True)
-
-                    with self.router_with_ext_gw(name='r3',
-                                                 subnet=subnet) as r3:
-                        agents = self.get_l3_agents_hosting_routers(
-                            self.adminContext, [r3['router']['id']],
-                            admin_state_up=True)
-                        self.assertEqual(len(agents), 1)
-
-                        agent_id3 = agents[0]['id']
-
-                        self.assertNotEqual(agent_id1, agent_id3)
-
-
-class L3DvrScheduler(l3_db.L3_NAT_db_mixin,
-                     l3_dvrscheduler_db.L3_DVRsch_db_mixin):
-    pass
-
-
-class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-        self.setup_coreplugin(plugin)
-        super(L3DvrSchedulerTestCase, self).setUp()
-        self.adminContext = n_context.get_admin_context()
-        self.dut = L3DvrScheduler()
-
-    def test__notify_l3_agent_update_port_no_removing_routers(self):
-        port_id = 'fake-port'
-        kwargs = {
-            'context': self.adminContext,
-            'port': None,
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'mac_address_updated': True
-        }
-
-        plugin = manager.NeutronManager.get_plugin()
-        l3plugin = mock.Mock()
-        l3plugin.supported_extension_aliases = [
-            'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
-            constants.L3_DISTRIBUTED_EXT_ALIAS
-        ]
-
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins',
-                               return_value={'L3_ROUTER_NAT': l3plugin}):
-            l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', plugin, **kwargs)
-            self.assertFalse(
-                l3plugin.update_arp_entry_for_dvr_service_port.called)
-            self.assertFalse(
-                l3plugin.dvr_handle_new_service_port.called)
-            self.assertFalse(l3plugin.remove_router_from_l3_agent.called)
-            self.assertFalse(l3plugin.dvr_deletens_if_no_port.called)
-
-    def test__notify_l3_agent_new_port_action(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': None,
-            'port': {
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-        }
-        l3plugin = mock.Mock()
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins',
-                               return_value={'L3_ROUTER_NAT': l3plugin}):
-            l3_dvrscheduler_db._notify_l3_agent_new_port(
-                'port', 'after_create', mock.ANY, **kwargs)
-            l3plugin.update_arp_entry_for_dvr_service_port.\
-                assert_called_once_with(
-                    self.adminContext, kwargs.get('port'), 'add')
-            l3plugin.dvr_handle_new_service_port.assert_called_once_with(
-                self.adminContext, kwargs.get('port'))
-
-    def test__notify_l3_agent_new_port_no_action(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': None,
-            'port': {
-                'device_owner': 'network:None',
-            }
-        }
-        l3plugin = mock.Mock()
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins',
-                               return_value={'L3_ROUTER_NAT': l3plugin}):
-            l3_dvrscheduler_db._notify_l3_agent_new_port(
-                'port', 'after_create', mock.ANY, **kwargs)
-            self.assertFalse(
-                l3plugin.update_arp_entry_for_dvr_service_port.called)
-            self.assertFalse(
-                l3plugin.dvr_handle_new_service_port.called)
-
-    def test__notify_l3_agent_update_port_no_action(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-        }
-        l3plugin = mock.Mock()
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins',
-                               return_value={'L3_ROUTER_NAT': l3plugin}):
-            l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', mock.ANY, **kwargs)
-
-            self.assertFalse(
-                l3plugin.update_arp_entry_for_dvr_service_port.called)
-            self.assertFalse(
-                l3plugin.dvr_handle_new_service_port.called)
-            self.assertFalse(l3plugin.remove_router_from_l3_agent.called)
-            self.assertFalse(l3plugin.dvr_deletens_if_no_port.called)
-
-    def test__notify_l3_agent_update_port_with_mac_address_update(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'mac_address': '02:04:05:17:18:29'
-            },
-            'mac_address_updated': True
-        }
-        l3plugin = mock.Mock()
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins',
-                               return_value={'L3_ROUTER_NAT': l3plugin}):
-            l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', mock.ANY, **kwargs)
-
-            l3plugin.update_arp_entry_for_dvr_service_port.\
-                assert_called_once_with(
-                    self.adminContext, kwargs.get('port'), 'add')
-            self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
-
-    def test__notify_l3_agent_update_port_with_port_binding_change(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                'id': str(uuid.uuid4()),
-                portbindings.HOST_ID: 'vm-host1',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host2',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-        }
-        l3plugin = mock.Mock()
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins',
-                               return_value={'L3_ROUTER_NAT': l3plugin}),\
-                mock.patch.object(l3plugin, 'dvr_deletens_if_no_port',
-                                  return_value=[{'agent_id': 'foo_agent',
-                                                 'router_id': 'foo_id'}]):
-            l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', mock.ANY, **kwargs)
-            l3plugin.remove_router_from_l3_agent.assert_called_once_with(
-                mock.ANY, 'foo_agent', 'foo_id')
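-            # the binding moved from vm-host1 to vm-host2, so the ARP entry
-            # is updated twice (presumably a 'del' for the old host and an
-            # 'add' for the new one)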
-            self.assertEqual(
-                2, l3plugin.update_arp_entry_for_dvr_service_port.call_count)
-            l3plugin.dvr_handle_new_service_port.assert_called_once_with(
-                self.adminContext, kwargs.get('port'))
-
-    def test__notify_l3_agent_update_port_removing_routers(self):
-        port_id = 'fake-port'
-        kwargs = {
-            'context': self.adminContext,
-            'port': {
-                'id': port_id,
-                portbindings.HOST_ID: None,
-                'device_id': '',
-                'device_owner': ''
-            },
-            'mac_address_updated': False,
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE
-            }
-        }
-
-        plugin = manager.NeutronManager.get_plugin()
-        l3plugin = mock.Mock()
-        l3plugin.supported_extension_aliases = [
-            'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
-            constants.L3_DISTRIBUTED_EXT_ALIAS
-        ]
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins',
-                               return_value={'L3_ROUTER_NAT': l3plugin}),\
-                mock.patch.object(l3plugin, 'dvr_deletens_if_no_port',
-                                  return_value=[{'agent_id': 'foo_agent',
-                                                 'router_id': 'foo_id'}]):
-            l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', plugin, **kwargs)
-
-            self.assertEqual(
-                1, l3plugin.update_arp_entry_for_dvr_service_port.call_count)
-            l3plugin.update_arp_entry_for_dvr_service_port.\
-                assert_called_once_with(
-                    self.adminContext, mock.ANY, 'del')
-
-            self.assertFalse(
-                l3plugin.dvr_handle_new_service_port.called)
-            l3plugin.remove_router_from_l3_agent.assert_called_once_with(
-                mock.ANY, 'foo_agent', 'foo_id')
-
-    def test__notify_port_delete(self):
-        plugin = manager.NeutronManager.get_plugin()
-        l3plugin = mock.Mock()
-        l3plugin.supported_extension_aliases = [
-            'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
-            constants.L3_DISTRIBUTED_EXT_ALIAS
-        ]
-        with mock.patch.object(manager.NeutronManager,
-                               'get_service_plugins',
-                               return_value={'L3_ROUTER_NAT': l3plugin}):
-            kwargs = {
-                'context': self.adminContext,
-                'port': mock.ANY,
-                'removed_routers': [
-                    {'agent_id': 'foo_agent', 'router_id': 'foo_id'},
-                ],
-            }
-            l3_dvrscheduler_db._notify_port_delete(
-                'port', 'after_delete', plugin, **kwargs)
-            l3plugin.update_arp_entry_for_dvr_service_port.\
-                assert_called_once_with(
-                    self.adminContext, mock.ANY, 'del')
-            l3plugin.remove_router_from_l3_agent.assert_called_once_with(
-                mock.ANY, 'foo_agent', 'foo_id')
-
-    def test_dvr_handle_new_service_port(self):
-        port = {
-                'id': 'port1',
-                'device_id': 'abcd',
-                'device_owner': DEVICE_OWNER_COMPUTE_NOVA,
-                portbindings.HOST_ID: 'host1',
-                'fixed_ips': [
-                    {
-                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
-                        'ip_address': '10.10.10.3'
-                    }
-                ]
-        }
-        dvr_ports = [
-            {
-                'id': 'dvr_port1',
-                'device_id': 'r1',
-                'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
-                'fixed_ips': [
-                    {
-                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
-                        'ip_address': '10.10.10.1'
-                    }
-                ]
-            },
-            {
-                'id': 'dvr_port2',
-                'device_id': 'r2',
-                'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
-                'fixed_ips': [
-                    {
-                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
-                        'ip_address': '10.10.10.123'
-                    }
-                ]
-            }
-        ]
-        agent_on_host = {'id': 'agent1'}
-
-        with mock.patch(
-            'neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_ports',
-            return_value=dvr_ports),\
-                mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
-                           '.L3AgentNotifyAPI'),\
-                mock.patch.object(
-                        self.dut, 'get_l3_agents',
-                        return_value=[agent_on_host]) as get_l3_agents:
-            self.dut.dvr_handle_new_service_port(
-                self.adminContext, port)
-
-            get_l3_agents.assert_called_once_with(
-                self.adminContext,
-                filters={'host': [port[portbindings.HOST_ID]]})
-            (self.dut.l3_rpc_notifier.routers_updated_on_host.
-                assert_called_once_with(
-                    self.adminContext, {'r1', 'r2'}, 'host1'))
-            self.assertFalse(self.dut.l3_rpc_notifier.routers_updated.called)
-
-    def test_get_dvr_routers_by_portid(self):
-        dvr_port = {
-                'id': 'dvr_port1',
-                'device_id': 'r1',
-                'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
-                'fixed_ips': [
-                    {
-                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
-                        'ip_address': '10.10.10.1'
-                    }
-                ]
-        }
-        r1 = {
-              'id': 'r1',
-              'distributed': True,
-        }
-
-        with mock.patch(
-            'neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_port',
-            return_value=dvr_port),\
-                mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
-                           '.get_ports', return_value=[dvr_port]):
-            router_ids = self.dut.get_dvr_routers_by_portid(
-                self.adminContext, dvr_port['id'])
-            self.assertEqual(router_ids.pop(), r1['id'])
-
-    def test_get_subnet_ids_on_router(self):
-        dvr_port = {
-                'id': 'dvr_port1',
-                'device_id': 'r1',
-                'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
-                'fixed_ips': [
-                    {
-                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
-                        'ip_address': '10.10.10.1'
-                    }
-                ]
-        }
-        r1 = {
-              'id': 'r1',
-              'distributed': True,
-        }
-
-        with mock.patch(
-            'neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_ports',
-            return_value=[dvr_port]):
-            sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
-                                                        r1['id'])
-            self.assertEqual(
-                sub_ids.pop(),
-                dvr_port.get('fixed_ips').pop(0).get('subnet_id'))
-
-    def test_get_subnet_ids_on_router_no_subnet(self):
-        dvr_port = {
-                'id': 'dvr_port1',
-                'device_id': 'r1',
-                'device_owner': 'network:router_interface_distributed',
-                'fixed_ips': []
-        }
-        r1 = {
-              'id': 'r1',
-              'distributed': True,
-        }
-        with mock.patch.object(db_v2.NeutronDbPluginV2, 'get_ports',
-                               return_value=[dvr_port]):
-            sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
-                                                        r1['id'])
-            self.assertEqual(len(sub_ids), 0)
-
-    def _prepare_schedule_snat_tests(self):
-        agent = agents_db.Agent()
-        agent.admin_state_up = True
-        agent.heartbeat_timestamp = timeutils.utcnow()
-        router = {
-            'id': 'foo_router_id',
-            'distributed': True,
-            'external_gateway_info': {
-                'network_id': str(uuid.uuid4()),
-                'enable_snat': True
-            }
-        }
-        return agent, router
-
-    def test_schedule_snat_router_duplicate_entry(self):
-        self._prepare_schedule_snat_tests()
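-        # a concurrent scheduler may already have bound the router; the
-        # resulting DBDuplicateEntry should end scheduling before the DVR
-        # binding step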
-        with mock.patch.object(self.dut, 'get_l3_agents'),\
-                mock.patch.object(self.dut, 'get_snat_candidates'),\
-                mock.patch.object(
-                    self.dut,
-                    'bind_snat_servicenode',
-                    side_effect=db_exc.DBDuplicateEntry()) as mock_bind_snat,\
-                mock.patch.object(
-                    self.dut,
-                    'bind_dvr_router_servicenode') as mock_bind_dvr:
-            self.dut.schedule_snat_router(self.adminContext, 'foo', 'bar')
-            self.assertTrue(mock_bind_snat.called)
-            self.assertFalse(mock_bind_dvr.called)
-
-    def test_schedule_snat_router_return_value(self):
-        agent, router = self._prepare_schedule_snat_tests()
-        with mock.patch.object(self.dut, 'get_l3_agents'),\
-                mock.patch.object(
-                    self.dut,
-                    'get_snat_candidates') as mock_snat_candidates,\
-                mock.patch.object(self.dut,
-                                  'bind_snat_servicenode') as mock_bind_snat,\
-                mock.patch.object(
-                    self.dut,
-                    'bind_dvr_router_servicenode') as mock_bind_dvr:
-            mock_snat_candidates.return_value = [agent]
-            mock_bind_snat.return_value = [agent]
-            mock_bind_dvr.return_value = [agent]
-            chosen_agent = self.dut.schedule_snat_router(
-                self.adminContext, 'foo_router_id', router)
-        self.assertEqual(chosen_agent, [agent])
-
-    def test_schedule_router_unbind_snat_servicenode_negativetest(self):
-        router = {
-            'id': 'foo_router_id',
-            'distributed': True
-        }
-        with mock.patch.object(self.dut, 'get_router') as mock_rd,\
-                mock.patch.object(self.dut,
-                                  'get_snat_bindings') as mock_snat_bind,\
-                mock.patch.object(self.dut,
-                                  'unbind_snat_servicenode') as mock_unbind:
-            mock_rd.return_value = router
-            mock_snat_bind.return_value = False
-            self.dut.schedule_snat_router(
-                self.adminContext, 'foo_router_id', router)
-            self.assertFalse(mock_unbind.called)
-
-    def test_schedule_snat_router_with_snat_candidates(self):
-        agent, router = self._prepare_schedule_snat_tests()
-        with mock.patch.object(query.Query, 'first') as mock_query,\
-                mock.patch.object(self.dut, 'get_l3_agents') as mock_agents,\
-                mock.patch.object(self.dut,
-                                  'get_snat_candidates') as mock_candidates,\
-                mock.patch.object(self.dut, 'get_router') as mock_rd,\
-                mock.patch.object(self.dut, 'bind_dvr_router_servicenode'),\
-                mock.patch.object(self.dut,
-                                  'bind_snat_servicenode') as mock_bind:
-            mock_rd.return_value = router
-            mock_query.return_value = []
-            mock_agents.return_value = [agent]
-            mock_candidates.return_value = [agent]
-            self.dut.schedule_snat_router(
-                self.adminContext, 'foo_router_id', mock.ANY)
-            mock_bind.assert_called_once_with(
-                self.adminContext, 'foo_router_id', [agent])
-
-    def test_unbind_snat_servicenode(self):
-        router_id = 'foo_router_id'
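-        # patch _core_plugin and l3_rpc_notifier as properties on the class;
-        # a PropertyMock records attribute access, which is what the
-        # assert_called_once_with() checks at the end verify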
-        core_plugin = mock.PropertyMock()
-        type(self.dut)._core_plugin = core_plugin
-        (self.dut._core_plugin.get_ports_on_host_by_subnet.
-         return_value) = []
-        core_plugin.reset_mock()
-        l3_notifier = mock.PropertyMock()
-        type(self.dut).l3_rpc_notifier = l3_notifier
-        binding = l3_dvrscheduler_db.CentralizedSnatL3AgentBinding(
-            router_id=router_id, l3_agent_id='foo_l3_agent_id',
-            l3_agent=agents_db.Agent())
-        with mock.patch.object(query.Query, 'first') as mock_first,\
-                mock.patch.object(query.Query, 'delete') as mock_delete,\
-                mock.patch.object(
-                    self.dut,
-                    'get_subnet_ids_on_router') as mock_get_subnets:
-            mock_first.return_value = binding
-            mock_get_subnets.return_value = ['foo_subnet_id']
-            self.dut.unbind_snat_servicenode(self.adminContext, router_id)
-            mock_get_subnets.assert_called_with(self.adminContext, router_id)
-            self.assertTrue(mock_delete.call_count)
-        core_plugin.assert_called_once_with()
-        l3_notifier.assert_called_once_with()
-
-    def _test_remove_router_from_l3_agent_dvr_snat(self, ursn_return):
-        agent_id = 'dvr_snat_l3_agent_id'
-        router_id = 'dvr-router-1'
-        router = {
-            'id': router_id,
-            'distributed': True,
-            'external_gateway_info': {'network_id': str(uuid.uuid4()),
-                                      'enable_snat': True}
-        }
-
-        binding = l3_dvrscheduler_db.CentralizedSnatL3AgentBinding(
-            router_id=router_id, l3_agent_id=agent_id,
-            l3_agent=agents_db.Agent())
-
-        self.dut.l3_rpc_notifier = mock.Mock()
-        with mock.patch.object(self.dut, 'get_router') as mock_gr,\
-                mock.patch.object(self.dut, 'unbind_snat') as mock_us,\
-                mock.patch.object(
-                    self.dut,
-                    'unbind_router_servicenode') as mock_ursn,\
-                mock.patch('neutron.db.l3_agentschedulers_db.'
-                           'L3AgentSchedulerDbMixin.'
-                           'remove_router_from_l3_agent') as mock_super_rrl3a:
-            mock_gr.return_value = router
-            mock_us.return_value = binding
-            mock_ursn.return_value = ursn_return
-
-            self.dut.remove_router_from_l3_agent(self.adminContext,
-                                                 agent_id,
-                                                 router_id)
-            mock_gr.assert_called_once_with(self.adminContext, router_id)
-
-            us_params = {'agent_id': agent_id}
-            mock_us.assert_called_once_with(self.adminContext,
-                                            router_id,
-                                            **us_params)
-            mock_ursn.assert_called_once_with(self.adminContext,
-                                              router_id,
-                                              binding)
-            self.assertFalse(mock_super_rrl3a.called)
-
-            if ursn_return:
-                routers_updated_params = {'schedule_routers': False}
-                (self.dut.l3_rpc_notifier.routers_updated.
-                 assert_called_once_with(self.adminContext,
-                                         [router_id],
-                                         **routers_updated_params))
-            else:
-                self.assertFalse(self.dut.l3_rpc_notifier.
-                                 routers_updated.called)
-
-    def test_remove_router_from_l3_agent_dvr_snat_mode(self):
-        self._test_remove_router_from_l3_agent_dvr_snat(True)
-        self._test_remove_router_from_l3_agent_dvr_snat(False)
-
-    def test_remove_router_from_l3_agent_dvr_mode(self):
-        agent_id = 'dvr_l3_agent_id'
-        router_id = 'dvr-router-1'
-        router = {
-            'id': router_id,
-            'distributed': True,
-            'external_gateway_info': {'network_id': str(uuid.uuid4()),
-                                      'enable_snat': True}
-        }
-
-        self.dut.l3_rpc_notifier = mock.Mock()
-        with mock.patch.object(self.dut, 'get_router') as mock_gr,\
-                mock.patch.object(self.dut, 'unbind_snat') as mock_us,\
-                mock.patch.object(
-                    self.dut,
-                    'unbind_router_servicenode') as mock_ursn,\
-                mock.patch('neutron.db.l3_agentschedulers_db.'
-                           'L3AgentSchedulerDbMixin.'
-                           'remove_router_from_l3_agent') as mock_super_rrl3a:
-            mock_gr.return_value = router
-            mock_us.return_value = None
-            mock_ursn.return_value = True
-
-            self.dut.remove_router_from_l3_agent(self.adminContext,
-                                                 agent_id,
-                                                 router_id)
-
-            mock_gr.assert_called_once_with(self.adminContext, router_id)
-
-            us_params = {'agent_id': agent_id}
-            mock_us.assert_called_once_with(self.adminContext,
-                                            router_id,
-                                            **us_params)
-
-            self.assertFalse(self.dut.l3_rpc_notifier.routers_updated.called)
-            self.assertFalse(mock_ursn.called)
-            mock_super_rrl3a.assert_called_with(self.adminContext,
-                                                agent_id,
-                                                router_id)
-
-
-class L3HAPlugin(db_v2.NeutronDbPluginV2,
-                 l3_hamode_db.L3_HA_NAT_db_mixin,
-                 l3_hascheduler_db.L3_HA_scheduler_db_mixin):
-    supported_extension_aliases = ["l3-ha", "router_availability_zone"]
-
-
-class L3HATestCaseMixin(testlib_api.SqlTestCase,
-                        L3SchedulerBaseMixin):
-
-    def setUp(self):
-        super(L3HATestCaseMixin, self).setUp()
-
-        self.adminContext = n_context.get_admin_context()
-        mock.patch('neutron.common.rpc.get_client').start()
-        self.plugin = L3HAPlugin()
-
-        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
-        cfg.CONF.set_override('service_plugins',
-                              ['neutron.services.l3_router.'
-                              'l3_router_plugin.L3RouterPlugin'])
-
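-        # a max_l3_agents_per_router of 0 means no limit: HA routers are
-        # scheduled to every available agent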
-        cfg.CONF.set_override('max_l3_agents_per_router', 0)
-        self.plugin.router_scheduler = importutils.import_object(
-            'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
-        )
-
-        self._register_l3_agents()
-
-    def _create_ha_router(self, ha=True, tenant_id='tenant1', az_hints=None):
-        self.adminContext.tenant_id = tenant_id
-        router = {'name': 'router1', 'admin_state_up': True,
-                  'tenant_id': tenant_id}
-        if ha is not None:
-            router['ha'] = ha
-        if az_hints is None:
-            az_hints = []
-        router['availability_zone_hints'] = az_hints
-        return self.plugin.create_router(self.adminContext,
-                                         {'router': router})
-
-
-class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
-
-    def _register_l3_agents(self, plugin=None):
-        super(L3_HA_scheduler_db_mixinTestCase,
-              self)._register_l3_agents(plugin=plugin)
-
-        self.agent3 = helpers.register_l3_agent(host='host_3')
-        self.agent_id3 = self.agent3.id
-
-        self.agent4 = helpers.register_l3_agent(host='host_4')
-        self.agent_id4 = self.agent4.id
-
-    def test_get_ha_routers_l3_agents_count(self):
-        router1 = self._create_ha_router()
-        router2 = self._create_ha_router()
-        router3 = self._create_ha_router(ha=False)
-        result = self.plugin.get_ha_routers_l3_agents_count(self.adminContext)
-
-        self.assertEqual(2, len(result))
-        check_result = [(router['id'], agents) for router, agents in result]
-        self.assertIn((router1['id'], 4), check_result)
-        self.assertIn((router2['id'], 4), check_result)
-        self.assertNotIn((router3['id'], mock.ANY), check_result)
-
-    def test_get_ordered_l3_agents_by_num_routers(self):
-        # Mock scheduling so that the test can control it explicitly
-        mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
-                          '_notify_ha_interfaces_updated').start()
-
-        router1 = self._create_ha_router()
-        router2 = self._create_ha_router()
-        router3 = self._create_ha_router(ha=False)
-        router4 = self._create_ha_router(ha=False)
-
-        # Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will
-        # host 2, and agent 4 will host 3.
-        self.plugin.schedule_router(self.adminContext, router1['id'],
-                                    candidates=[self.agent2, self.agent4])
-        self.plugin.schedule_router(self.adminContext, router2['id'],
-                                    candidates=[self.agent3, self.agent4])
-        self.plugin.schedule_router(self.adminContext, router3['id'],
-                                    candidates=[self.agent3])
-        self.plugin.schedule_router(self.adminContext, router4['id'],
-                                    candidates=[self.agent4])
-
-        agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3,
-                     self.agent_id4]
-        result = self.plugin.get_l3_agents_ordered_by_num_routers(
-            self.adminContext, agent_ids)
-
-        self.assertEqual(agent_ids, [record['id'] for record in result])
-
-
-class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin):
-
-    def _setup_ha_router(self):
-        router = self._create_ha_router()
-        agents = self._get_agents_scheduled_for_router(router)
-        return router, agents
-
-    def test_reschedule_ha_routers_from_down_agents(self):
-        agents = self._setup_ha_router()[1]
-        self.assertEqual(2, len(agents))
-        self._set_l3_agent_dead(self.agent_id1)
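-        # HA routers are not rescheduled from dead agents; a standby
-        # instance on another agent is expected to take over instead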
-        with mock.patch.object(self.plugin, 'reschedule_router') as reschedule:
-            self.plugin.reschedule_routers_from_down_agents()
-            self.assertFalse(reschedule.called)
-
-    def test_list_l3_agents_hosting_ha_router(self):
-        router = self._create_ha_router()
-        agents = self.plugin.list_l3_agents_hosting_router(
-            self.adminContext, router['id'])['agents']
-        for agent in agents:
-            self.assertEqual('standby', agent['ha_state'])
-
-        self.plugin.update_routers_states(
-            self.adminContext, {router['id']: 'active'}, self.agent1.host)
-        agents = self.plugin.list_l3_agents_hosting_router(
-            self.adminContext, router['id'])['agents']
-        for agent in agents:
-            expected_state = ('active' if agent['host'] == self.agent1.host
-                              else 'standby')
-            self.assertEqual(expected_state, agent['ha_state'])
-
-    def test_list_l3_agents_hosting_legacy_router(self):
-        router = self._create_ha_router(ha=False)
-        self.plugin.schedule_router(self.adminContext, router['id'])
-
-        agent = self.plugin.list_l3_agents_hosting_router(
-            self.adminContext, router['id'])['agents'][0]
-        self.assertIsNone(agent['ha_state'])
-
-    def test_get_agents_dict_for_router_unscheduled_returns_empty_list(self):
-        self.assertEqual({'agents': []},
-                         self.plugin._get_agents_dict_for_router([]))
-
-    def test_manual_add_ha_router_to_agent(self):
-        cfg.CONF.set_override('max_l3_agents_per_router', 2)
-        router, agents = self._setup_ha_router()
-        self.assertEqual(2, len(agents))
-        agent = helpers.register_l3_agent(host='myhost_3')
-        # Manual scheduling may exceed max_l3_agents_per_router
-        self.plugin.add_router_to_l3_agent(
-            self.adminContext, agent.id, router['id'])
-        agents = self._get_agents_scheduled_for_router(router)
-        self.assertIn(agent.id, [_agent.id for _agent in agents])
-        self.assertEqual(3, len(agents))
-
-    def test_manual_remove_ha_router_from_agent(self):
-        router, agents = self._setup_ha_router()
-        self.assertEqual(2, len(agents))
-        agent = agents.pop()
-        # Remove router from agent and make sure it is removed
-        self.plugin.remove_router_from_l3_agent(
-            self.adminContext, agent.id, router['id'])
-        agents = self._get_agents_scheduled_for_router(router)
-        self.assertEqual(1, len(agents))
-        self.assertNotIn(agent.id, [_agent.id for _agent in agents])
-
-    def test_manual_remove_ha_router_from_all_agents(self):
-        router, agents = self._setup_ha_router()
-        self.assertEqual(2, len(agents))
-        agent = agents.pop()
-        self.plugin.remove_router_from_l3_agent(
-            self.adminContext, agent.id, router['id'])
-        agent = agents.pop()
-        self.plugin.remove_router_from_l3_agent(
-            self.adminContext, agent.id, router['id'])
-        agents = self._get_agents_scheduled_for_router(router)
-        self.assertEqual(0, len(agents))
-
-    def _get_agents_scheduled_for_router(self, router):
-        return self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [router['id']],
-            admin_state_up=True)
-
-    def test_delete_ha_interfaces_from_agent(self):
-        router, agents = self._setup_ha_router()
-        agent = agents.pop()
-        self.plugin.remove_router_from_l3_agent(
-            self.adminContext, agent.id, router['id'])
-        session = self.adminContext.session
-        binding_model = l3_hamode_db.L3HARouterAgentPortBinding
-        bindings = session.query(binding_model).filter_by(
-            router_id=router['id'])
-        results = [binding.l3_agent_id for binding in bindings.all()]
-        self.assertNotIn(agent.id, results)
-
-    def test_add_ha_interface_to_l3_agent(self):
-        agent = self.plugin.get_agents_db(self.adminContext)[0]
-        router = self._create_ha_router()
-        self.plugin.add_router_to_l3_agent(self.adminContext, agent.id,
-                                           router['id'])
-        # Verify agent has HA interface
-        ha_ports = self.plugin.get_ha_router_port_bindings(self.adminContext,
-                                                           [router['id']])
-        self.assertIn(agent.id, [ha_port.l3_agent_id for ha_port in ha_ports])
-
-
-class L3HAChanceSchedulerTestCase(L3HATestCaseMixin):
-
-    def test_scheduler_with_ha_enabled(self):
-        router = self._create_ha_router()
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [router['id']],
-            admin_state_up=True)
-        self.assertEqual(2, len(agents))
-
-        for agent in agents:
-            sync_data = self.plugin.get_ha_sync_data_for_host(
-                self.adminContext, router_ids=[router['id']],
-                host=agent.host, agent=agent)
-            self.assertEqual(1, len(sync_data))
-            interface = sync_data[0][constants.HA_INTERFACE_KEY]
-            self.assertIsNotNone(interface)
-
-    def test_auto_schedule(self):
-        # Mock the HA interface notification so router creation does not
-        # schedule implicitly and the test can drive scheduling explicitly
-        mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
-                          '_notify_ha_interfaces_updated').start()
-
-        router = self._create_ha_router()
-        self.plugin.auto_schedule_routers(
-            self.adminContext, self.agent1.host, None)
-        self.plugin.auto_schedule_routers(
-            self.adminContext, self.agent2.host, None)
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [router['id']])
-        self.assertEqual(2, len(agents))
-
-    def test_auto_schedule_specific_router_when_agent_added(self):
-        self._auto_schedule_when_agent_added(True)
-
-    def test_auto_schedule_all_routers_when_agent_added(self):
-        self._auto_schedule_when_agent_added(False)
-
-    def _auto_schedule_when_agent_added(self, specific_router):
-        router = self._create_ha_router()
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [router['id']],
-            admin_state_up=True)
-        self.assertEqual(2, len(agents))
-        agent_ids = [agent['id'] for agent in agents]
-        self.assertIn(self.agent_id1, agent_ids)
-        self.assertIn(self.agent_id2, agent_ids)
-
-        agent = helpers.register_l3_agent(host='host_3')
-        self.agent_id3 = agent.id
-        routers_to_auto_schedule = [router['id']] if specific_router else []
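-        # An empty list asks the scheduler to consider every router on the
-        # given host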
-        self.plugin.auto_schedule_routers(self.adminContext,
-                                          'host_3',
-                                          routers_to_auto_schedule)
-
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [router['id']],
-            admin_state_up=True)
-        self.assertEqual(3, len(agents))
-
-        # Simulate agent restart to make sure we don't try to re-bind
-        self.plugin.auto_schedule_routers(self.adminContext,
-                                          'host_3',
-                                          routers_to_auto_schedule)
-
-    def test_scheduler_with_ha_enabled_not_enough_agent(self):
-        r1 = self._create_ha_router()
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id']],
-            admin_state_up=True)
-        self.assertEqual(2, len(agents))
-
-        self._set_l3_agent_admin_state(self.adminContext,
-                                       self.agent_id2, False)
-        self.assertRaises(
-            l3_ha.HANotEnoughAvailableAgents, self._create_ha_router)
-
-
-class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin):
-
-    def _register_l3_agents(self, plugin=None):
-        super(L3HALeastRoutersSchedulerTestCase,
-              self)._register_l3_agents(plugin=plugin)
-
-        agent = helpers.register_l3_agent(host='host_3')
-        self.agent_id3 = agent.id
-
-        agent = helpers.register_l3_agent(host='host_4')
-        self.agent_id4 = agent.id
-
-    def setUp(self):
-        super(L3HALeastRoutersSchedulerTestCase, self).setUp()
-        self.plugin.router_scheduler = importutils.import_object(
-            'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
-        )
-
-    def test_scheduler(self):
-        cfg.CONF.set_override('max_l3_agents_per_router', 2)
-
-        # disable the third and fourth agents to be sure that the router
-        # will be scheduled to the first two
-        self._set_l3_agent_admin_state(self.adminContext,
-                                       self.agent_id3, False)
-        self._set_l3_agent_admin_state(self.adminContext,
-                                       self.agent_id4, False)
-
-        r1 = self._create_ha_router()
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id']],
-            admin_state_up=True)
-        self.assertEqual(2, len(agents))
-        agent_ids = [agent['id'] for agent in agents]
-        self.assertIn(self.agent_id1, agent_ids)
-        self.assertIn(self.agent_id2, agent_ids)
-
-        self._set_l3_agent_admin_state(self.adminContext,
-                                       self.agent_id3, True)
-        self._set_l3_agent_admin_state(self.adminContext,
-                                       self.agent_id4, True)
-
-        r2 = self._create_ha_router()
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r2['id']],
-            admin_state_up=True)
-        self.assertEqual(2, len(agents))
-        agent_ids = [agent['id'] for agent in agents]
-        self.assertIn(self.agent_id3, agent_ids)
-        self.assertIn(self.agent_id4, agent_ids)
-
-
-class TestGetL3AgentsWithAgentModeFilter(testlib_api.SqlTestCase,
-                                         L3SchedulerBaseMixin):
-    """Test cases to test get_l3_agents.
-
-    This class tests the L3AgentSchedulerDbMixin.get_l3_agents()
-    for the 'agent_mode' filter with various values.
-
-    5 l3 agents are registered in the order - legacy, dvr_snat, dvr, fake_mode
-    and legacy
-    """
-
-    scenarios = [
-        ('no filter',
-            dict(agent_modes=[],
-                 expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
-                                       'fake_mode', 'legacy'])),
-
-        ('legacy',
-            dict(agent_modes=['legacy'],
-                 expected_agent_modes=['legacy', 'legacy'])),
-
-        ('dvr_snat',
-            dict(agent_modes=['dvr_snat'],
-                 expected_agent_modes=['dvr_snat'])),
-
-        ('dvr',
-            dict(agent_modes=['dvr'],
-                 expected_agent_modes=['dvr'])),
-
-        ('legacy and dvr snat',
-            dict(agent_modes=['legacy', 'dvr_snat', 'legacy'],
-                 expected_agent_modes=['legacy', 'dvr_snat', 'legacy'])),
-
-        ('legacy and dvr',
-            dict(agent_modes=['legacy', 'dvr'],
-                 expected_agent_modes=['legacy', 'dvr', 'legacy'])),
-
-        ('dvr_snat and dvr',
-            dict(agent_modes=['dvr_snat', 'dvr'],
-                 expected_agent_modes=['dvr_snat', 'dvr'])),
-
-        ('legacy, dvr_snat and dvr',
-            dict(agent_modes=['legacy', 'dvr_snat', 'dvr'],
-                 expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
-                                       'legacy'])),
-
-        ('invalid',
-            dict(agent_modes=['invalid'],
-                 expected_agent_modes=[])),
-    ]
-
-    def setUp(self):
-        super(TestGetL3AgentsWithAgentModeFilter, self).setUp()
-        self.plugin = L3HAPlugin()
-        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
-        self.adminContext = n_context.get_admin_context()
-        hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5']
-        agent_modes = ['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy']
-        for host, agent_mode in zip(hosts, agent_modes):
-            helpers.register_l3_agent(host, agent_mode)
-
-    def _get_agent_mode(self, agent):
-        agent_conf = self.plugin.get_configuration_dict(agent)
-        return agent_conf.get('agent_mode', 'None')
-
-    def test_get_l3_agents(self):
-        l3_agents = self.plugin.get_l3_agents(
-            self.adminContext, filters={'agent_modes': self.agent_modes})
-        self.assertEqual(len(self.expected_agent_modes), len(l3_agents))
-        returned_agent_modes = [self._get_agent_mode(agent)
-                                for agent in l3_agents]
-        self.assertEqual(self.expected_agent_modes, returned_agent_modes)
-
-
-class L3AgentAZLeastRoutersSchedulerTestCase(L3HATestCaseMixin):
-
-    def setUp(self):
-        super(L3AgentAZLeastRoutersSchedulerTestCase, self).setUp()
-        self.plugin.router_scheduler = importutils.import_object(
-            'neutron.scheduler.l3_agent_scheduler.AZLeastRoutersScheduler')
-        # Mock the HA interface notification so router creation does not
-        # schedule implicitly and the test can drive scheduling explicitly
-        mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
-                          '_notify_ha_interfaces_updated').start()
-
-    def _register_l3_agents(self):
-        self.agent1 = helpers.register_l3_agent(host='az1-host1', az='az1')
-        self.agent2 = helpers.register_l3_agent(host='az1-host2', az='az1')
-        self.agent3 = helpers.register_l3_agent(host='az2-host1', az='az2')
-        self.agent4 = helpers.register_l3_agent(host='az2-host2', az='az2')
-        self.agent5 = helpers.register_l3_agent(host='az3-host1', az='az3')
-        self.agent6 = helpers.register_l3_agent(host='az3-host2', az='az3')
-
-    def test_az_scheduler_auto_schedule(self):
-        r1 = self._create_ha_router(ha=False, az_hints=['az1'])
-        self.plugin.auto_schedule_routers(self.adminContext,
-                                          'az1-host2', None)
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id']])
-        self.assertEqual(1, len(agents))
-        self.assertEqual('az1-host2', agents[0]['host'])
-
-    def test_az_scheduler_auto_schedule_no_match(self):
-        r1 = self._create_ha_router(ha=False, az_hints=['az1'])
-        self.plugin.auto_schedule_routers(self.adminContext,
-                                          'az2-host1', None)
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id']])
-        self.assertEqual(0, len(agents))
-
-    def test_az_scheduler_default_az(self):
-        cfg.CONF.set_override('default_availability_zones', ['az2'])
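-        # Routers created without az_hints should fall back to the default AZ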
-        r1 = self._create_ha_router(ha=False)
-        r2 = self._create_ha_router(ha=False)
-        r3 = self._create_ha_router(ha=False)
-        self.plugin.schedule_router(self.adminContext, r1['id'])
-        self.plugin.schedule_router(self.adminContext, r2['id'])
-        self.plugin.schedule_router(self.adminContext, r3['id'])
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id'], r2['id'], r3['id']])
-        self.assertEqual(3, len(agents))
-        expected_hosts = set(['az2-host1', 'az2-host2'])
-        hosts = set([a['host'] for a in agents])
-        self.assertEqual(expected_hosts, hosts)
-
-    def test_az_scheduler_az_hints(self):
-        r1 = self._create_ha_router(ha=False, az_hints=['az3'])
-        r2 = self._create_ha_router(ha=False, az_hints=['az3'])
-        r3 = self._create_ha_router(ha=False, az_hints=['az3'])
-        self.plugin.schedule_router(self.adminContext, r1['id'])
-        self.plugin.schedule_router(self.adminContext, r2['id'])
-        self.plugin.schedule_router(self.adminContext, r3['id'])
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id'], r2['id'], r3['id']])
-        self.assertEqual(3, len(agents))
-        expected_hosts = set(['az3-host1', 'az3-host2'])
-        hosts = set([a['host'] for a in agents])
-        self.assertEqual(expected_hosts, hosts)
-
-    def test_az_scheduler_least_routers(self):
-        r1 = self._create_ha_router(ha=False, az_hints=['az1'])
-        r2 = self._create_ha_router(ha=False, az_hints=['az1'])
-        r3 = self._create_ha_router(ha=False, az_hints=['az1'])
-        r4 = self._create_ha_router(ha=False, az_hints=['az1'])
-        self.plugin.schedule_router(self.adminContext, r1['id'])
-        self.plugin.schedule_router(self.adminContext, r2['id'])
-        self.plugin.schedule_router(self.adminContext, r3['id'])
-        self.plugin.schedule_router(self.adminContext, r4['id'])
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id'], r2['id'], r3['id'], r4['id']])
-        host_num = collections.defaultdict(int)
-        for agent in agents:
-            host_num[agent['host']] += 1
-        self.assertEqual(2, host_num['az1-host1'])
-        self.assertEqual(2, host_num['az1-host2'])
-
-    def test_az_scheduler_ha_az_hints(self):
-        cfg.CONF.set_override('max_l3_agents_per_router', 2)
-        r1 = self._create_ha_router(az_hints=['az1', 'az3'])
-        self.plugin.schedule_router(self.adminContext, r1['id'])
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id']])
-        self.assertEqual(2, len(agents))
-        expected_azs = set(['az1', 'az3'])
-        azs = set([a['availability_zone'] for a in agents])
-        self.assertEqual(expected_azs, azs)
-
-    def test_az_scheduler_ha_auto_schedule(self):
-        cfg.CONF.set_override('max_l3_agents_per_router', 3)
-        r1 = self._create_ha_router(az_hints=['az1', 'az3'])
-        self._set_l3_agent_admin_state(self.adminContext, self.agent2['id'],
-                                       state=False)
-        self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'],
-                                       state=False)
-        self.plugin.schedule_router(self.adminContext, r1['id'])
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id']])
-        self.assertEqual(2, len(agents))
-        hosts = set([a['host'] for a in agents])
-        self.assertEqual(set(['az1-host1', 'az3-host1']), hosts)
-        self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'],
-                                       state=True)
-        self.plugin.auto_schedule_routers(self.adminContext,
-                                          'az3-host2', None)
-        agents = self.plugin.get_l3_agents_hosting_routers(
-            self.adminContext, [r1['id']])
-        self.assertEqual(3, len(agents))
-        expected_hosts = set(['az1-host1', 'az3-host1', 'az3-host2'])
-        hosts = set([a['host'] for a in agents])
-        self.assertEqual(expected_hosts, hosts)
diff --git a/neutron/tests/unit/services/__init__.py b/neutron/tests/unit/services/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/l3_router/__init__.py b/neutron/tests/unit/services/l3_router/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/metering/__init__.py b/neutron/tests/unit/services/metering/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/metering/agents/__init__.py b/neutron/tests/unit/services/metering/agents/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/metering/agents/test_metering_agent.py b/neutron/tests/unit/services/metering/agents/test_metering_agent.py
deleted file mode 100644 (file)
index b1c3d25..0000000
+++ /dev/null
@@ -1,221 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from neutron.services.metering.agents import metering_agent
-from neutron.tests import base
-from neutron.tests import fake_notifier
-
-
-_uuid = uuidutils.generate_uuid
-
-TENANT_ID = _uuid()
-LABEL_ID = _uuid()
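-# A single router carrying one metering label with no rules attached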
-ROUTERS = [{'status': 'ACTIVE',
-            'name': 'router1',
-            'gw_port_id': None,
-            'admin_state_up': True,
-            'tenant_id': TENANT_ID,
-            '_metering_labels': [{'rules': [],
-                                  'id': LABEL_ID}],
-            'id': _uuid()}]
-
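-# Same router shape, but each label carries a single 'rule' payload, as
-# delivered by the add/remove label rule notifications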
-ROUTERS_WITH_RULE = [{'status': 'ACTIVE',
-                      'name': 'router1',
-                      'gw_port_id': None,
-                      'admin_state_up': True,
-                      'tenant_id': TENANT_ID,
-                      '_metering_labels': [{'rule': {},
-                                            'id': LABEL_ID}],
-                      'id': _uuid()}]
-
-
-class TestMeteringOperations(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestMeteringOperations, self).setUp()
-        cfg.CONF.register_opts(metering_agent.MeteringAgent.Opts)
-
-        self.noop_driver = ('neutron.services.metering.drivers.noop.'
-                            'noop_driver.NoopMeteringDriver')
-        cfg.CONF.set_override('driver', self.noop_driver)
-        cfg.CONF.set_override('measure_interval', 0)
-        cfg.CONF.set_override('report_interval', 0)
-
-        self.setup_notification_driver()
-
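-        # Stub the plugin-side RPC to return no routers, so the agent never
-        # syncs real state during these tests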
-        metering_rpc = ('neutron.services.metering.agents.metering_agent.'
-                        'MeteringPluginRpc._get_sync_data_metering')
-        self.metering_rpc_patch = mock.patch(metering_rpc, return_value=[])
-        self.metering_rpc_patch.start()
-
-        self.driver_patch = mock.patch(self.noop_driver, spec=True)
-        self.driver_patch.start()
-
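-        # Keep the periodic measure/report loops from actually spawning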
-        loopingcall_patch = mock.patch(
-            'oslo_service.loopingcall.FixedIntervalLoopingCall')
-        loopingcall_patch.start()
-
-        self.agent = metering_agent.MeteringAgent('my agent', cfg.CONF)
-        self.driver = self.agent.metering_driver
-
-    def test_add_metering_label(self):
-        self.agent.add_metering_label(None, ROUTERS)
-        self.assertEqual(1, self.driver.add_metering_label.call_count)
-
-    def test_remove_metering_label(self):
-        self.agent.remove_metering_label(None, ROUTERS)
-        self.assertEqual(1, self.driver.remove_metering_label.call_count)
-
-    def test_update_metering_label_rule(self):
-        self.agent.update_metering_label_rules(None, ROUTERS)
-        self.assertEqual(1, self.driver.update_metering_label_rules.call_count)
-
-    def test_add_metering_label_rule(self):
-        self.agent.add_metering_label_rule(None, ROUTERS_WITH_RULE)
-        self.assertEqual(1, self.driver.add_metering_label_rule.call_count)
-
-    def test_remove_metering_label_rule(self):
-        self.agent.remove_metering_label_rule(None, ROUTERS_WITH_RULE)
-        self.assertEqual(1, self.driver.remove_metering_label_rule.call_count)
-
-    def test_routers_updated(self):
-        self.agent.routers_updated(None, ROUTERS)
-        self.assertEqual(1, self.driver.update_routers.call_count)
-
-    def test_get_traffic_counters(self):
-        self.agent._get_traffic_counters(None, ROUTERS)
-        self.assertEqual(1, self.driver.get_traffic_counters.call_count)
-
-    def test_notification_report(self):
-        self.agent.routers_updated(None, ROUTERS)
-
-        self.driver.get_traffic_counters.return_value = {LABEL_ID:
-                                                         {'pkts': 88,
-                                                          'bytes': 444}}
-        self.agent._metering_loop()
-
-        self.assertNotEqual(0, len(fake_notifier.NOTIFICATIONS))
-        for n in fake_notifier.NOTIFICATIONS:
-            if n['event_type'] == 'l3.meter':
-                break
-
-        self.assertEqual('l3.meter', n['event_type'])
-
-        payload = n['payload']
-        self.assertEqual(TENANT_ID, payload['tenant_id'])
-        self.assertEqual(LABEL_ID, payload['label_id'])
-        self.assertEqual(88, payload['pkts'])
-        self.assertEqual(444, payload['bytes'])
-
-    def test_router_deleted(self):
-        label_id = _uuid()
-        self.driver.get_traffic_counters = mock.MagicMock()
-        self.driver.get_traffic_counters.return_value = {label_id:
-                                                         {'pkts': 44,
-                                                          'bytes': 222}}
-        self.agent._add_metering_info = mock.MagicMock()
-
-        self.agent.routers_updated(None, ROUTERS)
-        self.agent.router_deleted(None, ROUTERS[0]['id'])
-
-        self.assertEqual(1, self.agent._add_metering_info.call_count)
-        self.assertEqual(1, self.driver.remove_router.call_count)
-
-        self.agent._add_metering_info.assert_called_with(label_id, 44, 222)
-
-    @mock.patch('time.time')
-    def _test_purge_metering_info(self, current_timestamp, is_empty,
-                                  mock_time):
-        mock_time.return_value = current_timestamp
-        self.agent.metering_infos = {'fake': {'last_update': 1}}
-        self.config(report_interval=1)
-
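-        # An entry is purged when last_update < now - report_interval; see
-        # the timestamp arithmetic noted in the callers below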
-        self.agent._purge_metering_info()
-        self.assertEqual(0 if is_empty else 1, len(self.agent.metering_infos))
-        self.assertEqual(1, mock_time.call_count)
-
-    def test_purge_metering_info(self):
-        # 1 < 2 - 1 -> False
-        self._test_purge_metering_info(2, False)
-
-    def test_purge_metering_info_delete(self):
-        # 1 < 3 - 1 -> False
-        self._test_purge_metering_info(3, True)
-
-    @mock.patch('time.time')
-    def _test_add_metering_info(self, expected_info, current_timestamp,
-                                mock_time):
-        mock_time.return_value = current_timestamp
-        actual_info = self.agent._add_metering_info('fake_label_id', 1, 1)
-        self.assertEqual(1, len(self.agent.metering_infos))
-        self.assertEqual(expected_info, actual_info)
-        self.assertEqual(expected_info,
-                         self.agent.metering_infos['fake_label_id'])
-        self.assertEqual(1, mock_time.call_count)
-
-    def test_add_metering_info_create(self):
-        expected_info = {'bytes': 1, 'pkts': 1, 'time': 0, 'first_update': 1,
-                         'last_update': 1}
-        self._test_add_metering_info(expected_info, 1)
-
-    def test_add_metering_info_update(self):
-        expected_info = {'bytes': 1, 'pkts': 1, 'time': 0, 'first_update': 1,
-                         'last_update': 1}
-        self.agent.metering_infos = {'fake_label_id': expected_info}
-        expected_info.update({'bytes': 2, 'pkts': 2, 'time': 1,
-                              'last_update': 2})
-        self._test_add_metering_info(expected_info, 2)
-
-
-class TestMeteringDriver(base.BaseTestCase):
-    def setUp(self):
-        super(TestMeteringDriver, self).setUp()
-        cfg.CONF.register_opts(metering_agent.MeteringAgent.Opts)
-
-        self.noop_driver = ('neutron.services.metering.drivers.noop.'
-                            'noop_driver.NoopMeteringDriver')
-        cfg.CONF.set_override('driver', self.noop_driver)
-
-        self.agent = metering_agent.MeteringAgent('my agent', cfg.CONF)
-        self.driver = mock.Mock()
-        self.agent.metering_driver = self.driver
-
-    def test_add_metering_label_with_bad_driver_impl(self):
-        del self.driver.add_metering_label
-
-        with mock.patch.object(metering_agent, 'LOG') as log:
-            self.agent.add_metering_label(None, ROUTERS)
-            log.exception.assert_called_with(mock.ANY,
-                                             {'driver': self.noop_driver,
-                                              'func': 'add_metering_label'})
-
-    def test_add_metering_label_runtime_error(self):
-        self.driver.add_metering_label.side_effect = RuntimeError
-
-        with mock.patch.object(metering_agent, 'LOG') as log:
-            self.agent.add_metering_label(None, ROUTERS)
-            log.exception.assert_called_with(mock.ANY,
-                                             {'driver': self.noop_driver,
-                                              'func': 'add_metering_label'})
-
-    def test_init_chain(self):
-        with mock.patch('oslo_service.'
-                        'periodic_task.PeriodicTasks.__init__') as init:
-            metering_agent.MeteringAgent('my agent', cfg.CONF)
-        init.assert_called_once_with(cfg.CONF)
diff --git a/neutron/tests/unit/services/metering/drivers/__init__.py b/neutron/tests/unit/services/metering/drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/metering/drivers/test_iptables.py b/neutron/tests/unit/services/metering/drivers/test_iptables.py
deleted file mode 100644 (file)
index 91858e4..0000000
+++ /dev/null
@@ -1,440 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-import mock
-from oslo_config import cfg
-
-from neutron.services.metering.drivers.iptables import iptables_driver
-from neutron.tests import base
-
-
-TEST_ROUTERS = [
-    {'_metering_labels': [
-        {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83',
-         'rules': [{
-             'direction': 'ingress',
-             'excluded': False,
-             'id': '7f1a261f-2489-4ed1-870c-a62754501379',
-             'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83',
-             'remote_ip_prefix': '10.0.0.0/24'}]}],
-     'admin_state_up': True,
-     'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee',
-     'id': '473ec392-1711-44e3-b008-3251ccfc5099',
-     'name': 'router1',
-     'status': 'ACTIVE',
-     'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'},
-    {'_metering_labels': [
-        {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83',
-         'rules': [{
-             'direction': 'egress',
-             'excluded': False,
-             'id': 'fa2441e8-2489-4ed1-870c-a62754501379',
-             'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83',
-             'remote_ip_prefix': '20.0.0.0/24'}]}],
-     'admin_state_up': True,
-     'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee',
-     'id': '373ec392-1711-44e3-b008-3251ccfc5099',
-     'name': 'router2',
-     'status': 'ACTIVE',
-     'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'},
-]
-
-TEST_ROUTERS_WITH_ONE_RULE = [
-    {'_metering_labels': [
-        {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83',
-         'rule': {
-             'direction': 'ingress',
-             'excluded': False,
-             'id': '7f1a261f-2489-4ed1-870c-a62754501379',
-             'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83',
-             'remote_ip_prefix': '30.0.0.0/24'}}],
-     'admin_state_up': True,
-     'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee',
-     'id': '473ec392-1711-44e3-b008-3251ccfc5099',
-     'name': 'router1',
-     'status': 'ACTIVE',
-     'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'},
-    {'_metering_labels': [
-        {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83',
-         'rule': {
-             'direction': 'egress',
-             'excluded': False,
-             'id': 'fa2441e8-2489-4ed1-870c-a62754501379',
-             'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83',
-             'remote_ip_prefix': '40.0.0.0/24'}}],
-     'admin_state_up': True,
-     'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee',
-     'id': '373ec392-1711-44e3-b008-3251ccfc5099',
-     'name': 'router2',
-     'status': 'ACTIVE',
-     'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'},
-]
-
-
-class IptablesDriverTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(IptablesDriverTestCase, self).setUp()
-        self.utils_exec_p = mock.patch(
-            'neutron.agent.linux.utils.execute')
-        self.utils_exec = self.utils_exec_p.start()
-        self.iptables_cls_p = mock.patch(
-            'neutron.agent.linux.iptables_manager.IptablesManager')
-        self.iptables_cls = self.iptables_cls_p.start()
-        self.iptables_inst = mock.Mock()
-        self.v4filter_inst = mock.Mock()
-        self.v6filter_inst = mock.Mock()
-        self.v4filter_inst.chains = []
-        self.v6filter_inst.chains = []
-        self.iptables_inst.ipv4 = {'filter': self.v4filter_inst}
-        self.iptables_inst.ipv6 = {'filter': self.v6filter_inst}
-        self.iptables_cls.return_value = self.iptables_inst
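-        # Every chain and rule the driver adds lands on these mocked filter
-        # tables, so each test can assert the exact iptables calls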
-        cfg.CONF.set_override('interface_driver',
-                              'neutron.agent.linux.interface.NullDriver')
-        self.metering = iptables_driver.IptablesMeteringDriver('metering',
-                                                               cfg.CONF)
-
-    def test_add_metering_label(self):
-        routers = TEST_ROUTERS[:1]
-
-        self.metering.add_metering_label(None, routers)
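-        # Chain names embed a truncated label UUID: the l- chain carries the
-        # counting rule, the r- chain the per-label match rules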
-        calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-c5df2fe5-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-c5df2fe5-c60',
-                                    '',
-                                    wrap=False)]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_process_metering_label_rules(self):
-        self.metering.add_metering_label(None, TEST_ROUTERS)
-
-        calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-c5df2fe5-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-c5df2fe5-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 10.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False),
-                 mock.call.add_chain('neutron-meter-l-eeef45da-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-eeef45da-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-eeef45da-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-eeef45da-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-eeef45da-c60',
-                                    '-o qg-7d411f48-ec -d 20.0.0.0/24'
-                                    ' -j neutron-meter-l-eeef45da-c60',
-                                    wrap=False, top=False)]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_add_metering_label_with_rules(self):
-        routers = copy.deepcopy(TEST_ROUTERS)
-        routers[1]['_metering_labels'][0]['rules'][0].update({
-            'direction': 'ingress',
-            'excluded': True,
-        })
-
-        self.metering.add_metering_label(None, routers)
-        calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-c5df2fe5-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-c5df2fe5-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 10.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False),
-                 mock.call.add_chain('neutron-meter-l-eeef45da-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-eeef45da-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-eeef45da-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-eeef45da-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-eeef45da-c60',
-                                    '-i qg-7d411f48-ec -s 20.0.0.0/24'
-                                    ' -j RETURN',
-                                    wrap=False, top=True)]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_update_metering_label_rules(self):
-        routers = TEST_ROUTERS[:1]
-
-        self.metering.add_metering_label(None, routers)
-
-        updates = copy.deepcopy(routers)
-        updates[0]['_metering_labels'][0]['rules'] = [{
-            'direction': 'egress',
-            'excluded': True,
-            'id': '7f1a261f-2489-4ed1-870c-a62754501379',
-            'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83',
-            'remote_ip_prefix': '10.0.0.0/24'},
-            {'direction': 'ingress',
-             'excluded': False,
-             'id': '6f1a261f-2489-4ed1-870c-a62754501379',
-             'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83',
-             'remote_ip_prefix': '20.0.0.0/24'}]
-
-        self.metering.update_metering_label_rules(None, updates)
-
-        calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-c5df2fe5-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-c5df2fe5-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 10.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False),
-                 mock.call.empty_chain('neutron-meter-r-c5df2fe5-c60',
-                                       wrap=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-o qg-6d411f48-ec -d 10.0.0.0/24'
-                                    ' -j RETURN',
-                                    wrap=False, top=True),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 20.0.0.0/24 -j '
-                                    'neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False)]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_remove_metering_label_rule_in_update(self):
-        routers = copy.deepcopy(TEST_ROUTERS[:1])
-        routers[0]['_metering_labels'][0]['rules'].append({
-            'direction': 'ingress',
-            'excluded': False,
-            'id': 'aaaa261f-2489-4ed1-870c-a62754501379',
-            'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83',
-            'remote_ip_prefix': '20.0.0.0/24',
-        })
-
-        self.metering.add_metering_label(None, routers)
-
-        del routers[0]['_metering_labels'][0]['rules'][1]
-
-        self.metering.update_metering_label_rules(None, routers)
-        calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-c5df2fe5-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-c5df2fe5-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 10.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 20.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False),
-                 mock.call.empty_chain('neutron-meter-r-c5df2fe5-c60',
-                                       wrap=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 10.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False)]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_add_metering_label_rule(self):
-        new_routers_rules = TEST_ROUTERS_WITH_ONE_RULE
-        self.metering.update_routers(None, TEST_ROUTERS)
-        self.metering.add_metering_label_rule(None, new_routers_rules)
-        calls = [mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 30.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False),
-                 mock.call.add_rule('neutron-meter-r-eeef45da-c60',
-                                    '-o qg-7d411f48-ec -d 40.0.0.0/24'
-                                    ' -j neutron-meter-l-eeef45da-c60',
-                                    wrap=False, top=False)]
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_remove_metering_label_rule(self):
-        new_routers_rules = TEST_ROUTERS_WITH_ONE_RULE
-        self.metering.update_routers(None, TEST_ROUTERS)
-        self.metering.add_metering_label_rule(None, new_routers_rules)
-        self.metering.remove_metering_label_rule(None, new_routers_rules)
-        calls = [mock.call.remove_rule('neutron-meter-r-c5df2fe5-c60',
-                                       '-i qg-6d411f48-ec -s 30.0.0.0/24'
-                                       ' -j neutron-meter-l-c5df2fe5-c60',
-                                       wrap=False, top=False),
-                 mock.call.remove_rule('neutron-meter-r-eeef45da-c60',
-                                       '-o qg-7d411f48-ec -d 40.0.0.0/24'
-                                       ' -j neutron-meter-l-eeef45da-c60',
-                                       wrap=False, top=False)]
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_remove_metering_label(self):
-        routers = TEST_ROUTERS[:1]
-
-        self.metering.add_metering_label(None, routers)
-        self.metering.remove_metering_label(None, routers)
-        calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-c5df2fe5-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-c5df2fe5-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 10.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False),
-                 mock.call.remove_chain('neutron-meter-l-c5df2fe5-c60',
-                                        wrap=False),
-                 mock.call.remove_chain('neutron-meter-r-c5df2fe5-c60',
-                                        wrap=False)]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_update_routers(self):
-        routers = copy.deepcopy(TEST_ROUTERS)
-        routers[1]['_metering_labels'][0]['rules'][0].update({
-            'direction': 'ingress',
-            'excluded': True,
-        })
-
-        self.metering.add_metering_label(None, routers)
-
-        updates = copy.deepcopy(routers)
-        updates[0]['gw_port_id'] = '587b63c1-22a3-40b3-9834-486d1fb215a5'
-
-        self.metering.update_routers(None, updates)
-        calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-c5df2fe5-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-c5df2fe5-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-6d411f48-ec -s 10.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False),
-                 mock.call.add_chain('neutron-meter-l-eeef45da-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-eeef45da-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-eeef45da-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-eeef45da-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-eeef45da-c60',
-                                    '-i qg-7d411f48-ec -s 20.0.0.0/24'
-                                    ' -j RETURN',
-                                    wrap=False, top=True),
-                 mock.call.remove_chain('neutron-meter-l-c5df2fe5-c60',
-                                        wrap=False),
-                 mock.call.remove_chain('neutron-meter-r-c5df2fe5-c60',
-                                        wrap=False),
-                 mock.call.add_chain('neutron-meter-l-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_chain('neutron-meter-r-c5df2fe5-c60',
-                                     wrap=False),
-                 mock.call.add_rule('neutron-meter-FORWARD', '-j '
-                                    'neutron-meter-r-c5df2fe5-c60',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-l-c5df2fe5-c60',
-                                    '',
-                                    wrap=False),
-                 mock.call.add_rule('neutron-meter-r-c5df2fe5-c60',
-                                    '-i qg-587b63c1-22 -s 10.0.0.0/24'
-                                    ' -j neutron-meter-l-c5df2fe5-c60',
-                                    wrap=False, top=False)]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_update_routers_removal(self):
-        routers = TEST_ROUTERS
-
-        self.metering.add_metering_label(None, routers)
-
-        # Remove router id '373ec392-1711-44e3-b008-3251ccfc5099'
-        updates = TEST_ROUTERS[:1]
-
-        self.metering.update_routers(None, updates)
-        calls = [mock.call.remove_chain('neutron-meter-l-eeef45da-c60',
-                                        wrap=False),
-                 mock.call.remove_chain('neutron-meter-r-eeef45da-c60',
-                                        wrap=False)]
-
-        self.v4filter_inst.assert_has_calls(calls)
-
-    def test_get_traffic_counters_with_missing_chain(self):
-        for r in TEST_ROUTERS:
-            rm = iptables_driver.RouterWithMetering(self.metering.conf, r)
-            rm.metering_labels = {r['_metering_labels'][0]['id']: 'fake'}
-            self.metering.routers[r['id']] = rm
-
-        mocked_method = self.iptables_cls.return_value.get_traffic_counters
-        mocked_method.side_effect = [{'pkts': 1, 'bytes': 8},
-                                     RuntimeError('Failed to find the chain')]
-
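-        # The second lookup fails as if the chain were gone; the driver is
-        # expected to skip it and still report counters for the first label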
-        counters = self.metering.get_traffic_counters(None, TEST_ROUTERS)
-        expected_label_id = TEST_ROUTERS[0]['_metering_labels'][0]['id']
-        self.assertIn(expected_label_id, counters)
-        self.assertEqual(1, counters[expected_label_id]['pkts'])
-        self.assertEqual(8, counters[expected_label_id]['bytes'])
diff --git a/neutron/tests/unit/services/metering/test_metering_plugin.py b/neutron/tests/unit/services/metering/test_metering_plugin.py
deleted file mode 100644 (file)
index b2443bf..0000000
+++ /dev/null
@@ -1,479 +0,0 @@
-# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo_utils import uuidutils
-
-from neutron.api.v2 import attributes as attr
-from neutron import context
-from neutron.db import agents_db
-from neutron.db import l3_agentschedulers_db
-from neutron.db.metering import metering_rpc
-from neutron.extensions import l3 as ext_l3
-from neutron.extensions import metering as ext_metering
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.tests.common import helpers
-from neutron.tests import tools
-from neutron.tests.unit.db.metering import test_metering_db
-from neutron.tests.unit.db import test_db_base_plugin_v2
-from neutron.tests.unit.extensions import test_l3
-
-
-_uuid = uuidutils.generate_uuid
-
-METERING_SERVICE_PLUGIN_KLASS = (
-    "neutron.services.metering."
-    "metering_plugin.MeteringPlugin"
-)
-
-
-class MeteringTestExtensionManager(object):
-
-    def get_resources(self):
-        attr.RESOURCE_ATTRIBUTE_MAP.update(ext_metering.RESOURCE_ATTRIBUTE_MAP)
-        attr.RESOURCE_ATTRIBUTE_MAP.update(ext_l3.RESOURCE_ATTRIBUTE_MAP)
-
-        l3_res = ext_l3.L3.get_resources()
-        metering_res = ext_metering.Metering.get_resources()
-
-        return l3_res + metering_res
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
-                         test_l3.L3NatTestCaseMixin,
-                         test_metering_db.MeteringPluginDbTestCaseMixin):
-
-    resource_prefix_map = dict(
-        (k.replace('_', '-'), "/metering")
-        for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys()
-    )
-
-    def setUp(self):
-        plugin = 'neutron.tests.unit.extensions.test_l3.TestL3NatIntPlugin'
-        service_plugins = {'metering_plugin_name':
-                           METERING_SERVICE_PLUGIN_KLASS}
-        ext_mgr = MeteringTestExtensionManager()
-        super(TestMeteringPlugin, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
-                                              service_plugins=service_plugins)
-
-        self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
-
-        uuid = 'oslo_utils.uuidutils.generate_uuid'
-        self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
-        self.mock_uuid = self.uuid_patch.start()
-
-        self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
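-        # Pin the request context so the RPC notification mocks below can be
-        # asserted against self.ctx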
-        self.ctx = context.Context('', self.tenant_id, is_admin=True)
-        self.context_patch = mock.patch('neutron.context.Context',
-                                        return_value=self.ctx)
-        self.mock_context = self.context_patch.start()
-
-        self.topic = 'metering_agent'
-
-        add = ('neutron.api.rpc.agentnotifiers.' +
-               'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
-               '.add_metering_label')
-        self.add_patch = mock.patch(add)
-        self.mock_add = self.add_patch.start()
-
-        remove = ('neutron.api.rpc.agentnotifiers.' +
-                  'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
-                  '.remove_metering_label')
-        self.remove_patch = mock.patch(remove)
-        self.mock_remove = self.remove_patch.start()
-
-        update = ('neutron.api.rpc.agentnotifiers.' +
-                  'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
-                  '.update_metering_label_rules')
-        self.update_patch = mock.patch(update)
-        self.mock_update = self.update_patch.start()
-
-        add_rule = ('neutron.api.rpc.agentnotifiers.' +
-                    'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
-                    '.add_metering_label_rule')
-        self.add_rule_patch = mock.patch(add_rule)
-        self.mock_add_rule = self.add_rule_patch.start()
-
-        remove_rule = ('neutron.api.rpc.agentnotifiers.' +
-                       'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
-                       '.remove_metering_label_rule')
-        self.remove_rule_patch = mock.patch(remove_rule)
-        self.mock_remove_rule = self.remove_rule_patch.start()
-
-    def test_add_metering_label_rpc_call(self):
-        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
-        expected = [{'status': 'ACTIVE',
-                     'name': 'router1',
-                     'gw_port_id': None,
-                     'admin_state_up': True,
-                     'tenant_id': self.tenant_id,
-                     '_metering_labels': [
-                         {'rules': [],
-                          'id': self.uuid}],
-                     'id': self.uuid}]
-
-        tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
-        self.mock_uuid.return_value = second_uuid
-        with self.router(name='router2', tenant_id=tenant_id_2,
-                         set_context=True):
-            self.mock_uuid.return_value = self.uuid
-            with self.router(name='router1', tenant_id=self.tenant_id,
-                             set_context=True):
-                with self.metering_label(tenant_id=self.tenant_id,
-                                         set_context=True):
-                    self.mock_add.assert_called_with(self.ctx, expected)
-
-    def test_add_metering_label_shared_rpc_call(self):
-        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
-        expected = [{'status': 'ACTIVE',
-                     'name': 'router1',
-                     'gw_port_id': None,
-                     'admin_state_up': True,
-                     'tenant_id': self.tenant_id,
-                     '_metering_labels': [
-                         {'rules': [],
-                          'id': self.uuid},
-                         {'rules': [],
-                          'id': second_uuid}],
-                     'id': self.uuid}]
-
-        tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
-        with self.router(name='router1', tenant_id=self.tenant_id,
-                         set_context=True):
-            with self.metering_label(tenant_id=self.tenant_id,
-                                     set_context=True):
-                self.mock_uuid.return_value = second_uuid
-                with self.metering_label(tenant_id=tenant_id_2, shared=True,
-                                         set_context=True):
-                    self.mock_add.assert_called_with(self.ctx, expected)
-
-    def test_remove_metering_label_rpc_call(self):
-        expected = [{'status': 'ACTIVE',
-                     'name': 'router1',
-                     'gw_port_id': None,
-                     'admin_state_up': True,
-                     'tenant_id': self.tenant_id,
-                     '_metering_labels': [
-                         {'rules': [],
-                          'id': self.uuid}],
-                     'id': self.uuid}]
-
-        with self.router(tenant_id=self.tenant_id, set_context=True):
-            with self.metering_label(tenant_id=self.tenant_id,
-                                     set_context=True) as label:
-                self.mock_add.assert_called_with(self.ctx, expected)
-                self._delete('metering-labels',
-                             label['metering_label']['id'])
-            self.mock_remove.assert_called_with(self.ctx, expected)
-
-    def test_remove_one_metering_label_rpc_call(self):
-        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
-        expected_add = [{'status': 'ACTIVE',
-                         'name': 'router1',
-                         'gw_port_id': None,
-                         'admin_state_up': True,
-                         'tenant_id': self.tenant_id,
-                         '_metering_labels': [
-                             {'rules': [],
-                              'id': self.uuid},
-                             {'rules': [],
-                              'id': second_uuid}],
-                         'id': self.uuid}]
-        expected_remove = [{'status': 'ACTIVE',
-                            'name': 'router1',
-                            'gw_port_id': None,
-                            'admin_state_up': True,
-                            'tenant_id': self.tenant_id,
-                            '_metering_labels': [
-                                {'rules': [],
-                                 'id': second_uuid}],
-                            'id': self.uuid}]
-
-        with self.router(tenant_id=self.tenant_id, set_context=True):
-            with self.metering_label(tenant_id=self.tenant_id,
-                                     set_context=True):
-                self.mock_uuid.return_value = second_uuid
-                with self.metering_label(tenant_id=self.tenant_id,
-                                         set_context=True) as label:
-                    self.mock_add.assert_called_with(self.ctx, expected_add)
-                    self._delete('metering-labels',
-                                 label['metering_label']['id'])
-                self.mock_remove.assert_called_with(self.ctx, expected_remove)
-
-    def test_add_and_remove_metering_label_rule_rpc_call(self):
-        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
-        expected_add = [{'status': 'ACTIVE',
-                         'name': 'router1',
-                         'gw_port_id': None,
-                         'admin_state_up': True,
-                         'tenant_id': self.tenant_id,
-                         '_metering_labels': [
-                             {'rule': {
-                                 'remote_ip_prefix': '10.0.0.0/24',
-                                 'direction': 'ingress',
-                                 'metering_label_id': self.uuid,
-                                 'excluded': False,
-                                 'id': second_uuid},
-                             'id': self.uuid}],
-                         'id': self.uuid}]
-
-        expected_del = [{'status': 'ACTIVE',
-                         'name': 'router1',
-                         'gw_port_id': None,
-                         'admin_state_up': True,
-                         'tenant_id': self.tenant_id,
-                         '_metering_labels': [
-                             {'rule': {
-                                 'remote_ip_prefix': '10.0.0.0/24',
-                                 'direction': 'ingress',
-                                 'metering_label_id': self.uuid,
-                                 'excluded': False,
-                                 'id': second_uuid},
-                             'id': self.uuid}],
-                         'id': self.uuid}]
-
-        with self.router(tenant_id=self.tenant_id, set_context=True):
-            with self.metering_label(tenant_id=self.tenant_id,
-                                     set_context=True) as label:
-                label_id = label['metering_label']['id']
-                self.mock_uuid.return_value = second_uuid
-                with self.metering_label_rule(label_id):
-                    self.mock_add_rule.assert_called_with(self.ctx,
-                                                          expected_add)
-                    self._delete('metering-label-rules', second_uuid)
-                self.mock_remove_rule.assert_called_with(self.ctx,
-                                                         expected_del)
-
-    def test_delete_metering_label_does_not_clear_router_tenant_id(self):
-        tenant_id = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
-        with self.metering_label(tenant_id=tenant_id) as metering_label:
-            with self.router(tenant_id=tenant_id, set_context=True) as r:
-                router = self._show('routers', r['router']['id'])
-                self.assertEqual(tenant_id, router['router']['tenant_id'])
-                metering_label_id = metering_label['metering_label']['id']
-                self._delete('metering-labels', metering_label_id, 204)
-                router = self._show('routers', r['router']['id'])
-                self.assertEqual(tenant_id, router['router']['tenant_id'])
-
-
-class TestMeteringPluginL3AgentScheduler(
-        l3_agentschedulers_db.L3AgentSchedulerDbMixin,
-        test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
-        test_l3.L3NatTestCaseMixin,
-        test_metering_db.MeteringPluginDbTestCaseMixin):
-
-    resource_prefix_map = {
-        k.replace('_', '-'): "/metering"
-        for k in ext_metering.RESOURCE_ATTRIBUTE_MAP
-    }
-
-    def setUp(self, plugin_str=None, service_plugins=None, scheduler=None):
-        if not plugin_str:
-            plugin_str = ('neutron.tests.unit.extensions.test_l3.'
-                          'TestL3NatIntAgentSchedulingPlugin')
-
-        if not service_plugins:
-            service_plugins = {'metering_plugin_name':
-                               METERING_SERVICE_PLUGIN_KLASS}
-
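-        # By default the core plugin itself acts as the L3 scheduler;
-        # subclasses pass a separate L3 service plugin path instead.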
-        if not scheduler:
-            scheduler = plugin_str
-
-        ext_mgr = MeteringTestExtensionManager()
-        super(TestMeteringPluginL3AgentScheduler,
-              self).setUp(plugin=plugin_str, ext_mgr=ext_mgr,
-                          service_plugins=service_plugins)
-
-        self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
-
-        uuid = 'oslo_utils.uuidutils.generate_uuid'
-        self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
-        self.mock_uuid = self.uuid_patch.start()
-
-        self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
-        self.ctx = context.Context('', self.tenant_id, is_admin=True)
-        self.context_patch = mock.patch('neutron.context.Context',
-                                        return_value=self.ctx)
-        self.mock_context = self.context_patch.start()
-
-        self.l3routers_patch = mock.patch(scheduler +
-                                          '.get_l3_agents_hosting_routers')
-        self.l3routers_mock = self.l3routers_patch.start()
-
-        self.topic = 'metering_agent'
-
-        add = ('neutron.api.rpc.agentnotifiers.' +
-               'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
-               '.add_metering_label')
-        self.add_patch = mock.patch(add)
-        self.mock_add = self.add_patch.start()
-
-        remove = ('neutron.api.rpc.agentnotifiers.' +
-                  'metering_rpc_agent_api.MeteringAgentNotifyAPI' +
-                  '.remove_metering_label')
-        self.remove_patch = mock.patch(remove)
-        self.mock_remove = self.remove_patch.start()
-
-    def test_add_metering_label_rpc_call(self):
-        second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
-        expected = [{'status': 'ACTIVE',
-                     'name': 'router1',
-                     'gw_port_id': None,
-                     'admin_state_up': True,
-                     'tenant_id': self.tenant_id,
-                     '_metering_labels': [
-                         {'rules': [],
-                          'id': second_uuid}],
-                     'id': self.uuid},
-                    {'status': 'ACTIVE',
-                     'name': 'router2',
-                     'gw_port_id': None,
-                     'admin_state_up': True,
-                     'tenant_id': self.tenant_id,
-                     '_metering_labels': [
-                         {'rules': [],
-                          'id': second_uuid}],
-                     'id': second_uuid}]
-
-        # bind each router to a specific agent
-        agent1 = agents_db.Agent(host='agent1')
-        agent2 = agents_db.Agent(host='agent2')
-
-        agents = {self.uuid: agent1,
-                  second_uuid: agent2}
-
-        def side_effect(context, routers, admin_state_up, active):
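-            # Each call looks up the agents hosting a single router, so
-            # return the agent bound to the first (and only) router id.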
-            return [agents[routers[0]]]
-
-        self.l3routers_mock.side_effect = side_effect
-
-        with self.router(name='router1', tenant_id=self.tenant_id,
-                         set_context=True):
-            self.mock_uuid.return_value = second_uuid
-            with self.router(name='router2', tenant_id=self.tenant_id,
-                             set_context=True):
-                with self.metering_label(tenant_id=self.tenant_id,
-                                         set_context=True):
-                    self.mock_add.assert_called_with(
-                        self.ctx, tools.UnorderedList(expected))
-
-
-class TestMeteringPluginL3AgentSchedulerServicePlugin(
-        TestMeteringPluginL3AgentScheduler):
-    """Unit tests for the case where a separate service plugin
-    implements L3 routing.
-    """
-
-    def setUp(self):
-        l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
-                     'TestL3NatAgentSchedulingServicePlugin')
-        service_plugins = {'metering_plugin_name':
-                           METERING_SERVICE_PLUGIN_KLASS,
-                           'l3_plugin_name': l3_plugin}
-
-        plugin_str = ('neutron.tests.unit.extensions.test_l3.'
-                      'TestNoL3NatPlugin')
-
-        super(TestMeteringPluginL3AgentSchedulerServicePlugin, self).setUp(
-            plugin_str=plugin_str, service_plugins=service_plugins,
-            scheduler=l3_plugin)
-
-
-class TestMeteringPluginRpcFromL3Agent(
-        test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
-        test_l3.L3NatTestCaseMixin,
-        test_metering_db.MeteringPluginDbTestCaseMixin):
-
-    resource_prefix_map = {
-        k.replace('_', '-'): "/metering"
-        for k in ext_metering.RESOURCE_ATTRIBUTE_MAP
-    }
-
-    def setUp(self):
-        service_plugins = {'metering_plugin_name':
-                           METERING_SERVICE_PLUGIN_KLASS}
-
-        plugin = ('neutron.tests.unit.extensions.test_l3.'
-                  'TestL3NatIntAgentSchedulingPlugin')
-
-        ext_mgr = MeteringTestExtensionManager()
-        super(TestMeteringPluginRpcFromL3Agent,
-              self).setUp(plugin=plugin, service_plugins=service_plugins,
-                          ext_mgr=ext_mgr)
-
-        self.meter_plugin = manager.NeutronManager.get_service_plugins().get(
-            constants.METERING)
-
-        self.tenant_id = 'admin_tenant_id'
-        self.tenant_id_1 = 'tenant_id_1'
-        self.tenant_id_2 = 'tenant_id_2'
-
-        self.adminContext = context.get_admin_context()
-        helpers.register_l3_agent(host='agent1')
-
-    def test_get_sync_data_metering(self):
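-        # Sync data is scoped per host: only routers scheduled to that
-        # host's L3 agent should be returned.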
-        with self.subnet() as subnet:
-            s = subnet['subnet']
-            self._set_net_external(s['network_id'])
-            with self.router(name='router1', subnet=subnet) as router:
-                r = router['router']
-                self._add_external_gateway_to_router(r['id'], s['network_id'])
-                with self.metering_label(tenant_id=r['tenant_id']):
-                    callbacks = metering_rpc.MeteringRpcCallbacks(
-                        self.meter_plugin)
-                    data = callbacks.get_sync_data_metering(self.adminContext,
-                                                            host='agent1')
-                    self.assertEqual('router1', data[0]['name'])
-
-                    helpers.register_l3_agent(host='agent2')
-                    data = callbacks.get_sync_data_metering(self.adminContext,
-                                                            host='agent2')
-                    self.assertFalse(data)
-
-                self._remove_external_gateway_from_router(
-                    r['id'], s['network_id'])
-
-    def test_get_sync_data_metering_shared(self):
-        with self.router(name='router1', tenant_id=self.tenant_id_1):
-            with self.router(name='router2', tenant_id=self.tenant_id_2):
-                with self.metering_label(tenant_id=self.tenant_id,
-                                         shared=True):
-                    callbacks = metering_rpc.MeteringRpcCallbacks(
-                        self.meter_plugin)
-                    data = callbacks.get_sync_data_metering(self.adminContext)
-
-                    routers = [router['name'] for router in data]
-
-                    self.assertIn('router1', routers)
-                    self.assertIn('router2', routers)
-
-    def test_get_sync_data_metering_not_shared(self):
-        with self.router(name='router1', tenant_id=self.tenant_id_1):
-            with self.router(name='router2', tenant_id=self.tenant_id_2):
-                with self.metering_label(tenant_id=self.tenant_id):
-                    callbacks = metering_rpc.MeteringRpcCallbacks(
-                        self.meter_plugin)
-                    data = callbacks.get_sync_data_metering(self.adminContext)
-
-                    routers = [router['name'] for router in data]
-
-                    self.assertEqual([], routers)
diff --git a/neutron/tests/unit/services/qos/__init__.py b/neutron/tests/unit/services/qos/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/qos/base.py b/neutron/tests/unit/services/qos/base.py
deleted file mode 100644 (file)
index 633b35a..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.api.rpc.callbacks.consumer import registry as cons_registry
-from neutron.api.rpc.callbacks.producer import registry as prod_registry
-from neutron.api.rpc.callbacks import resource_manager
-from neutron.tests.unit import testlib_api
-
-
-class BaseQosTestCase(testlib_api.SqlTestCase):
-    def setUp(self):
-        super(BaseQosTestCase, self).setUp()
-
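-        # Bypass the singleton machinery so each test case gets fresh
-        # callbacks managers with no registrations leaking between tests.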
-        with mock.patch.object(
-            resource_manager.ResourceCallbacksManager, '_singleton',
-            new_callable=mock.PropertyMock(return_value=False)):
-
-            self.cons_mgr = resource_manager.ConsumerResourceCallbacksManager()
-            self.prod_mgr = resource_manager.ProducerResourceCallbacksManager()
-            for mgr in (self.cons_mgr, self.prod_mgr):
-                mgr.clear()
-
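-        # Point the consumer and producer registries at the fresh managers.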
-        mock.patch.object(
-            cons_registry, '_get_manager', return_value=self.cons_mgr).start()
-
-        mock.patch.object(
-            prod_registry, '_get_manager', return_value=self.prod_mgr).start()
diff --git a/neutron/tests/unit/services/qos/notification_drivers/__init__.py b/neutron/tests/unit/services/qos/notification_drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/qos/notification_drivers/dummy.py b/neutron/tests/unit/services/qos/notification_drivers/dummy.py
deleted file mode 100644 (file)
index ce3de1f..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.services.qos.notification_drivers import qos_base
-
-
-class DummyQosServiceNotificationDriver(
-        qos_base.QosServiceNotificationDriverBase):
-    """Dummy service notification driver for QoS."""
-
-    def get_description(self):
-        return "Dummy"
-
-    def create_policy(self, policy):
-        pass
-
-    def update_policy(self, policy):
-        pass
-
-    def delete_policy(self, policy):
-        pass
diff --git a/neutron/tests/unit/services/qos/notification_drivers/test_manager.py b/neutron/tests/unit/services/qos/notification_drivers/test_manager.py
deleted file mode 100644 (file)
index c46e99a..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-
-from neutron.api.rpc.callbacks import events
-from neutron import context
-from neutron.objects.qos import policy as policy_object
-from neutron.services.qos.notification_drivers import manager as driver_mgr
-from neutron.services.qos.notification_drivers import message_queue
-from neutron.tests.unit.services.qos import base
-
-DUMMY_DRIVER = ("neutron.tests.unit.services.qos.notification_drivers."
-                "dummy.DummyQosServiceNotificationDriver")
-
-
-def _load_multiple_drivers():
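-    # Configure the built-in message_queue driver plus the dummy driver
-    # so the manager loads more than one notification driver.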
-    cfg.CONF.set_override(
-        "notification_drivers",
-        ["message_queue", DUMMY_DRIVER],
-        "qos")
-
-
-class TestQosDriversManagerBase(base.BaseQosTestCase):
-
-    def setUp(self):
-        super(TestQosDriversManagerBase, self).setUp()
-        self.config_parse()
-        self.setup_coreplugin()
-        config = cfg.ConfigOpts()
-        config.register_opts(driver_mgr.QOS_PLUGIN_OPTS, "qos")
-        self.policy_data = {'policy': {
-                            'id': 7777777,
-                            'tenant_id': 888888,
-                            'name': 'test-policy',
-                            'description': 'test policy description',
-                            'shared': True}}
-
-        self.context = context.get_admin_context()
-        self.policy = policy_object.QosPolicy(
-            self.context, **self.policy_data['policy'])
-        self.kwargs = {'context': None}
-
-
-class TestQosDriversManager(TestQosDriversManagerBase):
-
-    def setUp(self):
-        super(TestQosDriversManager, self).setUp()
-        # TODO(Qos): Fix this unit test to exercise the manager rather
-        # than the message_queue notification driver.
-        rpc_api_cls = mock.patch('neutron.api.rpc.handlers.resources_rpc'
-                                 '.ResourcesPushRpcApi').start()
-        self.rpc_api = rpc_api_cls.return_value
-        self.driver_manager = driver_mgr.QosServiceNotificationDriverManager()
-
-    def _validate_registry_params(self, event_type, policy):
-        self.rpc_api.push.assert_called_with(self.context, policy,
-                                             event_type)
-
-    def test_create_policy_default_configuration(self):
-        # RPC driver should be loaded by default
-        self.driver_manager.create_policy(self.context, self.policy)
-        self.assertFalse(self.rpc_api.push.called)
-
-    def test_update_policy_default_configuration(self):
-        # RPC driver should be loaded by default
-        self.driver_manager.update_policy(self.context, self.policy)
-        self._validate_registry_params(events.UPDATED, self.policy)
-
-    def test_delete_policy_default_configuration(self):
-        # RPC driver should be loaded by default
-        self.driver_manager.delete_policy(self.context, self.policy)
-        self._validate_registry_params(events.DELETED, self.policy)
-
-
-class TestQosDriversManagerMulti(TestQosDriversManagerBase):
-
-    def _test_multi_drivers_configuration_op(self, op):
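-        # Run a single CRUD operation through the manager and verify the
-        # call fans out to every configured notification driver.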
-        _load_multiple_drivers()
-        driver_manager = driver_mgr.QosServiceNotificationDriverManager()
-        handler = '%s_policy' % op
-        with mock.patch('.'.join([DUMMY_DRIVER, handler])) as dummy_mock:
-            rpc_driver = message_queue.RpcQosServiceNotificationDriver
-            with mock.patch.object(rpc_driver, handler) as rpc_mock:
-                getattr(driver_manager, handler)(self.context, self.policy)
-        for mock_ in (dummy_mock, rpc_mock):
-            mock_.assert_called_with(self.context, self.policy)
-
-    def test_multi_drivers_configuration_create(self):
-        self._test_multi_drivers_configuration_op('create')
-
-    def test_multi_drivers_configuration_update(self):
-        self._test_multi_drivers_configuration_op('update')
-
-    def test_multi_drivers_configuration_delete(self):
-        self._test_multi_drivers_configuration_op('delete')
diff --git a/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py b/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py
deleted file mode 100644 (file)
index 0a95cae..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.api.rpc.callbacks import events
-from neutron import context
-from neutron.objects.qos import policy as policy_object
-from neutron.objects.qos import rule as rule_object
-from neutron.services.qos.notification_drivers import message_queue
-from neutron.tests.unit.services.qos import base
-
-DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
-
-
-class TestQosRpcNotificationDriver(base.BaseQosTestCase):
-
-    def setUp(self):
-        super(TestQosRpcNotificationDriver, self).setUp()
-        rpc_api_cls = mock.patch('neutron.api.rpc.handlers.resources_rpc'
-                                 '.ResourcesPushRpcApi').start()
-        self.rpc_api = rpc_api_cls.return_value
-        self.driver = message_queue.RpcQosServiceNotificationDriver()
-
-        self.policy_data = {'policy': {
-                            'id': 7777777,
-                            'tenant_id': 888888,
-                            'name': 'test-policy',
-                            'description': 'test policy description',
-                            'shared': True}}
-
-        self.rule_data = {'bandwidth_limit_rule': {
-                            'id': 7777777,
-                            'max_kbps': 100,
-                            'max_burst_kbps': 150}}
-
-        self.context = context.get_admin_context()
-        self.policy = policy_object.QosPolicy(
-            self.context, **self.policy_data['policy'])
-
-        self.rule = rule_object.QosBandwidthLimitRule(
-            self.context, **self.rule_data['bandwidth_limit_rule'])
-
-    def _validate_push_params(self, event_type, policy):
-        self.rpc_api.push.assert_called_once_with(self.context, policy,
-                                                  event_type)
-
-    def test_create_policy(self):
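-        # A newly created policy is not applied to any port yet, so no
-        # push notification is expected.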
-        self.driver.create_policy(self.context, self.policy)
-        self.assertFalse(self.rpc_api.push.called)
-
-    def test_update_policy(self):
-        self.driver.update_policy(self.context, self.policy)
-        self._validate_push_params(events.UPDATED, self.policy)
-
-    def test_delete_policy(self):
-        self.driver.delete_policy(self.context, self.policy)
-        self._validate_push_params(events.DELETED, self.policy)
diff --git a/neutron/tests/unit/services/qos/test_qos_plugin.py b/neutron/tests/unit/services/qos/test_qos_plugin.py
deleted file mode 100644 (file)
index 1489b4d..0000000
+++ /dev/null
@@ -1,218 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron import manager
-from neutron.objects import base as base_object
-from neutron.objects.qos import policy as policy_object
-from neutron.objects.qos import rule as rule_object
-from neutron.plugins.common import constants
-from neutron.tests.unit.services.qos import base
-
-
-DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
-
-
-class TestQosPlugin(base.BaseQosTestCase):
-
-    def setUp(self):
-        super(TestQosPlugin, self).setUp()
-        self.setup_coreplugin()
-
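-        # Stub out all DB access so the plugin logic runs without a
-        # real database.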
-        mock.patch('neutron.db.api.create_object').start()
-        mock.patch('neutron.db.api.update_object').start()
-        mock.patch('neutron.db.api.delete_object').start()
-        mock.patch('neutron.db.api.get_object').start()
-        mock.patch(
-            'neutron.objects.qos.policy.QosPolicy.obj_load_attr').start()
-
-        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
-        cfg.CONF.set_override("service_plugins", ["qos"])
-
-        mgr = manager.NeutronManager.get_instance()
-        self.qos_plugin = mgr.get_service_plugins().get(
-            constants.QOS)
-
-        self.qos_plugin.notification_driver_manager = mock.Mock()
-
-        self.ctxt = context.Context('fake_user', 'fake_tenant')
-        self.policy_data = {
-            'policy': {'id': 7777777,
-                       'tenant_id': 888888,
-                       'name': 'test-policy',
-                       'description': 'Test policy description',
-                       'shared': True}}
-
-        self.rule_data = {
-            'bandwidth_limit_rule': {'id': 7777777,
-                                     'max_kbps': 100,
-                                     'max_burst_kbps': 150}}
-
-        self.policy = policy_object.QosPolicy(
-            self.ctxt, **self.policy_data['policy'])
-
-        self.rule = rule_object.QosBandwidthLimitRule(
-            self.ctxt, **self.rule_data['bandwidth_limit_rule'])
-
-    def _validate_notif_driver_params(self, method_name):
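-        # The notification driver manager is replaced with a Mock in
-        # setUp; verify the named method received a QosPolicy payload.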
-        method = getattr(self.qos_plugin.notification_driver_manager,
-                         method_name)
-        self.assertTrue(method.called)
-        self.assertIsInstance(
-            method.call_args[0][1], policy_object.QosPolicy)
-
-    def test_add_policy(self):
-        self.qos_plugin.create_policy(self.ctxt, self.policy_data)
-        self._validate_notif_driver_params('create_policy')
-
-    def test_update_policy(self):
-        fields = base_object.get_updatable_fields(
-            policy_object.QosPolicy, self.policy_data['policy'])
-        self.qos_plugin.update_policy(
-            self.ctxt, self.policy.id, {'policy': fields})
-        self._validate_notif_driver_params('update_policy')
-
-    @mock.patch('neutron.db.api.get_object', return_value=None)
-    def test_delete_policy(self, *mocks):
-        self.qos_plugin.delete_policy(self.ctxt, self.policy.id)
-        self._validate_notif_driver_params('delete_policy')
-
-    def test_create_policy_rule(self):
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=self.policy):
-            self.qos_plugin.create_policy_bandwidth_limit_rule(
-                self.ctxt, self.policy.id, self.rule_data)
-            self._validate_notif_driver_params('update_policy')
-
-    def test_update_policy_rule(self):
-        _policy = policy_object.QosPolicy(
-            self.ctxt, **self.policy_data['policy'])
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=_policy):
-            setattr(_policy, "rules", [self.rule])
-            self.qos_plugin.update_policy_bandwidth_limit_rule(
-                self.ctxt, self.rule.id, self.policy.id, self.rule_data)
-            self._validate_notif_driver_params('update_policy')
-
-    def test_update_policy_rule_bad_policy(self):
-        _policy = policy_object.QosPolicy(
-            self.ctxt, **self.policy_data['policy'])
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=_policy):
-            setattr(_policy, "rules", [])
-            self.assertRaises(
-                n_exc.QosRuleNotFound,
-                self.qos_plugin.update_policy_bandwidth_limit_rule,
-                self.ctxt, self.rule.id, self.policy.id,
-                self.rule_data)
-
-    def test_delete_policy_rule(self):
-        _policy = policy_object.QosPolicy(
-            self.ctxt, **self.policy_data['policy'])
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=_policy):
-            setattr(_policy, "rules", [self.rule])
-            self.qos_plugin.delete_policy_bandwidth_limit_rule(
-                self.ctxt, self.rule.id, _policy.id)
-            self._validate_notif_driver_params('update_policy')
-
-    def test_delete_policy_rule_bad_policy(self):
-        _policy = policy_object.QosPolicy(
-            self.ctxt, **self.policy_data['policy'])
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=_policy):
-            setattr(_policy, "rules", [])
-            self.assertRaises(
-                n_exc.QosRuleNotFound,
-                self.qos_plugin.delete_policy_bandwidth_limit_rule,
-                self.ctxt, self.rule.id, _policy.id)
-
-    def test_get_policy_bandwidth_limit_rules_for_policy(self):
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=self.policy):
-            with mock.patch('neutron.objects.qos.rule.'
-                            'QosBandwidthLimitRule.'
-                            'get_objects') as get_object_mock:
-                self.qos_plugin.get_policy_bandwidth_limit_rules(
-                    self.ctxt, self.policy.id)
-                get_object_mock.assert_called_once_with(
-                    self.ctxt, qos_policy_id=self.policy.id)
-
-    def test_get_policy_bandwidth_limit_rules_for_policy_with_filters(self):
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=self.policy):
-            with mock.patch('neutron.objects.qos.rule.'
-                            'QosBandwidthLimitRule.'
-                            'get_objects') as get_object_mock:
-
-                filters = {'filter': 'filter_id'}
-                self.qos_plugin.get_policy_bandwidth_limit_rules(
-                    self.ctxt, self.policy.id, filters=filters)
-                get_object_mock.assert_called_once_with(
-                    self.ctxt, qos_policy_id=self.policy.id,
-                    filter='filter_id')
-
-    def test_get_policy_for_nonexistent_policy(self):
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=None):
-            self.assertRaises(
-                n_exc.QosPolicyNotFound,
-                self.qos_plugin.get_policy,
-                self.ctxt, self.policy.id)
-
-    def test_get_policy_bandwidth_limit_rule_for_nonexistent_policy(self):
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=None):
-            self.assertRaises(
-                n_exc.QosPolicyNotFound,
-                self.qos_plugin.get_policy_bandwidth_limit_rule,
-                self.ctxt, self.rule.id, self.policy.id)
-
-    def test_get_policy_bandwidth_limit_rules_for_nonexistent_policy(self):
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=None):
-            self.assertRaises(
-                n_exc.QosPolicyNotFound,
-                self.qos_plugin.get_policy_bandwidth_limit_rules,
-                self.ctxt, self.policy.id)
-
-    def test_create_policy_rule_for_nonexistent_policy(self):
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=None):
-            self.assertRaises(
-                n_exc.QosPolicyNotFound,
-                self.qos_plugin.create_policy_bandwidth_limit_rule,
-                self.ctxt, self.policy.id, self.rule_data)
-
-    def test_update_policy_rule_for_nonexistent_policy(self):
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=None):
-            self.assertRaises(
-                n_exc.QosPolicyNotFound,
-                self.qos_plugin.update_policy_bandwidth_limit_rule,
-                self.ctxt, self.rule.id, self.policy.id, self.rule_data)
-
-    def test_delete_policy_rule_for_nonexistent_policy(self):
-        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
-                        return_value=None):
-            self.assertRaises(
-                n_exc.QosPolicyNotFound,
-                self.qos_plugin.delete_policy_bandwidth_limit_rule,
-                self.ctxt, self.rule.id, self.policy.id)
diff --git a/neutron/tests/unit/services/test_provider_configuration.py b/neutron/tests/unit/services/test_provider_configuration.py
deleted file mode 100644 (file)
index 7a06694..0000000
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright 2013 VMware, Inc. All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from oslo_config import cfg
-
-from neutron.common import exceptions as n_exc
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.services import provider_configuration as provconf
-from neutron.tests import base
-
-
-class ParseServiceProviderConfigurationTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(ParseServiceProviderConfigurationTestCase, self).setUp()
-        self.service_providers = mock.patch.object(
-            provconf.NeutronModule, 'service_providers').start()
-
-    def _set_override(self, service_providers):
-        self.service_providers.return_value = service_providers
-
-    def test_default_service_provider_configuration(self):
-        providers = cfg.CONF.service_providers.service_provider
-        self.assertEqual([], providers)
-
-    def test_parse_single_service_provider_opt(self):
-        self._set_override([constants.LOADBALANCER +
-                           ':lbaas:driver_path'])
-        expected = {'service_type': constants.LOADBALANCER,
-                    'name': 'lbaas',
-                    'driver': 'driver_path',
-                    'default': False}
-        res = provconf.parse_service_provider_opt()
-        self.assertEqual(1, len(res))
-        self.assertEqual([expected], res)
-
-    def test_parse_single_default_service_provider_opt(self):
-        self._set_override([constants.LOADBALANCER +
-                           ':lbaas:driver_path:default'])
-        expected = {'service_type': constants.LOADBALANCER,
-                    'name': 'lbaas',
-                    'driver': 'driver_path',
-                    'default': True}
-        res = provconf.parse_service_provider_opt()
-        self.assertEqual(1, len(res))
-        self.assertEqual([expected], res)
-
-    def test_parse_multi_service_provider_opt(self):
-        self._set_override([constants.LOADBALANCER +
-                            ':lbaas:driver_path',
-                            constants.LOADBALANCER + ':name1:path1',
-                            constants.LOADBALANCER +
-                            ':name2:path2:default'])
-        res = provconf.parse_service_provider_opt()
-        # This parsing crosses repos if additional projects are installed,
-        # so check that at least what we expect is there; there may be more.
-        self.assertGreaterEqual(len(res), 3)
-
-    def test_parse_service_provider_invalid_format(self):
-        self._set_override([constants.LOADBALANCER +
-                           ':lbaas:driver_path',
-                           'svc_type:name1:path1:def'])
-        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
-        self._set_override([constants.LOADBALANCER +
-                           ':',
-                           'svc_type:name1:path1:def'])
-        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
-
-    def test_parse_service_provider_name_too_long(self):
-        name = 'a' * 256
-        self._set_override([constants.LOADBALANCER +
-                           ':' + name + ':driver_path',
-                           'svc_type:name1:path1:def'])
-        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
-
-
-class ProviderConfigurationTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(ProviderConfigurationTestCase, self).setUp()
-        self.service_providers = mock.patch.object(
-            provconf.NeutronModule, 'service_providers').start()
-
-    def _set_override(self, service_providers):
-        self.service_providers.return_value = service_providers
-
-    def test_ensure_driver_unique(self):
-        pconf = provconf.ProviderConfiguration()
-        pconf.providers[('svctype', 'name')] = {'driver': 'driver',
-                                                'default': True}
-        self.assertRaises(n_exc.Invalid,
-                          pconf._ensure_driver_unique, 'driver')
-        self.assertIsNone(pconf._ensure_driver_unique('another_driver1'))
-
-    def test_ensure_default_unique(self):
-        pconf = provconf.ProviderConfiguration()
-        pconf.providers[('svctype', 'name')] = {'driver': 'driver',
-                                                'default': True}
-        self.assertRaises(n_exc.Invalid,
-                          pconf._ensure_default_unique,
-                          'svctype', True)
-        self.assertIsNone(pconf._ensure_default_unique('svctype', False))
-        self.assertIsNone(pconf._ensure_default_unique('svctype1', True))
-        self.assertIsNone(pconf._ensure_default_unique('svctype1', False))
-
-    def test_add_provider(self):
-        pconf = provconf.ProviderConfiguration()
-        prov = {'service_type': constants.LOADBALANCER,
-                'name': 'name',
-                'driver': 'path',
-                'default': False}
-        pconf.add_provider(prov)
-        self.assertEqual(1, len(pconf.providers))
-        self.assertEqual([(constants.LOADBALANCER, 'name')],
-                         list(pconf.providers.keys()))
-        self.assertEqual([{'driver': 'path', 'default': False}],
-                         list(pconf.providers.values()))
-
-    def test_add_duplicate_provider(self):
-        pconf = provconf.ProviderConfiguration()
-        prov = {'service_type': constants.LOADBALANCER,
-                'name': 'name',
-                'driver': 'path',
-                'default': False}
-        pconf.add_provider(prov)
-        self.assertRaises(n_exc.Invalid, pconf.add_provider, prov)
-        self.assertEqual(1, len(pconf.providers))
-
-    def test_get_service_providers(self):
-        self._set_override([constants.LOADBALANCER + ':name:path',
-                            constants.LOADBALANCER + ':name2:path2',
-                            'st2:name:driver:default',
-                            'st3:name2:driver2:default'])
-        provs = [{'service_type': constants.LOADBALANCER,
-                  'name': 'name',
-                  'driver': 'path',
-                  'default': False},
-                 {'service_type': constants.LOADBALANCER,
-                  'name': 'name2',
-                  'driver': 'path2',
-                  'default': False},
-                 {'service_type': 'st2',
-                  'name': 'name',
-                  'driver': 'driver',
-                  'default': True},
-                 {'service_type': 'st3',
-                  'name': 'name2',
-                  'driver': 'driver2',
-                  'default': True}]
-        pconf = provconf.ProviderConfiguration()
-        for prov in provs:
-            p = pconf.get_service_providers(
-                filters={'name': [prov['name']],
-                         'service_type': prov['service_type']}
-            )
-            self.assertEqual([prov], p)
-
-    def test_get_service_providers_with_fields(self):
-        self._set_override([constants.LOADBALANCER + ":name:path",
-                            constants.LOADBALANCER + ":name2:path2"])
-        provs = [{'service_type': constants.LOADBALANCER,
-                  'name': 'name',
-                  'driver': 'path',
-                  'default': False},
-                 {'service_type': constants.LOADBALANCER,
-                  'name': 'name2',
-                  'driver': 'path2',
-                  'default': False}]
-        pconf = provconf.ProviderConfiguration()
-        for prov in provs:
-            p = pconf.get_service_providers(
-                filters={'name': [prov['name']],
-                         'service_type': prov['service_type']},
-                fields=['name']
-            )
-            self.assertEqual([{'name': prov['name']}], p)
-
-
-class GetProviderDriverClassTestCase(base.BaseTestCase):
-    def test_get_provider_driver_class_hit(self):
-        driver = 'ml2'
-        expected = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-        actual = provconf.get_provider_driver_class(
-            driver,
-            namespace=manager.CORE_PLUGINS_NAMESPACE)
-        self.assertEqual(expected, actual)
-
-    def test_get_provider_driver_class_miss(self):
-        retval = provconf.get_provider_driver_class('foo')
-        self.assertEqual('foo', retval)
-
-
-class NeutronModuleTestCase(base.BaseTestCase):
-
-    def test_can_parse_multi_opt_service_provider_from_conf_file(self):
-        mod = provconf.NeutronModule('neutron_test')
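-        # Read the service_provider entries from the test etc directory
-        # rather than from the system configuration files.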
-        mod.ini(base.ETCDIR)
-        self.assertEqual(['foo', 'bar'], mod.service_providers(),
-                         'Expected both providers to be read from the file')
diff --git a/neutron/tests/unit/test_auth.py b/neutron/tests/unit/test_auth.py
deleted file mode 100644 (file)
index b732a4c..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_middleware import request_id
-import webob
-
-from neutron import auth
-from neutron.tests import base
-
-
-class NeutronKeystoneContextTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(NeutronKeystoneContextTestCase, self).setUp()
-
-        @webob.dec.wsgify
-        def fake_app(req):
-            self.context = req.environ['neutron.context']
-            return webob.Response()
-
-        self.context = None
-        self.middleware = auth.NeutronKeystoneContext(fake_app)
-        self.request = webob.Request.blank('/')
-        self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
-
-    def test_no_user_id(self):
-        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
-        response = self.request.get_response(self.middleware)
-        self.assertEqual('401 Unauthorized', response.status)
-
-    def test_with_user_id(self):
-        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
-        self.request.headers['X_USER_ID'] = 'testuserid'
-        response = self.request.get_response(self.middleware)
-        self.assertEqual('200 OK', response.status)
-        self.assertEqual('testuserid', self.context.user_id)
-        self.assertEqual('testuserid', self.context.user)
-
-    def test_with_tenant_id(self):
-        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
-        self.request.headers['X_USER_ID'] = 'test_user_id'
-        response = self.request.get_response(self.middleware)
-        self.assertEqual('200 OK', response.status)
-        self.assertEqual('testtenantid', self.context.tenant_id)
-        self.assertEqual('testtenantid', self.context.tenant)
-
-    def test_roles_no_admin(self):
-        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
-        self.request.headers['X_USER_ID'] = 'testuserid'
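-        # The irregular spacing in the roles header is intentional: the
-        # middleware must strip whitespace when splitting the roles.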
-        self.request.headers['X_ROLES'] = 'role1, role2 , role3,role4,role5'
-        response = self.request.get_response(self.middleware)
-        self.assertEqual('200 OK', response.status)
-        self.assertEqual(['role1', 'role2', 'role3', 'role4', 'role5'],
-                         self.context.roles)
-        self.assertFalse(self.context.is_admin)
-
-    def test_roles_with_admin(self):
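-        # The mixed-case 'AdMiN' entry checks that the admin role match
-        # is case-insensitive.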
-        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
-        self.request.headers['X_USER_ID'] = 'testuserid'
-        self.request.headers['X_ROLES'] = ('role1, role2 , role3,role4,role5,'
-                                           'AdMiN')
-        response = self.request.get_response(self.middleware)
-        self.assertEqual('200 OK', response.status)
-        self.assertEqual(['role1', 'role2', 'role3', 'role4', 'role5',
-                          'AdMiN'], self.context.roles)
-        self.assertTrue(self.context.is_admin)
-
-    def test_with_user_tenant_name(self):
-        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
-        self.request.headers['X_USER_ID'] = 'testuserid'
-        self.request.headers['X_PROJECT_NAME'] = 'testtenantname'
-        self.request.headers['X_USER_NAME'] = 'testusername'
-        response = self.request.get_response(self.middleware)
-        self.assertEqual('200 OK', response.status)
-        self.assertEqual('testuserid', self.context.user_id)
-        self.assertEqual('testusername', self.context.user_name)
-        self.assertEqual('testtenantid', self.context.tenant_id)
-        self.assertEqual('testtenantname', self.context.tenant_name)
-
-    def test_request_id_extracted_from_env(self):
-        req_id = 'dummy-request-id'
-        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
-        self.request.headers['X_USER_ID'] = 'testuserid'
-        self.request.environ[request_id.ENV_REQUEST_ID] = req_id
-        self.request.get_response(self.middleware)
-        self.assertEqual(req_id, self.context.request_id)
-
-    def test_with_auth_token(self):
-        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
-        self.request.headers['X_USER_ID'] = 'testuserid'
-        response = self.request.get_response(self.middleware)
-        self.assertEqual('200 OK', response.status)
-        self.assertEqual('testauthtoken', self.context.auth_token)
-
-    def test_without_auth_token(self):
-        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
-        self.request.headers['X_USER_ID'] = 'testuserid'
-        del self.request.headers['X_AUTH_TOKEN']
-        self.request.get_response(self.middleware)
-        self.assertIsNone(self.context.auth_token)
diff --git a/neutron/tests/unit/test_context.py b/neutron/tests/unit/test_context.py
deleted file mode 100644 (file)
index 362f13e..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright 2012 VMware, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_context import context as oslo_context
-from testtools import matchers
-
-from neutron import context
-from neutron.tests import base
-
-
-class TestNeutronContext(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestNeutronContext, self).setUp()
-        db_api = 'neutron.db.api.get_session'
-        self._db_api_session_patcher = mock.patch(db_api)
-        self.db_api_session = self._db_api_session_patcher.start()
-
-    def test_neutron_context_create(self):
-        ctx = context.Context('user_id', 'tenant_id')
-        self.assertEqual('user_id', ctx.user_id)
-        self.assertEqual('tenant_id', ctx.project_id)
-        self.assertEqual('tenant_id', ctx.tenant_id)
-        request_id = ctx.request_id
-        if isinstance(request_id, bytes):
-            request_id = request_id.decode('utf-8')
-        self.assertThat(request_id, matchers.StartsWith('req-'))
-        self.assertEqual('user_id', ctx.user)
-        self.assertEqual('tenant_id', ctx.tenant)
-        self.assertIsNone(ctx.user_name)
-        self.assertIsNone(ctx.tenant_name)
-        self.assertIsNone(ctx.auth_token)
-
-    def test_neutron_context_create_with_name(self):
-        ctx = context.Context('user_id', 'tenant_id',
-                              tenant_name='tenant_name', user_name='user_name')
-        # Check name is set
-        self.assertEqual('user_name', ctx.user_name)
-        self.assertEqual('tenant_name', ctx.tenant_name)
-        # Check user/tenant contains its ID even if user/tenant_name is passed
-        self.assertEqual('user_id', ctx.user)
-        self.assertEqual('tenant_id', ctx.tenant)
-
-    def test_neutron_context_create_with_request_id(self):
-        ctx = context.Context('user_id', 'tenant_id', request_id='req_id_xxx')
-        self.assertEqual('req_id_xxx', ctx.request_id)
-
-    def test_neutron_context_create_with_auth_token(self):
-        ctx = context.Context('user_id', 'tenant_id',
-                              auth_token='auth_token_xxx')
-        self.assertEqual('auth_token_xxx', ctx.auth_token)
-
-    def test_neutron_context_to_dict(self):
-        ctx = context.Context('user_id', 'tenant_id')
-        ctx_dict = ctx.to_dict()
-        self.assertEqual('user_id', ctx_dict['user_id'])
-        self.assertEqual('tenant_id', ctx_dict['project_id'])
-        self.assertEqual(ctx.request_id, ctx_dict['request_id'])
-        self.assertEqual('user_id', ctx_dict['user'])
-        self.assertEqual('tenant_id', ctx_dict['tenant'])
-        self.assertIsNone(ctx_dict['user_name'])
-        self.assertIsNone(ctx_dict['tenant_name'])
-        self.assertIsNone(ctx_dict['project_name'])
-        self.assertIsNone(ctx_dict['auth_token'])
-
-    def test_neutron_context_to_dict_with_name(self):
-        ctx = context.Context('user_id', 'tenant_id',
-                              tenant_name='tenant_name', user_name='user_name')
-        ctx_dict = ctx.to_dict()
-        self.assertEqual('user_name', ctx_dict['user_name'])
-        self.assertEqual('tenant_name', ctx_dict['tenant_name'])
-        self.assertEqual('tenant_name', ctx_dict['project_name'])
-
-    def test_neutron_context_to_dict_with_auth_token(self):
-        ctx = context.Context('user_id', 'tenant_id',
-                              auth_token='auth_token_xxx')
-        ctx_dict = ctx.to_dict()
-        self.assertEqual('auth_token_xxx', ctx_dict['auth_token'])
-
-    def test_neutron_context_admin_to_dict(self):
-        self.db_api_session.return_value = 'fakesession'
-        ctx = context.get_admin_context()
-        ctx_dict = ctx.to_dict()
-        self.assertIsNone(ctx_dict['user_id'])
-        self.assertIsNone(ctx_dict['tenant_id'])
-        self.assertIsNone(ctx_dict['auth_token'])
-        self.assertIsNotNone(ctx.session)
-        self.assertNotIn('session', ctx_dict)
-
-    def test_neutron_context_admin_without_session_to_dict(self):
-        ctx = context.get_admin_context_without_session()
-        ctx_dict = ctx.to_dict()
-        self.assertIsNone(ctx_dict['user_id'])
-        self.assertIsNone(ctx_dict['tenant_id'])
-        self.assertIsNone(ctx_dict['auth_token'])
-        self.assertFalse(hasattr(ctx, 'session'))
-
-    def test_neutron_context_elevated_retains_request_id(self):
-        ctx = context.Context('user_id', 'tenant_id')
-        self.assertFalse(ctx.is_admin)
-        req_id_before = ctx.request_id
-
-        elevated_ctx = ctx.elevated()
-        self.assertTrue(elevated_ctx.is_admin)
-        self.assertEqual(req_id_before, elevated_ctx.request_id)
-
-    def test_neutron_context_overwrite(self):
-        ctx1 = context.Context('user_id', 'tenant_id')
-        self.assertEqual(ctx1.request_id,
-                         oslo_context.get_current().request_id)
-
-        # If overwrite is not specified, request_id should be updated.
-        ctx2 = context.Context('user_id', 'tenant_id')
-        self.assertNotEqual(ctx2.request_id, ctx1.request_id)
-        self.assertEqual(ctx2.request_id,
-                         oslo_context.get_current().request_id)
-
-        # If overwrite is specified, request_id should be kept.
-        ctx3 = context.Context('user_id', 'tenant_id', overwrite=False)
-        self.assertNotEqual(ctx3.request_id, ctx2.request_id)
-        self.assertEqual(ctx2.request_id,
-                         oslo_context.get_current().request_id)
-
-    def test_neutron_context_get_admin_context_not_update_local_store(self):
-        ctx = context.Context('user_id', 'tenant_id')
-        req_id_before = oslo_context.get_current().request_id
-        self.assertEqual(ctx.request_id, req_id_before)
-
-        ctx_admin = context.get_admin_context()
-        self.assertEqual(req_id_before, oslo_context.get_current().request_id)
-        self.assertNotEqual(req_id_before, ctx_admin.request_id)
diff --git a/neutron/tests/unit/test_manager.py b/neutron/tests/unit/test_manager.py
deleted file mode 100644 (file)
index c90d040..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import weakref
-
-import fixtures
-from oslo_config import cfg
-
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.tests import base
-from neutron.tests.unit import dummy_plugin
-from neutron.tests.unit import testlib_api
-
-
-DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
-
-
-class MultiServiceCorePlugin(object):
-    supported_extension_aliases = ['lbaas', 'dummy']
-
-
-class CorePluginWithAgentNotifiers(object):
-    agent_notifiers = {'l3': 'l3_agent_notifier',
-                       'dhcp': 'dhcp_agent_notifier'}
-
-
-class NeutronManagerTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(NeutronManagerTestCase, self).setUp()
-        self.config_parse()
-        self.setup_coreplugin()
-        self.useFixture(
-            fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance'))
-
-    def test_service_plugin_is_loaded(self):
-        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
-        cfg.CONF.set_override("service_plugins",
-                              ["neutron.tests.unit.dummy_plugin."
-                               "DummyServicePlugin"])
-        mgr = manager.NeutronManager.get_instance()
-        plugin = mgr.get_service_plugins()[constants.DUMMY]
-
-        self.assertIsInstance(
-            plugin, dummy_plugin.DummyServicePlugin,
-            "loaded plugin should be of type neutronDummyPlugin")
-
-    def test_service_plugin_by_name_is_loaded(self):
-        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
-        cfg.CONF.set_override("service_plugins", ["dummy"])
-        mgr = manager.NeutronManager.get_instance()
-        plugin = mgr.get_service_plugins()[constants.DUMMY]
-
-        self.assertIsInstance(
-            plugin, dummy_plugin.DummyServicePlugin,
-            "loaded plugin should be of type neutronDummyPlugin")
-
-    def test_multiple_plugins_specified_for_service_type(self):
-        cfg.CONF.set_override("service_plugins",
-                              ["neutron.tests.unit.dummy_plugin."
-                               "DummyServicePlugin",
-                               "neutron.tests.unit.dummy_plugin."
-                               "DummyServicePlugin"])
-        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
-        e = self.assertRaises(ValueError, manager.NeutronManager.get_instance)
-        self.assertIn(constants.DUMMY, str(e))
-
-    def test_multiple_plugins_by_name_specified_for_service_type(self):
-        cfg.CONF.set_override("service_plugins", ["dummy", "dummy"])
-        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
-        self.assertRaises(ValueError, manager.NeutronManager.get_instance)
-
-    def test_multiple_plugins_mixed_specified_for_service_type(self):
-        cfg.CONF.set_override("service_plugins",
-                              ["neutron.tests.unit.dummy_plugin."
-                               "DummyServicePlugin", "dummy"])
-        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
-        self.assertRaises(ValueError, manager.NeutronManager.get_instance)
-
-    def test_service_plugin_conflicts_with_core_plugin(self):
-        cfg.CONF.set_override("service_plugins",
-                              ["neutron.tests.unit.dummy_plugin."
-                               "DummyServicePlugin"])
-        cfg.CONF.set_override("core_plugin",
-                              "neutron.tests.unit.test_manager."
-                              "MultiServiceCorePlugin")
-        e = self.assertRaises(ValueError, manager.NeutronManager.get_instance)
-        self.assertIn(constants.DUMMY, str(e))
-
-    def test_core_plugin_supports_services(self):
-        cfg.CONF.set_override("core_plugin",
-                              "neutron.tests.unit.test_manager."
-                              "MultiServiceCorePlugin")
-        mgr = manager.NeutronManager.get_instance()
-        svc_plugins = mgr.get_service_plugins()
-        self.assertEqual(3, len(svc_plugins))
-        self.assertIn(constants.CORE, svc_plugins.keys())
-        self.assertIn(constants.LOADBALANCER, svc_plugins.keys())
-        self.assertIn(constants.DUMMY, svc_plugins.keys())
-
-    def test_post_plugin_validation(self):
-        cfg.CONF.import_opt('dhcp_agents_per_network',
-                            'neutron.db.agentschedulers_db')
-
-        self.assertIsNone(manager.validate_post_plugin_load())
-        cfg.CONF.set_override('dhcp_agents_per_network', 2)
-        self.assertIsNone(manager.validate_post_plugin_load())
-        cfg.CONF.set_override('dhcp_agents_per_network', 0)
-        self.assertIsNotNone(manager.validate_post_plugin_load())
-        cfg.CONF.set_override('dhcp_agents_per_network', -1)
-        self.assertIsNotNone(manager.validate_post_plugin_load())
-
-    def test_pre_plugin_validation(self):
-        self.assertIsNotNone(manager.validate_pre_plugin_load())
-        cfg.CONF.set_override('core_plugin', 'dummy.plugin')
-        self.assertIsNone(manager.validate_pre_plugin_load())
-
-    def test_manager_gathers_agent_notifiers_from_service_plugins(self):
-        cfg.CONF.set_override("service_plugins",
-                              ["neutron.tests.unit.dummy_plugin."
-                               "DummyServicePlugin"])
-        cfg.CONF.set_override("core_plugin",
-                              "neutron.tests.unit.test_manager."
-                              "CorePluginWithAgentNotifiers")
-        expected = {'l3': 'l3_agent_notifier',
-                    'dhcp': 'dhcp_agent_notifier',
-                    'dummy': 'dummy_agent_notifier'}
-        core_plugin = manager.NeutronManager.get_plugin()
-        self.assertEqual(expected, core_plugin.agent_notifiers)
-
-    def test_load_class_for_provider(self):
-        manager.NeutronManager.load_class_for_provider(
-            'neutron.core_plugins', 'ml2')
-
-    def test_load_class_for_provider_wrong_plugin(self):
-        with testlib_api.ExpectedException(ImportError):
-            manager.NeutronManager.load_class_for_provider(
-                    'neutron.core_plugins', 'ml2XXXXXX')
-
-    def test_get_service_plugin_by_path_prefix_3(self):
-        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
-        nm = manager.NeutronManager.get_instance()
-
-        class pclass(object):
-            def __init__(self, path_prefix):
-                self.path_prefix = path_prefix
-
-        x_plugin, y_plugin = pclass('xpa'), pclass('ypa')
-        nm.service_plugins['x'], nm.service_plugins['y'] = x_plugin, y_plugin
-
-        self.assertEqual(weakref.proxy(x_plugin),
-                         nm.get_service_plugin_by_path_prefix('xpa'))
-        self.assertEqual(weakref.proxy(y_plugin),
-                         nm.get_service_plugin_by_path_prefix('ypa'))
-        self.assertIsNone(nm.get_service_plugin_by_path_prefix('abc'))
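
The path-prefix assertions above compare the lookup result against weakref.proxy objects: a proxy forwards attribute access and comparisons to its referent while the referent is alive, so plugins can be handed out without creating strong references. A standard-library sketch:

    import weakref

    class Plugin(object):
        def __init__(self, path_prefix):
            self.path_prefix = path_prefix

    plugin = Plugin('xpa')
    proxy = weakref.proxy(plugin)

    assert proxy.path_prefix == 'xpa'       # attribute access is forwarded
    assert proxy == weakref.proxy(plugin)   # comparisons unwrap to the referent

    del plugin                              # drop the only strong reference
    try:
        proxy.path_prefix
    except ReferenceError:
        pass                                # dead proxies raise on access
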
diff --git a/neutron/tests/unit/test_policy.py b/neutron/tests/unit/test_policy.py
deleted file mode 100644 (file)
index 23ad4d9..0000000
+++ /dev/null
@@ -1,604 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Test of Policy Engine For Neutron"""
-
-import mock
-from oslo_policy import fixture as op_fixture
-from oslo_policy import policy as oslo_policy
-from oslo_serialization import jsonutils
-from oslo_utils import importutils
-
-import neutron
-from neutron.api.v2 import attributes
-from neutron.common import constants as const
-from neutron.common import exceptions
-from neutron import context
-from neutron import manager
-from neutron import policy
-from neutron.tests import base
-
-
-class PolicyFileTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(PolicyFileTestCase, self).setUp()
-        self.context = context.Context('fake', 'fake', is_admin=False)
-        self.target = {'tenant_id': 'fake'}
-
-    def test_modified_policy_reloads(self):
-        tmpfilename = self.get_temp_file_path('policy')
-        action = "example:test"
-        with open(tmpfilename, "w") as policyfile:
-            policyfile.write("""{"example:test": ""}""")
-        policy.refresh(policy_file=tmpfilename)
-        policy.enforce(self.context, action, self.target)
-        with open(tmpfilename, "w") as policyfile:
-            policyfile.write("""{"example:test": "!"}""")
-        policy.refresh(policy_file=tmpfilename)
-        self.target = {'tenant_id': 'fake_tenant'}
-        self.assertRaises(oslo_policy.PolicyNotAuthorized,
-                          policy.enforce,
-                          self.context,
-                          action,
-                          self.target)
-
-
-class PolicyTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(PolicyTestCase, self).setUp()
-        # NOTE(vish): preload rules to circumvent reloading from file
-        rules = {
-            "true": '@',
-            "example:allowed": '@',
-            "example:denied": '!',
-            "example:get_http": "http:http://www.example.com",
-            "example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s",
-            "example:early_and_fail": "! and @",
-            "example:early_or_success": "@ or !",
-            "example:lowercase_admin": "role:admin or role:sysadmin",
-            "example:uppercase_admin": "role:ADMIN or role:sysadmin",
-        }
-        policy.refresh()
-        # NOTE(vish): then overload underlying rules
-        policy.set_rules(oslo_policy.Rules.from_dict(rules))
-        self.context = context.Context('fake', 'fake', roles=['member'])
-        self.target = {}
-
-    def test_enforce_nonexistent_action_throws(self):
-        action = "example:noexist"
-        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
-                          self.context, action, self.target)
-
-    def test_enforce_bad_action_throws(self):
-        action = "example:denied"
-        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
-                          self.context, action, self.target)
-
-    def test_check_bad_action_noraise(self):
-        action = "example:denied"
-        result = policy.check(self.context, action, self.target)
-        self.assertFalse(result)
-
-    def test_check_non_existent_action(self):
-        action = "example:idonotexist"
-        result_1 = policy.check(self.context, action, self.target)
-        self.assertFalse(result_1)
-        result_2 = policy.check(self.context, action, self.target,
-                                might_not_exist=True)
-        self.assertTrue(result_2)
-
-    def test_enforce_good_action(self):
-        action = "example:allowed"
-        result = policy.enforce(self.context, action, self.target)
-        self.assertTrue(result)
-
-    def test_enforce_http_true(self):
-        self.useFixture(op_fixture.HttpCheckFixture())
-        action = "example:get_http"
-        target = {}
-        result = policy.enforce(self.context, action, target)
-        self.assertTrue(result)
-
-    def test_enforce_http_false(self):
-        self.useFixture(op_fixture.HttpCheckFixture(False))
-        action = "example:get_http"
-        target = {}
-        self.assertRaises(oslo_policy.PolicyNotAuthorized,
-                          policy.enforce, self.context,
-                          action, target)
-
-    def test_templatized_enforcement(self):
-        target_mine = {'tenant_id': 'fake'}
-        target_not_mine = {'tenant_id': 'another'}
-        action = "example:my_file"
-        policy.enforce(self.context, action, target_mine)
-        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
-                          self.context, action, target_not_mine)
-
-    def test_early_AND_enforcement(self):
-        action = "example:early_and_fail"
-        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
-                          self.context, action, self.target)
-
-    def test_early_OR_enforcement(self):
-        action = "example:early_or_success"
-        policy.enforce(self.context, action, self.target)
-
-    def test_ignore_case_role_check(self):
-        lowercase_action = "example:lowercase_admin"
-        uppercase_action = "example:uppercase_admin"
-        # NOTE(dprince): we mix case in the admin role here to ensure
-        # case is ignored
-        admin_context = context.Context('admin', 'fake', roles=['AdMiN'])
-        policy.enforce(admin_context, lowercase_action, self.target)
-        policy.enforce(admin_context, uppercase_action, self.target)
-
-
-class DefaultPolicyTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(DefaultPolicyTestCase, self).setUp()
-        tmpfilename = self.get_temp_file_path('policy.json')
-        self.rules = {
-            "default": '',
-            "example:exist": '!',
-        }
-        with open(tmpfilename, "w") as policyfile:
-            jsonutils.dump(self.rules, policyfile)
-        policy.refresh(policy_file=tmpfilename)
-
-        self.context = context.Context('fake', 'fake')
-
-    def test_policy_called(self):
-        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
-                          self.context, "example:exist", {})
-
-    def test_not_found_policy_calls_default(self):
-        policy.enforce(self.context, "example:noexist", {})
-
-
-FAKE_RESOURCE_NAME = 'fake_resource'
-FAKE_SPECIAL_RESOURCE_NAME = 'fake_policy'
-FAKE_RESOURCES = {"%ss" % FAKE_RESOURCE_NAME:
-                  {'attr': {'allow_post': True,
-                            'allow_put': True,
-                            'is_visible': True,
-                            'default': None,
-                            'enforce_policy': True,
-                            'validate': {'type:dict':
-                                         {'sub_attr_1': {'type:string': None},
-                                          'sub_attr_2': {'type:string': None}}}
-                            }},
-                  # special plural name
-                  "%s" % FAKE_SPECIAL_RESOURCE_NAME.replace('y', 'ies'):
-                  {'attr': {'allow_post': True,
-                            'allow_put': True,
-                            'is_visible': True,
-                            'default': None,
-                            'enforce_policy': True,
-                            'validate': {'type:dict':
-                                         {'sub_attr_1': {'type:string': None},
-                                          'sub_attr_2': {'type:string': None}}}
-                            }}}
-
-
-class NeutronPolicyTestCase(base.BaseTestCase):
-
-    def fakepolicyinit(self, **kwargs):
-        enf = policy._ENFORCER
-        enf.set_rules(oslo_policy.Rules(self.rules))
-
-    def setUp(self):
-        super(NeutronPolicyTestCase, self).setUp()
-        policy.refresh()
-        # Add Fake resources to RESOURCE_ATTRIBUTE_MAP
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCES)
-        self._set_rules()
-
-        def remove_fake_resource():
-            del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME]
-
-        self.patcher = mock.patch.object(neutron.policy,
-                                         'init',
-                                         new=self.fakepolicyinit)
-        self.patcher.start()
-        self.addCleanup(remove_fake_resource)
-        self.context = context.Context('fake', 'fake', roles=['user'])
-        plugin_klass = importutils.import_class(
-            "neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
-        self.manager_patcher = mock.patch('neutron.manager.NeutronManager')
-        fake_manager = self.manager_patcher.start()
-        fake_manager_instance = fake_manager.return_value
-        fake_manager_instance.plugin = plugin_klass()
-
-    def _set_rules(self, **kwargs):
-        rules_dict = {
-            "context_is_admin": "role:admin",
-            "context_is_advsvc": "role:advsvc",
-            "admin_or_network_owner": "rule:context_is_admin or "
-                                      "tenant_id:%(network:tenant_id)s",
-            "admin_or_owner": ("rule:context_is_admin or "
-                               "tenant_id:%(tenant_id)s"),
-            "admin_only": "rule:context_is_admin",
-            "regular_user": "role:user",
-            "shared": "field:networks:shared=True",
-            "external": "field:networks:router:external=True",
-            "network_device": "field:port:device_owner=~^network:",
-            "default": '@',
-
-            "create_network": "rule:admin_or_owner",
-            "create_network:shared": "rule:admin_only",
-            "update_network": '@',
-            "update_network:shared": "rule:admin_only",
-            "get_network": "rule:admin_or_owner or rule:shared or "
-                           "rule:external or rule:context_is_advsvc",
-            "create_subnet": "rule:admin_or_network_owner",
-            "create_port:mac": "rule:admin_or_network_owner or "
-                               "rule:context_is_advsvc",
-            "create_port:device_owner": "not rule:network_device",
-            "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
-            "get_port": "rule:admin_or_owner or rule:context_is_advsvc",
-            "delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
-            "create_fake_resource": "rule:admin_or_owner",
-            "create_fake_resource:attr": "rule:admin_or_owner",
-            "create_fake_resource:attr:sub_attr_1": "rule:admin_or_owner",
-            "create_fake_resource:attr:sub_attr_2": "rule:admin_only",
-
-            "create_fake_policy:": "rule:admin_or_owner",
-            "get_firewall_policy": "rule:admin_or_owner or "
-                            "rule:shared",
-            "get_firewall_rule": "rule:admin_or_owner or "
-                            "rule:shared",
-
-            "insert_rule": "rule:admin_or_owner",
-            "remove_rule": "rule:admin_or_owner",
-        }
-        rules_dict.update(**kwargs)
-        self.rules = oslo_policy.Rules.from_dict(rules_dict)
-
-    def test_firewall_policy_insert_rule_with_admin_context(self):
-        action = "insert_rule"
-        target = {}
-        result = policy.check(context.get_admin_context(), action, target)
-        self.assertTrue(result)
-
-    def test_firewall_policy_insert_rule_with_owner(self):
-        action = "insert_rule"
-        target = {"tenant_id": "own_tenant"}
-        user_context = context.Context('', "own_tenant", roles=['user'])
-        result = policy.check(user_context, action, target)
-        self.assertTrue(result)
-
-    def test_firewall_policy_remove_rule_without_admin_or_owner(self):
-        action = "remove_rule"
-        target = {"firewall_rule_id": "rule_id", "tenant_id": "tenantA"}
-        user_context = context.Context('', "another_tenant", roles=['user'])
-        result = policy.check(user_context, action, target)
-        self.assertFalse(result)
-
-    def _test_action_on_attr(self, context, action, obj, attr, value,
-                             exception=None, **kwargs):
-        action = "%s_%s" % (action, obj)
-        target = {'tenant_id': 'the_owner', attr: value}
-        if kwargs:
-            target.update(kwargs)
-        if exception:
-            self.assertRaises(exception, policy.enforce,
-                              context, action, target)
-        else:
-            result = policy.enforce(context, action, target)
-            self.assertTrue(result)
-
-    def _test_nonadmin_action_on_attr(self, action, attr, value,
-                                      exception=None, **kwargs):
-        user_context = context.Context('', "user", roles=['user'])
-        self._test_action_on_attr(user_context, action, "network", attr,
-                                  value, exception, **kwargs)
-
-    def _test_advsvc_action_on_attr(self, action, obj, attr, value,
-                                    exception=None, **kwargs):
-        user_context = context.Context('', "user",
-                                       roles=['user', 'advsvc'])
-        self._test_action_on_attr(user_context, action, obj, attr,
-                                  value, exception, **kwargs)
-
-    def test_nonadmin_write_on_private_fails(self):
-        self._test_nonadmin_action_on_attr('create', 'shared', False,
-                                           oslo_policy.PolicyNotAuthorized)
-
-    def test_nonadmin_read_on_private_fails(self):
-        self._test_nonadmin_action_on_attr('get', 'shared', False,
-                                           oslo_policy.PolicyNotAuthorized)
-
-    def test_nonadmin_write_on_shared_fails(self):
-        self._test_nonadmin_action_on_attr('create', 'shared', True,
-                                           oslo_policy.PolicyNotAuthorized)
-
-    def test_create_port_device_owner_regex(self):
-        blocked_values = (const.DEVICE_OWNER_NETWORK_PREFIX,
-                          'network:abdef',
-                          const.DEVICE_OWNER_DHCP,
-                          const.DEVICE_OWNER_ROUTER_INTF)
-        for val in blocked_values:
-            self._test_advsvc_action_on_attr(
-                'create', 'port', 'device_owner', val,
-                oslo_policy.PolicyNotAuthorized
-            )
-        ok_values = ('network', 'networks', 'my_network:test', 'my_network:')
-        for val in ok_values:
-            self._test_advsvc_action_on_attr(
-                'create', 'port', 'device_owner', val
-            )
-
-    def test_advsvc_get_network_works(self):
-        self._test_advsvc_action_on_attr('get', 'network', 'shared', False)
-
-    def test_advsvc_create_network_fails(self):
-        self._test_advsvc_action_on_attr('create', 'network', 'shared', False,
-                                         oslo_policy.PolicyNotAuthorized)
-
-    def test_advsvc_create_port_works(self):
-        self._test_advsvc_action_on_attr('create', 'port:mac', 'shared', False)
-
-    def test_advsvc_get_port_works(self):
-        self._test_advsvc_action_on_attr('get', 'port', 'shared', False)
-
-    def test_advsvc_update_port_works(self):
-        kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
-        self._test_advsvc_action_on_attr('update', 'port', 'shared', True,
-                                         **kwargs)
-
-    def test_advsvc_delete_port_works(self):
-        self._test_advsvc_action_on_attr('delete', 'port', 'shared', False)
-
-    def test_advsvc_create_subnet_fails(self):
-        self._test_advsvc_action_on_attr('create', 'subnet', 'shared', False,
-                                         oslo_policy.PolicyNotAuthorized)
-
-    def test_nonadmin_read_on_shared_succeeds(self):
-        self._test_nonadmin_action_on_attr('get', 'shared', True)
-
-    def test_check_is_admin_with_admin_context_succeeds(self):
-        admin_context = context.get_admin_context()
-        # explicitly set roles as this test verifies user credentials
-        # with the policy engine
-        admin_context.roles = ['admin']
-        self.assertTrue(policy.check_is_admin(admin_context))
-
-    def test_check_is_admin_with_user_context_fails(self):
-        self.assertFalse(policy.check_is_admin(self.context))
-
-    def test_check_is_admin_with_no_admin_policy_fails(self):
-        del self.rules[policy.ADMIN_CTX_POLICY]
-        admin_context = context.get_admin_context()
-        self.assertFalse(policy.check_is_admin(admin_context))
-
-    def test_check_is_advsvc_with_admin_context_fails(self):
-        admin_context = context.get_admin_context()
-        self.assertFalse(policy.check_is_advsvc(admin_context))
-
-    def test_check_is_advsvc_with_svc_context_succeeds(self):
-        svc_context = context.Context('', 'svc', roles=['advsvc'])
-        self.assertTrue(policy.check_is_advsvc(svc_context))
-
-    def test_check_is_advsvc_with_no_advsvc_policy_fails(self):
-        del self.rules[policy.ADVSVC_CTX_POLICY]
-        svc_context = context.Context('', 'svc', roles=['advsvc'])
-        self.assertFalse(policy.check_is_advsvc(svc_context))
-
-    def test_check_is_advsvc_with_user_context_fails(self):
-        self.assertFalse(policy.check_is_advsvc(self.context))
-
-    def _test_enforce_adminonly_attribute(self, action, **kwargs):
-        admin_context = context.get_admin_context()
-        target = {'shared': True}
-        if kwargs:
-            target.update(kwargs)
-        result = policy.enforce(admin_context, action, target)
-        self.assertTrue(result)
-
-    def test_enforce_adminonly_attribute_create(self):
-        self._test_enforce_adminonly_attribute('create_network')
-
-    def test_enforce_adminonly_attribute_update(self):
-        kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
-        self._test_enforce_adminonly_attribute('update_network', **kwargs)
-
-    def test_reset_adminonly_attr_to_default_fails(self):
-        kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
-        self._test_nonadmin_action_on_attr('update', 'shared', False,
-                                           oslo_policy.PolicyNotAuthorized,
-                                           **kwargs)
-
-    def test_enforce_adminonly_attribute_nonadminctx_returns_403(self):
-        action = "create_network"
-        target = {'shared': True, 'tenant_id': 'somebody_else'}
-        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
-                          self.context, action, target)
-
-    def _test_build_subattribute_match_rule(self, validate_value):
-        bk = FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate']
-        FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = (
-            validate_value)
-        action = "create_" + FAKE_RESOURCE_NAME
-        target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
-        self.assertFalse(policy._build_subattr_match_rule(
-            'attr',
-            FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr'],
-            action,
-            target))
-        FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk
-
-    def test_build_subattribute_match_rule_empty_dict_validator(self):
-        self._test_build_subattribute_match_rule({})
-
-    def test_build_subattribute_match_rule_wrong_validation_info(self):
-        self._test_build_subattribute_match_rule(
-            {'type:dict': 'wrong_stuff'})
-
-    def test_build_match_rule_special_pluralized(self):
-        action = "create_" + FAKE_SPECIAL_RESOURCE_NAME
-        pluralized = "create_fake_policies"
-        target = {}
-        result = policy._build_match_rule(action, target, pluralized)
-        self.assertEqual("rule:" + action, str(result))
-
-    def test_build_match_rule_normal_pluralized_when_create(self):
-        action = "create_" + FAKE_RESOURCE_NAME
-        target = {}
-        result = policy._build_match_rule(action, target, None)
-        self.assertEqual("rule:" + action, str(result))
-
-    def test_enforce_subattribute(self):
-        action = "create_" + FAKE_RESOURCE_NAME
-        target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
-        result = policy.enforce(self.context, action, target, None)
-        self.assertTrue(result)
-
-    def test_enforce_admin_only_subattribute(self):
-        action = "create_" + FAKE_RESOURCE_NAME
-        target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
-                                                'sub_attr_2': 'y'}}
-        result = policy.enforce(context.get_admin_context(),
-                                action, target, None)
-        self.assertTrue(result)
-
-    def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self):
-        action = "create_" + FAKE_RESOURCE_NAME
-        target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
-                                                'sub_attr_2': 'y'}}
-        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
-                          self.context, action, target, None)
-
-    def test_enforce_regularuser_on_read(self):
-        action = "get_network"
-        target = {'shared': True, 'tenant_id': 'somebody_else'}
-        result = policy.enforce(self.context, action, target)
-        self.assertTrue(result)
-
-    def test_enforce_firewall_policy_shared(self):
-        action = "get_firewall_policy"
-        target = {'shared': True, 'tenant_id': 'somebody_else'}
-        result = policy.enforce(self.context, action, target)
-        self.assertTrue(result)
-
-    def test_enforce_firewall_rule_shared(self):
-        action = "get_firewall_rule"
-        target = {'shared': True, 'tenant_id': 'somebody_else'}
-        result = policy.enforce(self.context, action, target)
-        self.assertTrue(result)
-
-    def test_enforce_tenant_id_check(self):
-        # Trigger a policy with rule admin_or_owner
-        action = "create_network"
-        target = {'tenant_id': 'fake'}
-        result = policy.enforce(self.context, action, target)
-        self.assertTrue(result)
-
-    def test_enforce_tenant_id_check_parent_resource(self):
-
-        def fakegetnetwork(*args, **kwargs):
-            return {'tenant_id': 'fake'}
-
-        action = "create_port:mac"
-        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
-                               'get_network', new=fakegetnetwork):
-            target = {'network_id': 'whatever'}
-            result = policy.enforce(self.context, action, target)
-            self.assertTrue(result)
-
-    def test_enforce_plugin_failure(self):
-
-        def fakegetnetwork(*args, **kwargs):
-            raise NotImplementedError('Blast!')
-
-        # The policy check and plugin method used in this test are irrelevant,
-        # so long as we verify that, if the plugin method blows up, the policy
-        # engine propagates the exception unchanged.
-        action = "create_port:mac"
-        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
-                               'get_network', new=fakegetnetwork):
-            target = {'network_id': 'whatever'}
-            self.assertRaises(NotImplementedError,
-                              policy.enforce,
-                              self.context,
-                              action,
-                              target)
-
-    def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self):
-
-        def fakegetnetwork(*args, **kwargs):
-            return {'tenant_id': 'fake'}
-
-        self._set_rules(
-            admin_or_network_owner="role:admin or "
-                                   "tenant_id:%(network_tenant_id)s")
-        action = "create_port:mac"
-        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
-                               'get_network', new=fakegetnetwork):
-            target = {'network_id': 'whatever'}
-            result = policy.enforce(self.context, action, target)
-            self.assertTrue(result)
-
-    def test_tenant_id_check_no_target_field_raises(self):
-        # Try to add a bad rule
-        self.assertRaises(
-            exceptions.PolicyInitError,
-            oslo_policy.Rules.from_dict,
-            {'test_policy': 'tenant_id:(wrong_stuff)'})
-
-    def _test_enforce_tenant_id_raises(self, bad_rule):
-        self._set_rules(admin_or_owner=bad_rule)
-        # Trigger a policy with rule admin_or_owner
-        action = "create_network"
-        target = {'tenant_id': 'fake'}
-        self.fakepolicyinit()
-        self.assertRaises(exceptions.PolicyCheckError,
-                          policy.enforce,
-                          self.context, action, target)
-
-    def test_enforce_tenant_id_check_malformed_target_field_raises(self):
-        self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s')
-
-    def test_enforce_tenant_id_check_invalid_parent_resource_raises(self):
-        self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s')
-
-    def test_process_rules(self):
-        action = "create_" + FAKE_RESOURCE_NAME
-        # Construct RuleChecks for an action, attribute and subattribute
-        match_rule = oslo_policy.RuleCheck('rule', action)
-        attr_rule = oslo_policy.RuleCheck(
-            'rule', '%s:%ss' % (action, FAKE_RESOURCE_NAME))
-        sub_attr_rules = [oslo_policy.RuleCheck(
-            'rule', '%s:%s:%s' % (action, 'attr', 'sub_attr_1'))]
-        # Build an AndCheck from the given RuleChecks
-        # Make the checks nested to better check the recursion
-        sub_attr_rules = oslo_policy.AndCheck(sub_attr_rules)
-        attr_rule = oslo_policy.AndCheck(
-            [attr_rule, sub_attr_rules])
-
-        match_rule = oslo_policy.AndCheck([match_rule, attr_rule])
-        # Assert that the rules are correctly extracted from the match_rule
-        rules = policy._process_rules_list([], match_rule)
-        self.assertEqual(['create_fake_resource',
-                          'create_fake_resource:fake_resources',
-                          'create_fake_resource:attr:sub_attr_1'], rules)
-
-    @mock.patch.object(policy.LOG, 'isEnabledFor', return_value=True)
-    @mock.patch.object(policy.LOG, 'debug')
-    def test_log_rule_list(self, mock_debug, mock_is_e):
-        policy.log_rule_list(oslo_policy.RuleCheck('rule', 'create_'))
-        self.assertTrue(mock_is_e.called)
-        self.assertTrue(mock_debug.called)
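
All of the rule strings above ('@' always allows, '!' always denies, role:, tenant_id: and rule: checks) are stock oslo.policy syntax rather than anything Neutron-specific. A self-contained sketch of the same enforcement flow without Neutron's policy module, assuming oslo.policy and oslo.config are installed:

    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.set_rules(oslo_policy.Rules.from_dict({
        "example:allowed": "@",                      # '@' always passes
        "example:denied": "!",                       # '!' always fails
        "example:owner": "tenant_id:%(tenant_id)s",  # templated target check
    }))

    creds = {"tenant_id": "fake", "roles": ["member"]}
    assert enforcer.enforce("example:allowed", {}, creds)
    assert enforcer.enforce("example:owner", {"tenant_id": "fake"}, creds)
    assert not enforcer.enforce("example:denied", {}, creds)

    # With do_raise=True enforce() raises instead of returning False, which
    # is the behavior the deleted tests assert through PolicyNotAuthorized.
    try:
        enforcer.enforce("example:denied", {}, creds, do_raise=True)
    except oslo_policy.PolicyNotAuthorized:
        pass
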
diff --git a/neutron/tests/unit/test_service.py b/neutron/tests/unit/test_service.py
deleted file mode 100644 (file)
index 6b66245..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2015 Mirantis Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron import service
-from neutron.tests.unit import test_wsgi
-
-
-class TestRpcWorker(test_wsgi.TestServiceBase):
-
-    def test_reset(self):
-        _plugin = mock.Mock()
-        rpc_worker = service.RpcWorker(_plugin)
-        self._test_reset(rpc_worker)
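
TestRpcWorker reuses _test_reset from test_wsgi (below): the logging and policy hooks are patched out and reset() must call each exactly once. The underlying mock pattern, in a self-contained sketch with illustrative names (setup_logging and Worker are placeholders, not Neutron APIs):

    import mock  # unittest.mock in the Python 3 standard library

    def setup_logging():
        raise RuntimeError('patched out in the test, never called')

    class Worker(object):
        def reset(self):
            setup_logging()  # resolved at call time, so patching works

    with mock.patch(__name__ + '.setup_logging') as setup_mock:
        Worker().reset()
        setup_mock.assert_called_once_with()
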
diff --git a/neutron/tests/unit/test_wsgi.py b/neutron/tests/unit/test_wsgi.py
deleted file mode 100644 (file)
index ff6515e..0000000
+++ /dev/null
@@ -1,706 +0,0 @@
-# Copyright 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import socket
-import ssl
-
-import mock
-from oslo_config import cfg
-import six.moves.urllib.request as urlrequest
-import testtools
-import webob
-import webob.exc
-
-from neutron.common import exceptions as exception
-from neutron.db import api
-from neutron.tests import base
-from neutron.tests.common import helpers
-from neutron import wsgi
-
-CONF = cfg.CONF
-
-TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                               '..', 'var'))
-
-
-def open_no_proxy(*args, **kwargs):
-    # NOTE(jamespage):
-    # Deal with more secure certification chain verification
-    # introduced in python 2.7.9 under PEP-0476
-    # https://github.com/python/peps/blob/master/pep-0476.txt
-    if hasattr(ssl, "_create_unverified_context"):
-        opener = urlrequest.build_opener(
-            urlrequest.ProxyHandler({}),
-            urlrequest.HTTPSHandler(context=ssl._create_unverified_context())
-        )
-    else:
-        opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
-    return opener.open(*args, **kwargs)
-
-
-class TestServiceBase(base.BaseTestCase):
-    """Service tests base."""
-
-    @mock.patch("neutron.policy.refresh")
-    @mock.patch("neutron.common.config.setup_logging")
-    def _test_reset(self, worker_service, setup_logging_mock, refresh_mock):
-        worker_service.reset()
-
-        setup_logging_mock.assert_called_once_with()
-        refresh_mock.assert_called_once_with()
-
-
-class TestWorkerService(TestServiceBase):
-    """WorkerService tests."""
-
-    @mock.patch('neutron.db.api.get_engine')
-    def test_start_withoutdb_call(self, apimock):
-        # clear engine from other tests
-        api._FACADE = None
-        _service = mock.Mock()
-        _service.pool.spawn.return_value = None
-
-        _app = mock.Mock()
-        workerservice = wsgi.WorkerService(_service, _app)
-        workerservice.start()
-        self.assertFalse(apimock.called)
-
-    def test_reset(self):
-        _service = mock.Mock()
-        _app = mock.Mock()
-
-        worker_service = wsgi.WorkerService(_service, _app)
-        self._test_reset(worker_service)
-
-
-class TestWSGIServer(base.BaseTestCase):
-    """WSGI server tests."""
-
-    def test_start_random_port(self):
-        server = wsgi.Server("test_random_port")
-        server.start(None, 0, host="127.0.0.1")
-        self.assertNotEqual(0, server.port)
-        server.stop()
-        server.wait()
-
-    @mock.patch('oslo_service.service.ProcessLauncher')
-    def test_start_multiple_workers(self, ProcessLauncher):
-        launcher = ProcessLauncher.return_value
-
-        server = wsgi.Server("test_multiple_processes")
-        server.start(None, 0, host="127.0.0.1", workers=2)
-        launcher.launch_service.assert_called_once_with(mock.ANY, workers=2)
-
-        server.stop()
-        launcher.stop.assert_called_once_with()
-
-        server.wait()
-        launcher.wait.assert_called_once_with()
-
-    def test_start_random_port_with_ipv6(self):
-        server = wsgi.Server("test_random_port")
-        server.start(None, 0, host="::1")
-        self.assertEqual("::1", server.host)
-        self.assertNotEqual(0, server.port)
-        server.stop()
-        server.wait()
-
-    def test_ipv6_listen_called_with_scope(self):
-        server = wsgi.Server("test_app")
-
-        with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen:
-            with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr:
-                mock_get_addr.return_value = [
-                    (socket.AF_INET6,
-                     socket.SOCK_STREAM,
-                     socket.IPPROTO_TCP,
-                     '',
-                     ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2))
-                ]
-                with mock.patch.object(server, 'pool') as mock_pool:
-                    server.start(None,
-                                 1234,
-                                 host="fe80::204:acff:fe96:da87%eth0")
-
-                    mock_get_addr.assert_called_once_with(
-                        "fe80::204:acff:fe96:da87%eth0",
-                        1234,
-                        socket.AF_UNSPEC,
-                        socket.SOCK_STREAM
-                    )
-
-                    mock_listen.assert_called_once_with(
-                        ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2),
-                        family=socket.AF_INET6,
-                        backlog=cfg.CONF.backlog
-                    )
-
-                    mock_pool.spawn.assert_has_calls([
-                        mock.call(
-                            server._run,
-                            None,
-                            mock_listen.return_value.dup.return_value)
-                    ])
-
-    def test_app(self):
-        greetings = 'Hello, World!!!'
-
-        def hello_world(env, start_response):
-            if env['PATH_INFO'] != '/':
-                start_response('404 Not Found',
-                               [('Content-Type', 'text/plain')])
-                return ['Not Found\r\n']
-            start_response('200 OK', [('Content-Type', 'text/plain')])
-            return [greetings]
-
-        server = wsgi.Server("test_app")
-        server.start(hello_world, 0, host="127.0.0.1")
-
-        response = open_no_proxy('http://127.0.0.1:%d/' % server.port)
-
-        self.assertEqual(greetings.encode('utf-8'), response.read())
-
-        server.stop()
-
-    @mock.patch.object(wsgi, 'eventlet')
-    def test__run(self, eventlet_mock):
-        server = wsgi.Server('test')
-        server._run("app", "socket")
-        eventlet_mock.wsgi.server.assert_called_once_with(
-            'socket',
-            'app',
-            max_size=server.num_threads,
-            log=mock.ANY,
-            keepalive=CONF.wsgi_keep_alive,
-            socket_timeout=server.client_socket_timeout
-        )
-
-
-class SerializerTest(base.BaseTestCase):
-    def test_serialize_unknown_content_type(self):
-        """Verify that exception InvalidContentType is raised."""
-        input_dict = {'servers': {'test': 'pass'}}
-        content_type = 'application/unknown'
-        serializer = wsgi.Serializer()
-
-        self.assertRaises(
-            exception.InvalidContentType, serializer.serialize,
-            input_dict, content_type)
-
-    def test_get_deserialize_handler_unknown_content_type(self):
-        """Verify that exception InvalidContentType is raised."""
-        content_type = 'application/unknown'
-        serializer = wsgi.Serializer()
-
-        self.assertRaises(
-            exception.InvalidContentType,
-            serializer.get_deserialize_handler, content_type)
-
-    def test_serialize_content_type_json(self):
-        """Test serialize with content type json."""
-        input_data = {'servers': ['test=pass']}
-        content_type = 'application/json'
-        serializer = wsgi.Serializer()
-        result = serializer.serialize(input_data, content_type)
-
-        self.assertEqual(b'{"servers": ["test=pass"]}', result)
-
-    def test_deserialize_raise_bad_request(self):
-        """Test serialize verifies that exception is raises."""
-        content_type = 'application/unknown'
-        data_string = 'test'
-        serializer = wsgi.Serializer()
-
-        self.assertRaises(
-            webob.exc.HTTPBadRequest,
-            serializer.deserialize, data_string, content_type)
-
-    def test_deserialize_json_content_type(self):
-        """Test Serializer.deserialize with content type json."""
-        content_type = 'application/json'
-        data_string = '{"servers": ["test=pass"]}'
-        serializer = wsgi.Serializer()
-        result = serializer.deserialize(data_string, content_type)
-
-        self.assertEqual({'body': {u'servers': [u'test=pass']}}, result)
-
-
-class RequestDeserializerTest(testtools.TestCase):
-    def setUp(self):
-        super(RequestDeserializerTest, self).setUp()
-
-        class JSONDeserializer(object):
-            def deserialize(self, data, action='default'):
-                return 'pew_json'
-
-        self.body_deserializers = {'application/json': JSONDeserializer()}
-
-        self.deserializer = wsgi.RequestDeserializer(self.body_deserializers)
-
-    def test_get_deserializer(self):
-        """Test RequestDeserializer.get_body_deserializer."""
-        expected_json_serializer = self.deserializer.get_body_deserializer(
-            'application/json')
-
-        self.assertEqual(
-            expected_json_serializer,
-            self.body_deserializers['application/json'])
-
-    def test_get_expected_content_type(self):
-        """Test RequestDeserializer.get_expected_content_type."""
-        request = wsgi.Request.blank('/')
-        request.headers['Accept'] = 'application/json'
-
-        self.assertEqual('application/json',
-                         self.deserializer.get_expected_content_type(request))
-
-    def test_get_action_args(self):
-        """Test RequestDeserializer.get_action_args."""
-        env = {
-            'wsgiorg.routing_args': [None, {
-                'controller': None,
-                'format': None,
-                'action': 'update',
-                'id': 12}]}
-        expected = {'action': 'update', 'id': 12}
-
-        self.assertEqual(expected,
-                         self.deserializer.get_action_args(env))
-
-    def test_deserialize(self):
-        """Test RequestDeserializer.deserialize."""
-        with mock.patch.object(
-            self.deserializer, 'get_action_args') as mock_method:
-            mock_method.return_value = {'action': 'create'}
-            request = wsgi.Request.blank('/')
-            request.headers['Accept'] = 'application/json'
-            deserialized = self.deserializer.deserialize(request)
-            expected = ('create', {}, 'application/json')
-
-            self.assertEqual(expected, deserialized)
-
-    def test_get_body_deserializer_unknown_content_type(self):
-        """Verify that exception InvalidContentType is raised."""
-        content_type = 'application/unknown'
-        deserializer = wsgi.RequestDeserializer()
-        self.assertRaises(
-            exception.InvalidContentType,
-            deserializer.get_body_deserializer, content_type)
-
-
-class ResponseSerializerTest(testtools.TestCase):
-    def setUp(self):
-        super(ResponseSerializerTest, self).setUp()
-
-        class JSONSerializer(object):
-            def serialize(self, data, action='default'):
-                return b'pew_json'
-
-        class HeadersSerializer(object):
-            def serialize(self, response, data, action):
-                response.status_int = 404
-
-        self.body_serializers = {'application/json': JSONSerializer()}
-
-        self.serializer = wsgi.ResponseSerializer(
-            self.body_serializers, HeadersSerializer())
-
-    def test_serialize_unknown_content_type(self):
-        """Verify that exception InvalidContentType is raised."""
-        self.assertRaises(
-            exception.InvalidContentType,
-            self.serializer.serialize,
-            {}, 'application/unknown')
-
-    def test_get_body_serializer(self):
-        """Verify that exception InvalidContentType is raised."""
-        self.assertRaises(
-            exception.InvalidContentType,
-            self.serializer.get_body_serializer, 'application/unknown')
-
-    def test_get_serializer(self):
-        """Test ResponseSerializer.get_body_serializer."""
-        content_type = 'application/json'
-        self.assertEqual(self.body_serializers[content_type],
-                         self.serializer.get_body_serializer(content_type))
-
-    def test_serialize_json_response(self):
-        response = self.serializer.serialize({}, 'application/json')
-
-        self.assertEqual('application/json', response.headers['Content-Type'])
-        self.assertEqual(b'pew_json', response.body)
-        self.assertEqual(404, response.status_int)
-
-    def test_serialize_response_None(self):
-        response = self.serializer.serialize(
-            None, 'application/json')
-
-        self.assertEqual('application/json', response.headers['Content-Type'])
-        self.assertEqual(b'', response.body)
-        self.assertEqual(404, response.status_int)
-
-
-class RequestTest(base.BaseTestCase):
-
-    def test_content_type_missing(self):
-        request = wsgi.Request.blank('/tests/123', method='POST')
-        request.body = b"<body />"
-
-        self.assertIsNone(request.get_content_type())
-
-    def test_content_type_unsupported(self):
-        request = wsgi.Request.blank('/tests/123', method='POST')
-        request.headers["Content-Type"] = "text/html"
-        request.body = b"fake<br />"
-
-        self.assertIsNone(request.get_content_type())
-
-    def test_content_type_with_charset(self):
-        request = wsgi.Request.blank('/tests/123')
-        request.headers["Content-Type"] = "application/json; charset=UTF-8"
-        result = request.get_content_type()
-
-        self.assertEqual("application/json", result)
-
-    def test_content_type_with_given_content_types(self):
-        request = wsgi.Request.blank('/tests/123')
-        request.headers["Content-Type"] = "application/new-type;"
-
-        self.assertIsNone(request.get_content_type())
-
-    def test_content_type_from_accept(self):
-        request = wsgi.Request.blank('/tests/123')
-        request.headers["Accept"] = "application/json"
-        result = request.best_match_content_type()
-
-        self.assertEqual("application/json", result)
-
-        request = wsgi.Request.blank('/tests/123')
-        request.headers["Accept"] = ("application/json; q=0.3")
-        result = request.best_match_content_type()
-
-        self.assertEqual("application/json", result)
-
-    def test_content_type_from_query_extension(self):
-        request = wsgi.Request.blank('/tests/123.json')
-        result = request.best_match_content_type()
-
-        self.assertEqual("application/json", result)
-
-        request = wsgi.Request.blank('/tests/123.invalid')
-        result = request.best_match_content_type()
-
-        self.assertEqual("application/json", result)
-
-    def test_content_type_accept_and_query_extension(self):
-        request = wsgi.Request.blank('/tests/123.json')
-        request.headers["Accept"] = "application/json"
-        result = request.best_match_content_type()
-
-        self.assertEqual("application/json", result)
-
-    def test_content_type_accept_default(self):
-        request = wsgi.Request.blank('/tests/123.unsupported')
-        request.headers["Accept"] = "application/unsupported1"
-        result = request.best_match_content_type()
-
-        self.assertEqual("application/json", result)
-
-    def test_content_type_accept_with_given_content_types(self):
-        request = wsgi.Request.blank('/tests/123')
-        request.headers["Accept"] = "application/new_type"
-        result = request.best_match_content_type()
-
-        self.assertEqual("application/json", result)
-
-
-class ActionDispatcherTest(base.BaseTestCase):
-    def test_dispatch(self):
-        """Test ActionDispatcher.dispatch."""
-        serializer = wsgi.ActionDispatcher()
-        serializer.create = lambda x: x
-
-        self.assertEqual('pants',
-                         serializer.dispatch('pants', action='create'))
-
-    def test_dispatch_action_None(self):
-        """Test ActionDispatcher.dispatch with none action."""
-        serializer = wsgi.ActionDispatcher()
-        serializer.create = lambda x: x + ' pants'
-        serializer.default = lambda x: x + ' trousers'
-
-        self.assertEqual('Two trousers',
-                         serializer.dispatch('Two', action=None))
-
-    def test_dispatch_default(self):
-        serializer = wsgi.ActionDispatcher()
-        serializer.create = lambda x: x + ' pants'
-        serializer.default = lambda x: x + ' trousers'
-
-        self.assertEqual('Two trousers',
-                         serializer.dispatch('Two', action='update'))
-
-
-class ResponseHeadersSerializerTest(base.BaseTestCase):
-    def test_default(self):
-        serializer = wsgi.ResponseHeaderSerializer()
-        response = webob.Response()
-        serializer.serialize(response, {'v': '123'}, 'fake')
-
-        self.assertEqual(200, response.status_int)
-
-    def test_custom(self):
-        class Serializer(wsgi.ResponseHeaderSerializer):
-            def update(self, response, data):
-                response.status_int = 404
-                response.headers['X-Custom-Header'] = data['v']
-        serializer = Serializer()
-        response = webob.Response()
-        serializer.serialize(response, {'v': '123'}, 'update')
-
-        self.assertEqual(404, response.status_int)
-        self.assertEqual('123', response.headers['X-Custom-Header'])
-
-
-class DictSerializerTest(base.BaseTestCase):
-
-    def test_dispatch_default(self):
-        serializer = wsgi.DictSerializer()
-        self.assertEqual('',
-                         serializer.serialize({}, 'NonExistentAction'))
-
-
-class JSONDictSerializerTest(base.BaseTestCase):
-
-    def test_json(self):
-        input_dict = dict(servers=dict(a=(2, 3)))
-        expected_json = b'{"servers":{"a":[2,3]}}'
-        serializer = wsgi.JSONDictSerializer()
-        result = serializer.serialize(input_dict)
-        result = result.replace(b'\n', b'').replace(b' ', b'')
-
-        self.assertEqual(expected_json, result)
-
-    # The tested behaviour is only meant to be witnessed in Python 2, so it is
-    # OK to skip this test with Python 3.
-    @helpers.requires_py2
-    def test_json_with_utf8(self):
-        input_dict = dict(servers=dict(a=(2, '\xe7\xbd\x91\xe7\xbb\x9c')))
-        expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
-        serializer = wsgi.JSONDictSerializer()
-        result = serializer.serialize(input_dict)
-        result = result.replace(b'\n', b'').replace(b' ', b'')
-
-        self.assertEqual(expected_json, result)
-
-    def test_json_with_unicode(self):
-        input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc')))
-        expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
-        serializer = wsgi.JSONDictSerializer()
-        result = serializer.serialize(input_dict)
-        result = result.replace(b'\n', b'').replace(b' ', b'')
-
-        self.assertEqual(expected_json, result)
-
-
-class TextDeserializerTest(base.BaseTestCase):
-
-    def test_dispatch_default(self):
-        deserializer = wsgi.TextDeserializer()
-        self.assertEqual({},
-                         deserializer.deserialize({}, 'update'))
-
-
-class JSONDeserializerTest(base.BaseTestCase):
-    def test_json(self):
-        data = """{"a": {
-                "a1": "1",
-                "a2": "2",
-                "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
-                "d": {"e": "1"},
-                "f": "1"}}"""
-        as_dict = {
-            'body': {
-                'a': {
-                    'a1': '1',
-                    'a2': '2',
-                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
-                    'd': {'e': '1'},
-                    'f': '1'}}}
-        deserializer = wsgi.JSONDeserializer()
-        self.assertEqual(as_dict,
-                         deserializer.deserialize(data))
-
-    def test_default_raise_Malformed_Exception(self):
-        """Test JsonDeserializer.default.
-
-        Test verifies JsonDeserializer.default raises exception
-        MalformedRequestBody correctly.
-        """
-        data_string = ""
-        deserializer = wsgi.JSONDeserializer()
-
-        self.assertRaises(
-            exception.MalformedRequestBody, deserializer.default, data_string)
-
-    def test_json_with_utf8(self):
-        data = b'{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}'
-        as_dict = {'body': {'a': u'\u7f51\u7edc'}}
-        deserializer = wsgi.JSONDeserializer()
-        self.assertEqual(as_dict,
-                         deserializer.deserialize(data))
-
-    def test_json_with_unicode(self):
-        data = b'{"a": "\u7f51\u7edc"}'
-        as_dict = {'body': {'a': u'\u7f51\u7edc'}}
-        deserializer = wsgi.JSONDeserializer()
-        self.assertEqual(as_dict,
-                         deserializer.deserialize(data))
-
-
-class RequestHeadersDeserializerTest(base.BaseTestCase):
-
-    def test_default(self):
-        deserializer = wsgi.RequestHeadersDeserializer()
-        req = wsgi.Request.blank('/')
-
-        self.assertEqual({},
-                         deserializer.deserialize(req, 'nonExistent'))
-
-    def test_custom(self):
-        class Deserializer(wsgi.RequestHeadersDeserializer):
-            def update(self, request):
-                return {'a': request.headers['X-Custom-Header']}
-        deserializer = Deserializer()
-        req = wsgi.Request.blank('/')
-        req.headers['X-Custom-Header'] = 'b'
-        self.assertEqual({'a': 'b'},
-                         deserializer.deserialize(req, 'update'))
-
-
-class ResourceTest(base.BaseTestCase):
-
-    @staticmethod
-    def my_fault_body_function():
-        return 'off'
-
-    class Controller(object):
-        def index(self, request, index=None):
-            return index
-
-    def test_dispatch(self):
-        resource = wsgi.Resource(self.Controller(),
-                                 self.my_fault_body_function)
-        actual = resource.dispatch(
-            resource.controller, 'index', action_args={'index': 'off'})
-        expected = 'off'
-
-        self.assertEqual(expected, actual)
-
-    def test_dispatch_unknown_controller_action(self):
-        resource = wsgi.Resource(self.Controller(),
-                                 self.my_fault_body_function)
-        self.assertRaises(
-            AttributeError, resource.dispatch,
-            resource.controller, 'create', {})
-
-    def test_malformed_request_body_throws_bad_request(self):
-        resource = wsgi.Resource(None, self.my_fault_body_function)
-        request = wsgi.Request.blank(
-            "/", body=b"{mal:formed", method='POST',
-            headers={'Content-Type': "application/json"})
-
-        response = resource(request)
-        self.assertEqual(400, response.status_int)
-
-    def test_wrong_content_type_throws_unsupported_media_type_error(self):
-        resource = wsgi.Resource(None, self.my_fault_body_function)
-        request = wsgi.Request.blank(
-            "/", body=b"{some:json}", method='POST',
-            headers={'Content-Type': "xxx"})
-
-        response = resource(request)
-        self.assertEqual(400, response.status_int)
-
-    def test_wrong_content_type_server_error(self):
-        resource = wsgi.Resource(None, self.my_fault_body_function)
-        request = wsgi.Request.blank(
-            "/", method='POST', headers={'Content-Type': "unknow"})
-
-        response = resource(request)
-        self.assertEqual(500, response.status_int)
-
-    def test_call_resource_class_bad_request(self):
-        class FakeRequest(object):
-            def __init__(self):
-                self.url = 'http://where.no'
-                self.environ = 'environ'
-                self.body = 'body'
-
-            def method(self):
-                pass
-
-            def best_match_content_type(self):
-                return 'best_match_content_type'
-
-        resource = wsgi.Resource(self.Controller(),
-                                 self.my_fault_body_function)
-        request = FakeRequest()
-        result = resource(request)
-        self.assertEqual(400, result.status_int)
-
-    def test_type_error(self):
-        resource = wsgi.Resource(self.Controller(),
-                                 self.my_fault_body_function)
-        request = wsgi.Request.blank(
-            "/", method='POST', headers={'Content-Type': "json"})
-
-        response = resource.dispatch(
-            request, action='index', action_args='test')
-        self.assertEqual(400, response.status_int)
-
-    def test_call_resource_class_internal_error(self):
-        class FakeRequest(object):
-            def __init__(self):
-                self.url = 'http://where.no'
-                self.environ = 'environ'
-                self.body = '{"Content-Type": "json"}'
-
-            def method(self):
-                pass
-
-            def best_match_content_type(self):
-                return 'application/json'
-
-        resource = wsgi.Resource(self.Controller(),
-                                 self.my_fault_body_function)
-        request = FakeRequest()
-        result = resource(request)
-        self.assertEqual(500, result.status_int)
-
-
-class FaultTest(base.BaseTestCase):
-    def test_call_fault(self):
-        class MyException(object):
-            status_int = 415
-            explanation = 'test'
-
-        my_exception = MyException()
-        my_fault = wsgi.Fault(exception=my_exception)
-        request = wsgi.Request.blank(
-            "/", method='POST', headers={'Content-Type': "unknow"})
-        response = my_fault(request)
-        self.assertEqual(415, response.status_int)
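The deserializer and fault tests above pin down a simple contract: JSONDeserializer nests the parsed document under a 'body' key, and Fault turns a webob exception into a response carrying that exception's status code. A minimal sketch of the serialization round trip, assuming the (now deleted) neutron.wsgi module is still importable:

```python
from neutron import wsgi

# JSONDeserializer wraps the parsed JSON under a 'body' key.
deserializer = wsgi.JSONDeserializer()
assert deserializer.deserialize(b'{"a": "1"}') == {'body': {'a': '1'}}

# JSONDictSerializer performs the inverse step, returning an encoded body.
serializer = wsgi.JSONDictSerializer()
assert serializer.serialize({'a': '1'}) == b'{"a": "1"}'
```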
diff --git a/neutron/tests/unit/testlib_api.py b/neutron/tests/unit/testlib_api.py
deleted file mode 100644 (file)
index 30955cc..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import fixtures
-import six
-import testtools
-
-from neutron.db import api as db_api
-# Import all data models
-from neutron.db.migration.models import head  # noqa
-from neutron.db import model_base
-from neutron.tests import base
-from neutron import wsgi
-
-
-class ExpectedException(testtools.ExpectedException):
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        if super(ExpectedException, self).__exit__(exc_type,
-                                                   exc_value,
-                                                   traceback):
-            self.exception = exc_value
-            return True
-        return False
-
-
-def create_request(path, body, content_type, method='GET',
-                   query_string=None, context=None):
-    if query_string:
-        url = "%s?%s" % (path, query_string)
-    else:
-        url = path
-    req = wsgi.Request.blank(url)
-    req.method = method
-    req.headers = {}
-    req.headers['Accept'] = content_type
-    if isinstance(body, six.text_type):
-        req.body = body.encode()
-    else:
-        req.body = body
-    if context:
-        req.environ['neutron.context'] = context
-    return req
-
-
-class SqlFixture(fixtures.Fixture):
-
-    # flag to indicate that the tables have been created
-    _TABLES_ESTABLISHED = False
-
-    def _setUp(self):
-        # Register all data models
-        engine = db_api.get_engine()
-        if not SqlFixture._TABLES_ESTABLISHED:
-            model_base.BASEV2.metadata.create_all(engine)
-            SqlFixture._TABLES_ESTABLISHED = True
-
-        def clear_tables():
-            with engine.begin() as conn:
-                for table in reversed(
-                        model_base.BASEV2.metadata.sorted_tables):
-                    conn.execute(table.delete())
-
-        self.addCleanup(clear_tables)
-
-
-class SqlTestCaseLight(base.DietTestCase):
-    """All SQL taste, zero plugin/rpc sugar"""
-
-    def setUp(self):
-        super(SqlTestCaseLight, self).setUp()
-        self.useFixture(SqlFixture())
-
-
-class SqlTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(SqlTestCase, self).setUp()
-        self.useFixture(SqlFixture())
-
-
-class WebTestCase(SqlTestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        super(WebTestCase, self).setUp()
-        json_deserializer = wsgi.JSONDeserializer()
-        self._deserializers = {
-            'application/json': json_deserializer,
-        }
-
-    def deserialize(self, response):
-        ctype = 'application/%s' % self.fmt
-        data = self._deserializers[ctype].deserialize(response.body)['body']
-        return data
-
-    def serialize(self, data):
-        ctype = 'application/%s' % self.fmt
-        result = wsgi.Serializer().serialize(data, ctype)
-        return result
-
-
-class SubDictMatch(object):
-
-    def __init__(self, sub_dict):
-        self.sub_dict = sub_dict
-
-    def __eq__(self, super_dict):
-        return all(item in super_dict.items()
-                   for item in self.sub_dict.items())
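SubDictMatch is an equality matcher: it compares equal to any dict that contains all of its items. A hypothetical usage sketch with mock (the names here are illustrative only, not from the deleted code):

```python
import mock

notify = mock.Mock()
notify({'id': 'net-1', 'status': 'ACTIVE', 'updated_at': 'now'})

# Passes because the actual argument is a superset of the expected items;
# keys the test does not care about are simply ignored.
notify.assert_called_once_with(SubDictMatch({'id': 'net-1'}))
```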
diff --git a/neutron/tests/unit/tests/__init__.py b/neutron/tests/unit/tests/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/tests/test_base.py b/neutron/tests/unit/tests/test_base.py
deleted file mode 100644 (file)
index 8a2bb55..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests to test the test framework"""
-
-import sys
-import unittest2
-
-from neutron.tests import base
-
-
-class SystemExitTestCase(base.DietTestCase):
-    # Embedded to hide from the regular test discovery
-    class MyTestCase(base.DietTestCase):
-        def __init__(self, exitcode):
-            super(SystemExitTestCase.MyTestCase, self).__init__()
-            self.exitcode = exitcode
-
-        def runTest(self):
-            if self.exitcode is not None:
-                sys.exit(self.exitcode)
-
-    def test_no_sysexit(self):
-        result = self.MyTestCase(exitcode=None).run()
-        self.assertTrue(result.wasSuccessful())
-
-    def test_sysexit(self):
-        expectedFails = [self.MyTestCase(exitcode) for exitcode in (0, 1)]
-
-        suite = unittest2.TestSuite(tests=expectedFails)
-        result = self.defaultTestResult()
-        try:
-            suite.run(result)
-        except SystemExit:
-            self.fail('SystemExit escaped!')
-
-        self.assertEqual([], result.errors)
-        self.assertItemsEqual(set(id(t) for t in expectedFails),
-                              set(id(t) for (t, traceback) in result.failures))
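The suite above relies on the base test case converting sys.exit() inside a test into an ordinary failure instead of letting SystemExit abort the whole runner. A generic sketch of such a guard (not neutron's actual implementation):

```python
import functools

def guard_system_exit(test_method):
    """Convert sys.exit() inside a test into a normal test failure."""
    @functools.wraps(test_method)
    def wrapper(self, *args, **kwargs):
        try:
            return test_method(self, *args, **kwargs)
        except SystemExit:
            # self.fail() raises failureException, which the runner
            # records as a failure rather than a crash.
            self.fail('SystemExit escaped from the test body')
    return wrapper
```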
diff --git a/neutron/tests/unit/tests/test_post_mortem_debug.py b/neutron/tests/unit/tests/test_post_mortem_debug.py
deleted file mode 100644 (file)
index 514fb10..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-import mock
-from six import moves
-
-from neutron.tests import base
-from neutron.tests import post_mortem_debug
-
-
-class TestTesttoolsExceptionHandler(base.BaseTestCase):
-
-    def test_exception_handler(self):
-        try:
-            self.assertTrue(False)
-        except Exception:
-            exc_info = sys.exc_info()
-        with mock.patch('traceback.print_exception') as mock_print_exception:
-            with mock.patch('pdb.post_mortem') as mock_post_mortem:
-                with mock.patch.object(post_mortem_debug,
-                                       'get_ignored_traceback',
-                                       return_value=mock.Mock()):
-                    post_mortem_debug.get_exception_handler('pdb')(exc_info)
-
-        # traceback will become post_mortem_debug.FilteredTraceback
-        filtered_exc_info = (exc_info[0], exc_info[1], mock.ANY)
-        mock_print_exception.assert_called_once_with(*filtered_exc_info)
-        mock_post_mortem.assert_called_once_with(mock.ANY)
-
-    def test__get_debugger(self):
-        def import_mock(name, *args):
-            mod_mock = mock.Mock()
-            mod_mock.__name__ = name
-            mod_mock.post_mortem = mock.Mock()
-            return mod_mock
-
-        with mock.patch('six.moves.builtins.__import__',
-                        side_effect=import_mock):
-                pdb_debugger = post_mortem_debug._get_debugger('pdb')
-                pudb_debugger = post_mortem_debug._get_debugger('pudb')
-                self.assertEqual('pdb', pdb_debugger.__name__)
-                self.assertEqual('pudb', pudb_debugger.__name__)
-
-
-class TestFilteredTraceback(base.BaseTestCase):
-
-    def test_filter_traceback(self):
-        tb1 = mock.Mock()
-        tb2 = mock.Mock()
-        tb1.tb_next = tb2
-        tb2.tb_next = None
-        ftb1 = post_mortem_debug.FilteredTraceback(tb1, tb2)
-        for attr in ['lasti', 'lineno', 'frame']:
-            attr_name = 'tb_%s' % attr
-            self.assertEqual(getattr(tb1, attr_name, None),
-                             getattr(ftb1, attr_name, None))
-        self.assertIsNone(ftb1.tb_next)
-
-
-class TestGetIgnoredTraceback(base.BaseTestCase):
-
-    def _test_get_ignored_traceback(self, ignored_bit_array, expected):
-        root_tb = mock.Mock()
-
-        tb = root_tb
-        tracebacks = [tb]
-        for x in moves.range(len(ignored_bit_array) - 1):
-            tb.tb_next = mock.Mock()
-            tb = tb.tb_next
-            tracebacks.append(tb)
-        tb.tb_next = None
-
-        tb = root_tb
-        for ignored in ignored_bit_array:
-            if ignored:
-                tb.tb_frame.f_globals = ['__unittest']
-            else:
-                tb.tb_frame.f_globals = []
-            tb = tb.tb_next
-
-        actual = post_mortem_debug.get_ignored_traceback(root_tb)
-        if expected is not None:
-            expected = tracebacks[expected]
-        self.assertEqual(expected, actual)
-
-    def test_no_ignored_tracebacks(self):
-        self._test_get_ignored_traceback([0, 0, 0], None)
-
-    def test_single_member_trailing_chain(self):
-        self._test_get_ignored_traceback([0, 0, 1], 2)
-
-    def test_two_member_trailing_chain(self):
-        self._test_get_ignored_traceback([0, 1, 1], 1)
-
-    def test_first_traceback_ignored(self):
-        self._test_get_ignored_traceback([1, 0, 0], None)
-
-    def test_middle_traceback_ignored(self):
-        self._test_get_ignored_traceback([0, 1, 0], None)
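The bit-array tests pin down the semantics of get_ignored_traceback: it returns the head of the trailing run of traceback frames whose f_globals contain '__unittest' (unittest machinery at the bottom of the stack), and None when the last frame is not such a frame. A sketch of logic consistent with those five expectations (not necessarily the deleted module's exact code):

```python
def get_ignored_traceback_sketch(tb):
    """Return the first frame of the trailing '__unittest' run, or None."""
    candidate = None
    while tb is not None:
        if '__unittest' in tb.tb_frame.f_globals:
            if candidate is None:
                candidate = tb  # possible start of a trailing ignored run
        else:
            candidate = None  # the ignored run did not reach the tail
        tb = tb.tb_next
    return candidate
```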
diff --git a/neutron/tests/var/ca.crt b/neutron/tests/var/ca.crt
deleted file mode 100644 (file)
index 9d66ca6..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV
-BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg
-Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy
-MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV
-BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ
-R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA
-A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi
-RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX
-/l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI
-N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl
-GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If
-ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb
-tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+
-dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK
-WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/
-4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk
-BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID
-AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j
-BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx
-EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG
-A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM
-BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h
-UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4
-qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm
-2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/
-+C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX
-TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a
-NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V
-xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv
-ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy
-I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY
-9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA
-WoRMgEwjGJWqzhJZUYpUAQ==
------END CERTIFICATE-----
diff --git a/neutron/tests/var/certandkey.pem b/neutron/tests/var/certandkey.pem
deleted file mode 100644 (file)
index a5baf3a..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV
-BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ
-R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN
-MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0
-ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT
-BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu
-avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb
-Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ
-bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA
-BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q
-8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG
-/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0
-iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+
-KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2
-0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9
-Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr
-mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC
-AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y
-0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN
-rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k
-yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY
-vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc
-AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2
-KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL
-cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07
-hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2
-Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM
-YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA==
------END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe
-4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny
-FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD
-/P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K
-gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN
-+Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy
-QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH
-pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7
-rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS
-L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN
-H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA
-AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW
-t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N
-sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/
-8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1
-f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH
-Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r
-VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh
-/W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR
-dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh
-WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw
-1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK
-hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM
-ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh
-sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o
-uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ
-LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U
-4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n
-bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc
-NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn
-7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp
-TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7
-3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL
-5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ
-fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze
-IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz
-JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p
-pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD
-bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB
-utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP
-pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ
-GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq
-ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps
-av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB
-1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX
-juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag
-miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS
-8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed
-TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ=
------END RSA PRIVATE KEY-----
diff --git a/neutron/tests/var/certificate.crt b/neutron/tests/var/certificate.crt
deleted file mode 100644 (file)
index 3c1aa63..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV
-BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ
-R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN
-MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0
-ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT
-BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu
-avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb
-Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ
-bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA
-BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q
-8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG
-/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0
-iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+
-KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2
-0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9
-Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr
-mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC
-AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y
-0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN
-rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k
-yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY
-vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc
-AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2
-KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL
-cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07
-hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2
-Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM
-YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA==
------END CERTIFICATE-----
diff --git a/neutron/tests/var/privatekey.key b/neutron/tests/var/privatekey.key
deleted file mode 100644 (file)
index b63df3d..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe
-4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny
-FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD
-/P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K
-gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN
-+Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy
-QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH
-pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7
-rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS
-L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN
-H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA
-AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW
-t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N
-sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/
-8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1
-f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH
-Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r
-VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh
-/W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR
-dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh
-WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw
-1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK
-hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM
-ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh
-sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o
-uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ
-LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U
-4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n
-bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc
-NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn
-7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp
-TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7
-3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL
-5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ
-fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze
-IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz
-JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p
-pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD
-bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB
-utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP
-pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ
-GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq
-ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps
-av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB
-1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX
-juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag
-miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS
-8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed
-TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ=
------END RSA PRIVATE KEY-----
diff --git a/neutron/version.py b/neutron/version.py
deleted file mode 100644 (file)
index 2dbf54d..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#    Copyright 2011 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import pbr.version
-
-version_info = pbr.version.VersionInfo('neutron')
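version_info is a pbr VersionInfo object, so callers read the version from installed package metadata. A short usage sketch (the printed values depend on the installed package):

```python
from neutron import version

print(version.version_info.version_string())  # e.g. '7.0.0'
print(version.version_info.release_string())  # may carry a dev/git suffix
```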
diff --git a/neutron/worker.py b/neutron/worker.py
deleted file mode 100644 (file)
index 80a1653..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_service import service
-
-from neutron.callbacks import events
-from neutron.callbacks import registry
-from neutron.callbacks import resources
-
-
-class NeutronWorker(service.ServiceBase):
-    """Partial implementation of the ServiceBase ABC
-
-    Subclasses will still need to add the other abstract methods defined in
-    service.ServiceBase. See oslo_service for more details.
-
-    If a plugin needs to handle synchronization with the Neutron database
-    and do this only once instead of in every API worker, for instance, it
-    can define a NeutronWorker subclass and have its get_workers method
-    return a list of NeutronWorker instances. For example:
-        class MyPlugin(...):
-            def get_workers(self):
-                return [MyPluginWorker()]
-
-        class MyPluginWorker(NeutronWorker):
-            def start(self):
-                super(MyPluginWorker, self).start()
-                do_sync()
-    """
-    def start(self):
-        registry.notify(resources.PROCESS, events.AFTER_CREATE, self.start)
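A minimal sketch of the pattern the docstring describes. The stop/wait/reset bodies below are placeholders: oslo_service's ServiceBase declares them abstract, and NeutronWorker itself only supplies start().

```python
class MyPluginWorker(NeutronWorker):
    def start(self):
        # Fires the PROCESS/AFTER_CREATE notification via the parent class.
        super(MyPluginWorker, self).start()
        # do_sync() or other one-time initialization would go here.

    def stop(self):
        pass

    def wait(self):
        pass

    def reset(self):
        pass
```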
diff --git a/neutron/wsgi.py b/neutron/wsgi.py
deleted file mode 100644 (file)
index 8f349a9..0000000
+++ /dev/null
@@ -1,804 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Utility methods for working with WSGI servers
-"""
-from __future__ import print_function
-
-import errno
-import socket
-import sys
-import time
-
-import eventlet.wsgi
-from oslo_config import cfg
-import oslo_i18n
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_service import service as common_service
-from oslo_service import sslutils
-from oslo_service import systemd
-from oslo_service import wsgi
-from oslo_utils import excutils
-import six
-import webob.dec
-import webob.exc
-
-from neutron._i18n import _, _LE, _LI
-from neutron.common import config
-from neutron.common import exceptions as exception
-from neutron import context
-from neutron.db import api
-from neutron import worker
-
-socket_opts = [
-    cfg.IntOpt('backlog',
-               default=4096,
-               help=_("Number of backlog requests to configure "
-                      "the socket with")),
-    cfg.IntOpt('retry_until_window',
-               default=30,
-               help=_("Number of seconds to keep retrying to listen")),
-    cfg.BoolOpt('use_ssl',
-                default=False,
-                help=_('Enable SSL on the API server')),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(socket_opts)
-wsgi.register_opts(CONF)
-
-LOG = logging.getLogger(__name__)
-
-
-def encode_body(body):
-    """Encode unicode body.
-
-    WebOb requires to encode unicode body used to update response body.
-    """
-    if isinstance(body, six.text_type):
-        return body.encode('utf-8')
-    return body
-
-
-class WorkerService(worker.NeutronWorker):
-    """Wraps a worker to be handled by ProcessLauncher"""
-    def __init__(self, service, application):
-        self._service = service
-        self._application = application
-        self._server = None
-
-    def start(self):
-        super(WorkerService, self).start()
-        # When the API worker is stopped it kills the eventlet wsgi server,
-        # which internally closes the wsgi server socket object. That socket
-        # then becomes unusable, leading to "Bad file descriptor" errors on
-        # service restart. Duplicate the socket object to keep a usable file
-        # descriptor around.
-        dup_sock = self._service._socket.dup()
-        if CONF.use_ssl:
-            dup_sock = sslutils.wrap(CONF, dup_sock)
-        self._server = self._service.pool.spawn(self._service._run,
-                                                self._application,
-                                                dup_sock)
-
-    def wait(self):
-        if isinstance(self._server, eventlet.greenthread.GreenThread):
-            self._server.wait()
-
-    def stop(self):
-        if isinstance(self._server, eventlet.greenthread.GreenThread):
-            self._server.kill()
-            self._server = None
-
-    @staticmethod
-    def reset():
-        config.reset_service()
-
-
-class Server(object):
-    """Server class to manage multiple WSGI sockets and applications."""
-
-    def __init__(self, name, num_threads=1000):
-        # Raise the default from 8192 to accommodate large tokens
-        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
-        self.num_threads = num_threads
-        # Pool for a greenthread in which wsgi server will be running
-        self.pool = eventlet.GreenPool(1)
-        self.name = name
-        self._server = None
-        # A value of 0 is converted to None because None is what causes the
-        # wsgi server to wait forever.
-        self.client_socket_timeout = CONF.client_socket_timeout or None
-        if CONF.use_ssl:
-            sslutils.is_enabled(CONF)
-
-    def _get_socket(self, host, port, backlog):
-        bind_addr = (host, port)
-        # TODO(dims): eventlet's green dns/socket module does not actually
-        # support IPv6 in getaddrinfo(). We need to get around this in the
-        # future or monitor upstream for a fix
-        try:
-            info = socket.getaddrinfo(bind_addr[0],
-                                      bind_addr[1],
-                                      socket.AF_UNSPEC,
-                                      socket.SOCK_STREAM)[0]
-            family = info[0]
-            bind_addr = info[-1]
-        except Exception:
-            LOG.exception(_LE("Unable to listen on %(host)s:%(port)s"),
-                          {'host': host, 'port': port})
-            sys.exit(1)
-
-        sock = None
-        retry_until = time.time() + CONF.retry_until_window
-        while not sock and time.time() < retry_until:
-            try:
-                sock = eventlet.listen(bind_addr,
-                                       backlog=backlog,
-                                       family=family)
-            except socket.error as err:
-                with excutils.save_and_reraise_exception() as ctxt:
-                    if err.errno == errno.EADDRINUSE:
-                        ctxt.reraise = False
-                        eventlet.sleep(0.1)
-        if not sock:
-            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
-                               "after trying for %(time)d seconds") %
-                               {'host': host,
-                                'port': port,
-                                'time': CONF.retry_until_window})
-        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        # sockets can hang around forever without keepalive
-        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-
-        # This option isn't available in the OS X version of eventlet
-        if hasattr(socket, 'TCP_KEEPIDLE'):
-            sock.setsockopt(socket.IPPROTO_TCP,
-                            socket.TCP_KEEPIDLE,
-                            CONF.tcp_keepidle)
-
-        return sock
-
-    def start(self, application, port, host='0.0.0.0', workers=0):
-        """Run a WSGI server with the given application."""
-        self._host = host
-        self._port = port
-        backlog = CONF.backlog
-
-        self._socket = self._get_socket(self._host,
-                                        self._port,
-                                        backlog=backlog)
-
-        self._launch(application, workers)
-
-    def _launch(self, application, workers=0):
-        service = WorkerService(self, application)
-        if workers < 1:
-            # The API service should run in the current process.
-            self._server = service
-            # Dump the initial option values
-            cfg.CONF.log_opt_values(LOG, logging.DEBUG)
-            service.start()
-            systemd.notify_once()
-        else:
-            # Dispose of the DB connection pool before os.fork; otherwise
-            # child processes would share DB connections, which may cause
-            # DB errors.
-            api.dispose()
-            # The API service runs in a number of child processes.
-            # Minimize the cost of checking for child exit by extending the
-            # wait interval past the default of 0.01s.
-            self._server = common_service.ProcessLauncher(cfg.CONF,
-                                                          wait_interval=1.0)
-            self._server.launch_service(service, workers=workers)
-
-    @property
-    def host(self):
-        return self._socket.getsockname()[0] if self._socket else self._host
-
-    @property
-    def port(self):
-        return self._socket.getsockname()[1] if self._socket else self._port
-
-    def stop(self):
-        self._server.stop()
-
-    def wait(self):
-        """Wait until all servers have completed running."""
-        try:
-            self._server.wait()
-        except KeyboardInterrupt:
-            pass
-
-    def _run(self, application, socket):
-        """Start a WSGI server in a new green thread."""
-        eventlet.wsgi.server(socket, application,
-                             max_size=self.num_threads,
-                             log=LOG,
-                             keepalive=CONF.wsgi_keep_alive,
-                             socket_timeout=self.client_socket_timeout)
-
-
-class Request(wsgi.Request):
-
-    def best_match_content_type(self):
-        """Determine the most acceptable content-type.
-
-        Based on:
-            1) URI extension (.json)
-            2) Content-type header
-            3) Accept* headers
-        """
-        # First look at the HTTP request path
-        parts = self.path.rsplit('.', 1)
-        if len(parts) > 1:
-            _format = parts[1]
-            if _format in ['json']:
-                return 'application/{0}'.format(_format)
-
-        # Then look at the Content-Type header
-        type_from_header = self.get_content_type()
-        if type_from_header:
-            return type_from_header
-        ctypes = ['application/json']
-
-        # Finally search the Accept-* headers
-        bm = self.accept.best_match(ctypes)
-        return bm or 'application/json'
-
-    def get_content_type(self):
-        allowed_types = ("application/json")
-        if "Content-Type" not in self.headers:
-            LOG.debug("Missing Content-Type")
-            return None
-        _type = self.content_type
-        if _type in allowed_types:
-            return _type
-        return None
-
-    def best_match_language(self):
-        """Determines best available locale from the Accept-Language header.
-
-        :returns: the best language match or None if the 'Accept-Language'
-                  header was not available in the request.
-        """
-        if not self.accept_language:
-            return None
-        all_languages = oslo_i18n.get_available_languages('neutron')
-        return self.accept_language.best_match(all_languages)
-
-    @property
-    def context(self):
-        if 'neutron.context' not in self.environ:
-            self.environ['neutron.context'] = context.get_admin_context()
-        return self.environ['neutron.context']
-
-
-class ActionDispatcher(object):
-    """Maps method name to local methods through action name."""
-
-    def dispatch(self, *args, **kwargs):
-        """Find and call local method."""
-        action = kwargs.pop('action', 'default')
-        action_method = getattr(self, str(action), self.default)
-        return action_method(*args, **kwargs)
-
-    def default(self, data):
-        raise NotImplementedError()
-
-
-class DictSerializer(ActionDispatcher):
-    """Default request body serialization."""
-
-    def serialize(self, data, action='default'):
-        return self.dispatch(data, action=action)
-
-    def default(self, data):
-        return ""
-
-
-class JSONDictSerializer(DictSerializer):
-    """Default JSON request body serialization."""
-
-    def default(self, data):
-        def sanitizer(obj):
-            return six.text_type(obj)
-        return encode_body(jsonutils.dumps(data, default=sanitizer))
-
-
-class ResponseHeaderSerializer(ActionDispatcher):
-    """Default response headers serialization."""
-
-    def serialize(self, response, data, action):
-        self.dispatch(response, data, action=action)
-
-    def default(self, response, data):
-        response.status_int = 200
-
-
-class ResponseSerializer(object):
-    """Encode the necessary pieces into a response object."""
-
-    def __init__(self, body_serializers=None, headers_serializer=None):
-        self.body_serializers = {
-            'application/json': JSONDictSerializer(),
-        }
-        self.body_serializers.update(body_serializers or {})
-
-        self.headers_serializer = (headers_serializer or
-                                   ResponseHeaderSerializer())
-
-    def serialize(self, response_data, content_type, action='default'):
-        """Serialize a dict into a string and wrap in a wsgi.Request object.
-
-        :param response_data: dict produced by the Controller
-        :param content_type: expected mimetype of serialized response body
-
-        """
-        response = webob.Response()
-        self.serialize_headers(response, response_data, action)
-        self.serialize_body(response, response_data, content_type, action)
-        return response
-
-    def serialize_headers(self, response, data, action):
-        self.headers_serializer.serialize(response, data, action)
-
-    def serialize_body(self, response, data, content_type, action):
-        response.headers['Content-Type'] = content_type
-        if data is not None:
-            serializer = self.get_body_serializer(content_type)
-            response.body = serializer.serialize(data, action)
-
-    def get_body_serializer(self, content_type):
-        try:
-            return self.body_serializers[content_type]
-        except (KeyError, TypeError):
-            raise exception.InvalidContentType(content_type=content_type)
-
-
-class TextDeserializer(ActionDispatcher):
-    """Default request body deserialization."""
-
-    def deserialize(self, datastring, action='default'):
-        return self.dispatch(datastring, action=action)
-
-    def default(self, datastring):
-        return {}
-
-
-class JSONDeserializer(TextDeserializer):
-
-    def _from_json(self, datastring):
-        try:
-            return jsonutils.loads(datastring)
-        except ValueError:
-            msg = _("Cannot understand JSON")
-            raise exception.MalformedRequestBody(reason=msg)
-
-    def default(self, datastring):
-        return {'body': self._from_json(datastring)}
-
-
-class RequestHeadersDeserializer(ActionDispatcher):
-    """Default request headers deserializer."""
-
-    def deserialize(self, request, action):
-        return self.dispatch(request, action=action)
-
-    def default(self, request):
-        return {}
-
-
-class RequestDeserializer(object):
-    """Break up a Request object into more useful pieces."""
-
-    def __init__(self, body_deserializers=None, headers_deserializer=None):
-        self.body_deserializers = {
-            'application/json': JSONDeserializer(),
-        }
-        self.body_deserializers.update(body_deserializers or {})
-
-        self.headers_deserializer = (headers_deserializer or
-                                     RequestHeadersDeserializer())
-
-    def deserialize(self, request):
-        """Extract necessary pieces of the request.
-
-        :param request: Request object
-        :returns: tuple of the expected controller action name, a dictionary
-                  of keyword arguments to pass to the controller, and the
-                  expected content type of the response
-
-        """
-        action_args = self.get_action_args(request.environ)
-        action = action_args.pop('action', None)
-
-        action_args.update(self.deserialize_headers(request, action))
-        action_args.update(self.deserialize_body(request, action))
-
-        accept = self.get_expected_content_type(request)
-
-        return (action, action_args, accept)
-
-    def deserialize_headers(self, request, action):
-        return self.headers_deserializer.deserialize(request, action)
-
-    def deserialize_body(self, request, action):
-        try:
-            content_type = request.best_match_content_type()
-        except exception.InvalidContentType:
-            LOG.debug("Unrecognized Content-Type provided in request")
-            return {}
-
-        if content_type is None:
-            LOG.debug("No Content-Type provided in request")
-            return {}
-
-        if not len(request.body) > 0:
-            LOG.debug("Empty body provided in request")
-            return {}
-
-        try:
-            deserializer = self.get_body_deserializer(content_type)
-        except exception.InvalidContentType:
-            with excutils.save_and_reraise_exception():
-                LOG.debug("Unable to deserialize body as provided "
-                          "Content-Type")
-
-        return deserializer.deserialize(request.body, action)
-
-    def get_body_deserializer(self, content_type):
-        try:
-            return self.body_deserializers[content_type]
-        except (KeyError, TypeError):
-            raise exception.InvalidContentType(content_type=content_type)
-
-    def get_expected_content_type(self, request):
-        return request.best_match_content_type()
-
-    def get_action_args(self, request_environment):
-        """Parse dictionary created by routes library."""
-        try:
-            args = request_environment['wsgiorg.routing_args'][1].copy()
-        except Exception:
-            return {}
-
-        try:
-            del args['controller']
-        except KeyError:
-            pass
-
-        try:
-            del args['format']
-        except KeyError:
-            pass
-
-        return args
-
-
-class Application(object):
-    """Base WSGI application wrapper. Subclasses need to implement __call__."""
-
-    @classmethod
-    def factory(cls, global_config, **local_config):
-        """Used for paste app factories in paste.deploy config files.
-
-        Any local configuration (that is, values under the [app:APPNAME]
-        section of the paste config) will be passed into the `__init__` method
-        as kwargs.
-
-        A hypothetical configuration would look like:
-
-            [app:wadl]
-            latest_version = 1.3
-            paste.app_factory = neutron.api.fancy_api:Wadl.factory
-
-        which would result in a call to the `Wadl` class as
-
-            from neutron.api import fancy_api
-            fancy_api.Wadl(latest_version='1.3')
-
-        You could of course re-implement the `factory` method in subclasses,
-        but with the kwarg passing shown above it shouldn't be necessary.
-
-        """
-        return cls(**local_config)
-
-    def __call__(self, environ, start_response):
-        r"""Subclasses will probably want to implement __call__ like this:
-
-        @webob.dec.wsgify(RequestClass=Request)
-        def __call__(self, req):
-          # Any of the following objects work as responses:
-
-          # Option 1: simple string
-          res = 'message\n'
-
-          # Option 2: a nicely formatted HTTP exception page
-          res = exc.HTTPForbidden(explanation='Nice try')
-
-          # Option 3: a webob Response object (in case you need to play with
-          # headers, or you want to be treated like an iterable, or or or)
-          res = Response()
-          res.app_iter = open('somefile')
-
-          # Option 4: any wsgi app to be run next
-          res = self.application
-
-          # Option 5: you can get a Response object for a wsgi app, too, to
-          # play with headers etc
-          res = req.get_response(self.application)
-
-          # You can then just return your response...
-          return res
-          # ... or set req.response and return None.
-          req.response = res
-
-        See the end of http://pythonpaste.org/webob/modules/dec.html
-        for more info.
-
-        """
-        raise NotImplementedError(_('You must implement __call__'))
-
-
-class Resource(Application):
-    """WSGI app that handles (de)serialization and controller dispatch.
-
-    WSGI app that reads routing information supplied by RoutesMiddleware
-    and calls the requested action method upon its controller.  All
-    controller action methods must accept a 'req' argument, which is the
-    incoming wsgi.Request. If the operation is a PUT or POST, the controller
-    method must also accept a 'body' argument (the deserialized request body).
-    They may raise a webob.exc exception or return a dict, which will be
-    serialized according to the requested content type.
-
-    """
-
-    def __init__(self, controller, fault_body_function,
-                 deserializer=None, serializer=None):
-        """Object initialization.
-
-        :param controller: object that implements methods created by routes lib
-        :param deserializer: object that can deserialize a webob request
-                             into the necessary pieces
-        :param serializer: object that can serialize the output of a
-                           controller into a webob response
-        :param fault_body_function: a function that will build the response
-                                    body for HTTP errors raised by operations
-                                    on this resource object
-
-        """
-        self.controller = controller
-        self.deserializer = deserializer or RequestDeserializer()
-        self.serializer = serializer or ResponseSerializer()
-        self._fault_body_function = fault_body_function
-
-    @webob.dec.wsgify(RequestClass=Request)
-    def __call__(self, request):
-        """WSGI method that controls (de)serialization and method dispatch."""
-
-        LOG.info(_LI("%(method)s %(url)s"),
-                 {"method": request.method, "url": request.url})
-
-        try:
-            action, args, accept = self.deserializer.deserialize(request)
-        except exception.InvalidContentType:
-            msg = _("Unsupported Content-Type")
-            LOG.exception(_LE("InvalidContentType: %s"), msg)
-            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
-        except exception.MalformedRequestBody:
-            msg = _("Malformed request body")
-            LOG.exception(_LE("MalformedRequestBody: %s"), msg)
-            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
-
-        try:
-            action_result = self.dispatch(request, action, args)
-        except webob.exc.HTTPException as ex:
-            LOG.info(_LI("HTTP exception thrown: %s"), ex)
-            action_result = Fault(ex, self._fault_body_function)
-        except Exception:
-            LOG.exception(_LE("Internal error"))
-            # Do not include the traceback to avoid returning it to clients.
-            action_result = Fault(webob.exc.HTTPServerError(),
-                                  self._fault_body_function)
-
-        if isinstance(action_result, dict) or action_result is None:
-            response = self.serializer.serialize(action_result,
-                                                 accept,
-                                                 action=action)
-        else:
-            response = action_result
-
-        try:
-            LOG.info(_LI("%(url)s returned with HTTP %(status)d"),
-                     dict(url=request.url, status=response.status_int))
-        except AttributeError as e:
-            LOG.info(_LI("%(url)s returned a fault: %(exception)s"),
-                     dict(url=request.url, exception=e))
-
-        return response
-
-    def dispatch(self, request, action, action_args):
-        """Find action-spefic method on controller and call it."""
-
-        controller_method = getattr(self.controller, action)
-        try:
-            # NOTE(salvatore-orlando): the controller method must have
-            # an argument whose name is 'request'
-            return controller_method(request=request, **action_args)
-        except TypeError as exc:
-            LOG.exception(exc)
-            return Fault(webob.exc.HTTPBadRequest())
-
-
-def _default_body_function(wrapped_exc):
-    code = wrapped_exc.status_int
-    fault_data = {
-        'Error': {
-            'code': code,
-            'message': wrapped_exc.explanation}}
-    # 'code' is an attribute on the fault tag itself
-    metadata = {'attributes': {'Error': 'code'}}
-    return fault_data, metadata
-
-
-class Fault(webob.exc.HTTPException):
-    """Generates an HTTP response from a webob HTTP exception."""
-
-    def __init__(self, exception, body_function=None):
-        """Creates a Fault for the given webob.exc.exception."""
-        self.wrapped_exc = exception
-        self.status_int = self.wrapped_exc.status_int
-        self._body_function = body_function or _default_body_function
-
-    @webob.dec.wsgify(RequestClass=Request)
-    def __call__(self, req):
-        """Generate a WSGI response based on the exception passed to ctor."""
-        # Replace the body with fault details.
-        fault_data, metadata = self._body_function(self.wrapped_exc)
-        content_type = req.best_match_content_type()
-        serializer = {
-            'application/json': JSONDictSerializer(),
-        }[content_type]
-
-        self.wrapped_exc.body = serializer.serialize(fault_data)
-        self.wrapped_exc.content_type = content_type
-        return self.wrapped_exc
-
-
-# NOTE(salvatore-orlando): this class will go once the
-# extension API framework is updated
-class Controller(object):
-    """WSGI app that dispatched to methods.
-
-    WSGI app that reads routing information supplied by RoutesMiddleware
-    and calls the requested action method upon itself.  All action methods
-    must, in addition to their normal parameters, accept a 'req' argument
-    which is the incoming wsgi.Request.  They raise a webob.exc exception,
-    or return a dict which will be serialized by requested content type.
-
-    """
-
-    @webob.dec.wsgify(RequestClass=Request)
-    def __call__(self, req):
-        """Call the method specified in req.environ by RoutesMiddleware."""
-        arg_dict = req.environ['wsgiorg.routing_args'][1]
-        action = arg_dict['action']
-        method = getattr(self, action)
-        del arg_dict['controller']
-        del arg_dict['action']
-        if 'format' in arg_dict:
-            del arg_dict['format']
-        arg_dict['request'] = req
-        result = method(**arg_dict)
-
-        if isinstance(result, dict) or result is None:
-            if result is None:
-                status = 204
-                content_type = ''
-                body = None
-            else:
-                status = 200
-                content_type = req.best_match_content_type()
-                body = self._serialize(result, content_type)
-
-            response = webob.Response(status=status,
-                                      content_type=content_type,
-                                      body=body)
-            LOG.debug("%(url)s returned with HTTP %(status)d",
-                      dict(url=req.url, status=response.status_int))
-            return response
-        else:
-            return result
-
-    def _serialize(self, data, content_type):
-        """Serialize the given dict to the provided content_type.
-
-        Uses self._serialization_metadata if it exists, which is a dict mapping
-        MIME types to information needed to serialize to that type.
-
-        """
-        _metadata = getattr(type(self), '_serialization_metadata', {})
-
-        serializer = Serializer(_metadata)
-        try:
-            return serializer.serialize(data, content_type)
-        except exception.InvalidContentType:
-            msg = _('The requested content type %s is invalid.') % content_type
-            raise webob.exc.HTTPNotAcceptable(msg)
-
-    def _deserialize(self, data, content_type):
-        """Deserialize the request body from the specified content type.
-
-        Uses self._serialization_metadata if it exists, which is a dict mapping
-        MIME types to information needed to deserialize from that type.
-
-        """
-        _metadata = getattr(type(self), '_serialization_metadata', {})
-        serializer = Serializer(_metadata)
-        return serializer.deserialize(data, content_type)['body']
-
-
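For illustration, a minimal hypothetical subclass of the Controller above; the resource name, action, and payload are invented for the example::

    # Hypothetical controller; 'PortController', the 'show' action, and the
    # returned payload are illustrative only.
    class PortController(Controller):

        def show(self, request, id):
            # Action methods receive the incoming request plus any routing
            # arguments.  A returned dict is serialized to the negotiated
            # content type (HTTP 200); returning None yields an empty 204.
            return {'port': {'id': id, 'status': 'ACTIVE'}}
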
-# NOTE(salvatore-orlando): this class will go once the
-# extension API framework is updated
-class Serializer(object):
-    """Serializes and deserializes dictionaries to certain MIME types."""
-
-    def __init__(self, metadata=None):
-        """Create a serializer configured with the given metadata.
-
-        'metadata' is an optional dict mapping MIME types to information
-        needed to serialize a dictionary to that type.
-
-        """
-        self.metadata = metadata or {}
-
-    def _get_serialize_handler(self, content_type):
-        handlers = {
-            'application/json': JSONDictSerializer(),
-        }
-
-        try:
-            return handlers[content_type]
-        except KeyError:
-            raise exception.InvalidContentType(content_type=content_type)
-
-    def serialize(self, data, content_type):
-        """Serialize a dictionary into the specified content type."""
-        return self._get_serialize_handler(content_type).serialize(data)
-
-    def deserialize(self, datastring, content_type):
-        """Deserialize a string to a dictionary.
-
-        The string must be in the format of a supported MIME type.
-
-        """
-        try:
-            return self.get_deserialize_handler(content_type).deserialize(
-                datastring)
-        except Exception:
-            raise webob.exc.HTTPBadRequest(_("Could not deserialize data"))
-
-    def get_deserialize_handler(self, content_type):
-        handlers = {
-            'application/json': JSONDeserializer(),
-        }
-
-        try:
-            return handlers[content_type]
-        except KeyError:
-            raise exception.InvalidContentType(content_type=content_type)
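As an editorial aside, a small sketch of how Fault and the JSON serializer above combine; the request path, headers, and explanation text are invented, and it assumes this module's Request class provides the content negotiation Fault relies on::

    import webob
    import webob.exc

    # Wrap a webob exception so the response body carries serialized fault
    # data instead of the default HTML error page.
    fault = Fault(webob.exc.HTTPNotFound(explanation='Port not found'))

    # Invoking the Fault as a WSGI application replaces the body with
    # {"Error": {"code": 404, "message": "Port not found"}} rendered as JSON.
    request = webob.Request.blank('/ports/1',
                                  headers={'Accept': 'application/json'})
    response = request.get_response(fault)
    print(response.status_int, response.body)
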
diff --git a/openstack-common.conf b/openstack-common.conf
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst
deleted file mode 100644 (file)
index 9213d95..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-Rally job related files
-=======================
-
-This directory contains rally tasks and plugins that are run by OpenStack CI.
-
-Structure
----------
-
-* plugins - directory where you can add rally plugins. Almost everything in
-  Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks,
-  generic cleanup resources, and so on.
-
-* extra - all files from this directory will be copied to the gate nodes, so
-  you are able to use absolute paths in rally tasks.
-  Files will be located in ~/.rally/extra/*
-
-* neutron-neutron.yaml is a task that is run in the gate against OpenStack
-  with the Neutron service deployed by DevStack.
-
-Useful links
-------------
-
-* More about Rally: https://rally.readthedocs.org/en/latest/
-
-* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html
-
-* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html
-
-* About plugins:  https://rally.readthedocs.org/en/latest/plugins.html
-
-* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins
diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst
deleted file mode 100644 (file)
index aab343c..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-Extra files
-===========
-
-All files from this directory will be copied to the gate nodes, so you are
-able to use absolute paths in rally tasks. Files will be in ~/.rally/extra/*
-
diff --git a/rally-jobs/neutron-neutron.yaml b/rally-jobs/neutron-neutron.yaml
deleted file mode 100644 (file)
index 86852e1..0000000
+++ /dev/null
@@ -1,296 +0,0 @@
----
-  NeutronNetworks.create_and_list_networks:
-    -
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_list_subnets:
-    -
-      args:
-        subnets_per_network: 2
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            subnet: -1
-            network: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_list_routers:
-    -
-      args:
-        network_create_args:
-        subnet_create_args:
-        subnet_cidr_start: "1.1.0.0/30"
-        subnets_per_network: 2
-        router_create_args:
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-            subnet: -1
-            router: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_list_ports:
-    -
-      args:
-        network_create_args:
-        port_create_args:
-        ports_per_network: 2
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-            subnet: -1
-            router: -1
-            port: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_update_networks:
-    -
-      args:
-        network_create_args: {}
-        network_update_args:
-            admin_state_up: False
-            name: "_updated"
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_update_subnets:
-    -
-      args:
-        network_create_args: {}
-        subnet_create_args: {}
-        subnet_cidr_start: "1.4.0.0/16"
-        subnets_per_network: 2
-        subnet_update_args:
-            enable_dhcp: False
-            name: "_subnet_updated"
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 5
-          users_per_tenant: 5
-        quotas:
-          neutron:
-            network: -1
-            subnet: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_update_routers:
-    -
-      args:
-        network_create_args: {}
-        subnet_create_args: {}
-        subnet_cidr_start: "1.1.0.0/30"
-        subnets_per_network: 2
-        router_create_args: {}
-        router_update_args:
-            admin_state_up: False
-            name: "_router_updated"
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-            subnet: -1
-            router: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_update_ports:
-    -
-      args:
-        network_create_args: {}
-        port_create_args: {}
-        ports_per_network: 5
-        port_update_args:
-            admin_state_up: False
-            device_id: "dummy_id"
-            device_owner: "dummy_owner"
-            name: "_port_updated"
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-            port: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_delete_networks:
-    -
-      args:
-        network_create_args: {}
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-            subnet: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_delete_subnets:
-    -
-      args:
-        network_create_args: {}
-        subnet_create_args: {}
-        subnet_cidr_start: "1.1.0.0/30"
-        subnets_per_network: 2
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-            subnet: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_delete_routers:
-    -
-      args:
-        network_create_args: {}
-        subnet_create_args: {}
-        subnet_cidr_start: "1.1.0.0/30"
-        subnets_per_network: 2
-        router_create_args: {}
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-            subnet: -1
-            router: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  NeutronNetworks.create_and_delete_ports:
-    -
-      args:
-        network_create_args: {}
-        port_create_args: {}
-        ports_per_network: 5
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        quotas:
-          neutron:
-            network: -1
-            port: -1
-      sla:
-        failure_rate:
-          max: 0
-
-  Quotas.neutron_update:
-    -
-      args:
-        max_quota: 1024
-      runner:
-        type: "constant"
-        times: 40
-        concurrency: 20
-      context:
-        users:
-          tenants: 20
-          users_per_tenant: 1
-      sla:
-        failure_rate:
-          max: 0
-
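For reference, each top-level key in the task file above maps a scenario name to a list of task configurations; a short sketch (assuming PyYAML is installed and the file is present) that summarizes the runner settings::

    import yaml

    # Load the task file and print each scenario's constant-runner settings.
    with open('rally-jobs/neutron-neutron.yaml') as f:
        tasks = yaml.safe_load(f)

    for name, configs in sorted(tasks.items()):
        runner = configs[0]['runner']
        print('%s: %s iterations, concurrency %s'
              % (name, runner['times'], runner['concurrency']))
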
diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst
deleted file mode 100644 (file)
index 33bec0d..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-Rally plugins
-=============
-
-All *.py modules from this directory will be auto-loaded by Rally, and all
-plugins will be discoverable. No extra configuration is needed, and there is
-no difference between writing them here and in the Rally code base.
-
-Note that it is better to push all interesting and useful benchmarks to the
-Rally code base, as this simplifies administration for operators.
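As a rough sketch of the auto-loading described above, a hypothetical minimal scenario plugin; the class, the scenario name, and the rally.task.scenario import path are assumptions about Rally's plugin API of that era, not taken from this repository::

    # Hypothetical Rally scenario plugin; dropping this module into the
    # plugins directory is assumed to be enough for Rally to discover it.
    from rally.task import scenario


    class NeutronDummy(scenario.Scenario):

        @scenario.configure(name="NeutronDummy.noop")
        def noop(self):
            """Do nothing; exists only to demonstrate plugin discovery."""
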
diff --git a/rally-jobs/plugins/__init__.py b/rally-jobs/plugins/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/releasenotes/notes/add-availability-zone-4440cf00be7c54ba.yaml b/releasenotes/notes/add-availability-zone-4440cf00be7c54ba.yaml
deleted file mode 100644 (file)
index edb53cd..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
----
-prelude: >
-    DHCP and L3 Agent scheduling is availability zone aware.
-features:
-  - A DHCP agent is assigned to an availability zone; the network will be
-    hosted by a DHCP agent in the availability zone specified by the user.
-  - An L3 agent is assigned to an availability zone; the router will be hosted
-    by an L3 agent in the availability zone specified by the user. This
-    supports the use of availability zones with HA routers. DVR isn't
-    supported yet because the L3 HA and DVR integration isn't finished.
-other:
-  - Please read the `OpenStack Networking Guide
-    <http://docs.openstack.org/networking-guide/adv_config_availability_zone.html>`_.
diff --git a/releasenotes/notes/config-file-generation-2eafc6602d57178e.yaml b/releasenotes/notes/config-file-generation-2eafc6602d57178e.yaml
deleted file mode 100644 (file)
index 62f1467..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
----
-prelude: >
-   Core configuration files are automatically generated.
-features:
-  - Neutron no longer includes static example configuration files. Instead,
-    use tools/generate_config_file_samples.sh to generate them. The files are
-    generated with a .sample extension.
diff --git a/releasenotes/notes/default-local-dns-a1c3fa1451f228fa.yaml b/releasenotes/notes/default-local-dns-a1c3fa1451f228fa.yaml
deleted file mode 100644 (file)
index 0c77e96..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
----
-fixes:
-  - Prior to Mitaka, when no DNS resolvers were specified, either via the
-    'dnsmasq_dns_servers' option in the DHCP agent configuration file or via
-    neutron subnet options, the dnsmasq service offered instances the IP
-    address on which it resides for name resolution. However, the static
-    dnsmasq '--no-resolv' process argument prevented name resolution via
-    dnsmasq, leaving instances without name resolution. In Mitaka and later,
-    the 'dnsmasq_local_resolv' option in the DHCP agent configuration file
-    enables (by default) the dnsmasq service to provide name resolution for
-    instances via the DNS resolvers on the host running the DHCP agent, by
-    effectively removing the '--no-resolv' option from the dnsmasq process
-    arguments. Adding custom DNS resolvers to the 'dnsmasq_dns_servers'
-    option in the DHCP agent configuration file disables this feature.
diff --git a/releasenotes/notes/deprecate-router_id-34aca9ea5ee9e789.yaml b/releasenotes/notes/deprecate-router_id-34aca9ea5ee9e789.yaml
deleted file mode 100644 (file)
index fb83376..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
----
-upgrade:
-  - The router_id option is deprecated and will be removed in the 'N' cycle.
diff --git a/releasenotes/notes/deprecated-driver-e368e0befc9bee4c.yaml b/releasenotes/notes/deprecated-driver-e368e0befc9bee4c.yaml
deleted file mode 100644 (file)
index 8330c21..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
----
-prelude: >
-    OFAgent has been decomposed and deprecated in the Mitaka cycle.
-other:
-  - The OpenFlow Agent (OFAgent) mechanism driver has been completely
-    decomposed from the neutron tree in Mitaka. The OFAgent driver and its
-    agent are also deprecated in favor of the Open vSwitch mechanism driver
-    with the "native" of_interface in Mitaka, and will be removed in the
-    next release.
diff --git a/releasenotes/notes/direct-physical-vnic-878d15bdb758b70e.yaml b/releasenotes/notes/direct-physical-vnic-878d15bdb758b70e.yaml
deleted file mode 100644 (file)
index 4a826fa..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
----
-prelude: >
-    Add new VNIC type for SR-IOV physical functions.
-features:
-  - Neutron now supports creation of ports for exposing physical functions
-    as network devices to guests.
diff --git a/releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yaml b/releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yaml
deleted file mode 100644 (file)
index af9884c..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
----
-upgrade:
-  - The Hyper-V Neutron Agent has been fully decomposed from Neutron.
-    The `neutron.plugins.hyperv.agent.security_groups_driver.HyperVSecurityGroupsDriver`
-    firewall driver has been deprecated and will be removed in the 'O' cycle.
-    Update the `neutron_hyperv_agent.conf` files on the Hyper-V nodes to
-    use `hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver`,
-    which is the networking_hyperv security groups driver.
diff --git a/releasenotes/notes/linuxbridge-agent-extensions-66bdf9feee25ef99.yaml b/releasenotes/notes/linuxbridge-agent-extensions-66bdf9feee25ef99.yaml
deleted file mode 100644 (file)
index a839c91..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
----
-prelude: >
-    The Linuxbridge agent now supports L2 agent extensions.
-features:
-  - The Linuxbridge agent can now be extended by third parties using a
-    pluggable mechanism.
-fixes:
-  - Partially closes bug 1468803.
diff --git a/releasenotes/notes/macvtap_assigned_vf_check-f4d07660ffd82a24.yaml b/releasenotes/notes/macvtap_assigned_vf_check-f4d07660ffd82a24.yaml
deleted file mode 100644 (file)
index c74a571..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed the SR-IOV agent macvtap assigned VF check on Linux kernels < 3.13.
diff --git a/releasenotes/notes/oslo-reports-166a169037bf64f2.yaml b/releasenotes/notes/oslo-reports-166a169037bf64f2.yaml
deleted file mode 100644 (file)
index ef91a05..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
----
-prelude: >
-    Neutron is integrated with the Guru Meditation Reports library.
-features:
-  - Neutron services now respond to the SIGUSR2 signal by dumping valuable
-    debug information to standard error output.
diff --git a/releasenotes/notes/rm-notify-entry-points-aa442134a780469a.yaml b/releasenotes/notes/rm-notify-entry-points-aa442134a780469a.yaml
deleted file mode 100644 (file)
index 763450f..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
----
-prelude: >
-    oslo.messaging.notify.drivers entry points are deprecated
-other:
-  - The oslo.messaging.notify.drivers entry points that were left in tree for
-    backward compatibility with Icehouse are deprecated and will be removed
-    after liberty-eol. Configure notifications using the oslo_messaging
-    configuration options in neutron.conf.
diff --git a/releasenotes/notes/sriov_show_l2_agent_extensions-ca852e155a529e99.yaml b/releasenotes/notes/sriov_show_l2_agent_extensions-ca852e155a529e99.yaml
deleted file mode 100644 (file)
index 8387f88..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Loaded agent extensions of the SR-IOV agent are now shown in the agent state API.
\ No newline at end of file
diff --git a/releasenotes/notes/use-keystoneauth-24f309566001a16b.yaml b/releasenotes/notes/use-keystoneauth-24f309566001a16b.yaml
deleted file mode 100644 (file)
index 2b0d7d7..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
----
-upgrade:
-  - Neutron depends on keystoneauth instead of keystoneclient.
-features:
-  - Neutron can interact with keystone v3.
diff --git a/releasenotes/source/README.rst b/releasenotes/source/README.rst
deleted file mode 100644 (file)
index af22748..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-===========================
-Neutron Release Notes Howto
-===========================
-
-Release notes are a new mechanism for documenting new features in
-OpenStack projects. Background on the process, tooling, and
-methodology is documented in a `mailing list post by Doug Hellmann <http://lists.openstack.org/pipermail/openstack-dev/2015-November/078301.html>`_.
-
-For information on how to create release notes, please consult the
-`Release Notes documentation <http://docs.openstack.org/developer/reno/>`_.
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
deleted file mode 100644 (file)
index 6a67c8a..0000000
+++ /dev/null
@@ -1,274 +0,0 @@
-# -*- coding: utf-8 -*-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Neutron Release Notes documentation build configuration file, created by
-# sphinx-quickstart on Tue Nov  3 17:40:50 2015.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-# sys.path.insert(0, os.path.abspath('.'))
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
-    'oslosphinx',
-    'reno.sphinxext',
-]
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-# source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Neutron Release Notes'
-copyright = u'2015, Neutron Developers'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-from neutron.version import version_info as neutron_version
-# The full version, including alpha/beta/rc tags.
-release = neutron_version.version_string_with_vcs()
-# The short X.Y version.
-version = neutron_version.canonical_version_string()
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-# language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-# today = ''
-# Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-# default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-# add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-# show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
-
-# If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False
-
-
-# -- Options for HTML output ----------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-# html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-# html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-# html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-# html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-# html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# Add any extra paths that contain custom files (such as robots.txt or
-# .htaccess) here, relative to this directory. These files are copied
-# directly to the root of the documentation.
-# html_extra_path = []
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-# html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-# html_additional_pages = {}
-
-# If false, no module index is generated.
-# html_domain_indices = True
-
-# If false, no index is generated.
-# html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-# html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-# html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-# html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'NeutronReleaseNotesdoc'
-
-
-# -- Options for LaTeX output ---------------------------------------------
-
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    # 'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    # 'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    # 'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-#  author, documentclass [howto, manual, or own class]).
-latex_documents = [
-    ('index', 'NeutronReleaseNotes.tex',
-     u'Neutron Release Notes Documentation',
-     u'Neutron Developers', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-# latex_use_parts = False
-
-# If true, show page references after internal links.
-# latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-# latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-# latex_appendices = []
-
-# If false, no module index is generated.
-# latex_domain_indices = True
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'neutronreleasenotes', u'Neutron Release Notes Documentation',
-     [u'Neutron Developers'], 1)
-]
-
-# If true, show URL addresses after external links.
-# man_show_urls = False
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-#  dir menu entry, description, category)
-texinfo_documents = [
-    ('index', 'NeutronReleaseNotes', u'Neutron Release Notes Documentation',
-     u'Neutron Developers', 'NeutronReleaseNotes',
-     'One line description of project.',
-     'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-# texinfo_appendices = []
-
-# If false, no module index is generated.
-# texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-# texinfo_no_detailmenu = False
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
deleted file mode 100644 (file)
index c14df18..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-=======================
- Neutron Release Notes
-=======================
-
-.. toctree::
-   :maxdepth: 1
-
-   README.rst
-   liberty
-   unreleased
diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst
deleted file mode 100644 (file)
index 36217be..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-==============================
- Liberty Series Release Notes
-==============================
-
-.. release-notes::
-   :branch: origin/stable/liberty
diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst
deleted file mode 100644 (file)
index bd360ba..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-=============================
- Current Series Release Notes
-=============================
-
-.. release-notes::
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644 (file)
index 2c1e1e6..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-pbr>=1.6 # Apache-2.0
-
-Paste # MIT
-PasteDeploy>=1.5.0 # MIT
-Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7' # MIT
-Routes!=2.0,>=1.12.3;python_version!='2.7' # MIT
-debtcollector>=0.3.0 # Apache-2.0
-eventlet>=0.17.4 # MIT
-pecan>=1.0.0 # BSD
-greenlet>=0.3.2 # MIT
-httplib2>=0.7.5 # MIT
-requests!=2.9.0,>=2.8.1 # Apache-2.0
-Jinja2>=2.8 # BSD License (3 clause)
-keystonemiddleware>=4.0.0 # Apache-2.0
-netaddr!=0.7.16,>=0.7.12 # BSD
-python-neutronclient>=2.6.0 # Apache-2.0
-retrying!=1.3.0,>=1.2.3 # Apache-2.0
-ryu!=3.29,>=3.23.2 # Apache-2.0
-SQLAlchemy<1.1.0,>=1.0.10 # MIT
-WebOb>=1.2.3 # MIT
-keystoneauth1>=2.1.0 # Apache-2.0
-alembic>=0.8.0 # MIT
-six>=1.9.0 # MIT
-stevedore>=1.5.0 # Apache-2.0
-oslo.concurrency>=2.3.0 # Apache-2.0
-oslo.config>=3.2.0 # Apache-2.0
-oslo.context>=0.2.0 # Apache-2.0
-oslo.db>=4.1.0 # Apache-2.0
-oslo.i18n>=1.5.0 # Apache-2.0
-oslo.log>=1.14.0 # Apache-2.0
-oslo.messaging!=2.8.0,!=3.1.0,>2.6.1 # Apache-2.0
-oslo.middleware>=3.0.0 # Apache-2.0
-oslo.policy>=0.5.0 # Apache-2.0
-oslo.reports>=0.6.0 # Apache-2.0
-oslo.rootwrap>=2.0.0 # Apache-2.0
-oslo.serialization>=1.10.0 # Apache-2.0
-oslo.service>=1.0.0 # Apache-2.0
-oslo.utils>=3.2.0 # Apache-2.0
-oslo.versionedobjects>=0.13.0 # Apache-2.0
-ovs>=2.4.0;python_version=='2.7' # Apache-2.0
-
-python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
diff --git a/run_tests.sh b/run_tests.sh
deleted file mode 100755 (executable)
index a32476c..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-#!/usr/bin/env bash
-
-set -eu
-
-function usage {
-  echo "Usage: $0 [OPTION]..."
-  echo "Run Neutron's test suite(s)"
-  echo ""
-  echo "  -V, --virtual-env           Always use virtualenv.  Install automatically if not present"
-  echo "  -N, --no-virtual-env        Don't use virtualenv.  Run tests in local environment"
-  echo "  -s, --no-site-packages      Isolate the virtualenv from the global Python environment"
-  echo "  -r, --recreate-db           Recreate the test database (deprecated, as this is now the default)."
-  echo "  -n, --no-recreate-db        Don't recreate the test database."
-  echo "  -f, --force                 Force a clean re-build of the virtual environment. Useful when dependencies have been added."
-  echo "  -u, --update                Update the virtual environment with any newer package versions"
-  echo "  -p, --pep8                  Just run PEP8 and HACKING compliance check"
-  echo "  -8, --pep8-only-changed [<basecommit>]"
-  echo "                              Just run PEP8 and HACKING compliance check on files changed since HEAD~1 (or <basecommit>)"
-  echo "  -P, --no-pep8               Don't run static code checks"
-  echo "  -c, --coverage              Generate coverage report"
-  echo "  -d, --debug                 Run tests with testtools instead of testr. This allows you to use the debugger."
-  echo "  -h, --help                  Print this usage message"
-  echo "  --virtual-env-path <path>   Location of the virtualenv directory"
-  echo "                               Default: \$(pwd)"
-  echo "  --virtual-env-name <name>   Name of the virtualenv directory"
-  echo "                               Default: .venv"
-  echo "  --tools-path <dir>          Location of the tools directory"
-  echo "                               Default: \$(pwd)"
-  echo ""
-  echo "Note: with no options specified, the script will try to run the tests in a virtual environment."
-  echo "      If no virtualenv is found, the script will ask if you would like to create one.  If you"
-  echo "      prefer to run tests NOT in a virtual environment, simply pass the -N option."
-  exit
-}
-
-function process_options {
-  i=1
-  while [ $i -le $# ]; do
-    case "${!i}" in
-      -h|--help) usage;;
-      -V|--virtual-env) always_venv=1; never_venv=0;;
-      -N|--no-virtual-env) always_venv=0; never_venv=1;;
-      -s|--no-site-packages) no_site_packages=1;;
-      -r|--recreate-db) recreate_db=1;;
-      -n|--no-recreate-db) recreate_db=0;;
-      -f|--force) force=1;;
-      -u|--update) update=1;;
-      -p|--pep8) just_pep8=1;;
-      -8|--pep8-only-changed) just_pep8_changed=1;;
-      -P|--no-pep8) no_pep8=1;;
-      -c|--coverage) coverage=1;;
-      -d|--debug) debug=1;;
-      --virtual-env-path)
-        (( i++ ))
-        venv_path=${!i}
-        ;;
-      --virtual-env-name)
-        (( i++ ))
-        venv_dir=${!i}
-        ;;
-      --tools-path)
-        (( i++ ))
-        tools_path=${!i}
-        ;;
-      -*) testopts="$testopts ${!i}";;
-      *) testargs="$testargs ${!i}"
-    esac
-    (( i++ ))
-  done
-}
-
-tools_path=${tools_path:-$(pwd)}
-venv_path=${venv_path:-$(pwd)}
-venv_dir=${venv_name:-.venv}
-with_venv=tools/with_venv.sh
-always_venv=0
-never_venv=0
-force=0
-no_site_packages=0
-installvenvopts=
-testargs=
-testopts=
-wrapper=""
-just_pep8=0
-just_pep8_changed=0
-no_pep8=0
-coverage=0
-debug=0
-recreate_db=1
-update=0
-
-LANG=en_US.UTF-8
-LANGUAGE=en_US:en
-LC_ALL=C
-
-process_options $@
-# Make our paths available to other scripts we call
-export venv_path
-export venv_dir
-export venv_name
-export tools_path
-export venv=${venv_path}/${venv_dir}
-
-if [ $no_site_packages -eq 1 ]; then
-  installvenvopts="--no-site-packages"
-fi
-
-
-function run_tests {
-  # Cleanup *pyc
-  ${wrapper} find . -type f -name "*.pyc" -delete
-
-  if [ $debug -eq 1 ]; then
-    if [ "$testopts" = "" ] && [ "$testargs" = "" ]; then
-      # Default to running all tests if specific test is not
-      # provided.
-      testargs="discover ./neutron/tests"
-    fi
-    ${wrapper} python -m testtools.run $testopts $testargs
-
-    # Short circuit because all of the testr and coverage stuff
-    # below does not make sense when running testtools.run for
-    # debugging purposes.
-    return $?
-  fi
-
-  if [ $coverage -eq 1 ]; then
-    TESTRTESTS="$TESTRTESTS --coverage"
-  else
-    TESTRTESTS="$TESTRTESTS --slowest"
-  fi
-
-  # Just run the test suites in current environment
-  set +e
-  testargs=`echo "$testargs" | sed -e's/^\s*\(.*\)\s*$/\1/'`
-  TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testopts $testargs'"
-  OS_TEST_PATH=`echo $testargs|grep -o 'neutron\.tests[^[:space:]:]\+'|tr . /`
-  if [ -n "$OS_TEST_PATH" ]; then
-      os_test_dir=$(dirname "$OS_TEST_PATH")
-  else
-      os_test_dir=''
-  fi
-  if [ -d "$OS_TEST_PATH" ]; then
-      wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper"
-  elif [ -d "$os_test_dir" ]; then
-      wrapper="OS_TEST_PATH=$os_test_dir $wrapper"
-  fi
-  echo "Running \`${wrapper} $TESTRTESTS\`"
-  bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit"
-  RESULT=$?
-  set -e
-
-  copy_subunit_log
-
-  if [ $coverage -eq 1 ]; then
-    echo "Generating coverage report in covhtml/"
-    # Don't compute coverage for common code, which is tested elsewhere
-    ${wrapper} coverage combine
-    ${wrapper} coverage html --include='neutron/*' --omit='neutron/openstack/common/*' -d covhtml -i
-  fi
-
-  return $RESULT
-}
-
-function copy_subunit_log {
-  LOGNAME=`cat .testrepository/next-stream`
-  LOGNAME=$(($LOGNAME - 1))
-  LOGNAME=".testrepository/${LOGNAME}"
-  cp $LOGNAME subunit.log
-}
-
-function warn_on_flake8_without_venv {
-  if [ $never_venv -eq 1 ]; then
-    echo "**WARNING**:"
-    echo "Running flake8 without virtual env may miss OpenStack HACKING detection"
-  fi
-}
-
-function run_pep8 {
-  echo "Running flake8 ..."
-  warn_on_flake8_without_venv
-  ${wrapper} flake8
-}
-
-function run_pep8_changed {
-    # NOTE(gilliard) We want use flake8 to check the entirety of every file that has
-    # a change in it. Unfortunately the --filenames argument to flake8 only accepts
-    # file *names* and there are no files named (eg) "nova/compute/manager.py".  The
-    # --diff argument behaves surprisingly as well, because although you feed it a
-    # diff, it actually checks the file on disk anyway.
-    local target=${testargs:-HEAD~1}
-    local files=$(git diff --name-only $target | tr '\n' ' ')
-    echo "Running flake8 on ${files}"
-    warn_on_flake8_without_venv
-    diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff
-}
-
-
-TESTRTESTS="python setup.py testr"
-
-if [ $never_venv -eq 0 ]
-then
-  # Remove the virtual environment if --force used
-  if [ $force -eq 1 ]; then
-    echo "Cleaning virtualenv..."
-    rm -rf ${venv}
-  fi
-  if [ $update -eq 1 ]; then
-      echo "Updating virtualenv..."
-      python tools/install_venv.py $installvenvopts
-  fi
-  if [ -e ${venv} ]; then
-    wrapper="${with_venv}"
-  else
-    if [ $always_venv -eq 1 ]; then
-      # Automatically install the virtualenv
-      python tools/install_venv.py $installvenvopts
-      wrapper="${with_venv}"
-    else
-      echo -e "No virtual environment found...create one? (Y/n) \c"
-      read use_ve
-      if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
-        # Install the virtualenv and run the test suite in it
-        python tools/install_venv.py $installvenvopts
-        wrapper=${with_venv}
-      fi
-    fi
-  fi
-fi
-
-# Delete old coverage data from previous runs
-if [ $coverage -eq 1 ]; then
-    ${wrapper} coverage erase
-fi
-
-if [ $just_pep8 -eq 1 ]; then
-    run_pep8
-    exit
-fi
-
-if [ $just_pep8_changed -eq 1 ]; then
-    run_pep8_changed
-    exit
-fi
-
-if [ $recreate_db -eq 1 ]; then
-    rm -f tests.sqlite
-fi
-
-run_tests
-
-# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
-# not when we're running tests individually. To handle this, we need to
-# distinguish between options (testopts), which begin with a '-', and
-# arguments (testargs).
-if [ -z "$testargs" ]; then
-  if [ $no_pep8 -eq 0 ]; then
-    run_pep8
-  fi
-fi
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644 (file)
index 62b79ce..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,171 +0,0 @@
-[metadata]
-name = neutron
-summary = OpenStack Networking
-description-file =
-    README.rst
-author = OpenStack
-author-email = openstack-dev@lists.openstack.org
-home-page = http://www.openstack.org/
-classifier =
-    Environment :: OpenStack
-    Intended Audience :: Information Technology
-    Intended Audience :: System Administrators
-    License :: OSI Approved :: Apache Software License
-    Operating System :: POSIX :: Linux
-    Programming Language :: Python
-    Programming Language :: Python :: 2
-    Programming Language :: Python :: 2.7
-    Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.4
-
-[files]
-packages =
-    neutron
-data_files =
-    etc/neutron =
-        etc/api-paste.ini
-        etc/policy.json
-        etc/rootwrap.conf
-    etc/neutron/rootwrap.d =
-        etc/neutron/rootwrap.d/debug.filters
-        etc/neutron/rootwrap.d/dhcp.filters
-        etc/neutron/rootwrap.d/iptables-firewall.filters
-        etc/neutron/rootwrap.d/ebtables.filters
-        etc/neutron/rootwrap.d/ipset-firewall.filters
-        etc/neutron/rootwrap.d/l3.filters
-        etc/neutron/rootwrap.d/linuxbridge-plugin.filters
-        etc/neutron/rootwrap.d/openvswitch-plugin.filters
-    etc/neutron/plugins/cisco =
-        etc/neutron/plugins/cisco/cisco_vpn_agent.ini
-scripts =
-    bin/neutron-rootwrap-xen-dom0
-
-[entry_points]
-console_scripts =
-    neutron-db-manage = neutron.db.migration.cli:main
-    neutron-debug = neutron.debug.shell:main
-    neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main
-    neutron-keepalived-state-change = neutron.cmd.keepalived_state_change:main
-    neutron-ipset-cleanup = neutron.cmd.ipset_cleanup:main
-    neutron-l3-agent = neutron.cmd.eventlet.agents.l3:main
-    neutron-linuxbridge-agent = neutron.cmd.eventlet.plugins.linuxbridge_neutron_agent:main
-    neutron-linuxbridge-cleanup = neutron.cmd.linuxbridge_cleanup:main
-    neutron-metadata-agent = neutron.cmd.eventlet.agents.metadata:main
-    neutron-netns-cleanup = neutron.cmd.netns_cleanup:main
-    neutron-ns-metadata-proxy = neutron.cmd.eventlet.agents.metadata_proxy:main
-    neutron-openvswitch-agent = neutron.cmd.eventlet.plugins.ovs_neutron_agent:main
-    neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main
-    neutron-pd-notify = neutron.cmd.pd_notify:main
-    neutron-server = neutron.cmd.eventlet.server:main
-    neutron-rpc-server = neutron.cmd.eventlet.server:main_rpc_eventlet
-    neutron-rootwrap = oslo_rootwrap.cmd:main
-    neutron-rootwrap-daemon = oslo_rootwrap.cmd:daemon
-    neutron-usage-audit = neutron.cmd.eventlet.usage_audit:main
-    neutron-metering-agent = neutron.cmd.eventlet.services.metering_agent:main
-    neutron-sriov-nic-agent = neutron.cmd.eventlet.plugins.sriov_nic_neutron_agent:main
-    neutron-sanity-check = neutron.cmd.sanity_check:main
-neutron.core_plugins =
-    ml2 = neutron.plugins.ml2.plugin:Ml2Plugin
-neutron.service_plugins =
-    dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin
-    router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin
-    firewall = neutron_fwaas.services.firewall.fwaas_plugin:FirewallPlugin
-    lbaas = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin
-    vpnaas = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin
-    metering = neutron.services.metering.metering_plugin:MeteringPlugin
-    neutron.services.firewall.fwaas_plugin.FirewallPlugin = neutron_fwaas.services.firewall.fwaas_plugin:FirewallPlugin
-    neutron.services.loadbalancer.plugin.LoadBalancerPlugin = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin
-    neutron.services.vpn.plugin.VPNDriverPlugin = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin
-    qos = neutron.services.qos.qos_plugin:QoSPlugin
-    flavors = neutron.services.flavors.flavors_plugin:FlavorsPlugin
-neutron.qos.notification_drivers =
-    message_queue = neutron.services.qos.notification_drivers.message_queue:RpcQosServiceNotificationDriver
-neutron.ml2.type_drivers =
-    flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver
-    local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver
-    vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver
-    geneve = neutron.plugins.ml2.drivers.type_geneve:GeneveTypeDriver
-    gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver
-    vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver
-neutron.ml2.mechanism_drivers =
-    logger = neutron.tests.unit.plugins.ml2.drivers.mechanism_logger:LoggerMechanismDriver
-    test = neutron.tests.unit.plugins.ml2.drivers.mechanism_test:TestMechanismDriver
-    linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.mech_driver.mech_linuxbridge:LinuxbridgeMechanismDriver
-    openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver
-    l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver
-    sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver
-    fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver
-neutron.ml2.extension_drivers =
-    test = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestExtensionDriver
-    testdb = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestDBExtensionDriver
-    port_security = neutron.plugins.ml2.extensions.port_security:PortSecurityExtensionDriver
-    qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver
-neutron.openstack.common.cache.backends =
-    memory = neutron.openstack.common.cache._backends.memory:MemoryBackend
-neutron.ipam_drivers =
-    fake = neutron.tests.unit.ipam.fake_driver:FakeDriver
-    internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool
-neutron.agent.l2.extensions =
-    qos = neutron.agent.l2.extensions.qos:QosAgentExtension
-neutron.qos.agent_drivers =
-    ovs = neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers.qos_driver:QosOVSAgentDriver
-    sriov = neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers.qos_driver:QosSRIOVAgentDriver
-neutron.agent.linux.pd_drivers =
-    dibbler = neutron.agent.linux.dibbler:PDDibbler
-# These are for backwards compat with Icehouse notification_driver configuration values
-# TODO(mriedem): Remove these once liberty-eol happens.
-oslo.messaging.notify.drivers =
-    neutron.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver
-    neutron.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver
-    neutron.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver
-    neutron.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver
-    neutron.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver
-oslo.config.opts =
-    neutron = neutron.opts:list_opts
-    neutron.agent = neutron.opts:list_agent_opts
-    neutron.base.agent = neutron.opts:list_base_agent_opts
-    neutron.db = neutron.opts:list_db_opts
-    neutron.dhcp.agent = neutron.opts:list_dhcp_agent_opts
-    neutron.extensions = neutron.opts:list_extension_opts
-    neutron.l3.agent = neutron.opts:list_l3_agent_opts
-    neutron.metadata.agent = neutron.opts:list_metadata_agent_opts
-    neutron.metering.agent = neutron.opts:list_metering_agent_opts
-    neutron.ml2 = neutron.opts:list_ml2_conf_opts
-    neutron.ml2.linuxbridge.agent = neutron.opts:list_linux_bridge_opts
-    neutron.ml2.ovs.agent = neutron.opts:list_ovs_opts
-    neutron.ml2.sriov = neutron.opts:list_ml2_conf_sriov_opts
-    neutron.ml2.sriov.agent = neutron.opts:list_sriov_agent_opts
-    neutron.qos = neutron.opts:list_qos_opts
-    nova.auth = neutron.opts:list_auth_opts
-neutron.db.alembic_migrations =
-    neutron = neutron.db.migration:alembic_migrations
-neutron.interface_drivers =
-    ivs = neutron.agent.linux.interface:IVSInterfaceDriver
-    linuxbridge = neutron.agent.linux.interface:BridgeInterfaceDriver
-    null = neutron.agent.linux.interface:NullDriver
-    openvswitch = neutron.agent.linux.interface:OVSInterfaceDriver
-
-[build_sphinx]
-all_files = 1
-build-dir = doc/build
-source-dir = doc/source
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = neutron/locale/neutron.pot
-
-[compile_catalog]
-directory = neutron/locale
-domain = neutron
-
-[update_catalog]
-domain = neutron
-output_dir = neutron/locale
-input_file = neutron/locale/neutron.pot
-
-[wheel]
-universal = 1
-
-[pbr]
-warnerrors = true
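The entry points above are the glue between package metadata and code; for example, each console_scripts line makes pip generate a wrapper that imports a module and calls a named attribute. A hedged sketch of that resolution (the helper is illustrative, not how pbr/setuptools actually implement it)::

    import importlib

    def load_entry_point(spec):
        """Resolve an 'entry = module.path:attr' entry point string."""
        name, target = (part.strip() for part in spec.split('=', 1))
        module_path, attr = target.split(':', 1)
        module = importlib.import_module(module_path)
        return name, getattr(module, attr)

    # e.g. resolving 'neutron-db-manage = neutron.db.migration.cli:main'
    # imports neutron.db.migration.cli and returns its main callable.
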
diff --git a/setup.py b/setup.py
deleted file mode 100644 (file)
index 782bb21..0000000
--- a/setup.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
-    import multiprocessing  # noqa
-except ImportError:
-    pass
-
-setuptools.setup(
-    setup_requires=['pbr>=1.8'],
-    pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644 (file)
index ee05a10..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-hacking<0.11,>=0.10.0
-
-cliff>=1.15.0 # Apache-2.0
-coverage>=3.6 # Apache-2.0
-fixtures>=1.3.1 # Apache-2.0/BSD
-mock>=1.2 # BSD
-python-subunit>=0.0.18 # Apache-2.0/BSD
-requests-mock>=0.7.0 # Apache-2.0
-sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
-oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
-testrepository>=0.0.18 # Apache-2.0/BSD
-testtools>=1.4.0 # MIT
-testresources>=0.2.4 # Apache-2.0/BSD
-testscenarios>=0.4 # Apache-2.0/BSD
-WebTest>=2.0 # MIT
-oslotest>=1.10.0 # Apache-2.0
-os-testr>=0.4.1 # Apache-2.0
-tempest-lib>=0.13.0 # Apache-2.0
-ddt>=1.0.1 # MIT
-pylint==1.4.5 # GNU GPL v2
-reno>=0.1.1 # Apache2
-# Needed to run DB commands in virtualenvs
-PyMySQL>=0.6.2 # MIT License
diff --git a/tools/abandon_old_reviews.sh b/tools/abandon_old_reviews.sh
deleted file mode 100755 (executable)
index 96afa28..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# WARNING!
-# Please do not run this script without talking to the Neutron PTL. Auto
-# abandoning people's changes is a good thing, but must be done with care.
-#
-# before you run this modify your .ssh/config to create a
-# review.openstack.org entry:
-#
-#   Host review.openstack.org
-#   User <yourgerritusername>
-#   Port 29418
-#
-
-# Note: due to a gerrit bug somewhere, this double-posts messages. :(
-
-# first purge all the reviews that are more than 4w old and blocked by a core -2
-
-if [ "$1" = "--dry-run" ]; then
-    echo "Enabling dry run mode"
-    DRY_RUN=1
-else
-    DRY_RUN=0
-fi
-
-set -o errexit
-
-function abandon_review {
-    local gitid=$1
-    shift
-    local msg=$@
-    # echo ssh review.openstack.org gerrit review $gitid --abandon --message \"$msg\"
-    if [ $DRY_RUN -eq 1 ]; then
-       echo "Would abandon $gitid"
-    else
-       echo "Abandoning $gitid"
-       ssh review.openstack.org gerrit review $gitid --abandon --message \"$msg\"
-    fi
-}
-
-PROJECTS="(project:openstack/neutron OR project:openstack/neutron-fwaas OR \
-           project:openstack/neutron-lbaas OR project:openstack/neutron-vpnaas OR \
-           project:openstack/python-neutronclient OR project:openstack/neutron-specs)"
-
-blocked_reviews=$(ssh review.openstack.org "gerrit query --current-patch-set --format json $PROJECTS status:open age:4w label:Code-Review<=-2" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g')
-
-blocked_msg=$(cat <<EOF
-
-This review is > 4 weeks without comment and currently blocked by a
-core reviewer with a -2. We are abandoning this for now.
-
-Feel free to reactivate the review by pressing the restore button and
-contacting the reviewer with the -2 on this review to ensure you
-address their concerns.
-
-EOF
-)
-
-# For testing, put in a git rev of something you own and uncomment
-# blocked_reviews="b6c4218ae4d75b86c33fa3d37c27bc23b46b6f0f"
-
-for review in $blocked_reviews; do
-    # echo ssh review.openstack.org gerrit review $review --abandon --message \"$msg\"
-    echo "Blocked review $review"
-    abandon_review $review $blocked_msg
-done
-
-# then purge all the reviews that are > 4w with no changes and Jenkins has -1ed
-
-failing_reviews=$(ssh review.openstack.org "gerrit query  --current-patch-set --format json $PROJECTS status:open age:4w NOT label:Verified>=1,jenkins" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g')
-
-failing_msg=$(cat <<EOF
-
-This review is > 4 weeks without comment, and failed Jenkins the last
-time it was checked. We are abandoning this for now.
-
-Feel free to reactivate the review by pressing the restore button and
-leaving a 'recheck' comment to get fresh test results.
-
-EOF
-)
-
-for review in $failing_reviews; do
-    echo "Failing review $review"
-    abandon_review $review $failing_msg
-done
diff --git a/tools/check_unit_test_structure.sh b/tools/check_unit_test_structure.sh
deleted file mode 100755 (executable)
index d47eba8..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env bash
-
-# This script identifies the unit test modules that do not correspond
-# directly with a module in the code tree.  See TESTING.rst for the
-# intended structure.
-
-neutron_path=$(cd "$(dirname "$0")/.." && pwd)
-base_test_path=neutron/tests/unit
-test_path=$neutron_path/$base_test_path
-
-test_files=$(find ${test_path} -iname 'test_*.py')
-
-ignore_regexes=(
-    # The following vendor plugins are not required to conform to the
-    # structural requirements.
-    "^plugins/ibm.*$"
-    # The following test is required for oslo.versionedobjects
-    "^objects/test_objects.py$"
-    # The following open source plugin tests are not actually unit
-    # tests and are ignored pending their relocation to the functional
-    # test tree.
-    "^plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py$"
-    "^plugins/ml2/test_security_group.py$"
-    "^plugins/ml2/test_port_binding.py$"
-    "^plugins/ml2/test_extension_driver_api.py$"
-    "^plugins/ml2/test_ext_portsecurity.py$"
-    "^plugins/ml2/test_agent_scheduler.py$"
-    "^plugins/ml2/test_tracked_resources.py$"
-    "^plugins/ml2/drivers/openvswitch/agent/test_agent_scheduler.py$"
-    "^plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py$"
-    "^plugins/openvswitch/test_agent_scheduler.py$"
-)
-
-error_count=0
-ignore_count=0
-total_count=0
-for test_file in ${test_files[@]}; do
-    relative_path=${test_file#$test_path/}
-    expected_path=$(dirname $neutron_path/neutron/$relative_path)
-    test_filename=$(basename "$test_file")
-    expected_filename=${test_filename#test_}
-    # Module filename (e.g. foo/bar.py -> foo/test_bar.py)
-    filename=$expected_path/$expected_filename
-    # Package dir (e.g. foo/ -> test_foo.py)
-    package_dir=${filename%.py}
-    if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then
-        for ignore_regex in ${ignore_regexes[@]}; do
-            if [[ "$relative_path" =~ $ignore_regex ]]; then
-                ((ignore_count++))
-                continue 2
-            fi
-        done
-        echo "Unexpected test file: $base_test_path/$relative_path"
-        ((error_count++))
-    fi
-    ((total_count++))
-done
-
-if [ "$ignore_count" -ne 0 ]; then
-    echo "$ignore_count unmatched test modules were ignored"
-fi
-
-if [ "$error_count" -eq 0 ]; then
-    echo 'Success!  All test modules match targets in the code tree.'
-    exit 0
-else
-    echo "Failure! $error_count of $total_count test modules do not match targets in the code tree."
-    exit 1
-fi
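To make the enforced convention concrete, here is a sketch of the mapping for a single (hypothetical) path:

    test_file="neutron/tests/unit/agent/l3/test_agent.py"  # hypothetical
    rel=${test_file#neutron/tests/unit/}                   # agent/l3/test_agent.py
    base=${rel##*/}                                        # test_agent.py
    echo "neutron/${rel%/*}/${base#test_}"                 # neutron/agent/l3/agent.py

A test module passes the check if the printed module path exists as a file, or if the matching package directory exists.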
diff --git a/tools/clean.sh b/tools/clean.sh
deleted file mode 100755 (executable)
index b79f035..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes
-rm -rf */*.deb
-rm -rf ./plugins/**/build/ ./plugins/**/dist
-rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-*
diff --git a/tools/coding-checks.sh b/tools/coding-checks.sh
deleted file mode 100644 (file)
index 287b205..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/sh
-
-set -eu
-
-usage () {
-  echo "Usage: $0 [OPTION]..."
-  echo "Run Neutron's coding check(s)"
-  echo ""
-  echo "  -Y, --pylint [<basecommit>] Run pylint check on the entire neutron module or just files changed in basecommit (e.g. HEAD~1)"
-  echo "  -h, --help                  Print this usage message"
-  echo
-  exit 0
-}
-
-process_options () {
-  i=1
-  while [ $i -le $# ]; do
-    eval opt=\$$i
-    case $opt in
-      -h|--help) usage;;
-      -Y|--pylint) pylint=1;;
-      *) scriptargs="$scriptargs $opt"
-    esac
-    i=$((i+1))
-  done
-}
-
-run_pylint () {
-    local target="${scriptargs:-all}"
-
-    if [ "$target" = "all" ]; then
-        files="neutron"
-    else
-      case "$target" in
-        *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");;
-        *) echo "$target is an unrecognized basecommit"; exit 1;;
-      esac
-    fi
-
-    echo "Running pylint..."
-    echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..."
-    if [ -n "${files}" ]; then
-        pylint --rcfile=.pylintrc --output-format=colorized ${files}
-    else
-        echo "No python changes in this commit, pylint check not required."
-        exit 0
-    fi
-}
-
-scriptargs=
-pylint=1
-
-process_options $@
-
-if [ $pylint -eq 1 ]; then
-    run_pylint
-    exit 0
-fi
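Typical invocations, matching the usage text above (the pep8 tox environment further below calls the first form):

    # Lint the entire neutron tree (slow):
    sh ./tools/coding-checks.sh --pylint
    # Lint only python files added/modified since the given commit:
    sh ./tools/coding-checks.sh --pylint HEAD~1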
diff --git a/tools/configure_for_func_testing.sh b/tools/configure_for_func_testing.sh
deleted file mode 100755 (executable)
index d434af4..0000000
+++ /dev/null
@@ -1,272 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-set -e
-
-
-# Control variable used to determine whether to execute this script
-# directly or allow the gate_hook to import.
-IS_GATE=${IS_GATE:-False}
-USE_CONSTRAINT_ENV=${USE_CONSTRAINT_ENV:-True}
-
-
-if [[ "$IS_GATE" != "True" ]] && [[ "$#" -lt 1 ]]; then
-    >&2 echo "Usage: $0 /path/to/devstack [-i]
-Configure a host to run Neutron's functional test suite.
-
--i   Install Neutron's package dependencies.  By default, it is assumed
-     that devstack has already been used to deploy neutron to the
-     target host and that package dependencies need not be installed.
-
-Warning: This script relies on devstack to perform extensive
-modification to the underlying host.  It is recommended that it be
-invoked only on a throw-away VM."
-    exit 1
-fi
-
-
-# Skip the first argument
-OPTIND=2
-while getopts ":i" opt; do
-    case $opt in
-        i)
-            INSTALL_BASE_DEPENDENCIES=True
-            ;;
-    esac
-
-done
-
-# Default to environment variables to permit the gate_hook to override
-# when sourcing.
-VENV=${VENV:-dsvm-functional}
-# If executed in the gate, run in a constrained env
-if [[ "$IS_GATE" == "True" && "$USE_CONSTRAINT_ENV" == "True" ]]
-then
-    VENV=$VENV-constraints
-fi
-DEVSTACK_PATH=${DEVSTACK_PATH:-$1}
-PROJECT_NAME=${PROJECT_NAME:-neutron}
-REPO_BASE=${GATE_DEST:-$(cd $(dirname "$0")/../.. && pwd)}
-INSTALL_MYSQL_ONLY=${INSTALL_MYSQL_ONLY:-False}
-# The gate should automatically install dependencies.
-INSTALL_BASE_DEPENDENCIES=${INSTALL_BASE_DEPENDENCIES:-$IS_GATE}
-
-
-if [ ! -f "$DEVSTACK_PATH/stack.sh" ]; then
-  >&2 echo "Unable to find devstack at '$DEVSTACK_PATH'.  Please verify that the specified path points to a valid devstack repo."
-  exit 1
-fi
-
-
-set -x
-
-
-function _init {
-    # Subsequently-called devstack functions depend on the following variables.
-    HOST_IP=127.0.0.1
-    FILES=$DEVSTACK_PATH/files
-    TOP_DIR=$DEVSTACK_PATH
-
-    source $DEVSTACK_PATH/stackrc
-
-    # Allow the gate to override values set by stackrc.
-    DEST=${GATE_DEST:-$DEST}
-    STACK_USER=${GATE_STACK_USER:-$STACK_USER}
-}
-
-
-function _install_base_deps {
-    echo_summary "Installing base dependencies"
-
-    INSTALL_TESTONLY_PACKAGES=True
-    PACKAGES=$(get_packages general,neutron,q-agt,q-l3)
-    # Do not install 'python-' prefixed packages other than
-    # python-dev*.  Neutron's functional testing relies on deployment
-    # to a tox env so there is no point in installing python
-    # dependencies system-wide.
-    PACKAGES=$(echo $PACKAGES | perl -pe 's|python-(?!dev)[^ ]*||g')
-    install_package $PACKAGES
-}
-
-
-function _install_rpc_backend {
-    echo_summary "Installing rabbitmq"
-
-    RABBIT_USERID=${RABBIT_USERID:-stackrabbit}
-    RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST}
-    RABBIT_PASSWORD=${RABBIT_PASSWORD:-secretrabbit}
-
-    source $DEVSTACK_PATH/lib/rpc_backend
-
-    enable_service rabbit
-    install_rpc_backend
-    restart_rpc_backend
-}
-
-
-# _install_databases [install_pg]
-function _install_databases {
-    local install_pg=${1:-True}
-
-    echo_summary "Installing databases"
-
-    # Avoid attempting to configure the db if it appears to already
-    # have run.  The setup as currently defined is not idempotent.
-    if mysql openstack_citest > /dev/null 2>&1 < /dev/null; then
-        echo_summary "DB config appears to be complete, skipping."
-        return 0
-    fi
-
-    MYSQL_PASSWORD=${MYSQL_PASSWORD:-secretmysql}
-    DATABASE_PASSWORD=${DATABASE_PASSWORD:-secretdatabase}
-
-    source $DEVSTACK_PATH/lib/database
-
-    enable_service mysql
-    initialize_database_backends
-    install_database
-    configure_database_mysql
-
-    if [[ "$install_pg" == "True" ]]; then
-        enable_service postgresql
-        initialize_database_backends
-        install_database
-        configure_database_postgresql
-    fi
-
-    # Set up the 'openstack_citest' user and database in each backend
-    tmp_dir=$(mktemp -d)
-    trap "rm -rf $tmp_dir" EXIT
-
-    cat << EOF > $tmp_dir/mysql.sql
-CREATE DATABASE openstack_citest;
-CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest';
-CREATE USER 'openstack_citest' IDENTIFIED BY 'openstack_citest';
-GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'localhost';
-GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest';
-FLUSH PRIVILEGES;
-EOF
-    /usr/bin/mysql -u root < $tmp_dir/mysql.sql
-
-    if [[ "$install_pg" == "True" ]]; then
-        cat << EOF > $tmp_dir/postgresql.sql
-CREATE USER openstack_citest WITH CREATEDB LOGIN PASSWORD 'openstack_citest';
-CREATE DATABASE openstack_citest WITH OWNER openstack_citest;
-EOF
-
-        # User/group postgres needs to be given access to tmp_dir
-        setfacl -m g:postgres:rwx $tmp_dir
-        sudo -u postgres /usr/bin/psql --file=$tmp_dir/postgresql.sql
-    fi
-}
-
-
-function _install_agent_deps {
-    echo_summary "Installing agent dependencies"
-
-    source $DEVSTACK_PATH/lib/neutron-legacy
-
-    ENABLED_SERVICES=q-agt,q-dhcp,q-l3
-    install_neutron_agent_packages
-}
-
-
-# Set up the rootwrap sudoers for neutron to target the rootwrap
-# configuration deployed in the venv.
-function _install_rootwrap_sudoers {
-    echo_summary "Installing rootwrap sudoers file"
-
-    PROJECT_VENV=$REPO_BASE/$PROJECT_NAME/.tox/$VENV
-    ROOTWRAP_SUDOER_CMD="$PROJECT_VENV/bin/neutron-rootwrap $PROJECT_VENV/etc/neutron/rootwrap.conf *"
-    ROOTWRAP_DAEMON_SUDOER_CMD="$PROJECT_VENV/bin/neutron-rootwrap-daemon $PROJECT_VENV/etc/neutron/rootwrap.conf"
-    TEMPFILE=$(mktemp)
-    cat << EOF > $TEMPFILE
-# A bug in oslo.rootwrap [1] prevents commands executed with 'ip netns
-# exec' from being automatically qualified with a prefix from
-# rootwrap's configured exec_dirs.  To work around this problem, add
-# the venv bin path to a user-specific secure_path.
-#
-# While it might seem preferable to set a command-specific
-# secure_path, this would only ensure the correct path for 'ip netns
-# exec' and the command targeted for execution in the namespace would
-# not inherit the path.
-#
-# 1: https://bugs.launchpad.net/oslo.rootwrap/+bug/1417331
-#
-Defaults:$STACK_USER  secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PROJECT_VENV/bin"
-$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD
-$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD
-EOF
-    chmod 0440 $TEMPFILE
-    sudo chown root:root $TEMPFILE
-    # Name the functional testing rootwrap to ensure that it will be
-    # loaded after the devstack rootwrap (50_stack_sh if present) so
-    # that the functional testing secure_path (a superset of what
-    # devstack expects) will not be overwritten.
-    sudo mv $TEMPFILE /etc/sudoers.d/60-neutron-func-test-rootwrap
-}
-
-
-function _install_post_devstack {
-    echo_summary "Performing post-devstack installation"
-
-    _install_databases
-    _install_rootwrap_sudoers
-
-    if is_ubuntu; then
-        install_package isc-dhcp-client
-    elif is_fedora; then
-        install_package dhclient
-    else
-        exit_distro_not_supported "installing dhclient package"
-    fi
-
-    # Installing python-openvswitch from packages is a stop-gap while
-    # python-openvswitch remains unavailable from pypi.  This also
-    # requires that sitepackages=True be set in tox.ini to allow the
-    # venv to use the installed package.  Once python-openvswitch
-    # becomes available on pypi, this will no longer be required.
-    #
-    # NOTE: the package name 'python-openvswitch' is common across
-    # supported distros.
-    install_package python-openvswitch
-}
-
-
-function configure_host_for_func_testing {
-    echo_summary "Configuring host for functional testing"
-
-    if [[ "$INSTALL_BASE_DEPENDENCIES" == "True" ]]; then
-        # Installation of the following can be achieved via devstack by
-        # installing neutron, so it is conditional to minimize the work
-        # required on a devstack-configured host.
-        _install_base_deps
-        _install_agent_deps
-        _install_rpc_backend
-    fi
-    _install_post_devstack
-}
-
-
-_init
-
-
-if [[ "$IS_GATE" != "True" ]]; then
-    if [[ "$INSTALL_MYSQL_ONLY" == "True" ]]; then
-        _install_databases nopg
-    else
-        configure_host_for_func_testing
-    fi
-fi
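A quick way to sanity-check the result on a configured host; a sketch, assuming the defaults created by the SQL above were used:

    # The citest user should be able to connect to its database:
    mysql -u openstack_citest -popenstack_citest -e 'SHOW DATABASES;' openstack_citest
    # The installed sudoers fragment should parse cleanly:
    sudo visudo -c -f /etc/sudoers.d/60-neutron-func-test-rootwrap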
diff --git a/tools/copy_api_tests_from_tempest.sh b/tools/copy_api_tests_from_tempest.sh
deleted file mode 100755 (executable)
index 7084451..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env bash
-
-# This script is intended to allow repeatable migration of the neutron
-# api tests from tempest.  The goal is to allow development to
-# continue in Tempest while the migration strategy evolves.
-
-set -e
-
-if [[ "$#" -ne 1 ]]; then
-    >&2 echo "Usage: $0 /path/to/tempest
-Migrate neutron's api tests from a tempest repo."
-    exit 1
-fi
-
-TEMPEST_PATH=${TEMPEST_PATH:-$1}
-
-if [ ! -f "$TEMPEST_PATH/run_tempest.sh" ]; then
-  >&2 echo "Unable to find tempest at '$TEMPEST_PATH'.  Please verify that the specified path points to a valid tempest repo."
-  exit 1
-fi
-
-NEUTRON_PATH=${NEUTRON_PATH:-$(cd "$(dirname "$0")/.." && pwd)}
-NEUTRON_TEST_PATH=$NEUTRON_PATH/neutron/tests
-
-function copy_files {
-    local tempest_dep_paths=(
-        'tempest'
-        'tempest/common'
-        'tempest/common/generator'
-        'tempest/common/utils'
-        'tempest/services'
-        'tempest/services/identity'
-        'tempest/services/identity/v2'
-        'tempest/services/identity/v2/json'
-        'tempest/services/identity/v3'
-        'tempest/services/identity/v3/json'
-        'tempest/services/network'
-        'tempest/services/network/json'
-    )
-    for tempest_dep_path in ${tempest_dep_paths[@]}; do
-        local target_path=$NEUTRON_TEST_PATH/$tempest_dep_path
-        if [[ ! -d "$target_path" ]]; then
-            mkdir -p "$target_path"
-        fi
-        cp $TEMPEST_PATH/$tempest_dep_path/*.py "$target_path"
-    done
-    local paths_to_remove=(
-        "$NEUTRON_TEST_PATH/tempest/clients.py"
-    )
-    for path_to_remove in ${paths_to_remove[@]}; do
-        if [ -f "$path_to_remove" ]; then
-            rm "$path_to_remove"
-        fi
-    done
-
-    # Tests are now maintained in neutron/tests/api
-    cp $TEMPEST_PATH/tempest/api/network/*.py $NEUTRON_TEST_PATH/api
-    cp $TEMPEST_PATH/tempest/api/network/admin/*.py \
-        $NEUTRON_TEST_PATH/api/admin
-}
-
-function rewrite_imports {
-    regexes=(
-        's/tempest.common.generator/neutron.tests.tempest.common.generator/'
-        "s/tempest.api.network/neutron.tests.api/"
-        's/tempest.test/neutron.tests.tempest.test/'
-        's/from tempest.openstack.common import lockutils/from oslo_concurrency import lockutils/'
-        's/from tempest.openstack.common import importutils/from oslo_utils import importutils/'
-        's/tempest.openstack.common/neutron.openstack.common/'
-        's/from tempest(?!_lib) import clients/from neutron.tests.api import clients/'
-        's/from tempest(?!_lib)/from neutron.tests.tempest/'
-        's/CONF.lock_path/CONF.oslo_concurrency.lock_path/'
-    )
-    files=$(find "$NEUTRON_TEST_PATH/tempest" "$NEUTRON_TEST_PATH/api" -name '*.py')
-    for ((i = 0; i < ${#regexes[@]}; i++)); do
-        perl -p -i -e "${regexes[$i]}" $files
-    done
-}
-
-copy_files
-rewrite_imports
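The rewrite relies on perl's negative lookahead so that tempest_lib imports survive; for example, against sample lines rather than the tree:

    echo 'from tempest import test' \
        | perl -pe 's/from tempest(?!_lib)/from neutron.tests.tempest/'
    # -> from neutron.tests.tempest import test
    echo 'from tempest_lib import exceptions' \
        | perl -pe 's/from tempest(?!_lib)/from neutron.tests.tempest/'
    # -> from tempest_lib import exceptions   (unchanged)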
diff --git a/tools/deploy_rootwrap.sh b/tools/deploy_rootwrap.sh
deleted file mode 100755 (executable)
index 27d9a36..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-set -eu
-
-if [ "$#" -ne 3 ]; then
-  >&2 echo "Usage: $0 /path/to/neutron /path/to/target/etc /path/to/target/bin
-Deploy Neutron's rootwrap configuration.
-
-Warning: Any existing rootwrap files at the specified etc path will be
-removed by this script.
-
-Optional: set OS_SUDO_TESTING=1 to deploy the filters required by
-Neutron's functional testing suite."
-  exit 1
-fi
-
-OS_SUDO_TESTING=${OS_SUDO_TESTING:-0}
-
-neutron_path=$1
-target_etc_path=$2
-target_bin_path=$3
-
-src_conf_path=${neutron_path}/etc
-src_conf=${src_conf_path}/rootwrap.conf
-src_rootwrap_path=${src_conf_path}/neutron/rootwrap.d
-
-dst_conf_path=${target_etc_path}/neutron
-dst_conf=${dst_conf_path}/rootwrap.conf
-dst_rootwrap_path=${dst_conf_path}/rootwrap.d
-
-if [[ -d "$dst_rootwrap_path" ]]; then
-    rm -rf ${dst_rootwrap_path}
-fi
-mkdir -p -m 755 ${dst_rootwrap_path}
-
-cp -p ${src_rootwrap_path}/* ${dst_rootwrap_path}/
-cp -p ${src_conf} ${dst_conf}
-sed -i "s:^filters_path=.*$:filters_path=${dst_rootwrap_path}:" ${dst_conf}
-sed -i "s:^\(exec_dirs=.*\)$:\1,${target_bin_path}:" ${dst_conf}
-
-if [[ "$OS_SUDO_TESTING" = "1" ]]; then
-    sed -i 's/use_syslog=False/use_syslog=True/g' ${dst_conf}
-    sed -i 's/syslog_log_level=ERROR/syslog_log_level=DEBUG/g' ${dst_conf}
-    cp -p ${neutron_path}/neutron/tests/contrib/functional-testing.filters \
-        ${dst_rootwrap_path}/
-fi
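In-tree usage mirrors what the dsvm-functional environment in tox.ini (further below) does; from a neutron checkout:

    ./tools/deploy_rootwrap.sh . .tox/dsvm-functional/etc .tox/dsvm-functional/bin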
diff --git a/tools/generate_config_file_samples.sh b/tools/generate_config_file_samples.sh
deleted file mode 100755 (executable)
index 6b0f4ec..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-set -e
-
-GEN_CMD=oslo-config-generator
-
-if ! type "$GEN_CMD" > /dev/null; then
-    echo "ERROR: $GEN_CMD not installed on the system."
-    exit 1
-fi
-
-for file in etc/oslo-config-generator/*; do
-    $GEN_CMD --config-file=$file
-done
-
-set -x
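Each file under etc/oslo-config-generator drives one sample, so a single sample can be regenerated with an equivalent direct call (the file name here is hypothetical):

    oslo-config-generator --config-file etc/oslo-config-generator/neutron.conf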
diff --git a/tools/install_venv.py b/tools/install_venv.py
deleted file mode 100644 (file)
index f8fb8fa..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Copyright 2010 OpenStack Foundation.
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Installation script for Neutron's development virtualenv
-"""
-from __future__ import print_function
-
-import os
-import sys
-
-import install_venv_common as install_venv
-
-
-def print_help():
-    help = """
- Neutron development environment setup is complete.
-
- Neutron development uses virtualenv to track and manage Python dependencies
- while in development and testing.
-
- To activate the Neutron virtualenv for the extent of your current shell
- session you can run:
-
- $ source .venv/bin/activate
-
- Or, if you prefer, you can run commands in the virtualenv on a case by case
- basis by running:
-
- $ tools/with_venv.sh <your command>
-
- Also, make test will automatically use the virtualenv.
-    """
-    print(help)
-
-
-def main(argv):
-    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-    venv = os.path.join(root, '.venv')
-    pip_requires = os.path.join(root, 'requirements.txt')
-    test_requires = os.path.join(root, 'test-requirements.txt')
-    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
-    project = 'Neutron'
-    install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
-                                       py_version, project)
-    options = install.parse_args(argv)
-    install.check_python_version()
-    install.check_dependencies()
-    install.create_virtualenv(no_site_packages=options.no_site_packages)
-    install.install_dependencies()
-    print_help()
-
-
-if __name__ == '__main__':
-    main(sys.argv)
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
deleted file mode 100644 (file)
index e279159..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Provides methods needed by installation script for OpenStack development
-virtual environments.
-
-Since this script is used to bootstrap a virtualenv from the system's Python
-environment, it should be kept strictly compatible with Python 2.6.
-
-Synced in from openstack-common
-"""
-
-from __future__ import print_function
-
-import optparse
-import os
-import subprocess
-import sys
-
-
-class InstallVenv(object):
-
-    def __init__(self, root, venv, requirements,
-                 test_requirements, py_version,
-                 project):
-        self.root = root
-        self.venv = venv
-        self.requirements = requirements
-        self.test_requirements = test_requirements
-        self.py_version = py_version
-        self.project = project
-
-    def die(self, message, *args):
-        print(message % args, file=sys.stderr)
-        sys.exit(1)
-
-    def check_python_version(self):
-        if sys.version_info < (2, 6):
-            self.die("Need Python Version >= 2.6")
-
-    def run_command_with_code(self, cmd, redirect_output=True,
-                              check_exit_code=True):
-        """Runs a command in an out-of-process shell.
-
-        Returns the output of that command. Working directory is self.root.
-        """
-        if redirect_output:
-            stdout = subprocess.PIPE
-        else:
-            stdout = None
-
-        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
-        output = proc.communicate()[0]
-        if check_exit_code and proc.returncode != 0:
-            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
-        return (output, proc.returncode)
-
-    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
-        return self.run_command_with_code(cmd, redirect_output,
-                                          check_exit_code)[0]
-
-    def get_distro(self):
-        if (os.path.exists('/etc/fedora-release') or
-                os.path.exists('/etc/redhat-release')):
-            return Fedora(
-                self.root, self.venv, self.requirements,
-                self.test_requirements, self.py_version, self.project)
-        else:
-            return Distro(
-                self.root, self.venv, self.requirements,
-                self.test_requirements, self.py_version, self.project)
-
-    def check_dependencies(self):
-        self.get_distro().install_virtualenv()
-
-    def create_virtualenv(self, no_site_packages=True):
-        """Creates the virtual environment and installs PIP.
-
-        Creates the virtual environment and installs PIP only into the
-        virtual environment.
-        """
-        if not os.path.isdir(self.venv):
-            print('Creating venv...', end=' ')
-            if no_site_packages:
-                self.run_command(['virtualenv', '-q', '--no-site-packages',
-                                 self.venv])
-            else:
-                self.run_command(['virtualenv', '-q', self.venv])
-            print('done.')
-        else:
-            print("venv already exists...")
-
-    def pip_install(self, *args):
-        self.run_command(['tools/with_venv.sh',
-                         'pip', 'install', '--upgrade'] + list(args),
-                         redirect_output=False)
-
-    def install_dependencies(self):
-        print('Installing dependencies with pip (this can take a while)...')
-
-        # First things first, make sure our venv has the latest pip and
-        # setuptools and pbr
-        self.pip_install('pip>=1.4')
-        self.pip_install('setuptools')
-        self.pip_install('pbr')
-
-        self.pip_install('-r', self.requirements, '-r', self.test_requirements)
-
-    def parse_args(self, argv):
-        """Parses command-line arguments."""
-        parser = optparse.OptionParser()
-        parser.add_option('-n', '--no-site-packages',
-                          action='store_true',
-                          help="Do not inherit packages from global Python "
-                               "install.")
-        return parser.parse_args(argv[1:])[0]
-
-
-class Distro(InstallVenv):
-
-    def check_cmd(self, cmd):
-        return bool(self.run_command(['which', cmd],
-                    check_exit_code=False).strip())
-
-    def install_virtualenv(self):
-        if self.check_cmd('virtualenv'):
-            return
-
-        if self.check_cmd('easy_install'):
-            print('Installing virtualenv via easy_install...', end=' ')
-            if self.run_command(['easy_install', 'virtualenv']):
-                print('Succeeded')
-                return
-            else:
-                print('Failed')
-
-        self.die('ERROR: virtualenv not found.\n\n%s development'
-                 ' requires virtualenv, please install it using your'
-                 ' favorite package management tool' % self.project)
-
-
-class Fedora(Distro):
-    """This covers all Fedora-based distributions.
-
-    Includes: Fedora, RHEL, CentOS, Scientific Linux
-    """
-
-    def check_pkg(self, pkg):
-        return self.run_command_with_code(['rpm', '-q', pkg],
-                                          check_exit_code=False)[1] == 0
-
-    def install_virtualenv(self):
-        if self.check_cmd('virtualenv'):
-            return
-
-        if not self.check_pkg('python-virtualenv'):
-            self.die("Please install 'python-virtualenv'.")
-
-        super(Fedora, self).install_virtualenv()
diff --git a/tools/milestone-review-dash.py b/tools/milestone-review-dash.py
deleted file mode 100755 (executable)
index 76eca27..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/env python
-
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import argparse
-import sys
-
-from launchpadlib.launchpad import Launchpad
-
-
-def is_milestone_valid(project, name):
-    milestone_names = []
-    for s in project.active_milestones:
-        milestone_names.append(s.name)
-        if name == s.name:
-            return True
-    print("No active milestone found")
-    print("List of active milestones %s" % milestone_names)
-    return False
-
-
-def _search_task(project, **kwargs):
-    bugs = project.searchTasks(**kwargs)
-    if not bugs:
-        return
-    gerrit_query = "("
-    for b in bugs:
-        gerrit_query += ("topic:bug/%d OR " % b.bug.id)
-    gerrit_query = gerrit_query[:-4]
-    gerrit_query += ")\n\n"
-    return gerrit_query
-
-
-def get_approved_rfe_query(project):
-    return _search_task(project, **{'tags': ['rfe-approved']})
-
-
-def get_critical_bugs_query(project):
-    return _search_task(project,
-        **{'status': ["In Progress"], 'importance': ["Critical"]})
-
-
-def get_high_bugs_query(project):
-    return _search_task(project,
-        **{'status': ["In Progress"], 'importance': ["High"]})
-
-
-def get_specs_query(project, milestone):
-    query = "("
-    for s in project.valid_specifications:
-        if s.milestone is not None:
-            if s.milestone.name == milestone:
-                query += ("topic:bp/%s OR " % s.name)
-    if query == "(":
-        # no blueprint was found
-        return
-    query = query[:-4]
-    query += ")\n"
-    return query
-
-
-def write_section(f, section_name, query):
-    print(section_name)
-    if query:
-        f.write("[section \"")
-        f.write(section_name)
-        f.write("\"]\n")
-        f.write("query = ")
-        f.write(query)
-        print(query)
-    else:
-        print("No result found\n")
-
-
-def write_queries_for_project(f, project, milestone):
-    query = get_approved_rfe_query(project)
-    section_name = "Approved RFE %s" % project.name
-    write_section(f, section_name, query)
-
-    query = get_critical_bugs_query(project)
-    section_name = "Critical Bugs %s" % project.name
-    write_section(f, section_name, query)
-
-    query = get_high_bugs_query(project)
-    section_name = "High Bugs %s" % project.name
-    write_section(f, section_name, query)
-
-    query = get_specs_query(project, milestone)
-    section_name = "Blueprints %s" % project.name
-    write_section(f, section_name, query)
-
-
-parser = argparse.ArgumentParser(
-    description='Create dashboard for critical/high bugs, approved rfe and'
-                ' blueprints. A .dash file will be created in the current'
-                ' folder that you can serve as input for gerrit-dash-creator.'
-                ' The output of the script can be used to query Gerrit'
-                ' directly.')
-parser.add_argument('milestone', type=str, help='The release milestone')
-parser.add_argument('-o', '--output', type=str, help='Output file')
-
-args = parser.parse_args()
-milestone = args.milestone
-if args.output:
-    file_name = args.output
-else:
-    file_name = milestone + '.dash'
-
-cachedir = "~/.launchpadlib/cache/"
-launchpad = Launchpad.login_anonymously('just testing', 'production', cachedir,
-                                        version="devel")
-neutron = launchpad.projects['neutron']
-neutron_client = launchpad.projects['python-neutronclient']
-if not is_milestone_valid(neutron, milestone):
-    sys.exit()
-
-with open(file_name, 'w') as f:
-    title = "[dashboard]\ntitle = Neutron %s Review Inbox\n" % milestone
-    f.write(title)
-    f.write("description = Review Inbox\n")
-    f.write("foreach = (project:openstack/neutron OR "
-            "project:openstack/python-neutronclient OR "
-            "project:openstack/neutron-specs OR "
-            "project:openstack/neutron-fwaas OR "
-            "project:openstack/neutron-lbaas OR "
-            "project:openstack/neutron-vpnaas) status:open NOT owner:self "
-            "NOT label:Workflow<=-1 "
-            "NOT label:Code-Review>=-2,self branch:master\n")
-    f.write("\n")
-
-    print("Querying Launchpad, this might take a while...")
-    write_queries_for_project(f, neutron, milestone)
-    write_queries_for_project(f, neutron_client, milestone)
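Expected usage, per the argparse description above (the milestone name is hypothetical and must be an active Launchpad milestone):

    # Writes mitaka-3.dash for use with gerrit-dash-creator:
    python tools/milestone-review-dash.py mitaka-3
    # Or pick the output file explicitly:
    python tools/milestone-review-dash.py mitaka-3 -o reviews.dash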
diff --git a/tools/misc-sanity-checks.sh b/tools/misc-sanity-checks.sh
deleted file mode 100644 (file)
index 874b324..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-#! /bin/sh
-
-# Copyright (C) 2014 VA Linux Systems Japan K.K.
-# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-TMPDIR=`mktemp -d /tmp/${0##*/}.XXXXXX` || exit 1
-export TMPDIR
-trap "rm -rf $TMPDIR" EXIT
-
-FAILURES=$TMPDIR/failures
-
-
-check_no_symlinks_allowed () {
-    # Symlinks break the package build process, so ensure that none
-    # slip in, with the exception of hidden symlinks.
-    if [ $(find . -type l ! -path '*/.*' | wc -l) -ge 1 ]; then
-        echo "Symlinks are not allowed!" >>$FAILURES
-    fi
-}
-
-
-check_pot_files_errors () {
-    # The job neutron-propose-translation-update does not update from
-    # transifex since our po files contain duplicate entries where
-    # obsolete entries duplicate normal entries. Prevent obsolete
-    # entries from slipping in.
-    find neutron -type f -regex '.*\.pot?' \
-                 -print0|xargs -0 -n 1 msgfmt --check-format \
-                 -o /dev/null
-    if [ "$?" -ne 0 ]; then
-        echo "PO files syntax is not correct!" >>$FAILURES
-    fi
-}
-
-
-check_identical_policy_files () {
-    # For unit tests, we maintain a separate policy.json file to make the test
-    # suite independent of whether it's executed from the neutron source tree
-    # or from a site-packages installation path. We don't want the two copies
-    # of the file to diverge, so check that they are identical.
-    diff etc/policy.json neutron/tests/etc/policy.json > /dev/null 2>&1
-    if [ "$?" -ne 0 ]; then
-        echo "policy.json files must be identical!" >>$FAILURES
-    fi
-}
-
-# Add your checks here...
-check_no_symlinks_allowed
-check_pot_files_errors
-check_identical_policy_files
-
-# Fail, if there are emitted failures
-if [ -f $FAILURES ]; then
-    cat $FAILURES
-    exit 1
-fi
diff --git a/tools/ostestr_compat_shim.sh b/tools/ostestr_compat_shim.sh
deleted file mode 100755 (executable)
index a483ed1..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-# preserve old behavior of using an arg as a regex when '--' is not present
-case $@ in
-  (*--*) ostestr $@;;
-  ('') ostestr;;
-  (*) ostestr --regex "$@"
-esac
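The three case arms translate to the following behavior (test name hypothetical):

    ./tools/ostestr_compat_shim.sh                         # -> ostestr
    ./tools/ostestr_compat_shim.sh TestOvsNeutronAgent     # -> ostestr --regex TestOvsNeutronAgent
    ./tools/ostestr_compat_shim.sh -- TestOvsNeutronAgent  # passed through unchanged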
diff --git a/tools/pecan_server.sh b/tools/pecan_server.sh
deleted file mode 100755 (executable)
index ef36919..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2015 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# A script useful when developing changes to the codebase. It launches the
-# pecan API server and reloads it whenever the code changes, provided
-# inotifywait is installed.
-
-inotifywait --help >/dev/null 2>&1
-if [[ $? -ne 1 ]]; then
-  USE_INOTIFY=0
-else
-  USE_INOTIFY=1
-fi
-
-DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../
-source "$DIR/.tox/py27/bin/activate"
-COMMAND="python -c 'from neutron.cmd.eventlet import server; server.main_wsgi_pecan()'"
-
-function cleanup() {
-  kill $PID
-  exit 0
-}
-
-if [[ $USE_INOTIFY -eq 1 ]]; then
-  trap cleanup INT
-  while true; do
-    eval "$COMMAND &"
-    PID=$!
-    inotifywait -e modify -r $DIR/neutron/
-    kill $PID
-  done
-else
-  eval $COMMAND
-fi
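One subtlety above: the probe counts on `inotifywait --help` exiting with status 1 when the tool is present, while a missing command makes the shell return 127. A more explicit probe, as a sketch:

    if command -v inotifywait >/dev/null 2>&1; then
        USE_INOTIFY=1
    else
        USE_INOTIFY=0
    fi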
diff --git a/tools/split.sh b/tools/split.sh
deleted file mode 100755 (executable)
index 995d937..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/sh
-#
-# This script has been shamelessly copied and tweaked from the original:
-#
-# https://github.com/openstack/oslo-incubator/blob/master/tools/graduate.sh
-#
-# Use this script to export a Neutron module to a separate git repo.
-#
-# You can call this script like so:
-#
-#     ./split.sh <path to file containing list of files to export> <project name>
-#
-# The file should be a text file like the one below:
-#
-#  /path/to/file/file1
-#  /path/to/file/file2
-#  ...
-#  /path/to/file/fileN
-#
-# Such a list can be generated with a command like this:
-#
-# find $path -type f  # path is the base dir you want to list files for
-
-set -e
-
-if [ $# -lt 2 ]; then
-  echo "Usage $0 <path to file containing list of files to export> <project name>"
-  exit 1
-fi
-
-set -x
-
-file_list_path="$1"
-project_name="$2"
-files_to_keep=$(cat $file_list_path)
-
-
-# Build the grep pattern for ignoring files that we want to keep
-keep_pattern="\($(echo $files_to_keep | sed -e 's/^/\^/' -e 's/ /\\|\^/g')\)"
-# Prune all other files in every commit
-pruner="git ls-files | grep -v \"$keep_pattern\" | git update-index --force-remove --stdin; git ls-files > /dev/stderr"
-
-# Find all first commits with listed files and find a subset of them that
-# predates all others
-
-roots=""
-for file in $files_to_keep; do
-    file_root=$(git rev-list --reverse HEAD -- $file | head -n1)
-    fail=0
-    new_roots=""
-    for root in $roots; do
-        if git merge-base --is-ancestor $root $file_root; then
-            fail=1
-            break
-        elif ! git merge-base --is-ancestor $file_root $root; then
-            new_roots="$new_roots $root"
-        fi
-    done
-    if [ $fail -ne 1 ]; then
-        roots="$new_roots $file_root"
-    fi
-done
-
-# Purge all parents for those commits
-
-set_roots="
-if [ 1 -eq 0 $(for root in $roots; do echo " -o \"\$GIT_COMMIT\" = '$root' "; done) ]; then
-    echo '';
-else
-    cat;
-fi"
-
-# Enhance git_commit_non_empty_tree to skip merges with:
-# a) either two equal parents (commit that was about to land got purged as well
-# as all commits on mainline);
-# b) or with second parent being an ancestor to the first one (just as with a)
-# but when there are some commits on mainline).
-# In both cases, drop the second parent and let git_commit_non_empty_tree
-# decide whether the commit is worth keeping (most likely not).
-
-skip_empty=$(cat << \EOF
-if [ $# = 5 ] && git merge-base --is-ancestor $5 $3; then
-    git_commit_non_empty_tree $1 -p $3
-else
-    git_commit_non_empty_tree "$@"
-fi
-EOF
-)
-
-# Filter out commits for unrelated files
-echo "Pruning commits for unrelated files..."
-git filter-branch \
-    --index-filter "$pruner" \
-    --parent-filter "$set_roots" \
-    --commit-filter "$skip_empty" \
-    --tag-name-filter cat \
-    -- --all
-
-# Generate the new .gitreview file
-echo "Generating new .gitreview file..."
-cat > .gitreview <<EOF
-[gerrit]
-host=review.openstack.org
-port=29418
-project=stackforge/${project_name}.git
-EOF
-
-git add . && git commit -m "Generated new .gitreview file for ${project_name}"
-
-echo "Done."
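Putting the header's instructions together, an end-to-end sketch (module path and project name are hypothetical):

    # From a throw-away clone of neutron, since filter-branch rewrites history:
    find neutron/plugins/embrane -type f > /tmp/files-to-export.txt
    ./tools/split.sh /tmp/files-to-export.txt networking-embrane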
diff --git a/tools/with_venv.sh b/tools/with_venv.sh
deleted file mode 100755 (executable)
index 5fb07ea..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-TOOLS=`dirname $0`
-VENV=$TOOLS/../.venv
-source $VENV/bin/activate && "$@"
diff --git a/tox.ini b/tox.ini
deleted file mode 100644 (file)
index 683eb50..0000000
--- a/tox.ini
+++ /dev/null
@@ -1,197 +0,0 @@
-[tox]
-envlist = docs,py34,py27,pep8
-minversion = 2.3
-skipsdist = True
-
-[testenv]
-setenv = VIRTUAL_ENV={envdir}
-passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
-usedevelop = True
-install_command =
-                  constraints: {[testenv:common-constraints]install_command}
-                  pip install -U {opts} {packages}
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
-whitelist_externals = sh
-commands =
-  dsvm-functional: {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin
-  {toxinidir}/tools/ostestr_compat_shim.sh {posargs}
-# there is also secret magic in ostestr which lets you run in a fail only
-# mode. To do this define the TRACE_FAILONLY environmental variable.
-
-[testenv:api]
-basepython = python2.7
-passenv = {[testenv]passenv} TEMPEST_CONFIG_DIR
-setenv = {[testenv]setenv}
-         OS_TEST_PATH=./neutron/tests/api
-         TEMPEST_CONFIG_DIR={env:TEMPEST_CONFIG_DIR:/opt/stack/tempest/etc}
-         OS_TEST_API_WITH_REST=1
-
-[testenv:api-constraints]
-basepython = {[testenv:api]basepython}
-install_command = {[testenv:common-constraints]install_command}
-passenv = {[testenv:api]passenv}
-setenv = {[testenv:api]setenv}
-
-[testenv:common]
-# Fake job to define environment variables shared between dsvm/non-dsvm jobs
-setenv = OS_TEST_TIMEOUT=180
-commands = false
-
-[testenv:common-constraints]
-install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
-
-[testenv:dsvm]
-# Fake job to define environment variables shared between dsvm jobs
-setenv = OS_SUDO_TESTING=1
-         OS_ROOTWRAP_CMD=sudo {envdir}/bin/neutron-rootwrap {envdir}/etc/neutron/rootwrap.conf
-         OS_ROOTWRAP_DAEMON_CMD=sudo {envdir}/bin/neutron-rootwrap-daemon {envdir}/etc/neutron/rootwrap.conf
-         OS_FAIL_ON_MISSING_DEPS=1
-commands = false
-
-[testenv:functional]
-basepython = python2.7
-setenv = {[testenv]setenv}
-         {[testenv:common]setenv}
-         OS_TEST_PATH=./neutron/tests/functional
-deps =
-  {[testenv]deps}
-  -r{toxinidir}/neutron/tests/functional/requirements.txt
-
-[testenv:functional-constraints]
-basepython = {[testenv:functional]basepython}
-install_command = {[testenv:common-constraints]install_command}
-setenv = {[testenv:functional]setenv}
-deps =
-  {[testenv:functional]deps}
-
-[testenv:functional-py34]
-basepython = python3.4
-setenv = {[testenv:functional]setenv}
-deps =
-  {[testenv:functional]deps}
-
-[testenv:dsvm-functional]
-basepython = python2.7
-setenv = {[testenv:functional]setenv}
-         {[testenv:dsvm]setenv}
-sitepackages=True
-deps =
-  {[testenv:functional]deps}
-
-[testenv:dsvm-functional-constraints]
-basepython = {[testenv:dsvm-functional]basepython}
-install_command = {[testenv:common-constraints]install_command}
-setenv = {[testenv:dsvm-functional]setenv}
-sitepackages={[testenv:dsvm-functional]sitepackages}
-deps =
-  {[testenv:functional-constraints]deps}
-
-[testenv:dsvm-functional-py34]
-basepython = python3.4
-setenv = {[testenv:dsvm-functional]setenv}
-sitepackages={[testenv:dsvm-functional]sitepackages}
-deps =
-  {[testenv:dsvm-functional]deps}
-
-[testenv:dsvm-fullstack]
-setenv = {[testenv]setenv}
-         {[testenv:common]setenv}
-         {[testenv:dsvm]setenv}
-         OS_TEST_PATH=./neutron/tests/fullstack
-sitepackages=True
-deps =
-  {[testenv:functional]deps}
-
-[testenv:dsvm-fullstack-constraints]
-install_command = {[testenv:common-constraints]install_command}
-setenv =
-  {[testenv:dsvm-fullstack]setenv}
-sitepackages={[testenv:dsvm-fullstack]sitepackages}
-deps =
-  {[testenv:functional-constraints]deps}
-
-[testenv:releasenotes]
-commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
-
-[testenv:pep8]
-basepython = python2.7
-deps =
-  {[testenv]deps}
-commands=
-  # If it is easier to add a check via a shell script, consider adding it in this file
-  sh ./tools/misc-sanity-checks.sh
-  {toxinidir}/tools/check_unit_test_structure.sh
-  # Checks for coding and style guidelines
-  flake8
-  sh ./tools/coding-checks.sh --pylint '{posargs}'
-  neutron-db-manage --config-file neutron/tests/etc/neutron.conf check_migration
-  {[testenv:genconfig]commands}
-whitelist_externals =
-  sh
-  bash
-
-[testenv:pep8-constraints]
-basepython = {[testenv:pep8]basepython}
-install_command = {[testenv:common-constraints]install_command}
-deps =
-  {[testenv]deps}
-commands=
-  # If it is easier to add a check via a shell script, consider adding it in this file
-  sh ./tools/misc-sanity-checks.sh
-  {toxinidir}/tools/check_unit_test_structure.sh
-  # Checks for coding and style guidelines
-  flake8
-  sh ./tools/coding-checks.sh --pylint '{posargs}'
-  neutron-db-manage --config-file neutron/tests/etc/neutron.conf check_migration
-  {[testenv:genconfig]commands}
-whitelist_externals = {[testenv:pep8]whitelist_externals}
-
-[testenv:cover]
-basepython = python2.7
-commands =
-  python setup.py testr --coverage --testr-args='{posargs}'
-  coverage report
-
-[testenv:cover-constraints]
-basepython = {[testenv:cover]basepython}
-install_command = {[testenv:common-constraints]install_command}
-commands =
-  python setup.py testr --coverage --testr-args='{posargs}'
-
-[testenv:venv]
-commands = {posargs}
-
-[testenv:venv-constraints]
-install_command = {[testenv:common-constraints]install_command}
-commands = {posargs}
-
-[testenv:docs]
-commands = sphinx-build -W -b html doc/source doc/build/html
-
-[testenv:docs-constraints]
-install_command = {[testenv:common-constraints]install_command}
-commands = {[testenv:docs]commands}
-
-[flake8]
-# E125 continuation line does not distinguish itself from next logical line
-# E126 continuation line over-indented for hanging indent
-# E128 continuation line under-indented for visual indent
-# E129 visually indented line with same indent as next logical line
-# E265 block comment should start with ‘# ‘
-# H404 multi line docstring should start with a summary
-# H405 multi line docstring summary not separated with an empty line
-ignore = E125,E126,E128,E129,E265,H404,H405
-show-source = true
-builtins = _
-# neutron/tests/tempest needs to be excluded so long as it continues
-# to be copied directly from tempest, since tempest and neutron do not
-# share a flake8 configuration.
-exclude = ./.*,build,dist,neutron/openstack/common/*,neutron/tests/tempest
-
-[hacking]
-import_exceptions = neutron.i18n, neutron._i18n
-local-check-factory = neutron.hacking.checks.factory
-
-[testenv:genconfig]
-commands = {toxinidir}/tools/generate_config_file_samples.sh
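For reference, the environments above compose through ini interpolation; typical invocations would be:

    tox -e pep8             # style checks, test-structure check, pylint
    tox -e dsvm-functional  # functional tests with rootwrap deployed into the venv
    tox -e pep8-constraints # same checks, pinned to upper-constraints.txt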
similarity index 100%
rename from debian/changelog
rename to trusty/debian/changelog
similarity index 100%
rename from debian/compat
rename to trusty/debian/compat
similarity index 100%
rename from debian/control
rename to trusty/debian/control
similarity index 100%
rename from debian/copyright
rename to trusty/debian/copyright
similarity index 100%
rename from debian/gbp.conf
rename to trusty/debian/gbp.conf
similarity index 88%
rename from debian/neutron-common.config.in
rename to trusty/debian/neutron-common.config.in
index c179765b799db90d1848ad24cc0dc1e948992bfa..fce8879c11b7581319723a5a5794f0eeb32c569a 100644 (file)
@@ -85,11 +85,11 @@ db_get neutron/plugin-select
 NEUTRON_PLUGIN_NAME=${RET}
 
 read_nova_admin_credentials () {
-       pkgos_read_config -p high ${N_CONF} DEFAULT nova_url neutron/nova_url
-       pkgos_read_config -p high ${N_CONF} DEFAULT nova_region_name neutron/nova_region
-       pkgos_read_config -p medium ${N_CONF} DEFAULT nova_admin_tenant_id neutron/nova_admin_tenant_id
-       pkgos_read_config -p medium ${N_CONF} DEFAULT nova_admin_username neutron/nova_admin_username
-       pkgos_read_config -p high ${N_CONF} DEFAULT nova_admin_password neutron/nova_admin_password
+       pkgos_read_config -p high ${N_CONF} nova url neutron/nova_url
+       pkgos_read_config -p high ${N_CONF} nova region_name neutron/nova_region
+       pkgos_read_config -p medium ${N_CONF} nova project_name neutron/nova_admin_tenant_name
+       pkgos_read_config -p medium ${N_CONF} nova username neutron/nova_admin_username
+       pkgos_read_config -p high ${N_CONF} nova password neutron/nova_admin_password
 }
 
 # OVS specific configurations (if that's the one selected)
similarity index 91%
rename from debian/neutron-common.postinst.in
rename to trusty/debian/neutron-common.postinst.in
index 07bf75cbb40510b3a918d5ec13a59078f8e95607..e65290df94c9782da6c3f89f1aa8464903884577 100644 (file)
@@ -59,15 +59,21 @@ neutron_core_plugin_class () {
 
 write_nova_admin_credentials () {
         db_get neutron/nova_url
-       pkgos_inifile set ${N_CONF} DEFAULT nova_url ${RET}
+       pkgos_inifile set ${N_CONF} nova url ${RET}
+
        db_get neutron/nova_region
-       pkgos_inifile set ${N_CONF} DEFAULT nova_region_name ${RET}
-       db_get neutron/nova_admin_tenant_id
-       pkgos_inifile set ${N_CONF} DEFAULT nova_admin_tenant_id ${RET}
+       pkgos_inifile set ${N_CONF} nova region_name ${RET}
+
+       db_get neutron/nova_admin_tenant_name
+       NOVA_TENANT_NAME_IN_NEUTRON=${RET}
+       pkgos_inifile set ${N_CONF} nova tenant_name ${RET}
+       pkgos_inifile set ${N_CONF} nova project_name ${RET}
+
        db_get neutron/nova_admin_username
-       pkgos_inifile set ${N_CONF} DEFAULT nova_admin_username ${RET}
+       pkgos_inifile set ${N_CONF} nova username ${RET}
+
        db_get neutron/nova_admin_password
-       pkgos_inifile set ${N_CONF} DEFAULT nova_admin_password ${RET}
+       pkgos_inifile set ${N_CONF} nova password ${RET}
 }
 
 if [ "$1" = "configure" ] || [ "$1" = "reconfigure" ] ; then
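The net effect of the two hunks above is that the debconf answers now land in a [nova] section instead of nova_* options under [DEFAULT]. With hypothetical answers, the writes reduce to (pkgos_inifile comes from openstack-pkg-tools):

    N_CONF=/etc/neutron/neutron.conf
    pkgos_inifile set ${N_CONF} nova url http://controller:8774/v2
    pkgos_inifile set ${N_CONF} nova region_name regionOne
    pkgos_inifile set ${N_CONF} nova project_name admin
    pkgos_inifile set ${N_CONF} nova username nova
    pkgos_inifile set ${N_CONF} nova password NOVA_PASS
    # yielding a neutron.conf fragment like:
    #   [nova]
    #   url = http://controller:8774/v2
    #   region_name = regionOne
    #   project_name = admin
    #   username = nova
    #   password = NOVA_PASS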
similarity index 97%
rename from debian/neutron-common.templates
rename to trusty/debian/neutron-common.templates
index bda27e0846c49375f670147200e1a74d98bdfc4a..3b0910bc8aecb2015a17434dc955bc2b8702af86 100644 (file)
@@ -139,12 +139,12 @@ Default: regionOne
 _Description: Nova server region name:
  Please enter the region of the Nova server.
 
-Template: neutron/nova_admin_tenant_id
+Template: neutron/nova_admin_tenant_name
 Type: string
 Default: admin
-_Description: Nova admin tenant ID:
+_Description: Nova admin tenant name:
  Neutron needs to be able to communicate with Nova through Keystone. Therefore
- Neutron needs to know the Nova admin tenant ID, username and password.
+ Neutron needs to know the Nova admin tenant name, username and password.
  .
- Please enter the ID of the admin tenant for Nova.
+ Please enter the name of the admin tenant for Nova.
 
similarity index 100%
rename from debian/po/da.po
rename to trusty/debian/po/da.po
similarity index 100%
rename from debian/po/de.po
rename to trusty/debian/po/de.po
similarity index 100%
rename from debian/po/es.po
rename to trusty/debian/po/es.po
similarity index 100%
rename from debian/po/fr.po
rename to trusty/debian/po/fr.po
similarity index 100%
rename from debian/po/it.po
rename to trusty/debian/po/it.po
similarity index 100%
rename from debian/po/nl.po
rename to trusty/debian/po/nl.po
similarity index 100%
rename from debian/po/pt.po
rename to trusty/debian/po/pt.po
similarity index 100%
rename from debian/po/ru.po
rename to trusty/debian/po/ru.po
similarity index 100%
rename from debian/po/sv.po
rename to trusty/debian/po/sv.po
similarity index 100%
rename from debian/rules
rename to trusty/debian/rules
similarity index 100%
rename from debian/watch
rename to trusty/debian/watch